openmpi/ompi/mca/pml/cm/pml_cm_recvreq.c
George Bosilca 433f8a7694 This patch brings full support for message queues in
Open MPI. The send and receive queues are now shared among all PMLs: they are
declared in the base PML, and the selected PML is in charge of initializing and
releasing them.
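
(A minimal, self-contained sketch of that ownership split, with hypothetical names rather than the actual OpenMPI symbols: the queue object lives in a shared "base" layer, and whichever PML is selected sizes it at init time and drains it at finalize.)

#include <stdlib.h>

/* "base" layer: one pool object, shared by every PML (hypothetical). */
typedef struct { void **slots; size_t count; } request_pool_t;
request_pool_t pml_base_recv_requests;

/* The selected PML decides the element size and fills the shared pool... */
int selected_pml_init(size_t elem_size, size_t count)
{
    pml_base_recv_requests.slots = calloc(count, sizeof(void *));
    if (NULL == pml_base_recv_requests.slots) return -1;
    for (size_t i = 0; i < count; ++i)
        pml_base_recv_requests.slots[i] = malloc(elem_size);
    pml_base_recv_requests.count = count;
    return 0;
}

/* ...and releases it when the component shuts down. */
void selected_pml_fini(void)
{
    for (size_t i = 0; i < pml_base_recv_requests.count; ++i)
        free(pml_base_recv_requests.slots[i]);
    free(pml_base_recv_requests.slots);
    pml_base_recv_requests.count = 0;
}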

The CM PML is slightly different from OB1 or DR: internally it uses two
different types of requests, light and heavy. With this patch, however, both
types of request are stored in the same queue and cast appropriately in the
allocation macro. This means we might use less memory than we allocate, but in
exchange we get full support for most of the parallel debuggers.
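
(A rough illustration of the single-queue idea, using made-up thin/heavy stand-ins rather than the real mca_pml_cm_*_recv_request_t types: every slot is sized for the heavy request, and the allocation macro casts the slot to whichever type the caller asked for, so a thin request simply leaves part of its slot unused.)

#include <stdlib.h>

typedef struct { int tag; int peer; } thin_req_t;                   /* light request */
typedef struct { int tag; int peer; char staging[512]; } hvy_req_t; /* heavy request */

/* One queue; every element is big enough for the larger (heavy) variant. */
#define REQ_SLOT_SIZE sizeof(hvy_req_t)

/* Allocation hands out a generic slot cast to the type the caller needs. */
#define REQ_ALLOC(type, out_ptr) \
    do { (out_ptr) = (type *) malloc(REQ_SLOT_SIZE); } while (0)

int main(void)
{
    thin_req_t *thin;
    hvy_req_t  *hvy;
    REQ_ALLOC(thin_req_t, thin);  /* uses less memory than was allocated */
    REQ_ALLOC(hvy_req_t,  hvy);
    free(thin);
    free(hvy);
    return 0;
}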

Another change in this patch is that for all PMLs (CM included) the basic PML
requests now start with the same fields, declared in the same order in the
request structure. Moreover, the fields have been arranged so that (hopefully)
only one volatile/atomic exists per cache line.
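
(A hedged sketch of the layout idea, with a hypothetical base request rather than the real PML request structures: the leading fields are identical across PMLs, and the single volatile completion flag is pushed onto its own cache line, assuming 64-byte lines.)

#include <stddef.h>

#define CACHE_LINE 64   /* assumed cache-line size */

typedef struct {
    union {
        struct {
            int   req_type;   /* common leading fields, same order in every PML */
            int   req_peer;
            int   req_tag;
            void *req_comm;
        } hdr;
        char pad[CACHE_LINE]; /* round the read-mostly part up to one full line */
    } u;
    volatile int req_complete; /* the lone volatile/atomic, alone on its line */
} base_request_t;

/* Sanity check: the volatile flag starts exactly one cache line in. */
_Static_assert(offsetof(base_request_t, req_complete) == CACHE_LINE,
               "completion flag should start on its own cache line");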

This commit was SVN r15346.
2007-07-10 22:16:38 +00:00

82 lines
2.7 KiB
C

/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2007 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2006 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#include "ompi_config.h"
#include "pml_cm.h"
#include "pml_cm_recvreq.h"

/* Mark the request as freed; if the PML has already completed it, return it
 * to the request free list shared by the thin and heavy variants. */
static int
mca_pml_cm_recv_request_free(struct ompi_request_t** request)
{
    mca_pml_cm_request_t* recvreq = *(mca_pml_cm_request_t**)request;

    assert( false == recvreq->req_free_called );

    OPAL_THREAD_LOCK(&ompi_request_lock);
    recvreq->req_free_called = true;
    if( true == recvreq->req_pml_complete ) {
        if( MCA_PML_CM_REQUEST_RECV_THIN == recvreq->req_pml_type ) {
            MCA_PML_CM_THIN_RECV_REQUEST_RETURN((mca_pml_cm_hvy_recv_request_t*)recvreq );
        } else {
            MCA_PML_CM_HVY_RECV_REQUEST_RETURN((mca_pml_cm_hvy_recv_request_t*)recvreq );
        }
    }
    OPAL_THREAD_UNLOCK(&ompi_request_lock);

    *request = MPI_REQUEST_NULL;
    return OMPI_SUCCESS;
}

/* Completion callback invoked by the MTL: dispatch to the thin or heavy
 * PML completion macro based on the request type. */
static void
mca_pml_cm_recv_request_completion(struct mca_mtl_request_t *mtl_request)
{
    mca_pml_cm_request_t *base_request =
        (mca_pml_cm_request_t*) mtl_request->ompi_req;
    if( MCA_PML_CM_REQUEST_RECV_THIN == base_request->req_pml_type ) {
        MCA_PML_CM_THIN_RECV_REQUEST_PML_COMPLETE(((mca_pml_cm_thin_recv_request_t*)base_request));
    } else {
        MCA_PML_CM_HVY_RECV_REQUEST_PML_COMPLETE(((mca_pml_cm_hvy_recv_request_t*)base_request));
    }
}

/* One-time construction: link the MTL request back to the PML request,
 * install the free/cancel callbacks, and construct the convertor in place. */
static void
mca_pml_cm_recv_request_construct(mca_pml_cm_thin_recv_request_t* recvreq)
{
    recvreq->req_mtl.ompi_req = (ompi_request_t*) recvreq;
    recvreq->req_mtl.completion_callback = mca_pml_cm_recv_request_completion;
    recvreq->req_base.req_ompi.req_free = mca_pml_cm_recv_request_free;
    recvreq->req_base.req_ompi.req_cancel = mca_pml_cm_cancel;
    OBJ_CONSTRUCT( &(recvreq->req_base.req_convertor), ompi_convertor_t );
}

OBJ_CLASS_INSTANCE(mca_pml_cm_thin_recv_request_t,
                   mca_pml_cm_request_t,
                   mca_pml_cm_recv_request_construct,
                   NULL);

OBJ_CLASS_INSTANCE(mca_pml_cm_hvy_recv_request_t,
                   mca_pml_cm_request_t,
                   mca_pml_cm_recv_request_construct,
                   NULL);