openmpi/ompi/mca/pml/dr/pml_dr_recvreq.h

/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
/**
 * @file
 */
#ifndef OMPI_PML_DR_RECV_REQUEST_H
#define OMPI_PML_DR_RECV_REQUEST_H
#include "ompi_config.h"
#include "ompi/mca/mpool/base/base.h"
#include "ompi/mca/pml/base/pml_base_recvreq.h"
#include "pml_dr.h"
#include "pml_dr_proc.h"
#include "pml_dr_vfrag.h"
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif

/**
 * Receive request descriptor used by the DR PML.
 */
struct mca_pml_dr_recv_request_t {
    mca_pml_base_recv_request_t req_recv;   /**< base receive request */
    struct ompi_proc_t *req_proc;           /**< peer process */
#if OMPI_HAVE_THREAD_SUPPORT
    volatile int32_t req_lock;              /**< lock used while scheduling/progressing */
#else
    int32_t req_lock;
#endif
    size_t req_pipeline_depth;              /**< number of fragments currently in flight */
    size_t req_bytes_received;              /**< bytes received off the wire */
    size_t req_bytes_delivered;             /**< bytes unpacked into the user buffer */
    mca_pml_dr_vfrag_t *req_vfrag;          /**< most recently referenced vfrag */
    mca_pml_dr_vfrag_t req_vfrag0;          /**< embedded descriptor for the first vfrag */
    opal_list_t req_vfrags;                 /**< list of vfrags associated with this request */
    opal_mutex_t req_mutex;                 /**< protects req_vfrags */
};
typedef struct mca_pml_dr_recv_request_t mca_pml_dr_recv_request_t;
OBJ_CLASS_DECLARATION(mca_pml_dr_recv_request_t);

/**
 * Allocate a recv request from the module's free list.
 *
 * @param recvreq (OUT)  Receive request (NULL on failure).
 * @param rc (OUT)       OMPI_SUCCESS or error status on failure.
 */
#define MCA_PML_DR_RECV_REQUEST_ALLOC(recvreq, rc) \
do { \
    opal_list_item_t* item; \
    rc = OMPI_SUCCESS; \
    OMPI_FREE_LIST_GET(&mca_pml_dr.recv_requests, item, rc); \
    recvreq = (mca_pml_dr_recv_request_t*)item; \
} while(0)
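
/*
 * Illustrative usage sketch (not part of the original header; variable
 * names are hypothetical): allocate a request from the free list and
 * check the result before using it.
 *
 *     mca_pml_dr_recv_request_t* recvreq;
 *     int rc;
 *
 *     MCA_PML_DR_RECV_REQUEST_ALLOC(recvreq, rc);
 *     if (NULL == recvreq) {
 *         return OMPI_ERR_OUT_OF_RESOURCE;
 *     }
 */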

/**
 * Initialize a receive request with call parameters.
 *
 * @param request (IN)     Receive request.
 * @param addr (IN)        User buffer.
 * @param count (IN)       Number of elements of the indicated datatype.
 * @param datatype (IN)    User-defined datatype.
 * @param src (IN)         Source rank within the communicator.
 * @param tag (IN)         User-defined tag.
 * @param comm (IN)        Communicator.
 * @param persistent (IN)  Is this a persistent request?
 */
#define MCA_PML_DR_RECV_REQUEST_INIT( \
    request, \
    addr, \
    count, \
    datatype, \
    src, \
    tag, \
    comm, \
    persistent) \
do { \
    MCA_PML_BASE_RECV_REQUEST_INIT( \
        &(request)->req_recv, \
        addr, \
        count, \
        datatype, \
        src, \
        tag, \
        comm, \
        persistent); \
} while(0)
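
/*
 * Illustrative sketch (hypothetical caller, not part of the DR sources):
 * once allocated, the request is initialized with the arguments of the
 * posted receive.  'buf', 'count', 'datatype', 'src', 'tag' and 'comm'
 * stand in for the parameters of the MPI-level call.
 *
 *     MCA_PML_DR_RECV_REQUEST_INIT(recvreq, buf, count, datatype,
 *                                  src, tag, comm, false);
 */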

/**
 * Mark a recv request as complete at the PML and MPI levels.
 *
 * @param recvreq (IN)  Receive request.
 */
#define MCA_PML_DR_RECV_REQUEST_PML_COMPLETE(recvreq) \
do { \
    assert( false == recvreq->req_recv.req_base.req_pml_complete ); \
 \
    OPAL_THREAD_LOCK(&ompi_request_lock); \
    /* initialize request status */ \
    recvreq->req_recv.req_base.req_pml_complete = true; \
    recvreq->req_recv.req_base.req_ompi.req_status._count = \
        (recvreq->req_bytes_received < recvreq->req_bytes_delivered ? \
         recvreq->req_bytes_received : recvreq->req_bytes_delivered); \
    MCA_PML_BASE_REQUEST_MPI_COMPLETE( &(recvreq->req_recv.req_base.req_ompi) ); \
 \
    if( true == recvreq->req_recv.req_base.req_free_called ) { \
        MCA_PML_DR_RECV_REQUEST_RETURN( recvreq ); \
    } else if( recvreq->req_recv.req_base.req_ompi.req_persistent ) { \
        /* request was not freed - mark a persistent request inactive */ \
        recvreq->req_recv.req_base.req_ompi.req_state = OMPI_REQUEST_INACTIVE; \
    } \
    OPAL_THREAD_UNLOCK(&ompi_request_lock); \
} while(0)
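
/*
 * Illustrative sketch (assumption, not taken verbatim from the DR progress
 * code): a delivery path would typically signal PML completion once all of
 * the packed bytes have been received, e.g.:
 *
 *     if (recvreq->req_bytes_received >= recvreq->req_recv.req_bytes_packed) {
 *         MCA_PML_DR_RECV_REQUEST_PML_COMPLETE(recvreq);
 *     }
 */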

/**
 * Return a recv request to the module's free list.
 *
 * @param recvreq (IN)  Receive request.
 */
#define MCA_PML_DR_RECV_REQUEST_RETURN(recvreq) \
do { \
    opal_list_item_t* item; \
 \
    /* return vfrags */ \
    OPAL_THREAD_LOCK(&(recvreq)->req_mutex); \
    while(NULL != (item = opal_list_remove_first(&(recvreq)->req_vfrags))) { \
        OMPI_FREE_LIST_RETURN(&mca_pml_dr.vfrags, item); \
    } \
    OPAL_THREAD_UNLOCK(&(recvreq)->req_mutex); \
 \
    /* decrement reference counts */ \
    MCA_PML_BASE_RECV_REQUEST_FINI(&(recvreq)->req_recv); \
    OMPI_FREE_LIST_RETURN(&mca_pml_dr.recv_requests, (opal_list_item_t*)(recvreq)); \
} while(0)

/**
 * Attempt to match the request against the unexpected fragment list
 * for all source ranks within the communicator.
 *
 * @param request (IN)  Request to match.
 */
void mca_pml_dr_recv_request_match_wild(mca_pml_dr_recv_request_t* request);

/**
 * Attempt to match the request against the unexpected fragment list
 * for a specific source rank.
 *
 * @param request (IN)  Request to match.
 */
void mca_pml_dr_recv_request_match_specific(mca_pml_dr_recv_request_t* request);

/**
 * Start an initialized request.
 *
 * @param request (IN)  Receive request.
 */
#define MCA_PML_DR_RECV_REQUEST_START(request) \
do { \
    /* init/re-init the request */ \
    (request)->req_bytes_received = 0; \
    (request)->req_bytes_delivered = 0; \
    (request)->req_lock = 0; \
    (request)->req_pipeline_depth = 0; \
    (request)->req_recv.req_base.req_pml_complete = false; \
    (request)->req_recv.req_base.req_ompi.req_complete = false; \
    (request)->req_recv.req_base.req_ompi.req_state = OMPI_REQUEST_ACTIVE; \
    (request)->req_vfrag = &(request)->req_vfrag0; \
 \
    /* always set the req_status.MPI_TAG to ANY_TAG before starting the \
     * request - this field is used on cancel to determine whether the \
     * request has been matched or not. \
     */ \
    (request)->req_recv.req_base.req_ompi.req_status.MPI_TAG = OMPI_ANY_TAG; \
    (request)->req_recv.req_base.req_ompi.req_status.MPI_ERROR = OMPI_SUCCESS; \
    (request)->req_recv.req_base.req_ompi.req_status._cancelled = 0; \
 \
    /* attempt to match posted recv */ \
    if((request)->req_recv.req_base.req_peer == OMPI_ANY_SOURCE) { \
        mca_pml_dr_recv_request_match_wild(request); \
    } else { \
        mca_pml_dr_recv_request_match_specific(request); \
    } \
} while (0)
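
/*
 * Illustrative sketch (hypothetical caller, not part of the DR sources):
 * START resets the per-iteration state and attempts the match, so a
 * persistent request built with ALLOC/INIT can be started repeatedly:
 *
 *     MCA_PML_DR_RECV_REQUEST_START(recvreq);
 *     ... wait for completion ...
 *     MCA_PML_DR_RECV_REQUEST_START(recvreq);    /- restart persistent recv
 */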

/**
 * Mark the request as matched: record the tag and source from the matched
 * header and, if the request expects data, prepare the convertor used to
 * unpack it into the user buffer.
 *
 * @param request (IN)  Receive request.
 * @param hdr (IN)      Matched fragment header.
 */
#define MCA_PML_DR_RECV_REQUEST_MATCHED( \
    request, \
    hdr) \
do { \
    (request)->req_recv.req_base.req_ompi.req_status.MPI_TAG = (hdr)->hdr_tag; \
    (request)->req_recv.req_base.req_ompi.req_status.MPI_SOURCE = (hdr)->hdr_src; \
    if((request)->req_recv.req_bytes_packed != 0) { \
        ompi_proc_t *proc = \
            ompi_comm_peer_lookup( \
                (request)->req_recv.req_base.req_comm, (hdr)->hdr_src); \
 \
        (request)->req_proc = proc; \
        ompi_convertor_copy_and_prepare_for_recv( proc->proc_convertor, \
            (request)->req_recv.req_base.req_datatype, \
            (request)->req_recv.req_base.req_count, \
            (request)->req_recv.req_base.req_addr, \
            &(request)->req_recv.req_convertor ); \
    } else { \
        (request)->req_proc = NULL; \
    } \
} while (0)

/**
 * Unpack the received segments into the user buffer, skipping seg_offset
 * bytes of header at the front of the segment data.
 *
 * @param request (IN)          Receive request.
 * @param segments (IN)         Array of BTL segments.
 * @param num_segments (IN)     Number of segments.
 * @param seg_offset (IN)       Bytes to skip at the start of the segment data.
 * @param data_offset (IN)      Offset into the packed user data.
 * @param bytes_received (IN)   Number of payload bytes received.
 * @param bytes_delivered (OUT) Number of bytes unpacked into the user buffer.
 * @param csum (OUT)            Checksum computed over the unpacked data.
 */
#define MCA_PML_DR_RECV_REQUEST_UNPACK( \
    request, \
    segments, \
    num_segments, \
    seg_offset, \
    data_offset, \
    bytes_received, \
    bytes_delivered, \
    csum) \
do { \
    if(request->req_recv.req_bytes_packed > 0) { \
        struct iovec iov[MCA_BTL_DES_MAX_SEGMENTS]; \
        uint32_t iov_count = 0; \
        size_t max_data = bytes_received; \
        int32_t free_after = 0; \
        size_t n, offset = seg_offset; \
 \
        for(n=0; n<num_segments; n++) { \
            mca_btl_base_segment_t* segment = segments+n; \
            if(offset >= segment->seg_len) { \
                /* segment is entirely header - skip it */ \
                offset -= segment->seg_len; \
            } else { \
                /* start at the remaining offset, then take subsequent \
                 * segments in full */ \
                iov[iov_count].iov_len = segment->seg_len - offset; \
                iov[iov_count].iov_base = (void*)((unsigned char*)segment->seg_addr.pval + offset); \
                iov_count++; \
                offset = 0; \
            } \
        } \
        ompi_convertor_set_position( \
            &(request->req_recv.req_convertor), \
            &data_offset); \
        ompi_convertor_unpack( \
            &(request)->req_recv.req_convertor, \
            iov, \
            &iov_count, \
            &max_data, \
            &free_after); \
        bytes_delivered = max_data; \
        csum = request->req_recv.req_convertor.checksum; \
    } else { \
        bytes_delivered = 0; \
        csum = OMPI_CSUM_ZERO; \
    } \
} while (0)
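
/*
 * Illustrative sketch (assumption; 'hdr_size', 'hdr_csum' and the other
 * locals are hypothetical): a fragment-arrival path unpacks the payload,
 * skipping the DR header at the front of the first segment, and compares
 * the returned checksum against the one carried in the header:
 *
 *     size_t bytes_delivered;
 *     uint32_t csum;
 *
 *     MCA_PML_DR_RECV_REQUEST_UNPACK(recvreq, segments, num_segments,
 *                                    hdr_size, data_offset, bytes_received,
 *                                    bytes_delivered, csum);
 *     if (csum != hdr_csum) {
 *         ... corrupt data - do not ack, wait for retransmission ...
 *     }
 */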

/**
 * Progress the receive request with segments received from a BTL.
 */
void mca_pml_dr_recv_request_progress(
    mca_pml_dr_recv_request_t* req,
    struct mca_btl_base_module_t* btl,
    mca_btl_base_segment_t* segments,
    size_t num_segments);

/**
 * Update a probe request that has been matched by an incoming fragment.
 */
void mca_pml_dr_recv_request_matched_probe(
    mca_pml_dr_recv_request_t* req,
    struct mca_btl_base_module_t* btl,
    mca_btl_base_segment_t* segments,
    size_t num_segments);

/**
 * Continue scheduling of the receive request.
 */
void mca_pml_dr_recv_request_schedule(
    mca_pml_dr_recv_request_t* req);

/**
 * Locate the vfrag descriptor that corresponds to the vfrag id carried in
 * the incoming header, allocating and initializing a new descriptor if one
 * is not already on the request's vfrag list.
 *
 * @param recvreq (IN)  Receive request.
 * @param hdr (IN)      Incoming fragment header.
 * @param vfrag (OUT)   Matching vfrag descriptor (NULL if allocation failed).
 */
#define MCA_PML_DR_RECV_REQUEST_VFRAG_LOOKUP(recvreq,hdr,vfrag) \
do { \
    if((recvreq)->req_vfrag->vf_id == (hdr)->hdr_vid) { \
        vfrag = (recvreq)->req_vfrag; \
    } else if ((hdr)->hdr_frag_offset == 0) { \
        vfrag = &(recvreq)->req_vfrag0; \
    } else { \
        opal_list_item_t* item; \
        int rc; \
 \
        vfrag = NULL; \
        OPAL_THREAD_LOCK(&(recvreq)->req_mutex); \
        /* look for an existing vfrag with a matching id */ \
        for(item = opal_list_get_first(&(recvreq)->req_vfrags); \
            item != opal_list_get_end(&(recvreq)->req_vfrags); \
            item = opal_list_get_next(item)) { \
            mca_pml_dr_vfrag_t* vf = (mca_pml_dr_vfrag_t*)item; \
            if(vf->vf_id == (hdr)->hdr_vid) { \
                vfrag = vf; \
                break; \
            } \
        } \
        /* not found - allocate and initialize a new vfrag descriptor */ \
        if(NULL == vfrag) { \
            MCA_PML_DR_VFRAG_ALLOC(vfrag,rc); \
            if(NULL != vfrag) { \
                (vfrag)->vf_id = (hdr)->hdr_vid; \
                (vfrag)->vf_len = (hdr)->hdr_vlen; \
                (vfrag)->vf_ack = 0; \
                (vfrag)->vf_mask_processed = 0; \
                if((hdr)->hdr_vlen == 64) { \
                    (vfrag)->vf_mask = ~(uint64_t)0; \
                } else { \
                    (vfrag)->vf_mask = (((uint64_t)1 << (hdr)->hdr_vlen)-1); \
                } \
                opal_list_append(&(recvreq)->req_vfrags, (opal_list_item_t*)vfrag); \
                (recvreq)->req_vfrag = vfrag; \
            } \
        } \
        OPAL_THREAD_UNLOCK(&(recvreq)->req_mutex); \
    } \
} while(0)
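
/*
 * Illustrative sketch (hypothetical handler, not part of the DR sources):
 * when a data fragment arrives, the vfrag descriptor it belongs to is
 * looked up (or created) before the fragment is recorded:
 *
 *     mca_pml_dr_vfrag_t* vfrag;
 *
 *     MCA_PML_DR_RECV_REQUEST_VFRAG_LOOKUP(recvreq, hdr, vfrag);
 *     if (NULL == vfrag) {
 *         ... allocation failed - drop the fragment and rely on the
 *             sender retransmitting it ...
 *     }
 */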

#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
#endif /* OMPI_PML_DR_RECV_REQUEST_H */