Remove some function calls in the critical path.

This commit was SVN r3516.
Contained in:
parent 83281249e5
commit 705065888f
@@ -36,6 +36,7 @@ struct mca_pml_base_send_request_t {
    size_t req_peer_size;              /**< size of peers remote buffer */
    bool req_cached;                   /**< has this request been obtained from the ptls cache */
    ompi_convertor_t req_convertor;    /**< convertor that describes this datatype */
    volatile int32_t req_lock;         /**< lock used by the scheduler */
};
typedef struct mca_pml_base_send_request_t mca_pml_base_send_request_t;

@@ -134,7 +135,10 @@ static inline void mca_pml_base_send_request_offset(
    mca_pml_base_send_request_t* request,
    size_t offset)
{
    ompi_atomic_add( &(request->req_offset), offset );
    if(ompi_using_threads())
        ompi_atomic_add( &(request->req_offset), offset );
    else
        request->req_offset += offset;
}

#if defined(c_plusplus) || defined(__cplusplus)
@@ -241,6 +241,7 @@ OMPI_COMP_EXPORT extern int mca_pml_teg_start(
        case MCA_PML_REQUEST_SEND: \
            { \
            mca_pml_base_send_request_t* sendreq = (mca_pml_base_send_request_t*)pml_request; \
            while(sendreq->req_lock > 0); \
            if(sendreq->req_send_mode == MCA_PML_BASE_SEND_BUFFERED) { \
                mca_pml_base_bsend_request_fini((ompi_request_t*)sendreq); \
            } \
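The new `while(sendreq->req_lock > 0);` spin in this hunk makes the free path wait until the scheduler has drained its reference count on the request before the buffered-send resources are released. A toy, standalone illustration of that drain-before-teardown shape (hypothetical names and C11 atomics, not the Open MPI request code):

#include <stdatomic.h>
#include <stdlib.h>

typedef struct {
    atomic_int sched_lock;   /* >0 while a scheduling pass still references the request */
    char      *buffer;       /* resource that must not be released under the scheduler */
} send_request_t;

static void request_fini(send_request_t *req)
{
    /* wait for the scheduler's count to drain before tearing down; the wait is
     * expected to be short and bounded, so a spin is cheaper than a lock */
    while (atomic_load(&req->sched_lock) > 0)
        ;
    free(req->buffer);
    req->buffer = NULL;
}

int main(void)
{
    send_request_t req;
    atomic_init(&req.sched_lock, 0);
    req.buffer = malloc(32);
    request_fini(&req);      /* no pass in flight, returns immediately */
    return 0;
}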
@@ -29,10 +29,10 @@ bool mca_pml_teg_recv_frag_match(
    mca_ptl_base_match_header_t* header)
{
    bool matched;
    bool matches = false;
    ompi_list_t matched_frags;
    OBJ_CONSTRUCT(&matched_frags, ompi_list_t);
    if((matched = mca_ptl_base_match(header, frag, &matched_frags)) == false) {
        frag = (mca_ptl_base_recv_frag_t*)ompi_list_remove_first(&matched_frags);
    if((matched = mca_ptl_base_match(header, frag, &matched_frags, &matches)) == false) {
        frag = (matches ? (mca_ptl_base_recv_frag_t*)ompi_list_remove_first(&matched_frags) : NULL);
    }

    while(NULL != frag) {

@@ -69,9 +69,10 @@ bool mca_pml_teg_recv_frag_match(
        /* notify ptl of match */
        ptl->ptl_matched(ptl, frag);

        /* process any additional fragments that arrived out of order */
        frag = (mca_ptl_base_recv_frag_t*)ompi_list_remove_first(&matched_frags);
    };

        /* process any additional fragments that arrived out of order */
        frag = (matches ? (mca_ptl_base_recv_frag_t*)ompi_list_remove_first(&matched_frags) : NULL);
    };
    return matched;
}
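The reworked caller passes a `matches` flag so it only touches the additional-fragments list when the matcher reports that out-of-order fragments were actually queued; in the common in-order case the list is never constructed or walked. A condensed sketch of that call pattern with simplified stand-in types (not the real PTL interfaces):

#include <stdbool.h>
#include <stdio.h>

typedef struct frag { struct frag *next; int id; } frag_t;
typedef struct { frag_t *head; } frag_list_t;

static frag_t *list_pop(frag_list_t *l)
{
    frag_t *f = l->head;
    if (f != NULL)
        l->head = f->next;
    return f;
}

/* stand-in for the matcher: returns whether the incoming fragment matched,
 * and reports through *extra whether it queued anything into *extras, so the
 * caller can skip the extras list entirely in the common case */
static bool match_fragment(frag_t *in, frag_list_t *extras, bool *extra)
{
    (void)in;
    (void)extras;
    *extra = false;   /* nothing arrived out of order */
    return true;      /* the incoming fragment matched a posted receive */
}

int main(void)
{
    frag_t incoming = { NULL, 1 };
    frag_list_t extras;      /* deliberately left unconstructed */
    bool extra = false;
    frag_t *frag = &incoming;

    if (!match_fragment(frag, &extras, &extra))
        frag = extra ? list_pop(&extras) : NULL;

    while (frag != NULL) {
        printf("deliver fragment %d\n", frag->id);
        /* only touch the extras list if the matcher said it filled it */
        frag = extra ? list_pop(&extras) : NULL;
    }
    return 0;
}

The out-parameter carries the "did I fill the list" fact that the boolean return value of the matcher cannot, which is what lets the hot path avoid both the list construction and the final drain.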
@@ -14,6 +14,9 @@
#include "pml_teg_recvreq.h"

#define OMPI_THREAD_ADD(x,y) \
   (ompi_using_threads() ? ompi_atomic_add_32(x,y) : (*x += y))

static int mca_pml_teg_send_request_fini(struct ompi_request_t** request)
{
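The OMPI_THREAD_ADD macro defined in this hunk folds the ompi_using_threads() test into the add itself, so the single-threaded path is a plain increment with no function call and no atomic instruction. A minimal standalone sketch of the same pattern, using hypothetical names and the GCC/Clang __atomic builtins (not the Open MPI implementation):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* set once at startup; true only when concurrent threads can race on counters */
static bool using_threads = false;

/* add y to *x, taking the atomic path only when it can actually race */
#define THREAD_ADD32(x, y) \
    (using_threads ? __atomic_add_fetch((x), (y), __ATOMIC_SEQ_CST) \
                   : (*(x) += (y)))

int main(void)
{
    volatile int32_t offset = 0;
    THREAD_ADD32(&offset, 16);   /* single-threaded: plain load/add/store */
    using_threads = true;
    THREAD_ADD32(&offset, 16);   /* threaded: locked add */
    printf("offset = %d\n", (int)offset);
    return 0;
}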
@@ -63,55 +66,66 @@ OBJ_CLASS_INSTANCE(
 *
 */

int mca_pml_teg_send_request_schedule(mca_pml_base_send_request_t* req)
{
    ompi_proc_t *proc = ompi_comm_peer_lookup(req->req_base.req_comm, req->req_base.req_peer);
    mca_pml_proc_t* proc_pml = proc->proc_pml;
    int send_count = 0;
    size_t bytes_remaining;
    size_t num_ptl_avail;
    size_t num_ptl;

    /* allocate remaining bytes to PTLs */
    size_t bytes_remaining = req->req_bytes_packed - req->req_offset;
    size_t num_ptl_avail = proc_pml->proc_ptl_next.ptl_size;
    size_t num_ptl = 0;
    while(bytes_remaining > 0 && num_ptl++ < num_ptl_avail) {
    mca_ptl_proc_t* ptl_proc = mca_ptl_array_get_next(&proc_pml->proc_ptl_next);
    mca_ptl_base_module_t* ptl = ptl_proc->ptl;
    int rc;

    /* if this is the last PTL that is available to use, or the number of
     * bytes remaining in the message is less than the PTLs minimum fragment
     * size, then go ahead and give the rest of the message to this PTL.
     */
    size_t bytes_to_frag;
    if(num_ptl == num_ptl_avail || bytes_remaining < ptl->ptl_min_frag_size) {
    bytes_to_frag = bytes_remaining;

    /* otherwise attempt to give the PTL a percentage of the message
     * based on a weighting factor. for simplicity calculate this as
     * a percentage of the overall message length (regardless of amount
     * previously assigned)
     */
    } else {
    bytes_to_frag = (ptl_proc->ptl_weight * bytes_remaining) / 100;
    }

    /* makes sure that we don't exceed ptl_max_frag_size */
    if(ptl->ptl_max_frag_size != 0 && bytes_to_frag > ptl->ptl_max_frag_size)
    bytes_to_frag = ptl->ptl_max_frag_size;

    rc = ptl->ptl_put(ptl, ptl_proc->ptl_peer, req, req->req_offset, bytes_to_frag, 0);
    if(rc == OMPI_SUCCESS) {
    send_count++;
    if(OMPI_THREAD_ADD(&req->req_lock,1) == 1) {
    do {
    /* allocate remaining bytes to PTLs */
    bytes_remaining = req->req_bytes_packed - req->req_offset;
    }
    }
    num_ptl_avail = proc_pml->proc_ptl_next.ptl_size;
    num_ptl = 0;
    while(bytes_remaining > 0 && num_ptl++ < num_ptl_avail) {
    mca_ptl_proc_t* ptl_proc = mca_ptl_array_get_next(&proc_pml->proc_ptl_next);
    mca_ptl_base_module_t* ptl = ptl_proc->ptl;
    int rc;

    /* unable to complete send - queue for later */
    if(send_count == 0) {
    OMPI_THREAD_LOCK(&mca_pml_teg.teg_lock);
    ompi_list_append(&mca_pml_teg.teg_send_pending, (ompi_list_item_t*)req);
    OMPI_THREAD_UNLOCK(&mca_pml_teg.teg_lock);
    return OMPI_ERR_OUT_OF_RESOURCE;
    /* if this is the last PTL that is available to use, or the number of
     * bytes remaining in the message is less than the PTLs minimum fragment
     * size, then go ahead and give the rest of the message to this PTL.
     */
    size_t bytes_to_frag;
    if(num_ptl == num_ptl_avail || bytes_remaining < ptl->ptl_min_frag_size) {
    bytes_to_frag = bytes_remaining;

    /* otherwise attempt to give the PTL a percentage of the message
     * based on a weighting factor. for simplicity calculate this as
     * a percentage of the overall message length (regardless of amount
     * previously assigned)
     */
    } else {
    bytes_to_frag = (ptl_proc->ptl_weight * bytes_remaining) / 100;
    }

    /* makes sure that we don't exceed ptl_max_frag_size */
    if(ptl->ptl_max_frag_size != 0 && bytes_to_frag > ptl->ptl_max_frag_size)
    bytes_to_frag = ptl->ptl_max_frag_size;

    rc = ptl->ptl_put(ptl, ptl_proc->ptl_peer, req, req->req_offset, bytes_to_frag, 0);
    if(rc == OMPI_SUCCESS) {
    send_count++;
    bytes_remaining = req->req_bytes_packed - req->req_offset;
    }
    }

    /* unable to complete send - queue for later */
    if(send_count == 0) {
    OMPI_THREAD_LOCK(&mca_pml_teg.teg_lock);
    ompi_list_append(&mca_pml_teg.teg_send_pending, (ompi_list_item_t*)req);
    OMPI_THREAD_UNLOCK(&mca_pml_teg.teg_lock);
    req->req_lock = 0;
    return OMPI_ERR_OUT_OF_RESOURCE;
    }

    /* fragments completed while scheduling - so retry */
    } while(OMPI_THREAD_ADD(&req->req_lock,-1) > 0);
    }
    return OMPI_SUCCESS;
}
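The rewritten scheduler wraps the whole PTL loop in `if (OMPI_THREAD_ADD(&req->req_lock,1) == 1) { do { ... } while (OMPI_THREAD_ADD(&req->req_lock,-1) > 0); }`: the first caller to bump the counter runs the scheduling pass, later callers just bump it and return, and the do/while re-runs the pass if anyone arrived while it was in progress. A stripped-down sketch of that gate using C11 atomics and hypothetical names (OMPI_THREAD_ADD returns the new value, which the fetch_add adjustments below mimic):

#include <stdatomic.h>
#include <stdio.h>

typedef struct {
    atomic_int sched_lock;   /* number of outstanding scheduling requests */
    int        bytes_left;
} request_t;

static void schedule_pass(request_t *req)
{
    /* one bounded pass over the available transports */
    if (req->bytes_left > 0)
        req->bytes_left -= 10;
    printf("pass ran, bytes_left=%d\n", req->bytes_left);
}

/* any thread may ask for (re)scheduling; only the first one in actually runs
 * the loop, and it keeps looping while others piled on in the meantime */
static void schedule(request_t *req)
{
    if (atomic_fetch_add(&req->sched_lock, 1) + 1 == 1) {
        do {
            schedule_pass(req);
        } while (atomic_fetch_add(&req->sched_lock, -1) - 1 > 0);
    }
}

int main(void)
{
    request_t req;
    atomic_init(&req.sched_lock, 0);
    req.bytes_left = 30;
    schedule(&req);   /* first caller runs the pass */
    schedule(&req);   /* would only bump the counter if a pass were running */
    return 0;
}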
@@ -149,9 +163,8 @@ void mca_pml_teg_send_request_progress(
        } else if (req->req_base.req_free_called) {
            MCA_PML_TEG_FREE((ompi_request_t**)&req);
        }
    }
    /* test to see if we have scheduled the entire request */
    if (req->req_offset < req->req_bytes_packed)
    } else if (req->req_offset < req->req_bytes_packed)
        schedule = true;
    OMPI_THREAD_UNLOCK(&ompi_request_lock);

@@ -121,6 +121,7 @@ OBJ_CLASS_DECLARATION(mca_pml_teg_send_request_t);
     * no additional cost \
     */ \
    req->req_offset = 0; \
    req->req_lock = 0; \
    req->req_bytes_sent = 0; \
    req->req_peer_match.lval = 0; \
    req->req_peer_addr.lval = 0; \
@@ -19,27 +19,260 @@
#include "mca/ptl/base/ptl_base_match.h"

/**
|
||||
* Try and match the incoming message fragment to the list of
|
||||
* "wild" receives
|
||||
*
|
||||
* @param frag_header Matching data from recived fragment (IN)
|
||||
*
|
||||
* @param pml_comm Pointer to the communicator structure used for
|
||||
* matching purposes. (IN)
|
||||
*
|
||||
* @return Matched receive
|
||||
*
|
||||
* This routine assumes that the appropriate matching locks are
|
||||
* set by the upper level routine.
|
||||
*/
|
||||
|
||||
#define MCA_PTL_BASE_CHECK_WILD_RECEIVES_FOR_MATCH(frag_header,pml_comm,return_match) \
|
||||
do { \
|
||||
/* local parameters */ \
|
||||
mca_pml_base_recv_request_t *wild_recv; \
|
||||
int frag_tag,recv_tag; \
|
||||
\
|
||||
/* initialization */ \
|
||||
frag_tag=frag_header->hdr_tag; \
|
||||
\
|
||||
/* \
|
||||
* Loop over the wild irecvs - no need to lock, the upper level \
|
||||
* locking is protecting from having other threads trying to \
|
||||
* change this list. \
|
||||
*/ \
|
||||
for(wild_recv = (mca_pml_base_recv_request_t *) \
|
||||
ompi_list_get_first(&(pml_comm->c_wild_receives)); \
|
||||
wild_recv != (mca_pml_base_recv_request_t *) \
|
||||
ompi_list_get_end(&(pml_comm->c_wild_receives)); \
|
||||
wild_recv = (mca_pml_base_recv_request_t *) \
|
||||
((ompi_list_item_t *)wild_recv)->ompi_list_next) { \
|
||||
\
|
||||
recv_tag = wild_recv->req_base.req_tag; \
|
||||
if ( \
|
||||
/* exact tag match */ \
|
||||
(frag_tag == recv_tag) || \
|
||||
/* wild tag match - negative tags (except for \
|
||||
* OMPI_ANY_TAG) are reserved for internal use, and will \
|
||||
* not be matched with OMPI_ANY_TAG */ \
|
||||
( (recv_tag == OMPI_ANY_TAG) && (0 <= frag_tag) ) ) \
|
||||
\
|
||||
{ \
|
||||
/* \
|
||||
* Mark that this is the matching irecv, and go to process it. \
|
||||
*/ \
|
||||
return_match = wild_recv; \
|
||||
\
|
||||
/* remove this irecv from the postd wild ireceive list */ \
|
||||
ompi_list_remove_item(&(pml_comm->c_wild_receives), \
|
||||
(ompi_list_item_t *)wild_recv); \
|
||||
\
|
||||
/* found match - no need to continue */ \
|
||||
break; \
|
||||
} \
|
||||
} \
|
||||
} while(0)
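The wild-receive search above is the body of what used to be a static helper function, recast as a do { ... } while(0) macro so the matching fast path pays no call overhead and the result comes back through a named output variable. The shape of that conversion on a deliberately trivial example (illustrative only, not from the Open MPI sources):

#include <stdio.h>

/* before: a small helper called on every message */
static int find_first_even_fn(const int *v, int n)
{
    int i;
    for (i = 0; i < n; i++)
        if (v[i] % 2 == 0)
            return v[i];
    return -1;
}

/* after: the same body as a statement-like macro; the "return value" is
 * written into the caller-supplied variable `out` */
#define FIND_FIRST_EVEN(v, n, out)          \
    do {                                    \
        int _i;                             \
        (out) = -1;                         \
        for (_i = 0; _i < (n); _i++) {      \
            if ((v)[_i] % 2 == 0) {         \
                (out) = (v)[_i];            \
                break;                      \
            }                               \
        }                                   \
    } while (0)

int main(void)
{
    int data[] = { 3, 5, 8, 9 };
    int hit;
    FIND_FIRST_EVEN(data, 4, hit);
    printf("fn=%d macro=%d\n", find_first_even_fn(data, 4), hit);
    return 0;
}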
|
||||
|
||||
|
||||
/**
|
||||
* Try and match the incoming message fragment to the list of
|
||||
* "specific" receives
|
||||
*
|
||||
* @param frag_header Matching data from recived fragment (IN)
|
||||
*
|
||||
* @param pml_comm Pointer to the communicator structure used for
|
||||
* matching purposes. (IN)
|
||||
*
|
||||
* @return Matched receive
|
||||
*
|
||||
* This routine assumes that the appropriate matching locks are
|
||||
* set by the upper level routine.
|
||||
*/
|
||||
#define MCA_PTL_BASE_CHECK_SPECIFIC_RECEIVES_FOR_MATCH(frag_header, pml_comm, return_match) \
|
||||
do { \
|
||||
/* local variables */ \
|
||||
mca_pml_base_recv_request_t *specific_recv; \
|
||||
int frag_src,recv_tag,frag_tag; \
|
||||
\
|
||||
/* initialization */ \
|
||||
frag_src = frag_header->hdr_src; \
|
||||
frag_tag=frag_header->hdr_tag; \
|
||||
\
|
||||
/* \
|
||||
* Loop over the specific irecvs. \
|
||||
*/ \
|
||||
for(specific_recv = (mca_pml_base_recv_request_t *) \
|
||||
ompi_list_get_first((pml_comm->c_specific_receives)+frag_src); \
|
||||
specific_recv != (mca_pml_base_recv_request_t *) \
|
||||
ompi_list_get_end((pml_comm->c_specific_receives)+frag_src); \
|
||||
specific_recv = (mca_pml_base_recv_request_t *) \
|
||||
((ompi_list_item_t *)specific_recv)->ompi_list_next) { \
|
||||
/* \
|
||||
* Check for a match \
|
||||
*/ \
|
||||
recv_tag = specific_recv->req_base.req_tag; \
|
||||
if ( (frag_tag == recv_tag) || \
|
||||
( (recv_tag == OMPI_ANY_TAG) && (0 <= frag_tag) ) ) { \
|
||||
\
|
||||
/* \
|
||||
* Match made \
|
||||
*/ \
|
||||
return_match = specific_recv; \
|
||||
\
|
||||
/* remove descriptor from posted specific ireceive list */ \
|
||||
ompi_list_remove_item((pml_comm->c_specific_receives)+frag_src, \
|
||||
(ompi_list_item_t *)specific_recv); \
|
||||
\
|
||||
break; \
|
||||
} \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
/**
|
||||
* Try and match the incoming message fragment to the list of
|
||||
* "wild" receives and "specific" receives. Used when both types
|
||||
* of receives have been posted, i.e. when we need to coordinate
|
||||
* between multiple lists to make sure ordered delivery occurs.
|
||||
*
|
||||
* @param frag_header Matching data from recived fragment (IN)
|
||||
*
|
||||
* @param pml_comm Pointer to the communicator structure used for
|
||||
* matching purposes. (IN)
|
||||
*
|
||||
* @return Matched receive
|
||||
*
|
||||
* This routine assumes that the appropriate matching locks are
|
||||
* set by the upper level routine.
|
||||
*/
|
||||
|
||||
mca_pml_base_recv_request_t*
|
||||
mca_ptl_base_check_specific_and_wild_receives_for_match(
|
||||
mca_ptl_base_match_header_t* frag_header,
|
||||
mca_pml_ptl_comm_t *pml_comm)
|
||||
{
|
||||
/* local variables */
|
||||
mca_pml_base_recv_request_t *specific_recv, *wild_recv, *return_match = NULL;
|
||||
mca_ptl_sequence_t wild_recv_seq, specific_recv_seq;
|
||||
int frag_src,frag_tag, wild_recv_tag, specific_recv_tag;
|
||||
|
||||
/* initialization */
|
||||
frag_src = frag_header->hdr_src;
|
||||
frag_tag=frag_header->hdr_tag;
|
||||
|
||||
/*
|
||||
* We know that when this is called, both specific and wild irecvs
|
||||
* have been posted.
|
||||
*/
|
||||
specific_recv = (mca_pml_base_recv_request_t *)
|
||||
ompi_list_get_first((pml_comm->c_specific_receives)+frag_src);
|
||||
wild_recv = (mca_pml_base_recv_request_t *)
|
||||
ompi_list_get_first(&(pml_comm->c_wild_receives));
|
||||
|
||||
specific_recv_seq = specific_recv->req_base.req_sequence;
|
||||
wild_recv_seq = wild_recv->req_base.req_sequence;
|
||||
|
||||
while (true) {
|
||||
if (wild_recv_seq < specific_recv_seq) {
|
||||
/*
|
||||
* wild recv is earlier than the specific one.
|
||||
*/
|
||||
/*
|
||||
* try and match
|
||||
*/
|
||||
wild_recv_tag = wild_recv->req_base.req_tag;
|
||||
if ( (frag_tag == wild_recv_tag) ||
|
||||
( (wild_recv_tag == OMPI_ANY_TAG) && (0 <= frag_tag) ) ) {
|
||||
/*
|
||||
* Match made
|
||||
*/
|
||||
return_match=wild_recv;
|
||||
|
||||
/* remove this recv from the wild receive queue */
|
||||
ompi_list_remove_item(&(pml_comm->c_wild_receives),
|
||||
(ompi_list_item_t *)wild_recv);
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* No match, go to the next.
|
||||
*/
|
||||
wild_recv=(mca_pml_base_recv_request_t *)
|
||||
((ompi_list_item_t *)wild_recv)->ompi_list_next;
|
||||
|
||||
/*
|
||||
* If that was the last wild one, just look at the
|
||||
* rest of the specific ones.
|
||||
*/
|
||||
if (wild_recv == (mca_pml_base_recv_request_t *)
|
||||
ompi_list_get_end(&(pml_comm->c_wild_receives)) )
|
||||
{
|
||||
MCA_PTL_BASE_CHECK_SPECIFIC_RECEIVES_FOR_MATCH(frag_header, pml_comm, return_match);
|
||||
return return_match;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get the sequence number for this recv, and go
|
||||
* back to the top of the loop.
|
||||
*/
|
||||
wild_recv_seq = wild_recv->req_base.req_sequence;
|
||||
|
||||
} else {
|
||||
/*
|
||||
* specific recv is earlier than the wild one.
|
||||
*/
|
||||
specific_recv_tag=specific_recv->req_base.req_tag;
|
||||
if ( (frag_tag == specific_recv_tag) ||
|
||||
( (specific_recv_tag == OMPI_ANY_TAG) && (0<=frag_tag)) )
|
||||
{
|
||||
/*
|
||||
* Match made
|
||||
*/
|
||||
return_match = specific_recv;
|
||||
/* remove descriptor from specific receive list */
|
||||
ompi_list_remove_item((pml_comm->c_specific_receives)+frag_src,
|
||||
(ompi_list_item_t *)specific_recv);
|
||||
return return_match;
|
||||
}
|
||||
|
||||
/*
|
||||
* No match, go on to the next specific irecv.
|
||||
*/
|
||||
specific_recv = (mca_pml_base_recv_request_t *)
|
||||
((ompi_list_item_t *)specific_recv)->ompi_list_next;
|
||||
|
||||
/*
|
||||
* If that was the last specific irecv, process the
|
||||
* rest of the wild ones.
|
||||
*/
|
||||
if (specific_recv == (mca_pml_base_recv_request_t *) \
|
||||
ompi_list_get_end((pml_comm->c_specific_receives)+frag_src) ) \
|
||||
{
|
||||
MCA_PTL_BASE_CHECK_WILD_RECEIVES_FOR_MATCH(frag_header, pml_comm, return_match);
|
||||
return return_match;
|
||||
}
|
||||
/*
|
||||
* Get the sequence number for this recv, and go
|
||||
* back to the top of the loop.
|
||||
*/
|
||||
specific_recv_seq = specific_recv->req_base.req_sequence;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Specialized matching routines for internal use only.
|
||||
*/
|
||||
|
||||
static mca_pml_base_recv_request_t *mca_ptl_base_check_receives_for_match(
|
||||
mca_ptl_base_match_header_t *frag_header,
|
||||
mca_pml_ptl_comm_t *ptl_comm);
|
||||
|
||||
static mca_pml_base_recv_request_t *mca_ptl_base_check_wild_receives_for_match(
|
||||
mca_ptl_base_match_header_t *frag_header,
|
||||
mca_pml_ptl_comm_t *ptl_comm);
|
||||
|
||||
static mca_pml_base_recv_request_t *mca_ptl_base_check_specific_receives_for_match(
|
||||
mca_ptl_base_match_header_t *frag_header,
|
||||
mca_pml_ptl_comm_t *ptl_comm);
|
||||
|
||||
static mca_pml_base_recv_request_t *mca_ptl_base_check_specific_and_wild_receives_for_match(
|
||||
mca_ptl_base_match_header_t *frag_header,
|
||||
mca_pml_ptl_comm_t *ptl_comm);
|
||||
|
||||
static void mca_ptl_base_check_cantmatch_for_match(
static bool mca_ptl_base_check_cantmatch_for_match(
    ompi_list_t *additional_matches,
    mca_pml_ptl_comm_t *pml_comm, int frag_src);

@@ -77,12 +310,13 @@ static void mca_ptl_base_check_cantmatch_for_match(
bool mca_ptl_base_match(
    mca_ptl_base_match_header_t *frag_header,
    mca_ptl_base_recv_frag_t *frag_desc,
    ompi_list_t *additional_matches)
    ompi_list_t *additional_matches,
    bool* additional_match)
{
    /* local variables */
    mca_ptl_sequence_t frag_msg_seq,next_msg_seq_expected;
    ompi_communicator_t *comm_ptr;
    mca_pml_base_recv_request_t *matched_receive;
    mca_pml_base_recv_request_t *matched_receive = NULL;
    mca_pml_ptl_comm_t *pml_comm;
    int frag_src;
    bool match_made=false;

@@ -120,9 +354,28 @@ bool mca_ptl_base_match(
    /* We're now expecting the next sequence number. */
    (pml_comm->c_next_msg_seq[frag_src])++;

    /* see if receive has already been posted */
    matched_receive = mca_ptl_base_check_receives_for_match(frag_header,
        pml_comm);
    /*
     * figure out what sort of matching logic to use, if need to
     * look only at "specific" receives, or "wild" receives,
     * or if we need to traverse both sets at the same time.
     */
    if (ompi_list_get_size((pml_comm->c_specific_receives)+frag_src) == 0 ){
        /*
         * There are only wild irecvs, so specialize the algorithm.
         */
        MCA_PTL_BASE_CHECK_WILD_RECEIVES_FOR_MATCH(frag_header, pml_comm, matched_receive);

    } else if (ompi_list_get_size(&(pml_comm->c_wild_receives)) == 0 ) {
        /*
         * There are only specific irecvs, so specialize the algorithm.
         */
        MCA_PTL_BASE_CHECK_SPECIFIC_RECEIVES_FOR_MATCH(frag_header, pml_comm, matched_receive);
    } else {
        /*
         * There are some of each.
         */
        matched_receive = mca_ptl_base_check_specific_and_wild_receives_for_match(frag_header, pml_comm);
    }

    /* if match found, process data */
    if (matched_receive) {

@@ -155,7 +408,7 @@ bool mca_ptl_base_match(
     */
    if (0 < ompi_list_get_size((pml_comm->c_frags_cant_match)+frag_src)) {

        mca_ptl_base_check_cantmatch_for_match(additional_matches,pml_comm,frag_src);
        *additional_match = mca_ptl_base_check_cantmatch_for_match(additional_matches,pml_comm,frag_src);

    }

@ -193,310 +446,36 @@ static mca_pml_base_recv_request_t *mca_ptl_base_check_receives_for_match
|
||||
(mca_ptl_base_match_header_t *frag_header, mca_pml_ptl_comm_t *pml_comm)
|
||||
{
|
||||
/* local parameters */
|
||||
mca_pml_base_recv_request_t *return_match;
|
||||
mca_pml_base_recv_request_t *return_match = NULL;
|
||||
int frag_src;
|
||||
|
||||
/* initialization */
|
||||
return_match=(mca_pml_base_recv_request_t *)NULL;
|
||||
|
||||
/*
|
||||
* figure out what sort of matching logic to use, if need to
|
||||
* look only at "specific" receives, or "wild" receives,
|
||||
* or if we need to traverse both sets at the same time.
|
||||
*/
|
||||
frag_src = frag_header->hdr_src;
|
||||
|
||||
if (ompi_list_get_size((pml_comm->c_specific_receives)+frag_src) == 0 ){
|
||||
/*
|
||||
* There are only wild irecvs, so specialize the algorithm.
|
||||
*/
|
||||
return_match = mca_ptl_base_check_wild_receives_for_match(frag_header, pml_comm);
|
||||
MCA_PTL_BASE_CHECK_WILD_RECEIVES_FOR_MATCH(frag_header, pml_comm, return_match);
|
||||
|
||||
} else if (ompi_list_get_size(&(pml_comm->c_wild_receives)) == 0 ) {
|
||||
/*
|
||||
* There are only specific irecvs, so specialize the algorithm.
|
||||
*/
|
||||
return_match = mca_ptl_base_check_specific_receives_for_match(frag_header,
|
||||
pml_comm);
|
||||
MCA_PTL_BASE_CHECK_SPECIFIC_RECEIVES_FOR_MATCH(frag_header, pml_comm, return_match);
|
||||
} else {
|
||||
/*
|
||||
* There are some of each.
|
||||
*/
|
||||
return_match = mca_ptl_base_check_specific_and_wild_receives_for_match(frag_header,
|
||||
pml_comm);
|
||||
MCA_PTL_BASE_CHECK_SPECIFIC_AND_WILD_RECEIVES_FOR_MATCH(frag_header, pml_comm, return_match);
|
||||
}
|
||||
|
||||
return return_match;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Try and match the incoming message fragment to the list of
|
||||
* "wild" receives
|
||||
*
|
||||
* @param frag_header Matching data from recived fragment (IN)
|
||||
*
|
||||
* @param pml_comm Pointer to the communicator structure used for
|
||||
* matching purposes. (IN)
|
||||
*
|
||||
* @return Matched receive
|
||||
*
|
||||
* This routine assumes that the appropriate matching locks are
|
||||
* set by the upper level routine.
|
||||
*/
|
||||
static mca_pml_base_recv_request_t *mca_ptl_base_check_wild_receives_for_match(
|
||||
mca_ptl_base_match_header_t *frag_header,
|
||||
mca_pml_ptl_comm_t *pml_comm)
|
||||
{
|
||||
/* local parameters */
|
||||
mca_pml_base_recv_request_t *return_match, *wild_recv;
|
||||
int frag_tag,recv_tag;
|
||||
|
||||
/* initialization */
|
||||
return_match=(mca_pml_base_recv_request_t *)NULL;
|
||||
frag_tag=frag_header->hdr_tag;
|
||||
|
||||
/*
|
||||
* Loop over the wild irecvs - no need to lock, the upper level
|
||||
* locking is protecting from having other threads trying to
|
||||
* change this list.
|
||||
*/
|
||||
for(wild_recv = (mca_pml_base_recv_request_t *)
|
||||
ompi_list_get_first(&(pml_comm->c_wild_receives));
|
||||
wild_recv != (mca_pml_base_recv_request_t *)
|
||||
ompi_list_get_end(&(pml_comm->c_wild_receives));
|
||||
wild_recv = (mca_pml_base_recv_request_t *)
|
||||
((ompi_list_item_t *)wild_recv)->ompi_list_next) {
|
||||
|
||||
recv_tag = wild_recv->req_base.req_tag;
|
||||
if (
|
||||
/* exact tag match */
|
||||
(frag_tag == recv_tag) ||
|
||||
/* wild tag match - negative tags (except for
|
||||
* OMPI_ANY_TAG) are reserved for internal use, and will
|
||||
* not be matched with OMPI_ANY_TAG */
|
||||
( (recv_tag == OMPI_ANY_TAG) && (0 <= frag_tag) ) )
|
||||
|
||||
{
|
||||
/*
|
||||
* Mark that this is the matching irecv, and go to process it.
|
||||
*/
|
||||
return_match = wild_recv;
|
||||
|
||||
/* remove this irecv from the postd wild ireceive list */
|
||||
ompi_list_remove_item(&(pml_comm->c_wild_receives),
|
||||
(ompi_list_item_t *)wild_recv);
|
||||
|
||||
/* found match - no need to continue */
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return return_match;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Try and match the incoming message fragment to the list of
|
||||
* "specific" receives
|
||||
*
|
||||
* @param frag_header Matching data from recived fragment (IN)
|
||||
*
|
||||
* @param pml_comm Pointer to the communicator structure used for
|
||||
* matching purposes. (IN)
|
||||
*
|
||||
* @return Matched receive
|
||||
*
|
||||
* This routine assumes that the appropriate matching locks are
|
||||
* set by the upper level routine.
|
||||
*/
|
||||
static mca_pml_base_recv_request_t *mca_ptl_base_check_specific_receives_for_match(
|
||||
mca_ptl_base_match_header_t *frag_header,
|
||||
mca_pml_ptl_comm_t *pml_comm)
|
||||
{
|
||||
/* local variables */
|
||||
mca_pml_base_recv_request_t *specific_recv, *return_match;
|
||||
int frag_src,recv_tag,frag_tag;
|
||||
|
||||
|
||||
/* initialization */
|
||||
return_match=(mca_pml_base_recv_request_t *)NULL;
|
||||
frag_src = frag_header->hdr_src;
|
||||
frag_tag=frag_header->hdr_tag;
|
||||
|
||||
/*
|
||||
* Loop over the specific irecvs.
|
||||
*/
|
||||
for(specific_recv = (mca_pml_base_recv_request_t *)
|
||||
ompi_list_get_first((pml_comm->c_specific_receives)+frag_src);
|
||||
specific_recv != (mca_pml_base_recv_request_t *)
|
||||
ompi_list_get_end((pml_comm->c_specific_receives)+frag_src);
|
||||
specific_recv = (mca_pml_base_recv_request_t *)
|
||||
((ompi_list_item_t *)specific_recv)->ompi_list_next) {
|
||||
/*
|
||||
* Check for a match
|
||||
*/
|
||||
recv_tag = specific_recv->req_base.req_tag;
|
||||
if ( (frag_tag == recv_tag) ||
|
||||
( (recv_tag == OMPI_ANY_TAG) && (0 <= frag_tag) ) ) {
|
||||
|
||||
/*
|
||||
* Match made
|
||||
*/
|
||||
return_match = specific_recv;
|
||||
|
||||
/* remove descriptor from posted specific ireceive list */
|
||||
ompi_list_remove_item((pml_comm->c_specific_receives)+frag_src,
|
||||
(ompi_list_item_t *)specific_recv);
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return return_match;
|
||||
}
|
||||
|
||||
/**
|
||||
* Try and match the incoming message fragment to the list of
|
||||
* "wild" receives and "specific" receives. Used when both types
|
||||
* of receives have been posted, i.e. when we need to coordinate
|
||||
* between multiple lists to make sure ordered delivery occurs.
|
||||
*
|
||||
* @param frag_header Matching data from recived fragment (IN)
|
||||
*
|
||||
* @param pml_comm Pointer to the communicator structure used for
|
||||
* matching purposes. (IN)
|
||||
*
|
||||
* @return Matched receive
|
||||
*
|
||||
* This routine assumes that the appropriate matching locks are
|
||||
* set by the upper level routine.
|
||||
*/
|
||||
static mca_pml_base_recv_request_t *mca_ptl_base_check_specific_and_wild_receives_for_match(
|
||||
mca_ptl_base_match_header_t *frag_header,
|
||||
mca_pml_ptl_comm_t *pml_comm)
|
||||
{
|
||||
/* local variables */
|
||||
mca_pml_base_recv_request_t *specific_recv, *wild_recv, *return_match;
|
||||
mca_ptl_sequence_t wild_recv_seq, specific_recv_seq;
|
||||
int frag_src,frag_tag, wild_recv_tag, specific_recv_tag;
|
||||
|
||||
/* initialization */
|
||||
return_match=(mca_pml_base_recv_request_t *)NULL;
|
||||
frag_src = frag_header->hdr_src;
|
||||
frag_tag=frag_header->hdr_tag;
|
||||
|
||||
/*
|
||||
* We know that when this is called, both specific and wild irecvs
|
||||
* have been posted.
|
||||
*/
|
||||
specific_recv = (mca_pml_base_recv_request_t *)
|
||||
ompi_list_get_first((pml_comm->c_specific_receives)+frag_src);
|
||||
wild_recv = (mca_pml_base_recv_request_t *)
|
||||
ompi_list_get_first(&(pml_comm->c_wild_receives));
|
||||
|
||||
specific_recv_seq = specific_recv->req_base.req_sequence;
|
||||
wild_recv_seq = wild_recv->req_base.req_sequence;
|
||||
|
||||
while (true) {
|
||||
if (wild_recv_seq < specific_recv_seq) {
|
||||
/*
|
||||
* wild recv is earlier than the specific one.
|
||||
*/
|
||||
/*
|
||||
* try and match
|
||||
*/
|
||||
wild_recv_tag = wild_recv->req_base.req_tag;
|
||||
if ( (frag_tag == wild_recv_tag) ||
|
||||
( (wild_recv_tag == OMPI_ANY_TAG) && (0 <= frag_tag) ) ) {
|
||||
|
||||
/*
|
||||
* Match made
|
||||
*/
|
||||
return_match=wild_recv;
|
||||
|
||||
/* remove this recv from the wild receive queue */
|
||||
ompi_list_remove_item(&(pml_comm->c_wild_receives),
|
||||
(ompi_list_item_t *)wild_recv);
|
||||
|
||||
return return_match;
|
||||
}
|
||||
|
||||
/*
|
||||
* No match, go to the next.
|
||||
*/
|
||||
wild_recv=(mca_pml_base_recv_request_t *)
|
||||
((ompi_list_item_t *)wild_recv)->ompi_list_next;
|
||||
|
||||
/*
|
||||
* If that was the last wild one, just look at the
|
||||
* rest of the specific ones.
|
||||
*/
|
||||
if (wild_recv == (mca_pml_base_recv_request_t *)
|
||||
ompi_list_get_end(&(pml_comm->c_wild_receives)) )
|
||||
{
|
||||
return_match = mca_ptl_base_check_specific_receives_for_match(frag_header,
|
||||
pml_comm);
|
||||
|
||||
return return_match;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get the sequence number for this recv, and go
|
||||
* back to the top of the loop.
|
||||
*/
|
||||
wild_recv_seq = wild_recv->req_base.req_sequence;
|
||||
|
||||
} else {
|
||||
/*
|
||||
* specific recv is earlier than the wild one.
|
||||
*/
|
||||
specific_recv_tag=specific_recv->req_base.req_tag;
|
||||
if ( (frag_tag == specific_recv_tag) ||
|
||||
( (specific_recv_tag == OMPI_ANY_TAG) && (0<=frag_tag)) )
|
||||
{
|
||||
|
||||
/*
|
||||
* Match made
|
||||
*/
|
||||
return_match = specific_recv;
|
||||
|
||||
/* remove descriptor from specific receive list */
|
||||
ompi_list_remove_item((pml_comm->c_specific_receives)+frag_src,
|
||||
(ompi_list_item_t *)specific_recv);
|
||||
|
||||
return return_match;
|
||||
}
|
||||
|
||||
/*
|
||||
* No match, go on to the next specific irecv.
|
||||
*/
|
||||
specific_recv = (mca_pml_base_recv_request_t *)
|
||||
((ompi_list_item_t *)specific_recv)->ompi_list_next;
|
||||
|
||||
/*
|
||||
* If that was the last specific irecv, process the
|
||||
* rest of the wild ones.
|
||||
*/
|
||||
if (specific_recv == (mca_pml_base_recv_request_t *)
|
||||
ompi_list_get_end((pml_comm->c_specific_receives)+frag_src) )
|
||||
{
|
||||
return_match = mca_ptl_base_check_wild_receives_for_match(frag_header,
|
||||
pml_comm);
|
||||
|
||||
return return_match;
|
||||
}
|
||||
/*
|
||||
* Get the sequence number for this recv, and go
|
||||
* back to the top of the loop.
|
||||
*/
|
||||
specific_recv_seq = specific_recv->req_base.req_sequence;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* need to handle this -- mca_ptl_base_check_cantmatch_for_match();
|
||||
*/
|
||||
/**
|
||||
* Scan the list of frags that came in ahead of time to see if any
|
||||
* can be processed at this time. If they can, try and match the
|
||||
@@ -512,14 +491,15 @@ static mca_pml_base_recv_request_t *mca_ptl_base_check_specific_and_wild_receive
 * set by the upper level routine.
 */

static void mca_ptl_base_check_cantmatch_for_match(ompi_list_t *additional_matches,
static bool mca_ptl_base_check_cantmatch_for_match(ompi_list_t *additional_matches,
    mca_pml_ptl_comm_t *pml_comm, int frag_src)
{
    /* local parameters */
    int match_found;
    mca_ptl_sequence_t next_msg_seq_expected, frag_seq;
    mca_ptl_base_recv_frag_t *frag_desc;
    mca_pml_base_recv_request_t *matched_receive;
    mca_pml_base_recv_request_t *matched_receive = NULL;
    bool match_made = false;

/*
|
||||
* Loop over all the out of sequence messages. No ordering is assumed
|
||||
@ -551,6 +531,9 @@ static void mca_ptl_base_check_cantmatch_for_match(ompi_list_t *additional_match
|
||||
*/
|
||||
frag_seq=frag_desc->frag_base.frag_header.hdr_match.hdr_msg_seq;
|
||||
if (frag_seq == next_msg_seq_expected) {
|
||||
int frag_src;
|
||||
mca_ptl_base_match_header_t* frag_header =
|
||||
&frag_desc->frag_base.frag_header.hdr_match;
|
||||
|
||||
/* We're now expecting the next sequence number. */
|
||||
(pml_comm->c_next_msg_seq[frag_src])++;
|
||||
@ -564,11 +547,28 @@ static void mca_ptl_base_check_cantmatch_for_match(ompi_list_t *additional_match
|
||||
ompi_list_remove_item((pml_comm->c_frags_cant_match)+frag_src,
|
||||
(ompi_list_item_t *)frag_desc);
|
||||
|
||||
/*
|
||||
* check to see if this frag matches a posted message
|
||||
*/
|
||||
matched_receive = mca_ptl_base_check_receives_for_match(
|
||||
&frag_desc->frag_base.frag_header.hdr_match, pml_comm);
|
||||
/*
|
||||
* figure out what sort of matching logic to use, if need to
|
||||
* look only at "specific" receives, or "wild" receives,
|
||||
* or if we need to traverse both sets at the same time.
|
||||
*/
|
||||
frag_src = frag_header->hdr_src;
|
||||
if (ompi_list_get_size((pml_comm->c_specific_receives)+frag_src) == 0 ) {
|
||||
/*
|
||||
* There are only wild irecvs, so specialize the algorithm.
|
||||
*/
|
||||
MCA_PTL_BASE_CHECK_WILD_RECEIVES_FOR_MATCH(frag_header, pml_comm, matched_receive);
|
||||
} else if (ompi_list_get_size(&(pml_comm->c_wild_receives)) == 0 ) {
|
||||
/*
|
||||
* There are only specific irecvs, so specialize the algorithm.
|
||||
*/
|
||||
MCA_PTL_BASE_CHECK_SPECIFIC_RECEIVES_FOR_MATCH(frag_header, pml_comm, matched_receive);
|
||||
} else {
|
||||
/*
|
||||
* There are some of each.
|
||||
*/
|
||||
MCA_PTL_BASE_CHECK_SPECIFIC_AND_WILD_RECEIVES_FOR_MATCH(frag_header, pml_comm, matched_receive);
|
||||
}
|
||||
|
||||
    /* if match found, process data */
    if (matched_receive) {

@@ -580,8 +580,11 @@ static void mca_ptl_base_check_cantmatch_for_match(ompi_list_t *additional_match
            /* add this fragment descriptor to the list of
             * descriptors to be processed later
             */
            ompi_list_append(additional_matches,
                (ompi_list_item_t *)frag_desc);
            if(match_made == false) {
                match_made = true;
                OBJ_CONSTRUCT(additional_matches, ompi_list_t);
            }
            ompi_list_append(additional_matches, (ompi_list_item_t *)frag_desc);

        } else {

@@ -601,7 +604,7 @@ static void mca_ptl_base_check_cantmatch_for_match(ompi_list_t *additional_match

    } /* end while loop */

    return;
    return match_made;
}

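In the out-of-order scan above, the list of additional matches is now constructed only when the first extra match is found, and the function returns whether it made any, so a caller that sees false never pays for the construction or a final traversal. A small sketch of that lazy-initialization shape in generic C (simplified list type, not the ompi_list API):

#include <stdbool.h>
#include <stdio.h>

typedef struct { int items[8]; int count; } match_list_t;

static void list_construct(match_list_t *l) { l->count = 0; }
static void list_append(match_list_t *l, int v) { l->items[l->count++] = v; }

/* scan queued values; initialize *out only when the first hit is found,
 * and tell the caller whether anything was appended at all */
static bool collect_matches(const int *queued, int n, int wanted, match_list_t *out)
{
    bool made = false;
    int i;
    for (i = 0; i < n; i++) {
        if (queued[i] == wanted) {
            if (!made) {             /* construct lazily, off the common path */
                made = true;
                list_construct(out);
            }
            list_append(out, queued[i]);
        }
    }
    return made;
}

int main(void)
{
    int queued[] = { 1, 4, 4, 7 };
    match_list_t extra;              /* left untouched unless something matches */
    if (collect_matches(queued, 4, 4, &extra))
        printf("%d extra matches\n", extra.count);
    else
        printf("no extra matches, list never built\n");
    return 0;
}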
/**
|
||||
@ -640,7 +643,7 @@ bool mca_ptl_base_match_in_order_network_delivery(
|
||||
/* local variables */
|
||||
mca_ptl_sequence_t frag_msg_seq,next_msg_seq_expected;
|
||||
ompi_communicator_t *comm_ptr;
|
||||
mca_pml_base_recv_request_t *matched_receive;
|
||||
mca_pml_base_recv_request_t *matched_receive = NULL;
|
||||
mca_pml_ptl_comm_t *pml_comm;
|
||||
int frag_src;
|
||||
|
||||
@ -665,9 +668,28 @@ bool mca_ptl_base_match_in_order_network_delivery(
|
||||
*/
|
||||
OMPI_THREAD_LOCK(&pml_comm->c_matching_lock);
|
||||
|
||||
/* see if receive has already been posted */
|
||||
matched_receive = mca_ptl_base_check_receives_for_match(frag_header,
|
||||
pml_comm);
|
||||
/*
|
||||
* figure out what sort of matching logic to use, if need to
|
||||
* look only at "specific" receives, or "wild" receives,
|
||||
* or if we need to traverse both sets at the same time.
|
||||
*/
|
||||
if (ompi_list_get_size((pml_comm->c_specific_receives)+frag_src) == 0 ){
|
||||
/*
|
||||
* There are only wild irecvs, so specialize the algorithm.
|
||||
*/
|
||||
MCA_PTL_BASE_CHECK_WILD_RECEIVES_FOR_MATCH(frag_header, pml_comm, matched_receive);
|
||||
|
||||
} else if (ompi_list_get_size(&(pml_comm->c_wild_receives)) == 0 ) {
|
||||
/*
|
||||
* There are only specific irecvs, so specialize the algorithm.
|
||||
*/
|
||||
MCA_PTL_BASE_CHECK_SPECIFIC_RECEIVES_FOR_MATCH(frag_header, pml_comm, matched_receive);
|
||||
} else {
|
||||
/*
|
||||
* There are some of each.
|
||||
*/
|
||||
MCA_PTL_BASE_CHECK_SPECIFIC_AND_WILD_RECEIVES_FOR_MATCH(frag_header, pml_comm, matched_receive);
|
||||
}
|
||||
|
||||
/* if match found, process data */
|
||||
if (matched_receive) {
|
||||
|
@@ -23,8 +23,11 @@ struct mca_ptl_base_recv_frag_t;
 * @param additional_matches (OUT) List of additional matches
 * @return OMPI_SUCCESS or error status on failure.
 */
OMPI_DECLSPEC bool mca_ptl_base_match(mca_ptl_base_match_header_t *frag_header,
    struct mca_ptl_base_recv_frag_t *frag_desc, ompi_list_t *additional_matches);
OMPI_DECLSPEC bool mca_ptl_base_match(
    mca_ptl_base_match_header_t *frag_header,
    struct mca_ptl_base_recv_frag_t *frag_desc,
    ompi_list_t *additional_matches,
    bool* additional_match);

/**
 * RCS/CTS receive side matching