
Small cleanups. Remove some switch cases that cannot be reached. Rename a struct field.

This commit was SVN r18931.
This commit is contained in:
George Bosilca 2008-07-17 04:50:39 +00:00
parent 319a8b3219
commit 939fa3001d
5 changed files with 93 additions and 136 deletions
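Two cleanups to watch for in the diffs below: the unreachable header-type case removed from the switch in mca_pml_ob1_recv_req_start, and the rename of the receive request's handle to its peer's send request. req_send read like a local field; remote_req_send says what it actually holds. A minimal sketch of the renamed field, with the Open MPI types stubbed down to the parts the rename touches (not the full declarations):

#include <stdint.h>

/* Sketch only: ompi_ptr_t reduced to the shape used here, all other
 * members of the request structure elided. */
typedef union {
    void    *pval;
    uint64_t lval;
} ompi_ptr_t;

struct mca_pml_ob1_recv_request_t {
    /* ... */
    ompi_ptr_t remote_req_send; /* was req_send: the sender's request
                                 * handle, echoed back to the peer in
                                 * ACK/PUT control headers */
    /* ... */
};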

View File

@@ -97,12 +97,13 @@ void mca_pml_ob1_recv_frag_callback_match(mca_btl_base_module_t* btl,
/* communicator pointer */
comm_ptr = ompi_comm_lookup(hdr->hdr_ctx);
if(OPAL_UNLIKELY(NULL == comm_ptr)) {
-/* This is a special case. A message for a not yet existing communicator can
- * happens, but right now we segfault. Instead, and until we find a better
- * solution, just drop the message. However, in the near future we should
- * store this fragment in a global list, and deliver it to the right
- * communicator once it get created.
- */
+/* This is a special case. A message for a not yet existing
+ * communicator can happens, but right now we
+ * segfault. Instead, and until we find a better solution,
+ * just drop the message. However, in the near future we
+ * should store this fragment in a global list, and deliver
+ * it to the right communicator once it get created.
+ */
opal_output( 0, "Dropped message for the non-existing communicator %d\n",
(int)hdr->hdr_ctx );
return;
@@ -112,55 +113,52 @@ void mca_pml_ob1_recv_frag_callback_match(mca_btl_base_module_t* btl,
/* source sequence number */
proc = &comm->procs[hdr->hdr_src];
-/**
- * We generate the MSG_ARRIVED event as soon as the PML is aware of a matching
- * fragment arrival. Independing if it is received on the correct order or not.
- * This will allow the tools to figure out if the messages are not received in the
- * correct order (if multiple network interfaces).
- */
+/* We generate the MSG_ARRIVED event as soon as the PML is aware
+ * of a matching fragment arrival. Independing if it is received
+ * on the correct order or not. This will allow the tools to
+ * figure out if the messages are not received in the correct
+ * order (if multiple network interfaces).
+ */
PERUSE_TRACE_MSG_EVENT(PERUSE_COMM_MSG_ARRIVED, comm_ptr,
hdr->hdr_src, hdr->hdr_tag, PERUSE_RECV);
/* get next expected message sequence number - if threaded
* run, lock to make sure that if another thread is processing
* a frag from the same message a match is made only once.
* Also, this prevents other posted receives (for a pair of
* end points) from being processed, and potentially "loosing"
* the fragment.
*/
OPAL_THREAD_LOCK(&comm->matching_lock);
/* get sequence number of next message that can be processed */
if(OPAL_UNLIKELY((((uint16_t) hdr->hdr_seq) != ((uint16_t) proc->expected_sequence)) ||
(opal_list_get_size(&proc->frags_cant_match) > 0 ) ||
(num_segments > 1))) {
goto slow_path;
}
-/*
- * This is the sequence number we were expecting,
- * so we can try matching it to already posted
- * receives.
- */
+/* This is the sequence number we were expecting, so we can try
+ * matching it to already posted receives.
+ */
+/* We're now expecting the next sequence number. */
proc->expected_sequence++;
-/**
- * We generate the SEARCH_POSTED_QUEUE only when the message is received
- * in the correct sequence. Otherwise, we delay the event generation until
- * we reach the correct sequence number.
- */
+/* We generate the SEARCH_POSTED_QUEUE only when the message is
+ * received in the correct sequence. Otherwise, we delay the event
+ * generation until we reach the correct sequence number.
+ */
PERUSE_TRACE_MSG_EVENT(PERUSE_COMM_SEARCH_POSTED_Q_BEGIN, comm_ptr,
hdr->hdr_src, hdr->hdr_tag, PERUSE_RECV);
match = match_one(btl, hdr, segment, num_segments, comm_ptr, proc, frag);
-/**
- * The match is over. We generate the SEARCH_POSTED_Q_END here, before going
- * into the mca_pml_ob1_check_cantmatch_for_match so we can make a difference
- * for the searching time for all messages.
- */
+/* The match is over. We generate the SEARCH_POSTED_Q_END here,
+ * before going into the mca_pml_ob1_check_cantmatch_for_match so
+ * we can make a difference for the searching time for all
+ * messages.
+ */
PERUSE_TRACE_MSG_EVENT(PERUSE_COMM_SEARCH_POSTED_Q_END, comm_ptr,
hdr->hdr_src, hdr->hdr_tag, PERUSE_RECV);
@@ -466,13 +464,13 @@ static mca_pml_ob1_recv_request_t *match_one(mca_btl_base_module_t *btl,
if(MCA_PML_REQUEST_PROBE == match->req_recv.req_base.req_type) {
/* complete the probe */
mca_pml_ob1_recv_request_matched_probe(match, btl, segments,
num_segments);
/* attempt to match actual request */
continue;
}
PERUSE_TRACE_COMM_EVENT(PERUSE_COMM_MSG_MATCH_POSTED_REQ,
&(match->req_recv.req_base), PERUSE_RECV);
break;
} while(true);
@@ -485,9 +483,9 @@ static mca_pml_ob1_recv_frag_t *check_cantmatch_for_match(
/* local parameters */
mca_pml_ob1_recv_frag_t *frag;
/* search the list for a fragment from the send with sequence
* number next_msg_seq_expected
*/
for(frag = (mca_pml_ob1_recv_frag_t *)
opal_list_get_first(&proc->frags_cant_match);
frag != (mca_pml_ob1_recv_frag_t *)
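The fast path shown in the first hunk matches a fragment immediately only when it carries the next expected sequence number, nothing is parked on frags_cant_match, and it arrived in a single segment; anything else jumps to slow_path. A standalone sketch of that predicate, with stand-in parameters rather than the ob1 structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* The OPAL_UNLIKELY test in the diff takes the slow path exactly
 * when this predicate is false. */
static bool can_match_on_fast_path(uint16_t hdr_seq,
                                   uint16_t expected_sequence,
                                   size_t frags_cant_match_length,
                                   size_t num_segments)
{
    return hdr_seq == expected_sequence &&
           0 == frags_cant_match_length &&
           1 == num_segments;
}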

View File

@@ -184,7 +184,7 @@ static void mca_pml_ob1_put_completion( mca_btl_base_module_t* btl,
if( OPAL_LIKELY(status == OMPI_SUCCESS) ) {
MCA_PML_OB1_COMPUTE_SEGMENT_LENGTH( des->des_dst, des->des_dst_cnt,
0, bytes_received );
}
OPAL_THREAD_ADD_SIZE_T(&recvreq->req_pipeline_depth,-1);
@@ -552,7 +552,7 @@ void mca_pml_ob1_recv_request_progress_rndv( mca_pml_ob1_recv_request_t* recvreq
bytes_received -= sizeof(mca_pml_ob1_rendezvous_hdr_t);
recvreq->req_recv.req_bytes_packed = hdr->hdr_rndv.hdr_msg_length;
-recvreq->req_send = hdr->hdr_rndv.hdr_src_req;
+recvreq->remote_req_send = hdr->hdr_rndv.hdr_src_req;
recvreq->req_rdma_offset = bytes_received;
MCA_PML_OB1_RECV_REQUEST_MATCHED(recvreq, &hdr->hdr_match);
mca_pml_ob1_recv_request_ack(recvreq, &hdr->hdr_rndv, bytes_received);
@@ -591,7 +591,6 @@ void mca_pml_ob1_recv_request_progress_rndv( mca_pml_ob1_recv_request_t* recvreq
}
}
/*
* Update the recv request status to reflect the number of bytes
* received and actually delivered to the application.
@@ -602,9 +601,7 @@ void mca_pml_ob1_recv_request_progress_match( mca_pml_ob1_recv_request_t* recvre
mca_btl_base_segment_t* segments,
size_t num_segments )
{
-size_t bytes_received = 0;
-size_t bytes_delivered = 0;
-size_t data_offset = 0;
+size_t bytes_received = 0, bytes_delivered = 0, data_offset = 0;
mca_pml_ob1_hdr_t* hdr = (mca_pml_ob1_hdr_t*)segments->seg_addr.pval;
bool complete;
@@ -640,9 +637,12 @@ void mca_pml_ob1_recv_request_progress_match( mca_pml_ob1_recv_request_t* recvre
recvreq->req_recv.req_base.req_datatype);
);
-OPAL_THREAD_ADD_SIZE_T(&recvreq->req_bytes_received, bytes_received);
-complete = recv_request_pml_complete_check(recvreq);
-assert(complete);
+/*
+ * No need for atomic here, as we know there is only one fragment
+ * for this request.
+ */
+recvreq->req_bytes_received += bytes_received;
+recv_request_pml_complete(recvreq);
}
@@ -793,7 +793,7 @@ int mca_pml_ob1_recv_request_schedule_once(
hdr->hdr_common.hdr_type = MCA_PML_OB1_HDR_TYPE_PUT;
hdr->hdr_common.hdr_flags =
(!recvreq->req_ack_sent) ? MCA_PML_OB1_HDR_TYPE_ACK : 0;
-hdr->hdr_req = recvreq->req_send;
+hdr->hdr_req = recvreq->remote_req_send;
hdr->hdr_des.pval = dst;
hdr->hdr_rdma_offset = recvreq->req_rdma_offset;
hdr->hdr_seg_cnt = dst->des_dst_cnt;
@@ -853,7 +853,7 @@ static inline void append_recv_req_to_queue(opal_list_t *queue,
*/
if(req->req_recv.req_base.req_type != MCA_PML_REQUEST_PROBE) {
PERUSE_TRACE_COMM_EVENT(PERUSE_COMM_REQ_INSERT_IN_POSTED_Q,
&(req->req_recv.req_base), PERUSE_RECV);
}
}
@@ -862,9 +862,9 @@ static inline void append_recv_req_to_queue(opal_list_t *queue,
* it places the request in the appropriate matched receive list. This
* function has to be called with the communicator matching lock held.
*/
-static mca_pml_ob1_recv_frag_t *recv_req_match_specific_proc(
-const mca_pml_ob1_recv_request_t *req,
-mca_pml_ob1_comm_proc_t *proc)
+static mca_pml_ob1_recv_frag_t*
+recv_req_match_specific_proc( const mca_pml_ob1_recv_request_t *req,
+mca_pml_ob1_comm_proc_t *proc )
{
opal_list_t* unexpected_frags = &proc->unexpected_frags;
opal_list_item_t *i;
@@ -874,14 +874,24 @@ static mca_pml_ob1_recv_frag_t *recv_req_match_specific_proc(
if(opal_list_get_size(unexpected_frags) == 0)
return NULL;
-for (i = opal_list_get_first(unexpected_frags);
-i != opal_list_get_end(unexpected_frags);
-i = opal_list_get_next(i)) {
-frag = (mca_pml_ob1_recv_frag_t*)i;
-if(frag->hdr.hdr_match.hdr_tag == tag ||
-(OMPI_ANY_TAG == tag && frag->hdr.hdr_match.hdr_tag >= 0))
-return frag;
-}
+if( OMPI_ANY_TAG == tag ) {
+for (i = opal_list_get_first(unexpected_frags);
+i != opal_list_get_end(unexpected_frags);
+i = opal_list_get_next(i)) {
+frag = (mca_pml_ob1_recv_frag_t*)i;
+if( frag->hdr.hdr_match.hdr_tag >= 0 )
+return frag;
+}
+} else {
+for (i = opal_list_get_first(unexpected_frags);
+i != opal_list_get_end(unexpected_frags);
+i = opal_list_get_next(i)) {
+frag = (mca_pml_ob1_recv_frag_t*)i;
+if( frag->hdr.hdr_match.hdr_tag == tag )
+return frag;
+}
+}
return NULL;
@@ -891,21 +901,20 @@ static mca_pml_ob1_recv_frag_t *recv_req_match_specific_proc(
* this routine is used to try and match a wild posted receive - where
* wild is determined by the value assigned to the source process
*/
-static mca_pml_ob1_recv_frag_t *recv_req_match_wild(
-mca_pml_ob1_recv_request_t* req, mca_pml_ob1_comm_proc_t **p)
+static mca_pml_ob1_recv_frag_t*
+recv_req_match_wild( mca_pml_ob1_recv_request_t* req,
+mca_pml_ob1_comm_proc_t **p)
{
mca_pml_ob1_comm_t* comm = req->req_recv.req_base.req_comm->c_pml_comm;
mca_pml_ob1_comm_proc_t* proc = comm->procs;
-size_t proc_count = comm->num_procs;
-size_t i;
+size_t proc_count = comm->num_procs, i;
/*
* Loop over all the outstanding messages to find one that matches.
* There is an outer loop over lists of messages from each
* process, then an inner loop over the messages from the
* process.
*/
for (i = 0; i < proc_count; i++) {
mca_pml_ob1_recv_frag_t* frag;
@@ -940,7 +949,6 @@ void mca_pml_ob1_recv_req_start(mca_pml_ob1_recv_request_t *req)
req->req_rdma_idx = 0;
req->req_pending = false;
req->req_ack_sent = false;
-req->req_match_received = false;
MCA_PML_BASE_RECV_START(&req->req_recv.req_base);
@@ -955,7 +963,6 @@ void mca_pml_ob1_recv_req_start(mca_pml_ob1_recv_request_t *req)
/* assign sequence number */
req->req_recv.req_base.req_sequence = comm->recv_sequence++;
/* attempt to match posted recv */
if(req->req_recv.req_base.req_peer == OMPI_ANY_SOURCE) {
frag = recv_req_match_wild(req, &proc);
@@ -975,6 +982,7 @@ void mca_pml_ob1_recv_req_start(mca_pml_ob1_recv_request_t *req)
/* We didn't find any matches. Record this irecv so we can match
it when the message comes in. */
append_recv_req_to_queue(queue, req);
+req->req_match_received = false;
OPAL_THREAD_UNLOCK(&comm->matching_lock);
} else {
if(OPAL_LIKELY(!IS_PROB_REQ(req))) {
@@ -991,7 +999,7 @@ void mca_pml_ob1_recv_req_start(mca_pml_ob1_recv_request_t *req)
&(req->req_recv.req_base), PERUSE_RECV);
opal_list_remove_item(&proc->unexpected_frags,
(opal_list_item_t*)frag);
OPAL_THREAD_UNLOCK(&comm->matching_lock);
hdr = (mca_pml_ob1_hdr_t*)frag->segments->seg_addr.pval;
@@ -1008,10 +1016,8 @@ void mca_pml_ob1_recv_req_start(mca_pml_ob1_recv_request_t *req)
mca_pml_ob1_recv_request_progress_rget(req, frag->btl, frag->segments,
frag->num_segments);
break;
-case MCA_PML_OB1_HDR_TYPE_FRAG:
-mca_pml_ob1_recv_request_progress_frag(req, frag->btl, frag->segments,
-frag->num_segments);
-break;
+default:
+assert(0);
}
MCA_PML_OB1_RECV_FRAG_RETURN(frag);
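The most substantial change in this file splits recv_req_match_specific_proc's single matching loop in two: the OMPI_ANY_TAG test is hoisted out of the loop, so the wildcard and specific-tag cases each scan the unexpected-fragment list with one comparison per element instead of up to three. A reduced sketch of the pattern on a plain linked list (the types and the ANY_TAG stand-in are illustrative, not the ob1 declarations):

#include <stddef.h>

#define ANY_TAG (-1) /* stand-in for OMPI_ANY_TAG */

typedef struct frag {
    int          tag;
    struct frag *next;
} frag_t;

static frag_t *match_tag(frag_t *head, int tag)
{
    frag_t *f;

    if (ANY_TAG == tag) {
        /* A wildcard receive matches any fragment with a
         * non-negative tag; negative tags are reserved for
         * internal traffic. */
        for (f = head; f != NULL; f = f->next)
            if (f->tag >= 0)
                return f;
    } else {
        for (f = head; f != NULL; f = f->next)
            if (f->tag == tag)
                return f;
    }
    return NULL;
}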

View File

@@ -35,7 +35,7 @@ BEGIN_C_DECLS
struct mca_pml_ob1_recv_request_t {
mca_pml_base_recv_request_t req_recv;
-ompi_ptr_t req_send;
+ompi_ptr_t remote_req_send;
int32_t req_lock;
size_t req_pipeline_depth;
size_t req_bytes_received; /**< amount of data transferred into the user buffer */
@@ -208,13 +208,13 @@ static inline void prepare_recv_req_converter(mca_pml_ob1_recv_request_t *req)
ompi_convertor_get_unpacked_size(&req->req_recv.req_base.req_convertor,
&req->req_bytes_delivered);
}
}
}
#define MCA_PML_OB1_RECV_REQUEST_MATCHED(request, hdr) \
recv_req_matched(request, hdr)
static inline void recv_req_matched(mca_pml_ob1_recv_request_t *req,
mca_pml_ob1_match_hdr_t *hdr)
{
req->req_recv.req_base.req_ompi.req_status.MPI_SOURCE = hdr->hdr_src;
req->req_recv.req_base.req_ompi.req_status.MPI_TAG = hdr->hdr_tag;
@@ -238,14 +238,13 @@ static inline void recv_req_matched(mca_pml_ob1_recv_request_t *req,
*
*/
-#define MCA_PML_OB1_RECV_REQUEST_UNPACK( \
-request, \
-segments, \
-num_segments, \
-seg_offset, \
-data_offset, \
-bytes_received, \
-bytes_delivered) \
+#define MCA_PML_OB1_RECV_REQUEST_UNPACK( request, \
+segments, \
+num_segments, \
+seg_offset, \
+data_offset, \
+bytes_received, \
+bytes_delivered) \
do { \
bytes_delivered = 0; \
if(request->req_recv.req_bytes_packed > 0) { \
@@ -261,7 +260,8 @@
offset -= segment->seg_len; \
} else { \
iov[iov_count].iov_len = segment->seg_len - offset; \
-iov[iov_count].iov_base = (IOVBASE_TYPE*)((unsigned char*)segment->seg_addr.pval + offset); \
+iov[iov_count].iov_base = (IOVBASE_TYPE*) \
+((unsigned char*)segment->seg_addr.pval + offset); \
iov_count++; \
} \
} \
@@ -280,52 +280,6 @@
} while (0)
/**
*
*/
-#define MCA_PML_OB1_RECV_REQUEST_UNPACK_ALL( \
-request, \
-segments, \
-num_segments, \
-seg_offset, \
-data_offset, \
-bytes_received, \
-bytes_delivered) \
-do { \
-bytes_delivered = 0; \
-if(request->req_recv.req_bytes_packed > 0) { \
-struct iovec iov[MCA_BTL_DES_MAX_SEGMENTS]; \
-uint32_t iov_count = 0; \
-size_t max_data = bytes_received; \
-size_t n, offset = seg_offset; \
-mca_btl_base_segment_t* segment = segments; \
-\
-OPAL_THREAD_LOCK(&request->lock); \
-for( n = 0; n < num_segments; n++, segment++ ) { \
-if(offset >= segment->seg_len) { \
-offset -= segment->seg_len; \
-} else { \
-iov[iov_count].iov_len = segment->seg_len - offset; \
-iov[iov_count].iov_base = (IOVBASE_TYPE*)((unsigned char*)segment->seg_addr.pval + offset); \
-iov_count++; \
-} \
-} \
-PERUSE_TRACE_COMM_OMPI_EVENT (PERUSE_COMM_REQ_XFER_CONTINUE, \
-&(recvreq->req_recv.req_base), \
-bytes_received, \
-PERUSE_RECV); \
-ompi_convertor_set_position( &(request->req_recv.req_base.req_convertor), \
-&data_offset ); \
-ompi_convertor_unpack( &(request)->req_recv.req_base.req_convertor, \
-iov, \
-&iov_count, \
-&max_data ); \
-bytes_delivered = max_data; \
-OPAL_THREAD_UNLOCK(&request->lock); \
-} \
-} while (0)
/**
*
*/
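For context on the macro that survives: MCA_PML_OB1_RECV_REQUEST_UNPACK builds an iovec over the BTL segments, then lets the convertor unpack into the user buffer starting at data_offset. A simplified, hedged sketch of the iovec construction with a stubbed segment type (the real macro operates on mca_btl_base_segment_t inside the macro body):

#include <stddef.h>
#include <sys/uio.h>

typedef struct {
    void  *seg_addr;
    size_t seg_len;
} seg_t; /* stub for mca_btl_base_segment_t */

/* Skip `offset` bytes from the front of the segment list, then
 * describe the remainder as iovec entries; returns the entry count. */
static unsigned build_unpack_iov(const seg_t *segments,
                                 size_t num_segments,
                                 size_t offset,
                                 struct iovec *iov)
{
    unsigned iov_count = 0;
    size_t n;

    for (n = 0; n < num_segments; n++) {
        if (offset >= segments[n].seg_len) {
            offset -= segments[n].seg_len; /* segment fully skipped */
        } else {
            iov[iov_count].iov_len = segments[n].seg_len - offset;
            iov[iov_count].iov_base =
                (unsigned char *)segments[n].seg_addr + offset;
            iov_count++;
            offset = 0; /* only the first used segment is trimmed */
        }
    }
    return iov_count;
}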

View File

@@ -559,7 +559,6 @@ int mca_pml_ob1_send_request_start_copy( mca_pml_ob1_send_request_t* sendreq,
des->des_cbdata = sendreq;
des->des_cbfunc = mca_pml_ob1_match_completion_free;
/* send */
rc = mca_bml_base_send_status(bml_btl, des, MCA_PML_OB1_HDR_TYPE_MATCH);
if( OPAL_LIKELY( rc >= 0 ) ) {

View File

@@ -230,8 +230,8 @@ send_request_pml_complete(mca_pml_ob1_send_request_t *sendreq)
mca_pml_ob1_free_rdma_resources(sendreq);
if (sendreq->req_send.req_send_mode == MCA_PML_BASE_SEND_BUFFERED &&
sendreq->req_send.req_addr != sendreq->req_send.req_base.req_addr) {
mca_pml_base_bsend_request_fini((ompi_request_t*)sendreq);
}
OPAL_THREAD_LOCK(&ompi_request_lock);
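The final hunk is indentation-only, but the guard it touches encodes a real invariant: a buffered (MPI_Bsend-style) request releases its slice of the attached buffer only if the data was actually copied there, which the PML detects by the send address having been redirected away from the buffer the application posted. A hedged sketch of that test, with the ob1 fields collapsed into a stub struct:

#include <stdbool.h>

typedef enum { SEND_STANDARD, SEND_BUFFERED } send_mode_t; /* stand-ins */

typedef struct {
    send_mode_t mode;      /* req_send.req_send_mode */
    const void *send_addr; /* req_send.req_addr: address the PML
                            * sends from */
    const void *user_addr; /* req_send.req_base.req_addr: address
                            * posted by the application */
} send_req_t;

/* True when completion must hand the bsend buffer back. */
static bool bsend_buffer_in_use(const send_req_t *req)
{
    return req->mode == SEND_BUFFERED &&
           req->send_addr != req->user_addr;
}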