
Keep track of the ompi_proc in the comm_proc. This avoids a lookup for the peer process and
simplifies the execution path. The peer proc (ompi_proc_t) is set at the matching stage.

This commit was SVN r8962.
This commit is contained in:
George Bosilca 2006-02-10 18:55:43 +00:00
parent 44fe6c3896
commit 0376dce258
6 changed files with 22 additions and 30 deletions
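
In outline, the commit trades a per-match ompi_comm_peer_lookup() for a pointer cached once per peer when the communicator is added to the PML. A minimal, self-contained model of that pattern follows; the types and helper names here are illustrative stand-ins, not the real ob1 structures:

/* Illustrative model of the caching pattern (hypothetical stand-in
 * types, not the actual Open MPI structures). */
#include <stdio.h>
#include <stdlib.h>

typedef struct { int id; } proc_t;                  /* stand-in for ompi_proc_t */
typedef struct { proc_t* proc_ompi; } comm_proc_t;  /* stand-in for mca_pml_ob1_comm_proc_t */
typedef struct { comm_proc_t* procs; int size; } pml_comm_t;

/* Setup stage (cf. mca_pml_ob1_add_comm): cache the peer pointer
 * once per remote rank. */
static void add_comm(pml_comm_t* pml_comm, proc_t** peers, int size)
{
    pml_comm->procs = calloc(size, sizeof(comm_proc_t));
    pml_comm->size = size;
    for (int i = 0; i < size; i++)
        pml_comm->procs[i].proc_ompi = peers[i];
}

/* Matching stage: the peer comes straight from the cache instead of
 * a communicator lookup on every incoming fragment. */
static proc_t* match_peer(pml_comm_t* pml_comm, int src)
{
    return pml_comm->procs[src].proc_ompi;
}

int main(void)
{
    proc_t p0 = { 0 }, p1 = { 1 };
    proc_t* peers[2] = { &p0, &p1 };
    pml_comm_t comm;
    add_comm(&comm, peers, 2);
    printf("matched peer %d\n", match_peer(&comm, 1)->id); /* prints 1 */
    free(comm.procs);
    return 0;
}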

View file

@@ -87,6 +87,7 @@ int mca_pml_ob1_add_comm(ompi_communicator_t* comm)
 pml_proc = OBJ_NEW(mca_pml_ob1_proc_t);
 pml_proc->base.proc_ompi = comm->c_remote_group->grp_proc_pointers[i];
 comm->c_pml_procs[i] = (mca_pml_proc_t*) pml_proc; /* comm->c_remote_group->grp_proc_pointers[i]->proc_pml; */
+pml_comm->procs[i].proc_ompi = comm->c_remote_group->grp_proc_pointers[i];
 }
 return OMPI_SUCCESS;
 }

View file

@@ -28,6 +28,7 @@ static void mca_pml_ob1_comm_proc_construct(mca_pml_ob1_comm_proc_t* proc)
 {
 proc->expected_sequence = 1;
 proc->send_sequence = 0;
+proc->proc_ompi = NULL;
 OBJ_CONSTRUCT(&proc->frags_cant_match, opal_list_t);
 OBJ_CONSTRUCT(&proc->specific_receives, opal_list_t);
 OBJ_CONSTRUCT(&proc->unexpected_frags, opal_list_t);

View file

@@ -25,6 +25,7 @@
 #include "opal/threads/condition.h"
 #include "mca/ptl/ptl.h"
 #include "opal/class/opal_list.h"
+#include "proc/proc.h"
 #if defined(c_plusplus) || defined(__cplusplus)
 extern "C" {
 #endif
@@ -33,6 +34,7 @@ extern "C" {
 struct mca_pml_ob1_comm_proc_t {
 opal_object_t super;
 uint16_t expected_sequence; /**< send message sequence number - receiver side */
+struct ompi_proc_t* proc_ompi;
 #if OMPI_HAVE_THREAD_SUPPORT
 volatile int32_t send_sequence; /**< send side sequence number */
 #else

View file

@@ -125,7 +125,7 @@ void mca_pml_ob1_recv_frag_callback(
 * This routine assumes that the appropriate matching locks are
 * set by the upper level routine.
 */
-#define MCA_PML_OB1_MATCH_GENERIC_RECEIVES(hdr,generic_receives,return_match) \
+#define MCA_PML_OB1_MATCH_GENERIC_RECEIVES(hdr,generic_receives,proc,return_match) \
 do { \
 /* local variables */ \
 mca_pml_ob1_recv_request_t *generic_recv; \
@@ -143,6 +143,7 @@ do { \
 if ( (frag_tag == recv_tag) || \
 ( (recv_tag == OMPI_ANY_TAG) && (0 <= frag_tag) ) ) { \
 \
+generic_recv->req_recv.req_base.req_proc = proc->proc_ompi; \
 /* Match made */ \
 return_match = generic_recv; \
 \
@@ -174,7 +175,7 @@ do { \
 do { \
 /* local parameters */ \
 opal_list_t* wild_receives = &comm->wild_receives; \
-MCA_PML_OB1_MATCH_GENERIC_RECEIVES(hdr,wild_receives,return_match); \
+MCA_PML_OB1_MATCH_GENERIC_RECEIVES(hdr,wild_receives,proc,return_match); \
 } while(0)
@@ -196,7 +197,7 @@ do { \
 do { \
 /* local variables */ \
 opal_list_t* specific_receives = &proc->specific_receives; \
-MCA_PML_OB1_MATCH_GENERIC_RECEIVES(hdr,specific_receives,return_match); \
+MCA_PML_OB1_MATCH_GENERIC_RECEIVES(hdr,specific_receives,proc,return_match); \
 } while(0)
 /**

View file

@@ -174,12 +174,6 @@ static void mca_pml_ob1_recv_request_ack(
 mca_pml_ob1_ack_hdr_t* ack;
 int rc;
-/* if this hasn't been initialized yet - this is a synchronous send */
-if(NULL == proc) {
-ompi_proc_t *ompi_proc = ompi_comm_peer_lookup(
-recvreq->req_recv.req_base.req_comm, hdr->hdr_match.hdr_src);
-proc = recvreq->req_recv.req_base.req_proc = ompi_proc;
-}
 bml_endpoint = (mca_bml_base_endpoint_t*) proc->proc_pml;
 bml_btl = mca_bml_base_btl_array_get_next(&bml_endpoint->btl_eager);
@@ -586,7 +580,7 @@ void mca_pml_ob1_recv_request_matched_probe(
 void mca_pml_ob1_recv_request_schedule(mca_pml_ob1_recv_request_t* recvreq)
 {
 if(OPAL_THREAD_ADD32(&recvreq->req_lock,1) == 1) {
-ompi_proc_t* proc = (ompi_proc_t*)recvreq->req_recv.req_base.req_proc;
+ompi_proc_t* proc = recvreq->req_recv.req_base.req_proc;
 mca_bml_base_endpoint_t* bml_endpoint = (mca_bml_base_endpoint_t*) proc->proc_pml;
 mca_bml_base_btl_t* bml_btl;
 do {
@@ -885,6 +879,7 @@ static mca_pml_ob1_recv_frag_t* mca_pml_ob1_recv_request_match_specific_proc(
 }
 return NULL;
 find_fragment:
+request->req_recv.req_base.req_proc = proc->proc_ompi;
 if( !((MCA_PML_REQUEST_IPROBE == request->req_recv.req_base.req_type) ||
 (MCA_PML_REQUEST_PROBE == request->req_recv.req_base.req_type)) ) {
 opal_list_remove_item(unexpected_frags, (opal_list_item_t*)frag);

View file

@@ -194,26 +194,18 @@ do {
 *
 */
-#define MCA_PML_OB1_RECV_REQUEST_MATCHED( \
-request, \
-hdr) \
-do { \
-(request)->req_recv.req_base.req_ompi.req_status.MPI_TAG = (hdr)->hdr_tag; \
-(request)->req_recv.req_base.req_ompi.req_status.MPI_SOURCE = (hdr)->hdr_src; \
-if((request)->req_recv.req_bytes_packed != 0) { \
-ompi_proc_t *proc = \
-ompi_comm_peer_lookup( \
-(request)->req_recv.req_base.req_comm, (hdr)->hdr_src); \
-\
-(request)->req_recv.req_base.req_proc = proc; \
-ompi_convertor_copy_and_prepare_for_recv( proc->proc_convertor, \
-(request)->req_recv.req_base.req_datatype, \
-(request)->req_recv.req_base.req_count, \
-(request)->req_recv.req_base.req_addr, \
-&(request)->req_recv.req_convertor ); \
-} else { \
-(request)->req_recv.req_base.req_proc = NULL; \
-} \
+#define MCA_PML_OB1_RECV_REQUEST_MATCHED( request, hdr ) \
+do { \
+(request)->req_recv.req_base.req_ompi.req_status.MPI_TAG = (hdr)->hdr_tag; \
+(request)->req_recv.req_base.req_ompi.req_status.MPI_SOURCE = (hdr)->hdr_src; \
+if((request)->req_recv.req_bytes_packed != 0) { \
+ompi_convertor_copy_and_prepare_for_recv( \
+(request)->req_recv.req_base.req_proc->proc_convertor, \
+(request)->req_recv.req_base.req_datatype, \
+(request)->req_recv.req_base.req_count, \
+(request)->req_recv.req_base.req_addr, \
+&(request)->req_recv.req_convertor ); \
+} \
 } while (0)
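
For context, a hedged sketch of the invariant this last hunk relies on, again with hypothetical stand-in types rather than the actual macro: once matching has stored the cached peer in req_proc, the matched path can dereference it directly, which is why both the per-match lookup and the NULL fallback branches could be deleted.

/* Stand-in types; the real code is MCA_PML_OB1_RECV_REQUEST_MATCHED above. */
#include <stddef.h>

typedef struct { int proc_convertor; } proc_t;
typedef struct { proc_t* req_proc; size_t req_bytes_packed; } recv_req_t;

static void request_matched(recv_req_t* req)
{
    if (req->req_bytes_packed != 0) {
        /* Matching always sets req_proc before this point, so no
         * communicator lookup and no NULL check is needed; the
         * convertor can be prepared straight from the cached peer. */
        int conv = req->req_proc->proc_convertor;
        (void)conv; /* prepare the receive convertor here */
    }
}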