
Don't poke at internal structure fields of communicators or groups, but
instead use accessor functions

This commit was SVN r15366.
This commit is contained in:
Brian Barrett 2007-07-11 17:16:06 +00:00
parent 82c8d224d6
commit 739fed9dc9
8 changed files with 79 additions and 88 deletions
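The change is mechanical: every direct read of a communicator or group field (c_my_rank, c_contextid, c_local_group, c_remote_group, grp_proc_count, grp_proc_pointers) is replaced by the matching accessor (ompi_comm_rank, ompi_comm_get_cid, ompi_comm_size, ompi_comm_peer_lookup, ompi_group_size, ompi_group_peer_lookup). A minimal sketch of the before/after pattern, mirroring the rank-lookup loops rewritten below; the helper name is a hypothetical for illustration, not part of the commit:

#include "ompi/communicator/communicator.h"
#include "ompi/group/group.h"

/* Hypothetical helper: find the rank of group member group_idx within
 * comm -- the same job the rewritten loops in the module_start
 * functions below perform. */
static int find_comm_rank(ompi_communicator_t *comm,
                          ompi_group_t *group, int group_idx)
{
    int j;

    /* old style (removed by this commit): poke at comm->c_local_group,
       group->grp_proc_pointers[group_idx], comm->c_my_rank, ... */

    /* new style: go through the accessor functions only */
    for (j = 0 ; j < ompi_comm_size(comm) ; ++j) {
        if (ompi_group_peer_lookup(group, group_idx) ==
            ompi_comm_peer_lookup(comm, j)) {
            return j;
        }
    }

    return -1;
}

Keeping callers behind these accessors lets the communicator and group internals change without touching every component.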

View file

@@ -38,7 +38,7 @@ ompi_osc_pt2pt_module_free(ompi_win_t *win)
     opal_output_verbose(1, ompi_osc_base_output,
                         "pt2pt component destroying window with id %d",
-                        module->p2p_comm->c_contextid);
+                        ompi_comm_get_cid(module->p2p_comm));
 
     /* finish with a barrier */
     if (ompi_group_size(win->w_group) > 1) {
@@ -48,7 +48,7 @@ ompi_osc_pt2pt_module_free(ompi_win_t *win)
     /* remove from component information */
     OPAL_THREAD_LOCK(&mca_osc_pt2pt_component.p2p_c_lock);
     tmp = opal_hash_table_remove_value_uint32(&mca_osc_pt2pt_component.p2p_c_modules,
-                                              module->p2p_comm->c_contextid);
+                                              ompi_comm_get_cid(module->p2p_comm));
     /* only take the output of hast_table_remove if there wasn't already an error */
     ret = (ret != OMPI_SUCCESS) ? ret : tmp;

View file

@@ -288,7 +288,7 @@ ompi_osc_pt2pt_component_select(ompi_win_t *win,
     opal_output_verbose(1, ompi_osc_base_output,
                         "pt2pt component creating window with id %d",
-                        module->p2p_comm->c_contextid);
+                        ompi_comm_get_cid(module->p2p_comm));
 
     module->p2p_num_pending_sendreqs = (unsigned int*)
         malloc(sizeof(unsigned int) * ompi_comm_size(module->p2p_comm));
@@ -350,7 +350,7 @@ ompi_osc_pt2pt_component_select(ompi_win_t *win,
     /* update component data */
     OPAL_THREAD_LOCK(&mca_osc_pt2pt_component.p2p_c_lock);
     opal_hash_table_set_value_uint32(&mca_osc_pt2pt_component.p2p_c_modules,
-                                     module->p2p_comm->c_contextid,
+                                     ompi_comm_get_cid(module->p2p_comm),
                                      module);
     ret = opal_hash_table_get_size(&mca_osc_pt2pt_component.p2p_c_modules);
     if (ret == 1) {

View file

@@ -90,7 +90,7 @@ ompi_osc_pt2pt_sendreq_send_long_cb(ompi_osc_pt2pt_mpireq_t *mpireq)
     OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                          "%d completed long sendreq to %d",
-                         sendreq->req_module->p2p_comm->c_my_rank,
+                         ompi_comm_rank(sendreq->req_module->p2p_comm),
                          sendreq->req_target_rank));
 
     OPAL_THREAD_LOCK(&sendreq->req_module->p2p_lock);
@@ -189,7 +189,7 @@ ompi_osc_pt2pt_sendreq_send(ompi_osc_pt2pt_module_t *module,
     header = (ompi_osc_pt2pt_send_header_t*) buffer->payload;
     written_data += sizeof(ompi_osc_pt2pt_send_header_t);
     header->hdr_base.hdr_flags = 0;
-    header->hdr_origin = sendreq->req_module->p2p_comm->c_my_rank;
+    header->hdr_origin = ompi_comm_rank(sendreq->req_module->p2p_comm);
     header->hdr_origin_sendreq.pval = (void*) sendreq;
     header->hdr_origin_tag = 0;
     header->hdr_target_disp = sendreq->req_target_disp;
@@ -267,7 +267,7 @@ ompi_osc_pt2pt_sendreq_send(ompi_osc_pt2pt_module_t *module,
     /* send fragment */
     OPAL_OUTPUT_VERBOSE((51, ompi_osc_base_output,
                          "%d sending sendreq to %d",
-                         sendreq->req_module->p2p_comm->c_my_rank,
+                         ompi_comm_rank(sendreq->req_module->p2p_comm),
                          sendreq->req_target_rank));
 
     ret = MCA_PML_CALL(isend(buffer->payload,
@@ -292,7 +292,7 @@ ompi_osc_pt2pt_sendreq_send(ompi_osc_pt2pt_module_t *module,
     longreq->mpireq.cbdata = sendreq;
 
     OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                          "%d starting long sendreq to %d (%d)",
-                         sendreq->req_module->p2p_comm->c_my_rank,
+                         ompi_comm_rank(sendreq->req_module->p2p_comm),
                          sendreq->req_target_rank,
                          header->hdr_origin_tag));
@@ -620,7 +620,7 @@ ompi_osc_pt2pt_sendreq_recv_accum_long_cb(ompi_osc_pt2pt_mpireq_t *mpireq)
     OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                          "%d finished receiving long accum message from %d",
-                         longreq->req_module->p2p_comm->c_my_rank,
+                         ompi_comm_rank(longreq->req_module->p2p_comm),
                          header->hdr_origin));
 
     /* free the temp buffer */
@@ -672,7 +672,7 @@ ompi_osc_pt2pt_sendreq_recv_accum(ompi_osc_pt2pt_module_t *module,
         OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                              "%d received accum message from %d",
-                             module->p2p_comm->c_my_rank,
+                             ompi_comm_rank(module->p2p_comm),
                              header->hdr_origin));
     } else {
         ompi_osc_pt2pt_longreq_t *longreq;
@@ -711,7 +711,7 @@ ompi_osc_pt2pt_sendreq_recv_accum(ompi_osc_pt2pt_module_t *module,
         OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                              "%d started long recv accum message from %d (%d)",
-                             module->p2p_comm->c_my_rank,
+                             ompi_comm_rank(module->p2p_comm),
                              header->hdr_origin,
                              header->hdr_origin_tag));
@@ -838,8 +838,8 @@ ompi_osc_pt2pt_control_send(ompi_osc_pt2pt_module_t *module,
     int rank = -1, i;
 
     /* find the rank */
-    for (i = 0 ; i < module->p2p_comm->c_remote_group->grp_proc_count ; ++i) {
-        if (proc == module->p2p_comm->c_remote_group->grp_proc_pointers[i]) {
+    for (i = 0 ; i < ompi_comm_size(module->p2p_comm) ; ++i) {
+        if (proc == ompi_comm_peer_lookup(module->p2p_comm, i)) {
             rank = i;
         }
     }

View file

@@ -188,16 +188,10 @@ ompi_osc_pt2pt_module_start(ompi_group_t *group,
     for (i = 0 ; i < ompi_group_size(group) ; i++) {
         int comm_rank = -1, j;
-        /* no need to increment ref count - the communicator isn't
-           going anywhere while we're here */
-        ompi_group_t *comm_group = module->p2p_comm->c_local_group;
-
         /* find the rank in the communicator associated with this windows */
-        for (j = 0 ;
-             j < ompi_group_size(comm_group) ;
-             ++j) {
-            if (module->p2p_sc_group->grp_proc_pointers[i] ==
-                comm_group->grp_proc_pointers[j]) {
+        for (j = 0 ; j < ompi_comm_size(module->p2p_comm) ; ++j) {
+            if (ompi_group_peer_lookup(module->p2p_sc_group, i) ==
+                ompi_comm_peer_lookup(module->p2p_comm, j)) {
                 comm_rank = j;
                 break;
             }
@@ -254,7 +248,7 @@ ompi_osc_pt2pt_module_complete(ompi_win_t *win)
     for (i = 0 ; i < ompi_group_size(module->p2p_sc_group) ; ++i) {
         int comm_rank = module->p2p_sc_remote_ranks[i];
         ret = ompi_osc_pt2pt_control_send(module,
-                                          module->p2p_sc_group->grp_proc_pointers[i],
+                                          ompi_group_peer_lookup(module->p2p_sc_group, i),
                                           OMPI_OSC_PT2PT_HDR_COMPLETE,
                                           module->p2p_copy_num_pending_sendreqs[comm_rank],
                                           0);
@@ -327,7 +321,7 @@ ompi_osc_pt2pt_module_post(ompi_group_t *group,
     /* send a hello counter to everyone in group */
     for (i = 0 ; i < ompi_group_size(module->p2p_pw_group) ; ++i) {
         ompi_osc_pt2pt_control_send(module,
-                                    group->grp_proc_pointers[i],
+                                    ompi_group_peer_lookup(group, i),
                                     OMPI_OSC_PT2PT_HDR_POST, 1, 0);
     }
@@ -420,13 +414,13 @@ ompi_osc_pt2pt_module_lock(int lock_type,
     OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                          "%d: sending lock request to %d",
-                         module->p2p_comm->c_my_rank,
+                         ompi_comm_rank(module->p2p_comm),
                          target));
     /* generate a lock request */
     ompi_osc_pt2pt_control_send(module,
                                 proc,
                                 OMPI_OSC_PT2PT_HDR_LOCK_REQ,
-                                module->p2p_comm->c_my_rank,
+                                ompi_comm_rank(module->p2p_comm),
                                 lock_type);
 
     /* return */
@@ -467,12 +461,12 @@ ompi_osc_pt2pt_module_unlock(int target,
     /* send the unlock request */
     OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                          "%d: sending unlock request to %d with %d requests",
-                         module->p2p_comm->c_my_rank, target,
+                         ompi_comm_rank(module->p2p_comm), target,
                          out_count));
     ompi_osc_pt2pt_control_send(module,
                                 proc,
                                 OMPI_OSC_PT2PT_HDR_UNLOCK_REQ,
-                                module->p2p_comm->c_my_rank,
+                                ompi_comm_rank(module->p2p_comm),
                                 out_count);
 
     while (NULL !=
@@ -499,7 +493,7 @@ ompi_osc_pt2pt_module_unlock(int target,
     OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                          "%d: finished unlock to %d",
-                         module->p2p_comm->c_my_rank, target));
+                         ompi_comm_rank(module->p2p_comm), target));
 
     /* set our mode on the window */
     ompi_win_remove_mode(win, OMPI_WIN_ACCESS_EPOCH | OMPI_WIN_LOCK_ACCESS);
@@ -524,13 +518,13 @@ ompi_osc_pt2pt_passive_lock(ompi_osc_pt2pt_module_t *module,
             module->p2p_lock_status = MPI_LOCK_EXCLUSIVE;
             OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                                  "%d: setting lock status to EXCLUSIVE (from %d)",
-                                 module->p2p_comm->c_my_rank, origin));
+                                 ompi_comm_rank(module->p2p_comm), origin));
             ompi_win_append_mode(module->p2p_win, OMPI_WIN_EXPOSE_EPOCH);
             send_ack = true;
         } else {
             OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                                  "%d: queuing lock request from %d (type=%d)",
-                                 module->p2p_comm->c_my_rank, origin, lock_type));
+                                 ompi_comm_rank(module->p2p_comm), origin, lock_type));
             new_pending = OBJ_NEW(ompi_osc_pt2pt_pending_lock_t);
             new_pending->proc = proc;
             new_pending->lock_type = lock_type;
@@ -542,13 +536,13 @@ ompi_osc_pt2pt_passive_lock(ompi_osc_pt2pt_module_t *module,
             module->p2p_shared_count++;
             OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                                  "%d: setting lock status to SHARED (from %d), count %d",
-                                 module->p2p_comm->c_my_rank, origin, module->p2p_shared_count));
+                                 ompi_comm_rank(module->p2p_comm), origin, module->p2p_shared_count));
             ompi_win_append_mode(module->p2p_win, OMPI_WIN_EXPOSE_EPOCH);
             send_ack = true;
         } else {
             OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                                  "%d: queuing lock request from %d (type=%d)",
-                                 module->p2p_comm->c_my_rank, origin, lock_type));
+                                 ompi_comm_rank(module->p2p_comm), origin, lock_type));
             new_pending = OBJ_NEW(ompi_osc_pt2pt_pending_lock_t);
             new_pending->proc = proc;
             new_pending->lock_type = lock_type;
@@ -562,10 +556,10 @@ ompi_osc_pt2pt_passive_lock(ompi_osc_pt2pt_module_t *module,
     if (send_ack) {
         OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                              "%d: sending lock ack to %d",
-                             module->p2p_comm->c_my_rank, origin));
+                             ompi_comm_rank(module->p2p_comm), origin));
         ompi_osc_pt2pt_control_send(module, proc,
                                     OMPI_OSC_PT2PT_HDR_LOCK_REQ,
-                                    module->p2p_comm->c_my_rank,
+                                    ompi_comm_rank(module->p2p_comm),
                                     OMPI_SUCCESS);
     }
@@ -585,7 +579,7 @@ ompi_osc_pt2pt_passive_unlock(ompi_osc_pt2pt_module_t *module,
     OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                          "%d: received unlock request from %d with %d requests\n",
-                         module->p2p_comm->c_my_rank,
+                         ompi_comm_rank(module->p2p_comm),
                          origin, count));
 
     new_pending = OBJ_NEW(ompi_osc_pt2pt_pending_lock_t);
@@ -621,7 +615,8 @@ ompi_osc_pt2pt_passive_unlock_complete(ompi_osc_pt2pt_module_t *module)
     module->p2p_shared_count -= opal_list_get_size(&module->p2p_unlocks_pending);
     OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                          "%d: decrementing shared count to %d",
-                         module->p2p_comm->c_my_rank, module->p2p_shared_count));
+                         ompi_comm_rank(module->p2p_comm),
+                         module->p2p_shared_count));
     if (module->p2p_shared_count == 0) {
         ompi_win_remove_mode(module->p2p_win, OMPI_WIN_EXPOSE_EPOCH);
         module->p2p_lock_status = 0;
@@ -641,7 +636,7 @@ ompi_osc_pt2pt_passive_unlock_complete(ompi_osc_pt2pt_module_t *module)
            opal_list_remove_first(&copy_unlock_acks))) {
         OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                              "%d: sending unlock ack to proc %d",
-                             module->p2p_comm->c_my_rank,
+                             ompi_comm_rank(module->p2p_comm),
                              new_pending->proc->proc_name.vpid));
         ompi_osc_pt2pt_control_send(module,
                                     new_pending->proc,
@@ -661,7 +656,7 @@ ompi_osc_pt2pt_passive_unlock_complete(ompi_osc_pt2pt_module_t *module)
     if (NULL != new_pending) {
         OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                              "%d: sending lock ack to proc %d",
-                             module->p2p_comm->c_my_rank,
+                             ompi_comm_rank(module->p2p_comm),
                              new_pending->proc->proc_name.vpid));
         ompi_win_append_mode(module->p2p_win, OMPI_WIN_EXPOSE_EPOCH);
         /* set lock state and generate a lock request */
@@ -679,7 +674,7 @@ ompi_osc_pt2pt_passive_unlock_complete(ompi_osc_pt2pt_module_t *module)
         ompi_osc_pt2pt_control_send(module,
                                     new_pending->proc,
                                     OMPI_OSC_PT2PT_HDR_LOCK_REQ,
-                                    module->p2p_comm->c_my_rank,
+                                    ompi_comm_rank(module->p2p_comm),
                                     OMPI_SUCCESS);
         OBJ_RELEASE(new_pending);
     }

View file

@@ -39,7 +39,7 @@ ompi_osc_rdma_module_free(ompi_win_t *win)
     opal_output_verbose(1, ompi_osc_base_output,
                         "rdma component destroying window with id %d",
-                        module->m_comm->c_contextid);
+                        ompi_comm_get_cid(module->m_comm));
 
     /* finish with a barrier */
     if (ompi_group_size(win->w_group) > 1) {
@@ -49,7 +49,7 @@ ompi_osc_rdma_module_free(ompi_win_t *win)
     /* remove from component information */
    OPAL_THREAD_LOCK(&mca_osc_rdma_component.c_lock);
     tmp = opal_hash_table_remove_value_uint32(&mca_osc_rdma_component.c_modules,
-                                              module->m_comm->c_contextid);
+                                              ompi_comm_get_cid(module->m_comm));
     /* only take the output of hast_table_remove if there wasn't already an error */
     ret = (ret != OMPI_SUCCESS) ? ret : tmp;

View file

@@ -324,7 +324,7 @@ ompi_osc_rdma_component_select(ompi_win_t *win,
     opal_output_verbose(1, ompi_osc_base_output,
                         "rdma component creating window with id %d",
-                        module->m_comm->c_contextid);
+                        ompi_comm_get_cid(module->m_comm));
 
     module->m_num_pending_sendreqs = (unsigned int*)
         malloc(sizeof(unsigned int) * ompi_comm_size(module->m_comm));
@@ -404,7 +404,7 @@ ompi_osc_rdma_component_select(ompi_win_t *win,
     /* update component data */
     OPAL_THREAD_LOCK(&mca_osc_rdma_component.c_lock);
     opal_hash_table_set_value_uint32(&mca_osc_rdma_component.c_modules,
-                                     module->m_comm->c_contextid,
+                                     ompi_comm_get_cid(module->m_comm),
                                      module);
     ret = opal_hash_table_get_size(&mca_osc_rdma_component.c_modules);
     if (ret == 1) {
@@ -453,7 +453,7 @@ ompi_osc_rdma_component_select(ompi_win_t *win,
     if (OMPI_SUCCESS != ret) goto cleanup;
 
     OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
-                         "done creating window %d", module->m_comm->c_contextid));
+                         "done creating window %d", ompi_comm_get_cid(module->m_comm)));
 
     return OMPI_SUCCESS;
@@ -1061,7 +1061,7 @@ rdma_send_info_send(ompi_osc_rdma_module_t *module,
     header->hdr_base.hdr_flags = 0;
     header->hdr_segkey = peer_send_info->seg_key;
     header->hdr_origin = ompi_comm_rank(module->m_comm);
-    header->hdr_windx = module->m_comm->c_contextid;
+    header->hdr_windx = ompi_comm_get_cid(module->m_comm);
 
 #ifdef WORDS_BIGENDIAN
     header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;

View file

@@ -268,7 +268,7 @@ ompi_osc_rdma_sendreq_send_long_cb(ompi_osc_rdma_longreq_t *longreq)
     OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                          "%d completed long sendreq to %d",
-                         sendreq->req_module->m_comm->c_my_rank,
+                         ompi_comm_rank(sendreq->req_module->m_comm),
                          sendreq->req_target_rank));
 
     OPAL_THREAD_LOCK(&sendreq->req_module->m_lock);
@@ -342,7 +342,7 @@ ompi_osc_rdma_sendreq_send_cb(struct mca_btl_base_module_t* btl,
         longreq->cbdata = sendreq;
         OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                              "%d starting long sendreq to %d (%d)",
-                             sendreq->req_module->m_comm->c_my_rank,
+                             ompi_comm_rank(sendreq->req_module->m_comm),
                              sendreq->req_target_rank,
                              header->hdr_origin_tag));
@@ -484,8 +484,8 @@ ompi_osc_rdma_sendreq_send(ompi_osc_rdma_module_t *module,
         ((char*) descriptor->des_src[0].seg_addr.pval + descriptor->des_src[0].seg_len);
     written_data += sizeof(ompi_osc_rdma_send_header_t);
     header->hdr_base.hdr_flags = 0;
-    header->hdr_windx = sendreq->req_module->m_comm->c_contextid;
-    header->hdr_origin = sendreq->req_module->m_comm->c_my_rank;
+    header->hdr_windx = ompi_comm_get_cid(sendreq->req_module->m_comm);
+    header->hdr_origin = ompi_comm_rank(sendreq->req_module->m_comm);
     header->hdr_origin_sendreq.pval = (void*) sendreq;
     header->hdr_origin_tag = 0;
     header->hdr_target_disp = sendreq->req_target_disp;
@@ -587,7 +587,7 @@ ompi_osc_rdma_sendreq_send(ompi_osc_rdma_module_t *module,
     /* send fragment */
     OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                          "%d sending sendreq to %d",
-                         sendreq->req_module->m_comm->c_my_rank,
+                         ompi_comm_rank(sendreq->req_module->m_comm),
                          sendreq->req_target_rank));
 
     module->m_pending_buffers[sendreq->req_target_rank].bml_btl = NULL;
@@ -787,7 +787,7 @@ ompi_osc_rdma_sendreq_recv_put_long_cb(ompi_osc_rdma_longreq_t *longreq)
     OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                          "%d finished receiving long put message",
-                         longreq->req_module->m_comm->c_my_rank));
+                         ompi_comm_rank(longreq->req_module->m_comm)));
 
     inmsg_mark_complete(longreq->req_module);
 }
@@ -843,7 +843,7 @@ ompi_osc_rdma_sendreq_recv_put(ompi_osc_rdma_module_t *module,
         OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                              "%d received put message from %d",
-                             module->m_comm->c_my_rank,
+                             ompi_comm_rank(module->m_comm),
                              header->hdr_origin));
     } else {
@@ -865,7 +865,7 @@ ompi_osc_rdma_sendreq_recv_put(ompi_osc_rdma_module_t *module,
         OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                              "%d started long recv put message from %d (%d)",
-                             module->m_comm->c_my_rank,
+                             ompi_comm_rank(module->m_comm),
                              header->hdr_origin,
                              header->hdr_origin_tag));
@@ -911,7 +911,7 @@ ompi_osc_rdma_sendreq_recv_accum_long_cb(ompi_osc_rdma_longreq_t *longreq)
     OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                          "%d finished receiving long accum message from %d",
-                         longreq->req_module->m_comm->c_my_rank,
+                         ompi_comm_rank(longreq->req_module->m_comm),
                          header->hdr_origin));
 
     /* free the temp buffer */
@@ -963,7 +963,7 @@ ompi_osc_rdma_sendreq_recv_accum(ompi_osc_rdma_module_t *module,
         OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                              "%d received accum message from %d",
-                             module->m_comm->c_my_rank,
+                             ompi_comm_rank(module->m_comm),
                              header->hdr_origin));
 
         *payload = ((char*) *payload) + header->hdr_msg_length;
@@ -1004,7 +1004,7 @@ ompi_osc_rdma_sendreq_recv_accum(ompi_osc_rdma_module_t *module,
         OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                              "%d started long recv accum message from %d (%d)",
-                             module->m_comm->c_my_rank,
+                             ompi_comm_rank(module->m_comm),
                              header->hdr_origin,
                              header->hdr_origin_tag));
@@ -1160,7 +1160,7 @@ ompi_osc_rdma_control_send(ompi_osc_rdma_module_t *module,
     header->hdr_base.hdr_flags = 0;
     header->hdr_value[0] = value0;
     header->hdr_value[1] = value1;
-    header->hdr_windx = module->m_comm->c_contextid;
+    header->hdr_windx = ompi_comm_get_cid(module->m_comm);
 
 #ifdef WORDS_BIGENDIAN
     header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;
@@ -1222,7 +1222,7 @@ ompi_osc_rdma_rdma_ack_send(ompi_osc_rdma_module_t *module,
     header->hdr_base.hdr_flags = 0;
     header->hdr_value[0] = rdma_btl->num_sent;
     header->hdr_value[1] = 0;
-    header->hdr_windx = module->m_comm->c_contextid;
+    header->hdr_windx = ompi_comm_get_cid(module->m_comm);
 
 #ifdef WORDS_BIGENDIAN
     header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;

View file

@@ -229,16 +229,10 @@ ompi_osc_rdma_module_start(ompi_group_t *group,
     for (i = 0 ; i < ompi_group_size(group) ; i++) {
         int comm_rank = -1, j;
-        /* no need to increment ref count - the communicator isn't
-           going anywhere while we're here */
-        ompi_group_t *comm_group = module->m_comm->c_local_group;
-
         /* find the rank in the communicator associated with this windows */
-        for (j = 0 ;
-             j < ompi_group_size(comm_group) ;
-             ++j) {
-            if (module->m_sc_group->grp_proc_pointers[i] ==
-                comm_group->grp_proc_pointers[j]) {
+        for (j = 0 ; j < ompi_comm_size(module->m_comm) ; ++j) {
+            if (ompi_group_peer_lookup(module->m_sc_group, i) ==
+                ompi_comm_peer_lookup(module->m_comm, j)) {
                 comm_rank = j;
                 break;
             }
@@ -306,7 +300,7 @@ ompi_osc_rdma_module_complete(ompi_win_t *win)
         for (j = 0 ; j < module->m_peer_info[comm_rank].peer_num_btls ; ++j) {
             if (module->m_peer_info[comm_rank].peer_btls[j].num_sent > 0) {
                 ret = ompi_osc_rdma_rdma_ack_send(module,
-                                                  module->m_sc_group->grp_proc_pointers[i],
+                                                  ompi_group_peer_lookup(module->m_sc_group, i),
                                                   &(module->m_peer_info[comm_rank].peer_btls[j]));
                 if (OPAL_LIKELY(OMPI_SUCCESS == ret)) {
                     module->m_peer_info[comm_rank].peer_btls[j].num_sent = 0;
@@ -318,7 +312,7 @@ ompi_osc_rdma_module_complete(ompi_win_t *win)
             }
         }
         ret = ompi_osc_rdma_control_send(module,
-                                         module->m_sc_group->grp_proc_pointers[i],
+                                         ompi_group_peer_lookup(module->m_sc_group, i),
                                          OMPI_OSC_RDMA_HDR_COMPLETE,
                                          module->m_copy_num_pending_sendreqs[comm_rank],
                                          0);
@@ -384,7 +378,7 @@ ompi_osc_rdma_module_post(ompi_group_t *group,
     OPAL_THREAD_LOCK(&(module->m_lock));
     assert(NULL == module->m_pw_group);
-   module->m_pw_group = group;
+    module->m_pw_group = group;
 
     /* Set our mode to expose w/ post */
     ompi_win_remove_mode(win, OMPI_WIN_FENCE);
@@ -398,8 +392,8 @@ ompi_osc_rdma_module_post(ompi_group_t *group,
     /* send a hello counter to everyone in group */
     for (i = 0 ; i < ompi_group_size(module->m_pw_group) ; ++i) {
         ompi_osc_rdma_control_send(module,
-                                   group->grp_proc_pointers[i],
-                                   OMPI_OSC_RDMA_HDR_POST, 1, 0);
+                                   ompi_group_peer_lookup(group, i),
+                                   OMPI_OSC_RDMA_HDR_POST, 1, 0);
     }
 
     return OMPI_SUCCESS;
@@ -491,12 +485,12 @@ ompi_osc_rdma_module_lock(int lock_type,
     OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                          "%d sending lock request to %d",
-                         module->m_comm->c_my_rank, target));
+                         ompi_comm_rank(module->m_comm), target));
     /* generate a lock request */
     ompi_osc_rdma_control_send(module,
                                proc,
                                OMPI_OSC_RDMA_HDR_LOCK_REQ,
-                               module->m_comm->c_my_rank,
+                               ompi_comm_rank(module->m_comm),
                                lock_type);
 
     module->m_eager_send_active = false;
@@ -539,13 +533,13 @@ ompi_osc_rdma_module_unlock(int target,
     /* send the unlock request */
     OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                          "%d sending unlock request to %d with %d requests",
-                         module->m_comm->c_my_rank, target,
+                         ompi_comm_rank(module->m_comm), target,
                          out_count));
     ompi_osc_rdma_control_send(module,
-                               proc,
-                               OMPI_OSC_RDMA_HDR_UNLOCK_REQ,
-                               module->m_comm->c_my_rank,
-                               out_count);
+                               proc,
+                               OMPI_OSC_RDMA_HDR_UNLOCK_REQ,
+                               ompi_comm_rank(module->m_comm),
+                               out_count);
 
     /* try to start all the requests.  We've copied everything we
        need out of pending_sendreqs, so don't need the lock
@@ -607,7 +601,8 @@ ompi_osc_rdma_passive_lock(ompi_osc_rdma_module_t *module,
         } else {
             OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                                  "%d queuing lock request from %d (%d)",
-                                 module->m_comm->c_my_rank, origin, lock_type));
+                                 ompi_comm_rank(module->m_comm),
+                                 origin, lock_type));
             new_pending = OBJ_NEW(ompi_osc_rdma_pending_lock_t);
             new_pending->proc = proc;
             new_pending->lock_type = lock_type;
@@ -622,7 +617,8 @@ ompi_osc_rdma_passive_lock(ompi_osc_rdma_module_t *module,
         } else {
             OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                                  "queuing lock request from %d (%d) lock_type:%d",
-                                 module->m_comm->c_my_rank, origin, lock_type));
+                                 ompi_comm_rank(module->m_comm),
+                                 origin, lock_type));
             new_pending = OBJ_NEW(ompi_osc_rdma_pending_lock_t);
             new_pending->proc = proc;
             new_pending->lock_type = lock_type;
@@ -636,10 +632,10 @@ ompi_osc_rdma_passive_lock(ompi_osc_rdma_module_t *module,
     if (send_ack) {
         OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                              "%d sending lock ack to %d",
-                             module->m_comm->c_my_rank, origin));
+                             ompi_comm_rank(module->m_comm), origin));
         ompi_osc_rdma_control_send(module, proc,
                                    OMPI_OSC_RDMA_HDR_LOCK_REQ,
-                                   module->m_comm->c_my_rank,
+                                   ompi_comm_rank(module->m_comm),
                                    OMPI_SUCCESS);
     }
@@ -741,10 +737,10 @@ ompi_osc_rdma_passive_unlock_complete(ompi_osc_rdma_module_t *module)
         OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                              "sending lock request to proc"));
         ompi_osc_rdma_control_send(module,
-                                   new_pending->proc,
-                                   OMPI_OSC_RDMA_HDR_LOCK_REQ,
-                                   module->m_comm->c_my_rank,
-                                   OMPI_SUCCESS);
+                                   new_pending->proc,
+                                   OMPI_OSC_RDMA_HDR_LOCK_REQ,
+                                   ompi_comm_rank(module->m_comm),
+                                   OMPI_SUCCESS);
         OBJ_RELEASE(new_pending);
     }