Changes to accommodate procid->vpid and to move away from accessing the
vpid directly. Instead, use the ompi_name_server.get_vpid() (and friends) accessor functions from the selected ns module. This fixes compile issues, but still leaves linker issues -- Ralph will be committing a fix for those shortly. This commit was SVN r1592.
Этот коммит содержится в:
родитель
5803b79dcd
Коммит
d1c5e93cd4
@ -15,6 +15,7 @@
|
|||||||
#include "mca/pml/pml.h"
|
#include "mca/pml/pml.h"
|
||||||
#include "mca/coll/coll.h"
|
#include "mca/coll/coll.h"
|
||||||
#include "mca/coll/base/base.h"
|
#include "mca/coll/base/base.h"
|
||||||
|
#include "mca/ns/base/base.h"
|
||||||
|
|
||||||
#include "mca/pml/pml.h"
|
#include "mca/pml/pml.h"
|
||||||
#include "mca/ptl/ptl.h"
|
#include "mca/ptl/ptl.h"
|
||||||
@ -681,6 +682,7 @@ int ompi_comm_determine_first ( ompi_communicator_t *intercomm, int high )
|
|||||||
int flag, rhigh;
|
int flag, rhigh;
|
||||||
int local_rank, rc;
|
int local_rank, rc;
|
||||||
ompi_proc_t *lvpid, *rvpid;
|
ompi_proc_t *lvpid, *rvpid;
|
||||||
|
ompi_ns_cmp_bitmask_t mask;
|
||||||
|
|
||||||
lvpid = intercomm->c_local_group->grp_proc_pointers[0];
|
lvpid = intercomm->c_local_group->grp_proc_pointers[0];
|
||||||
rvpid = intercomm->c_remote_group->grp_proc_pointers[0];
|
rvpid = intercomm->c_remote_group->grp_proc_pointers[0];
|
||||||
@ -689,7 +691,9 @@ int ompi_comm_determine_first ( ompi_communicator_t *intercomm, int high )
|
|||||||
/*
|
/*
|
||||||
* determine maximal high value over the intercomm
|
* determine maximal high value over the intercomm
|
||||||
*/
|
*/
|
||||||
if ( lvpid->proc_name.procid > rvpid->proc_name.procid ) {
|
mask = OMPI_NS_CMP_CELLID | OMPI_NS_CMP_JOBID | OMPI_NS_CMP_VPID;
|
||||||
|
if ( ompi_name_server.compare(mask, &lvpid->proc_name,
|
||||||
|
&rvpid->proc_name) < 0 ) {
|
||||||
if ( 0 == local_rank ) {
|
if ( 0 == local_rank ) {
|
||||||
rc = intercomm->c_coll.coll_bcast(&high, 1, MPI_INT, MPI_ROOT,
|
rc = intercomm->c_coll.coll_bcast(&high, 1, MPI_INT, MPI_ROOT,
|
||||||
intercomm );
|
intercomm );
|
||||||
|
@ -14,6 +14,7 @@
|
|||||||
#include "mca/pml/pml.h"
|
#include "mca/pml/pml.h"
|
||||||
#include "mca/coll/coll.h"
|
#include "mca/coll/coll.h"
|
||||||
#include "mca/coll/base/base.h"
|
#include "mca/coll/base/base.h"
|
||||||
|
#include "mca/ns/base/base.h"
|
||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -51,7 +52,7 @@ int ompi_comm_init(void)
|
|||||||
OBJ_CONSTRUCT(&ompi_mpi_comm_world, ompi_communicator_t);
|
OBJ_CONSTRUCT(&ompi_mpi_comm_world, ompi_communicator_t);
|
||||||
group = OBJ_NEW(ompi_group_t);
|
group = OBJ_NEW(ompi_group_t);
|
||||||
group->grp_proc_pointers = ompi_proc_world(&size);
|
group->grp_proc_pointers = ompi_proc_world(&size);
|
||||||
group->grp_my_rank = ompi_proc_local()->proc_name.procid;
|
group->grp_my_rank = ompi_name_server.get_vpid(&(ompi_proc_local()->proc_name));
|
||||||
group->grp_proc_count = size;
|
group->grp_proc_count = size;
|
||||||
group->grp_flags |= OMPI_GROUP_INTRINSIC;
|
group->grp_flags |= OMPI_GROUP_INTRINSIC;
|
||||||
OBJ_RETAIN(group); /* bump reference count for remote reference */
|
OBJ_RETAIN(group); /* bump reference count for remote reference */
|
||||||
|
@ -51,7 +51,7 @@ int mca_oob_base_init(bool *user_threads, bool *hidden_threads)
|
|||||||
/* setup wildcard name */
|
/* setup wildcard name */
|
||||||
mca_oob_base_any.cellid = -1;
|
mca_oob_base_any.cellid = -1;
|
||||||
mca_oob_base_any.jobid = -1;
|
mca_oob_base_any.jobid = -1;
|
||||||
mca_oob_base_any.procid = -1;
|
mca_oob_base_any.vpid = -1;
|
||||||
|
|
||||||
/* Traverse the list of available modules; call their init functions. */
|
/* Traverse the list of available modules; call their init functions. */
|
||||||
for (item = ompi_list_get_first(&mca_oob_base_components);
|
for (item = ompi_list_get_first(&mca_oob_base_components);
|
||||||
|
@ -9,9 +9,10 @@
|
|||||||
#include <fcntl.h>
|
#include <fcntl.h>
|
||||||
#include "ompi_config.h"
|
#include "ompi_config.h"
|
||||||
|
|
||||||
|
#include "include/types.h"
|
||||||
#include "mca/oob/oob.h"
|
#include "mca/oob/oob.h"
|
||||||
#include "mca/oob/cofs/src/oob_cofs.h"
|
#include "mca/oob/cofs/src/oob_cofs.h"
|
||||||
#include "include/types.h"
|
#include "mca/ns/base/base.h"
|
||||||
|
|
||||||
#include <stdio.h>
|
#include <stdio.h>
|
||||||
#include <sys/types.h>
|
#include <sys/types.h>
|
||||||
@ -45,9 +46,13 @@ int mca_oob_cofs_send(
|
|||||||
|
|
||||||
/* create the file and open it... */
|
/* create the file and open it... */
|
||||||
snprintf(msg_file, OMPI_PATH_MAX, "%s/%d_%d_%d_%ld.msg", mca_oob_cofs_comm_loc,
|
snprintf(msg_file, OMPI_PATH_MAX, "%s/%d_%d_%d_%ld.msg", mca_oob_cofs_comm_loc,
|
||||||
mca_oob_base_self.jobid, mca_oob_base_self.procid, peer->procid, (long)mca_oob_cofs_serial);
|
ompi_name_server.get_jobid(&mca_oob_base_self),
|
||||||
|
ompi_name_server.get_vpid(&mca_oob_base_self),
|
||||||
|
ompi_name_server.get_vpid(peer), (long)mca_oob_cofs_serial);
|
||||||
snprintf(msg_file_tmp, OMPI_PATH_MAX, "%s/.%d_%d_%d_%ld.msg", mca_oob_cofs_comm_loc,
|
snprintf(msg_file_tmp, OMPI_PATH_MAX, "%s/.%d_%d_%d_%ld.msg", mca_oob_cofs_comm_loc,
|
||||||
mca_oob_base_self.jobid, mca_oob_base_self.procid, peer->procid, (long)mca_oob_cofs_serial);
|
ompi_name_server.get_jobid(&mca_oob_base_self),
|
||||||
|
ompi_name_server.get_vpid(&mca_oob_base_self),
|
||||||
|
ompi_name_server.get_vpid(peer), (long)mca_oob_cofs_serial);
|
||||||
|
|
||||||
fp = fopen(msg_file_tmp, "w");
|
fp = fopen(msg_file_tmp, "w");
|
||||||
if (fp == NULL) {
|
if (fp == NULL) {
|
||||||
@ -101,7 +106,8 @@ mca_oob_cofs_recv(ompi_process_name_t* peer, const struct iovec* iov, int count,
|
|||||||
{
|
{
|
||||||
int ret = OMPI_ERR_WOULD_BLOCK;
|
int ret = OMPI_ERR_WOULD_BLOCK;
|
||||||
while (ret == OMPI_ERR_WOULD_BLOCK) {
|
while (ret == OMPI_ERR_WOULD_BLOCK) {
|
||||||
ret = do_recv(peer->jobid, peer->procid, iov, count, flags);
|
ret = do_recv(ompi_name_server.get_jobid(peer),
|
||||||
|
ompi_name_server.get_vpid(peer), iov, count, flags);
|
||||||
sleep(1);
|
sleep(1);
|
||||||
}
|
}
|
||||||
return ret;
|
return ret;
|
||||||
@ -153,7 +159,7 @@ find_match(ompi_process_id_t jobid, ompi_process_id_t procid)
|
|||||||
if (tmp_jobid != jobid) {
|
if (tmp_jobid != jobid) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
if (tmp_myprocid != mca_oob_base_self.procid) {
|
if (tmp_myprocid != ompi_name_server.get_vpid(&mca_oob_base_self)) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
if (tmp_procid != procid) {
|
if (tmp_procid != procid) {
|
||||||
|
@ -125,7 +125,7 @@ mca_pcm_cofs_init(int *priority, bool *allow_multi_user_threads,
|
|||||||
for(i=0; i<mca_pcm_cofs_num_procs; i++) {
|
for(i=0; i<mca_pcm_cofs_num_procs; i++) {
|
||||||
mca_pcm_cofs_procs[i].cellid = cellid;
|
mca_pcm_cofs_procs[i].cellid = cellid;
|
||||||
mca_pcm_cofs_procs[i].jobid = jobid;
|
mca_pcm_cofs_procs[i].jobid = jobid;
|
||||||
mca_pcm_cofs_procs[i].procid = i;
|
mca_pcm_cofs_procs[i].vpid = i;
|
||||||
}
|
}
|
||||||
return &mca_pcm_cofs_1_0_0;
|
return &mca_pcm_cofs_1_0_0;
|
||||||
}
|
}
|
||||||
|
@ -10,8 +10,10 @@
|
|||||||
#include <netinet/tcp.h>
|
#include <netinet/tcp.h>
|
||||||
#include <netinet/in.h>
|
#include <netinet/in.h>
|
||||||
#include <arpa/inet.h>
|
#include <arpa/inet.h>
|
||||||
|
|
||||||
#include "include/types.h"
|
#include "include/types.h"
|
||||||
#include "mca/pml/base/pml_base_sendreq.h"
|
#include "mca/pml/base/pml_base_sendreq.h"
|
||||||
|
#include "mca/ns/base/base.h"
|
||||||
#include "ptl_tcp.h"
|
#include "ptl_tcp.h"
|
||||||
#include "ptl_tcp_addr.h"
|
#include "ptl_tcp_addr.h"
|
||||||
#include "ptl_tcp_peer.h"
|
#include "ptl_tcp_peer.h"
|
||||||
@ -237,6 +239,8 @@ bool mca_ptl_tcp_peer_accept(mca_ptl_base_peer_t* ptl_peer, struct sockaddr_in*
|
|||||||
{
|
{
|
||||||
mca_ptl_tcp_addr_t* ptl_addr;
|
mca_ptl_tcp_addr_t* ptl_addr;
|
||||||
mca_ptl_tcp_proc_t* this_proc = mca_ptl_tcp_proc_local();
|
mca_ptl_tcp_proc_t* this_proc = mca_ptl_tcp_proc_local();
|
||||||
|
ompi_ns_cmp_bitmask_t mask = OMPI_NS_CMP_CELLID | OMPI_NS_CMP_JOBID | OMPI_NS_CMP_VPID;
|
||||||
|
|
||||||
OMPI_THREAD_LOCK(&ptl_peer->peer_recv_lock);
|
OMPI_THREAD_LOCK(&ptl_peer->peer_recv_lock);
|
||||||
OMPI_THREAD_LOCK(&ptl_peer->peer_send_lock);
|
OMPI_THREAD_LOCK(&ptl_peer->peer_send_lock);
|
||||||
if((ptl_addr = ptl_peer->peer_addr) != NULL &&
|
if((ptl_addr = ptl_peer->peer_addr) != NULL &&
|
||||||
@ -244,7 +248,9 @@ bool mca_ptl_tcp_peer_accept(mca_ptl_base_peer_t* ptl_peer, struct sockaddr_in*
|
|||||||
mca_ptl_tcp_proc_t *peer_proc = ptl_peer->peer_proc;
|
mca_ptl_tcp_proc_t *peer_proc = ptl_peer->peer_proc;
|
||||||
if((ptl_peer->peer_sd < 0) ||
|
if((ptl_peer->peer_sd < 0) ||
|
||||||
(ptl_peer->peer_state != MCA_PTL_TCP_CONNECTED &&
|
(ptl_peer->peer_state != MCA_PTL_TCP_CONNECTED &&
|
||||||
peer_proc->proc_ompi->proc_name.procid < this_proc->proc_ompi->proc_name.procid)) {
|
ompi_name_server.compare(mask,
|
||||||
|
&peer_proc->proc_ompi->proc_name,
|
||||||
|
&this_proc->proc_ompi->proc_name) < 0)) {
|
||||||
mca_ptl_tcp_peer_close(ptl_peer);
|
mca_ptl_tcp_peer_close(ptl_peer);
|
||||||
ptl_peer->peer_sd = sd;
|
ptl_peer->peer_sd = sd;
|
||||||
if(mca_ptl_tcp_peer_send_connect_ack(ptl_peer) != OMPI_SUCCESS) {
|
if(mca_ptl_tcp_peer_send_connect_ack(ptl_peer) != OMPI_SUCCESS) {
|
||||||
|
@ -1,9 +1,15 @@
|
|||||||
|
/*
|
||||||
|
* $HEADER
|
||||||
|
*/
|
||||||
|
|
||||||
#include <string.h>
|
#include <string.h>
|
||||||
|
|
||||||
#include "threads/mutex.h"
|
#include "threads/mutex.h"
|
||||||
#include "util/output.h"
|
#include "util/output.h"
|
||||||
#include "proc/proc.h"
|
#include "proc/proc.h"
|
||||||
#include "mca/pcm/pcm.h"
|
#include "mca/pcm/pcm.h"
|
||||||
#include "mca/oob/oob.h"
|
#include "mca/oob/oob.h"
|
||||||
|
#include "mca/ns/base/base.h"
|
||||||
|
|
||||||
|
|
||||||
static ompi_list_t ompi_proc_list;
|
static ompi_list_t ompi_proc_list;
|
||||||
@ -138,15 +144,16 @@ ompi_proc_t** ompi_proc_self(size_t* size)
|
|||||||
ompi_proc_t * ompi_proc_find ( const ompi_process_name_t * name )
|
ompi_proc_t * ompi_proc_find ( const ompi_process_name_t * name )
|
||||||
{
|
{
|
||||||
ompi_proc_t *proc;
|
ompi_proc_t *proc;
|
||||||
|
ompi_ns_cmp_bitmask_t mask;
|
||||||
|
|
||||||
/* return the proc-struct which matches this jobid+process id */
|
/* return the proc-struct which matches this jobid+process id */
|
||||||
|
|
||||||
|
mask = OMPI_NS_CMP_CELLID | OMPI_NS_CMP_JOBID | OMPI_NS_CMP_VPID;
|
||||||
OMPI_THREAD_LOCK(&ompi_proc_lock);
|
OMPI_THREAD_LOCK(&ompi_proc_lock);
|
||||||
for(proc = (ompi_proc_t*)ompi_list_get_first(&ompi_proc_list);
|
for(proc = (ompi_proc_t*)ompi_list_get_first(&ompi_proc_list);
|
||||||
proc != (ompi_proc_t*)ompi_list_get_end(&ompi_proc_list);
|
proc != (ompi_proc_t*)ompi_list_get_end(&ompi_proc_list);
|
||||||
proc = (ompi_proc_t*)ompi_list_get_next(proc)) {
|
proc = (ompi_proc_t*)ompi_list_get_next(proc)) {
|
||||||
if( proc->proc_name.cellid == name->cellid &&
|
if (0 == ompi_name_server.compare(mask, &proc->proc_name, name))
|
||||||
proc->proc_name.jobid == name->jobid &&
|
|
||||||
proc->proc_name.procid == name->procid )
|
|
||||||
{
|
{
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
Загрузка…
x
Ссылка в новой задаче
Block a user