
NOTE: this modifies the MPI-RTE interface

We have been getting several requests for new collectives that need to be inserted in various places of the MPI layer, all in support of either checkpoint/restart or various research efforts. Until now, this would require that the collective id's be generated at launch, which required modifications to ORTE and other places. We chose not to make collectives reusable, as the race conditions associated with resetting collective counters are daunting.

This commit extends the collective system to allow self-generation of collective id's that the daemons need to support, thereby allowing developers to request any number of collectives for their work. There is one restriction: RTE collectives must occur at the process level - i.e., we don't currently have a way of tagging the collective to a specific thread. From the comment in the code:

 * In order to allow scalable
 * generation of collective id's, they are formed as:
 *
 * top 32-bits are the jobid of the procs involved in
 * the collective. For collectives across multiple jobs
 * (e.g., in a connect_accept), the daemon jobid will
 * be used as the id will be issued by mpirun. This
 * won't cause problems because daemons don't use the
 * collective_id
 *
 * bottom 32-bits are a rolling counter that recycles
 * when the max is hit. The daemon will cleanup each
 * collective upon completion, so this means a job can
 * never have more than 2**32 collectives going on at
 * a time. If someone needs more than that - they've got
 * a problem.
 *
 * Note that this means (for now) that RTE-level collectives
 * cannot be done by individual threads - they must be
 * done at the overall process level. This is required as
 * there is no guaranteed ordering for the collective id's,
 * and all the participants must agree on the id of the
 * collective they are executing. So if thread A on one
 * process asks for a collective id before thread B does,
 * but B asks before A on another process, the collectives will
 * be mixed and not result in the expected behavior. We may
 * find a way to relax this requirement in the future by
 * adding a thread context id to the jobid field (maybe taking the
 * lower 16-bits of that field).
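
As a rough illustration of the layout described above, here is a minimal, standalone sketch of composing such an id (the function and variable names are hypothetical, not actual ORTE symbols):

#include <stdint.h>

/* Compose a 64-bit collective id: the jobid occupies the top 32 bits,
 * a per-process rolling counter the bottom 32 bits. */
static uint64_t compose_coll_id(uint32_t jobid, uint32_t *counter)
{
    uint64_t id = ((uint64_t)jobid << 32) | (uint64_t)(*counter);

    /* advance the rolling counter, recycling when the 32-bit max is hit */
    (*counter)++;
    if (UINT32_MAX == *counter) {
        *counter = 0;
    }
    return id;
}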

This commit includes a test program (orte/test/mpi/coll_test.c) that cycles 100 times across barrier and modex collectives.

This commit was SVN r32203.
Ralph Castain 2014-07-10 18:53:12 +00:00
parent 1b9621eeb0
commit a523dba41d
23 changed files with 255 additions and 247 deletions


@ -299,8 +299,9 @@ static int connect_accept(ompi_communicator_t *comm, int root,
}
OPAL_OUTPUT_VERBOSE((1, ompi_dpm_base_framework.framework_output,
"%s dpm:orte:connect_accept working with new collective ids %u %u",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), id[0], id[1]));
"%s dpm:orte:connect_accept working with new collective ids %lu %lu",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
(unsigned long)id[0], (unsigned long)id[1]));
/* Generate the message buffer containing the number of processes and the list of
participating processes */


@ -28,6 +28,7 @@ struct ompi_proc_t;
#include "orte/types.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/mca/grpcomm/grpcomm.h"
#include "orte/mca/grpcomm/base/base.h"
#include "orte/mca/rml/base/rml_contact.h"
#include "orte/mca/rml/rml.h"
#include "orte/mca/routed/routed.h"
@ -68,6 +69,7 @@ typedef orte_ns_cmp_bitmask_t ompi_rte_cmp_bitmask_t;
typedef orte_grpcomm_coll_id_t ompi_rte_collective_id_t;
OMPI_DECLSPEC int ompi_rte_modex(ompi_rte_collective_t *coll);
#define ompi_rte_barrier(a) orte_grpcomm.barrier(a)
#define ompi_rte_get_collective_id(a) orte_grpcomm_base_get_coll_id(a)
/* Process info struct and values */
typedef orte_node_rank_t ompi_node_rank_t;


@ -406,4 +406,3 @@ static void recv_callback(int status, orte_process_name_t* sender,
/* release */
opal_mutex_unlock(&mca_rte_orte_component.lock);
}


@ -228,7 +228,7 @@ int ompi_mpi_finalize(void)
https://svn.open-mpi.org/trac/ompi/ticket/4669#comment:4 for
more details). */
coll = OBJ_NEW(ompi_rte_collective_t);
coll->id = ompi_process_info.peer_fini_barrier;
coll->id = ompi_rte_get_collective_id(OMPI_PROC_MY_NAME);
coll->active = true;
if (OMPI_SUCCESS != (ret = ompi_rte_barrier(coll))) {
OMPI_ERROR_LOG(ret);


@ -628,7 +628,7 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
* as it will not return until the exchange is complete
*/
coll = OBJ_NEW(ompi_rte_collective_t);
coll->id = ompi_process_info.peer_modex;
coll->id = ompi_rte_get_collective_id(OMPI_PROC_MY_NAME);
coll->active = true;
if (OMPI_SUCCESS != (ret = ompi_rte_modex(coll))) {
error = "rte_modex failed";
@ -817,7 +817,7 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
/* wait for everyone to reach this point */
coll = OBJ_NEW(ompi_rte_collective_t);
coll->id = ompi_process_info.peer_init_barrier;
coll->id = ompi_rte_get_collective_id(OMPI_PROC_MY_NAME);
coll->active = true;
if (OMPI_SUCCESS != (ret = ompi_rte_barrier(coll))) {
error = "rte_barrier failed";

orte/mca/ess/env/ess_env_module.c (vendored)

@ -174,7 +174,7 @@ static int rte_init(void)
if (ORTE_PROC_IS_NON_MPI && !orte_do_not_barrier) {
orte_grpcomm_collective_t coll;
OBJ_CONSTRUCT(&coll, orte_grpcomm_collective_t);
coll.id = orte_process_info.peer_modex;
coll.id = orte_grpcomm_base_get_coll_id(ORTE_PROC_MY_NAME);
coll.active = true;
if (ORTE_SUCCESS != (ret = orte_grpcomm.modex(&coll))) {
ORTE_ERROR_LOG(ret);


@ -194,11 +194,6 @@ static int rte_init(void)
return rc;
}
/* set the collective ids */
orte_process_info.peer_modex = 0;
orte_process_info.peer_init_barrier = 1;
orte_process_info.peer_fini_barrier = 2;
/* to the best of our knowledge, we are alone */
orte_process_info.my_node_rank = 0;
orte_process_info.my_local_rank = 0;


@ -210,9 +210,9 @@ static void process_barrier(int fd, short args, void *cbdata)
found = false;
OPAL_LIST_FOREACH(cptr, &orte_grpcomm_base.active_colls, orte_grpcomm_collective_t) {
OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_framework.framework_output,
"%s CHECKING COLL id %d",
"%s CHECKING COLL id %lu",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
cptr->id));
(unsigned long)cptr->id));
if (coll->id == cptr->id) {
found = true;
@ -231,9 +231,9 @@ static void process_barrier(int fd, short args, void *cbdata)
* the barrier object
*/
OPAL_OUTPUT_VERBOSE((1, orte_grpcomm_base_framework.framework_output,
"%s grpcomm:bad collective %d already exists - removing prior copy",
"%s grpcomm:bad collective %lu already exists - removing prior copy",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
(int)coll->id));
(unsigned long)coll->id));
while (NULL != (item = opal_list_remove_first(&cptr->targets))) {
opal_list_append(&coll->targets, item);
}
@ -241,9 +241,9 @@ static void process_barrier(int fd, short args, void *cbdata)
OBJ_RELEASE(cptr);
}
OPAL_OUTPUT_VERBOSE((1, orte_grpcomm_base_framework.framework_output,
"%s grpcomm:bad adding collective %d with %d participants to global list",
"%s grpcomm:bad adding collective %lu with %d participants to global list",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
(int)coll->id, (int)opal_list_get_size(&coll->participants)));
(unsigned long)coll->id, (int)opal_list_get_size(&coll->participants)));
/* now add the barrier to the global list of active collectives */
opal_list_append(&orte_grpcomm_base.active_colls, &coll->super);
@ -262,9 +262,9 @@ static void process_barrier(int fd, short args, void *cbdata)
buf = OBJ_NEW(opal_buffer_t);
opal_dss.copy_payload(buf, &coll->buffer);
OPAL_OUTPUT_VERBOSE((1, orte_grpcomm_base_framework.framework_output,
"%s grpcomm:bad sending collective %d to %s",
"%s grpcomm:bad sending collective %lu to %s",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
(int)coll->id,
(unsigned long)coll->id,
ORTE_NAME_PRINT(&nm->name)));
if (0 > (rc = orte_rml.send_buffer_nb(&nm->name, buf,
ORTE_RML_TAG_COLLECTIVE,
@ -302,7 +302,6 @@ static void process_allgather(int fd, short args, void *cbdata)
int rc;
opal_buffer_t *buf;
orte_namelist_t *nm;
opal_list_item_t *item;
OBJ_RELEASE(caddy);
@ -344,9 +343,9 @@ static void process_allgather(int fd, short args, void *cbdata)
gather, ORTE_GRPCOMM_INTERNAL_STG_APP);
OPAL_OUTPUT_VERBOSE((1, orte_grpcomm_base_framework.framework_output,
"%s grpcomm:bad sending collective %d to our daemon",
"%s grpcomm:bad sending collective %lu to our daemon",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
(int)gather->id));
(unsigned long)gather->id));
/* send to our daemon */
if (0 > (rc = orte_rml.send_buffer_nb(ORTE_PROC_MY_DAEMON, buf,
ORTE_RML_TAG_COLLECTIVE,
@ -361,16 +360,13 @@ static void process_allgather(int fd, short args, void *cbdata)
* include ourselves, which is fine as it will aid in
* determining the collective is complete
*/
for (item = opal_list_get_first(&gather->participants);
item != opal_list_get_end(&gather->participants);
item = opal_list_get_next(item)) {
nm = (orte_namelist_t*)item;
OPAL_LIST_FOREACH(nm, &gather->participants, orte_namelist_t) {
buf = OBJ_NEW(opal_buffer_t);
opal_dss.copy_payload(buf, &gather->buffer);
OPAL_OUTPUT_VERBOSE((1, orte_grpcomm_base_framework.framework_output,
"%s grpcomm:bad sending collective %d to %s",
"%s grpcomm:bad sending collective %lu to %s",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
(int)gather->id,
(unsigned long)gather->id,
ORTE_NAME_PRINT(&nm->name)));
if (0 > (rc = orte_rml.send_buffer_nb(&nm->name, buf,
ORTE_RML_TAG_COLLECTIVE,


@ -83,9 +83,9 @@ OBJ_CLASS_DECLARATION(orte_grpcomm_caddy_t);
do { \
orte_grpcomm_caddy_t *caddy; \
OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_framework.framework_output, \
"%s ACTIVATING GRCPCOMM OP %d at %s:%d", \
"%s ACTIVATING GRCPCOMM OP %lu at %s:%d", \
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
(o)->id, __FILE__, __LINE__)); \
(unsigned long)(o)->id, __FILE__, __LINE__)); \
caddy = OBJ_NEW(orte_grpcomm_caddy_t); \
caddy->op = (o); \
opal_event_set(orte_event_base, &caddy->ev, -1, \
@ -96,9 +96,9 @@ OBJ_CLASS_DECLARATION(orte_grpcomm_caddy_t);
ORTE_DECLSPEC extern orte_grpcomm_base_t orte_grpcomm_base;
ORTE_DECLSPEC orte_grpcomm_collective_t* orte_grpcomm_base_setup_collective(orte_grpcomm_coll_id_t id);
ORTE_DECLSPEC orte_grpcomm_collective_t* orte_grpcomm_base_setup_collective(orte_jobid_t jobid, orte_grpcomm_coll_id_t id);
ORTE_DECLSPEC void orte_grpcomm_base_progress_collectives(void);
ORTE_DECLSPEC orte_grpcomm_coll_id_t orte_grpcomm_base_get_coll_id(void);
ORTE_DECLSPEC orte_grpcomm_coll_id_t orte_grpcomm_base_get_coll_id(orte_process_name_t *nm);
ORTE_DECLSPEC void orte_grpcomm_base_pack_collective(opal_buffer_t *relay,
orte_jobid_t jobid,
orte_grpcomm_collective_t *coll,


@ -93,16 +93,13 @@ MCA_BASE_FRAMEWORK_DECLARE(orte, grpcomm, NULL, NULL, orte_grpcomm_base_open, or
mca_grpcomm_base_static_components, 0);
orte_grpcomm_collective_t* orte_grpcomm_base_setup_collective(orte_grpcomm_coll_id_t id)
orte_grpcomm_collective_t* orte_grpcomm_base_setup_collective(orte_jobid_t jobid, orte_grpcomm_coll_id_t id)
{
opal_list_item_t *item;
orte_grpcomm_collective_t *cptr, *coll;
orte_namelist_t *nm;
coll = NULL;
for (item = opal_list_get_first(&orte_grpcomm_base.active_colls);
item != opal_list_get_end(&orte_grpcomm_base.active_colls);
item = opal_list_get_next(item)) {
cptr = (orte_grpcomm_collective_t*)item;
OPAL_LIST_FOREACH(cptr, &orte_grpcomm_base.active_colls, orte_grpcomm_collective_t) {
if (id == cptr->id) {
coll = cptr;
break;
@ -112,6 +109,11 @@ orte_grpcomm_collective_t* orte_grpcomm_base_setup_collective(orte_grpcomm_coll_
coll = OBJ_NEW(orte_grpcomm_collective_t);
coll->id = id;
opal_list_append(&orte_grpcomm_base.active_colls, &coll->super);
/* need to add the vpid name to the participants */
nm = OBJ_NEW(orte_namelist_t);
nm->name.jobid = jobid;
nm->name.vpid = ORTE_VPID_WILDCARD;
opal_list_append(&coll->participants, &nm->super);
}
return coll;
@ -120,7 +122,7 @@ orte_grpcomm_collective_t* orte_grpcomm_base_setup_collective(orte_grpcomm_coll_
/* local objects */
static void collective_constructor(orte_grpcomm_collective_t *ptr)
{
ptr->id = -1;
ptr->id = ORTE_GRPCOMM_COLL_ID_INVALID;
ptr->active = false;
ptr->num_local_recvd = 0;
OBJ_CONSTRUCT(&ptr->local_bucket, opal_buffer_t);


@ -38,27 +38,66 @@
#include "opal/mca/dstore/dstore.h"
#include "opal/mca/hwloc/base/base.h"
#include "orte/util/proc_info.h"
#include "orte/mca/errmgr/errmgr.h"
#include "orte/mca/ess/ess.h"
#include "orte/mca/rml/rml.h"
#include "orte/runtime/orte_globals.h"
#include "orte/util/attr.h"
#include "orte/util/name_fns.h"
#include "orte/util/nidmap.h"
#include "orte/util/proc_info.h"
#include "orte/orted/orted.h"
#include "orte/runtime/orte_globals.h"
#include "orte/runtime/orte_wait.h"
#include "orte/mca/grpcomm/base/base.h"
#include "orte/mca/grpcomm/grpcomm.h"
orte_grpcomm_coll_id_t orte_grpcomm_base_get_coll_id(void)
orte_grpcomm_coll_id_t orte_grpcomm_base_get_coll_id(orte_process_name_t *nm)
{
orte_grpcomm_coll_id_t id;
opal_list_t myvals;
opal_value_t *kv;
uint64_t n;
OBJ_CONSTRUCT(&myvals, opal_list_t);
if (ORTE_SUCCESS != opal_dstore.fetch(opal_dstore_internal,
(opal_identifier_t*)nm,
ORTE_DB_COLL_ID_CNTR, &myvals)) {
/* start the counter */
kv = OBJ_NEW(opal_value_t);
kv->key = strdup(ORTE_DB_COLL_ID_CNTR);
kv->type = OPAL_UINT32;
kv->data.uint32 = 0;
opal_list_append(&myvals, &kv->super);
}
kv = (opal_value_t*)opal_list_get_first(&myvals);
OPAL_OUTPUT_VERBOSE((1, orte_grpcomm_base_framework.framework_output,
"%s CURRENT COLL ID COUNTER %u FOR PROC %s",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
kv->data.uint32, ORTE_NAME_PRINT(nm)));
/* construct the next collective id for this job */
id = kv->data.uint32;
n = (uint64_t)nm->jobid << 32;
id |= (n & 0xffffffff00000000);
OPAL_OUTPUT_VERBOSE((1, orte_grpcomm_base_framework.framework_output,
"%s ASSIGNED COLL ID %lu",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
(unsigned long)id));
/* assign the next collective id */
id = orte_grpcomm_base.coll_id;
/* rotate to the next value */
orte_grpcomm_base.coll_id++;
kv->data.uint32++;
if (UINT32_MAX == kv->data.uint32) {
/* need to rotate around */
kv->data.uint32 = 0;
}
if (ORTE_SUCCESS != opal_dstore.store(opal_dstore_internal,
(opal_identifier_t*)nm, kv)) {
OPAL_LIST_DESTRUCT(&myvals);
return ORTE_GRPCOMM_COLL_ID_INVALID;
}
OPAL_LIST_DESTRUCT(&myvals);
return id;
}
@ -135,9 +174,9 @@ void orte_grpcomm_base_modex(int fd, short args, void *cbdata)
item = opal_list_get_next(item)) {
cptr = (orte_grpcomm_collective_t*)item;
OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_framework.framework_output,
"%s CHECKING COLL id %d",
"%s CHECKING COLL id %lu",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
cptr->id));
(unsigned long)cptr->id));
if (modex->id == cptr->id) {
found = true;


@ -151,9 +151,13 @@ static void coll_id_req(int status, orte_process_name_t* sender,
/* assume one id was requested */
num = 1;
}
/* coll id requests are for multi-job collectives, so we
* assign a collective id from the DAEMON job as not all
* procs in the participating jobs are required to participate */
id = (orte_grpcomm_coll_id_t*)malloc(num * sizeof(orte_grpcomm_coll_id_t));
for (n=0; n < num; n++) {
id[n] = orte_grpcomm_base_get_coll_id();
id[n] = orte_grpcomm_base_get_coll_id(ORTE_PROC_MY_NAME);
}
OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_framework.framework_output,
@ -197,8 +201,8 @@ static void app_recv(int status, orte_process_name_t* sender,
}
OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_framework.framework_output,
"%s grpcomm:base:receive processing collective return for id %d recvd from %s",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), id, ORTE_NAME_PRINT(sender)));
"%s grpcomm:base:receive processing collective return for id %lu recvd from %s",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), (unsigned long)id, ORTE_NAME_PRINT(sender)));
/* if the sender is my daemon, then this collective is
* a global one and is complete
@ -211,9 +215,9 @@ static void app_recv(int status, orte_process_name_t* sender,
item = opal_list_get_next(item)) {
coll = (orte_grpcomm_collective_t*)item;
OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_framework.framework_output,
"%s CHECKING COLL id %d",
"%s CHECKING COLL id %lu",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
coll->id));
(unsigned long)coll->id));
if (id == coll->id) {
/* see if the collective needs another step */
@ -260,9 +264,9 @@ static void app_recv(int status, orte_process_name_t* sender,
item = opal_list_get_next(item)) {
cptr = (orte_grpcomm_collective_t*)item;
OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_framework.framework_output,
"%s CHECKING COLL id %d",
"%s CHECKING COLL id %lu",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
cptr->id));
(unsigned long)cptr->id));
if (id == cptr->id) {
/* aha - we do have it */
@ -453,21 +457,23 @@ static void daemon_local_recv(int status, orte_process_name_t* sender,
}
OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_framework.framework_output,
"%s WORKING COLLECTIVE %d",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), id));
"%s WORKING COLLECTIVE %lu",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
(unsigned long)id));
/* setup the collective for this id - if it's already present,
* then this will just return the existing structure
*/
coll = orte_grpcomm_base_setup_collective(id);
coll = orte_grpcomm_base_setup_collective(sender->jobid, id);
/* record this proc's participation and its data */
coll->num_local_recvd++;
opal_dss.copy_payload(&coll->local_bucket, buffer);
OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_framework.framework_output,
"%s PROGRESSING COLLECTIVE %d",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), id));
"%s PROGRESSING COLLECTIVE %lu",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
(unsigned long)id));
orte_grpcomm_base_progress_collectives();
}
@ -514,15 +520,15 @@ void orte_grpcomm_base_progress_collectives(void)
while (item != opal_list_get_end(&orte_grpcomm_base.active_colls)) {
coll = (orte_grpcomm_collective_t*)item;
OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_framework.framework_output,
"%s PROGRESSING COLL id %d",
"%s PROGRESSING COLL id %lu",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
coll->id));
(unsigned long)coll->id));
/* if this collective is already locally complete, then ignore it */
if (coll->locally_complete) {
OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_framework.framework_output,
"%s COLL %d IS LOCALLY COMPLETE",
"%s COLL %lu IS LOCALLY COMPLETE",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
coll->id));
(unsigned long)coll->id));
goto next_coll;
}
/* get the jobid of the participants in this collective */
@ -536,9 +542,9 @@ void orte_grpcomm_base_progress_collectives(void)
* this collective
*/
OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_framework.framework_output,
"%s COLL %d JOBID %s NOT FOUND",
"%s COLL %lu JOBID %s NOT FOUND",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
coll->id, ORTE_JOBID_PRINT(nm->name.jobid)));
(unsigned long)coll->id, ORTE_JOBID_PRINT(nm->name.jobid)));
goto next_coll;
}
/* all local procs from this job are required to participate */
@ -550,8 +556,9 @@ void orte_grpcomm_base_progress_collectives(void)
/* see if all reqd participants are done */
if (jdata->num_local_procs == coll->num_local_recvd) {
OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_framework.framework_output,
"%s COLLECTIVE %d LOCALLY COMPLETE - SENDING TO GLOBAL COLLECTIVE",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), coll->id));
"%s COLLECTIVE %lu LOCALLY COMPLETE - SENDING TO GLOBAL COLLECTIVE",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
(unsigned long)coll->id));
/* mark it as locally complete */
coll->locally_complete = true;
/* pack the collective */
@ -601,13 +608,14 @@ static void daemon_coll_recv(int status, orte_process_name_t* sender,
}
OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_framework.framework_output,
"%s grpcomm:base:daemon_coll: WORKING COLLECTIVE %d",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), id));
"%s grpcomm:base:daemon_coll: WORKING COLLECTIVE %lu",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
(unsigned long)id));
/* setup the collective for this id - if it's already present,
* then this will just return the existing structure
*/
coll = orte_grpcomm_base_setup_collective(id);
coll = orte_grpcomm_base_setup_collective(sender->jobid, id);
/* record that we received a bucket */
coll->num_peer_buckets++;


@ -11,6 +11,7 @@
* All rights reserved.
* Copyright (c) 2011-2012 Los Alamos National Security, LLC.
* All rights reserved.
* Copyright (c) 2014 Intel, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -49,9 +50,39 @@ BEGIN_C_DECLS
*/
typedef void (*orte_grpcomm_collective_cbfunc_t)(opal_buffer_t *data, void *cbdata);
typedef int32_t orte_grpcomm_coll_id_t;
#define ORTE_GRPCOMM_COLL_ID_T OPAL_INT32
#define ORTE_GRPCOMM_COLL_ID_REQ -1
/* define a collective_id_t. In order to allow scalable
* generation of collective id's, they are formed as:
*
* top 32-bits are the jobid of the procs involved in
* the collective. For collectives across multiple jobs
* (e.g., in a connect_accept), the daemon jobid will
* be used as the id will be issued by mpirun. This
* won't cause problems because daemons don't use the
* collective_id
*
* bottom 32-bits are a rolling counter that recycles
* when the max is hit. The daemon will cleanup each
* collective upon completion, so this means a job can
* never have more than 2**32 collectives going on at
* a time. If someone needs more than that - they've got
* a problem.
*
* Note that this means (for now) that RTE-level collectives
* cannot be done by individual threads - they must be
* done at the overall process level. This is required as
* there is no guaranteed ordering for the collective id's,
* and all the participants must agree on the id of the
* collective they are executing. So if thread A on one
* process asks for a collective id before thread B does,
* but B asks before A on another process, the collectives will
* be mixed and not result in the expected behavior. We may
* find a way to relax this requirement in the future by
* adding a thread context id to the jobid field (maybe taking the
* lower 16-bits of that field).
*/
typedef uint64_t orte_grpcomm_coll_id_t;
#define ORTE_GRPCOMM_COLL_ID_T OPAL_UINT64
#define ORTE_GRPCOMM_COLL_ID_INVALID UINT64_MAX
typedef int8_t orte_grpcomm_coll_t;
#define ORTE_GRPCOMM_XCAST 1


@ -192,11 +192,8 @@ int orte_odls_base_default_construct_child_list(opal_buffer_t *data,
int8_t flag;
int32_t n;
orte_proc_t *pptr, *dmn;
orte_grpcomm_collective_t *coll;
orte_namelist_t *nm;
opal_buffer_t *bptr;
orte_app_context_t *app;
orte_grpcomm_coll_id_t gid, *gidptr;
OPAL_OUTPUT_VERBOSE((5, orte_odls_base_framework.framework_output,
"%s odls:constructing child list",
@ -381,49 +378,22 @@ int orte_odls_base_default_construct_child_list(opal_buffer_t *data,
}
COMPLETE:
/* create the collectives so the job doesn't stall */
gidptr = &gid;
if (orte_get_attribute(&jdata->attributes, ORTE_JOB_PEER_MODX_ID,
(void**)&gidptr, ORTE_GRPCOMM_COLL_ID_T)) {
coll = orte_grpcomm_base_setup_collective(*gidptr);
nm = OBJ_NEW(orte_namelist_t);
nm->name.jobid = jdata->jobid;
nm->name.vpid = ORTE_VPID_WILDCARD;
opal_list_append(&coll->participants, &nm->super);
}
if (orte_get_attribute(&jdata->attributes, ORTE_JOB_INIT_BAR_ID,
(void**)&gidptr, ORTE_GRPCOMM_COLL_ID_T)) {
coll = orte_grpcomm_base_setup_collective(*gidptr);
nm = OBJ_NEW(orte_namelist_t);
nm->name.jobid = jdata->jobid;
nm->name.vpid = ORTE_VPID_WILDCARD;
opal_list_append(&coll->participants, &nm->super);
}
if (orte_get_attribute(&jdata->attributes, ORTE_JOB_FINI_BAR_ID,
(void**)&gidptr, ORTE_GRPCOMM_COLL_ID_T)) {
coll = orte_grpcomm_base_setup_collective(*gidptr);
nm = OBJ_NEW(orte_namelist_t);
nm->name.jobid = jdata->jobid;
nm->name.vpid = ORTE_VPID_WILDCARD;
opal_list_append(&coll->participants, &nm->super);
}
if (orte_get_attribute(&jdata->attributes, ORTE_JOB_SNAPC_INIT_BAR,
(void**)&gidptr, ORTE_GRPCOMM_COLL_ID_T)) {
coll = orte_grpcomm_base_setup_collective(*gidptr);
nm = OBJ_NEW(orte_namelist_t);
nm->name.jobid = jdata->jobid;
nm->name.vpid = ORTE_VPID_WILDCARD;
opal_list_append(&coll->participants, &nm->super);
}
if (orte_get_attribute(&jdata->attributes, ORTE_JOB_SNAPC_FINI_BAR,
(void**)&gidptr, ORTE_GRPCOMM_COLL_ID_T)) {
coll = orte_grpcomm_base_setup_collective(*gidptr);
nm = OBJ_NEW(orte_namelist_t);
nm->name.jobid = jdata->jobid;
nm->name.vpid = ORTE_VPID_WILDCARD;
opal_list_append(&coll->participants, &nm->super);
#if OPAL_ENABLE_FT_CR == 1
{
orte_grpcomm_coll_id_t gid, *gidptr;
orte_grpcomm_collective_t *coll;
/* create the collectives so the job doesn't stall */
gidptr = &gid;
if (orte_get_attribute(&jdata->attributes, ORTE_JOB_SNAPC_INIT_BAR,
(void**)&gidptr, ORTE_GRPCOMM_COLL_ID_T)) {
coll = orte_grpcomm_base_setup_collective(jdata->jobid, *gidptr);
}
if (orte_get_attribute(&jdata->attributes, ORTE_JOB_SNAPC_FINI_BAR,
(void**)&gidptr, ORTE_GRPCOMM_COLL_ID_T)) {
coll = orte_grpcomm_base_setup_collective(jdata->jobid, *gidptr);
}
}
#endif
/* progress any pending collectives */
orte_grpcomm_base_progress_collectives();
@ -464,34 +434,10 @@ static int odls_base_default_setup_fork(orte_job_t *jdata,
/* add any collective id info to the app's environ */
gidptr = &gid;
if (orte_get_attribute(&jdata->attributes, ORTE_JOB_PEER_MODX_ID,
(void**)&gidptr, ORTE_GRPCOMM_COLL_ID_T)) {
(void) mca_base_var_env_name ("orte_peer_modex_id", &param);
asprintf(&param2, "%d", *gidptr);
opal_setenv(param, param2, true, environ_copy);
free(param);
free(param2);
}
if (orte_get_attribute(&jdata->attributes, ORTE_JOB_INIT_BAR_ID,
(void**)&gidptr, ORTE_GRPCOMM_COLL_ID_T)) {
(void) mca_base_var_env_name ("orte_peer_init_barrier_id", &param);
asprintf(&param2, "%d", *gidptr);
opal_setenv(param, param2, true, environ_copy);
free(param);
free(param2);
}
if (orte_get_attribute(&jdata->attributes, ORTE_JOB_FINI_BAR_ID,
(void**)&gidptr, ORTE_GRPCOMM_COLL_ID_T)) {
(void) mca_base_var_env_name ("orte_peer_fini_barrier_id", &param);
asprintf(&param2, "%d", *gidptr);
opal_setenv(param, param2, true, environ_copy);
free(param);
free(param2);
}
if (orte_get_attribute(&jdata->attributes, ORTE_JOB_SNAPC_INIT_BAR,
(void**)&gidptr, ORTE_GRPCOMM_COLL_ID_T)) {
(void) mca_base_var_env_name ("orte_snapc_init_barrier_id", &param);
asprintf(&param2, "%d", *gidptr);
asprintf(&param2, "%lu", (unsigned long)gid);
opal_setenv(param, param2, true, environ_copy);
free(param);
free(param2);
@ -499,7 +445,7 @@ static int odls_base_default_setup_fork(orte_job_t *jdata,
if (orte_get_attribute(&jdata->attributes, ORTE_JOB_SNAPC_FINI_BAR,
(void**)&gidptr, ORTE_GRPCOMM_COLL_ID_T)) {
(void) mca_base_var_env_name ("orte_snapc_fini_barrier_id", &param);
asprintf(&param2, "%d", *gidptr);
asprintf(&param2, "%lu", (unsigned long)gid);
opal_setenv(param, param2, true, environ_copy);
free(param);
free(param2);


@ -236,7 +236,6 @@ void orte_plm_base_setup_job(int fd, short args, void *cbdata)
int i;
orte_app_context_t *app;
orte_state_caddy_t *caddy = (orte_state_caddy_t*)cbdata;
orte_grpcomm_coll_id_t id;
OPAL_OUTPUT_VERBOSE((5, orte_plm_base_framework.framework_output,
"%s plm:base:setup_job",
@ -271,20 +270,16 @@ void orte_plm_base_setup_job(int fd, short args, void *cbdata)
ORTE_FLAG_SET(caddy->jdata, ORTE_JOB_FLAG_RECOVERABLE);
}
/* get collective ids for the std MPI operations */
id = orte_grpcomm_base_get_coll_id();
orte_set_attribute(&caddy->jdata->attributes, ORTE_JOB_PEER_MODX_ID, ORTE_ATTR_GLOBAL, &id, ORTE_GRPCOMM_COLL_ID_T);
id = orte_grpcomm_base_get_coll_id();
orte_set_attribute(&caddy->jdata->attributes, ORTE_JOB_INIT_BAR_ID, ORTE_ATTR_GLOBAL, &id, ORTE_GRPCOMM_COLL_ID_T);
id = orte_grpcomm_base_get_coll_id();
orte_set_attribute(&caddy->jdata->attributes, ORTE_JOB_FINI_BAR_ID, ORTE_ATTR_GLOBAL, &id, ORTE_GRPCOMM_COLL_ID_T);
#if OPAL_ENABLE_FT_CR == 1
id = orte_grpcomm_base_get_coll_id();
orte_set_attribute(&caddy->jdata->attributes, ORTE_JOB_SNAPC_INIT_BAR, ORTE_ATTR_GLOBAL, &id, ORTE_GRPCOMM_COLL_ID_T);
id = orte_grpcomm_base_get_coll_id();
orte_set_attribute(&caddy->jdata->attributes, ORTE_JOB_SNAPC_FINI_BAR, ORTE_ATTR_GLOBAL, &id, ORTE_GRPCOMM_COLL_ID_T);
{
orte_grpcomm_coll_id_t id;
/* Adrian - I'm not sure if these need to be coordinated by the HNP, or
* can be generated by each proc */
id = orte_grpcomm_base_get_coll_id(ORTE_PROC_MY_NAME);
orte_set_attribute(&caddy->jdata->attributes, ORTE_JOB_SNAPC_INIT_BAR, ORTE_ATTR_GLOBAL, &id, ORTE_GRPCOMM_COLL_ID_T);
id = orte_grpcomm_base_get_coll_id(ORTE_PROC_MY_NAME);
orte_set_attribute(&caddy->jdata->attributes, ORTE_JOB_SNAPC_FINI_BAR, ORTE_ATTR_GLOBAL, &id, ORTE_GRPCOMM_COLL_ID_T);
}
#endif
/* if app recovery is not defined, set apps to defaults */


@ -529,9 +529,6 @@ int orte_daemon(int argc, char *argv[])
orte_app_context_t *app;
char *tmp, *nptr, *sysinfo;
int32_t ljob;
orte_grpcomm_collective_t *coll;
orte_namelist_t *nm;
orte_grpcomm_coll_id_t id;
/* setup the singleton's job */
jdata = OBJ_NEW(orte_job_t);
@ -585,47 +582,19 @@ int orte_daemon(int argc, char *argv[])
proc->app_idx = 0;
ORTE_FLAG_SET(proc, ORTE_PROC_FLAG_LOCAL);
/* account for the collectives in its modex/barriers */
id = orte_grpcomm_base_get_coll_id();
orte_set_attribute(&jdata->attributes, ORTE_JOB_PEER_MODX_ID, ORTE_ATTR_GLOBAL, &id, ORTE_GRPCOMM_COLL_ID_T);
coll = orte_grpcomm_base_setup_collective(id);
nm = OBJ_NEW(orte_namelist_t);
nm->name.jobid = jdata->jobid;
nm->name.vpid = ORTE_VPID_WILDCARD;
opal_list_append(&coll->participants, &nm->super);
id = orte_grpcomm_base_get_coll_id();
orte_set_attribute(&jdata->attributes, ORTE_JOB_INIT_BAR_ID, ORTE_ATTR_GLOBAL, &id, ORTE_GRPCOMM_COLL_ID_T);
coll = orte_grpcomm_base_setup_collective(id);
nm = OBJ_NEW(orte_namelist_t);
nm->name.jobid = jdata->jobid;
nm->name.vpid = ORTE_VPID_WILDCARD;
opal_list_append(&coll->participants, &nm->super);
id = orte_grpcomm_base_get_coll_id();
orte_set_attribute(&jdata->attributes, ORTE_JOB_FINI_BAR_ID, ORTE_ATTR_GLOBAL, &id, ORTE_GRPCOMM_COLL_ID_T);
coll = orte_grpcomm_base_setup_collective(id);
nm = OBJ_NEW(orte_namelist_t);
nm->name.jobid = jdata->jobid;
nm->name.vpid = ORTE_VPID_WILDCARD;
opal_list_append(&coll->participants, &nm->super);
/* account for the collectives */
#if OPAL_ENABLE_FT_CR == 1
id = orte_grpcomm_base_get_coll_id();
orte_set_attribute(&jdata->attributes, ORTE_JOB_SNAPC_INIT_BAR, ORTE_ATTR_GLOBAL, &id, ORTE_GRPCOMM_COLL_ID_T);
coll = orte_grpcomm_base_setup_collective(id);
nm = OBJ_NEW(orte_namelist_t);
nm->name.jobid = jdata->jobid;
nm->name.vpid = ORTE_VPID_WILDCARD;
opal_list_append(&coll->participants, &nm->super);
{
orte_grpcomm_coll_id_t id;
orte_grpcomm_collective_t *coll;
id = orte_grpcomm_base_get_coll_id(ORTE_PROC_MY_NAME);
orte_set_attribute(&jdata->attributes, ORTE_JOB_SNAPC_INIT_BAR, ORTE_ATTR_GLOBAL, &id, ORTE_GRPCOMM_COLL_ID_T);
coll = orte_grpcomm_base_setup_collective(jdata->jobid, id);
id = orte_grpcomm_base_get_coll_id();
orte_set_attribute(&jdata->attributes, ORTE_JOB_SNAPC_FINI_BAR, ORTE_ATTR_GLOBAL, &id, ORTE_GRPCOMM_COLL_ID_T);
coll = orte_grpcomm_base_setup_collective(id);
nm = OBJ_NEW(orte_namelist_t);
nm->name.jobid = jdata->jobid;
nm->name.vpid = ORTE_VPID_WILDCARD;
opal_list_append(&coll->participants, &nm->super);
id = orte_grpcomm_base_get_coll_id(ORTE_PROC_MY_NAME);
orte_set_attribute(&jdata->attributes, ORTE_JOB_SNAPC_FINI_BAR, ORTE_ATTR_GLOBAL, &id, ORTE_GRPCOMM_COLL_ID_T);
coll = orte_grpcomm_base_setup_collective(jdata->jobid, id);
}
#endif
/* create a string that contains our uri + sysinfo */


@ -139,6 +139,7 @@ ORTE_DECLSPEC extern int orte_exit_status;
#define ORTE_DB_RMLURI "orte.rmluri"
#define ORTE_DB_HOSTID "orte.hostid"
#define ORTE_DB_GLOBAL_RANK "orte.global.rank"
#define ORTE_DB_COLL_ID_CNTR "orte.coll.id.cntr"
/* State Machine lists */
ORTE_DECLSPEC extern opal_list_t orte_job_states;


@ -1,4 +1,4 @@
PROGS = mpi_no_op mpi_barrier hello hello_nodename abort multi_abort simple_spawn concurrent_spawn spawn_multiple mpi_spin delayed_abort loop_spawn loop_child bad_exit pubsub hello_barrier segv accept connect hello_output hello_show_help crisscross read_write ziatest slave reduce-hang ziaprobe ziatest bcast_loop parallel_w8 parallel_w64 parallel_r8 parallel_r64 sio sendrecv_blaster early_abort debugger singleton_client_server intercomm_create spawn_tree init-exit77 mpi_info info_spawn server client paccept pconnect
PROGS = mpi_no_op mpi_barrier hello hello_nodename abort multi_abort simple_spawn concurrent_spawn spawn_multiple mpi_spin delayed_abort loop_spawn loop_child bad_exit pubsub hello_barrier segv accept connect hello_output hello_show_help crisscross read_write ziatest slave reduce-hang ziaprobe ziatest bcast_loop parallel_w8 parallel_w64 parallel_r8 parallel_r64 sio sendrecv_blaster early_abort debugger singleton_client_server intercomm_create spawn_tree init-exit77 mpi_info info_spawn server client paccept pconnect coll_test
all: $(PROGS)

orte/test/mpi/coll_test.c (regular file)

@ -0,0 +1,66 @@
/* -*- C -*-
*
* $HEADER$
*
* The most basic of MPI applications
*/
#include <stdio.h>
#include "opal/mca/hwloc/hwloc.h"
#include "mpi.h"
#include "ompi/mca/rte/rte.h"
#include "orte/util/proc_info.h"
#define COLL_TEST_MAX 100
int main(int argc, char* argv[])
{
int rank, size, rc;
hwloc_cpuset_t cpus;
char *bindings;
int i, ret;
ompi_rte_collective_t *coll;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
cpus = hwloc_bitmap_alloc();
rc = hwloc_get_cpubind(opal_hwloc_topology, cpus, HWLOC_CPUBIND_PROCESS);
hwloc_bitmap_list_asprintf(&bindings, cpus);
printf("Hello, World, I am %d of %d [%d local peers]: get_cpubind: %d bitmap %s\n",
rank, size, orte_process_info.num_local_peers, rc,
(NULL == bindings) ? "NULL" : bindings);
for (i=0; i < COLL_TEST_MAX; i++) {
fprintf(stderr, "%d executing barrier %d\n", rank, i);
coll = OBJ_NEW(ompi_rte_collective_t);
coll->id = ompi_rte_get_collective_id(OMPI_PROC_MY_NAME);
coll->active = true;
if (OMPI_SUCCESS != (ret = ompi_rte_barrier(coll))) {
OMPI_ERROR_LOG(ret);
return ret;
}
}
for (i=0; i < COLL_TEST_MAX; i++) {
fprintf(stderr, "%d executing modex %d\n", rank, i);
coll = OBJ_NEW(ompi_rte_collective_t);
coll->id = ompi_rte_get_collective_id(OMPI_PROC_MY_NAME);
coll->active = true;
if (OMPI_SUCCESS != (ret = ompi_rte_modex(coll))) {
OMPI_ERROR_LOG(ret);
return ret;
}
}
/* wait for barrier to complete */
OMPI_LAZY_WAIT_FOR_COMPLETION(coll->active);
OBJ_RELEASE(coll);
MPI_Finalize();
return 0;
}


@ -195,12 +195,8 @@ const char *orte_attr_key_to_str(orte_attribute_key_t key)
return "JOB-FAIL-NOTIFIED";
case ORTE_JOB_TERM_NOTIFIED:
return "JOB-TERM-NOTIFIED";
case ORTE_JOB_PEER_MODX_ID:
return "JOB-PEER-MODX-ID";
case ORTE_JOB_INIT_BAR_ID:
return "JOB-INIT-BAR-ID";
case ORTE_JOB_FINI_BAR_ID:
return "JOB-FINI-BAR-ID";
case ORTE_JOB_COLL_ID_CNTR:
return "JOB-COLL-ID-CNTR";
case ORTE_PROC_NOBARRIER:
return "PROC-NOBARRIER";


@ -115,9 +115,7 @@ typedef uint16_t orte_job_flags_t;
#define ORTE_JOB_GOVERNOR (ORTE_JOB_START_KEY + 27) // string - governor used for nodes in job
#define ORTE_JOB_FAIL_NOTIFIED (ORTE_JOB_START_KEY + 28) // bool - abnormal term of proc within job has been reported
#define ORTE_JOB_TERM_NOTIFIED (ORTE_JOB_START_KEY + 29) // bool - normal term of job has been reported
#define ORTE_JOB_PEER_MODX_ID (ORTE_JOB_START_KEY + 30) // orte_grpcomm_coll_id_t - collective id
#define ORTE_JOB_INIT_BAR_ID (ORTE_JOB_START_KEY + 31) // orte_grpcomm_coll_id_t - collective id
#define ORTE_JOB_FINI_BAR_ID (ORTE_JOB_START_KEY + 32) // orte_grpcomm_coll_id_t - collective id
#define ORTE_JOB_COLL_ID_CNTR (ORTE_JOB_START_KEY + 30) // uint32_t - counter for current collective id
#define ORTE_JOB_MAX_KEY 300


@ -80,9 +80,6 @@ ORTE_DECLSPEC orte_proc_info_t orte_process_info = {
.cpuset = NULL,
#endif
.app_rank = -1,
.peer_modex = -1,
.peer_init_barrier = -1,
.peer_fini_barrier = -1,
.my_hostid = ORTE_VPID_INVALID,
#if OPAL_ENABLE_FT_CR == 1
.snapc_init_barrier = -1,
@ -92,9 +89,6 @@ ORTE_DECLSPEC orte_proc_info_t orte_process_info = {
static bool init=false;
static int orte_ess_node_rank;
static int orte_peer_modex_id;
static int orte_peer_init_barrier_id;
static int orte_peer_fini_barrier_id;
#if OPAL_ENABLE_FT_CR == 1
static int orte_snapc_init_barrier_id;
static int orte_snapc_fini_barrier_id;
@ -268,33 +262,6 @@ int orte_proc_info(void)
orte_process_info.sync_buf = OBJ_NEW(opal_buffer_t);
/* get the collective id info */
orte_peer_modex_id = -1;
(void) mca_base_var_register ("orte", "orte", NULL, "peer_modex_id", "Peer modex collective id",
MCA_BASE_VAR_TYPE_INT, NULL, 0,
MCA_BASE_VAR_FLAG_INTERNAL,
OPAL_INFO_LVL_9,
MCA_BASE_VAR_SCOPE_CONSTANT,
&orte_peer_modex_id);
orte_process_info.peer_modex = (orte_grpcomm_coll_id_t) orte_peer_modex_id;
orte_peer_init_barrier_id = -1;
(void) mca_base_var_register ("orte", "orte", NULL, "peer_init_barrier_id", "Peer init barrier collective id",
MCA_BASE_VAR_TYPE_INT, NULL, 0,
MCA_BASE_VAR_FLAG_INTERNAL,
OPAL_INFO_LVL_9,
MCA_BASE_VAR_SCOPE_CONSTANT,
&orte_peer_init_barrier_id);
orte_process_info.peer_init_barrier = (orte_grpcomm_coll_id_t) orte_peer_init_barrier_id;
orte_peer_fini_barrier_id = -1;
(void) mca_base_var_register ("orte", "orte", NULL, "peer_fini_barrier_id", "Peer finalize barrier collective id",
MCA_BASE_VAR_TYPE_INT, NULL, 0,
MCA_BASE_VAR_FLAG_INTERNAL,
OPAL_INFO_LVL_9,
MCA_BASE_VAR_SCOPE_CONSTANT,
&orte_peer_fini_barrier_id);
orte_process_info.peer_fini_barrier = (orte_grpcomm_coll_id_t) orte_peer_fini_barrier_id;
#if OPAL_ENABLE_FT_CR == 1
orte_snapc_init_barrier_id = -1;
(void) mca_base_var_register ("orte", "orte", NULL, "snapc_init_barrier_id", "SNAPC init barrier collective id",


@ -129,9 +129,6 @@ struct orte_proc_info_t {
char *cpuset; /**< String-representation of bitmap where we are bound */
#endif
int app_rank; /**< rank within my app_context */
orte_grpcomm_coll_id_t peer_modex; /**< modex collective id */
orte_grpcomm_coll_id_t peer_init_barrier; /**< barrier id during init */
orte_grpcomm_coll_id_t peer_fini_barrier; /**< barrier id during finalize */
orte_vpid_t my_hostid; /** identifies the local host for a coprocessor */
#if OPAL_ENABLE_FT_CR == 1
orte_grpcomm_coll_id_t snapc_init_barrier; /**< barrier id during init */