/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2011-2013 Los Alamos National Security, LLC.
 *                         All rights reserved.
 * Copyright (c) 2013-2014 Intel, Inc. All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
/** @file:
 */

#ifndef MCA_GRPCOMM_BASE_H
#define MCA_GRPCOMM_BASE_H

/*
 * includes
 */
#include "orte_config.h"

#include "opal/class/opal_list.h"
#include "opal/dss/dss_types.h"
#include "opal/mca/mca.h"
#include "opal/mca/hwloc/hwloc.h"

#include "orte/mca/odls/odls_types.h"

#include "orte/mca/grpcomm/grpcomm.h"

/*
 * Global functions for MCA overall collective open and close
 */
BEGIN_C_DECLS

/*
 * MCA framework
 */
ORTE_DECLSPEC extern mca_base_framework_t orte_grpcomm_base_framework;

/*
 * Select an available component.
 */
ORTE_DECLSPEC int orte_grpcomm_base_select(void);

/*
 * globals that might be needed
 */
typedef struct {
    orte_grpcomm_coll_id_t coll_id;
    opal_list_t active_colls;
#if OPAL_HAVE_HWLOC
    hwloc_cpuset_t working_cpuset;
#endif
    bool modex_ready;
    opal_list_t modex_requests;
} orte_grpcomm_base_t;

typedef struct {
    opal_list_item_t super;
    orte_process_name_t peer;
} orte_grpcomm_modex_req_t;
OBJ_CLASS_DECLARATION(orte_grpcomm_modex_req_t);

typedef struct {
    opal_object_t super;
    opal_event_t ev;
    orte_grpcomm_collective_t *op;
} orte_grpcomm_caddy_t;
OBJ_CLASS_DECLARATION(orte_grpcomm_caddy_t);

#define ORTE_GRPCOMM_ACTIVATE(o, cb)                                    \
    do {                                                                \
        orte_grpcomm_caddy_t *caddy;                                    \
        OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_framework.framework_output, \
"%s ACTIVATING GRCPCOMM OP %lu at %s:%d", \
|
2012-12-09 02:53:20 +00:00
|
|
|
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
|
                             (unsigned long)(o)->id, __FILE__, __LINE__)); \
        caddy = OBJ_NEW(orte_grpcomm_caddy_t);                          \
        caddy->op = (o);                                                \
        opal_event_set(orte_event_base, &caddy->ev, -1,                 \
                       OPAL_EV_WRITE, (cb), caddy);                     \
        opal_event_set_priority(&caddy->ev, ORTE_MSG_PRI);              \
        opal_event_active(&caddy->ev, OPAL_EV_WRITE, 1);                \
    } while(0);
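
/*
 * Illustrative sketch (not part of the API itself): ORTE_GRPCOMM_ACTIVATE
 * hands a collective tracker to a callback that then runs from the ORTE
 * event base rather than in the caller's context.  The callback name below
 * is hypothetical; the macro only requires an opal event callback of the
 * form (int, short, void*) and passes the caddy as the cbdata argument:
 *
 *     static void my_coll_cbfunc(int fd, short args, void *cbdata)
 *     {
 *         orte_grpcomm_caddy_t *caddy = (orte_grpcomm_caddy_t*)cbdata;
 *         orte_grpcomm_collective_t *coll = caddy->op;
 *         // ... advance the collective ...
 *         OBJ_RELEASE(caddy);
 *     }
 *
 *     ORTE_GRPCOMM_ACTIVATE(coll, my_coll_cbfunc);
 */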

ORTE_DECLSPEC extern orte_grpcomm_base_t orte_grpcomm_base;

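/*
 * Rough summary (the base implementation is authoritative): collective
 * trackers live on orte_grpcomm_base.active_colls.  setup_collective is
 * expected to return the tracker matching the given jobid/id, creating
 * and appending one if none exists yet, while progress_collectives
 * re-examines the list and advances any tracker that is now able to
 * proceed.
 */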
ORTE_DECLSPEC orte_grpcomm_collective_t* orte_grpcomm_base_setup_collective(orte_jobid_t jobid, orte_grpcomm_coll_id_t id);
ORTE_DECLSPEC void orte_grpcomm_base_progress_collectives(void);
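
/*
 * Collective ids are formed so they can be generated without a central
 * allocator: the top 32 bits carry the jobid of the participating procs
 * (for cross-job collectives such as connect/accept the daemon jobid is
 * used, since the id is issued by mpirun and daemons never consume the
 * collective_id), and the bottom 32 bits are a rolling counter that
 * recycles when the max is hit.  Because there is no guaranteed ordering
 * of id assignment, RTE-level collectives must (for now) be requested at
 * the process level rather than by individual threads, or participants
 * could disagree on which id belongs to which collective.
 */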
ORTE_DECLSPEC orte_grpcomm_coll_id_t orte_grpcomm_base_get_coll_id(orte_process_name_t *nm);
ORTE_DECLSPEC void orte_grpcomm_base_pack_collective(opal_buffer_t *relay,
                                                     orte_jobid_t jobid,
                                                     orte_grpcomm_collective_t *coll,
                                                     orte_grpcomm_internal_stage_t stg);
ORTE_DECLSPEC void orte_grpcomm_base_rollup_recv(int status, orte_process_name_t* sender,
                                                 opal_buffer_t* buffer, orte_rml_tag_t tag,
                                                 void* cbdata);

/* modex support */
ORTE_DECLSPEC void orte_grpcomm_base_store_modex(opal_buffer_t *rbuf, void *cbdata);
ORTE_DECLSPEC void orte_grpcomm_base_modex(int fd, short args, void *cbdata);
ORTE_DECLSPEC int orte_grpcomm_base_pack_modex_entries(opal_buffer_t *buf, int handle);

/* comm support */
ORTE_DECLSPEC int orte_grpcomm_base_comm_start(void);
ORTE_DECLSPEC void orte_grpcomm_base_comm_stop(void);
ORTE_DECLSPEC void orte_grpcomm_base_xcast_recv(int status, orte_process_name_t* sender,
                                                opal_buffer_t* buffer, orte_rml_tag_t tag,
                                                void* cbdata);
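
/*
 * xcast_recv (like rollup_recv above) has the RML buffer-receive callback
 * signature, so comm_start is the natural place to post its persistent
 * receive.  A minimal sketch, assuming the usual wildcard peer and xcast
 * tag names (the tags actually registered are defined in the base source):
 *
 *     orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD, ORTE_RML_TAG_XCAST,
 *                             ORTE_RML_PERSISTENT,
 *                             orte_grpcomm_base_xcast_recv, NULL);
 */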
ORTE_DECLSPEC int orte_grpcomm_base_pack_xcast(orte_jobid_t job,
                                               opal_buffer_t *buffer,
                                               opal_buffer_t *message,
                                               orte_rml_tag_t tag);
ORTE_DECLSPEC void orte_grpcomm_base_process_modex(int fd, short args, void *cbdata);

END_C_DECLS
#endif