2007-12-21 09:02:00 +03:00
|
|
|
/* -*- Mode: C; c-basic-offset:4 ; -*- */
|
2004-05-21 23:36:19 +04:00
|
|
|
/*
|
2005-11-05 22:57:48 +03:00
|
|
|
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
|
|
|
|
* University Research and Technology
|
|
|
|
* Corporation. All rights reserved.
|
2008-11-05 00:58:06 +03:00
|
|
|
* Copyright (c) 2004-2008 The University of Tennessee and The University
|
2005-11-05 22:57:48 +03:00
|
|
|
* of Tennessee Research Foundation. All rights
|
|
|
|
* reserved.
|
2008-08-11 13:43:01 +04:00
|
|
|
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
|
2004-11-28 23:09:25 +03:00
|
|
|
* University of Stuttgart. All rights reserved.
|
2005-03-24 15:43:37 +03:00
|
|
|
* Copyright (c) 2004-2005 The Regents of the University of California.
|
|
|
|
* All rights reserved.
|
2009-01-11 05:30:00 +03:00
|
|
|
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
|
2007-09-11 17:23:46 +04:00
|
|
|
* Copyright (c) 2007 Voltaire All rights reserved.
|
2008-11-05 00:58:06 +03:00
|
|
|
* Copyright (c) 2006-2008 University of Houston. All rights reserved.
|
2009-02-24 20:17:33 +03:00
|
|
|
* Copyright (c) 2009 Sun Microsystems, Inc. All rights reserved.
|
2004-11-22 04:38:40 +03:00
|
|
|
* $COPYRIGHT$
|
|
|
|
*
|
|
|
|
* Additional copyrights may follow
|
|
|
|
*
|
2004-05-21 23:36:19 +04:00
|
|
|
* $HEADER$
|
|
|
|
*/
|
|
|
|
|
2004-06-07 19:33:53 +04:00
|
|
|
#include "ompi_config.h"
|
2004-05-21 23:36:19 +04:00
|
|
|
|
2008-02-28 04:57:57 +03:00
|
|
|
#include "opal/dss/dss.h"
|
|
|
|
#include "orte/types.h"
|
2006-02-12 04:33:29 +03:00
|
|
|
#include "ompi/communicator/communicator.h"
|
2009-02-24 20:17:33 +03:00
|
|
|
#include "ompi/op/op.h"
|
2006-02-12 04:33:29 +03:00
|
|
|
#include "ompi/constants.h"
|
2007-12-21 09:02:00 +03:00
|
|
|
#include "opal/class/opal_pointer_array.h"
|
2005-07-03 20:22:16 +04:00
|
|
|
#include "opal/class/opal_list.h"
|
2006-02-12 04:33:29 +03:00
|
|
|
#include "ompi/mca/pml/pml.h"
|
|
|
|
#include "ompi/mca/coll/base/base.h"
|
2005-09-13 00:36:04 +04:00
|
|
|
#include "ompi/request/request.h"
|
2006-12-13 01:01:39 +03:00
|
|
|
#include "ompi/runtime/mpiruntime.h"
|
2008-02-28 04:57:57 +03:00
|
|
|
#include "ompi/mca/dpm/dpm.h"
|
|
|
|
|
2008-06-18 07:15:56 +04:00
|
|
|
#include "orte/mca/rml/rml.h"
|
|
|
|
|
2008-02-28 04:57:57 +03:00
|
|
|
BEGIN_C_DECLS
|
2004-05-21 23:36:19 +04:00
|
|
|
|
|
|
|
/**
|
|
|
|
* These functions make sure, that we determine the global result over
|
|
|
|
* an intra communicators (simple), an inter-communicator and a
|
|
|
|
* pseudo inter-communicator described by two separate intra-comms
|
|
|
|
* and a bridge-comm (intercomm-create scenario).
|
|
|
|
*/
|
|
|
|
|
2006-12-13 01:01:39 +03:00
|
|
|
static int cid_block_start = 28;
|
|
|
|
|
2004-08-04 02:07:45 +04:00
|
|
|
typedef int ompi_comm_cid_allredfct (int *inbuf, int* outbuf,
|
2005-05-20 03:16:06 +04:00
|
|
|
int count, struct ompi_op_t *op,
|
2004-08-04 02:07:45 +04:00
|
|
|
ompi_communicator_t *comm,
|
2004-06-07 19:33:53 +04:00
|
|
|
ompi_communicator_t *bridgecomm,
|
2004-08-05 20:31:30 +04:00
|
|
|
void* lleader, void* rleader,
|
|
|
|
int send_first );
|
2004-06-17 02:37:03 +04:00
|
|
|
|
2004-08-04 02:07:45 +04:00
|
|
|
static int ompi_comm_allreduce_intra (int *inbuf, int* outbuf,
|
2005-05-20 03:16:06 +04:00
|
|
|
int count, struct ompi_op_t *op,
|
2004-08-04 02:07:45 +04:00
|
|
|
ompi_communicator_t *intercomm,
|
2004-06-17 02:37:03 +04:00
|
|
|
ompi_communicator_t *bridgecomm,
|
2004-08-04 02:07:45 +04:00
|
|
|
void* local_leader,
|
2004-08-05 20:31:30 +04:00
|
|
|
void* remote_ledaer,
|
|
|
|
int send_first );
|
2004-05-21 23:36:19 +04:00
|
|
|
|
2004-08-04 02:07:45 +04:00
|
|
|
static int ompi_comm_allreduce_inter (int *inbuf, int *outbuf,
|
2005-05-20 03:16:06 +04:00
|
|
|
int count, struct ompi_op_t *op,
|
2004-08-04 02:07:45 +04:00
|
|
|
ompi_communicator_t *intercomm,
|
|
|
|
ompi_communicator_t *bridgecomm,
|
|
|
|
void* local_leader,
|
2004-08-05 20:31:30 +04:00
|
|
|
void* remote_leader,
|
|
|
|
int send_first );
|
2004-05-21 23:36:19 +04:00
|
|
|
|
2004-08-04 02:07:45 +04:00
|
|
|
static int ompi_comm_allreduce_intra_bridge(int *inbuf, int* outbuf,
|
2005-05-20 03:16:06 +04:00
|
|
|
int count, struct ompi_op_t *op,
|
2004-08-04 02:07:45 +04:00
|
|
|
ompi_communicator_t *intercomm,
|
2004-07-16 00:55:15 +04:00
|
|
|
ompi_communicator_t *bridgecomm,
|
2004-08-04 02:07:45 +04:00
|
|
|
void* local_leader,
|
2004-08-05 20:31:30 +04:00
|
|
|
void* remote_leader,
|
|
|
|
int send_first);
|
2004-05-21 23:36:19 +04:00
|
|
|
|
2004-08-04 02:07:45 +04:00
|
|
|
static int ompi_comm_allreduce_intra_oob (int *inbuf, int* outbuf,
|
2005-05-20 03:16:06 +04:00
|
|
|
int count, struct ompi_op_t *op,
|
2004-08-04 02:07:45 +04:00
|
|
|
ompi_communicator_t *intercomm,
|
2004-07-16 00:55:15 +04:00
|
|
|
ompi_communicator_t *bridgecomm,
|
2004-08-04 02:07:45 +04:00
|
|
|
void* local_leader,
|
2004-08-05 20:31:30 +04:00
|
|
|
void* remote_leader,
|
|
|
|
int send_first );
|
2004-05-21 23:36:19 +04:00
|
|
|
|
2004-09-21 22:39:06 +04:00
|
|
|
static int ompi_comm_register_cid (uint32_t contextid);
|
|
|
|
static int ompi_comm_unregister_cid (uint32_t contextid);
|
|
|
|
static uint32_t ompi_comm_lowest_cid ( void );
|
|
|
|
|
|
|
|
struct ompi_comm_reg_t{
|
2005-07-03 20:22:16 +04:00
|
|
|
opal_list_item_t super;
|
2004-09-21 22:39:06 +04:00
|
|
|
uint32_t cid;
|
|
|
|
};
|
|
|
|
typedef struct ompi_comm_reg_t ompi_comm_reg_t;
|
2004-10-22 20:06:05 +04:00
|
|
|
OMPI_DECLSPEC OBJ_CLASS_DECLARATION(ompi_comm_reg_t);
|
2004-09-21 22:39:06 +04:00
|
|
|
|
|
|
|
static void ompi_comm_reg_constructor(ompi_comm_reg_t *regcom);
|
|
|
|
static void ompi_comm_reg_destructor(ompi_comm_reg_t *regcom);
|
|
|
|
|
|
|
|
OBJ_CLASS_INSTANCE (ompi_comm_reg_t,
|
2007-04-06 23:18:31 +04:00
|
|
|
opal_list_item_t,
|
|
|
|
ompi_comm_reg_constructor,
|
|
|
|
ompi_comm_reg_destructor );
|
2004-05-21 23:36:19 +04:00
|
|
|
|
2005-07-04 02:45:48 +04:00
|
|
|
static opal_mutex_t ompi_cid_lock;
|
2005-07-03 20:22:16 +04:00
|
|
|
static opal_list_t ompi_registered_comms;
|
2004-09-17 20:28:58 +04:00
|
|
|
|
2007-08-17 20:15:26 +04:00
|
|
|
|
2004-09-17 20:28:58 +04:00
|
|
|
/**
 * Determine the next available context id (CID) for a newly created
 * communicator and publish it in the global ompi_mpi_communicators table.
 *
 * Two algorithms are used:
 *  - MPI_THREAD_MULTIPLE: the "lowest cid" protocol — serialize communicator
 *    creation per parent communicator via a registration list, then
 *    iteratively propose the lowest locally-free CID and agree on it with
 *    two allreduce rounds (MAX to propose, MIN to confirm).
 *  - otherwise: a block-allocation scheme — each communicator owns a block
 *    of CIDs (c_id_start_index / c_id_available) and hands them out locally,
 *    only negotiating (allreduce MAX over cid_block_start) when the block
 *    is exhausted.
 *
 * @param newcomm        the communicator that receives the new CID
 * @param comm           parent communicator the collective agreement runs on
 * @param bridgecomm     bridge communicator (intercomm-create scenario only)
 * @param local_leader   rank of local leader / lleader info (mode dependent)
 * @param remote_leader  remote leader rank or OOB contact info (mode dependent)
 * @param mode           one of the OMPI_COMM_CID_* constants selecting the
 *                       allreduce implementation
 * @param send_first     ordering hint for the inter/OOB exchanges
 * @return MPI_SUCCESS on success, MPI_UNDEFINED for an unknown mode
 */
int ompi_comm_nextcid ( ompi_communicator_t* newcomm,
                        ompi_communicator_t* comm,
                        ompi_communicator_t* bridgecomm,
                        void* local_leader,
                        void* remote_leader,
                        int mode, int send_first )
{
    int nextcid, block;
    int global_block_start;
    bool flag;

    ompi_comm_cid_allredfct* allredfnct;

    /**
     * Determine which implementation of allreduce we have to use
     * for the current scenario
     */
    switch (mode)
        {
        case OMPI_COMM_CID_INTRA:
            allredfnct=(ompi_comm_cid_allredfct*)ompi_comm_allreduce_intra;
            break;
        case OMPI_COMM_CID_INTER:
            allredfnct=(ompi_comm_cid_allredfct*)ompi_comm_allreduce_inter;
            break;
        case OMPI_COMM_CID_INTRA_BRIDGE:
            allredfnct=(ompi_comm_cid_allredfct*)ompi_comm_allreduce_intra_bridge;
            break;
        case OMPI_COMM_CID_INTRA_OOB:
            allredfnct=(ompi_comm_cid_allredfct*)ompi_comm_allreduce_intra_oob;
            break;
        default:
            return MPI_UNDEFINED;
            break;
        }

    /**
     * In case multi-threading is enabled, we revert to the old algorithm
     * starting from cid_block_start
     */
    if (MPI_THREAD_MULTIPLE == ompi_mpi_thread_provided) {
        int nextlocal_cid;
        int done=0;
        int response, glresponse=0;
        int start;
        int i;

        do {
            /* Only one communicator function allowed in same time on the
             * same communicator.
             */
            OPAL_THREAD_LOCK(&ompi_cid_lock);
            response = ompi_comm_register_cid (comm->c_contextid);
            OPAL_THREAD_UNLOCK(&ompi_cid_lock);
        } while (OMPI_SUCCESS != response );
        start = ompi_mpi_communicators.lowest_free;

        while (!done) {
            /**
             * This is the real algorithm described in the doc
             */
            OPAL_THREAD_LOCK(&ompi_cid_lock);
            if (comm->c_contextid != ompi_comm_lowest_cid() ) {
                /* if not lowest cid, we do not continue, but sleep and try again */
                OPAL_THREAD_UNLOCK(&ompi_cid_lock);
                continue;
            }
            OPAL_THREAD_UNLOCK(&ompi_cid_lock);

            /* Claim the lowest free slot in the global communicator table
             * as our local CID proposal.
             * NOTE(review): if every slot up to pml_max_contextid is taken,
             * this loop leaves nextlocal_cid uninitialized — confirm the
             * table can never be exhausted here. */
            for (i=start; i < mca_pml.pml_max_contextid ; i++) {
                flag=opal_pointer_array_test_and_set_item(&ompi_mpi_communicators,
                                                          i, comm);
                if (true == flag) {
                    nextlocal_cid = i;
                    break;
                }
            }

            /* Round 1: agree on the maximum of all local proposals. */
            (allredfnct)(&nextlocal_cid, &nextcid, 1, MPI_MAX, comm, bridgecomm,
                         local_leader, remote_leader, send_first );
            if (nextcid == nextlocal_cid) {
                response = 1; /* fine with me */
            }
            else {
                /* Release our losing proposal and try to claim the agreed
                 * CID locally instead. */
                opal_pointer_array_set_item(&ompi_mpi_communicators,
                                            nextlocal_cid, NULL);

                flag = opal_pointer_array_test_and_set_item(&ompi_mpi_communicators,
                                                            nextcid, comm );
                if (true == flag) {
                    response = 1; /* works as well */
                }
                else {
                    response = 0; /* nope, not acceptable */
                }
            }

            /* Round 2: the agreed CID is usable only if *everybody* could
             * claim it (MIN over the individual yes/no votes). */
            (allredfnct)(&response, &glresponse, 1, MPI_MIN, comm, bridgecomm,
                         local_leader, remote_leader, send_first );
            if (1 == glresponse) {
                done = 1;             /* we are done */
                break;
            }
            else if ( 0 == glresponse ) {
                if ( 1 == response ) {
                    /* we could use that, but others don't agree */
                    opal_pointer_array_set_item(&ompi_mpi_communicators,
                                                nextcid, NULL);
                }
                start = nextcid+1; /* that's where we can start the next round */
            }
        }

        /* set the according values to the newcomm */
        newcomm->c_contextid = nextcid;
        newcomm->c_f_to_c_index = newcomm->c_contextid;
        opal_pointer_array_set_item (&ompi_mpi_communicators, nextcid, newcomm);

        /* Drop the serialization entry so sibling creations may proceed. */
        OPAL_THREAD_LOCK(&ompi_cid_lock);
        ompi_comm_unregister_cid (comm->c_contextid);
        OPAL_THREAD_UNLOCK(&ompi_cid_lock);

        return (MPI_SUCCESS);
    }

    /**
     * In case the communication mode is INTRA_OOB or INTRA_BRIDGE, we use the
     * highest-free algorithm
     */
    if ( OMPI_COMM_CID_INTRA_OOB == mode || OMPI_COMM_CID_INTRA_BRIDGE == mode) {
        /* Agree on the highest block start seen anywhere and consume
         * exactly one CID from it. */
        (allredfnct)(&cid_block_start, &global_block_start, 1,
                     MPI_MAX, comm, bridgecomm,
                     local_leader, remote_leader, send_first );
        cid_block_start = global_block_start;
        nextcid = cid_block_start;
        cid_block_start = cid_block_start + 1;
    }
    else {
        flag=false;
        block = 0;
        /* MPI_COMM_WORLD (contextid 0) gets a larger reserved block than
         * derived communicators. */
        if( 0 == comm->c_contextid ) {
            block = OMPI_COMM_BLOCK_WORLD;
        }
        else {
            block = OMPI_COMM_BLOCK_OTHERS;
        }

        while(!flag) {
            /**
             * If the communicator has IDs available then allocate one for the child
             */
            if(MPI_UNDEFINED != comm->c_id_available &&
               MPI_UNDEFINED != comm->c_id_start_index &&
               block > comm->c_id_available - comm->c_id_start_index) {
                nextcid = comm->c_id_available;
                flag=opal_pointer_array_test_and_set_item (&ompi_mpi_communicators,
                                                           nextcid, comm);
            }
            /**
             * Otherwise the communicator needs to negotiate a new block of IDs
             */
            else {
                (allredfnct)(&cid_block_start, &global_block_start, 1,
                             MPI_MAX, comm, bridgecomm,
                             local_leader, remote_leader, send_first );
                cid_block_start = global_block_start;
                comm->c_id_available = cid_block_start;
                comm->c_id_start_index = cid_block_start;
                cid_block_start = cid_block_start + block;
            }
        }

        /* Consume the CID we just handed to the child. */
        comm->c_id_available++;
    }
    /* set the according values to the newcomm */
    newcomm->c_contextid = nextcid;
    newcomm->c_f_to_c_index = newcomm->c_contextid;
    opal_pointer_array_set_item (&ompi_mpi_communicators, nextcid, newcomm);

    return (MPI_SUCCESS);
}
|
2006-12-13 01:01:39 +03:00
|
|
|
|
2004-09-21 22:39:06 +04:00
|
|
|
/**************************************************************************/
|
|
|
|
/**************************************************************************/
|
|
|
|
/**************************************************************************/
|
|
|
|
static void ompi_comm_reg_constructor (ompi_comm_reg_t *regcom)
{
    /* A freshly created registration entry does not track any context id
       yet; mark it with the MPI_UNDEFINED sentinel. */
    regcom->cid = (uint32_t) MPI_UNDEFINED;
}
|
2004-09-17 20:28:58 +04:00
|
|
|
|
2004-09-21 22:39:06 +04:00
|
|
|
static void ompi_comm_reg_destructor (ompi_comm_reg_t *regcom)
|
2004-05-21 23:36:19 +04:00
|
|
|
{
|
2004-09-21 22:39:06 +04:00
|
|
|
}
|
2004-05-21 23:36:19 +04:00
|
|
|
|
2004-09-21 22:39:06 +04:00
|
|
|
void ompi_comm_reg_init (void)
|
|
|
|
{
|
2005-07-03 20:22:16 +04:00
|
|
|
OBJ_CONSTRUCT(&ompi_registered_comms, opal_list_t);
|
2008-09-09 16:57:45 +04:00
|
|
|
OBJ_CONSTRUCT(&ompi_cid_lock, opal_mutex_t);
|
2004-09-21 22:39:06 +04:00
|
|
|
}
|
2004-05-21 23:36:19 +04:00
|
|
|
|
2004-09-21 22:39:06 +04:00
|
|
|
void ompi_comm_reg_finalize (void)
|
|
|
|
{
|
|
|
|
OBJ_DESTRUCT(&ompi_registered_comms);
|
2008-09-09 16:57:45 +04:00
|
|
|
OBJ_DESTRUCT(&ompi_cid_lock);
|
2004-09-21 22:39:06 +04:00
|
|
|
}
|
2004-05-21 23:36:19 +04:00
|
|
|
|
|
|
|
|
2004-09-21 22:39:06 +04:00
|
|
|
/**
 * Register a communicator (by its context id) as currently running a
 * CID-negotiation. Entries are kept sorted by cid so that
 * ompi_comm_lowest_cid() can simply read the list head.
 *
 * Caller must hold ompi_cid_lock.
 *
 * @param cid  context id of the parent communicator
 * @return OMPI_SUCCESS on success; OMPI_ERROR (threaded builds only) when
 *         this cid is already registered, i.e. another creation on the
 *         same communicator is still in flight.
 */
static int ompi_comm_register_cid (uint32_t cid )
{
    opal_list_item_t *item;
    ompi_comm_reg_t *regcom;
    ompi_comm_reg_t *newentry = OBJ_NEW(ompi_comm_reg_t);

    newentry->cid = cid;
    if ( !(opal_list_is_empty (&ompi_registered_comms)) ) {
        /* Find the insertion point that keeps the list sorted by cid. */
        for (item = opal_list_get_first(&ompi_registered_comms);
             item != opal_list_get_end(&ompi_registered_comms);
             item = opal_list_get_next(item)) {
            regcom = (ompi_comm_reg_t *)item;
            if ( regcom->cid > cid ) {
                break;
            }
#if OMPI_ENABLE_MPI_THREADS
            if( regcom->cid == cid ) {
                /**
                 * The MPI standard states that it is the user's
                 * responsibility to schedule the global communications in
                 * order to avoid any kind of trouble. As managing
                 * communicators involves several collective communications,
                 * we should enforce a sequential execution order. This test
                 * only allows one communicator creation function at a time
                 * based on the same communicator.
                 */
                OBJ_RELEASE(newentry);
                return OMPI_ERROR;
            }
#endif /* OMPI_ENABLE_MPI_THREADS */
        }
        opal_list_insert_pos (&ompi_registered_comms, item,
                              (opal_list_item_t *)newentry);
    }
    else {
        opal_list_append (&ompi_registered_comms, (opal_list_item_t *)newentry);
    }

    return OMPI_SUCCESS;
}
|
|
|
|
|
2004-09-21 22:39:06 +04:00
|
|
|
/* Remove a previously registered cid from the negotiation list and release
   its entry. Caller must hold ompi_cid_lock. Always returns OMPI_SUCCESS,
   even when no matching entry exists. */
static int ompi_comm_unregister_cid (uint32_t cid)
{
    opal_list_item_t *item;

    for (item  = opal_list_get_first (&ompi_registered_comms);
         item != opal_list_get_end (&ompi_registered_comms);
         item  = opal_list_get_next (item)) {
        ompi_comm_reg_t *entry = (ompi_comm_reg_t *) item;

        if (cid == entry->cid) {
            /* The list is sorted and cids are unique, so the first match
               is the only one. */
            opal_list_remove_item (&ompi_registered_comms, item);
            OBJ_RELEASE(entry);
            break;
        }
    }
    return OMPI_SUCCESS;
}
|
|
|
|
|
|
|
|
/* Return the smallest registered cid, i.e. the head of the sorted
 * registration list. Caller must hold ompi_cid_lock.
 *
 * NOTE(review): if the list is empty, this casts the list sentinel and
 * reads garbage — callers appear to guarantee they have registered their
 * own cid first (see ompi_comm_nextcid), so the list is non-empty here;
 * confirm before relying on this elsewhere. */
static uint32_t ompi_comm_lowest_cid (void)
{
    ompi_comm_reg_t *regcom=NULL;
    opal_list_item_t *item=opal_list_get_first (&ompi_registered_comms);

    regcom = (ompi_comm_reg_t *)item;
    return regcom->cid;
}
|
2004-08-05 20:31:30 +04:00
|
|
|
/**************************************************************************/
|
|
|
|
/**************************************************************************/
|
|
|
|
/**************************************************************************/
|
|
|
|
/* This routine serves two purposes:
 * - the allreduce acts as a kind of barrier, which prevents incoming
 *   fragments on the new communicator before everybody has set up
 *   the comm structure.
 * - some components (e.g. the collective MagPIe component) might want
 *   to generate new communicators and communicate using the new comm.
 *   Thus, it can simply be called after the 'barrier'.
 *
 * The reason this routine lives in comm_cid.c and not in comm.c is that
 * this file contains the allreduce implementations which are required,
 * and thus we avoid duplicated code...
 */
|
2008-11-05 00:58:06 +03:00
|
|
|
/**
 * Activate a newly created communicator: register it with the PML and let
 * the collective components select modules for it. On any failure the new
 * communicator is released and *newcomm is reset to MPI_COMM_NULL.
 *
 * @param[in,out] newcomm  the communicator to activate
 * @return OMPI_SUCCESS, or the error code of the failing step
 */
int ompi_comm_activate ( ompi_communicator_t** newcomm )
{
    int ret = 0;

    /**
     * Check to see if this process is in the new communicator.
     *
     * Specifically, this function is invoked by all processes in the
     * old communicator, regardless of whether they are in the new
     * communicator or not. This is because it is far simpler to use
     * MPI collective functions on the old communicator to determine
     * some data for the new communicator (e.g., remote_leader) than
     * to kludge up our own pseudo-collective routines over just the
     * processes in the new communicator. Hence, *all* processes in
     * the old communicator need to invoke this function.
     *
     * That being said, only processes in the new communicator need to
     * select a coll module for the new communicator. More
     * specifically, processes who are not in the new communicator
     * should *not* select a coll module -- for example,
     * ompi_comm_rank(newcomm) returns MPI_UNDEFINED for processes who
     * are not in the new communicator. This can cause errors in the
     * selection / initialization of a coll module. Plus, it's
     * wasteful -- processes in the new communicator will end up
     * freeing the new communicator anyway, so we might as well leave
     * the coll selection as NULL (the coll base comm unselect code
     * handles that case properly).
     */
    if (MPI_UNDEFINED == (*newcomm)->c_local_group->grp_my_rank) {
        return OMPI_SUCCESS;
    }
    /* Initialize the PML stuff in the newcomm */
    if ( OMPI_SUCCESS != (ret = MCA_PML_CALL(add_comm(*newcomm))) ) {
        goto bail_on_error;
    }
    OMPI_COMM_SET_PML_ADDED(*newcomm);

    /* Let the collectives components fight over who will do
       collective on this new comm. */
    if (OMPI_SUCCESS != (ret = mca_coll_base_comm_select(*newcomm))) {
        goto bail_on_error;
    }
    return OMPI_SUCCESS;

 bail_on_error:
    /* On failure, dispose of the half-built communicator so the caller
       never sees a partially activated one. */
    OBJ_RELEASE(*newcomm);
    *newcomm = MPI_COMM_NULL;
    return ret;
}
|
|
|
|
|
2004-08-04 02:07:45 +04:00
|
|
|
/**************************************************************************/
|
|
|
|
/**************************************************************************/
|
|
|
|
/**************************************************************************/
|
2004-05-21 23:36:19 +04:00
|
|
|
/* Arguments not used in this implementation:
|
2004-06-17 02:37:03 +04:00
|
|
|
* - bridgecomm
|
|
|
|
* - local_leader
|
|
|
|
* - remote_leader
|
2004-08-05 20:31:30 +04:00
|
|
|
* - send_first
|
2004-06-17 02:37:03 +04:00
|
|
|
*/
|
2004-08-04 02:07:45 +04:00
|
|
|
static int ompi_comm_allreduce_intra ( int *inbuf, int *outbuf,
|
2005-05-20 03:16:06 +04:00
|
|
|
int count, struct ompi_op_t *op,
|
2004-08-04 02:07:45 +04:00
|
|
|
ompi_communicator_t *comm,
|
|
|
|
ompi_communicator_t *bridgecomm,
|
|
|
|
void* local_leader,
|
2004-08-05 20:31:30 +04:00
|
|
|
void* remote_leader,
|
|
|
|
int send_first )
|
2004-05-21 23:36:19 +04:00
|
|
|
{
|
2004-08-04 02:07:45 +04:00
|
|
|
return comm->c_coll.coll_allreduce ( inbuf, outbuf, count, MPI_INT,
|
2007-08-19 07:37:49 +04:00
|
|
|
op,comm,
|
|
|
|
comm->c_coll.coll_allreduce_module );
|
2004-05-21 23:36:19 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Arguments not used in this implementation:
|
2004-06-17 02:37:03 +04:00
|
|
|
* - bridgecomm
|
|
|
|
* - local_leader
|
|
|
|
* - remote_leader
|
2004-08-05 20:31:30 +04:00
|
|
|
* - send_first
|
2004-06-17 02:37:03 +04:00
|
|
|
*/
|
2004-08-04 02:07:45 +04:00
|
|
|
/**
 * Inter-communicator flavor of the CID allreduce. Each group first
 * allreduces locally; then the two local leaders (rank 0 of each group)
 * exchange their group results, combine them by hand, and the combined
 * result is spread to the other group with an allgatherv (only the leader
 * contributes a non-zero count), avoiding the send-ordering problem an
 * inter-bcast would have.
 *
 * Only MPI_SUM, MPI_PROD, MPI_MAX and MPI_MIN are supported.
 *
 * Arguments not used in this implementation:
 *  - bridgecomm, local_leader, remote_leader, send_first
 *
 * @return OMPI_SUCCESS / MPI_SUCCESS on success, MPI_ERR_OP / MPI_ERR_COMM
 *         for invalid arguments, or the error of the failing communication.
 */
static int ompi_comm_allreduce_inter ( int *inbuf, int *outbuf,
                                       int count, struct ompi_op_t *op,
                                       ompi_communicator_t *intercomm,
                                       ompi_communicator_t *bridgecomm,
                                       void* local_leader,
                                       void* remote_leader,
                                       int send_first )
{
    int local_rank, rsize;
    int i, rc;
    int *sbuf;
    int *tmpbuf=NULL;
    int *rcounts=NULL, scount=0;
    int *rdisps=NULL;

    if ( &ompi_mpi_op_sum.op != op && &ompi_mpi_op_prod.op != op &&
         &ompi_mpi_op_max.op != op && &ompi_mpi_op_min.op != op ) {
        return MPI_ERR_OP;
    }

    if ( !OMPI_COMM_IS_INTER (intercomm)) {
        return MPI_ERR_COMM;
    }

    /* Allocate temporary arrays */
    rsize      = ompi_comm_remote_size (intercomm);
    local_rank = ompi_comm_rank ( intercomm );

    tmpbuf  = (int *) malloc ( count * sizeof(int));
    rdisps  = (int *) calloc ( rsize, sizeof(int));
    rcounts = (int *) calloc ( rsize, sizeof(int) );
    if ( OPAL_UNLIKELY (NULL == tmpbuf || NULL == rdisps || NULL == rcounts)) {
        rc = OMPI_ERR_OUT_OF_RESOURCE;
        goto exit;
    }

    /* Execute the inter-allreduce: the result of our group will
       be in the buffer of the remote group */
    rc = intercomm->c_coll.coll_allreduce ( inbuf, tmpbuf, count, MPI_INT,
                                            op, intercomm,
                                            intercomm->c_coll.coll_allreduce_module);
    if ( OMPI_SUCCESS != rc ) {
        goto exit;
    }

    if ( 0 == local_rank ) {
        MPI_Request req;

        /* for the allgatherv later: only the leader contributes data */
        scount = count;

        /* local leaders exchange their data and determine the overall
           result for both groups; irecv-before-send avoids a deadlock
           between the two leaders */
        rc = MCA_PML_CALL(irecv (outbuf, count, MPI_INT, 0,
                                 OMPI_COMM_ALLREDUCE_TAG
                                 , intercomm, &req));
        if ( OMPI_SUCCESS != rc ) {
            goto exit;
        }
        rc = MCA_PML_CALL(send (tmpbuf, count, MPI_INT, 0,
                                OMPI_COMM_ALLREDUCE_TAG,
                                MCA_PML_BASE_SEND_STANDARD, intercomm));
        if ( OMPI_SUCCESS != rc ) {
            goto exit;
        }
        rc = ompi_request_wait_all ( 1, &req, MPI_STATUS_IGNORE );
        if ( OMPI_SUCCESS != rc ) {
            goto exit;
        }

        /* Combine the remote group's result (now in outbuf) with our own
           group's result (tmpbuf), applying op by hand. */
        if ( &ompi_mpi_op_max.op == op ) {
            for ( i = 0 ; i < count; i++ ) {
                if (tmpbuf[i] > outbuf[i]) outbuf[i] = tmpbuf[i];
            }
        }
        else if ( &ompi_mpi_op_min.op == op ) {
            for ( i = 0 ; i < count; i++ ) {
                if (tmpbuf[i] < outbuf[i]) outbuf[i] = tmpbuf[i];
            }
        }
        else if ( &ompi_mpi_op_sum.op == op ) {
            for ( i = 0 ; i < count; i++ ) {
                outbuf[i] += tmpbuf[i];
            }
        }
        else if ( &ompi_mpi_op_prod.op == op ) {
            for ( i = 0 ; i < count; i++ ) {
                outbuf[i] *= tmpbuf[i];
            }
        }
    }

    /* distribute the overall result to all processes in the other group.
       Instead of using bcast, we are using here allgatherv, to avoid the
       possible deadlock. Else, we need an algorithm to determine,
       which group sends first in the inter-bcast and which receives
       the result first.
       NOTE(review): sbuf aliases outbuf (send and receive buffer of the
       same allgatherv) — presumably safe because only the leader's slot
       is written back at displacement 0; confirm against the coll module. */
    rcounts[0] = count;
    sbuf = outbuf;
    rc = intercomm->c_coll.coll_allgatherv (sbuf, scount, MPI_INT, outbuf,
                                            rcounts, rdisps, MPI_INT,
                                            intercomm,
                                            intercomm->c_coll.coll_allgatherv_module);

 exit:
    if ( NULL != tmpbuf ) {
        free ( tmpbuf );
    }
    if ( NULL != rcounts ) {
        free ( rcounts );
    }
    if ( NULL != rdisps ) {
        free ( rdisps );
    }

    return (rc);
}
|
|
|
|
|
|
|
|
/* Arguments not used in this implementation:
|
2004-08-05 20:31:30 +04:00
|
|
|
* - send_first
|
|
|
|
*/
|
2004-08-04 02:07:45 +04:00
|
|
|
/**
 * Pseudo-intercommunicator flavor of the CID allreduce, used in the
 * intercomm-create scenario: two intra-communicators connected through a
 * bridge communicator. Each side allreduces locally, the two local leaders
 * exchange and merge the results over the bridge comm, and the merged
 * result is broadcast back within each side.
 *
 * Only MPI_SUM, MPI_PROD, MPI_MAX and MPI_MIN are supported.
 *
 * Arguments not used in this implementation:
 *  - send_first
 *
 * @param lleader  pointer to the local leader's rank in comm
 * @param rleader  pointer to the remote leader's rank in bcomm
 */
static int ompi_comm_allreduce_intra_bridge (int *inbuf, int *outbuf,
                                             int count, struct ompi_op_t *op,
                                             ompi_communicator_t *comm,
                                             ompi_communicator_t *bcomm,
                                             void* lleader, void* rleader,
                                             int send_first )
{
    int *tmpbuf=NULL;
    int local_rank;
    int i;
    int rc;
    int local_leader, remote_leader;

    local_leader  = (*((int*)lleader));
    remote_leader = (*((int*)rleader));

    if ( &ompi_mpi_op_sum.op != op && &ompi_mpi_op_prod.op != op &&
         &ompi_mpi_op_max.op != op && &ompi_mpi_op_min.op != op ) {
        return MPI_ERR_OP;
    }

    local_rank = ompi_comm_rank ( comm );
    tmpbuf = (int *) malloc ( count * sizeof(int));
    if ( NULL == tmpbuf ) {
        rc = OMPI_ERR_OUT_OF_RESOURCE;
        goto exit;
    }

    /* Intercomm_create: reduce within our own intra-communicator first */
    rc = comm->c_coll.coll_allreduce ( inbuf, tmpbuf, count, MPI_INT,
                                       op, comm, comm->c_coll.coll_allreduce_module );
    if ( OMPI_SUCCESS != rc ) {
        goto exit;
    }

    if (local_rank == local_leader ) {
        MPI_Request req;

        /* Leaders swap their group results over the bridge comm;
           irecv-before-send avoids a deadlock between the two leaders. */
        rc = MCA_PML_CALL(irecv ( outbuf, count, MPI_INT, remote_leader,
                                  OMPI_COMM_ALLREDUCE_TAG,
                                  bcomm, &req));
        if ( OMPI_SUCCESS != rc ) {
            goto exit;
        }
        rc = MCA_PML_CALL(send (tmpbuf, count, MPI_INT, remote_leader,
                                OMPI_COMM_ALLREDUCE_TAG,
                                MCA_PML_BASE_SEND_STANDARD, bcomm));
        if ( OMPI_SUCCESS != rc ) {
            goto exit;
        }
        rc = ompi_request_wait_all ( 1, &req, MPI_STATUS_IGNORE);
        if ( OMPI_SUCCESS != rc ) {
            goto exit;
        }

        /* Merge the remote side's result (outbuf) with ours (tmpbuf),
           applying op by hand. */
        if ( &ompi_mpi_op_max.op == op ) {
            for ( i = 0 ; i < count; i++ ) {
                if (tmpbuf[i] > outbuf[i]) outbuf[i] = tmpbuf[i];
            }
        }
        else if ( &ompi_mpi_op_min.op == op ) {
            for ( i = 0 ; i < count; i++ ) {
                if (tmpbuf[i] < outbuf[i]) outbuf[i] = tmpbuf[i];
            }
        }
        else if ( &ompi_mpi_op_sum.op == op ) {
            for ( i = 0 ; i < count; i++ ) {
                outbuf[i] += tmpbuf[i];
            }
        }
        else if ( &ompi_mpi_op_prod.op == op ) {
            for ( i = 0 ; i < count; i++ ) {
                outbuf[i] *= tmpbuf[i];
            }
        }
    }

    /* Spread the merged result from the leader to the rest of this side. */
    rc = comm->c_coll.coll_bcast ( outbuf, count, MPI_INT, local_leader,
                                   comm, comm->c_coll.coll_bcast_module );

 exit:
    if (NULL != tmpbuf ) {
        free (tmpbuf);
    }

    return (rc);
}
|
|
|
|
|
2004-06-17 02:37:03 +04:00
|
|
|
/* Arguments not used in this implementation:
|
|
|
|
* - bridgecomm
|
|
|
|
*
|
2004-08-05 20:31:30 +04:00
|
|
|
* lleader is the local rank of root in comm
|
|
|
|
* rleader is the OOB contact information of the
|
|
|
|
* root processes in the other world.
|
2004-06-17 02:37:03 +04:00
|
|
|
*/
|
2004-08-04 02:07:45 +04:00
|
|
|
static int ompi_comm_allreduce_intra_oob (int *inbuf, int *outbuf,
|
2005-05-20 03:16:06 +04:00
|
|
|
int count, struct ompi_op_t *op,
|
2004-08-04 02:07:45 +04:00
|
|
|
ompi_communicator_t *comm,
|
2004-06-17 02:37:03 +04:00
|
|
|
ompi_communicator_t *bridgecomm,
|
2004-08-05 20:31:30 +04:00
|
|
|
void* lleader, void* rleader,
|
|
|
|
int send_first )
|
2004-06-17 02:37:03 +04:00
|
|
|
{
|
|
|
|
int *tmpbuf=NULL;
|
|
|
|
int i;
|
|
|
|
int rc;
|
2004-08-05 20:31:30 +04:00
|
|
|
int local_leader, local_rank;
|
2005-03-14 23:57:21 +03:00
|
|
|
orte_process_name_t *remote_leader=NULL;
|
2006-08-15 23:54:10 +04:00
|
|
|
orte_std_cntr_t size_count;
|
2004-06-17 02:37:03 +04:00
|
|
|
|
2004-08-05 20:31:30 +04:00
|
|
|
local_leader = (*((int*)lleader));
|
2005-03-14 23:57:21 +03:00
|
|
|
remote_leader = (orte_process_name_t*)rleader;
|
2005-07-06 00:59:35 +04:00
|
|
|
size_count = count;
|
2004-06-17 02:37:03 +04:00
|
|
|
|
2009-02-24 20:17:33 +03:00
|
|
|
if ( &ompi_mpi_op_sum.op != op && &ompi_mpi_op_prod.op != op &&
|
|
|
|
&ompi_mpi_op_max.op != op && &ompi_mpi_op_min.op != op ) {
|
2004-06-17 02:37:03 +04:00
|
|
|
return MPI_ERR_OP;
|
|
|
|
}
|
|
|
|
|
2004-08-05 20:31:30 +04:00
|
|
|
|
|
|
|
local_rank = ompi_comm_rank ( comm );
|
2005-04-07 21:48:42 +04:00
|
|
|
tmpbuf = (int *) malloc ( count * sizeof(int));
|
2004-06-17 02:37:03 +04:00
|
|
|
if ( NULL == tmpbuf ) {
|
2008-08-11 13:43:01 +04:00
|
|
|
rc = OMPI_ERR_OUT_OF_RESOURCE;
|
|
|
|
goto exit;
|
2004-06-17 02:37:03 +04:00
|
|
|
}
|
|
|
|
|
2004-08-05 20:31:30 +04:00
|
|
|
/* comm is an intra-communicator */
|
2007-08-19 07:37:49 +04:00
|
|
|
rc = comm->c_coll.coll_allreduce(inbuf,tmpbuf,count,MPI_INT,op, comm,
|
|
|
|
comm->c_coll.coll_allreduce_module);
|
2004-06-17 02:37:03 +04:00
|
|
|
if ( OMPI_SUCCESS != rc ) {
|
|
|
|
goto exit;
|
|
|
|
}
|
2004-08-05 20:31:30 +04:00
|
|
|
|
2004-06-17 02:37:03 +04:00
|
|
|
if (local_rank == local_leader ) {
|
2008-02-28 04:57:57 +03:00
|
|
|
opal_buffer_t *sbuf;
|
|
|
|
opal_buffer_t *rbuf;
|
2004-08-05 20:31:30 +04:00
|
|
|
|
2008-02-28 04:57:57 +03:00
|
|
|
sbuf = OBJ_NEW(opal_buffer_t);
|
|
|
|
rbuf = OBJ_NEW(opal_buffer_t);
|
2005-03-14 23:57:21 +03:00
|
|
|
|
2008-02-28 04:57:57 +03:00
|
|
|
if (ORTE_SUCCESS != (rc = opal_dss.pack(sbuf, tmpbuf, (orte_std_cntr_t)count, OPAL_INT))) {
|
2005-03-14 23:57:21 +03:00
|
|
|
goto exit;
|
|
|
|
}
|
2004-08-05 20:31:30 +04:00
|
|
|
|
2004-08-13 02:41:42 +04:00
|
|
|
if ( send_first ) {
|
2008-06-18 07:15:56 +04:00
|
|
|
if (0 > (rc = orte_rml.send_buffer(remote_leader, sbuf, OMPI_RML_TAG_COMM_CID_INTRA, 0))) {
|
|
|
|
goto exit;
|
|
|
|
}
|
|
|
|
if (0 > (rc = orte_rml.recv_buffer(remote_leader, rbuf, OMPI_RML_TAG_COMM_CID_INTRA, 0))) {
|
|
|
|
goto exit;
|
|
|
|
}
|
2004-08-05 20:31:30 +04:00
|
|
|
}
|
|
|
|
else {
|
2008-06-18 07:15:56 +04:00
|
|
|
if (0 > (rc = orte_rml.recv_buffer(remote_leader, rbuf, OMPI_RML_TAG_COMM_CID_INTRA, 0))) {
|
|
|
|
goto exit;
|
|
|
|
}
|
|
|
|
if (0 > (rc = orte_rml.send_buffer(remote_leader, sbuf, OMPI_RML_TAG_COMM_CID_INTRA, 0))) {
|
|
|
|
goto exit;
|
|
|
|
}
|
2004-08-05 20:31:30 +04:00
|
|
|
}
|
|
|
|
|
2008-02-28 04:57:57 +03:00
|
|
|
if (ORTE_SUCCESS != (rc = opal_dss.unpack(rbuf, outbuf, &size_count, OPAL_INT))) {
|
2005-03-14 23:57:21 +03:00
|
|
|
goto exit;
|
|
|
|
}
|
|
|
|
OBJ_RELEASE(sbuf);
|
|
|
|
OBJ_RELEASE(rbuf);
|
2006-08-15 23:54:10 +04:00
|
|
|
count = (int)size_count;
|
2004-08-13 02:41:42 +04:00
|
|
|
|
2009-02-24 20:17:33 +03:00
|
|
|
if ( &ompi_mpi_op_max.op == op ) {
|
2004-06-17 02:37:03 +04:00
|
|
|
for ( i = 0 ; i < count; i++ ) {
|
|
|
|
if (tmpbuf[i] > outbuf[i]) outbuf[i] = tmpbuf[i];
|
|
|
|
}
|
|
|
|
}
|
2009-02-24 20:17:33 +03:00
|
|
|
else if ( &ompi_mpi_op_min.op == op ) {
|
2004-06-17 02:37:03 +04:00
|
|
|
for ( i = 0 ; i < count; i++ ) {
|
|
|
|
if (tmpbuf[i] < outbuf[i]) outbuf[i] = tmpbuf[i];
|
|
|
|
}
|
|
|
|
}
|
2009-02-24 20:17:33 +03:00
|
|
|
else if ( &ompi_mpi_op_sum.op == op ) {
|
2004-06-17 02:37:03 +04:00
|
|
|
for ( i = 0 ; i < count; i++ ) {
|
|
|
|
outbuf[i] += tmpbuf[i];
|
|
|
|
}
|
|
|
|
}
|
2009-02-24 20:17:33 +03:00
|
|
|
else if ( &ompi_mpi_op_prod.op == op ) {
|
2004-06-17 02:37:03 +04:00
|
|
|
for ( i = 0 ; i < count; i++ ) {
|
|
|
|
outbuf[i] *= tmpbuf[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2004-08-05 20:31:30 +04:00
|
|
|
rc = comm->c_coll.coll_bcast (outbuf, count, MPI_INT,
|
2007-08-19 07:37:49 +04:00
|
|
|
local_leader, comm,
|
|
|
|
comm->c_coll.coll_bcast_module);
|
2004-06-17 02:37:03 +04:00
|
|
|
|
|
|
|
exit:
|
|
|
|
if (NULL != tmpbuf ) {
|
|
|
|
free (tmpbuf);
|
|
|
|
}
|
|
|
|
|
|
|
|
return (rc);
|
|
|
|
}
|
2008-02-28 04:57:57 +03:00
|
|
|
|
|
|
|
END_C_DECLS
|
2008-11-05 00:58:06 +03:00
|
|
|
|