/*
 * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2007 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2006-2008 Los Alamos National Security, LLC.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 *
 */
#include "orte_config.h"
|
|
|
|
#include "orte/constants.h"
|
|
|
|
#include "orte/types.h"
|
|
|
|
|
|
|
|
#include <string.h>
|
|
|
|
#ifdef HAVE_SYS_TIME_H
|
|
|
|
#include <sys/time.h>
|
|
|
|
#endif /* HAVE_SYS_TIME_H */
|
|
|
|
|
|
|
|
#include "opal/threads/condition.h"
|
|
|
|
#include "opal/util/bit_ops.h"
|
|
|
|
|
|
|
|
#include "opal/class/opal_hash_table.h"
|
|
|
|
#include "opal/dss/dss.h"
|
|
|
|
#include "orte/mca/errmgr/errmgr.h"
|
|
|
|
#include "orte/mca/odls/odls_types.h"
|
|
|
|
#include "orte/mca/rml/rml.h"
|
2009-02-14 05:26:12 +03:00
|
|
|
#include "orte/mca/rml/rml_types.h"
|
2009-01-24 00:57:51 +03:00
|
|
|
#include "orte/runtime/orte_globals.h"
|
|
|
|
#include "orte/util/name_fns.h"
|
|
|
|
#include "orte/orted/orted.h"
|
|
|
|
#include "orte/runtime/orte_wait.h"
|
|
|
|
|
|
|
|
#include "orte/mca/grpcomm/base/base.h"
|
|
|
|
|
|
|
|
/*************** TUNED COLLECTIVES FOR GRPCOMM MODULES **************/
|
|
|
|
|
|
|
|
/**** AVAILABLE ALGORITHMS ****/
|
|
|
|
static int twoproc(opal_buffer_t *sendbuf, opal_buffer_t *recvbuf, int32_t num_entries,
                   orte_jobid_t jobid, orte_vpid_t *vpids);
static int bruck(opal_buffer_t *sendbuf, opal_buffer_t *recvbuf, int32_t num_entries,
                 orte_jobid_t jobid, orte_vpid_t np, orte_vpid_t *vpids);
static int recursivedoubling(opal_buffer_t *sendbuf, opal_buffer_t *recvbuf, int32_t num_entries,
                             orte_jobid_t jobid, orte_vpid_t np, orte_vpid_t *vpids);

/**** LOCAL VARIABLES USED IN COLLECTIVES ****/
static int num_recvd;
static opal_buffer_t bucket;

/* Receive and process collective messages */
static void process_coll_msg(int fd, short event, void *data)
{
    orte_message_event_t *mev = (orte_message_event_t*)data;

    /* transfer the data to the collecting bucket */
    opal_dss.copy_payload(&bucket, mev->buffer);

    /* cleanup */
    OBJ_RELEASE(mev);

    /* increment the number recvd */
    num_recvd++;
}

void orte_grpcomm_base_coll_recv(int status, orte_process_name_t* sender,
                                 opal_buffer_t* buffer, orte_rml_tag_t tag,
                                 void* cbdata)
{
    OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_output,
                         "%s grpcomm:coll:receive got message from %s",
                         orte_util_print_name_args(ORTE_PROC_MY_NAME),
                         orte_util_print_name_args(sender)));

    /* don't process this right away - we need to get out of the recv before
     * we process the message, as it may ask us to do something that involves
     * more messaging! Instead, set up an event so that the message gets processed
     * as soon as we leave the recv.
     *
     * The macro makes a copy of the buffer, which is released in
     * process_coll_msg above - the incoming buffer, however, is NOT released
     * here, although its payload IS transferred to the message buffer for
     * later processing
     */
    ORTE_MESSAGE_EVENT(sender, buffer, tag, process_coll_msg);

    return;
}

/*
 * Switchyard for selecting the collective algorithm to use
 */
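/*
 * Selection logic, as implemented below:
 *   np == 1          -> direct copy into recvbuf
 *   np == 2          -> twoproc
 *   np a power of 2  -> recursivedoubling
 *   otherwise        -> bruck
 */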
int orte_grpcomm_base_allgather(opal_buffer_t *sendbuf, opal_buffer_t *recvbuf, int32_t num_entries,
                                orte_jobid_t jobid, orte_vpid_t np, orte_vpid_t *vpids)
{
    bool has_one;
    orte_vpid_t n;

    OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_output,
                         "%s grpcomm:coll:allgather called with %d entries np %d",
                         orte_util_print_name_args(ORTE_PROC_MY_NAME),
                         num_entries, (int)np));

    /* if we only have one proc participating, just copy the data across and return */
    if (1 == np) {
        opal_dss.pack(recvbuf, &num_entries, 1, OPAL_INT32);
        return opal_dss.copy_payload(recvbuf, sendbuf);
    }

    if (2 == np) {
        /* only two procs in collective */
        return twoproc(sendbuf, recvbuf, num_entries, jobid, vpids);
    }

    /* if we have a power of 2 participants, use recursive doubling - otherwise,
     * use the Bruck algorithm
     */
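    /* e.g., np = 6 (binary 110) hits a second set bit and goes to bruck,
     * while np = 8 (binary 1000) has a single set bit and falls through
     * to recursive doubling
     */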
    has_one = false;
    n = np;
    for ( ; n > 0; n >>= 1) {
        if (n & 0x1) {
            if (has_one) {
                return bruck(sendbuf, recvbuf, num_entries, jobid, np, vpids);
            }
            has_one = true;
        }
    }

    /* must be power of two! */
    return recursivedoubling(sendbuf, recvbuf, num_entries, jobid, np, vpids);
}

/*
 * The Two-Proc Algorithm
 *
 * The proc holding vpids[0] sends its data to vpids[1] and waits for a reply;
 * the proc holding vpids[1] receives that data first, then sends its own
 * data back
 */
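/* note: both procs finish with the same combined entry count and the same
 * set of entries, though each places its own data ahead of its peer's
 * in recvbuf
 */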
static int twoproc(opal_buffer_t *sendbuf, opal_buffer_t *recvbuf, int32_t num_entries,
                   orte_jobid_t jobid, orte_vpid_t *vpids)
{
    orte_process_name_t peer;
    int32_t num_remote, cnt;
    int rc;
    opal_buffer_t buf;

    peer.jobid = jobid;

    OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_output,
                         "%s grpcomm:coll:two-proc algo employed",
                         orte_util_print_name_args(ORTE_PROC_MY_NAME)));

    if (vpids[0] == ORTE_PROC_MY_NAME->vpid) {
        /* I send first */
        peer.vpid = vpids[1];
        /* setup a temp buffer so I can inform the other side as to the
         * number of entries in my buffer
         */
        OBJ_CONSTRUCT(&buf, opal_buffer_t);
        opal_dss.pack(&buf, &num_entries, 1, OPAL_INT32);
        opal_dss.copy_payload(&buf, sendbuf);
        OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_output,
                             "%s grpcomm:coll:two-proc sending to %s",
                             orte_util_print_name_args(ORTE_PROC_MY_NAME),
                             orte_util_print_name_args(&peer)));

        if (0 > (rc = orte_rml.send_buffer(&peer, &buf, ORTE_RML_TAG_DAEMON_COLLECTIVE, 0))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
        OBJ_DESTRUCT(&buf);

        /* wait for reply */
        num_recvd = 0;
        OBJ_CONSTRUCT(&bucket, opal_buffer_t);
        if (ORTE_SUCCESS != (rc = orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD,
                                                          ORTE_RML_TAG_DAEMON_COLLECTIVE,
                                                          ORTE_RML_NON_PERSISTENT,
                                                          orte_grpcomm_base_coll_recv,
                                                          NULL))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }

        ORTE_PROGRESSED_WAIT(false, num_recvd, 1);
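        /* the progressed wait spins the progress engine until num_recvd
         * reaches 1, i.e., until process_coll_msg has transferred the
         * peer's message into our bucket
         */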
        OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_output,
                             "%s grpcomm:coll:two-proc got my return message",
                             orte_util_print_name_args(ORTE_PROC_MY_NAME)));

    } else {
        /* if I am not the start, then I recv first */
        num_recvd = 0;
        OBJ_CONSTRUCT(&bucket, opal_buffer_t);
        if (ORTE_SUCCESS != (rc = orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD,
                                                          ORTE_RML_TAG_DAEMON_COLLECTIVE,
                                                          ORTE_RML_NON_PERSISTENT,
                                                          orte_grpcomm_base_coll_recv,
                                                          NULL))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }

        ORTE_PROGRESSED_WAIT(false, num_recvd, 1);
        OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_output,
                             "%s grpcomm:coll:two-proc got my starting message",
                             orte_util_print_name_args(ORTE_PROC_MY_NAME)));

        /* send my data back */
        OBJ_CONSTRUCT(&buf, opal_buffer_t);
        opal_dss.pack(&buf, &num_entries, 1, OPAL_INT32);
        opal_dss.copy_payload(&buf, sendbuf);
        peer.vpid = vpids[0];
        OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_output,
                             "%s grpcomm:coll:two-proc sending to %s",
                             orte_util_print_name_args(ORTE_PROC_MY_NAME),
                             orte_util_print_name_args(&peer)));
        if (0 > (rc = orte_rml.send_buffer(&peer, &buf, ORTE_RML_TAG_DAEMON_COLLECTIVE, 0))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
        OBJ_DESTRUCT(&buf);
    }

    /* extract the number of entries in the remote buffer */
    cnt = 1;
    if (ORTE_SUCCESS != (rc = opal_dss.unpack(&bucket, &num_remote, &cnt, OPAL_INT32))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    /* output of a collective begins with the total number of entries */
    num_remote += num_entries;
    if (ORTE_SUCCESS != (rc = opal_dss.pack(recvbuf, &num_remote, 1, OPAL_INT32))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }
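    /* recvbuf thus carries [int32 combined count | my data | peer data]
     * once the two payload transfers below complete
     */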

    /* xfer my data */
    opal_dss.copy_payload(recvbuf, sendbuf);
    /* xfer the recvd data */
    opal_dss.copy_payload(recvbuf, &bucket);

    /* cleanup */
    OBJ_DESTRUCT(&bucket);

    return ORTE_SUCCESS;
}

/* For a complete description of this algorithm, please look at
 * ompi/mca/coll/tuned/coll_tuned_allgather.c
 */
static int bruck(opal_buffer_t *sendbuf, opal_buffer_t *recvbuf, int32_t num_entries,
                 orte_jobid_t jobid, orte_vpid_t np, orte_vpid_t *vpids)
{
    orte_vpid_t rank, distance, nv;
    orte_process_name_t peer;
    int32_t num_remote, total_entries, cnt;
    opal_buffer_t collection, buf;
    int rc;

    OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_output,
                         "%s grpcomm:coll:bruck algo employed",
                         orte_util_print_name_args(ORTE_PROC_MY_NAME)));

    /* initialize */
    total_entries = num_entries;

    /* start by seeding the collection with our own data */
    OBJ_CONSTRUCT(&collection, opal_buffer_t);
    opal_dss.copy_payload(&collection, sendbuf);

    /* collective is constrained to take place within the specified jobid */
    peer.jobid = jobid;

    /* Communication step:
       At every step i, rank r:
       - doubles the distance
       - sends message containing all data collected so far to rank (r - distance)
       - receives message containing all data collected so far from rank (r + distance)
    */
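    /* e.g., np = 5, rank 2 steps through distances 1, 2, 4:
     *   distance 1: sends to rank 1, receives from rank 3
     *   distance 2: sends to rank 0, receives from rank 4
     *   distance 4: sends to rank 3, receives from rank 1
     */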
    /* find my position in the group of participants. This
     * value is the "rank" we will use in the algo
     */
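    /* the participating vpids need not be contiguous or start at zero,
     * so the rank is simply the index into the vpids array
     */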
    rank = ORTE_VPID_INVALID;
    for (nv=0; nv < np; nv++) {
        if (vpids[nv] == ORTE_PROC_MY_NAME->vpid) {
            rank = nv;
            break;
        }
    }

    /* check for bozo case */
    if (ORTE_VPID_INVALID == rank) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return ORTE_ERR_NOT_FOUND;
    }

    for (distance = 1; distance < np; distance <<= 1) {

        /* first send my current contents */
        nv = (rank - distance + np) % np;
        peer.vpid = vpids[nv];
        OBJ_CONSTRUCT(&buf, opal_buffer_t);
        opal_dss.pack(&buf, &total_entries, 1, OPAL_INT32);
        opal_dss.copy_payload(&buf, &collection);
        OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_output,
                             "%s grpcomm:coll:bruck sending to %s",
                             orte_util_print_name_args(ORTE_PROC_MY_NAME),
                             orte_util_print_name_args(&peer)));
        if (0 > (rc = orte_rml.send_buffer(&peer, &buf, ORTE_RML_TAG_DAEMON_COLLECTIVE, 0))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
        OBJ_DESTRUCT(&buf);

        /* now setup to recv from my other partner */
        num_recvd = 0;
        nv = (rank + distance) % np;
        peer.vpid = vpids[nv];
        OBJ_CONSTRUCT(&bucket, opal_buffer_t);
        if (ORTE_SUCCESS != (rc = orte_rml.recv_buffer_nb(&peer,
                                                          ORTE_RML_TAG_DAEMON_COLLECTIVE,
                                                          ORTE_RML_NON_PERSISTENT,
                                                          orte_grpcomm_base_coll_recv,
                                                          NULL))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
        /* and wait for it to get here */
        ORTE_PROGRESSED_WAIT(false, num_recvd, 1);

        /* extract the number of entries in the remote buffer */
        cnt = 1;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&bucket, &num_remote, &cnt, OPAL_INT32))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }

        /* add it to our running total */
        total_entries += num_remote;

        /* transfer the data to our collection */
        opal_dss.copy_payload(&collection, &bucket);

        /* cleanup */
        OBJ_DESTRUCT(&bucket);
    }

    /* output of a collective begins with the total number of entries */
    if (ORTE_SUCCESS != (rc = opal_dss.pack(recvbuf, &total_entries, 1, OPAL_INT32))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    /* transfer the collected data */
    opal_dss.copy_payload(recvbuf, &collection);

    /* cleanup */
    OBJ_DESTRUCT(&collection);

    return ORTE_SUCCESS;
}

/* For a complete description of this algorithm, please look at
 * ompi/mca/coll/tuned/coll_tuned_allgather.c
 */
static int recursivedoubling(opal_buffer_t *sendbuf, opal_buffer_t *recvbuf, int32_t num_entries,
                             orte_jobid_t jobid, orte_vpid_t np, orte_vpid_t *vpids)
{
    orte_vpid_t rank, distance, nv;
    int32_t num_remote, total_entries, cnt;
    opal_buffer_t collection, buf;
    orte_process_name_t peer;
    int rc;

    OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_output,
                         "%s grpcomm:coll:recdub algo employed",
                         orte_util_print_name_args(ORTE_PROC_MY_NAME)));

    /* initialize */
    total_entries = num_entries;

    /* start by seeding the collection with our own data */
    OBJ_CONSTRUCT(&collection, opal_buffer_t);
    opal_dss.copy_payload(&collection, sendbuf);

    /* collective is constrained to take place within the specified jobid */
    peer.jobid = jobid;

    /* Communication step:
       At every step i, rank r:
       - exchanges message containing all data collected so far with rank peer = (r ^ 2^i).
    */
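    /* e.g., np = 4, rank 1 steps through distances 1 and 2:
     *   distance 1: exchanges with rank 0 (1 ^ 1)
     *   distance 2: exchanges with rank 3 (1 ^ 2)
     * after which every proc holds all four contributions
     */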
    /* find my position in the group of participants. This
     * value is the "rank" we will use in the algo
     */
    rank = ORTE_VPID_INVALID;
    for (nv=0; nv < np; nv++) {
        if (vpids[nv] == ORTE_PROC_MY_NAME->vpid) {
            rank = nv;
            break;
        }
    }

    /* check for bozo case */
    if (ORTE_VPID_INVALID == rank) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return ORTE_ERR_NOT_FOUND;
    }

    for (distance = 0x1; distance < np; distance <<= 1) {

        /* first send my current contents */
        nv = rank ^ distance;
        peer.vpid = vpids[nv];
        OBJ_CONSTRUCT(&buf, opal_buffer_t);
        opal_dss.pack(&buf, &total_entries, 1, OPAL_INT32);
        opal_dss.copy_payload(&buf, &collection);
        OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_output,
                             "%s grpcomm:coll:recdub sending to %s",
                             orte_util_print_name_args(ORTE_PROC_MY_NAME),
                             orte_util_print_name_args(&peer)));
        if (0 > (rc = orte_rml.send_buffer(&peer, &buf, ORTE_RML_TAG_DAEMON_COLLECTIVE, 0))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
        OBJ_DESTRUCT(&buf);

        /* now setup to recv from my other partner */
        num_recvd = 0;
        OBJ_CONSTRUCT(&bucket, opal_buffer_t);
        if (ORTE_SUCCESS != (rc = orte_rml.recv_buffer_nb(&peer,
                                                          ORTE_RML_TAG_DAEMON_COLLECTIVE,
                                                          ORTE_RML_NON_PERSISTENT,
                                                          orte_grpcomm_base_coll_recv,
                                                          NULL))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
        /* and wait for it to get here */
        ORTE_PROGRESSED_WAIT(false, num_recvd, 1);

        /* extract the number of entries in the remote buffer */
        cnt = 1;
        if (ORTE_SUCCESS != (rc = opal_dss.unpack(&bucket, &num_remote, &cnt, OPAL_INT32))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }

        /* add it to our running total */
        total_entries += num_remote;

        /* transfer the data to our collection */
        opal_dss.copy_payload(&collection, &bucket);

        /* cleanup */
        OBJ_DESTRUCT(&bucket);
    }

    /* output of a collective begins with the total number of entries */
    if (ORTE_SUCCESS != (rc = opal_dss.pack(recvbuf, &total_entries, 1, OPAL_INT32))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }

    /* transfer the collected data */
    opal_dss.copy_payload(recvbuf, &collection);

    /* cleanup */
    OBJ_DESTRUCT(&collection);

    return ORTE_SUCCESS;
}