/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University.
 *                         All rights reserved.
 * Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
 *                         All rights reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2007-2015 Los Alamos National Security, LLC. All rights
 *                         reserved.
 * Copyright (c) 2010      IBM Corporation. All rights reserved.
 * Copyright (c) 2012-2013 Sandia National Laboratories. All rights reserved.
 * Copyright (c) 2015      Research Organization for Information Science
 *                         and Technology (RIST). All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "ompi_config.h"

#include "osc_pt2pt.h"
#include "osc_pt2pt_header.h"
#include "osc_pt2pt_data_move.h"
#include "osc_pt2pt_frag.h"

#include "mpi.h"
#include "opal/runtime/opal_progress.h"
#include "opal/threads/mutex.h"
#include "ompi/communicator/communicator.h"
#include "ompi/mca/osc/base/base.h"

/**
 * compare_ranks:
 *
 * @param[in] ptra    Pointer to integer item
 * @param[in] ptrb    Pointer to integer item
 *
 * @returns 0 if *ptra == *ptrb
 * @returns -1 if *ptra < *ptrb
 * @returns 1 otherwise
 *
 * This function is used to sort the rank list. It can be removed if
 * groups are always in order.
 */
static int compare_ranks (const void *ptra, const void *ptrb)
{
    int a = *((int *) ptra);
    int b = *((int *) ptrb);

    if (a < b) {
        return -1;
    } else if (a > b) {
        return 1;
    }

    return 0;
}

/**
 * ompi_osc_pt2pt_get_peers:
 *
 * @param[in] module    - OSC PT2PT module
 * @param[in] sub_group - Group with ranks to translate
 *
 * @returns an array of peer objects on success or NULL on failure
 *
 * Translate the ranks given in {sub_group} into ranks in the
 * communicator used to create {module} and look up the corresponding
 * peer objects. The returned array should be released with
 * ompi_osc_pt2pt_release_peers().
 */
static ompi_osc_pt2pt_peer_t **ompi_osc_pt2pt_get_peers (ompi_osc_pt2pt_module_t *module, ompi_group_t *sub_group)
{
    int size = ompi_group_size(sub_group);
    ompi_osc_pt2pt_peer_t **peers;
    int *ranks1, *ranks2;
    int ret;

    ranks1 = calloc (size, sizeof(int));
    ranks2 = calloc (size, sizeof(int));
    peers = calloc (size, sizeof (ompi_osc_pt2pt_peer_t *));
    if (NULL == ranks1 || NULL == ranks2 || NULL == peers) {
        free (ranks1);
        free (ranks2);
        free (peers);
        return NULL;
    }

    for (int i = 0 ; i < size ; ++i) {
        ranks1[i] = i;
    }
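
    /* translate every rank in sub_group (0..size-1) into its rank in the
     * communicator the window was created on */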
    ret = ompi_group_translate_ranks (sub_group, size, ranks1, module->comm->c_local_group,
                                      ranks2);
    free (ranks1);
    if (OMPI_SUCCESS != ret) {
        free (ranks2);
        free (peers);
        return NULL;
    }
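
    /* sort the translated ranks so the peer list is in ascending
     * communicator-rank order (see compare_ranks above) */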
    qsort (ranks2, size, sizeof (int), compare_ranks);
    for (int i = 0 ; i < size ; ++i) {
        peers[i] = ompi_osc_pt2pt_peer_lookup (module, ranks2[i]);
        OBJ_RETAIN(peers[i]);
    }
    free (ranks2);

    return peers;
}

static void ompi_osc_pt2pt_release_peers (ompi_osc_pt2pt_peer_t **peers, int npeers)
{
    for (int i = 0 ; i < npeers ; ++i) {
        OBJ_RELEASE(peers[i]);
    }

    free (peers);
}

int ompi_osc_pt2pt_fence(int assert, ompi_win_t *win)
{
    ompi_osc_pt2pt_module_t *module = GET_MODULE(win);
    uint32_t incoming_reqs;
    int ret = OMPI_SUCCESS;

    OPAL_OUTPUT_VERBOSE((25, ompi_osc_base_framework.framework_output,
                         "osc pt2pt: fence start"));

    /* can't enter an active target epoch when in a passive target epoch */
    if (ompi_osc_pt2pt_in_passive_epoch (module)) {
        OPAL_OUTPUT_VERBOSE((25, ompi_osc_base_framework.framework_output,
                             "osc pt2pt: could not enter fence. already in an access epoch"));
        return OMPI_ERR_RMA_SYNC;
    }

    /* active sends are now active (we will close the epoch if NOSUCCEED is specified) */
    if (0 == (assert & MPI_MODE_NOSUCCEED)) {
        module->all_sync.type = OMPI_OSC_PT2PT_SYNC_TYPE_FENCE;
        module->all_sync.eager_send_active = true;
    }

    /* short-circuit the noprecede case */
    if (0 != (assert & MPI_MODE_NOPRECEDE)) {
        OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                             "osc pt2pt: fence end (short circuit)"));
        return ret;
    }

    /* try to start all requests. */
    ret = ompi_osc_pt2pt_frag_flush_all(module);
    if (OMPI_SUCCESS != ret) {
        return ret;
    }

    OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                         "osc pt2pt: fence done sending"));

    /* find out how much data everyone is going to send us. */
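    /* each process contributes its epoch_outgoing_frag_count[] vector (one
     * slot per peer); the reduce-scatter sums these vectors element-wise and
     * delivers to every rank the single entry that corresponds to it, i.e.
     * the total number of fragments headed its way */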
    ret = module->comm->c_coll.coll_reduce_scatter_block (module->epoch_outgoing_frag_count,
                                                          &incoming_reqs, 1, MPI_UINT32_T,
                                                          MPI_SUM, module->comm,
                                                          module->comm->c_coll.coll_reduce_scatter_block_module);
    if (OMPI_SUCCESS != ret) {
        return ret;
    }

    OPAL_THREAD_LOCK(&module->lock);
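    /* reset the per-peer outgoing fragment counts for the next epoch */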
    bzero(module->epoch_outgoing_frag_count,
          sizeof(uint32_t) * ompi_comm_size(module->comm));

    OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                         "osc pt2pt: fence expects %d requests",
                         incoming_reqs));

    /* set our complete condition for incoming requests */
    module->active_incoming_frag_signal_count += incoming_reqs;

    /* wait for completion */
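    /* done once all our outgoing fragments have finished sending and we have
     * processed as many incoming fragments as our peers signalled */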
    while (module->outgoing_frag_count != module->outgoing_frag_signal_count ||
           module->active_incoming_frag_count < module->active_incoming_frag_signal_count) {
        opal_condition_wait(&module->cond, &module->lock);
    }

    if (assert & MPI_MODE_NOSUCCEED) {
        /* as specified in MPI-3 p. 438 lines 3-5, the fence can end an epoch. it isn't
         * explicitly stated that MPI_MODE_NOSUCCEED ends the epoch, but it is a safe
         * assumption. */
        ompi_osc_pt2pt_sync_reset (&module->all_sync);
    }

    module->all_sync.epoch_active = false;

    opal_condition_broadcast (&module->cond);
    OPAL_THREAD_UNLOCK(&module->lock);

    OPAL_OUTPUT_VERBOSE((25, ompi_osc_base_framework.framework_output,
                         "osc pt2pt: fence end: %d", ret));

    return OMPI_SUCCESS;
}

int ompi_osc_pt2pt_start (ompi_group_t *group, int assert, ompi_win_t *win)
{
    ompi_osc_pt2pt_module_t *module = GET_MODULE(win);
    ompi_osc_pt2pt_sync_t *sync = &module->all_sync;

    OPAL_THREAD_LOCK(&module->lock);

    /* check if we are already in an access epoch */
    if (ompi_osc_pt2pt_access_epoch_active (module)) {
        OPAL_THREAD_UNLOCK(&module->lock);
        return OMPI_ERR_RMA_SYNC;
    }

    /* mark all procs in this group as being in an access epoch */
    sync->num_peers = ompi_group_size (group);
    sync->sync.pscw.group = group;

    /* haven't processed any post messages yet */
    sync->sync_expected = sync->num_peers;

    OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                         "ompi_osc_pt2pt_start entering with group size %d...",
                         sync->num_peers));

    if (0 == ompi_group_size (group)) {
        /* nothing more to do. this is an empty start epoch */
        OPAL_THREAD_UNLOCK(&module->lock);
        return OMPI_SUCCESS;
    }
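
    /* make sure the sync object updates above are visible to other threads
     * before the epoch becomes active */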
    opal_atomic_wmb ();

    sync->type = OMPI_OSC_PT2PT_SYNC_TYPE_PSCW;

    /* prevent us from entering a passive-target, fence, or another pscw access epoch until
     * the matching complete is called */
    sync->epoch_active = true;

    /* translate the group ranks into the communicator */
    sync->peer_list.peers = ompi_osc_pt2pt_get_peers (module, group);
    if (NULL == sync->peer_list.peers) {
        OPAL_THREAD_UNLOCK(&module->lock);
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    /* save the group */
    OBJ_RETAIN(group);
    ompi_group_increment_proc_count(group);

    if (!(assert & MPI_MODE_NOCHECK)) {
        OPAL_THREAD_LOCK(&sync->lock);
        for (int i = 0 ; i < sync->num_peers ; ++i) {
            ompi_osc_pt2pt_peer_t *peer = sync->peer_list.peers[i];

            if (peer->unexpected_post) {
                /* the peer already sent a post message for this pscw access epoch */
                OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                                     "found unexpected post from %d",
                                     peer->rank));
                OPAL_THREAD_ADD32 (&sync->sync_expected, -1);
                peer->unexpected_post = false;
            }
        }
        OPAL_THREAD_UNLOCK(&sync->lock);
    } else {
        sync->sync_expected = 0;
    }

    OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                         "post messages still needed: %d", sync->sync_expected));

    /* if we've already received all the post messages, we can eager
       send. Otherwise, eager send will be enabled when
       sync_expected reaches 0 */
    if (0 == sync->sync_expected) {
        sync->eager_send_active = true;
    }

    OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                         "ompi_osc_pt2pt_start complete. eager sends active: %d",
                         sync->eager_send_active));

    OPAL_THREAD_UNLOCK(&module->lock);
    return OMPI_SUCCESS;
}

int ompi_osc_pt2pt_complete (ompi_win_t *win)
{
    ompi_osc_pt2pt_module_t *module = GET_MODULE(win);
    ompi_osc_pt2pt_sync_t *sync = &module->all_sync;
    int my_rank = ompi_comm_rank (module->comm);
    ompi_osc_pt2pt_peer_t **peers;
    int ret = OMPI_SUCCESS;
    ompi_group_t *group;
    size_t group_size;

    OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                         "ompi_osc_pt2pt_complete entering..."));

    OPAL_THREAD_LOCK(&module->lock);
    if (OMPI_OSC_PT2PT_SYNC_TYPE_PSCW != sync->type) {
        OPAL_THREAD_UNLOCK(&module->lock);
        return OMPI_ERR_RMA_SYNC;
    }

    /* wait for all the post messages */
    ompi_osc_pt2pt_sync_wait (sync);

    /* phase 1 cleanup sync object */
    group = sync->sync.pscw.group;
    group_size = sync->num_peers;

    peers = sync->peer_list.peers;
    if (NULL == peers) {
        /* empty peer list */
        OPAL_THREAD_UNLOCK(&(module->lock));
        OBJ_RELEASE(group);
        return OMPI_SUCCESS;
    }

    OPAL_THREAD_UNLOCK(&module->lock);

    OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                         "ompi_osc_pt2pt_complete all posts received. sending complete messages..."));

    /* for each process in group, send a control message with number
       of updates coming, then start all the requests. Note that the
       control send is processed as another message in a fragment, so
       this might get queued until the flush_all (which is fine).

       At the same time, clean out the outgoing count for the next
       round. */
    for (size_t i = 0 ; i < group_size ; ++i) {
        ompi_osc_pt2pt_header_complete_t complete_req;
        int rank = peers[i]->rank;

        if (my_rank == rank) {
            /* shortcut for self */
            osc_pt2pt_incoming_complete (module, rank, 0);
            continue;
        }

        ompi_proc_t *proc = ompi_comm_peer_lookup (module->comm, rank);

        complete_req.base.type = OMPI_OSC_PT2PT_HDR_TYPE_COMPLETE;
        complete_req.base.flags = OMPI_OSC_PT2PT_HDR_FLAG_VALID;
#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT && OPAL_ENABLE_DEBUG
        complete_req.padding[0] = 0;
        complete_req.padding[1] = 0;
#endif
        complete_req.frag_count = module->epoch_outgoing_frag_count[rank];
        osc_pt2pt_hton(&complete_req, proc);

        ompi_osc_pt2pt_peer_t *peer = ompi_osc_pt2pt_peer_lookup (module, rank);

        /* XXX -- TODO -- since fragments are always delivered in order we do not need to count
         * anything but long requests. once that is done this can be removed. */
        if (peer->active_frag && (peer->active_frag->remain_len < sizeof (complete_req))) {
            ++complete_req.frag_count;
        }

        OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                             "ompi_osc_pt2pt_complete sending complete message to %d. frag_count: %u",
                             rank, complete_req.frag_count));

        ret = ompi_osc_pt2pt_control_send (module, rank, &complete_req,
                                           sizeof(ompi_osc_pt2pt_header_complete_t));
        if (OMPI_SUCCESS != ret) {
            break;
        }

        ret = ompi_osc_pt2pt_frag_flush_target (module, rank);
        if (OMPI_SUCCESS != ret) {
            break;
        }

        /* zero the fragment count here to ensure it is reset for the next epoch */
        module->epoch_outgoing_frag_count[rank] = 0;
    }

    /* release our reference to peers in this group */
    ompi_osc_pt2pt_release_peers (peers, group_size);

    if (OMPI_SUCCESS != ret) {
        return ret;
    }

    OPAL_THREAD_LOCK(&module->lock);
    /* wait for outgoing requests to complete. Don't wait for incoming, as
       we're only completing the access epoch, not the exposure epoch */
    while (module->outgoing_frag_count != module->outgoing_frag_signal_count) {
        opal_condition_wait(&module->cond, &module->lock);
    }

    ompi_osc_pt2pt_sync_reset (sync);

    /* unlock here, as group cleanup can take a while... */
    OPAL_THREAD_UNLOCK(&module->lock);

    /* phase 2 cleanup group */
    ompi_group_decrement_proc_count(group);
    OBJ_RELEASE(group);

    OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                         "ompi_osc_pt2pt_complete complete"));

    return OMPI_SUCCESS;
}

int ompi_osc_pt2pt_post (ompi_group_t *group, int assert, ompi_win_t *win)
{
    int ret = OMPI_SUCCESS;
    ompi_osc_pt2pt_module_t *module = GET_MODULE(win);
    ompi_osc_pt2pt_header_post_t post_req;
    ompi_osc_pt2pt_peer_t **peers;

    /* can't check for an active access epoch here because a fence epoch may be in progress */
    if (module->pw_group) {
        return OMPI_ERR_RMA_SYNC;
    }

    OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                         "ompi_osc_pt2pt_post entering with group size %d...",
                         ompi_group_size (group)));

    OPAL_THREAD_LOCK(&module->lock);

    /* ensure we're not already in a post */
    if (NULL != module->pw_group) {
        OPAL_THREAD_UNLOCK(&(module->lock));
        return OMPI_ERR_RMA_SYNC;
    }

    /* save the group */
    OBJ_RETAIN(group);
    ompi_group_increment_proc_count(group);

    module->pw_group = group;

    /* Update completion counter. Can't have received any completion
       messages yet; complete won't send a completion header until
       we've sent a post header. */
    module->num_complete_msgs = -ompi_group_size(module->pw_group);

    OPAL_THREAD_UNLOCK(&(module->lock));

    if ((assert & MPI_MODE_NOCHECK) || 0 == ompi_group_size (group)) {
        return OMPI_SUCCESS;
    }

    OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                         "sending post messages"));

    /* translate group ranks into the communicator */
    peers = ompi_osc_pt2pt_get_peers (module, module->pw_group);
    if (OPAL_UNLIKELY(NULL == peers)) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    /* send a post message to everyone in the group */
    for (int i = 0 ; i < ompi_group_size(module->pw_group) ; ++i) {
        ompi_osc_pt2pt_peer_t *peer = peers[i];
        int rank = peer->rank;

        OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output, "Sending post message to rank %d", rank));
        ompi_proc_t *proc = ompi_comm_peer_lookup (module->comm, rank);

        /* shortcut for self */
        if (ompi_proc_local() == proc) {
            OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output, "ompi_osc_pt2pt_post self post"));
            osc_pt2pt_incoming_post (module, ompi_comm_rank(module->comm));
            continue;
        }

        post_req.base.type = OMPI_OSC_PT2PT_HDR_TYPE_POST;
        post_req.base.flags = OMPI_OSC_PT2PT_HDR_FLAG_VALID;
        osc_pt2pt_hton(&post_req, proc);

        /* we don't want to send any data, since this is only the exposure
           epoch, so use an unbuffered send */
        ret = ompi_osc_pt2pt_control_send_unbuffered(module, rank, &post_req,
                                                     sizeof(ompi_osc_pt2pt_header_post_t));
        if (OMPI_SUCCESS != ret) {
            break;
        }
    }

    ompi_osc_pt2pt_release_peers (peers, ompi_group_size(module->pw_group));

    return ret;
}

int ompi_osc_pt2pt_wait (ompi_win_t *win)
{
    ompi_osc_pt2pt_module_t *module = GET_MODULE(win);
    ompi_group_t *group;

    if (NULL == module->pw_group) {
        return OMPI_ERR_RMA_SYNC;
    }

    OPAL_OUTPUT_VERBOSE((25, ompi_osc_base_framework.framework_output,
                         "ompi_osc_pt2pt_wait entering..."));

    OPAL_THREAD_LOCK(&module->lock);
    while (0 != module->num_complete_msgs ||
           module->active_incoming_frag_count != module->active_incoming_frag_signal_count) {
        OPAL_OUTPUT_VERBOSE((25, ompi_osc_base_framework.framework_output, "num_complete_msgs = %d, "
                             "active_incoming_frag_count = %d, active_incoming_frag_signal_count = %d",
                             module->num_complete_msgs, module->active_incoming_frag_count,
                             module->active_incoming_frag_signal_count));
        opal_condition_wait(&module->cond, &module->lock);
    }

    group = module->pw_group;
    module->pw_group = NULL;
    OPAL_THREAD_UNLOCK(&module->lock);

    ompi_group_decrement_proc_count(group);
    OBJ_RELEASE(group);

    OPAL_OUTPUT_VERBOSE((25, ompi_osc_base_framework.framework_output,
                         "ompi_osc_pt2pt_wait complete"));

    return OMPI_SUCCESS;
}

int ompi_osc_pt2pt_test (ompi_win_t *win, int *flag)
{
    ompi_osc_pt2pt_module_t *module = GET_MODULE(win);
    ompi_group_t *group;
    int ret = OMPI_SUCCESS;
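
    /* without a progress thread, drive communication once here so pending
     * completion messages have a chance to arrive before we sample the
     * counters below */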
#if !OPAL_ENABLE_PROGRESS_THREADS
    opal_progress();
#endif

    if (NULL == module->pw_group) {
        return OMPI_ERR_RMA_SYNC;
    }

    OPAL_THREAD_LOCK(&(module->lock));
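
    /* the exposure epoch is over once num_complete_msgs has returned to zero
     * (it starts at -(group size); see ompi_osc_pt2pt_post) and every
     * signalled incoming fragment has been processed */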
    if (0 != module->num_complete_msgs ||
        module->active_incoming_frag_count != module->active_incoming_frag_signal_count) {
        *flag = 0;
        ret = OMPI_SUCCESS;
    } else {
        *flag = 1;

        group = module->pw_group;
        module->pw_group = NULL;

        OPAL_THREAD_UNLOCK(&(module->lock));

        ompi_group_decrement_proc_count(group);
        OBJ_RELEASE(group);

        return OMPI_SUCCESS;
    }

    OPAL_THREAD_UNLOCK(&(module->lock));

    return ret;
}

void osc_pt2pt_incoming_complete (ompi_osc_pt2pt_module_t *module, int source, int frag_count)
{
    OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                         "osc pt2pt: process_complete got complete message from %d. expected fragment count %d. "
                         "current signal count %d. current incoming count: %d. expected complete msgs: %d",
                         source, frag_count, module->active_incoming_frag_signal_count,
                         module->active_incoming_frag_count, module->num_complete_msgs));

    /* the current fragment is not part of the frag_count so we need to add it here */
    OPAL_THREAD_ADD32((int32_t *) &module->active_incoming_frag_signal_count, frag_count);
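
    /* num_complete_msgs starts at -(group size) when the post is set up; once
     * it climbs back to zero every complete message has arrived and any
     * waiters can be woken */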
    if (0 == OPAL_THREAD_ADD32((int32_t *) &module->num_complete_msgs, 1)) {
        opal_condition_broadcast (&module->cond);
    }
}

void osc_pt2pt_incoming_post (ompi_osc_pt2pt_module_t *module, int source)
{
    ompi_osc_pt2pt_sync_t *sync = &module->all_sync;

    OPAL_THREAD_LOCK(&sync->lock);

    /* verify that this proc is part of the current start group */
    if (!ompi_osc_pt2pt_sync_pscw_peer (module, source, NULL)) {
        ompi_osc_pt2pt_peer_t *peer = ompi_osc_pt2pt_peer_lookup (module, source);

        OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                             "received unexpected post message from %d for future PSCW synchronization",
                             source));

        peer->unexpected_post = true;
        OPAL_THREAD_UNLOCK(&sync->lock);
    } else {
        OPAL_THREAD_UNLOCK(&sync->lock);

        ompi_osc_pt2pt_sync_expected (sync);

        OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_framework.framework_output,
                             "received post message for PSCW synchronization. post messages still needed: %d",
                             sync->sync_expected));
    }
}