/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
 * Copyright (c) 2009-2013 Oak Ridge National Laboratory. All rights reserved.
 * Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2012-2014 Los Alamos National Security, LLC. All rights
 *                         reserved.
 * Copyright (c) 2013-2014 Cisco Systems, Inc. All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

/**
 * @file
 *
 * Most of the description of the data layout is in the
 * coll_ml_module.c file.
 */

#include "ompi_config.h"

#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <errno.h>

#include "ompi/constants.h"
#include "ompi/communicator/communicator.h"
#include "ompi/mca/coll/coll.h"
#include "ompi/mca/coll/base/base.h"
#include "ompi/mca/sbgp/base/base.h"
#include "ompi/mca/bcol/base/base.h"
#include "ompi/mca/sbgp/sbgp.h"
#include "ompi/patterns/comm/coll_ops.h"
#include "ompi/mca/coll/ml/coll_ml.h"

#include "opal/util/argv.h"
#include "opal/datatype/opal_datatype.h"
#include "opal/util/output.h"
#include "opal/util/arch.h"

#include "coll_ml.h"
#include "coll_ml_inlines.h"
#include "coll_ml_select.h"
#include "coll_ml_custom_utils.h"
#include "coll_ml_allocation.h"

static int coll_ml_parse_topology (sub_group_params_t *sub_group_meta_data, size_t sub_group_count,
                                   int *list_of_ranks_in_all_subgroups, int level_one_size);

/* #define NEW_LEADER_SELECTION */

struct rank_proxy_t {
    /* number of subgroups for which the rank is a proxy */
    int number_subgroups;
    /* subgroup indices */
    int *subgroup_index;
};
typedef struct rank_proxy_t rank_proxy_t;
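
/*
 * Illustrative example of the structure above (values are hypothetical):
 * if rank 5 acts as the proxy for subgroups 2 and 7, then
 * number_subgroups == 2 and subgroup_index points at {2, 7}.
 */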

#define PROVIDE_SUFFICIENT_MEMORY(ptr, dummy_ptr, ptr_size, unit_type, in_use,    \
                                  n_to_add, n_to_grow)                            \
    do {                                                                          \
        if ((in_use) + (n_to_add) > (ptr_size)) {                                 \
            (dummy_ptr) = (unit_type *)                                           \
                realloc(ptr, sizeof(unit_type) * ((ptr_size) + (n_to_grow)));     \
            if (NULL != (dummy_ptr)) {                                            \
                (ptr) = (dummy_ptr);                                              \
                (ptr_size) += (n_to_grow);                                        \
            }                                                                     \
        }                                                                         \
    } while (0)
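
/*
 * Usage sketch for PROVIDE_SUFFICIENT_MEMORY (illustrative only; the
 * variable names below are hypothetical).  The macro grows the backing
 * allocation but deliberately leaves two responsibilities with the
 * caller: checking the pointer for NULL (allocation failure) and
 * advancing the in-use count.
 *
 *     int *array = NULL, *tmp_ptr = NULL;
 *     int capacity = 0, in_use = 0;
 *
 *     PROVIDE_SUFFICIENT_MEMORY(array, tmp_ptr, capacity, int, in_use, 1, 8);
 *     if (NULL == array) {
 *         return OMPI_ERR_OUT_OF_RESOURCE;
 *     }
 *     array[in_use++] = new_value;
 */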

/*
 * Local functions
 */

static int ml_module_enable(mca_coll_base_module_t *module,
                            struct ompi_communicator_t *comm);

static int mca_coll_ml_fill_in_route_tab(mca_coll_ml_topology_t *topo,
                                         ompi_communicator_t *comm);

static void
mca_coll_ml_module_construct(mca_coll_ml_module_t *module)
{
    int index_topo, coll_i, st_i;
    mca_coll_ml_topology_t *topo;

    module->max_fn_calls = 0;
    module->initialized = false;
    module->comm = NULL;
    module->collective_sequence_num = 0;
    module->no_data_collective_sequence_num = 0;
    module->payload_block = NULL;

    module->reference_convertor = NULL;

    /* It's critical to reset data_offset; -1 marks it as not yet computed */
    module->data_offset = -1;

    module->coll_ml_barrier_function = NULL;

    /* If the topology supports zero levels and no fragmentation was requested */
    for (index_topo = 0; index_topo < COLL_ML_TOPO_MAX; index_topo++) {
        topo = &module->topo_list[index_topo];
        topo->global_lowest_hier_group_index = -1;
        topo->global_highest_hier_group_index = -1;
        topo->number_of_all_subgroups = -1;
        topo->n_levels = -1;
        topo->sort_list = NULL;
        topo->all_bcols_mode = ~(0); /* set to all bits */
        topo->route_vector = NULL;
        topo->array_of_all_subgroups = NULL;
        topo->component_pairs = NULL;
        topo->hier_layout_info = NULL;
        topo->status = COLL_ML_TOPO_DISABLED; /* no topology is used by default */

        /* Init ordering info */
        topo->topo_ordering_info.next_inorder = 0;
        topo->topo_ordering_info.next_order_num = 0;
        topo->topo_ordering_info.num_bcols_need_ordering = 0;

        memset(topo->hierarchical_algorithms, 0,
               BCOL_NUM_OF_FUNCTIONS * sizeof(coll_ml_collective_description_t *));
    }

    for (coll_i = 0; coll_i < ML_NUM_OF_FUNCTIONS; coll_i++) {
        for (st_i = 0; st_i < MCA_COLL_MAX_NUM_SUBTYPES; st_i++) {
            module->collectives_topology_map[coll_i][st_i] = ML_UNDEFINED;
        }
    }

    for (coll_i = 0; coll_i < BCOL_NUM_OF_FUNCTIONS; ++coll_i) {
        module->small_message_thresholds[coll_i] = BCOL_THRESHOLD_UNLIMITED;
    }

    OBJ_CONSTRUCT(&module->active_bcols_list, opal_list_t);
    OBJ_CONSTRUCT(&module->waiting_for_memory_list, opal_list_t);
    OBJ_CONSTRUCT(&module->fragment_descriptors, ompi_free_list_t);
    OBJ_CONSTRUCT(&module->message_descriptors, ompi_free_list_t);
    OBJ_CONSTRUCT(&module->coll_ml_collective_descriptors, ompi_free_list_t);

    memset (&module->fallback, 0, sizeof (module->fallback));
}

#define ML_RELEASE_FALLBACK(_coll_ml, _coll)                             \
    do {                                                                 \
        if (_coll_ml->fallback.coll_ ## _coll ## _module) {              \
            OBJ_RELEASE(_coll_ml->fallback.coll_ ## _coll ## _module);   \
            _coll_ml->fallback.coll_ ## _coll ## _module = NULL;         \
        }                                                                \
    } while (0)
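
/*
 * Usage sketch: the module destructor below calls this once per saved
 * collective, e.g. ML_RELEASE_FALLBACK(module, allreduce);, which expands
 * to a guarded OBJ_RELEASE of module->fallback.coll_allreduce_module
 * followed by resetting the pointer to NULL.
 */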

static void
mca_coll_ml_module_destruct(mca_coll_ml_module_t *module)
{
    int i, j, k, fnc, index_topo;
    mca_coll_ml_topology_t *topo;

    ML_VERBOSE(4, ("ML module destruct"));

    for (index_topo = 0; index_topo < COLL_ML_TOPO_MAX; index_topo++) {
        topo = &module->topo_list[index_topo];
        if (COLL_ML_TOPO_DISABLED == topo->status) {
            /* skip the topology */
            continue;
        }

        if (NULL != topo->component_pairs) {
            for (i = 0; i < topo->n_levels; ++i) {
                for (j = 0; j < topo->component_pairs[i].num_bcol_modules; ++j) {
                    OBJ_RELEASE(topo->component_pairs[i].bcol_modules[j]);
                }
                /* free the array of bcol modules */
                free(topo->component_pairs[i].bcol_modules);

                OBJ_RELEASE(topo->component_pairs[i].subgroup_module);
            }

            free(topo->component_pairs);
        }

        /* gvm Leak FIX Free collective algorithms structure */
        for (fnc = 0; fnc < BCOL_NUM_OF_FUNCTIONS; fnc++) {
            if (NULL != topo->hierarchical_algorithms[fnc]) {
                free(topo->hierarchical_algorithms[fnc]);
            }
        }

        /* free up the route vector memory */
        if (NULL != topo->route_vector) {
            free(topo->route_vector);
        }
        /* free resource description */
        if (NULL != topo->array_of_all_subgroups) {
            for (k = 0; k < topo->number_of_all_subgroups; k++) {
                if (0 < topo->array_of_all_subgroups[k].n_ranks) {
                    free(topo->array_of_all_subgroups[k].rank_data);
                    topo->array_of_all_subgroups[k].rank_data = NULL;
                }
            }
            free(topo->array_of_all_subgroups);
            topo->array_of_all_subgroups = NULL;
        }
    }

    OBJ_DESTRUCT(&(module->active_bcols_list));
    OBJ_DESTRUCT(&(module->waiting_for_memory_list));

    /* gvm Leak FIX Remove fragment free list */
    OBJ_DESTRUCT(&(module->fragment_descriptors));
    OBJ_DESTRUCT(&(module->message_descriptors));
    /* push mca_bcol_base_memory_block_desc_t back on list manager */
    mca_coll_ml_free_block(module->payload_block);
    /* release the convertor if it was allocated */
    if (NULL != module->reference_convertor) {
        OBJ_RELEASE(module->reference_convertor);
    }

    OBJ_DESTRUCT(&(module->coll_ml_collective_descriptors));

    if (NULL != module->coll_ml_barrier_function) {
        free(module->coll_ml_barrier_function);
    }

    /* release saved collectives */
    ML_RELEASE_FALLBACK(module, allreduce);
    ML_RELEASE_FALLBACK(module, allgather);
    ML_RELEASE_FALLBACK(module, reduce);
    ML_RELEASE_FALLBACK(module, bcast);
    ML_RELEASE_FALLBACK(module, iallreduce);
    ML_RELEASE_FALLBACK(module, iallgather);
    ML_RELEASE_FALLBACK(module, ireduce);
    ML_RELEASE_FALLBACK(module, ibcast);
}

static int mca_coll_ml_request_free(ompi_request_t** request)
{
    /* local variables */
    mca_coll_ml_collective_operation_progress_t *ml_request =
        (mca_coll_ml_collective_operation_progress_t *)(*request);
    mca_coll_ml_module_t *ml_module = OP_ML_MODULE(ml_request);

    /* The ML memory bank recycling check is done, now we may
     * return the request and signal completion */

    /* this fragment does not hold the message data, so it is ok to return */
    assert(0 == ml_request->pending);
    /* assert(0 == ml_request->fragment_data.offset_into_user_buffer); */
    assert(&ml_request->full_message == ml_request->fragment_data.message_descriptor);
    assert(ml_request->dag_description.status_array[0].item.opal_list_item_refcount == 0);
    ML_VERBOSE(10, ("Releasing Master %p", ml_request));
    /* Mark the request as invalid */
    OMPI_REQUEST_FINI(&ml_request->full_message.super);
    OMPI_FREE_LIST_RETURN_MT(&(ml_module->coll_ml_collective_descriptors),
                             (ompi_free_list_item_t *)ml_request);

    /* MPI needs to return with the request object set to MPI_REQUEST_NULL */
    *request = MPI_REQUEST_NULL;

    return OMPI_SUCCESS;
}

/* constructor for collective management descriptor */
static void mca_coll_ml_collective_operation_progress_construct
    (mca_coll_ml_collective_operation_progress_t *desc) {

    /* initialize pointer */
    desc->dag_description.status_array = NULL;

    OBJ_CONSTRUCT(&desc->full_message.send_convertor, opal_convertor_t);
    OBJ_CONSTRUCT(&desc->full_message.recv_convertor, opal_convertor_t);

    OBJ_CONSTRUCT(&desc->full_message.dummy_convertor, opal_convertor_t);

    /* initialize the request free pointer */
    desc->full_message.super.req_free = mca_coll_ml_request_free;

    /* no cancel function */
    desc->full_message.super.req_cancel = NULL;
    /* Collective request type */
    desc->full_message.super.req_type = OMPI_REQUEST_COLL;
    /* RLG: Do we need to set req_mpi_object ? */

    /* If not NULL, we have to release the next fragment */
    desc->next_to_process_frag = NULL;

    /* pointer to previous fragment */
    desc->prev_frag = NULL;

    /* Pasha: more init */
    desc->pending = 0;
}

/* destructor for collective management descriptor */
static void mca_coll_ml_collective_operation_progress_destruct
    (mca_coll_ml_collective_operation_progress_t *desc) {
    mca_coll_ml_module_t *ml_module =
        (mca_coll_ml_module_t *) desc->coll_module;

    int i, max_dag_size = ml_module->max_dag_size;

    if (NULL != desc->dag_description.status_array) {
        for (i = 0; i < max_dag_size; ++i) {
            OBJ_DESTRUCT(&desc->dag_description.status_array[i].item);
        }

        free(desc->dag_description.status_array);
        desc->dag_description.status_array = NULL;
    }

    OBJ_DESTRUCT(&desc->full_message.send_convertor);
    OBJ_DESTRUCT(&desc->full_message.recv_convertor);

    OBJ_DESTRUCT(&desc->full_message.dummy_convertor);
}

/* initialize the full message descriptor - can pass in module specific
 * initialization data
 */
static void init_ml_fragment_desc(ompi_free_list_item_t *desc, void *ctx);
static void init_ml_message_desc(ompi_free_list_item_t *desc, void *ctx)
{
    mca_coll_ml_module_t *module = (mca_coll_ml_module_t *) ctx;
    mca_coll_ml_descriptor_t *msg_desc = (mca_coll_ml_descriptor_t *) desc;

    /* finish setting up the fragment descriptor */
    init_ml_fragment_desc((ompi_free_list_item_t *)&(msg_desc->fragment), module);
}

/* initialize the fragment descriptor - can pass in module specific
 * initialization data
 */
static void init_ml_fragment_desc(ompi_free_list_item_t *desc, void *ctx)
{
    mca_coll_ml_module_t *module = (mca_coll_ml_module_t *) ctx;
    mca_coll_ml_fragment_t *frag_desc = (mca_coll_ml_fragment_t *) desc;

    /* allocate the array of function arguments */
    /* RLG - we have a problem if we don't get the memory */
    /* malloc-debug does not like zero allocations */
    if (module->max_fn_calls > 0) {
        frag_desc->fn_args = (bcol_function_args_t *)
            malloc(sizeof(bcol_function_args_t) * module->max_fn_calls);
    }
}
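
/*
 * Note (assumption, not shown in this excerpt): these callbacks are the
 * per-item initializers expected to be passed to the free-list setup for
 * message_descriptors and fragment_descriptors, so that every newly grown
 * list item is initialized with the ml module as its context argument.
 */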

static void mca_coll_ml_bcol_list_item_construct(mca_coll_ml_bcol_list_item_t *item)
{
    item->bcol_module = NULL;
}
OBJ_CLASS_INSTANCE(mca_coll_ml_bcol_list_item_t,
                   opal_list_item_t,
                   mca_coll_ml_bcol_list_item_construct,
                   NULL);

static void generate_active_bcols_list(mca_coll_ml_module_t *ml_module)
{
    int i, j, index_topo;
    mca_coll_ml_topology_t *topo;
    bool bcol_was_found;
    mca_coll_ml_bcol_list_item_t *bcol_item = NULL;
    mca_bcol_base_module_t *bcol_module = NULL;

    ML_VERBOSE(10, ("Generating active bcol list"));

    for (index_topo = 0; index_topo < COLL_ML_TOPO_MAX; index_topo++) {
        topo = &ml_module->topo_list[index_topo];
        if (COLL_ML_TOPO_DISABLED == topo->status) {
            /* skip the topology */
            continue;
        }
        for (i = 0; i < topo->n_levels; i++) {

            for (j = 0; j < topo->component_pairs[i].num_bcol_modules; j++) {
                bcol_module = topo->component_pairs[i].bcol_modules[j];

                /* Check whether the bcol provides a synchronization function.
                 * If the function is not provided we skip this bcol, since it
                 * is not used for memory synchronization (for instance - ptpcoll) */
                if (NULL == GET_BCOL_SYNC_FN(bcol_module)) {
                    ML_VERBOSE(10, ("No sync function was provided by bcol %s",
                                    bcol_module->bcol_component->bcol_version.mca_component_name));
                    continue;
                }

                bcol_was_found = false;
                for (bcol_item = (mca_coll_ml_bcol_list_item_t *) opal_list_get_first(&ml_module->active_bcols_list);
                     !bcol_was_found &&
                         bcol_item != (mca_coll_ml_bcol_list_item_t *) opal_list_get_end(&ml_module->active_bcols_list);
                     bcol_item = (mca_coll_ml_bcol_list_item_t *) opal_list_get_next((opal_list_item_t *) bcol_item)) {
                    if (bcol_module == bcol_item->bcol_module) {
                        bcol_was_found = true;
                    }
                }

                /* append the item to the list if it was not found */
                if (!bcol_was_found) {
                    bcol_item = OBJ_NEW(mca_coll_ml_bcol_list_item_t);
                    bcol_item->bcol_module = bcol_module;
                    opal_list_append(&ml_module->active_bcols_list, (opal_list_item_t *) bcol_item);
                }

            }
        }
    }
}

static int calculate_buffer_header_size(mca_coll_ml_module_t *ml_module)
{
    mca_coll_ml_topology_t *topo;
    mca_bcol_base_module_t *bcol_module;

    uint32_t offset = 0;
    int i, j, *ranks_in_comm, kount = 0,
        rc, data_offset = 0, index_topo,
        comm_size = ompi_comm_size(ml_module->comm);

    ML_VERBOSE(10, ("Calculating offset for the ML"));

    /* not ideal, but we have to loop over the topologies twice */

    for (index_topo = 0; index_topo < COLL_ML_TOPO_MAX; index_topo++) {
        topo = &ml_module->topo_list[index_topo];
        if (COLL_ML_TOPO_DISABLED == topo->status) {
            /* skip the topology */
            continue;
        }

        for (i = 0; i < topo->n_levels; i++) {
            for (j = 0; j < topo->component_pairs[i].num_bcol_modules; j++) {
                bcol_module = topo->component_pairs[i].bcol_modules[j];
                if (0 < bcol_module->header_size) {
                    /* bump the counter */
                    kount++;
                    /* find the largest header request */
                    if (offset < bcol_module->header_size) {
                        offset = bcol_module->header_size;
                    }
                }

                /* Set bcol mode bits */
                topo->all_bcols_mode &= bcol_module->supported_mode;
            }
        }

        /* round the largest header up to a multiple of BCOL_HEAD_ALIGN */
        offset = ((offset + BCOL_HEAD_ALIGN - 1) / BCOL_HEAD_ALIGN) * BCOL_HEAD_ALIGN;
        /* select the largest offset between multiple topologies */
        if (data_offset < (int) offset) {
            data_offset = (int) offset;
        }
    }

    ranks_in_comm = (int *) malloc(comm_size * sizeof(int));
    if (OPAL_UNLIKELY(NULL == ranks_in_comm)) {
        ML_ERROR(("Memory allocation failed."));
        return OMPI_ERROR;
    }

    for (i = 0; i < comm_size; ++i) {
        ranks_in_comm[i] = i;
    }

    rc = comm_allreduce_pml(&data_offset, &data_offset, 1,
                            MPI_INT, ompi_comm_rank(ml_module->comm),
                            MPI_MAX, comm_size,
                            ranks_in_comm, ml_module->comm);

    if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
        ML_ERROR(("comm_allreduce_pml failed."));
        return OMPI_ERROR;
    }

    ml_module->data_offset = (uint32_t) data_offset;
    free(ranks_in_comm);

    ML_VERBOSE(10, ("The offset is %d", ml_module->data_offset));

    return OMPI_SUCCESS;
}
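
/*
 * Worked example of the header round-up in calculate_buffer_header_size()
 * (the numbers are illustrative): with BCOL_HEAD_ALIGN == 32, a largest
 * bcol header of 24 bytes rounds up to ((24 + 31) / 32) * 32 == 32, and a
 * 40-byte header rounds up to 64.  The maximum across all topologies is
 * then allreduced with MPI_MAX so that every rank uses the same
 * data_offset into the payload buffers.
 */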

static int mca_coll_ml_register_bcols(mca_coll_ml_module_t *ml_module)
{
    /* local variables */
    int i, j, index_topo;
    int ret = OMPI_SUCCESS;
    mca_bcol_base_module_t *bcol_module;
    mca_coll_ml_topology_t *topo;

    /* loop over all bcols and register the ml memory block with each */
    for (index_topo = 0; index_topo < COLL_ML_TOPO_MAX; index_topo++) {
        topo = &ml_module->topo_list[index_topo];
        if (COLL_ML_TOPO_DISABLED == topo->status) {
            /* skip the topology */
            continue;
        }

        for (i = 0; i < topo->n_levels; i++) {
            for (j = 0; j < topo->component_pairs[i].num_bcol_modules; j++) {
                bcol_module = topo->component_pairs[i].bcol_modules[j];
                if (NULL != bcol_module->bcol_memory_init) {
                    ret = bcol_module->bcol_memory_init(ml_module->payload_block,
                                                        ml_module->data_offset,
                                                        bcol_module,
                                                        (NULL != bcol_module->network_context) ?
                                                        bcol_module->network_context->context_data : NULL);
                    if (OMPI_SUCCESS != ret) {
                        ML_ERROR(("Bcol registration failed on ml level!!"));
                        return ret;
                    }
                }
            }
        }
    }

    return OMPI_SUCCESS;
}

static int ml_module_memory_initialization(mca_coll_ml_module_t *ml_module)
{
    int ret;
    int nbanks, nbuffers, buf_size;
    mca_coll_ml_component_t *cs = &mca_coll_ml_component;

    ml_module->payload_block = mca_coll_ml_allocate_block(cs, ml_module->payload_block);

    if (NULL == ml_module->payload_block) {
        ML_ERROR(("mca_coll_ml_allocate_block exited with error."));
        return OMPI_ERROR;
    }

    /* get memory block parameters */
    nbanks = cs->n_payload_mem_banks;
    nbuffers = cs->n_payload_buffs_per_bank;
    buf_size = cs->payload_buffer_size;

    ML_VERBOSE(10, ("Call for initialize block."));

    ret = mca_coll_ml_initialize_block(ml_module->payload_block,
                                       nbuffers, nbanks, buf_size, ml_module->data_offset,
                                       NULL);
    if (OMPI_SUCCESS != ret) {
        return ret;
    }

    ML_VERBOSE(10, ("Call for register bcols."));

    /* initialize the memory with all of the bcols:
       loop through the bcol modules and invoke the memory init */
    ret = mca_coll_ml_register_bcols(ml_module);
    if (OMPI_SUCCESS != ret) {
        ML_ERROR(("mca_coll_ml_register_bcols returned an error."));
        /* goto CLEANUP; */
        return ret;
    }

    return OMPI_SUCCESS;
}
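
/*
 * A note on the all_selected encoding assumed by the checks below
 * (inferred from the code itself): entry i is 0 when rank i was not
 * selected, the local leader's rank plus one (ll_p1) for an ordinary
 * group member, and -(ll_p1) for the local leader itself.
 */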

/* do some sanity checks */
static int check_global_view_of_subgroups( int n_procs_selected,
                                           int n_procs_in, int ll_p1, int *all_selected,
                                           mca_sbgp_base_module_t *module )
{
    /* local variables */
    int ret = OMPI_SUCCESS;
    int i, sum;

    bool local_leader_found = false;

    /* is there a single local-leader */
    for (i = 0; i < n_procs_selected; i++) {
        if (ll_p1 == -all_selected[module->group_list[i]]) {
            /* found the local leader */
            if (local_leader_found) {
                /* more than one local leader - don't know how to
                 * handle this, so bail
                 */
                ML_VERBOSE(1, ("More than a single leader for a group."));
                ret = OMPI_ERROR;
                goto exit_ERROR;
            } else {
                local_leader_found = true;
            }
        }
    }

    /* check to make sure that all agree on the same size of
     * the group
     */
    sum = 0;
    for (i = 0; i < n_procs_in; i++) {
        if (ll_p1 == all_selected[i]) {
            sum++;
        } else if (ll_p1 == -all_selected[i]) {
            sum++;
        }
    }
    if (sum != n_procs_selected) {
        ML_VERBOSE(1, ("Number of procs in the group unexpected. Expected %d Got %d", n_procs_selected, sum));
        ret = OMPI_ERROR;
        goto exit_ERROR;
    }
    /* check to make sure that all have the same list of ranks */
    for (i = 0; i < n_procs_selected; i++) {
        if (ll_p1 != all_selected[module->group_list[i]] &&
            ll_p1 != -all_selected[module->group_list[i]]) {
            ret = OMPI_ERROR;
            ML_VERBOSE(1, ("Mismatch in rank list - element #%d - %d ", i, all_selected[module->group_list[i]]));
            goto exit_ERROR;
        }
    }

exit_ERROR:
    /* return */
    return ret;
}
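
/*
 * Illustrative layout consumed by ml_init_k_nomial_trees() below, for a
 * hypothetical 4-rank, 2-level hierarchy: level 0 holds subgroups {0,1}
 * (root 0) and {2,3} (root 2), and level 1 holds the root subgroup {0,2}.
 * list_of_ranks_in_all_subgroups is then [0,1,2,3,0,2], with each
 * subgroup's index_of_first_element pointing at its slice.  For rank 3,
 * the level-0 offset is 2 and the level-one index is 1.
 */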

static void ml_init_k_nomial_trees(mca_coll_ml_topology_t *topo, int *list_of_ranks_in_all_subgroups, int my_rank_in_list)
{
    int *list_n_connected;
    int group_size, rank, i, j, knt, offset, k, my_sbgp = 0;
    int my_root, level_one_knt;
    sub_group_params_t *array_of_all_subgroup_ranks = topo->
        array_of_all_subgroups;
    int num_total_subgroups = topo->number_of_all_subgroups;
    int n_hier = topo->n_levels;

    hierarchy_pairs *pair = NULL;
    mca_coll_ml_leader_offset_info_t *loc_leader = (mca_coll_ml_leader_offset_info_t *)
        malloc(sizeof(mca_coll_ml_leader_offset_info_t) * (n_hier + 1));

    /* first thing I want to know is where the first level ends */
    level_one_knt = 0;

    while (level_one_knt < num_total_subgroups && 0 == array_of_all_subgroup_ranks[level_one_knt].level_in_hierarchy) {
        level_one_knt++;
    }

    /* I want to cache this number for unpack */
    array_of_all_subgroup_ranks->level_one_index = level_one_knt;

    /* determine whether or not ranks are contiguous */
    topo->ranks_contiguous = true;
    for (i = 0, knt = 0; i < level_one_knt && topo->ranks_contiguous; ++i) {
        for (j = 0; j < array_of_all_subgroup_ranks[i].n_ranks; ++j, ++knt) {
            if (knt != list_of_ranks_in_all_subgroups[knt]) {
                topo->ranks_contiguous = false;
                break;
            }
        }
    }

    loc_leader[0].offset = 0;

    /* now find my first level offset, and my index in level one */
    for (i = 0, loc_leader[0].level_one_index = -1; i < level_one_knt; ++i) {
        offset = array_of_all_subgroup_ranks[i].index_of_first_element;
        for (k = 0; k < array_of_all_subgroup_ranks[i].n_ranks; ++k) {
            rank = list_of_ranks_in_all_subgroups[k + offset];
            if (rank == my_rank_in_list) {
                loc_leader[0].offset = offset;
                loc_leader[0].level_one_index = k;
                i = level_one_knt;
                break;
            }
        }
    }

    /* every rank MUST appear at level 0 */
    assert (loc_leader[0].level_one_index > -1);

    for (i = 0; i < n_hier; ++i) {
        pair = &topo->component_pairs[i];
        /* find the size of the group */
        group_size = pair->subgroup_module->group_size;
        /* malloc some memory for the new list to cache
           on the bcol module
        */
        list_n_connected = (int *) malloc(sizeof(int) * group_size);

        /* next thing to do is to find out which subgroup I'm in
         * at this particular level
         */
        for (j = 0, knt = 0, my_sbgp = -1; j < num_total_subgroups && 0 > my_sbgp; ++j) {
            offset = array_of_all_subgroup_ranks[j].index_of_first_element;

            /* in the 1-level case just skip any group of size 1 and move on
             * to the real group. */
            if (1 == n_hier && 1 == array_of_all_subgroup_ranks[j].n_ranks) {
                continue;
            }

            for (k = 0; k < array_of_all_subgroup_ranks[j].n_ranks; k++) {
                rank = list_of_ranks_in_all_subgroups[k + offset];
                /* we can not use the level_in_topology flag to determine the
                 * level since not all levels may be represented, so keep a count
                 * of the number of times this rank shows up.  when it has been
                 * seen the correct number of times we are done. */
                if (rank == my_rank_in_list && ++knt == (i + 1)) {
                    my_sbgp = j;
                    /* tag whether I am a local leader or not at this level */
                    loc_leader[i].leader = (my_rank_in_list == array_of_all_subgroup_ranks[j].root_rank_in_comm);
                    break;
                }
            }
        }

        /* should have found a subgroup */
        assert (my_sbgp > -1);

        for (j = 0; j < group_size; ++j) {
            list_n_connected[j] = array_of_all_subgroup_ranks[my_sbgp].
                rank_data[j].num_of_ranks_represented;
        }

        /* now find all sbgps that the root of this sbgp belongs to
         * previous to this "my_sbgp" */
        my_root = array_of_all_subgroup_ranks[my_sbgp].root_rank_in_comm;

        for (j = 0, knt = 0; j < my_sbgp; ++j) {
            if (array_of_all_subgroup_ranks[j].root_rank_in_comm == my_root) {
                for (k = 1; k < array_of_all_subgroup_ranks[j].n_ranks; ++k) {
                    knt += array_of_all_subgroup_ranks[j].rank_data[k].
                        num_of_ranks_represented;
                }
            }
        }

        /* and then I add one for the root itself */
        list_n_connected[0] = knt + 1;

        /* now cache this on the bcol module */
        pair->bcol_modules[0]->list_n_connected = list_n_connected;

        /* I should do one more round here and figure out my offset at this level.
         * the calculation is simple: Am I a local leader in this level? If so, then I keep the offset
         * from the previous level. Else, I find out how "far away" the local leader is from me and set
         * this as the new offset.
         */
        /* do this after the first level */
        if (i > 0) {
            /* if I'm not the local leader */
            if (!loc_leader[i].leader) {
                /* then I am not a local leader at this level */
                offset = array_of_all_subgroup_ranks[my_sbgp].index_of_first_element;
                for (k = 0, knt = 0; k < array_of_all_subgroup_ranks[my_sbgp].n_ranks; ++k) {
                    rank = list_of_ranks_in_all_subgroups[k + offset];
                    if (rank == my_rank_in_list) {
                        break;
                    }

                    knt += list_n_connected[k];
                }
                loc_leader[i].offset = loc_leader[i - 1].offset - knt;
            } else {
                /* if I am the local leader, then keep the same offset */
                loc_leader[i].offset = loc_leader[i - 1].offset;
            }
        }

        pair->bcol_modules[0]->hier_scather_offset = loc_leader[i].offset;

        /* setup the tree */
        pair->bcol_modules[0]->k_nomial_tree(pair->bcol_modules[0]);
    }

    /* see if I am in the last subgroup; if I am,
     * then I am a root for the bcast operation
     */
    offset = array_of_all_subgroup_ranks[n_hier - 1].index_of_first_element;
    for (i = 0; i < array_of_all_subgroup_ranks[n_hier - 1].n_ranks; i++) {
        rank = list_of_ranks_in_all_subgroups[i + offset];
        if (rank == my_rank_in_list) {
            loc_leader[n_hier - 1].offset = 0;
            loc_leader[n_hier - 1].leader = true;
        }
    }

    /* set the last offset to 0 and set the leader according to your top level position */
    loc_leader[n_hier].offset = 0;
    loc_leader[n_hier].leader = loc_leader[n_hier - 1].leader;

    /* what other goodies do I want to cache on the ml-module? */
    topo->hier_layout_info = loc_leader;
}

static int ml_setup_full_tree_data(mca_coll_ml_topology_t *topo,
                                   ompi_communicator_t *comm,
                                   int my_highest_group_index, int *map_to_comm_ranks,
                                   int *num_total_subgroups, sub_group_params_t **array_of_all_subgroup_ranks,
                                   int **list_of_ranks_in_all_subgroups)
{

    int ret = OMPI_SUCCESS;
    int i, in_buf, root, my_rank, sum;
    int in_num_total_subgroups = *num_total_subgroups;
    int *scratch_space = NULL;

    /* figure out who holds all the sub-group information - only those
     * ranks in the top level know this data at this point */
    my_rank = ompi_comm_rank(comm);
    if ((my_highest_group_index == topo->global_highest_hier_group_index)
        &&
        (my_rank ==
         topo->component_pairs[topo->n_levels - 1].subgroup_module->group_list[0])
        ) {
        in_buf = my_rank;
    } else {
        /* since this will be a sum allreduce - contributing 0 will not
         * change the value */
        in_buf = 0;
    }
    ret = comm_allreduce_pml(&in_buf, &root, 1, MPI_INT,
                             my_rank, MPI_SUM,
                             ompi_comm_size(comm), map_to_comm_ranks,
                             comm);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
        ML_VERBOSE(10, ("comm_allreduce_pml failed. root reduction"));
        goto exit_ERROR;
    }

    /* broadcast the number of groups */
    ret = comm_bcast_pml(num_total_subgroups, root, 1,
                         MPI_INT, my_rank, ompi_comm_size(comm),
                         map_to_comm_ranks, comm);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
        ML_VERBOSE(10, ("comm_bcast_pml failed. num_total_subgroups bcast"));
        goto exit_ERROR;
    }

    scratch_space = (int *) malloc(4 * sizeof(int) * (*num_total_subgroups));
    if (OPAL_UNLIKELY(NULL == scratch_space)) {
        ML_VERBOSE(10, ("Cannot allocate memory scratch_space."));
        ret = OMPI_ERR_OUT_OF_RESOURCE;
        goto exit_ERROR;
    }

    if (my_rank == root) {
        sum = 0;
        for (i = 0; i < (*num_total_subgroups); i++) {
            scratch_space[4 * i] = (*array_of_all_subgroup_ranks)[i].root_rank_in_comm;
            scratch_space[4 * i + 1] = (*array_of_all_subgroup_ranks)[i].n_ranks;
            scratch_space[4 * i + 2] = (*array_of_all_subgroup_ranks)[i].index_of_first_element;
            scratch_space[4 * i + 3] = (*array_of_all_subgroup_ranks)[i].level_in_hierarchy;
        }
    }
    ret = comm_bcast_pml(scratch_space, root, 4 * (*num_total_subgroups),
                         MPI_INT, my_rank, ompi_comm_size(comm),
                         map_to_comm_ranks, comm);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
        ML_VERBOSE(10, ("comm_bcast_pml failed. scratch_space bcast"));
        goto exit_ERROR;
    }
    if (my_rank != root) {
        if (in_num_total_subgroups != (*num_total_subgroups)) {
            /* free the old array_of_all_subgroup_ranks array - need to fill it
             * with the global data - assume that if the array size is the
             * same, all data is correct, and in the same order */
            free((*array_of_all_subgroup_ranks));
            (*array_of_all_subgroup_ranks) = (sub_group_params_t *)
                malloc(sizeof(sub_group_params_t) * (*num_total_subgroups));
            if (OPAL_UNLIKELY(NULL == (*array_of_all_subgroup_ranks))) {
                ML_VERBOSE(10, ("Cannot allocate memory array_of_all_subgroup_ranks."));
                ret = OMPI_ERR_OUT_OF_RESOURCE;
                goto exit_ERROR;
            }
            for (i = 0; i < (*num_total_subgroups); i++) {
                (*array_of_all_subgroup_ranks)[i].root_rank_in_comm = scratch_space[4 * i];
                (*array_of_all_subgroup_ranks)[i].n_ranks = scratch_space[4 * i + 1];
                (*array_of_all_subgroup_ranks)[i].index_of_first_element = scratch_space[4 * i + 2];
                (*array_of_all_subgroup_ranks)[i].level_in_hierarchy = scratch_space[4 * i + 3];
            }
        }
    }
    /* figure out how many entries are in all the subgroups - ranks that appear
     * in k subgroups appear k times in the list */
    sum = 0;
    for (i = 0; i < (*num_total_subgroups); i++) {
        sum += (*array_of_all_subgroup_ranks)[i].n_ranks;
    }
    if (in_num_total_subgroups != (*num_total_subgroups)) {
        (*list_of_ranks_in_all_subgroups) = (int *)
            realloc((*list_of_ranks_in_all_subgroups), sizeof(int) * sum);
        if (OPAL_UNLIKELY(NULL == (*list_of_ranks_in_all_subgroups))) {
            ML_VERBOSE(10, ("Cannot allocate memory *list_of_ranks_in_all_subgroups."));
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit_ERROR;
        }
    }
    ret = comm_bcast_pml(*list_of_ranks_in_all_subgroups, root, sum,
                         MPI_INT, my_rank, ompi_comm_size(comm),
                         map_to_comm_ranks, comm);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
        ML_VERBOSE(10, ("Bcast failed for list_of_ranks_in_all_subgroups "));
        goto exit_ERROR;
    }

    /*
     * The data that is needed for a given rooted operation is:
     * - subgroup/rank information for the source of the data.
     *   That is, which rank in the subgroup will receive the
     *   data and distribute it to the rest of the ranks.
     * - the ranks that this data will be sent to.  This is
     *   described by the ranks in the current subgroups, and
     *   the subgroups for which each rank is a proxy,
     *   recursively in the communication tree.
     *
     * The assumption is that data will be delivered to each subgroup
     * in order, that is, all the data destined to subgroup rank 0
     * will appear first, then that for rank 1, etc.  This implies that
     * the data destined to rank 0, for example, will include the
     * data for rank 0, as well as all the ranks that appear following
     * it in the tree - in order.
     *
     * Proxies: A rank may be a proxy for more than a single subgroup.
     * When a rank is a proxy for more than a single subgroup, we
     * maintain a fixed order of subgroups for which this is a
     * proxy, with the assumption that the data for the first subgroup
     * appears first in the list, then that for the second, etc.
     * Since the data for the proxy (which is a member of this subgroup)
     * appears only once in the data list, the assumption is that the
     * proxy will be the root for this operation, and it is the first
     * set of data in the data list.  This means that the data offset
     * for the second rank in each subgroup will include all the data
     * for the previous subgroups, recursively.  This lets us maintain
     * the simple addressing scheme of contiguous data per rank in
     * the subcommunicator.
     *
     * The information needed for each rank in the subgroup is the
     * group indices for which it is a proxy.
     */
    /*
     * fill in the vertices in the hierarchical communications graph
     */

    /* figure out the detailed connection information, so that we can
     * figure out how the data needs to be ordered for sending it
     * through the tree in various collective algorithms that have per-rank
     * data associated with them.
     */

    /* this function does a depth-first traversal of the tree data and
     * builds rank data and ensures that hierarchy level 0 is in the
     * correct order for collective algorithms with per-rank data.
     */
    coll_ml_parse_topology (*array_of_all_subgroup_ranks, *num_total_subgroups,
                            *list_of_ranks_in_all_subgroups, ompi_comm_size (comm));

    /* The list of ranks in all subgroups is the same as the old sort list.  This is the same
     * order needed for both scatter and gather. */
    topo->sort_list = (*list_of_ranks_in_all_subgroups);

    /* return */
exit_ERROR:
    if (scratch_space) {
        free(scratch_space);
    }

    return ret;
}
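
/*
 * A sketch of the proxy data ordering described in the comment above
 * (ranks and groups are hypothetical).  Take level-0 subgroups {0,1}
 * (root 0) and {2,3} (root 2), plus level-1 subgroup {0,2}.  Rank 0 is
 * the proxy for {0,1} and rank 2 for {2,3}, so per-rank data is laid out
 * as [0,1,2,3]: each proxy's block is immediately followed by the blocks
 * of the ranks it represents, keeping every rank's data contiguous under
 * the subcommunicator addressing scheme.
 */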

static int get_new_subgroup_data (int32_t *all_selected, int size_of_all_selected,
                                  sub_group_params_t **sub_group_meta_data,
                                  int *size_of_sub_group_meta_data,
                                  int **list_of_ranks_in_all_subgroups,
                                  int *size_of_list_of_ranks_in_all_subgroups,
                                  int *num_ranks_in_list_of_ranks_in_all_subgroups,
                                  int *num_total_subgroups,
                                  int *map_to_comm_ranks, int level_in_hierarchy
                                  ) {

    /* local data */
    int rc = OMPI_SUCCESS;
    int rank_in_list, old_sg_size = (*num_total_subgroups);
    int sg_index, array_id, offset, sg_id;
    sub_group_params_t *dummy1 = NULL;
    int32_t **dummy2 = NULL;
    int32_t *dummy3 = NULL;
    int32_t **temp = NULL;
    int knt1 = 0,
        knt2 = 0,
        knt3 = 0;

    /* loop over all elements in the array of ranks selected, looking for
     * newly selected ranks - these form the new subgroups */
    for (rank_in_list = 0; rank_in_list < size_of_all_selected; rank_in_list++) {
        int sg_root, current_rank_in_comm;
        /* get root's rank in the communicator */
        sg_root = all_selected[rank_in_list];

        if (0 == sg_root) {
            /* this rank was not selected - go to the next rank */
            continue;
        }

        if (sg_root < 0) {
            sg_root = -sg_root - 1;
        } else {
            sg_root -= 1;
        }

        current_rank_in_comm = map_to_comm_ranks[rank_in_list];

        /* loop over existing groups, and see if this is a member of a new group
         * or if this group has already been found.
         */
        for (sg_index = old_sg_size, sg_id = -1; sg_index < (*num_total_subgroups); sg_index++) {
            if ((*sub_group_meta_data)[sg_index].root_rank_in_comm == sg_root) {
                /* add rank to the list */
                (*sub_group_meta_data)[sg_index].n_ranks++;
                sg_id = sg_index;
                break;
            }
        }

        if (-1 == sg_id) {
            /* did not find an existing sub-group, create a new one */
            /* initialize the new subgroup */
            PROVIDE_SUFFICIENT_MEMORY((*sub_group_meta_data), dummy1,
                                      (*size_of_sub_group_meta_data),
                                      sub_group_params_t, (*num_total_subgroups), 1, 5);
            /* do this for the temporary memory slots */
            PROVIDE_SUFFICIENT_MEMORY(temp, dummy2,
                                      knt1, int32_t *, knt2, 1, 5);
            if (OPAL_UNLIKELY(NULL == (*sub_group_meta_data))) {
                ML_VERBOSE(10, ("Cannot allocate memory for sub_group_meta_data."));
                rc = OMPI_ERR_OUT_OF_RESOURCE;
                goto exit_ERROR;
            }
            (*sub_group_meta_data)[(*num_total_subgroups)].root_rank_in_comm = sg_root;
            (*sub_group_meta_data)[(*num_total_subgroups)].n_ranks = 1;

            /* no need for this here - use a temporary ptr */
            temp[knt2] =
                (int *) malloc(sizeof(int) * size_of_all_selected);
            if (OPAL_UNLIKELY(NULL == temp[knt2])) {
                ML_VERBOSE(10, ("Cannot allocate memory for the temporary rank list."));
                rc = OMPI_ERR_OUT_OF_RESOURCE;
                goto exit_ERROR;
            }
            sg_id = (*num_total_subgroups)++;
            knt3 = ++knt2;
        } else {
            knt3 = sg_id - old_sg_size + 1;
        }

        array_id = (*sub_group_meta_data)[sg_id].n_ranks - 1;
        temp[knt3 - 1][array_id] = current_rank_in_comm;
    }

    /* linearize the data - one rank will ship this to all the other
     * ranks in the communicator
     */
    /* make sure there is enough memory to hold the list */
    PROVIDE_SUFFICIENT_MEMORY((*list_of_ranks_in_all_subgroups), dummy3,
                              (*size_of_list_of_ranks_in_all_subgroups),
                              int, (*num_ranks_in_list_of_ranks_in_all_subgroups),
                              size_of_all_selected, size_of_all_selected);
    if (OPAL_UNLIKELY(NULL == (*list_of_ranks_in_all_subgroups))) {
        ML_VERBOSE(10, ("Cannot allocate memory for list_of_ranks_in_all_subgroups."));
        rc = OMPI_ERR_OUT_OF_RESOURCE;
        goto exit_ERROR;
    }

    /* loop over the new subgroups */
    for (sg_id = old_sg_size; sg_id < (*num_total_subgroups); sg_id++) {
        offset = (*num_ranks_in_list_of_ranks_in_all_subgroups);

        (*sub_group_meta_data)[sg_id].index_of_first_element = offset;

        for (array_id = 0; array_id < (*sub_group_meta_data)[sg_id].n_ranks;
             array_id++) {
            (*list_of_ranks_in_all_subgroups)[offset + array_id] =
                temp[sg_id - old_sg_size][array_id];
        }
        (*num_ranks_in_list_of_ranks_in_all_subgroups) +=
            (*sub_group_meta_data)[sg_id].n_ranks;
        (*sub_group_meta_data)[sg_id].level_in_hierarchy = level_in_hierarchy;
        /* this causes problems on XT5 starting at 6144 cores */
        free(temp[sg_id - old_sg_size]);
    }

    /* clean up temporary storage */
    if (NULL != temp) {
        free(temp);
    }

    /* return */
exit_ERROR:
    return rc;
}

static int topo_parse (sub_group_params_t *sub_group_meta_data, int index, int *dst, int *src, int *dst_offset)
{
    int src_offset = sub_group_meta_data[index].index_of_first_element;
    int total_ranks_represented = 0, ranks_represented;

    if (0 == sub_group_meta_data[index].level_in_hierarchy) {
        ML_VERBOSE(10, ("Copying data for index %d to %d. Ranks at this level: %d", index, *dst_offset,
                        sub_group_meta_data[index].n_ranks));

        /* move level one subgroup data */
        memmove (dst + *dst_offset, src + src_offset, sizeof (int) * sub_group_meta_data[index].n_ranks);

        /* update the offset of this subgroup since it may have been moved */
        sub_group_meta_data[index].index_of_first_element = *dst_offset;
        *dst_offset += sub_group_meta_data[index].n_ranks;
    }

    ML_VERBOSE(10, ("Subgroup %d has %d ranks. level = %d", index, sub_group_meta_data[index].n_ranks,
                    sub_group_meta_data[index].level_in_hierarchy));

    /* fill in subgroup ranks */
    sub_group_meta_data[index].rank_data = (rank_properties_t *)
        malloc(sizeof(rank_properties_t) * sub_group_meta_data[index].n_ranks);
    if (OPAL_UNLIKELY(NULL == sub_group_meta_data[index].rank_data)) {
        ML_VERBOSE(10, ("Cannot allocate memory for rank_data "));
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    /* recurse on all subgroups */
    for (int j = 0; j < sub_group_meta_data[index].n_ranks; ++j) {
        int rank = src[j + src_offset];
        int next_level;

        /* determine if this rank is the root of the subgroup */
        if (rank == sub_group_meta_data[index].root_rank_in_comm) {
            sub_group_meta_data[index].root_index = j;
        }

        sub_group_meta_data[index].rank_data[j].leaf = true;
        sub_group_meta_data[index].rank_data[j].rank = rank;

        if (sub_group_meta_data[index].level_in_hierarchy) {
            ML_VERBOSE(10, ("Looking for subgroup containing %d as root", rank));

            for (next_level = index - 1; next_level >= 0; --next_level) {
                if (rank == sub_group_meta_data[next_level].root_rank_in_comm) {
                    ML_VERBOSE(10, ("Subgroup %d has root %d", next_level, rank));
                    break;
                }
            }

            /* all ranks are represented in the lowest level. this subgroup is not at the lowest level
             * so it must be a root at a lower level */
            assert (next_level >= 0);

            /* not a leaf node */
            sub_group_meta_data[index].rank_data[j].leaf = false;
            ranks_represented = topo_parse (sub_group_meta_data, next_level, dst, src, dst_offset);
            if (0 > ranks_represented) {
                return ranks_represented;
            }
            sub_group_meta_data[index].rank_data[j].num_of_ranks_represented = ranks_represented;

            total_ranks_represented += ranks_represented;
        } else {
            /* leaf node */
            sub_group_meta_data[index].rank_data[j].leaf = true;
            sub_group_meta_data[index].rank_data[j].num_of_ranks_represented = 1;

            total_ranks_represented++;
        }

        ML_VERBOSE(10, ("Group %d, level %d, index %d, rank %d represents %d ranks", index,
                        sub_group_meta_data[index].level_in_hierarchy, j, rank,
                        sub_group_meta_data[index].rank_data[j].num_of_ranks_represented));
    }

    return total_ranks_represented;
}

/* put level one in leaf order */
static int coll_ml_parse_topology (sub_group_params_t *sub_group_meta_data, size_t sub_group_count,
                                   int *list_of_ranks_in_all_subgroups, int level_one_size)
{
    int *tmp_data;
    int offset, rc;

    tmp_data = calloc (level_one_size, sizeof (int));
    if (NULL == tmp_data) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    /* do a DFS parse of the topology and ensure that level 1 is in the correct scatter/gather order */
    offset = 0;
    rc = topo_parse (sub_group_meta_data, sub_group_count - 1, tmp_data, list_of_ranks_in_all_subgroups, &offset);
    if (0 > rc) {
        free (tmp_data);
        return rc;
    }

    /* all ranks in level one should be represented in the re-order buffer */
    assert (offset == level_one_size);

    /* copy re-ordered level 1 (0) */
    if (0 != offset) {
        /* copy new level one data back into the list of all subgroups */
        memmove (list_of_ranks_in_all_subgroups, tmp_data, sizeof (int) * offset);
    }

    free (tmp_data);

    return OMPI_SUCCESS;
}
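
/*
 * Worked example of the DFS re-order above (groups are hypothetical).
 * With level-0 subgroups {0,1} (root 0) and {2,3} (root 2) listed as
 * [0,1,2,3], and a level-1 root subgroup listed as {2,0}, topo_parse()
 * starts at the last (highest-level) subgroup and recurses into the
 * level-0 subgroup rooted at each member in turn, copying level-0 blocks
 * in traversal order.  The re-ordered level-0 list becomes [2,3,0,1]:
 * leaf order, with each subgroup placed where its proxy appears in the
 * tree.
 */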

static int append_new_network_context(hierarchy_pairs *pair)
{
    int i;
    int rc;
    mca_coll_ml_lmngr_t *memory_manager = &mca_coll_ml_component.memory_manager;
    bcol_base_network_context_t *nc = NULL;

    for (i = 0; i < pair->num_bcol_modules; i++) {
        nc = pair->bcol_modules[i]->network_context;
        if (NULL != nc) {
            rc = mca_coll_ml_lmngr_append_nc(memory_manager, nc);
            if (OMPI_SUCCESS != rc) {
                return OMPI_ERROR;
            }
            /* caching the network context id on bcol */
            pair->bcol_modules[i]->context_index = nc->context_id;
        }
    }

    return OMPI_SUCCESS;
}

static int ml_module_set_small_msg_thresholds(mca_coll_ml_module_t *ml_module)
{
    const mca_coll_ml_topology_t *topo_info;
    mca_bcol_base_module_t *bcol_module;
    hierarchy_pairs *pair;

    int i, j, rc, hier, *ranks_in_comm, n_hier, tp,
        comm_size = ompi_comm_size(ml_module->comm);

    for (tp = 0; tp < COLL_ML_TOPO_MAX; ++tp) {
        topo_info = &ml_module->topo_list[tp];
        if (COLL_ML_TOPO_DISABLED == topo_info->status) {
            /* Skip the topology */
            continue;
        }

        n_hier = topo_info->n_levels;
        for (hier = 0; hier < n_hier; ++hier) {
            pair = &topo_info->component_pairs[hier];

            for (i = 0; i < pair->num_bcol_modules; ++i) {
                bcol_module = pair->bcol_modules[i];

                if (NULL != bcol_module->set_small_msg_thresholds) {
                    bcol_module->set_small_msg_thresholds(bcol_module);
                }

                for (j = 0; j < BCOL_NUM_OF_FUNCTIONS; ++j) {
                    if (ml_module->small_message_thresholds[j] >
                        bcol_module->small_message_thresholds[j]) {
                        ml_module->small_message_thresholds[j] =
                            bcol_module->small_message_thresholds[j];
                    }
                }
            }
        }
    }

    ranks_in_comm = (int *) malloc(comm_size * sizeof(int));
    if (OPAL_UNLIKELY(NULL == ranks_in_comm)) {
        ML_ERROR(("Memory allocation failed."));
        return OMPI_ERROR;
    }

    for (i = 0; i < comm_size; ++i) {
        ranks_in_comm[i] = i;
    }

    rc = comm_allreduce_pml(ml_module->small_message_thresholds,
                            ml_module->small_message_thresholds,
                            BCOL_NUM_OF_FUNCTIONS, MPI_INT,
                            ompi_comm_rank(ml_module->comm), MPI_MIN,
                            comm_size, ranks_in_comm, ml_module->comm);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
        ML_ERROR(("comm_allreduce_pml failed."));
        /* do not leak the rank list on the error path */
        free(ranks_in_comm);
        return OMPI_ERROR;
    }

    free(ranks_in_comm);

    return OMPI_SUCCESS;
}
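
/*
 * Sketch of the threshold negotiation above (hypothetical values): each rank
 * first takes the minimum across its local bcol modules, then the MPI_MIN
 * allreduce makes every rank agree on the most restrictive threshold per
 * collective. For two modules and two collectives on one rank:
 *
 *     int ml_thresh[2] = {8192, 8192};     // ML starting values
 *     int bcol_a[2]    = {4096, 8192};     // module A limits
 *     int bcol_b[2]    = {8192, 2048};     // module B limits
 *     for (int k = 0; k < 2; ++k) {
 *         if (ml_thresh[k] > bcol_a[k]) ml_thresh[k] = bcol_a[k];
 *         if (ml_thresh[k] > bcol_b[k]) ml_thresh[k] = bcol_b[k];
 *     }
 *     // ml_thresh == {4096, 2048}; the global MPI_MIN then picks the
 *     // smallest of these per-rank results across the communicator.
 */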

static int mca_coll_ml_read_allbcols_settings(mca_coll_ml_module_t *ml_module,
                                              int n_hierarchies)
{
    int i, j,
        ret = OMPI_SUCCESS;
    int *ranks_map = NULL,
        *bcols_in_use = NULL,
        *bcols_in_use_all_ranks = NULL;
    bool use_user_bufs, limit_size_user_bufs;
    ssize_t length_ml_payload;
    int64_t frag_size;
    const mca_bcol_base_component_2_0_0_t *bcol_component = NULL;
    mca_base_component_list_item_t *bcol_cli = NULL;

    /* If this assert fails, it means that you changed the initialization
     * order and the data offset, which is critical for this section of code,
     * has not been initialized.
     * DO NOT REMOVE THIS ASSERT !!!
     */
    assert(ml_module->data_offset >= 0);

    /* need to figure out which bcol's are participating
     * in the hierarchy across the communicator, so that we can set
     * appropriate segmentation parameters.
     */
    bcols_in_use = (int *) malloc(sizeof(int) * 2 * n_hierarchies);
    if (OPAL_UNLIKELY(NULL == bcols_in_use)) {
        ML_VERBOSE(10, ("Cannot allocate memory for bcols_in_use."));
        ret = OMPI_ERR_OUT_OF_RESOURCE;
        goto exit_ERROR;
    }
    /* setup pointers to arrays that will hold bcol parameters. Since
     * a given bcol is not instantiated in all processes, we need to get this
     * information from those ranks that have instantiated these
     * parameters.
     */
    bcols_in_use_all_ranks = bcols_in_use + n_hierarchies;

    /* get list of bcols that I am using */
    for (i = 0; i < n_hierarchies; i++) {
        bcols_in_use[i] = 0;
    }

    for (j = 0; j < COLL_ML_TOPO_MAX; j++) {
        mca_coll_ml_topology_t *topo_info = &ml_module->topo_list[j];
        if (COLL_ML_TOPO_DISABLED == topo_info->status) {
            /* skip the topology */
            continue;
        }

        for (i = 0; i < topo_info->n_levels; i++) {
            int ind;
            ind = topo_info->component_pairs[i].bcol_index;
            bcols_in_use[ind] = 1;
        }
    }

    /* set one to one mapping */
    ranks_map = (int *) malloc(sizeof(int) * ompi_comm_size(ml_module->comm));
    if (NULL == ranks_map) {
        ret = OMPI_ERR_OUT_OF_RESOURCE;
        goto exit_ERROR;
    }
    for (i = 0; i < ompi_comm_size(ml_module->comm); i++) {
        ranks_map[i] = i;
    }

    /* reduce over all the ranks to figure out which bcols are
     * participating at this level
     */
    ret = comm_allreduce_pml(bcols_in_use, bcols_in_use_all_ranks,
                             n_hierarchies, MPI_INT, ompi_comm_rank(ml_module->comm),
                             MPI_MAX, ompi_comm_size(ml_module->comm),
                             ranks_map, ml_module->comm);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
        ML_VERBOSE(10, ("comm_allreduce_pml failed. bcols_in_use reduction"));
        goto exit_ERROR;
    }

    /*
     * figure out fragmentation parameters
     */
    /* can user buffers be used */
    use_user_bufs = true;
    bcol_cli = (mca_base_component_list_item_t *) opal_list_get_first(&mca_bcol_base_components_in_use);

    for (i = 0; i < n_hierarchies; i++) {
        /* fetch the component and advance the iterator before any early
         * continue, so the iterator stays in step with the index i */
        bcol_component = (mca_bcol_base_component_2_0_0_t *) bcol_cli->cli_component;
        bcol_cli = (mca_base_component_list_item_t *) opal_list_get_next((opal_list_item_t *) bcol_cli);

        if (!bcols_in_use_all_ranks[i]) {
            /* this bcol is not being used - do nothing */
            continue;
        }
        /* check to see if user buffers can be used */
        if (!bcol_component->can_use_user_buffers) {
            /* need to use library buffers, so all will do this */
            use_user_bufs = false;
            break;
        }
    }

    /* size of ml buffer */
    length_ml_payload = mca_coll_ml_component.payload_buffer_size - ml_module->data_offset;

    if (use_user_bufs) {
        /*
         * using user buffers
         */
        ml_module->use_user_buffers = 1;

        /* figure out if data will be segmented for pipelining -
         * for non-contiguous data we will just use a fragment the size
         * of the ml payload buffer */

        /* check to see if any bcols impose a limit */
        limit_size_user_bufs = false;
        bcol_cli = (mca_base_component_list_item_t *) opal_list_get_first(&mca_bcol_base_components_in_use);
        for (i = 0; i < n_hierarchies; i++) {
            bcol_component = (mca_bcol_base_component_2_0_0_t *) bcol_cli->cli_component;
            if (bcol_component->max_frag_size != FRAG_SIZE_NO_LIMIT) {
                limit_size_user_bufs = true;
                break;
            }
            bcol_cli = (mca_base_component_list_item_t *) opal_list_get_next((opal_list_item_t *) bcol_cli);
        }

        if (limit_size_user_bufs) {
            /* figure out fragment size */
            frag_size = 0;
            bcol_cli = (mca_base_component_list_item_t *) opal_list_get_first(&mca_bcol_base_components_in_use);
            for (i = 0; i < n_hierarchies; i++) {
                bcol_component = (mca_bcol_base_component_2_0_0_t *) bcol_cli->cli_component;
                bcol_cli = (mca_base_component_list_item_t *) opal_list_get_next((opal_list_item_t *) bcol_cli);

                /* check to see if this bcol is being used */
                if (!bcols_in_use_all_ranks[i]) {
                    /* not in use */
                    continue;
                }
                if (FRAG_SIZE_NO_LIMIT == bcol_component->max_frag_size) {
                    /* no limit - will not determine fragment size */
                    continue;
                }
                if (0 == frag_size) {
                    /* nothing set yet */
                    frag_size = bcol_component->max_frag_size;
                } else if (bcol_component->max_frag_size < frag_size) {
                    /* stricter constraint on fragment size */
                    frag_size = bcol_component->max_frag_size;
                }
            }
            ml_module->fragment_size = frag_size;
        } else {
            /* entire message may be processed in single chunk */
            ml_module->fragment_size = FRAG_SIZE_NO_LIMIT;
        }
        /* for non-contiguous data - just use the ML buffers */
        ml_module->ml_fragment_size = length_ml_payload;

    } else {
        /*
         * using library buffers
         */
        ml_module->use_user_buffers = 0;

        /* figure out buffer size */
        ml_module->fragment_size = length_ml_payload;
        /* see if this is too large */
        bcol_cli = (mca_base_component_list_item_t *) opal_list_get_first(&mca_bcol_base_components_in_use);
        for (i = 0; i < n_hierarchies; i++) {
            bcol_component = (mca_bcol_base_component_2_0_0_t *) bcol_cli->cli_component;
            bcol_cli = (mca_base_component_list_item_t *) opal_list_get_next((opal_list_item_t *) bcol_cli);
            /* check to see if this bcol is being used */
            if (!bcols_in_use_all_ranks[i]) {
                /* not in use */
                continue;
            }
            if (FRAG_SIZE_NO_LIMIT == bcol_component->max_frag_size) {
                /* no limit - will not affect fragment size */
                continue;
            }
            if (bcol_component->max_frag_size < (int) ml_module->fragment_size) {
                /* frag size set too large */
                ml_module->fragment_size = bcol_component->max_frag_size;
            }
        }
        /* for non-contiguous data - just use the ML buffers */
        ml_module->ml_fragment_size = ml_module->fragment_size;
    }

    ML_VERBOSE(10, ("Setting payload size to %d %d [%d %d]",
                    ml_module->ml_fragment_size, length_ml_payload,
                    mca_coll_ml_component.payload_buffer_size,
                    ml_module->data_offset));

exit_ERROR:
    if (NULL != ranks_map) {
        free(ranks_map);
    }
    if (NULL != bcols_in_use) {
        free(bcols_in_use);
    }

    return ret;
}
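
/*
 * Worked example of the fragment-size negotiation above (hypothetical
 * limits): with three bcols in use whose max_frag_size values are
 * FRAG_SIZE_NO_LIMIT, 65536 and 16384, the loop keeps the strictest finite
 * limit, so every rank settles on a 16K fragment:
 *
 *     int64_t limits[3] = {FRAG_SIZE_NO_LIMIT, 65536, 16384};
 *     int64_t frag_size = 0;
 *     for (int k = 0; k < 3; ++k) {
 *         if (FRAG_SIZE_NO_LIMIT == limits[k]) continue;
 *         if (0 == frag_size || limits[k] < frag_size) {
 *             frag_size = limits[k];
 *         }
 *     }
 *     // frag_size == 16384
 */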

static int ml_discover_hierarchy(mca_coll_ml_module_t *ml_module)
{
    ompi_proc_t *my_proc = NULL;

    int n_hierarchies = 0,
        i = 0, ret = OMPI_SUCCESS;

    int size_bcol_list, size_sbgp_list;

    size_bcol_list = opal_list_get_size(&mca_bcol_base_components_in_use);
    size_sbgp_list = opal_list_get_size(&mca_sbgp_base_components_in_use);

    if ((size_bcol_list != size_sbgp_list) || size_sbgp_list < 1 || size_bcol_list < 1) {
        ML_ERROR(("Error: (size of mca_bcol_base_components_in_use = %d)"
                  " != (size of mca_sbgp_base_components_in_use = %d) or zero.",
                  size_bcol_list, size_sbgp_list));
        return OMPI_ERROR;
    }

    n_hierarchies = size_sbgp_list;

    my_proc = ompi_proc_local();
    /* create the converter; the current implementation
       supports homogeneous communicators only */
    ml_module->reference_convertor =
        opal_convertor_create(my_proc->proc_arch, 0);

    if (OPAL_UNLIKELY(NULL == ml_module->reference_convertor)) {
        return OMPI_ERROR;
    }

    /* Loop over all supported hierarchies.
       To do: we would like to have an mca parameter that lets the user
       control the list of topologies to use. Right now we run every
       enabled topology.
     */
    for (i = 0; i < COLL_ML_TOPO_MAX; i++) {
        if (COLL_ML_TOPO_ENABLED == ml_module->topo_list[i].status) {
            ret = mca_coll_ml_component.topo_discovery_fn[i](ml_module, n_hierarchies);
            if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
                return ret;
            }
        }
    }

    /* Local query for bcol header size */
    ret = calculate_buffer_header_size(ml_module);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
        return ret;
    }

    /* Get BCOL tuning, like support for zero copy, fragment size, etc.
     * This query involves global synchronization over all processes */
    ret = mca_coll_ml_read_allbcols_settings(ml_module, n_hierarchies);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
        return ret;
    }
    /* This is the safe point to call ml_module_memory_initialization; please
       be very careful if you decide to move it around. */
    ret = ml_module_memory_initialization(ml_module);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
        /* make sure to release just allocated memory */
        mca_coll_ml_free_block(ml_module->payload_block);
        return ret;
    }

    ret = ml_module_set_small_msg_thresholds(ml_module);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
        /* make sure to release just allocated memory */
        mca_coll_ml_free_block(ml_module->payload_block);
        return ret;
    }

    {
        /* Synchronization barrier to make sure that all sides have finished
         * registering the memory */
        int ret = OMPI_SUCCESS, i;
        int *comm_ranks = NULL;

        comm_ranks = (int *) calloc(ompi_comm_size(ml_module->comm), sizeof(int));
        if (OPAL_UNLIKELY(NULL == comm_ranks)) {
            ML_VERBOSE(10, ("Cannot allocate memory."));
            return OMPI_ERR_OUT_OF_RESOURCE;
        }

        for (i = 0; i < ompi_comm_size(ml_module->comm); i++) {
            comm_ranks[i] = i;
        }

        ret = comm_allreduce_pml(&ret, &i,
                                 1, MPI_INT, ompi_comm_rank(ml_module->comm),
                                 MPI_MIN, ompi_comm_size(ml_module->comm), comm_ranks,
                                 ml_module->comm);

        free(comm_ranks);

        if (OMPI_SUCCESS != ret) {
            ML_ERROR(("comm_allreduce - failed to collect max_comm data"));
            return ret;
        }
        /* Barrier done */
    }

    return ret;
}
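
/*
 * Note on the closing barrier: an allreduce of a single int with MPI_MIN can
 * only complete once every rank has contributed, so it doubles as a
 * synchronization barrier. A minimal sketch of the same idea, assuming the
 * same comm_allreduce_pml helper and a populated comm_ranks array:
 *
 *     int dummy_in = 0, dummy_out;
 *     comm_allreduce_pml(&dummy_in, &dummy_out, 1, MPI_INT,
 *                        ompi_comm_rank(comm), MPI_MIN,
 *                        ompi_comm_size(comm), comm_ranks, comm);
 *     // returns only after all ranks listed in comm_ranks have entered
 */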

static int mca_coll_ml_tree_hierarchy_discovery(mca_coll_ml_module_t *ml_module,
                                                mca_coll_ml_topology_t *topo, int n_hierarchies,
                                                const char *exclude_sbgp_name, const char *include_sbgp_name)
{
    /* local variables */
    char *ptr_output = NULL;
    sbgp_base_component_keyval_t *sbgp_cli = NULL;
    mca_base_component_list_item_t *bcol_cli = NULL;
    hierarchy_pairs *pair = NULL;

    mca_sbgp_base_module_t *module = NULL;
    ompi_proc_t **copy_procs = NULL,
        *my_proc = NULL;

    const mca_sbgp_base_component_2_0_0_t *sbgp_component = NULL;

    int i_hier = 0, n_hier = 0, ll_p1,
        n_procs_in = 0, group_index = 0, n_remain = 0,
        i, j, ret = OMPI_SUCCESS, my_rank_in_list = 0,
        n_procs_selected = 0, original_group_size = 0, i_am_done = 0,
        local_leader, my_rank_in_subgroup, my_rank_in_remaining_list = 0,
        my_rank_in_comm;

    int32_t my_lowest_group_index = -1, my_highest_group_index = -1;

    int *map_to_comm_ranks = NULL, *bcols_in_use = NULL;

    int32_t *all_selected = NULL,
        *index_proc_selected = NULL;

    short all_reduce_buffer2_in[2];
    short all_reduce_buffer2_out[2];
    sub_group_params_t *array_of_all_subgroup_ranks = NULL;
    /* this pointer should probably be an int32_t and not an int type */
    int32_t *list_of_ranks_in_all_subgroups = NULL;
    int num_ranks_in_all_subgroups = 0, num_total_subgroups = 0;
    int size_of_array_of_all_subgroup_ranks = 0;
    int size_of_list_of_ranks_in_all_subgroups = 0;
    int32_t in_allgather_value;

    if (NULL != exclude_sbgp_name && NULL != include_sbgp_name) {
        ret = OMPI_ERROR;
        goto exit_ERROR;
    }

    ML_VERBOSE(10,("include %s exclude %s size %d", include_sbgp_name, exclude_sbgp_name, n_hierarchies));

    /* allocate scratch space */
    all_selected = (int32_t *) calloc(ompi_comm_size(ml_module->comm), sizeof(int32_t));
    if (OPAL_UNLIKELY(NULL == all_selected)) {
        ML_VERBOSE(10, ("Cannot allocate memory."));
        ret = OMPI_ERR_OUT_OF_RESOURCE;
        goto exit_ERROR;
    }

    map_to_comm_ranks = (int *) calloc(ompi_comm_size(ml_module->comm), sizeof(int));
    if (OPAL_UNLIKELY(NULL == map_to_comm_ranks)) {
        ML_VERBOSE(10, ("Cannot allocate memory."));
        ret = OMPI_ERR_OUT_OF_RESOURCE;
        goto exit_ERROR;
    }

    /*
    ** obtain list of procs
    */
    /* create private copy for manipulation */
    copy_procs = (ompi_proc_t **) calloc(ompi_comm_size(ml_module->comm),
                                         sizeof(ompi_proc_t *));
    if (OPAL_UNLIKELY(NULL == copy_procs)) {
        ML_VERBOSE(10, ("Cannot allocate memory."));
        ret = OMPI_ERR_OUT_OF_RESOURCE;
        goto exit_ERROR;
    }

    for (i = 0; i < ompi_comm_size(ml_module->comm); i++) {
        copy_procs[i] = ompi_comm_peer_lookup (ml_module->comm, i);
        map_to_comm_ranks[i] = i;
    }

    my_rank_in_comm = ompi_comm_rank (ml_module->comm);
    n_procs_in = ompi_comm_size(ml_module->comm);
    original_group_size = n_procs_in;

    /* setup information for all-reduce over out of band */
    index_proc_selected = (int32_t *) malloc(sizeof(int32_t) * n_procs_in);
    if (OPAL_UNLIKELY(NULL == index_proc_selected)) {
        ML_VERBOSE(10, ("Cannot allocate memory."));
        ret = OMPI_ERR_OUT_OF_RESOURCE;
        goto exit_ERROR;
    }

    /* get my proc pointer - used to identify myself in the list */
    my_proc = ompi_proc_local();
    my_rank_in_list = ompi_comm_rank(ml_module->comm);

    topo->component_pairs = (hierarchy_pairs *) calloc(n_hierarchies, sizeof(hierarchy_pairs));
    if (OPAL_UNLIKELY(NULL == topo->component_pairs)) {
        ML_VERBOSE(10, ("Cannot allocate memory."));
        ret = OMPI_ERR_OUT_OF_RESOURCE;
        goto exit_ERROR;
    }

    n_hier = 0;
    /*
     * Algorithm for subgrouping:
     *  1) Start with all the ranks in the communicator
     *  2) iterate over all (exclusive) hierarchy selection rules
     *     A) Apply the subgrouping function to the remaining set of ranks
     *       - After the call to subgrouping, subgroup_module->group_list
     *         has the index of ranks selected, from the list of ranks
     *         passed in.
     *       - map_to_comm_ranks maintains the mapping of the remaining
     *         ranks to their rank in the communicator
     *     B) Each rank initializes a scratch array the size of the
     *        remaining ranks to 0, and then fills in the entry that
     *        corresponds to itself only with the value -/+R. If the
     *        rank is the local leader for the subgroup, the value of -R
     *        is entered, otherwise R is entered. R is the root of the
     *        selected subgroup plus 1, so that for rank 0, +R has a
     *        different value than -R.
     *     C) The vector is then reduced, with the results going to all
     *        ranks, over the list of remaining ranks. As a result,
     *        the ranks of a given subgroup will show up with the value R,
     *        for all but the local-leader, which will have the value of -R.
     *        This is also used for error checking.
     *     D) subgroup_module->group_list is changed to contain the ranks
     *        of each member of the group within the communicator.
     *     E) The local rank within the group is determined.
     *     F) The list of remaining ranks is compacted, removing all selected
     *        ranks that are not the local-leader of the group.
     *        map_to_comm_ranks is also compacted.
     *  3) This is terminated once all ranks are selected.
     * A worked example of steps B and C follows this comment.
     */
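
    /*
     * Worked example of steps B and C (hypothetical): a 4-rank communicator
     * is split into two 2-rank subgroups {0,1} and {2,3}, with ranks 0 and 2
     * as the local leaders. The subgroup roots are 0 and 2, so R is 1 and 3
     * respectively, and the per-rank contributions are:
     *
     *     rank 0: -1    rank 1: +1    rank 2: -3    rank 3: +3
     *
     * After the exchange every rank holds {-1, 1, -3, 3}: equal magnitudes
     * identify members of the same subgroup, and the negative entry marks
     * that subgroup's local leader.
     */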

    /* loop over hierarchies */
    sbgp_cli = (sbgp_base_component_keyval_t *) opal_list_get_first(&mca_sbgp_base_components_in_use);
    bcol_cli = (mca_base_component_list_item_t *) opal_list_get_first(&mca_bcol_base_components_in_use);

    ML_VERBOSE(10, ("Loop over hierarchies."));

    i_hier = 0;
    while ((opal_list_item_t *) sbgp_cli != opal_list_get_end(&mca_sbgp_base_components_in_use)){
        /* number of processes selected with this sbgp on all ranks */
        int global_n_procs_selected;

        /*
        ** obtain the list of ranks in the current level
        */

        sbgp_component = (mca_sbgp_base_component_2_0_0_t *) sbgp_cli->component.cli_component;

        /* Skip excluded levels */
        if (NULL != exclude_sbgp_name) {

            ML_VERBOSE(10,("EXCLUDE compare %s to %s", exclude_sbgp_name,
                           sbgp_component->sbgp_version.mca_component_name));
            if (0 == strcmp(exclude_sbgp_name,
                            sbgp_component->sbgp_version.mca_component_name)) {
                /* take the next element */
                sbgp_cli = (sbgp_base_component_keyval_t *) opal_list_get_next((opal_list_item_t *) sbgp_cli);
                bcol_cli = (mca_base_component_list_item_t *) opal_list_get_next((opal_list_item_t *) bcol_cli);
                continue;
            }
        }

        if (NULL != include_sbgp_name) {
            ML_VERBOSE(10,("INCLUDE compare %s to %s", include_sbgp_name,
                           sbgp_component->sbgp_version.mca_component_name));
            if (0 != strcmp(include_sbgp_name,
                            sbgp_component->sbgp_version.mca_component_name)) {
                /* take the next element */
                sbgp_cli = (sbgp_base_component_keyval_t *) opal_list_get_next((opal_list_item_t *) sbgp_cli);
                bcol_cli = (mca_base_component_list_item_t *) opal_list_get_next((opal_list_item_t *) bcol_cli);
                continue;
            }
        }

        ML_VERBOSE(10,("Passed include %s exclude %s", include_sbgp_name, exclude_sbgp_name));

        /* discover subgroup */
        ML_VERBOSE(10, ("Discover subgroup: hier level - %d.", i_hier));
        module = sbgp_component->select_procs(copy_procs, n_procs_in,
                                              ml_module->comm,
                                              sbgp_cli->key_value, &ptr_output);
        if (NULL == module) {
            /* no module created */
            n_procs_selected = 0;
            /* We must continue and participate in the allgather.
             * It's not clear that one can enter this conditional
             * during "normal" execution. We need to review
             * all modules.
             */

            /* THE CODE SNIPPET COMMENTED OUT BELOW IS DANGEROUS CODE THAT
             * COULD RESULT IN A HANG - THE "CONTINUE" STATEMENT MAY RESULT IN
             * RANKS BYPASSING THE ALLGATHER IN NON-SYMMETRIC CASES
             */

            /*
              sbgp_cli = (sbgp_base_component_keyval_t *) opal_list_get_next((opal_list_item_t *) sbgp_cli);
              bcol_cli = (mca_base_component_list_item_t *) opal_list_get_next((opal_list_item_t *) bcol_cli);
              continue;
            */

            /* Skipping subgroups of size one will cause these processes to be missed in the list of level one
             * indices. */
        } else if (NULL == module->group_list || (1 == module->group_size && i_hier)) {
            /* bypass modules that have no group_list */
            n_procs_selected = 0;
            OBJ_RELEASE(module);
            module = NULL;
        } else {
            n_procs_selected = module->group_size;
        }

        ML_VERBOSE(10, ("Hier level - %d; group size - %d", i_hier, n_procs_selected));

        /* setup array indicating all procs that were selected */
        for (i = 0; i < n_procs_in; i++) {
            index_proc_selected[i] = 0;
        }

        /* figure out my rank in the subgroup */
        my_rank_in_subgroup = -1;
        ll_p1 = -1;
        in_allgather_value = 0;
        if (n_procs_selected) {
            /* I need to contribute to the vector */
            for (group_index = 0; group_index < n_procs_selected; group_index++) {
                /* set my rank within the group */
                if (map_to_comm_ranks[module->group_list[group_index]] == my_rank_in_comm) {
                    my_rank_in_subgroup = group_index;
                    module->my_index = group_index;
                    /* currently the indices are still given in terms of
                     * the rank in the list of remaining ranks */
                    my_rank_in_remaining_list = module->group_list[group_index];
                }
            }

            if( -1 != my_rank_in_subgroup ) {
                /* I am contributing to this subgroup */

#ifdef NEW_LEADER_SELECTION
#if 0
                int lleader_index;
                /* Select the local leader */
                lleader_index = coll_ml_select_leader(ml_module,module, map_to_comm_ranks,
                                                      copy_procs,n_procs_selected);

                local_leader = map_to_comm_ranks[module->group_list[lleader_index]];
#endif
#else

                /* the local leader is the first rank in the list of remaining ranks */
                local_leader = map_to_comm_ranks[module->group_list[0]];

#endif
                ML_VERBOSE(10,("The local leader selected for hierarchy %d is rank %d ",
                               i_hier, local_leader));

                ll_p1 = local_leader + 1;
                if (local_leader == my_rank_in_comm) {
                    in_allgather_value =
                        index_proc_selected[my_rank_in_remaining_list] = -ll_p1;
                } else {
                    in_allgather_value =
                        index_proc_selected[my_rank_in_remaining_list] = ll_p1;
                }
            }
        }

        /* gather the information from all the other remaining ranks */
        ML_VERBOSE(10, ("Call for comm_allgather_pml."));
        ret = comm_allgather_pml(&in_allgather_value,
                                 all_selected, 1, MPI_INT, my_rank_in_list,
                                 n_procs_in, map_to_comm_ranks, ml_module->comm);
        if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
            ML_VERBOSE(10, ("comm_allgather_pml failed."));
            goto exit_ERROR;
        }

        /* do some sanity checks */
        if( -1 != my_rank_in_subgroup ) {
            ret = check_global_view_of_subgroups(n_procs_selected,
                                                 n_procs_in, ll_p1, all_selected, module );
            if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
                ML_VERBOSE(10, ("check_global_view_of_subgroups failed."));
                goto exit_ERROR;
            }
        }

        /*
        ** change the list of procs stored on the module to ranks within
        ** the communicator.
        */

        ML_VERBOSE(10, ("Change the list of procs; hier level - %d.", i_hier));
        for (group_index = 0; group_index < n_procs_selected; group_index++) {
            module->group_list[group_index] = map_to_comm_ranks[module->group_list[group_index]];
            /* set my rank within the group */
            if (module->group_list[group_index] == ompi_comm_rank(ml_module->comm)) {
                module->my_index = group_index;
            }
        }

        /*
         * accumulate data on the new subgroups created
         */
        /*XXX*/
        global_n_procs_selected = num_ranks_in_all_subgroups;
        ret = get_new_subgroup_data(all_selected, n_procs_in,
                                    &array_of_all_subgroup_ranks,
                                    &size_of_array_of_all_subgroup_ranks,
                                    &list_of_ranks_in_all_subgroups,
                                    &size_of_list_of_ranks_in_all_subgroups,
                                    &num_ranks_in_all_subgroups,
                                    &num_total_subgroups, map_to_comm_ranks, i_hier);

        if( OMPI_SUCCESS != ret ) {
            ML_VERBOSE(10, (" Error: get_new_subgroup_data returned %d ", ret));
            goto exit_ERROR;
        }

        /* the global number of processes selected at this level is the difference
         * in the number of procs in all subgroups between this level and the
         * last */
        global_n_procs_selected = num_ranks_in_all_subgroups - global_n_procs_selected;

        /* am I done ? */
        i_am_done = 0;
        if ( (all_selected[my_rank_in_list] == ll_p1) &&
             /* if I was not a member of any group, still need to continue */
             n_procs_selected ){
            i_am_done = 1;
        }
        /* get my rank in the list */
        n_remain = 0;
        my_rank_in_list = -1;
        for (i = 0; i < n_procs_in; i++) {
            if (all_selected[i] > 0 ) {
                /* this proc will not be used in the next hierarchy */
                continue;
            }
            /* reset my_rank_in_list, n_procs_in */
            copy_procs[n_remain] = copy_procs[i];
            map_to_comm_ranks[n_remain] = map_to_comm_ranks[i];

            if (my_proc == copy_procs[n_remain]){
                my_rank_in_list = n_remain;
            }

            n_remain++;
        }

        /* check to make sure we did not get a size 1 group if more than
         * one rank still remains to be grouped */
        if ((1 == n_procs_selected) && n_remain > 1) {
            OBJ_RELEASE(module);
            n_procs_selected = 0;
        }

        if( 0 < n_procs_selected ) {
            /* increment the level counter */
            pair = &topo->component_pairs[n_hier];

            /* add this to the list of sub-group/bcol pairs in use */
            pair->subgroup_module = module;
            pair->bcol_component = (mca_bcol_base_component_t *)
                ((mca_base_component_list_item_t *) bcol_cli)->cli_component;

            pair->bcol_index = i_hier;

            /* create bcol modules */
            ML_VERBOSE(10, ("Create bcol modules."));
            pair->bcol_modules = pair->bcol_component->collm_comm_query(module, &pair->num_bcol_modules);
            /* failed to create a new module */
            if (OPAL_UNLIKELY(NULL == pair->bcol_modules)) {
                ML_VERBOSE(10, ("Failed to create new modules."));
                ret = OMPI_ERROR;
                goto exit_ERROR;
            }

            if (pair->bcol_component->need_ordering) {
                topo->topo_ordering_info.num_bcols_need_ordering += pair->num_bcol_modules;
            }

            /* Append new network contexts to our memory management */
            ML_VERBOSE(10, ("Append new network contexts to our memory management."));
            if (OPAL_UNLIKELY(OMPI_SUCCESS != append_new_network_context(pair))) {
                ML_VERBOSE(10, ("Exit with error. - append new network context"));
                ret = OMPI_ERROR;
                goto exit_ERROR;
            }

            for (i = 0; i < pair->num_bcol_modules; ++i) {
                /* set the starting sequence number */
                pair->bcol_modules[i]->squence_number_offset =
                    mca_coll_ml_component.base_sequence_number;

                /* cache the sub-group size */
                pair->bcol_modules[i]->size_of_subgroup =
                    module->group_size;

                /* set the bcol id */
                pair->bcol_modules[i]->bcol_id = (int16_t) i_hier;

                /* Set bcol mode bits */
                topo->all_bcols_mode &= (( mca_bcol_base_module_t *) pair->bcol_modules[i])->supported_mode;
            }

            /*
             * set largest power of 2 for this group
             */
            module->n_levels_pow2 = ml_fls(module->group_size);
            module->pow_2 = 1 << module->n_levels_pow2;

            n_hier++;

            if (-1 == my_lowest_group_index) {
                my_lowest_group_index = i_hier;
            }

            my_highest_group_index = i_hier;
        }

        /* if n_remain is 1, and the communicator size is not 1, and module
        ** is not NULL, I am done
        */
        if ((1 == n_remain) && (1 < original_group_size) &&
            (NULL != module)) {
            i_am_done = 1;
        }

        /* am I done ? */
        if (1 == i_am_done) {
            /* nothing more to do */
            goto SelectionDone;
        }

        /* take the next element */
        sbgp_cli = (sbgp_base_component_keyval_t *) opal_list_get_next((opal_list_item_t *) sbgp_cli);
        bcol_cli = (mca_base_component_list_item_t *) opal_list_get_next((opal_list_item_t *) bcol_cli);

        /* if no processes were selected anywhere with this sbgp module don't bother
         * incrementing the hierarchy index. this resolves issues where (for example)
         * process binding is not enabled or supported. */
        if (global_n_procs_selected) {
            /* The way initialization is currently written *all* ranks MUST appear
             * in the first level (0) of the hierarchy. If any rank is not in the first
             * level then the calculation of gather/scatter offsets will be wrong.
             * NTH: DO NOT REMOVE this assert until this changes! */
            assert (i_hier || global_n_procs_selected == n_procs_in);
            i_hier++;
        }

        n_procs_in = n_remain;
    }

SelectionDone:

    if (topo->topo_ordering_info.num_bcols_need_ordering > 0) {
        for (j = 0; j < n_hier; ++j) {
            pair = &topo->component_pairs[j];
            if (pair->bcol_component->need_ordering) {
                for (i = 0; i < pair->num_bcol_modules; ++i) {
                    pair->bcol_modules[i]->next_inorder = &topo->topo_ordering_info.next_inorder;
                }
            }
        }
    }

    /* If I was not done, it means that we skipped all subgroups and no hierarchy was built */
    if (0 == i_am_done) {
        if (NULL != include_sbgp_name || NULL != exclude_sbgp_name) {
            /* The user explicitly asked for a specific type of topology, which generates empty groups */
            /* JMS You really should use opal_show_help() here;
               showing long error messages is *exactly* what
               opal_show_help() is for. */
            ML_ERROR(("ML topology configuration explicitly requested to %s subgroup %s. "
                      "Such a configuration results in the creation of empty groups. As a result, the ML framework can't "
                      "configure the requested collective operations. The ML framework will be disabled.",
                      NULL != include_sbgp_name ? "include only" : "exclude",
                      NULL != include_sbgp_name ? include_sbgp_name : exclude_sbgp_name
                      ));
            ret = OMPI_ERROR;
            goto exit_ERROR;
        }
        ML_VERBOSE(10, ("Empty hierarchy..."));
        ret = OMPI_SUCCESS;
        goto exit_ERROR;
    }

    topo->n_levels = n_hier;

    /* Find the lowest and highest index of the groups in this communicator.
    ** This will be needed in deciding where in the hierarchical collective
    ** sequence of calls these particular groups belong.
    ** It is done with one allreduce call to save allreduce overhead.
    */
    all_reduce_buffer2_in[0] = (short) my_lowest_group_index;
    all_reduce_buffer2_in[1] = (short) -my_highest_group_index;
    /* restore map to ranks for the original communicator */
    for (i = 0; i < ompi_comm_size(ml_module->comm); i++) {
        map_to_comm_ranks[i] = i;
    }

    ret = comm_allreduce_pml(all_reduce_buffer2_in, all_reduce_buffer2_out,
                             2, MPI_SHORT, ompi_comm_rank(ml_module->comm),
                             MPI_MIN, original_group_size,
                             map_to_comm_ranks, ml_module->comm);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
        ML_VERBOSE(10, ("comm_allreduce_pml failed. all_reduce_buffer2_in reduction"));
        goto exit_ERROR;
    }

    topo->global_lowest_hier_group_index = all_reduce_buffer2_out[0];
    topo->global_highest_hier_group_index = -all_reduce_buffer2_out[1];

    ML_VERBOSE(10, ("The lowest index and highest index were successfully found."));

    ML_VERBOSE(10, ("ml_discover_hierarchy done, n_levels %d lowest_group_index %d highest_group_index %d,"
                    " original_group_size %d my_lowest_group_index %d my_highest_group_index %d",
                    topo->n_levels, topo->global_lowest_hier_group_index,
                    topo->global_highest_hier_group_index,
                    original_group_size,
                    my_lowest_group_index,
                    my_highest_group_index));

    /*
     * setup detailed subgroup information
     */
    ret = ml_setup_full_tree_data(topo, ml_module->comm, my_highest_group_index,
                                  map_to_comm_ranks, &num_total_subgroups, &array_of_all_subgroup_ranks,
                                  &list_of_ranks_in_all_subgroups);

    if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
        ML_VERBOSE(10, ("ml_setup_full_tree_data failed with %d ", ret));
        goto exit_ERROR;
    }

    /* cache the ML hierarchical description on the tree */
    topo->number_of_all_subgroups = num_total_subgroups;
    topo->array_of_all_subgroups = array_of_all_subgroup_ranks;

    ml_init_k_nomial_trees(topo, list_of_ranks_in_all_subgroups, ompi_comm_rank(ml_module->comm));
    /* Set the route table if a known-root type of algorithm is used */
    if (COLL_ML_STATIC_BCAST == mca_coll_ml_component.bcast_algorithm) {
        ret = mca_coll_ml_fill_in_route_tab(topo, ml_module->comm);
        if (OMPI_SUCCESS != ret) {
            ML_ERROR(("mca_coll_ml_fill_in_route_tab returned an error."));
            goto exit_ERROR;
        }
    }

    /*
    ** If all ranks are selected, there will be a single rank that remains -
    ** the root of the last group. Check to make sure that all ranks are
    ** selected, and if not, return an error. We can't handle the collectives
    ** correctly with this module.
    */

exit_ERROR:

    ML_VERBOSE(10, ("Discovery done"));

    /* free temp resources */
    if (NULL != all_selected) {
        free(all_selected);
        all_selected = NULL;
    }

    if (NULL != copy_procs) {
        free(copy_procs);
        copy_procs = NULL;
    }

    if (NULL != map_to_comm_ranks) {
        free(map_to_comm_ranks);
        map_to_comm_ranks = NULL;
    }

    if (NULL != index_proc_selected) {
        free(index_proc_selected);
        index_proc_selected = NULL;
    }

    if (NULL != bcols_in_use) {
        free(bcols_in_use);
        bcols_in_use = NULL;
    }

    if (NULL != list_of_ranks_in_all_subgroups) {
        free(list_of_ranks_in_all_subgroups);
        list_of_ranks_in_all_subgroups = NULL;
    }

    return ret;
}

void mca_coll_ml_allreduce_matrix_init(mca_coll_ml_module_t *ml_module,
                                       const mca_bcol_base_component_2_0_0_t *bcol_component)
{
    int op, dt, et;

    for (op = 0; op < OMPI_OP_NUM_OF_TYPES; ++op) {
        for (dt = 0; dt < OMPI_DATATYPE_MAX_PREDEFINED; ++dt) {
            for (et = 0; et < BCOL_NUM_OF_ELEM_TYPES; ++et) {
                ml_module->allreduce_matrix[op][dt][et] =
                    bcol_component->coll_support(op, dt, et);
            }
        }
    }
}
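
/*
 * Sketch of how a dispatch path might consult the cached matrix (hypothetical
 * helper, not part of the component API): a (reduction op, datatype, element
 * type) tuple indexes the capability table, so the fallback decision becomes
 * a single lookup instead of a per-call component query.
 *
 *     static inline int allreduce_tuple_supported(mca_coll_ml_module_t *ml,
 *                                                 int op, int dt, int et)
 *     {
 *         return ml->allreduce_matrix[op][dt][et];
 *     }
 */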

int mca_coll_ml_fulltree_hierarchy_discovery(mca_coll_ml_module_t *ml_module,
                                             int n_hierarchies)
{
    return mca_coll_ml_tree_hierarchy_discovery(ml_module,
                                                &ml_module->topo_list[COLL_ML_HR_FULL],
                                                n_hierarchies, NULL, NULL);
}

int mca_coll_ml_allreduce_hierarchy_discovery(mca_coll_ml_module_t *ml_module,
                                              int n_hierarchies)
{
    mca_base_component_list_item_t *bcol_cli;
    const mca_bcol_base_component_2_0_0_t *bcol_component;

    sbgp_base_component_keyval_t *sbgp_cli;
    const mca_sbgp_base_component_2_0_0_t *sbgp_component;

    sbgp_cli = (sbgp_base_component_keyval_t *)
        opal_list_get_first(&mca_sbgp_base_components_in_use);

    for (bcol_cli = (mca_base_component_list_item_t *)
             opal_list_get_first(&mca_bcol_base_components_in_use);
         (opal_list_item_t *) bcol_cli !=
             opal_list_get_end(&mca_bcol_base_components_in_use);
         bcol_cli = (mca_base_component_list_item_t *)
             opal_list_get_next((opal_list_item_t *) bcol_cli),
             sbgp_cli = (sbgp_base_component_keyval_t *)
             opal_list_get_next((opal_list_item_t *) sbgp_cli)) {
        bcol_component = (mca_bcol_base_component_2_0_0_t *) bcol_cli->cli_component;
        if (NULL != bcol_component->coll_support_all_types &&
            !bcol_component->coll_support_all_types(BCOL_ALLREDUCE)) {
            mca_base_component_list_item_t *bcol_cli_next;
            const mca_bcol_base_component_2_0_0_t *bcol_component_next;

            bcol_cli_next = (mca_base_component_list_item_t *)
                opal_list_get_next((opal_list_item_t *) bcol_cli);

            mca_coll_ml_component.need_allreduce_support = true;
            mca_coll_ml_allreduce_matrix_init(ml_module, bcol_component);

            sbgp_component = (mca_sbgp_base_component_2_0_0_t *)
                sbgp_cli->component.cli_component;

            ML_VERBOSE(10, ("Topology build: sbgp %s will be excluded.",
                            sbgp_component->sbgp_version.mca_component_name));

            /* If no additional component supports all types => print a warning */
            if (1 == opal_list_get_size(&mca_bcol_base_components_in_use) ||
                (opal_list_item_t *) bcol_cli_next ==
                opal_list_get_end(&mca_bcol_base_components_in_use)) {
                /* JMS You really should use opal_show_help() here;
                   showing long error messages is *exactly* what
                   opal_show_help() is for. */
                ML_ERROR(("\n--------------------------------------------------------------------------------"
                          "The BCOL component %s doesn't support "
                          "all possible tuples (OPERATION X DATATYPE) for Allreduce "
                          "and you didn't provide an additional one for alternative topology building; "
                          "as a result ML won't run correctly and its behavior is undefined. "
                          "You should run this bcol with another one that supports all possible tuples, "
                          "\"--mca bcol_base_string %s,ptpcoll --mca sbgp_base_subgroups_string %s,p2p\" for example.",
                          bcol_component->bcol_version.mca_component_name,
                          bcol_component->bcol_version.mca_component_name,
                          sbgp_component->sbgp_version.mca_component_name));
            } else {
                bcol_component_next = (mca_bcol_base_component_2_0_0_t *)
                    bcol_cli_next->cli_component;

                if (NULL != bcol_component_next->coll_support_all_types &&
                    !bcol_component_next->coll_support_all_types(BCOL_ALLREDUCE)) {
                    /* JMS You really should use opal_show_help() here;
                       showing long error messages is *exactly* what
                       opal_show_help() is for. */
                    ML_ERROR(("\n--------------------------------------------------------------------------------"
                              "The BCOL component %s doesn't support "
                              "all possible tuples for Allreduce. "
                              "While you did provide an additional %s bcol component for alternative topology building, "
                              "this component also lacks support for all tuples. "
                              "As a result, ML Allreduce's behavior is undefined. "
                              "You must provide a component that supports all possible tuples, e.g. "
                              "\"--mca bcol_base_string %s,ptpcoll --mca sbgp_base_subgroups_string %s,p2p\"",
                              bcol_component->bcol_version.mca_component_name,
                              bcol_component_next->bcol_version.mca_component_name,
                              bcol_component->bcol_version.mca_component_name,
                              sbgp_component->sbgp_version.mca_component_name));
                }
            }

            return mca_coll_ml_tree_hierarchy_discovery(ml_module,
                                                        &ml_module->topo_list[COLL_ML_HR_ALLREDUCE],
                                                        n_hierarchies, sbgp_component->sbgp_version.mca_component_name, NULL);
        }
    }

    return OMPI_SUCCESS;
}
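
/*
 * For reference, the fallback suggested in the messages above amounts to
 * launching with an additional bcol/sbgp pair that covers all tuples, e.g.
 * (assuming a bcol named "foo" and a matching sbgp "foo_sbgp" that lack full
 * Allreduce support - both names are placeholders):
 *
 *     mpirun --mca bcol_base_string foo,ptpcoll \
 *            --mca sbgp_base_subgroups_string foo_sbgp,p2p ./app
 *
 * ptpcoll/p2p are the point-to-point fallbacks named in the error text.
 */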

int mca_coll_ml_fulltree_exclude_basesmsocket_hierarchy_discovery(mca_coll_ml_module_t *ml_module,
                                                                  int n_hierarchies)
{
    return mca_coll_ml_tree_hierarchy_discovery(ml_module,
                                                &ml_module->topo_list[COLL_ML_HR_NBS],
                                                n_hierarchies, "basesmsocket", NULL);
}

int mca_coll_ml_fulltree_ptp_only_hierarchy_discovery(mca_coll_ml_module_t *ml_module,
                                                      int n_hierarchies)
{
    return mca_coll_ml_tree_hierarchy_discovery(ml_module,
                                                &ml_module->topo_list[COLL_ML_HR_SINGLE_PTP],
                                                n_hierarchies, NULL, "p2p");
}

int mca_coll_ml_fulltree_iboffload_only_hierarchy_discovery(mca_coll_ml_module_t *ml_module,
                                                            int n_hierarchies)
{
    return mca_coll_ml_tree_hierarchy_discovery(ml_module,
                                                &ml_module->topo_list[COLL_ML_HR_SINGLE_IBOFFLOAD],
                                                n_hierarchies, NULL, "ibnet");
}

#define IS_REACHABLE 1
#define IS_NOT_REACHABLE -1

static int mca_coll_ml_fill_in_route_tab(mca_coll_ml_topology_t *topo, ompi_communicator_t *comm)
{
    int i, rc, level, comm_size = 0,
        my_rank = ompi_comm_rank(comm);

    int32_t **route_table = NULL;
    int32_t *all_reachable_ranks = NULL;

    struct ompi_proc_t **sbgp_procs = NULL;

    mca_sbgp_base_module_t *sbgp_group = NULL;
    comm_size = ompi_comm_size(comm);

    all_reachable_ranks = (int32_t *) malloc(comm_size * sizeof(int32_t));
    if (NULL == all_reachable_ranks) {
        ML_VERBOSE(10, ("Cannot allocate memory."));
        rc = OMPI_ERR_OUT_OF_RESOURCE;
        goto exit_ERROR;
    }

    for (i = 0; i < comm_size; ++i) {
        all_reachable_ranks[i] = IS_NOT_REACHABLE;
    }

    route_table = (int32_t **) calloc(topo->n_levels, sizeof(int32_t *));
    if (NULL == route_table) {
        ML_VERBOSE(10, ("Cannot allocate memory."));
        rc = OMPI_ERR_OUT_OF_RESOURCE;
        goto exit_ERROR;
    }

    topo->route_vector = (mca_bcol_base_route_info_t *)
        calloc(comm_size, sizeof(mca_bcol_base_route_info_t));
    if (NULL == topo->route_vector) {
        ML_VERBOSE(10, ("Cannot allocate memory."));
        rc = OMPI_ERR_OUT_OF_RESOURCE;
        goto exit_ERROR;
    }

    all_reachable_ranks[my_rank] = IS_REACHABLE;

    for (level = 0; level < topo->n_levels; ++level) {
        sbgp_group = topo->component_pairs[level].subgroup_module;

        route_table[level] = (int32_t *) malloc(comm_size * sizeof(int32_t));
        if (NULL == route_table[level]) {
            ML_VERBOSE(10, ("Cannot allocate memory."));
            rc = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit_ERROR;
        }

        for (i = 0; i < comm_size; ++i) {
            if (IS_NOT_REACHABLE != all_reachable_ranks[i]) {
                all_reachable_ranks[i] = sbgp_group->my_index;
            }
        }

        rc = comm_allreduce_pml(all_reachable_ranks,
                                route_table[level],
                                comm_size,
                                MPI_INT, sbgp_group->my_index,
                                MPI_MAX, sbgp_group->group_size,
                                sbgp_group->group_list,
                                comm);
        if (OMPI_SUCCESS != rc) {
            ML_VERBOSE(10, ("comm_allreduce_pml failed."));
            goto exit_ERROR;
        }

        for (i = 0; i < comm_size; ++i) {
            if (IS_NOT_REACHABLE !=
                route_table[level][i]) {
                all_reachable_ranks[i] = IS_REACHABLE;
            }
        }
    }

    assert(0 < level);

    /* If there are unreachable ranks =>
       reach them through the leader of my upper layer */
    for (i = 0; i < comm_size; ++i) {
        if (IS_NOT_REACHABLE ==
            route_table[level - 1][i]) {
            route_table[level - 1][i] = 0;
        }
    }

    free(all_reachable_ranks);
    /* guard against a double free on the error path */
    all_reachable_ranks = NULL;

    for (i = 0; i < comm_size; ++i) {
        for (level = 0; level < topo->n_levels; ++level) {
            if (IS_NOT_REACHABLE != route_table[level][i]) {
                topo->route_vector[i].level = level;
                topo->route_vector[i].rank = route_table[level][i];
                break;
            }
        }
    }
#if OPAL_ENABLE_DEBUG
|
|
|
|
#define COLL_ML_ROUTE_BUFF_SIZE (1024*1024)
|
2014-03-15 00:39:39 +04:00
|
|
|
/* Only bother creating the string if we're actually going to
|
|
|
|
print it out (i.e., if the verbose level is >= 10) */
|
|
|
|
if (mca_coll_ml_component.verbose >= 10) {
|
2012-08-16 23:11:35 +04:00
|
|
|
int ii, jj;
|
2014-03-15 00:39:39 +04:00
|
|
|
char *buff, *output;
|
2012-08-16 23:11:35 +04:00
|
|
|
|
2014-03-15 00:39:39 +04:00
|
|
|
output = buff = calloc(1, COLL_ML_ROUTE_BUFF_SIZE);
|
|
|
|
assert(NULL != output);
|
2012-08-16 23:11:35 +04:00
|
|
|
|
|
|
|
sprintf(output, "ranks: ");
|
|
|
|
|
|
|
|
output = buff + strlen(buff);
|
|
|
|
assert(COLL_ML_ROUTE_BUFF_SIZE + buff > output);
|
|
|
|
|
|
|
|
for(ii = 0; ii < comm_size; ++ii) {
|
|
|
|
sprintf(output, " %2d", ii);
|
|
|
|
|
|
|
|
output = buff + strlen(buff);
|
|
|
|
assert(COLL_ML_ROUTE_BUFF_SIZE + buff > output);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (ii = 0; ii < topo->n_levels; ++ii) {
|
|
|
|
sprintf(output, "\nlevel: %d ", ii);
|
|
|
|
|
|
|
|
output = buff + strlen(buff);
|
|
|
|
assert(COLL_ML_ROUTE_BUFF_SIZE + buff > output);
|
|
|
|
for(jj = 0; jj < comm_size; ++jj) {
|
|
|
|
sprintf(output, " %2d", route_table[ii][jj]);
|
|
|
|
|
|
|
|
output = buff + strlen(buff);
|
|
|
|
assert(COLL_ML_ROUTE_BUFF_SIZE + buff > output);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
sprintf(output, "\n\nThe vector is:\n============\nranks: ");
|
|
|
|
|
|
|
|
output = buff + strlen(buff);
|
|
|
|
assert(COLL_ML_ROUTE_BUFF_SIZE + buff > output);
|
|
|
|
|
|
|
|
for(ii = 0; ii < comm_size; ++ii) {
|
|
|
|
sprintf(output, " %6d", ii);
|
|
|
|
|
|
|
|
output = buff + strlen(buff);
|
|
|
|
assert(COLL_ML_ROUTE_BUFF_SIZE + buff > output);
|
|
|
|
}
|
|
|
|
|
|
|
|
sprintf(output, "\nlevel x rank: ");
|
|
|
|
|
|
|
|
output = buff + strlen(buff);
|
|
|
|
assert(COLL_ML_ROUTE_BUFF_SIZE + buff > output);
|
|
|
|
|
|
|
|
for(ii = 0; ii < comm_size; ++ii) {
|
|
|
|
sprintf(output, " (%d, %d)",
|
2014-01-22 19:39:19 +04:00
|
|
|
topo->route_vector[ii].level,
|
|
|
|
topo->route_vector[ii].rank);
|
2012-08-16 23:11:35 +04:00
|
|
|
|
|
|
|
output = buff + strlen(buff);
|
|
|
|
assert(COLL_ML_ROUTE_BUFF_SIZE + buff > output);
|
|
|
|
}
|
|
|
|
|
2014-03-18 16:24:32 +04:00
|
|
|
ML_VERBOSE(10, ("\nThe table is:\n============%s", buff));
|
2014-03-15 00:39:39 +04:00
|
|
|
free(buff);
|
2014-03-15 00:41:54 +04:00
|
|
|
}
|
2012-08-16 23:11:35 +04:00
|
|
|
#endif

    for (level = 0; level < topo->n_levels; ++level) {
        free(route_table[level]);
    }

    free(route_table);

    return OMPI_SUCCESS;

exit_ERROR:

    ML_VERBOSE(10, ("Exit with error status - %d.", rc));
    if (NULL != route_table) {
        for (level = 0; level < topo->n_levels; ++level) {
            if (NULL != route_table[level]) {
                free(route_table[level]);
            }
        }

        free(route_table);
    }

    if (NULL != sbgp_procs) {
        free(sbgp_procs);
    }

    if (NULL != all_reachable_ranks) {
        free(all_reachable_ranks);
    }

    return rc;
}

static void init_coll_func_pointers(mca_coll_ml_module_t *ml_module)
{
    mca_coll_base_module_2_0_0_t *coll_base = &ml_module->super;

    int iboffload_used =
        mca_coll_ml_check_if_bcol_is_used("iboffload", ml_module, COLL_ML_TOPO_MAX);

    /* initialize coll component function pointers */
    coll_base->coll_module_enable = ml_module_enable;
    coll_base->ft_event = NULL;

    if (mca_coll_ml_component.disable_allgather) {
        coll_base->coll_allgather = NULL;
        coll_base->coll_iallgather = NULL;
    } else {
        coll_base->coll_allgather = mca_coll_ml_allgather;
        coll_base->coll_iallgather = mca_coll_ml_allgather_nb;
    }

    coll_base->coll_allgatherv = NULL;

    if (mca_coll_ml_component.use_knomial_allreduce) {
        if (true == mca_coll_ml_component.need_allreduce_support) {
            coll_base->coll_allreduce = mca_coll_ml_allreduce_dispatch;
            coll_base->coll_iallreduce = mca_coll_ml_allreduce_dispatch_nb;
        } else {
            coll_base->coll_allreduce = mca_coll_ml_allreduce;
            coll_base->coll_iallreduce = mca_coll_ml_allreduce_nb;
        }
    } else {
        coll_base->coll_allreduce = NULL;
    }

    coll_base->coll_alltoall = NULL;
    coll_base->coll_ialltoall = NULL;

    coll_base->coll_alltoallv = NULL;
    coll_base->coll_alltoallw = NULL;

    coll_base->coll_barrier = mca_coll_ml_barrier_intra;

    /* Use the sequential broadcast */
    if (COLL_ML_SEQ_BCAST == mca_coll_ml_component.bcast_algorithm) {
        coll_base->coll_bcast = mca_coll_ml_bcast_sequential_root;
    } else {
        coll_base->coll_bcast = mca_coll_ml_parallel_bcast;
    }

    coll_base->coll_exscan = NULL;
    coll_base->coll_gather = NULL;
    /*
    coll_base->coll_gather = mca_coll_ml_gather;
    */
    /* Current iboffload/ptpcoll versions have no support for gather */
    if (iboffload_used ||
        mca_coll_ml_check_if_bcol_is_used("ptpcoll", ml_module, COLL_ML_TOPO_MAX)) {
        coll_base->coll_gather = NULL;
    }

    coll_base->coll_gatherv = NULL;
    if (mca_coll_ml_component.disable_reduce) {
        coll_base->coll_reduce = NULL;
    } else {
        coll_base->coll_reduce = mca_coll_ml_reduce;
    }
    coll_base->coll_reduce_scatter = NULL;
    coll_base->coll_scan = NULL;
    coll_base->coll_scatter = NULL;
#if 0
    coll_base->coll_scatter = mca_coll_ml_scatter_sequential;
#endif
    coll_base->coll_scatterv = NULL;

    coll_base->coll_iallgatherv = NULL;
    coll_base->coll_ialltoallv = NULL;
    coll_base->coll_ialltoallw = NULL;
    coll_base->coll_ibarrier = mca_coll_ml_ibarrier_intra;

    coll_base->coll_ibcast = mca_coll_ml_parallel_bcast_nb;
    coll_base->coll_iexscan = NULL;
    coll_base->coll_igather = NULL;
    coll_base->coll_igatherv = NULL;
    coll_base->coll_ireduce = mca_coll_ml_reduce_nb;
    coll_base->coll_ireduce_scatter = NULL;
    coll_base->coll_iscan = NULL;
    coll_base->coll_iscatter = NULL;
    coll_base->coll_iscatterv = NULL;
}
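
/* Note: a NULL entry above means coll/ml does not provide that operation;
 * the coll framework then falls back to whatever lower-priority component
 * offers it (those fallbacks are also captured explicitly in
 * ml_save_fallback_colls() below). */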

static int init_lists(mca_coll_ml_module_t *ml_module)
{
    mca_coll_ml_component_t *cs = &mca_coll_ml_component;
    int num_elements = cs->free_list_init_size;
    int max_elements = cs->free_list_max_size;
    int elements_per_alloc = cs->free_list_grow_size;
    size_t length_payload = 0;
    size_t length;
    int ret;

    /* initialize full message descriptors - moving this to the
     * module, as the fragment has resource requirements that
     * are communicator dependent */
    /* no data associated with the message descriptor */

    length = sizeof(mca_coll_ml_descriptor_t);
    ret = ompi_free_list_init_ex_new(&(ml_module->message_descriptors), length,
                                     opal_cache_line_size, OBJ_CLASS(mca_coll_ml_descriptor_t),
                                     length_payload, 0,
                                     num_elements, max_elements, elements_per_alloc,
                                     NULL,
                                     init_ml_message_desc, ml_module);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
        ML_ERROR(("ompi_free_list_init_ex_new exited with error"));
        return ret;
    }

    /* initialize fragment descriptors - always associate one fragment
     * descriptor with each full message descriptor, so that we can minimize
     * small message latency */

    /* create a free list of fragment descriptors */
    /*length_payload=sizeof(something);*/
    length = sizeof(mca_coll_ml_fragment_t);
    ret = ompi_free_list_init_ex_new(&(ml_module->fragment_descriptors), length,
                                     opal_cache_line_size, OBJ_CLASS(mca_coll_ml_fragment_t),
                                     length_payload, 0,
                                     num_elements, max_elements, elements_per_alloc,
                                     NULL,
                                     init_ml_fragment_desc, ml_module);
    if (OMPI_SUCCESS != ret) {
        ML_ERROR(("ompi_free_list_init_ex_new exited with error"));
        return ret;
    }

    return OMPI_SUCCESS;
}
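
/* Note on the two lists above: both are created with length_payload = 0,
 * i.e. the descriptors carry no inline payload. The initial size, the hard
 * cap, and the growth increment all come from the component's
 * free_list_init_size / free_list_max_size / free_list_grow_size
 * parameters, so message and fragment descriptors grow in lock step. */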

static int check_for_max_supported_ml_modules(struct ompi_communicator_t *comm)
{
    int i, ret;
    mca_coll_ml_component_t *cs = &mca_coll_ml_component;
    int *comm_ranks = NULL;

    comm_ranks = (int *)calloc(ompi_comm_size(comm), sizeof(int));
    if (OPAL_UNLIKELY(NULL == comm_ranks)) {
        ML_VERBOSE(10, ("Cannot allocate memory."));
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    for (i = 0; i < ompi_comm_size(comm); i++) {
        comm_ranks[i] = i;
    }

    ret = comm_allreduce_pml(&cs->max_comm, &cs->max_comm,
                             1, MPI_INT, ompi_comm_rank(comm),
                             MPI_MIN, ompi_comm_size(comm), comm_ranks,
                             comm);
    if (OMPI_SUCCESS != ret) {
        ML_ERROR(("comm_allreduce - failed to collect max_comm data"));
        free(comm_ranks);
        return ret;
    }

    if (0 >= cs->max_comm ||
        ompi_comm_size(comm) < cs->min_comm_size) {
        free(comm_ranks);
        return OMPI_ERROR;
    } else {
        --cs->max_comm;
    }

    free(comm_ranks);

    return OMPI_SUCCESS;
}
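
/* A worked example of the budget logic above (illustrative): every rank
 * contributes its remaining cs->max_comm and the MPI_MIN reduction keeps the
 * smallest value communicator-wide. If that minimum is still positive (and
 * the communicator is large enough), the shared budget is decremented by
 * one; once it reaches zero, ML declines to build a module for any further
 * communicator. */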

#if OPAL_ENABLE_DEBUG
#define DEBUG_ML_COMM_QUERY()                                                                      \
    do {                                                                                           \
        static int verbosity_level = 5;                                                            \
        static int module_num = 0;                                                                 \
        ML_VERBOSE(10, ("ML module - %p num %d for comm - %p, "                                    \
                        "comm size - %d, ML component prio - %d.",                                 \
                        ml_module, ++module_num, comm, ompi_comm_size(comm), *priority));          \
        /* For now I want to always print that we enter ML -                                       \
           in the past there was an issue where we did not enter ML and actually ran with tuned.   \
           Still, I do not want to print it for each module - only for the first. */               \
        ML_VERBOSE(verbosity_level, ("ML module - %p was successfully created", ml_module));       \
        verbosity_level = 10;                                                                      \
    } while(0)

#else
#define DEBUG_ML_COMM_QUERY()
#endif

static int mca_coll_ml_need_multi_topo(int bcol_collective)
{
    mca_base_component_list_item_t *bcol_cli;
    const mca_bcol_base_component_2_0_0_t *bcol_component;

    for (bcol_cli = (mca_base_component_list_item_t *)
             opal_list_get_first(&mca_bcol_base_components_in_use);
         (opal_list_item_t *) bcol_cli !=
             opal_list_get_end(&mca_bcol_base_components_in_use);
         bcol_cli = (mca_base_component_list_item_t *)
             opal_list_get_next((opal_list_item_t *) bcol_cli)) {
        bcol_component = (mca_bcol_base_component_2_0_0_t *) bcol_cli->cli_component;
        if (NULL != bcol_component->coll_support_all_types &&
            !bcol_component->coll_support_all_types(bcol_collective)) {
            return true;
        }
    }

    return false;
}
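
/* In other words: this returns true as soon as any bcol in use reports,
 * via its coll_support_all_types() callback, that it cannot run the given
 * collective for all data types; the caller then assigns a dedicated extra
 * topology for that collective (see setup_default_topology_map() below). */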

/* We may call this function ONLY AFTER algorithm initialization */
static int setup_bcast_table(mca_coll_ml_module_t *module)
{
    mca_coll_ml_component_t *cm = &mca_coll_ml_component;
    bool has_zero_copy;

    /* setup bcast index table */
    if (COLL_ML_STATIC_BCAST == cm->bcast_algorithm) {
        module->bcast_fn_index_table[0] = ML_BCAST_SMALL_DATA_KNOWN;

        has_zero_copy = !!(MCA_BCOL_BASE_ZERO_COPY &
                           module->coll_ml_bcast_functions[ML_BCAST_LARGE_DATA_KNOWN]->topo_info->all_bcols_mode);

        if (1 == cm->enable_fragmentation || (2 == cm->enable_fragmentation && !has_zero_copy)) {
            module->bcast_fn_index_table[1] = ML_BCAST_SMALL_DATA_KNOWN;
        } else if (!has_zero_copy) {
            /* JMS You really should use opal_show_help() here;
               showing long error messages is *exactly* what
               opal_show_help() is for. */
            ML_ERROR(("ML could not be used: the mca param coll_ml_enable_fragmentation "
                      "was set to zero and there is a bcol that does not support the zero copy method."));
            return OMPI_ERROR;
        } else {
            module->bcast_fn_index_table[1] = ML_BCAST_LARGE_DATA_KNOWN;
        }
    } else {
        module->bcast_fn_index_table[0] = ML_BCAST_SMALL_DATA_UNKNOWN;

        if (NULL == module->coll_ml_bcast_functions[ML_BCAST_LARGE_DATA_UNKNOWN]) {
            /* JMS You really should use opal_show_help() here;
               showing long error messages is *exactly* what
               opal_show_help() is for. */
            ML_ERROR(("ML could not be used: the mca param coll_ml_bcast_algorithm was not set "
                      "to static and no function is available."));
            return OMPI_ERROR;
        }

        has_zero_copy = !!(MCA_BCOL_BASE_ZERO_COPY &
                           module->coll_ml_bcast_functions[ML_BCAST_LARGE_DATA_UNKNOWN]->topo_info->all_bcols_mode);

        if (1 == cm->enable_fragmentation || (2 == cm->enable_fragmentation && !has_zero_copy)) {
            module->bcast_fn_index_table[1] = ML_BCAST_SMALL_DATA_UNKNOWN;
        } else if (!has_zero_copy) {
            /* JMS You really should use opal_show_help() here;
               showing long error messages is *exactly* what
               opal_show_help() is for. */
            ML_ERROR(("ML could not be used: the mca param coll_ml_enable_fragmentation "
                      "was set to zero and there is a bcol that does not support the zero copy method."));
            return OMPI_ERROR;
        } else {
            /* The topology supports zero copy and no fragmentation was requested */
            module->bcast_fn_index_table[1] = ML_BCAST_LARGE_DATA_UNKNOWN;
        }
    }

    return OMPI_SUCCESS;
}
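
/* Summary of the table filled in above: bcast_fn_index_table[0] selects the
 * small-message algorithm and [1] the large-message one.
 * coll_ml_enable_fragmentation behaves as: 1 - always fragment large
 * messages (reuse the small-data algorithm), 2 - fragment only when some
 * bcol lacks zero copy, otherwise - never fragment, which requires zero
 * copy support from every bcol in the topology. */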

static void ml_check_for_enabled_topologies (int map[][MCA_COLL_MAX_NUM_SUBTYPES], mca_coll_ml_topology_t *topo_list)
{
    int coll_i, st_i;
    for (coll_i = 0; coll_i < MCA_COLL_MAX_NUM_COLLECTIVES; coll_i++) {
        for (st_i = 0; st_i < MCA_COLL_MAX_NUM_SUBTYPES; st_i++) {
            if (map[coll_i][st_i] > -1) {
                /* The topology is used, so set it to enabled */
                assert(map[coll_i][st_i] <= COLL_ML_TOPO_MAX);
                topo_list[map[coll_i][st_i]].status = COLL_ML_TOPO_ENABLED;
            }
        }
    }
}

static void setup_default_topology_map(mca_coll_ml_module_t *ml_module)
{
    int i, j;
    for (i = 0; i < MCA_COLL_MAX_NUM_COLLECTIVES; i++) {
        for (j = 0; j < MCA_COLL_MAX_NUM_SUBTYPES; j++) {
            ml_module->collectives_topology_map[i][j] = -1;
        }
    }

    ml_module->collectives_topology_map[ML_BARRIER][ML_BARRIER_DEFAULT] = COLL_ML_HR_FULL;

    ml_module->collectives_topology_map[ML_BCAST][ML_BCAST_SMALL_DATA_KNOWN] = COLL_ML_HR_FULL;
    ml_module->collectives_topology_map[ML_BCAST][ML_BCAST_SMALL_DATA_UNKNOWN] = COLL_ML_HR_FULL;
    ml_module->collectives_topology_map[ML_BCAST][ML_BCAST_SMALL_DATA_SEQUENTIAL] = COLL_ML_HR_FULL;
    ml_module->collectives_topology_map[ML_BCAST][ML_BCAST_LARGE_DATA_KNOWN] = COLL_ML_HR_FULL;
    ml_module->collectives_topology_map[ML_BCAST][ML_BCAST_LARGE_DATA_UNKNOWN] = COLL_ML_HR_FULL;

    ml_module->collectives_topology_map[ML_ALLGATHER][ML_SMALL_DATA_ALLGATHER] = COLL_ML_HR_FULL;
    ml_module->collectives_topology_map[ML_ALLGATHER][ML_LARGE_DATA_ALLGATHER] = COLL_ML_HR_FULL;

    ml_module->collectives_topology_map[ML_GATHER][ML_SMALL_DATA_GATHER] = COLL_ML_HR_FULL;
    ml_module->collectives_topology_map[ML_GATHER][ML_LARGE_DATA_GATHER] = COLL_ML_HR_FULL;

    ml_module->collectives_topology_map[ML_ALLTOALL][ML_SMALL_DATA_ALLTOALL] = COLL_ML_HR_SINGLE_IBOFFLOAD;
    ml_module->collectives_topology_map[ML_ALLTOALL][ML_LARGE_DATA_ALLTOALL] = COLL_ML_HR_SINGLE_IBOFFLOAD;

    ml_module->collectives_topology_map[ML_ALLREDUCE][ML_SMALL_DATA_ALLREDUCE] = COLL_ML_HR_FULL;
    ml_module->collectives_topology_map[ML_ALLREDUCE][ML_LARGE_DATA_ALLREDUCE] = COLL_ML_HR_FULL;

    if (mca_coll_ml_need_multi_topo(BCOL_ALLREDUCE)) {
        ml_module->collectives_topology_map[ML_ALLREDUCE][ML_SMALL_DATA_EXTRA_TOPO_ALLREDUCE] = COLL_ML_HR_ALLREDUCE;
        ml_module->collectives_topology_map[ML_ALLREDUCE][ML_LARGE_DATA_EXTRA_TOPO_ALLREDUCE] = COLL_ML_HR_ALLREDUCE;
    }

    ml_module->collectives_topology_map[ML_REDUCE][ML_SMALL_DATA_REDUCE] = COLL_ML_HR_FULL;
    ml_module->collectives_topology_map[ML_REDUCE][ML_LARGE_DATA_REDUCE] = COLL_ML_HR_FULL;

    ml_module->collectives_topology_map[ML_SCATTER][ML_SCATTER_SMALL_DATA_KNOWN] = COLL_ML_HR_FULL;
    ml_module->collectives_topology_map[ML_SCATTER][ML_SCATTER_N_DATASIZE_BINS] = COLL_ML_HR_FULL;
    ml_module->collectives_topology_map[ML_SCATTER][ML_SCATTER_SMALL_DATA_UNKNOWN] = COLL_ML_HR_FULL;
    ml_module->collectives_topology_map[ML_SCATTER][ML_SCATTER_SMALL_DATA_SEQUENTIAL] = COLL_ML_HR_FULL;
}
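
/* Reminder: each map entry holds a topology index for a (collective,
 * subtype) pair; -1, set by the loops at the top, means "no topology
 * assigned" and is what ml_check_for_enabled_topologies() tests against. */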

#define GET_CF(I, J) (&mca_coll_ml_component.coll_config[I][J])

static void load_cached_config(mca_coll_ml_module_t *ml_module)
{
    int c_idx, m_idx, alg;
    per_collective_configuration_t *cf = NULL;

    for (c_idx = 0; c_idx < ML_NUM_OF_FUNCTIONS; c_idx++) {
        for (m_idx = 0; m_idx < ML_NUM_MSG; m_idx++) {
            cf = GET_CF(c_idx, m_idx);
            /* load topology tunings */
            if (ML_UNDEFINED != cf->topology_id &&
                ML_UNDEFINED != cf->algorithm_id) {
                alg = cf->algorithm_id;
                ml_module->collectives_topology_map[c_idx][alg] =
                    cf->topology_id;
            }
        }
    }
}

/* Pasha: In the future I would suggest converting this configuration to a more
   sophisticated mca parameter or even a configuration file. At this stage of the
   project it is set statically; later we will change it to a run-time parameter. */
static void setup_topology_coll_map(mca_coll_ml_module_t *ml_module)
{
    /* Load default topology setup */
    setup_default_topology_map(ml_module);

    /* Load configuration file */
    load_cached_config(ml_module);

    ml_check_for_enabled_topologies(ml_module->collectives_topology_map, ml_module->topo_list);
}

/* query to see if the module is available for use on the given
 * communicator, and if so, what its priority is. This is where
 * the backing shared-memory file is created.
 */
mca_coll_base_module_t *
mca_coll_ml_comm_query(struct ompi_communicator_t *comm, int *priority)
{
    /* local variables */
    int ret = OMPI_SUCCESS;

    mca_coll_ml_module_t *ml_module = NULL;
    mca_coll_ml_component_t *cs = &mca_coll_ml_component;
    bool iboffload_was_requested = mca_coll_ml_check_if_bcol_is_requested("iboffload");

    ML_VERBOSE(10, ("ML comm query start."));

    /**
     * No support for inter-communicators yet.
     */
    if (OMPI_COMM_IS_INTER(comm)) {
        *priority = -1;
        return NULL;
    }

    /**
     * If it is an intra-communicator with size less than 2, we have specialized
     * modules to handle the intra collective communications.
     */
    if (OMPI_COMM_IS_INTRA(comm) && ompi_comm_size(comm) < 2) {
        ML_VERBOSE(10, ("Intra-communicator with size less than 2."));
        *priority = -1;
        return NULL;
    }

    /**
     * In the current implementation we limit the number of supported ML modules
     * when the iboffload component was requested.
     */
    if (iboffload_was_requested) {
        ret = check_for_max_supported_ml_modules(comm);
        if (OMPI_SUCCESS != ret) {
            /* We have nothing to clean up yet, so just return NULL */
            ML_VERBOSE(10, ("check_for_max_supported_ml_modules returns ERROR, return NULL"));
            *priority = -1;
            return NULL;
        }
    }

    ML_VERBOSE(10, ("Create ML module start."));

    /* allocate and initialize an ml module */
    ml_module = OBJ_NEW(mca_coll_ml_module_t);
    if (NULL == ml_module) {
        return NULL;
    }

    /* Get our priority */
    *priority = cs->ml_priority;

    /** Set initial ML values **/
    ml_module->comm = comm;
    /* set the starting sequence number */
    ml_module->collective_sequence_num = cs->base_sequence_number;
    ml_module->no_data_collective_sequence_num = cs->base_sequence_number;
    /* initialize the size of the largest collective communication description */
    ml_module->max_fn_calls = 0;

#ifdef NEW_LEADER_SELECTION
    coll_ml_construct_resource_graphs(ml_module);
#endif

    /* Set topology - function map */
    setup_topology_coll_map(ml_module);

    /**
     * This is the core of the function:
     * setup the communicator hierarchy - the ml component is available for
     * caching information about the sbgp modules selected.
     */
    ret = ml_discover_hierarchy(ml_module);
    if (OMPI_SUCCESS != ret) {
        ML_VERBOSE(1, ("ml_discover_hierarchy exited with error."));
        goto CLEANUP;
    }

    /* gvm Disabled for debugging */
    ret = mca_coll_ml_build_filtered_fn_table(ml_module);
    if (OMPI_SUCCESS != ret) {
        ML_VERBOSE(1, ("mca_coll_ml_build_filtered_fn_table returned an error."));
        goto CLEANUP;
    }

    /* Generate active bcols list */
    generate_active_bcols_list(ml_module);

    /* setup collective schedules - note that a given bcol may have more than
       one module instantiated. We may want to use the same collective
       capabilities over more than one set of procs. Each module will store
       the relevant information for a given set of procs */
    ML_VERBOSE(10, ("Call for setup schedule."));
    ret = ml_coll_schedule_setup(ml_module);
    if (OMPI_SUCCESS != ret) {
        ML_VERBOSE(1, ("ml_coll_schedule_setup exited with error"));
        goto CLEANUP;
    }

    /* Setup bcast table */
    ML_VERBOSE(10, ("Setup bcast table"));
    ret = setup_bcast_table(ml_module);
    if (OMPI_SUCCESS != ret) {
        ML_VERBOSE(1, ("setup_bcast_table exited with error"));
        goto CLEANUP;
    }

    ML_VERBOSE(10, ("Setup pointers to collectives calls."));
    init_coll_func_pointers(ml_module);

    ML_VERBOSE(10, ("Setup free lists"));
    ret = init_lists(ml_module);
    if (OMPI_SUCCESS != ret) {
        goto CLEANUP;
    }

    DEBUG_ML_COMM_QUERY();

    /* Compute the Bruck's buffer constant -- temp buffer requirements */
    {
        int comm_size = ompi_comm_size(comm);
        int count = 1, log_comm_size = 0;

        /* compute the ceiling of log2(comm_size) */
        while (count < comm_size) {
            count = count << 1;
            log_comm_size++;
        }

        ml_module->brucks_buffer_threshold_const =
            (comm_size / 2 + comm_size % 2) * (log_comm_size);

        ml_module->log_comm_size = log_comm_size;
    }
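
    /* Worked example (illustrative): for comm_size = 6 the loop above gives
     * log_comm_size = 3 (count: 1 -> 2 -> 4 -> 8), so the threshold constant
     * is (6/2 + 6%2) * 3 = (3 + 0) * 3 = 9 temp-buffer slots. */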

    if (iboffload_was_requested) {
        /* HACK: Call the memory sync barrier first to make sure
         * that iboffload creates the qps for the service barrier in the right
         * order, otherwise we may have a deadlock and really nasty data
         * corruptions. If you plan to remove this one - please talk to me
         * first.
         * Pasha.
         !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
         Work around for a deadlock caused by connection setup
         for the async service barrier. The async service barrier uses its own
         set of MQs and QPs _BUT_ the exchange operation uses the MQ that is
         used for the primary set of collective operations like Allgather,
         Barrier, etc. As a result the exchange wait operation could be pushed
         to the primary MQ and cause a deadlock.
         !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
         Create connections for the service barrier and the memory address
         exchange for the ml buffers and the async service barrier
         */
        ret = mca_coll_ml_memsync_intra(ml_module, 0);
        if (OMPI_SUCCESS != ret) {
            goto CLEANUP;
        }
        opal_progress();
    }

    /* The module is ready */
    ml_module->initialized = true;

    return &(ml_module->super);

CLEANUP:
    /* Vasily: RLG: Need to clean up the free lists */
    if (NULL != ml_module) {
        OBJ_RELEASE(ml_module);
    }

    return NULL;
}

/* copied slightly modified from coll/hcoll */
#define ML_SAVE_FALLBACK(_coll_ml, _coll)                                                        \
    do {                                                                                         \
        _coll_ml->fallback.coll_ ## _coll = comm->c_coll.coll_ ## _coll;                         \
        _coll_ml->fallback.coll_ ## _coll ## _module = comm->c_coll.coll_ ## _coll ## _module;   \
        if (comm->c_coll.coll_ ## _coll && comm->c_coll.coll_ ## _coll ## _module) {             \
            OBJ_RETAIN(_coll_ml->fallback.coll_ ## _coll ## _module);                            \
        }                                                                                        \
    } while(0)

static void ml_save_fallback_colls (mca_coll_ml_module_t *coll_ml,
                                    struct ompi_communicator_t *comm)
{
    memset (&coll_ml->fallback, 0, sizeof (coll_ml->fallback));
    /* save lower-priority collectives to handle cases not yet handled
     * by coll/ml */
    ML_SAVE_FALLBACK(coll_ml, allreduce);
    ML_SAVE_FALLBACK(coll_ml, allgather);
    ML_SAVE_FALLBACK(coll_ml, reduce);
    ML_SAVE_FALLBACK(coll_ml, bcast);
    ML_SAVE_FALLBACK(coll_ml, iallreduce);
    ML_SAVE_FALLBACK(coll_ml, iallgather);
    ML_SAVE_FALLBACK(coll_ml, ireduce);
    ML_SAVE_FALLBACK(coll_ml, ibcast);
}
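
/* For reference, ML_SAVE_FALLBACK(coll_ml, bcast) expands to stashing
 * comm->c_coll.coll_bcast and comm->c_coll.coll_bcast_module in
 * coll_ml->fallback and retaining the module, so coll/ml can later invoke
 * the lower-priority implementation for cases it does not handle itself. */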

/*
 * Init module on the communicator
 */
static int
ml_module_enable(mca_coll_base_module_t *module,
                 struct ompi_communicator_t *comm)
{
    /* local variables */
    char output_buffer[2 * MPI_MAX_OBJECT_NAME];

    ml_save_fallback_colls ((mca_coll_ml_module_t *) module, comm);

    memset(&output_buffer[0], 0, sizeof(output_buffer));
    snprintf(output_buffer, sizeof(output_buffer), "%s (cid %d)", comm->c_name,
             comm->c_contextid);

    ML_VERBOSE(10, ("coll:ml:enable: new communicator: %s.", output_buffer));

    /* All done */
    return OMPI_SUCCESS;
}

OBJ_CLASS_INSTANCE(mca_coll_ml_module_t,
                   mca_coll_base_module_t,
                   mca_coll_ml_module_construct,
                   mca_coll_ml_module_destruct);

OBJ_CLASS_INSTANCE(mca_coll_ml_collective_operation_progress_t,
                   ompi_request_t,
                   mca_coll_ml_collective_operation_progress_construct,
                   mca_coll_ml_collective_operation_progress_destruct);