
Rename the base header file containing the prototypes of the collective functions.
This commit is contained in:
George Bosilca 2015-02-15 14:47:27 -05:00
parent 8fbcdf685d
commit aa019e239e
21 changed files with 1400 additions and 3323 deletions

View file

@@ -2,7 +2,7 @@
 # Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 #                         University Research and Technology
 #                         Corporation. All rights reserved.
-# Copyright (c) 2004-2005 The University of Tennessee and The University
+# Copyright (c) 2004-2015 The University of Tennessee and The University
 #                         of Tennessee Research Foundation. All rights
 #                         reserved.
 # Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@@ -20,10 +20,25 @@ dist_ompidata_DATA = base/help-mca-coll-base.txt
 headers += \
         base/base.h \
-        base/coll_tags.h
+        base/coll_tags.h \
+        base/coll_base_topo.h \
+        base/coll_base_util.h
 
 libmca_coll_la_SOURCES += \
         base/coll_base_comm_select.c \
         base/coll_base_comm_unselect.c \
         base/coll_base_find_available.c \
-        base/coll_base_frame.c
+        base/coll_base_frame.c \
+        base/coll_base_bcast.c \
+        base/coll_base_scatter.c \
+        base/coll_base_topo.c \
+        base/coll_base_allgather.c \
+        base/coll_base_allgatherv.c \
+        base/coll_base_util.c \
+        base/coll_base_allreduce.c \
+        base/coll_base_alltoall.c \
+        base/coll_base_gather.c \
+        base/coll_base_alltoallv.c \
+        base/coll_base_reduce.c \
+        base/coll_base_barrier.c \
+        base/coll_base_reduce_scatter.c

View file

@@ -1,558 +0,0 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2009 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2008 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef MCA_COLL_TUNED_EXPORT_H
#define MCA_COLL_TUNED_EXPORT_H
#include "ompi_config.h"
#include "mpi.h"
#include "opal/mca/mca.h"
#include "ompi/mca/coll/coll.h"
#include "ompi/request/request.h"
/* need to include our own topo prototypes so we can malloc data on the comm correctly */
#include "coll_tuned_topo.h"
/* also need the dynamic rule structures */
#include "coll_tuned_dynamic_rules.h"
/* some fixed value index vars to simplify certain operations */
typedef enum COLLTYPE {
    ALLGATHER = 0,   /*  0 */
    ALLGATHERV,      /*  1 */
    ALLREDUCE,       /*  2 */
    ALLTOALL,        /*  3 */
    ALLTOALLV,       /*  4 */
    ALLTOALLW,       /*  5 */
    BARRIER,         /*  6 */
    BCAST,           /*  7 */
    EXSCAN,          /*  8 */
    GATHER,          /*  9 */
    GATHERV,         /* 10 */
    REDUCE,          /* 11 */
    REDUCESCATTER,   /* 12 */
    SCAN,            /* 13 */
    SCATTER,         /* 14 */
    SCATTERV,        /* 15 */
    COLLCOUNT        /* 16 end counter, keep it as last element */
} COLLTYPE_T;
/* defined arg lists to simplify auto inclusion of user overriding decision functions */
#define ALLGATHER_ARGS void *sbuf, int scount, struct ompi_datatype_t *sdtype, void *rbuf, int rcount, struct ompi_datatype_t *rdtype, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define ALLGATHERV_ARGS void *sbuf, int scount, struct ompi_datatype_t *sdtype, void * rbuf, int *rcounts, int *disps, struct ompi_datatype_t *rdtype, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define ALLREDUCE_ARGS void *sbuf, void *rbuf, int count, struct ompi_datatype_t *dtype, struct ompi_op_t *op, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define ALLTOALL_ARGS void *sbuf, int scount, struct ompi_datatype_t *sdtype, void* rbuf, int rcount, struct ompi_datatype_t *rdtype, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define ALLTOALLV_ARGS void *sbuf, int *scounts, int *sdisps, struct ompi_datatype_t *sdtype, void *rbuf, int *rcounts, int *rdisps, struct ompi_datatype_t *rdtype, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define ALLTOALLW_ARGS void *sbuf, int *scounts, int *sdisps, struct ompi_datatype_t **sdtypes, void *rbuf, int *rcounts, int *rdisps, struct ompi_datatype_t **rdtypes, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define BARRIER_ARGS struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define BCAST_ARGS void *buff, int count, struct ompi_datatype_t *datatype, int root, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define EXSCAN_ARGS void *sbuf, void *rbuf, int count, struct ompi_datatype_t *dtype, struct ompi_op_t *op, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define GATHER_ARGS void *sbuf, int scount, struct ompi_datatype_t *sdtype, void *rbuf, int rcount, struct ompi_datatype_t *rdtype, int root, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define GATHERV_ARGS void *sbuf, int scount, struct ompi_datatype_t *sdtype, void *rbuf, int *rcounts, int *disps, struct ompi_datatype_t *rdtype, int root, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define REDUCE_ARGS void *sbuf, void* rbuf, int count, struct ompi_datatype_t *dtype, struct ompi_op_t *op, int root, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define REDUCESCATTER_ARGS void *sbuf, void *rbuf, int *rcounts, struct ompi_datatype_t *dtype, struct ompi_op_t *op, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define SCAN_ARGS void *sbuf, void *rbuf, int count, struct ompi_datatype_t *dtype, struct ompi_op_t *op, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define SCATTER_ARGS void *sbuf, int scount, struct ompi_datatype_t *sdtype, void *rbuf, int rcount, struct ompi_datatype_t *rdtype, int root, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define SCATTERV_ARGS void *sbuf, int *scounts, int *disps, struct ompi_datatype_t *sdtype, void* rbuf, int rcount, struct ompi_datatype_t *rdtype, int root, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
/* end defined arg lists to simplify auto inclusion of user overriding decision functions */
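As an illustration of what these macros buy in the prototypes below, the one-line declaration "int ompi_coll_tuned_barrier_intra_bruck(BARRIER_ARGS);" expands to the full signature:

int ompi_coll_tuned_barrier_intra_bruck(struct ompi_communicator_t *comm,
                                        mca_coll_base_module_t *module);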
BEGIN_C_DECLS
/* these are the same across all modules and are loaded at component query time */
extern int ompi_coll_tuned_stream;
extern int ompi_coll_tuned_priority;
extern int ompi_coll_tuned_preallocate_memory_comm_size_limit;
extern bool ompi_coll_tuned_use_dynamic_rules;
extern char* ompi_coll_tuned_dynamic_rules_filename;
extern int ompi_coll_tuned_init_tree_fanout;
extern int ompi_coll_tuned_init_chain_fanout;
extern int ompi_coll_tuned_init_max_requests;
extern int ompi_coll_tuned_alltoall_small_msg;
extern int ompi_coll_tuned_alltoall_intermediate_msg;
/* forced algorithm choices */
/* this structure is for storing the indexes to the forced algorithm mca params... */
/* we get these at component query (so that registered values appear in ompi_info) */
struct coll_tuned_force_algorithm_mca_param_indices_t {
    int algorithm_param_index;    /* which algorithm you want to force */
    int segsize_param_index;      /* segsize to use (if supported), 0 = no segmentation */
    int tree_fanout_param_index;  /* tree fanout/in to use */
    int chain_fanout_param_index; /* K-chain fanout/in to use */
    int max_requests_param_index; /* Maximum number of outstanding send or recv requests */
};
typedef struct coll_tuned_force_algorithm_mca_param_indices_t coll_tuned_force_algorithm_mca_param_indices_t;
/* the following type is for storing the actual values obtained from the MCA on each tuned module */
/* via their mca param indices lookup in the component */
/* this structure is stored once per collective type per communicator... */
struct coll_tuned_force_algorithm_params_t {
    int algorithm;    /* which algorithm you want to force */
    int segsize;      /* segsize to use (if supported), 0 = no segmentation */
    int tree_fanout;  /* tree fanout/in to use */
    int chain_fanout; /* K-chain fanout/in to use */
    int max_requests; /* Maximum number of outstanding send or recv requests */
};
typedef struct coll_tuned_force_algorithm_params_t coll_tuned_force_algorithm_params_t;
/* the indices to the MCA params so that modules can look them up at open / comm create time */
extern coll_tuned_force_algorithm_mca_param_indices_t ompi_coll_tuned_forced_params[COLLCOUNT];
/* the actual max algorithm values (readonly), loaded at component open */
extern int ompi_coll_tuned_forced_max_algorithms[COLLCOUNT];
/*
* coll API functions
*/
/* API functions */
int ompi_coll_tuned_init_query(bool enable_progress_threads,
bool enable_mpi_threads);
mca_coll_base_module_t *
ompi_coll_tuned_comm_query(struct ompi_communicator_t *comm, int *priority);
/* API functions of decision functions and any implementations */
/*
 * Note this gets long, as we have to have a prototype for each
 * MPI collective 4 times: 2 for the comm type and 2 for each decision
 * type.
 * We might cut down the decision prototypes by conditional compiling.
 */
/* All Gather */
int ompi_coll_tuned_allgather_intra_dec_fixed(ALLGATHER_ARGS);
int ompi_coll_tuned_allgather_intra_dec_dynamic(ALLGATHER_ARGS);
int ompi_coll_tuned_allgather_intra_do_forced(ALLGATHER_ARGS);
int ompi_coll_tuned_allgather_intra_do_this(ALLGATHER_ARGS, int algorithm, int faninout, int segsize);
int ompi_coll_tuned_allgather_intra_check_forced_init(coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices);
int ompi_coll_tuned_allgather_intra_bruck(ALLGATHER_ARGS);
int ompi_coll_tuned_allgather_intra_recursivedoubling(ALLGATHER_ARGS);
int ompi_coll_tuned_allgather_intra_ring(ALLGATHER_ARGS);
int ompi_coll_tuned_allgather_intra_neighborexchange(ALLGATHER_ARGS);
int ompi_coll_tuned_allgather_intra_basic_linear(ALLGATHER_ARGS);
int ompi_coll_tuned_allgather_intra_two_procs(ALLGATHER_ARGS);
int ompi_coll_tuned_allgather_inter_dec_fixed(ALLGATHER_ARGS);
int ompi_coll_tuned_allgather_inter_dec_dynamic(ALLGATHER_ARGS);
/* All GatherV */
int ompi_coll_tuned_allgatherv_intra_dec_fixed(ALLGATHERV_ARGS);
int ompi_coll_tuned_allgatherv_intra_dec_dynamic(ALLGATHERV_ARGS);
int ompi_coll_tuned_allgatherv_intra_do_forced(ALLGATHERV_ARGS);
int ompi_coll_tuned_allgatherv_intra_do_this(ALLGATHERV_ARGS, int algorithm, int faninout, int segsize);
int ompi_coll_tuned_allgatherv_intra_check_forced_init(coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices);
int ompi_coll_tuned_allgatherv_intra_bruck(ALLGATHERV_ARGS);
int ompi_coll_tuned_allgatherv_intra_ring(ALLGATHERV_ARGS);
int ompi_coll_tuned_allgatherv_intra_neighborexchange(ALLGATHERV_ARGS);
int ompi_coll_tuned_allgatherv_intra_basic_default(ALLGATHERV_ARGS);
int ompi_coll_tuned_allgatherv_intra_two_procs(ALLGATHERV_ARGS);
int ompi_coll_tuned_allgatherv_inter_dec_fixed(ALLGATHERV_ARGS);
int ompi_coll_tuned_allgatherv_inter_dec_dynamic(ALLGATHERV_ARGS);
/* All Reduce */
int ompi_coll_tuned_allreduce_intra_dec_fixed(ALLREDUCE_ARGS);
int ompi_coll_tuned_allreduce_intra_dec_dynamic(ALLREDUCE_ARGS);
int ompi_coll_tuned_allreduce_intra_do_forced(ALLREDUCE_ARGS);
int ompi_coll_tuned_allreduce_intra_do_this(ALLREDUCE_ARGS, int algorithm, int faninout, int segsize);
int ompi_coll_tuned_allreduce_intra_check_forced_init (coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices);
int ompi_coll_tuned_allreduce_intra_nonoverlapping(ALLREDUCE_ARGS);
int ompi_coll_tuned_allreduce_intra_recursivedoubling(ALLREDUCE_ARGS);
int ompi_coll_tuned_allreduce_intra_ring(ALLREDUCE_ARGS);
int ompi_coll_tuned_allreduce_intra_ring_segmented(ALLREDUCE_ARGS, uint32_t segsize);
int ompi_coll_tuned_allreduce_intra_basic_linear(ALLREDUCE_ARGS);
int ompi_coll_tuned_allreduce_inter_dec_fixed(ALLREDUCE_ARGS);
int ompi_coll_tuned_allreduce_inter_dec_dynamic(ALLREDUCE_ARGS);
/* AlltoAll */
int ompi_coll_tuned_alltoall_intra_dec_fixed(ALLTOALL_ARGS);
int ompi_coll_tuned_alltoall_intra_dec_dynamic(ALLTOALL_ARGS);
int ompi_coll_tuned_alltoall_intra_do_forced(ALLTOALL_ARGS);
int ompi_coll_tuned_alltoall_intra_do_this(ALLTOALL_ARGS, int algorithm, int faninout, int segsize, int max_requests);
int ompi_coll_tuned_alltoall_intra_check_forced_init (coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices);
int ompi_coll_tuned_alltoall_intra_pairwise(ALLTOALL_ARGS);
int ompi_coll_tuned_alltoall_intra_bruck(ALLTOALL_ARGS);
int ompi_coll_tuned_alltoall_intra_basic_linear(ALLTOALL_ARGS);
int ompi_coll_tuned_alltoall_intra_linear_sync(ALLTOALL_ARGS, int max_requests);
int ompi_coll_tuned_alltoall_intra_two_procs(ALLTOALL_ARGS);
int ompi_coll_tuned_alltoall_inter_dec_fixed(ALLTOALL_ARGS);
int ompi_coll_tuned_alltoall_inter_dec_dynamic(ALLTOALL_ARGS);
/* AlltoAllV */
int ompi_coll_tuned_alltoallv_intra_dec_fixed(ALLTOALLV_ARGS);
int ompi_coll_tuned_alltoallv_intra_dec_dynamic(ALLTOALLV_ARGS);
int ompi_coll_tuned_alltoallv_intra_do_forced(ALLTOALLV_ARGS);
int ompi_coll_tuned_alltoallv_intra_do_this(ALLTOALLV_ARGS, int algorithm);
int ompi_coll_tuned_alltoallv_intra_check_forced_init(coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices);
int ompi_coll_tuned_alltoallv_intra_pairwise(ALLTOALLV_ARGS);
int ompi_coll_tuned_alltoallv_intra_basic_linear(ALLTOALLV_ARGS);
int ompi_coll_tuned_alltoallv_inter_dec_fixed(ALLTOALLV_ARGS);
int ompi_coll_tuned_alltoallv_inter_dec_dynamic(ALLTOALLV_ARGS);
/* AlltoAllW */
int ompi_coll_tuned_alltoallw_intra_dec_fixed(ALLTOALLW_ARGS);
int ompi_coll_tuned_alltoallw_intra_dec_dynamic(ALLTOALLW_ARGS);
int ompi_coll_tuned_alltoallw_inter_dec_fixed(ALLTOALLW_ARGS);
int ompi_coll_tuned_alltoallw_inter_dec_dynamic(ALLTOALLW_ARGS);
/* Barrier */
int ompi_coll_tuned_barrier_intra_dec_fixed(BARRIER_ARGS);
int ompi_coll_tuned_barrier_intra_dec_dynamic(BARRIER_ARGS);
int ompi_coll_tuned_barrier_intra_do_forced(BARRIER_ARGS);
int ompi_coll_tuned_barrier_intra_do_this(BARRIER_ARGS, int algorithm, int faninout, int segsize);
int ompi_coll_tuned_barrier_intra_check_forced_init (coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices);
int ompi_coll_tuned_barrier_inter_dec_fixed(BARRIER_ARGS);
int ompi_coll_tuned_barrier_inter_dec_dynamic(BARRIER_ARGS);
int ompi_coll_tuned_barrier_intra_doublering(BARRIER_ARGS);
int ompi_coll_tuned_barrier_intra_recursivedoubling(BARRIER_ARGS);
int ompi_coll_tuned_barrier_intra_bruck(BARRIER_ARGS);
int ompi_coll_tuned_barrier_intra_two_procs(BARRIER_ARGS);
int ompi_coll_tuned_barrier_intra_linear(BARRIER_ARGS);
int ompi_coll_tuned_barrier_intra_tree(BARRIER_ARGS);
/* Bcast */
int ompi_coll_tuned_bcast_intra_generic( BCAST_ARGS, uint32_t count_by_segment, ompi_coll_tree_t* tree );
int ompi_coll_tuned_bcast_intra_dec_fixed(BCAST_ARGS);
int ompi_coll_tuned_bcast_intra_dec_dynamic(BCAST_ARGS);
int ompi_coll_tuned_bcast_intra_do_forced(BCAST_ARGS);
int ompi_coll_tuned_bcast_intra_do_this(BCAST_ARGS, int algorithm, int faninout, int segsize);
int ompi_coll_tuned_bcast_intra_check_forced_init (coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices);
int ompi_coll_tuned_bcast_intra_basic_linear(BCAST_ARGS);
int ompi_coll_tuned_bcast_intra_chain(BCAST_ARGS, uint32_t segsize, int32_t chains);
int ompi_coll_tuned_bcast_intra_pipeline(BCAST_ARGS, uint32_t segsize);
int ompi_coll_tuned_bcast_intra_binomial(BCAST_ARGS, uint32_t segsize);
int ompi_coll_tuned_bcast_intra_bintree(BCAST_ARGS, uint32_t segsize);
int ompi_coll_tuned_bcast_intra_split_bintree(BCAST_ARGS, uint32_t segsize);
int ompi_coll_tuned_bcast_inter_dec_fixed(BCAST_ARGS);
int ompi_coll_tuned_bcast_inter_dec_dynamic(BCAST_ARGS);
/* Exscan */
int ompi_coll_tuned_exscan_intra_dec_fixed(EXSCAN_ARGS);
int ompi_coll_tuned_exscan_intra_dec_dynamic(EXSCAN_ARGS);
int ompi_coll_tuned_exscan_inter_dec_fixed(EXSCAN_ARGS);
int ompi_coll_tuned_exscan_inter_dec_dynamic(EXSCAN_ARGS);
/* Gather */
int ompi_coll_tuned_gather_intra_dec_fixed(GATHER_ARGS);
int ompi_coll_tuned_gather_intra_dec_dynamic(GATHER_ARGS);
int ompi_coll_tuned_gather_intra_do_forced(GATHER_ARGS);
int ompi_coll_tuned_gather_intra_do_this(GATHER_ARGS, int algorithm, int faninout, int segsize);
int ompi_coll_tuned_gather_intra_check_forced_init (coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices);
int ompi_coll_tuned_gather_intra_basic_linear(GATHER_ARGS);
int ompi_coll_tuned_gather_intra_binomial(GATHER_ARGS);
int ompi_coll_tuned_gather_intra_linear_sync(GATHER_ARGS, int first_segment_size);
int ompi_coll_tuned_gather_inter_dec_fixed(GATHER_ARGS);
int ompi_coll_tuned_gather_inter_dec_dynamic(GATHER_ARGS);
/* GatherV */
int ompi_coll_tuned_gatherv_intra_dec_fixed(GATHERV_ARGS);
int ompi_coll_tuned_gatherv_intra_dec_dynamic(GATHERV_ARGS);
int ompi_coll_tuned_gatherv_inter_dec_fixed(GATHERV_ARGS);
int ompi_coll_tuned_gatherv_inter_dec_dynamic(GATHERV_ARGS);
/* Reduce */
int ompi_coll_tuned_reduce_generic( REDUCE_ARGS, ompi_coll_tree_t* tree, int count_by_segment, int max_outstanding_reqs );
int ompi_coll_tuned_reduce_intra_dec_fixed(REDUCE_ARGS);
int ompi_coll_tuned_reduce_intra_dec_dynamic(REDUCE_ARGS);
int ompi_coll_tuned_reduce_intra_do_forced(REDUCE_ARGS);
int ompi_coll_tuned_reduce_intra_do_this(REDUCE_ARGS, int algorithm, int faninout, int segsize, int max_outstanding_reqs);
int ompi_coll_tuned_reduce_intra_check_forced_init (coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices);
int ompi_coll_tuned_reduce_intra_basic_linear(REDUCE_ARGS);
int ompi_coll_tuned_reduce_intra_chain(REDUCE_ARGS, uint32_t segsize, int fanout, int max_outstanding_reqs );
int ompi_coll_tuned_reduce_intra_pipeline(REDUCE_ARGS, uint32_t segsize, int max_outstanding_reqs );
int ompi_coll_tuned_reduce_intra_binary(REDUCE_ARGS, uint32_t segsize, int max_outstanding_reqs );
int ompi_coll_tuned_reduce_intra_binomial(REDUCE_ARGS, uint32_t segsize, int max_outstanding_reqs );
int ompi_coll_tuned_reduce_intra_in_order_binary(REDUCE_ARGS, uint32_t segsize, int max_outstanding_reqs );
int ompi_coll_tuned_reduce_inter_dec_fixed(REDUCE_ARGS);
int ompi_coll_tuned_reduce_inter_dec_dynamic(REDUCE_ARGS);
/* Reduce_scatter */
int ompi_coll_tuned_reduce_scatter_intra_dec_fixed(REDUCESCATTER_ARGS);
int ompi_coll_tuned_reduce_scatter_intra_dec_dynamic(REDUCESCATTER_ARGS);
int ompi_coll_tuned_reduce_scatter_intra_do_forced(REDUCESCATTER_ARGS);
int ompi_coll_tuned_reduce_scatter_intra_do_this(REDUCESCATTER_ARGS, int algorithm, int faninout, int segsize);
int ompi_coll_tuned_reduce_scatter_intra_check_forced_init (coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices);
int ompi_coll_tuned_reduce_scatter_intra_nonoverlapping(REDUCESCATTER_ARGS);
int ompi_coll_tuned_reduce_scatter_intra_basic_recursivehalving(REDUCESCATTER_ARGS);
int ompi_coll_tuned_reduce_scatter_intra_ring(REDUCESCATTER_ARGS);
int ompi_coll_tuned_reduce_scatter_inter_dec_fixed(REDUCESCATTER_ARGS);
int ompi_coll_tuned_reduce_scatter_inter_dec_dynamic(REDUCESCATTER_ARGS);
/* Scan */
int ompi_coll_tuned_scan_intra_dec_fixed(SCAN_ARGS);
int ompi_coll_tuned_scan_intra_dec_dynamic(SCAN_ARGS);
int ompi_coll_tuned_scan_inter_dec_fixed(SCAN_ARGS);
int ompi_coll_tuned_scan_inter_dec_dynamic(SCAN_ARGS);
/* Scatter */
int ompi_coll_tuned_scatter_intra_dec_fixed(SCATTER_ARGS);
int ompi_coll_tuned_scatter_intra_dec_dynamic(SCATTER_ARGS);
int ompi_coll_tuned_scatter_intra_do_forced(SCATTER_ARGS);
int ompi_coll_tuned_scatter_intra_do_this(SCATTER_ARGS, int algorithm, int faninout, int segsize);
int ompi_coll_tuned_scatter_intra_check_forced_init (coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices);
int ompi_coll_tuned_scatter_intra_basic_linear(SCATTER_ARGS);
int ompi_coll_tuned_scatter_intra_binomial(SCATTER_ARGS);
int ompi_coll_tuned_scatter_inter_dec_fixed(SCATTER_ARGS);
int ompi_coll_tuned_scatter_inter_dec_dynamic(SCATTER_ARGS);
/* ScatterV */
int ompi_coll_tuned_scatterv_intra_dec_fixed(SCATTERV_ARGS);
int ompi_coll_tuned_scatterv_intra_dec_dynamic(SCATTERV_ARGS);
int ompi_coll_tuned_scatterv_inter_dec_fixed(SCATTERV_ARGS);
int ompi_coll_tuned_scatterv_inter_dec_dynamic(SCATTERV_ARGS);
int mca_coll_tuned_ft_event(int state);
/* Utility functions */
static inline void ompi_coll_tuned_free_reqs(ompi_request_t **reqs, int count)
{
    int i;
    for (i = 0; i < count; ++i)
        ompi_request_free(&reqs[i]);
}
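A sketch of the intended error-path usage, mirroring the "line = __LINE__; goto err_hndl;" idiom used throughout the collective implementations (the variable names here are illustrative):

/* Release the i requests already posted before bailing out. */
if (OMPI_SUCCESS != err) {
    ompi_coll_tuned_free_reqs(reqs, i);
    line = __LINE__; goto err_hndl;
}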
struct mca_coll_tuned_component_t {
    /** Base coll component */
    mca_coll_base_component_2_0_0_t super;

    /** MCA parameter: Priority of this component */
    int tuned_priority;

    /** global stuff that I need the component to store */
    /* MCA parameters first */

    /* cached decision table stuff (moved from MCW module) */
    ompi_coll_alg_rule_t *all_base_rules;
};
/**
* Convenience typedef
*/
typedef struct mca_coll_tuned_component_t mca_coll_tuned_component_t;
/**
* Global component instance
*/
OMPI_MODULE_DECLSPEC extern mca_coll_tuned_component_t mca_coll_tuned_component;
/*
* Data structure for hanging data off the communicator
* i.e. per module instance
*/
struct mca_coll_tuned_comm_t {
    /* standard data for requests and PML usage */

    /* Precreated space for requests.
     * Note this does not affect basic, but in the wrong context it can
     * confuse a debugger; this is controlled by an MCA param.
     */
    ompi_request_t **mcct_reqs;
    int mcct_num_reqs;

    /*
     * tuned topo information caching per communicator
     *
     * for each communicator we cache the topo information so we can
     * reuse it without regenerating; if the root [or fanout] changes
     * we regenerate and recache this information
     */

    /* general tree with n fan out */
    ompi_coll_tree_t *cached_ntree;
    int cached_ntree_root;
    int cached_ntree_fanout;

    /* binary tree */
    ompi_coll_tree_t *cached_bintree;
    int cached_bintree_root;

    /* binomial tree */
    ompi_coll_tree_t *cached_bmtree;
    int cached_bmtree_root;

    /* in-order binomial tree */
    ompi_coll_tree_t *cached_in_order_bmtree;
    int cached_in_order_bmtree_root;

    /* chained tree (fanout followed by pipelines) */
    ompi_coll_tree_t *cached_chain;
    int cached_chain_root;
    int cached_chain_fanout;

    /* pipeline */
    ompi_coll_tree_t *cached_pipeline;
    int cached_pipeline_root;

    /* in-order binary tree (root of the in-order binary tree is rank 0) */
    ompi_coll_tree_t *cached_in_order_bintree;

    /* moving to the component */
    ompi_coll_com_rule_t *com_rules[COLLCOUNT]; /* the communicator rules for each MPI collective for ONLY my comsize */

    /* for forced algorithms we store the information on the module */
    /* previously we only had one shared copy, oops, it really is per comm/module */
    coll_tuned_force_algorithm_params_t user_forced[COLLCOUNT];
};
typedef struct mca_coll_tuned_comm_t mca_coll_tuned_comm_t;
struct mca_coll_tuned_module_t {
    mca_coll_base_module_t super;

    mca_coll_tuned_comm_t *tuned_data;
};
typedef struct mca_coll_tuned_module_t mca_coll_tuned_module_t;
OBJ_CLASS_DECLARATION(mca_coll_tuned_module_t);
static inline void mca_coll_tuned_free_reqs(ompi_request_t **reqs,
                                            int count)
{
    int i;
    for (i = 0; i < count; ++i)
        ompi_request_free(reqs + i);
}
END_C_DECLS
#define COLL_TUNED_UPDATE_BINTREE( OMPI_COMM, TUNED_MODULE, ROOT ) \
do { \
mca_coll_tuned_comm_t* coll_comm = (TUNED_MODULE)->tuned_data; \
if( !( (coll_comm->cached_bintree) \
&& (coll_comm->cached_bintree_root == (ROOT)) ) ) { \
if( coll_comm->cached_bintree ) { /* destroy previous binary tree if defined */ \
ompi_coll_tuned_topo_destroy_tree( &(coll_comm->cached_bintree) ); \
} \
coll_comm->cached_bintree = ompi_coll_tuned_topo_build_tree(2,(OMPI_COMM),(ROOT)); \
coll_comm->cached_bintree_root = (ROOT); \
} \
} while (0)
#define COLL_TUNED_UPDATE_BMTREE( OMPI_COMM, TUNED_MODULE, ROOT ) \
do { \
mca_coll_tuned_comm_t* coll_comm = (TUNED_MODULE)->tuned_data; \
if( !( (coll_comm->cached_bmtree) \
&& (coll_comm->cached_bmtree_root == (ROOT)) ) ) { \
if( coll_comm->cached_bmtree ) { /* destroy previous binomial if defined */ \
ompi_coll_tuned_topo_destroy_tree( &(coll_comm->cached_bmtree) ); \
} \
coll_comm->cached_bmtree = ompi_coll_tuned_topo_build_bmtree( (OMPI_COMM), (ROOT) ); \
coll_comm->cached_bmtree_root = (ROOT); \
} \
} while (0)
#define COLL_TUNED_UPDATE_IN_ORDER_BMTREE( OMPI_COMM, TUNED_MODULE, ROOT ) \
do { \
mca_coll_tuned_comm_t* coll_comm = (TUNED_MODULE)->tuned_data; \
if( !( (coll_comm->cached_in_order_bmtree) \
&& (coll_comm->cached_in_order_bmtree_root == (ROOT)) ) ) { \
if( coll_comm->cached_in_order_bmtree ) { /* destroy previous binomial if defined */ \
ompi_coll_tuned_topo_destroy_tree( &(coll_comm->cached_in_order_bmtree) ); \
} \
coll_comm->cached_in_order_bmtree = ompi_coll_tuned_topo_build_in_order_bmtree( (OMPI_COMM), (ROOT) ); \
coll_comm->cached_in_order_bmtree_root = (ROOT); \
} \
} while (0)
#define COLL_TUNED_UPDATE_PIPELINE( OMPI_COMM, TUNED_MODULE, ROOT ) \
do { \
mca_coll_tuned_comm_t* coll_comm = (TUNED_MODULE)->tuned_data; \
if( !( (coll_comm->cached_pipeline) \
&& (coll_comm->cached_pipeline_root == (ROOT)) ) ) { \
if (coll_comm->cached_pipeline) { /* destroy previous pipeline if defined */ \
ompi_coll_tuned_topo_destroy_tree( &(coll_comm->cached_pipeline) ); \
} \
coll_comm->cached_pipeline = ompi_coll_tuned_topo_build_chain( 1, (OMPI_COMM), (ROOT) ); \
coll_comm->cached_pipeline_root = (ROOT); \
} \
} while (0)
#define COLL_TUNED_UPDATE_CHAIN( OMPI_COMM, TUNED_MODULE, ROOT, FANOUT ) \
do { \
mca_coll_tuned_comm_t* coll_comm = (TUNED_MODULE)->tuned_data; \
if( !( (coll_comm->cached_chain) \
&& (coll_comm->cached_chain_root == (ROOT)) \
&& (coll_comm->cached_chain_fanout == (FANOUT)) ) ) { \
if( coll_comm->cached_chain) { /* destroy previous chain if defined */ \
ompi_coll_tuned_topo_destroy_tree( &(coll_comm->cached_chain) ); \
} \
coll_comm->cached_chain = ompi_coll_tuned_topo_build_chain((FANOUT), (OMPI_COMM), (ROOT)); \
coll_comm->cached_chain_root = (ROOT); \
coll_comm->cached_chain_fanout = (FANOUT); \
} \
} while (0)
#define COLL_TUNED_UPDATE_IN_ORDER_BINTREE( OMPI_COMM, TUNED_MODULE ) \
do { \
mca_coll_tuned_comm_t* coll_comm = (TUNED_MODULE)->tuned_data; \
if( !(coll_comm->cached_in_order_bintree) ) { \
/* In-order binary tree topology is defined by communicator size */ \
/* Thus, there is no need to destroy anything */ \
coll_comm->cached_in_order_bintree = \
ompi_coll_tuned_topo_build_in_order_bintree((OMPI_COMM)); \
} \
} while (0)
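A hedged sketch of how these caching macros are consumed (the wrapper name example_bcast_bintree is illustrative; the tree-based bcast variants declared above follow this pattern):

static int example_bcast_bintree(BCAST_ARGS, uint32_t count_by_segment)
{
    /* Illustrative wrapper; assumes tuned_data was set up when the
       module was enabled on this communicator. */
    mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module;

    /* Rebuild and recache the binary tree only when the root changes. */
    COLL_TUNED_UPDATE_BINTREE(comm, tuned_module, root);

    /* Hand the cached topology to the generic tree-based broadcast
       declared above. */
    return ompi_coll_tuned_bcast_intra_generic(buff, count, datatype, root,
                                               comm, module, count_by_segment,
                                               tuned_module->tuned_data->cached_bintree);
}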
/**
 * This macro gives a generic way to compute the best count of
 * the segment (i.e. the number of complete datatypes that
 * can fit in the specified SEGSIZE). Beware, when this macro
 * is called, the SEGCOUNT should be initialized to the count as
 * expected by the collective call.
 */
#define COLL_TUNED_COMPUTED_SEGCOUNT(SEGSIZE, TYPELNG, SEGCOUNT) \
if( ((SEGSIZE) >= (TYPELNG)) && \
((SEGSIZE) < ((TYPELNG) * (SEGCOUNT))) ) { \
size_t residual; \
(SEGCOUNT) = (int)((SEGSIZE) / (TYPELNG)); \
residual = (SEGSIZE) - (SEGCOUNT) * (TYPELNG); \
if( residual > ((TYPELNG) >> 1) ) \
(SEGCOUNT)++; \
} \
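A worked example with illustrative values: segmenting a 24-byte datatype into 1024-byte segments.

/* Worked example (illustrative values). */
int    segcount = 1000;  /* initialized to the full count, as required above */
size_t typelng  = 24;    /* datatype size in bytes */
COLL_TUNED_COMPUTED_SEGCOUNT(1024, typelng, segcount);
/* segcount becomes 1024/24 = 42; residual = 1024 - 42*24 = 16 bytes,
   which exceeds 24/2 = 12, so segcount is rounded up to 43. */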
/**
 * This macro gives a generic way to compute well-distributed block counts
 * when the count and number of blocks are fixed.
 * Macro returns "early-block" count, "late-block" count, and "split-index"
 * which is the block at which we switch from "early-block" count to
 * the "late-block" count.
 * count = split_index * early_block_count +
 *         (block_count - split_index) * late_block_count
 * We do not perform ANY error checks - make sure that the input values
 * make sense (e.g., count > num_blocks).
 */
#define COLL_TUNED_COMPUTE_BLOCKCOUNT( COUNT, NUM_BLOCKS, SPLIT_INDEX, \
EARLY_BLOCK_COUNT, LATE_BLOCK_COUNT ) \
EARLY_BLOCK_COUNT = LATE_BLOCK_COUNT = COUNT / NUM_BLOCKS; \
SPLIT_INDEX = COUNT % NUM_BLOCKS; \
if (0 != SPLIT_INDEX) { \
EARLY_BLOCK_COUNT = EARLY_BLOCK_COUNT + 1; \
} \
#endif /* MCA_COLL_TUNED_EXPORT_H */
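Similarly, a worked example of COLL_TUNED_COMPUTE_BLOCKCOUNT above, with illustrative values: distributing 10 elements over 4 blocks.

int early, late, split;
COLL_TUNED_COMPUTE_BLOCKCOUNT(10, 4, split, early, late);
/* early = late = 10/4 = 2; split = 10%4 = 2 is nonzero, so early becomes 3.
   Check: split*early + (4 - split)*late = 2*3 + 2*2 = 10 = COUNT. */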

View file

@@ -2,7 +2,7 @@
  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
  *                         University Research and Technology
  *                         Corporation. All rights reserved.
- * Copyright (c) 2004-2014 The University of Tennessee and The University
+ * Copyright (c) 2004-2015 The University of Tennessee and The University
  *                         of Tennessee Research Foundation. All rights
  *                         reserved.
  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@@ -30,31 +30,12 @@
 #include "ompi/communicator/communicator.h"
 #include "ompi/mca/coll/coll.h"
 #include "ompi/mca/coll/base/coll_tags.h"
-#include "coll_tuned.h"
-#include "coll_tuned_topo.h"
-#include "coll_tuned_util.h"
-
-/* allgather algorithm variables */
-static int coll_tuned_allgather_algorithm_count = 6;
-static int coll_tuned_allgather_forced_algorithm = 0;
-static int coll_tuned_allgather_segment_size = 0;
-static int coll_tuned_allgather_tree_fanout;
-static int coll_tuned_allgather_chain_fanout;
-
-/* valid values for coll_tuned_allgather_forced_algorithm */
-static mca_base_var_enum_value_t allgather_algorithms[] = {
-    {0, "ignore"},
-    {1, "linear"},
-    {2, "bruck"},
-    {3, "recursive_doubling"},
-    {4, "ring"},
-    {5, "neighbor"},
-    {6, "two_proc"},
-    {0, NULL}
-};
+#include "ompi/mca/coll/base/coll_base_functions.h"
+#include "coll_base_topo.h"
+#include "coll_base_util.h"
 
 /*
- * ompi_coll_tuned_allgather_intra_bruck
+ * ompi_coll_base_allgather_intra_bruck
  *
  * Function:     allgather using O(log(N)) steps.
  * Accepts:      Same arguments as MPI_Allgather
@@ -101,7 +82,7 @@ static mca_base_var_enum_value_t allgather_algorithms[] = {
  *         [4]    [4]    [4]    [4]    [4]    [4]
  *         [5]    [5]    [5]    [5]    [5]    [5]
  */
-int ompi_coll_tuned_allgather_intra_bruck(void *sbuf, int scount,
+int ompi_coll_base_allgather_intra_bruck(void *sbuf, int scount,
                                           struct ompi_datatype_t *sdtype,
                                           void* rbuf, int rcount,
                                           struct ompi_datatype_t *rdtype,
@@ -115,8 +96,8 @@ int ompi_coll_tuned_allgather_intra_bruck(void *sbuf, int scount,
     size = ompi_comm_size(comm);
     rank = ompi_comm_rank(comm);
 
-    OPAL_OUTPUT((ompi_coll_tuned_stream,
-                 "coll:tuned:allgather_intra_bruck rank %d", rank));
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
+                 "coll:base:allgather_intra_bruck rank %d", rank));
 
     err = ompi_datatype_get_extent (sdtype, &slb, &sext);
     if (MPI_SUCCESS != err) { line = __LINE__; goto err_hndl; }
@@ -167,7 +148,7 @@ int ompi_coll_tuned_allgather_intra_bruck(void *sbuf, int scount,
     }
 
     /* Sendreceive */
-    err = ompi_coll_tuned_sendrecv(tmpsend, blockcount * rcount, rdtype,
+    err = ompi_coll_base_sendrecv(tmpsend, blockcount * rcount, rdtype,
                                    sendto, MCA_COLL_BASE_TAG_ALLGATHER,
                                    tmprecv, blockcount * rcount, rdtype,
                                    recvfrom, MCA_COLL_BASE_TAG_ALLGATHER,
@@ -223,13 +204,13 @@ int ompi_coll_tuned_allgather_intra_bruck(void *sbuf, int scount,
     return OMPI_SUCCESS;
 
  err_hndl:
-    OPAL_OUTPUT((ompi_coll_tuned_stream, "%s:%4d\tError occurred %d, rank %2d",
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "%s:%4d\tError occurred %d, rank %2d",
                  __FILE__, line, err, rank));
     return err;
 }
 
 /*
- * ompi_coll_tuned_allgather_intra_recursivedoubling
+ * ompi_coll_base_allgather_intra_recursivedoubling
  *
  * Function:     allgather using O(log(N)) steps.
  * Accepts:      Same arguments as MPI_Allgather
@@ -274,7 +255,7 @@ int ompi_coll_tuned_allgather_intra_bruck(void *sbuf, int scount,
  * step, and send them appropriate messages.
  */
 int
-ompi_coll_tuned_allgather_intra_recursivedoubling(void *sbuf, int scount,
+ompi_coll_base_allgather_intra_recursivedoubling(void *sbuf, int scount,
                                                   struct ompi_datatype_t *sdtype,
                                                   void* rbuf, int rcount,
                                                   struct ompi_datatype_t *rdtype,
@@ -297,17 +278,17 @@ ompi_coll_tuned_allgather_intra_recursivedoubling(void *sbuf, int scount,
        print warning and call bruck allgather algorithm with same parameters.
     */
     if (pow2size != size) {
-        OPAL_OUTPUT((ompi_coll_tuned_stream,
-                     "coll:tuned:allgather_intra_recursivedoubling WARNING: non-pow-2 size %d, switching to bruck algorithm",
+        OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
+                     "coll:base:allgather_intra_recursivedoubling WARNING: non-pow-2 size %d, switching to bruck algorithm",
                      size));
 
-        return ompi_coll_tuned_allgather_intra_bruck(sbuf, scount, sdtype,
-                                                     rbuf, rcount, rdtype,
-                                                     comm, module);
+        return ompi_coll_base_allgather_intra_bruck(sbuf, scount, sdtype,
+                                                    rbuf, rcount, rdtype,
+                                                    comm, module);
     }
 
-    OPAL_OUTPUT((ompi_coll_tuned_stream,
-                 "coll:tuned:allgather_intra_recursivedoubling rank %d, size %d",
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
+                 "coll:base:allgather_intra_recursivedoubling rank %d, size %d",
                  rank, size));
 
     err = ompi_datatype_get_extent (sdtype, &slb, &sext);
@@ -347,7 +328,7 @@ ompi_coll_tuned_allgather_intra_recursivedoubling(void *sbuf, int scount,
     }
 
     /* Sendreceive */
-    err = ompi_coll_tuned_sendrecv(tmpsend, (ptrdiff_t)distance * (ptrdiff_t)rcount, rdtype,
+    err = ompi_coll_base_sendrecv(tmpsend, (ptrdiff_t)distance * (ptrdiff_t)rcount, rdtype,
                                    remote, MCA_COLL_BASE_TAG_ALLGATHER,
                                    tmprecv, (ptrdiff_t)distance * (ptrdiff_t)rcount, rdtype,
                                    remote, MCA_COLL_BASE_TAG_ALLGATHER,
@@ -359,7 +340,7 @@ ompi_coll_tuned_allgather_intra_recursivedoubling(void *sbuf, int scount,
     return OMPI_SUCCESS;
 
  err_hndl:
-    OPAL_OUTPUT((ompi_coll_tuned_stream, "%s:%4d\tError occurred %d, rank %2d",
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "%s:%4d\tError occurred %d, rank %2d",
                  __FILE__, line, err, rank));
     return err;
 }
@@ -367,7 +348,7 @@ ompi_coll_tuned_allgather_intra_recursivedoubling(void *sbuf, int scount,
 
 /*
- * ompi_coll_tuned_allgather_intra_ring
+ * ompi_coll_base_allgather_intra_ring
  *
  * Function:     allgather using O(N) steps.
  * Accepts:      Same arguments as MPI_Allgather
@@ -381,7 +362,7 @@ ompi_coll_tuned_allgather_intra_recursivedoubling(void *sbuf, int scount,
  * No additional memory requirements.
  *
  */
-int ompi_coll_tuned_allgather_intra_ring(void *sbuf, int scount,
+int ompi_coll_base_allgather_intra_ring(void *sbuf, int scount,
                                          struct ompi_datatype_t *sdtype,
                                          void* rbuf, int rcount,
                                          struct ompi_datatype_t *rdtype,
@@ -395,8 +376,8 @@ int ompi_coll_tuned_allgather_intra_ring(void *sbuf, int scount,
     size = ompi_comm_size(comm);
     rank = ompi_comm_rank(comm);
 
-    OPAL_OUTPUT((ompi_coll_tuned_stream,
-                 "coll:tuned:allgather_intra_ring rank %d", rank));
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
+                 "coll:base:allgather_intra_ring rank %d", rank));
 
     err = ompi_datatype_get_extent (sdtype, &slb, &sext);
     if (MPI_SUCCESS != err) { line = __LINE__; goto err_hndl; }
@@ -434,7 +415,7 @@ int ompi_coll_tuned_allgather_intra_ring(void *sbuf, int scount,
     tmpsend = (char*)rbuf + (ptrdiff_t)senddatafrom * (ptrdiff_t)rcount * rext;
 
     /* Sendreceive */
-    err = ompi_coll_tuned_sendrecv(tmpsend, rcount, rdtype, sendto,
+    err = ompi_coll_base_sendrecv(tmpsend, rcount, rdtype, sendto,
                                    MCA_COLL_BASE_TAG_ALLGATHER,
                                    tmprecv, rcount, rdtype, recvfrom,
                                    MCA_COLL_BASE_TAG_ALLGATHER,
@@ -446,13 +427,13 @@ int ompi_coll_tuned_allgather_intra_ring(void *sbuf, int scount,
     return OMPI_SUCCESS;
 
  err_hndl:
-    OPAL_OUTPUT((ompi_coll_tuned_stream, "%s:%4d\tError occurred %d, rank %2d",
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "%s:%4d\tError occurred %d, rank %2d",
                  __FILE__, line, err, rank));
     return err;
 }
 
 /*
- * ompi_coll_tuned_allgather_intra_neighborexchange
+ * ompi_coll_base_allgather_intra_neighborexchange
  *
  * Function:     allgather using N/2 steps (O(N))
  * Accepts:      Same arguments as MPI_Allgather
@@ -509,7 +490,7 @@ int ompi_coll_tuned_allgather_intra_ring(void *sbuf, int scount,
  *         [5]    [5]    [5]    [5]    [5]    [5]
  */
 int
-ompi_coll_tuned_allgather_intra_neighborexchange(void *sbuf, int scount,
+ompi_coll_base_allgather_intra_neighborexchange(void *sbuf, int scount,
                                                  struct ompi_datatype_t *sdtype,
                                                  void* rbuf, int rcount,
                                                  struct ompi_datatype_t *rdtype,
@@ -525,16 +506,16 @@ ompi_coll_tuned_allgather_intra_neighborexchange(void *sbuf, int scount,
     rank = ompi_comm_rank(comm);
 
     if (size % 2) {
-        OPAL_OUTPUT((ompi_coll_tuned_stream,
-                     "coll:tuned:allgather_intra_neighborexchange WARNING: odd size %d, switching to ring algorithm",
+        OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
+                     "coll:base:allgather_intra_neighborexchange WARNING: odd size %d, switching to ring algorithm",
                      size));
 
-        return ompi_coll_tuned_allgather_intra_ring(sbuf, scount, sdtype,
-                                                    rbuf, rcount, rdtype,
-                                                    comm, module);
+        return ompi_coll_base_allgather_intra_ring(sbuf, scount, sdtype,
+                                                   rbuf, rcount, rdtype,
+                                                   comm, module);
     }
 
-    OPAL_OUTPUT((ompi_coll_tuned_stream,
-                 "coll:tuned:allgather_intra_neighborexchange rank %d", rank));
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
+                 "coll:base:allgather_intra_neighborexchange rank %d", rank));
 
     err = ompi_datatype_get_extent (sdtype, &slb, &sext);
     if (MPI_SUCCESS != err) { line = __LINE__; goto err_hndl; }
@@ -581,7 +562,7 @@ ompi_coll_tuned_allgather_intra_neighborexchange(void *sbuf, int scount,
     tmprecv = (char*)rbuf + (ptrdiff_t)neighbor[0] * (ptrdiff_t)rcount * rext;
     tmpsend = (char*)rbuf + (ptrdiff_t)rank * (ptrdiff_t)rcount * rext;
 
     /* Sendreceive */
-    err = ompi_coll_tuned_sendrecv(tmpsend, rcount, rdtype, neighbor[0],
+    err = ompi_coll_base_sendrecv(tmpsend, rcount, rdtype, neighbor[0],
                                    MCA_COLL_BASE_TAG_ALLGATHER,
                                    tmprecv, rcount, rdtype, neighbor[0],
                                    MCA_COLL_BASE_TAG_ALLGATHER,
@@ -604,7 +585,7 @@ ompi_coll_tuned_allgather_intra_neighborexchange(void *sbuf, int scount,
     tmpsend = (char*)rbuf + (ptrdiff_t)send_data_from * rcount * rext;
 
     /* Sendreceive */
-    err = ompi_coll_tuned_sendrecv(tmpsend, (ptrdiff_t)2 * (ptrdiff_t)rcount, rdtype,
+    err = ompi_coll_base_sendrecv(tmpsend, (ptrdiff_t)2 * (ptrdiff_t)rcount, rdtype,
                                    neighbor[i_parity],
                                    MCA_COLL_BASE_TAG_ALLGATHER,
                                    tmprecv, (ptrdiff_t)2 * (ptrdiff_t)rcount, rdtype,
@@ -619,13 +600,13 @@ ompi_coll_tuned_allgather_intra_neighborexchange(void *sbuf, int scount,
     return OMPI_SUCCESS;
 
  err_hndl:
-    OPAL_OUTPUT((ompi_coll_tuned_stream, "%s:%4d\tError occurred %d, rank %2d",
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "%s:%4d\tError occurred %d, rank %2d",
                  __FILE__, line, err, rank));
     return err;
 }
 
 
-int ompi_coll_tuned_allgather_intra_two_procs(void *sbuf, int scount,
+int ompi_coll_base_allgather_intra_two_procs(void *sbuf, int scount,
                                               struct ompi_datatype_t *sdtype,
                                               void* rbuf, int rcount,
                                               struct ompi_datatype_t *rdtype,
@@ -638,8 +619,8 @@ int ompi_coll_tuned_allgather_intra_two_procs(void *sbuf, int scount,
     rank = ompi_comm_rank(comm);
 
-    OPAL_OUTPUT((ompi_coll_tuned_stream,
-                 "ompi_coll_tuned_allgather_intra_two_procs rank %d", rank));
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
+                 "ompi_coll_base_allgather_intra_two_procs rank %d", rank));
 
     err = ompi_datatype_get_extent (sdtype, &lb, &sext);
     if (MPI_SUCCESS != err) { line = __LINE__; goto err_hndl; }
@@ -661,7 +642,7 @@ int ompi_coll_tuned_allgather_intra_two_procs(void *sbuf, int scount,
     }
     tmprecv = (char*)rbuf + (ptrdiff_t)remote * (ptrdiff_t)rcount * rext;
 
-    err = ompi_coll_tuned_sendrecv(tmpsend, scount, sdtype, remote,
+    err = ompi_coll_base_sendrecv(tmpsend, scount, sdtype, remote,
                                    MCA_COLL_BASE_TAG_ALLGATHER,
                                    tmprecv, rcount, rdtype, remote,
                                    MCA_COLL_BASE_TAG_ALLGATHER,
@@ -678,7 +659,7 @@ int ompi_coll_tuned_allgather_intra_two_procs(void *sbuf, int scount,
     return MPI_SUCCESS;
 
  err_hndl:
-    OPAL_OUTPUT((ompi_coll_tuned_stream, "%s:%4d\tError occurred %d, rank %2d",
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "%s:%4d\tError occurred %d, rank %2d",
                  __FILE__, line, err, rank));
     return err;
 }
@@ -688,12 +669,12 @@ int ompi_coll_tuned_allgather_intra_two_procs(void *sbuf, int scount,
  * Linear functions are copied from the BASIC coll module
  * they do not segment the message and are simple implementations
  * but for some small number of nodes and/or small data sizes they
- * are just as fast as tuned/tree based segmenting operations
+ * are just as fast as base/tree based segmenting operations
  * and as such may be selected by the decision functions
  * These are copied into this module due to the way we select modules
  * in V1. i.e. in V2 we will handle this differently and so will not
  * have to duplicate code.
- * JPG following the examples from other coll_tuned implementations. Dec06.
+ * JPG following the examples from other coll_base implementations. Dec06.
  */
 
 /* copied function (with appropriate renaming) starts here */
@@ -706,7 +687,7 @@ int ompi_coll_tuned_allgather_intra_two_procs(void *sbuf, int scount,
  * Returns:     - MPI_SUCCESS or error code
  */
 int
-ompi_coll_tuned_allgather_intra_basic_linear(void *sbuf, int scount,
+ompi_coll_base_allgather_intra_basic_linear(void *sbuf, int scount,
                                              struct ompi_datatype_t *sdtype,
                                              void *rbuf,
                                              int rcount,
@@ -755,183 +736,3 @@ ompi_coll_tuned_allgather_intra_basic_linear(void *sbuf, int scount,
 }
 
 /* copied function (with appropriate renaming) ends here */
/* The following are used by dynamic and forced rules */
/* publish details of each algorithm and if it's forced/fixed/locked in */
/* as you add methods/algorithms you must update this and the query/map
   routines */

/* this routine is called by the component only */
/* this makes sure that the mca parameters are set to their initial values
   and perms */
/* modules do not call this; they call the forced_getvalues routine instead */
int
ompi_coll_tuned_allgather_intra_check_forced_init(coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices)
{
    mca_base_var_enum_t *new_enum;

    ompi_coll_tuned_forced_max_algorithms[ALLGATHER] = coll_tuned_allgather_algorithm_count;

    (void) mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
                                           "allgather_algorithm_count",
                                           "Number of allgather algorithms available",
                                           MCA_BASE_VAR_TYPE_INT, NULL, 0,
                                           MCA_BASE_VAR_FLAG_DEFAULT_ONLY,
                                           OPAL_INFO_LVL_5,
                                           MCA_BASE_VAR_SCOPE_CONSTANT,
                                           &coll_tuned_allgather_algorithm_count);

    /* MPI_T: This variable should eventually be bound to a communicator */
    coll_tuned_allgather_forced_algorithm = 0;
    (void) mca_base_var_enum_create("coll_tuned_allgather_algorithms", allgather_algorithms, &new_enum);
    mca_param_indices->algorithm_param_index =
        mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
                                        "allgather_algorithm",
                                        "Which allgather algorithm is used. Can be locked down to choice of: 0 ignore, 1 basic linear, 2 bruck, 3 recursive doubling, 4 ring, 5 neighbor exchange, 6 two proc only.",
                                        MCA_BASE_VAR_TYPE_INT, new_enum, 0, 0,
                                        OPAL_INFO_LVL_5,
                                        MCA_BASE_VAR_SCOPE_READONLY,
                                        &coll_tuned_allgather_forced_algorithm);
    OBJ_RELEASE(new_enum);
    if (mca_param_indices->algorithm_param_index < 0) {
        return mca_param_indices->algorithm_param_index;
    }

    coll_tuned_allgather_segment_size = 0;
    mca_param_indices->segsize_param_index =
        mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
                                        "allgather_algorithm_segmentsize",
                                        "Segment size in bytes used by default for allgather algorithms. Only has meaning if algorithm is forced and supports segmenting. 0 bytes means no segmentation. Currently, available algorithms do not support segmentation.",
                                        MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
                                        OPAL_INFO_LVL_5,
                                        MCA_BASE_VAR_SCOPE_READONLY,
                                        &coll_tuned_allgather_segment_size);

    coll_tuned_allgather_tree_fanout = ompi_coll_tuned_init_tree_fanout; /* get system wide default */
    mca_param_indices->tree_fanout_param_index =
        mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
                                        "allgather_algorithm_tree_fanout",
                                        "Fanout for n-tree used for allgather algorithms. Only has meaning if algorithm is forced and supports n-tree topo based operation. Currently, available algorithms do not support n-tree topologies.",
                                        MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
                                        OPAL_INFO_LVL_5,
                                        MCA_BASE_VAR_SCOPE_READONLY,
                                        &coll_tuned_allgather_tree_fanout);

    coll_tuned_allgather_chain_fanout = ompi_coll_tuned_init_chain_fanout; /* get system wide default */
    mca_param_indices->chain_fanout_param_index =
        mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
                                        "allgather_algorithm_chain_fanout",
                                        "Fanout for chains used for allgather algorithms. Only has meaning if algorithm is forced and supports chain topo based operation. Currently, available algorithms do not support chain topologies.",
                                        MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
                                        OPAL_INFO_LVL_5,
                                        MCA_BASE_VAR_SCOPE_READONLY,
                                        &coll_tuned_allgather_chain_fanout);

    return (MPI_SUCCESS);
}
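A usage sketch for the parameters registered above (the full names follow from the component registration; the application name is a placeholder, and in tuned the forced choice takes effect when dynamic rules are enabled): forcing the Bruck allgather, where 2 = bruck per the allgather_algorithms values above, looks like

mpirun --mca coll_tuned_use_dynamic_rules 1 \
       --mca coll_tuned_allgather_algorithm 2 ./app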
int ompi_coll_tuned_allgather_intra_do_forced(void *sbuf, int scount,
                                              struct ompi_datatype_t *sdtype,
                                              void* rbuf, int rcount,
                                              struct ompi_datatype_t *rdtype,
                                              struct ompi_communicator_t *comm,
                                              mca_coll_base_module_t *module)
{
    mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module;
    mca_coll_tuned_comm_t *data = tuned_module->tuned_data;

    OPAL_OUTPUT((ompi_coll_tuned_stream,
                 "coll:tuned:allgather_intra_do_forced selected algorithm %d",
                 data->user_forced[ALLGATHER].algorithm));

    switch (data->user_forced[ALLGATHER].algorithm) {
    case (0):
        return ompi_coll_tuned_allgather_intra_dec_fixed(sbuf, scount, sdtype,
                                                         rbuf, rcount, rdtype,
                                                         comm, module);
    case (1):
        return ompi_coll_tuned_allgather_intra_basic_linear(sbuf, scount, sdtype,
                                                            rbuf, rcount, rdtype,
                                                            comm, module);
    case (2):
        return ompi_coll_tuned_allgather_intra_bruck(sbuf, scount, sdtype,
                                                     rbuf, rcount, rdtype,
                                                     comm, module);
    case (3):
        return ompi_coll_tuned_allgather_intra_recursivedoubling(sbuf, scount, sdtype,
                                                                 rbuf, rcount, rdtype,
                                                                 comm, module);
    case (4):
        return ompi_coll_tuned_allgather_intra_ring(sbuf, scount, sdtype,
                                                    rbuf, rcount, rdtype,
                                                    comm, module);
    case (5):
        return ompi_coll_tuned_allgather_intra_neighborexchange(sbuf, scount, sdtype,
                                                                rbuf, rcount, rdtype,
                                                                comm, module);
    case (6):
        return ompi_coll_tuned_allgather_intra_two_procs(sbuf, scount, sdtype,
                                                         rbuf, rcount, rdtype,
                                                         comm, module);
    default:
        OPAL_OUTPUT((ompi_coll_tuned_stream,
                     "coll:tuned:allgather_intra_do_forced attempt to select algorithm %d when only 0-%d is valid?",
                     data->user_forced[ALLGATHER].algorithm,
                     ompi_coll_tuned_forced_max_algorithms[ALLGATHER]));
        return (MPI_ERR_ARG);
    } /* switch */
}
int ompi_coll_tuned_allgather_intra_do_this(void *sbuf, int scount,
                                            struct ompi_datatype_t *sdtype,
                                            void* rbuf, int rcount,
                                            struct ompi_datatype_t *rdtype,
                                            struct ompi_communicator_t *comm,
                                            mca_coll_base_module_t *module,
                                            int algorithm, int faninout, int segsize)
{
    OPAL_OUTPUT((ompi_coll_tuned_stream,
                 "coll:tuned:allgather_intra_do_this selected algorithm %d topo faninout %d segsize %d",
                 algorithm, faninout, segsize));

    switch (algorithm) {
    case (0):
        return ompi_coll_tuned_allgather_intra_dec_fixed(sbuf, scount, sdtype,
                                                         rbuf, rcount, rdtype,
                                                         comm, module);
    case (1):
        return ompi_coll_tuned_allgather_intra_basic_linear(sbuf, scount, sdtype,
                                                            rbuf, rcount, rdtype,
                                                            comm, module);
    case (2):
        return ompi_coll_tuned_allgather_intra_bruck(sbuf, scount, sdtype,
                                                     rbuf, rcount, rdtype,
                                                     comm, module);
    case (3):
        return ompi_coll_tuned_allgather_intra_recursivedoubling(sbuf, scount, sdtype,
                                                                 rbuf, rcount, rdtype,
                                                                 comm, module);
    case (4):
        return ompi_coll_tuned_allgather_intra_ring(sbuf, scount, sdtype,
                                                    rbuf, rcount, rdtype,
                                                    comm, module);
    case (5):
        return ompi_coll_tuned_allgather_intra_neighborexchange(sbuf, scount, sdtype,
                                                                rbuf, rcount, rdtype,
                                                                comm, module);
    case (6):
        return ompi_coll_tuned_allgather_intra_two_procs(sbuf, scount, sdtype,
                                                         rbuf, rcount, rdtype,
                                                         comm, module);
    default:
        OPAL_OUTPUT((ompi_coll_tuned_stream,
                     "coll:tuned:allgather_intra_do_this attempt to select algorithm %d when only 0-%d is valid?",
                     algorithm,
                     ompi_coll_tuned_forced_max_algorithms[ALLGATHER]));
        return (MPI_ERR_ARG);
    } /* switch */
}
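And a sketch of driving the selection directly from a hypothetical caller (per the registration strings above the allgather algorithms currently support neither segmentation nor fanout, so zeros are passed):

err = ompi_coll_tuned_allgather_intra_do_this(sbuf, scount, sdtype,
                                              rbuf, rcount, rdtype,
                                              comm, module,
                                              2 /* bruck */, 0 /* faninout */, 0 /* segsize */);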

View file

@@ -30,19 +30,12 @@
 #include "ompi/communicator/communicator.h"
 #include "ompi/mca/coll/coll.h"
 #include "ompi/mca/coll/base/coll_tags.h"
-#include "coll_tuned.h"
-#include "coll_tuned_topo.h"
-#include "coll_tuned_util.h"
-
-/* allgatherv algorithm variables */
-static int coll_tuned_allgatherv_algorithm_count = 5;
-static int coll_tuned_allgatherv_forced_algorithm = 0;
-static int coll_tuned_allgatherv_segment_size = 0;
-static int coll_tuned_allgatherv_tree_fanout;
-static int coll_tuned_allgatherv_chain_fanout;
-
-/* valid values for coll_tuned_allgatherv_forced_algorithm */
-static mca_base_var_enum_value_t allgatherv_algorithms[] = {
+#include "ompi/mca/coll/base/coll_base_functions.h"
+#include "coll_base_topo.h"
+#include "coll_base_util.h"
+
+/* valid values for coll_base_allgatherv_forced_algorithm */
+mca_base_var_enum_value_t coll_base_allgatherv_algorithms[] = {
     {0, "ignore"},
     {1, "default"},
     {2, "bruck"},
@@ -53,7 +46,7 @@ static mca_base_var_enum_value_t allgatherv_algorithms[] = {
 };
 
 /*
- * ompi_coll_tuned_allgatherv_intra_bruck
+ * ompi_coll_base_allgatherv_intra_bruck
  *
  * Function:     allgather using O(log(N)) steps.
  * Accepts:      Same arguments as MPI_Allgather
@@ -107,7 +100,7 @@ static mca_base_var_enum_value_t allgatherv_algorithms[] = {
  *         [5]    [5]    [5]    [5]    [5]    [5]    [5]
  *         [6]    [6]    [6]    [6]    [6]    [6]    [6]
  */
-int ompi_coll_tuned_allgatherv_intra_bruck(void *sbuf, int scount,
+int ompi_coll_base_allgatherv_intra_bruck(void *sbuf, int scount,
                                            struct ompi_datatype_t *sdtype,
                                            void *rbuf, int *rcounts,
                                            int *rdispls,
@@ -124,8 +117,8 @@ int ompi_coll_tuned_allgatherv_intra_bruck(void *sbuf, int scount,
     size = ompi_comm_size(comm);
     rank = ompi_comm_rank(comm);
 
-    OPAL_OUTPUT((ompi_coll_tuned_stream,
-                 "coll:tuned:allgather_intra_bruck rank %d", rank));
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
+                 "coll:base:allgather_intra_bruck rank %d", rank));
 
     err = ompi_datatype_get_extent (sdtype, &slb, &sext);
     if (MPI_SUCCESS != err) { line = __LINE__; goto err_hndl; }
@@ -198,7 +191,7 @@ int ompi_coll_tuned_allgatherv_intra_bruck(void *sbuf, int scount,
     if (MPI_SUCCESS != err) { line = __LINE__; goto err_hndl; }
 
     /* Sendreceive */
-    err = ompi_coll_tuned_sendrecv(rbuf, 1, new_sdtype, sendto,
+    err = ompi_coll_base_sendrecv(rbuf, 1, new_sdtype, sendto,
                                    MCA_COLL_BASE_TAG_ALLGATHERV,
                                    rbuf, 1, new_rdtype, recvfrom,
                                    MCA_COLL_BASE_TAG_ALLGATHERV,
@@ -217,14 +210,14 @@ int ompi_coll_tuned_allgatherv_intra_bruck(void *sbuf, int scount,
 
  err_hndl:
     if( NULL != new_rcounts ) free(new_rcounts);
-    OPAL_OUTPUT((ompi_coll_tuned_stream, "%s:%4d\tError occurred %d, rank %2d",
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "%s:%4d\tError occurred %d, rank %2d",
                  __FILE__, line, err, rank));
     return err;
 }
 
 /*
- * ompi_coll_tuned_allgatherv_intra_ring
+ * ompi_coll_base_allgatherv_intra_ring
  *
  * Function:     allgatherv using O(N) steps.
  * Accepts:      Same arguments as MPI_Allgatherv
@@ -238,7 +231,7 @@ int ompi_coll_tuned_allgatherv_intra_bruck(void *sbuf, int scount,
  * No additional memory requirements.
  *
  */
-int ompi_coll_tuned_allgatherv_intra_ring(void *sbuf, int scount,
+int ompi_coll_base_allgatherv_intra_ring(void *sbuf, int scount,
                                           struct ompi_datatype_t *sdtype,
                                           void* rbuf, int *rcounts, int *rdisps,
                                           struct ompi_datatype_t *rdtype,
@@ -252,8 +245,8 @@ int ompi_coll_tuned_allgatherv_intra_ring(void *sbuf, int scount,
     size = ompi_comm_size(comm);
     rank = ompi_comm_rank(comm);
 
-    OPAL_OUTPUT((ompi_coll_tuned_stream,
-                 "coll:tuned:allgatherv_intra_ring rank %d", rank));
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
+                 "coll:base:allgatherv_intra_ring rank %d", rank));
 
     err = ompi_datatype_get_extent (sdtype, &slb, &sext);
     if (MPI_SUCCESS != err) { line = __LINE__; goto err_hndl; }
@@ -292,7 +285,7 @@ int ompi_coll_tuned_allgatherv_intra_ring(void *sbuf, int scount,
     tmpsend = (char*)rbuf + rdisps[senddatafrom] * rext;
 
     /* Sendreceive */
-    err = ompi_coll_tuned_sendrecv(tmpsend, rcounts[senddatafrom], rdtype,
+    err = ompi_coll_base_sendrecv(tmpsend, rcounts[senddatafrom], rdtype,
                                    sendto, MCA_COLL_BASE_TAG_ALLGATHERV,
                                    tmprecv, rcounts[recvdatafrom], rdtype,
                                    recvfrom, MCA_COLL_BASE_TAG_ALLGATHERV,
@ -304,13 +297,13 @@ int ompi_coll_tuned_allgatherv_intra_ring(void *sbuf, int scount,
return OMPI_SUCCESS; return OMPI_SUCCESS;
err_hndl: err_hndl:
OPAL_OUTPUT((ompi_coll_tuned_stream, "%s:%4d\tError occurred %d, rank %2d", OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "%s:%4d\tError occurred %d, rank %2d",
__FILE__, line, err, rank)); __FILE__, line, err, rank));
return err; return err;
} }
/*
- * ompi_coll_tuned_allgatherv_intra_neighborexchange
+ * ompi_coll_base_allgatherv_intra_neighborexchange
 *
 * Function:     allgatherv using N/2 steps (O(N))
 * Accepts:      Same arguments as MPI_Allgatherv
@@ -368,7 +361,7 @@ int ompi_coll_tuned_allgatherv_intra_ring(void *sbuf, int scount,
 *         [5]    [5]    [5]    [5]    [5]    [5]
 */
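A sketch of the pairing that drives the exchange pattern pictured above, assuming the usual even/odd neighbor setup for this algorithm (the setup code itself falls outside the visible hunks):

#include <stdio.h>

int main(void)
{
    const int size = 6, rank = 2;   /* size must be even (odd sizes fall back to ring) */
    int neighbor[2];

    if (0 == (rank % 2)) {          /* even ranks pair right first, then left */
        neighbor[0] = (rank + 1) % size;
        neighbor[1] = (rank - 1 + size) % size;
    } else {                        /* odd ranks mirror that order */
        neighbor[0] = (rank - 1 + size) % size;
        neighbor[1] = (rank + 1) % size;
    }

    printf("step 0 exchanges own block with rank %d\n", neighbor[0]);
    for (int step = 1; step < size / 2; ++step) {
        const int i_parity = step % 2;   /* alternate between the two neighbors */
        printf("step %d exchanges the freshly combined pair of blocks with rank %d\n",
               step, neighbor[i_parity]);
    }
    return 0;
}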
int
-ompi_coll_tuned_allgatherv_intra_neighborexchange(void *sbuf, int scount,
+ompi_coll_base_allgatherv_intra_neighborexchange(void *sbuf, int scount,
                                                  struct ompi_datatype_t *sdtype,
                                                  void* rbuf, int *rcounts, int *rdispls,
                                                  struct ompi_datatype_t *rdtype,
@@ -386,17 +379,17 @@ ompi_coll_tuned_allgatherv_intra_neighborexchange(void *sbuf, int scount,
    rank = ompi_comm_rank(comm);
    if (size % 2) {
-        OPAL_OUTPUT((ompi_coll_tuned_stream,
-                     "coll:tuned:allgatherv_intra_neighborexchange WARNING: odd size %d, switching to ring algorithm",
+        OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
+                     "coll:base:allgatherv_intra_neighborexchange WARNING: odd size %d, switching to ring algorithm",
                     size));
-        return ompi_coll_tuned_allgatherv_intra_ring(sbuf, scount, sdtype,
+        return ompi_coll_base_allgatherv_intra_ring(sbuf, scount, sdtype,
                                                     rbuf, rcounts,
                                                     rdispls, rdtype,
                                                     comm, module);
    }
-    OPAL_OUTPUT((ompi_coll_tuned_stream,
-                 "coll:tuned:allgatherv_intra_neighborexchange rank %d", rank));
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
+                 "coll:base:allgatherv_intra_neighborexchange rank %d", rank));
    err = ompi_datatype_get_extent (sdtype, &slb, &sext);
    if (MPI_SUCCESS != err) { line = __LINE__; goto err_hndl; }
@@ -445,7 +438,7 @@ ompi_coll_tuned_allgatherv_intra_neighborexchange(void *sbuf, int scount,
    */
    tmprecv = (char*)rbuf + (ptrdiff_t)rdispls[neighbor[0]] * rext;
    tmpsend = (char*)rbuf + (ptrdiff_t)rdispls[rank] * rext;
-    err = ompi_coll_tuned_sendrecv(tmpsend, rcounts[rank], rdtype,
+    err = ompi_coll_base_sendrecv(tmpsend, rcounts[rank], rdtype,
                                  neighbor[0], MCA_COLL_BASE_TAG_ALLGATHERV,
                                  tmprecv, rcounts[neighbor[0]], rdtype,
                                  neighbor[0], MCA_COLL_BASE_TAG_ALLGATHERV,
@@ -493,7 +486,7 @@ ompi_coll_tuned_allgatherv_intra_neighborexchange(void *sbuf, int scount,
    tmpsend = (char*)rbuf;
    /* Sendreceive */
-    err = ompi_coll_tuned_sendrecv(tmpsend, 1, new_sdtype, neighbor[i_parity],
+    err = ompi_coll_base_sendrecv(tmpsend, 1, new_sdtype, neighbor[i_parity],
                                  MCA_COLL_BASE_TAG_ALLGATHERV,
                                  tmprecv, 1, new_rdtype, neighbor[i_parity],
                                  MCA_COLL_BASE_TAG_ALLGATHERV,
@@ -509,13 +502,13 @@ ompi_coll_tuned_allgatherv_intra_neighborexchange(void *sbuf, int scount,
    return OMPI_SUCCESS;
 err_hndl:
-    OPAL_OUTPUT((ompi_coll_tuned_stream, "%s:%4d\tError occurred %d, rank %2d",
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "%s:%4d\tError occurred %d, rank %2d",
                 __FILE__, line, err, rank));
    return err;
}
-int ompi_coll_tuned_allgatherv_intra_two_procs(void *sbuf, int scount,
+int ompi_coll_base_allgatherv_intra_two_procs(void *sbuf, int scount,
                                               struct ompi_datatype_t *sdtype,
                                               void* rbuf, int *rcounts,
                                               int *rdispls,
@@ -529,8 +522,8 @@ int ompi_coll_tuned_allgatherv_intra_two_procs(void *sbuf, int scount,
    rank = ompi_comm_rank(comm);
-    OPAL_OUTPUT((ompi_coll_tuned_stream,
-                 "ompi_coll_tuned_allgatherv_intra_two_procs rank %d", rank));
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
+                 "ompi_coll_base_allgatherv_intra_two_procs rank %d", rank));
    err = ompi_datatype_get_extent (sdtype, &lb, &sext);
    if (MPI_SUCCESS != err) { line = __LINE__; goto err_hndl; }
@@ -552,7 +545,7 @@ int ompi_coll_tuned_allgatherv_intra_two_procs(void *sbuf, int scount,
    }
    tmprecv = (char*)rbuf + (ptrdiff_t)rdispls[remote] * rext;
-    err = ompi_coll_tuned_sendrecv(tmpsend, scount, sdtype, remote,
+    err = ompi_coll_base_sendrecv(tmpsend, scount, sdtype, remote,
                                  MCA_COLL_BASE_TAG_ALLGATHERV,
                                  tmprecv, rcounts[remote], rdtype, remote,
                                  MCA_COLL_BASE_TAG_ALLGATHERV,
@@ -570,7 +563,7 @@ int ompi_coll_tuned_allgatherv_intra_two_procs(void *sbuf, int scount,
    return MPI_SUCCESS;
 err_hndl:
-    OPAL_OUTPUT((ompi_coll_tuned_stream, "%s:%4d\tError occurred %d, rank %2d",
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "%s:%4d\tError occurred %d, rank %2d",
                 __FILE__, line, err, rank));
    return err;
}
@@ -580,12 +573,12 @@ int ompi_coll_tuned_allgatherv_intra_two_procs(void *sbuf, int scount,
 * Linear functions are copied from the BASIC coll module
 * they do not segment the message and are simple implementations
 * but for some small number of nodes and/or small data sizes they
- * are just as fast as tuned/tree based segmenting operations
+ * are just as fast as base/tree based segmenting operations
 * and as such may be selected by the decision functions
 * These are copied into this module due to the way we select modules
 * in V1. i.e. in V2 we will handle this differently and so will not
 * have to duplicate code.
- * JPG following the examples from other coll_tuned implementations. Dec06.
+ * JPG following the examples from other coll_base implementations. Dec06.
 */

/* copied function (with appropriate renaming) starts here */
@@ -599,7 +592,7 @@ int ompi_coll_tuned_allgatherv_intra_two_procs(void *sbuf, int scount,
 * Returns:    - MPI_SUCCESS or error code
 */
int
-ompi_coll_tuned_allgatherv_intra_basic_default(void *sbuf, int scount,
+ompi_coll_base_allgatherv_intra_basic_default(void *sbuf, int scount,
                                               struct ompi_datatype_t *sdtype,
                                               void *rbuf, int *rcounts,
                                               int *disps,
@@ -619,8 +612,8 @@ ompi_coll_tuned_allgatherv_intra_basic_default(void *sbuf, int scount,
    * to process with rank 0 (OMPI convention)
    */
-    OPAL_OUTPUT((ompi_coll_tuned_stream,
-                 "ompi_coll_tuned_allgatherv_intra_basic_default rank %d",
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
+                 "ompi_coll_base_allgatherv_intra_basic_default rank %d",
                 rank));
    if (MPI_IN_PLACE == sbuf) {
@@ -676,177 +669,3 @@ ompi_coll_tuned_allgatherv_intra_basic_default(void *sbuf, int scount,
/* copied function (with appropriate renaming) ends here */
/* The following are used by dynamic and forced rules */
/* publish details of each algorithm and if its forced/fixed/locked in */
/* as you add methods/algorithms you must update this and the query/map
routines */
/* this routine is called by the component only */
/* this makes sure that the mca parameters are set to their initial values
and perms */
/* module does not call this they call the forced_getvalues routine instead */
int
ompi_coll_tuned_allgatherv_intra_check_forced_init(coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices)
{
mca_base_var_enum_t *new_enum;
ompi_coll_tuned_forced_max_algorithms[ALLGATHERV] = coll_tuned_allgatherv_algorithm_count;
(void) mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"allgatherv_algorithm_count",
"Number of allgatherv algorithms available",
MCA_BASE_VAR_TYPE_INT, NULL, 0,
MCA_BASE_VAR_FLAG_DEFAULT_ONLY,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_CONSTANT,
&coll_tuned_allgatherv_algorithm_count);
/* MPI_T: This variable should eventually be bound to a communicator */
coll_tuned_allgatherv_forced_algorithm = 0;
(void) mca_base_var_enum_create("coll_tuned_allgatherv_algorithms", allgatherv_algorithms, &new_enum);
mca_param_indices->algorithm_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"allgatherv_algorithm",
"Which allallgatherv algorithm is used. Can be locked down to choice of: 0 ignore, 1 default (allgathervv + bcast), 2 bruck, 3 ring, 4 neighbor exchange, 5: two proc only.",
MCA_BASE_VAR_TYPE_INT, new_enum, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_allgatherv_forced_algorithm);
OBJ_RELEASE(new_enum);
if (mca_param_indices->algorithm_param_index < 0) {
return mca_param_indices->algorithm_param_index;
}
coll_tuned_allgatherv_segment_size = 0;
mca_param_indices->segsize_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"allgatherv_algorithm_segmentsize",
"Segment size in bytes used by default for allgatherv algorithms. Only has meaning if algorithm is forced and supports segmenting. 0 bytes means no segmentation. Currently, available algorithms do not support segmentation.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_allgatherv_segment_size);
coll_tuned_allgatherv_tree_fanout = ompi_coll_tuned_init_tree_fanout; /* get system wide default */
mca_param_indices->tree_fanout_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"allgatherv_algorithm_tree_fanout",
"Fanout for n-tree used for allgatherv algorithms. Only has meaning if algorithm is forced and supports n-tree topo based operation. Currently, available algorithms do not support n-tree topologies.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_allgatherv_tree_fanout);
coll_tuned_allgatherv_chain_fanout = ompi_coll_tuned_init_chain_fanout; /* get system wide default */
mca_param_indices->chain_fanout_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"allgatherv_algorithm_chain_fanout",
"Fanout for chains used for allgatherv algorithms. Only has meaning if algorithm is forced and supports chain topo based operation. Currently, available algorithms do not support chain topologies.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_allgatherv_chain_fanout);
return (MPI_SUCCESS);
}
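For reference, the parameters registered above are what let users pin an algorithm at run time from the command line; forcing the ring variant (value 3 in the enum above) would, under the standard Open MPI MCA syntax, look like:

    mpirun --mca coll_tuned_allgatherv_algorithm 3 ./app

(./app is a placeholder; in the tuned component, honoring a forced algorithm generally also requires enabling coll_tuned_use_dynamic_rules.)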
int ompi_coll_tuned_allgatherv_intra_do_forced(void *sbuf, int scount,
struct ompi_datatype_t *sdtype,
void *rbuf, int *rcounts,
int *rdispls,
struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data;
OPAL_OUTPUT((ompi_coll_tuned_stream,
"coll:tuned:allgatherv_intra_do_forced selected algorithm %d",
data->user_forced[ALLGATHERV].algorithm));
switch (data->user_forced[ALLGATHERV].algorithm) {
case (0):
return ompi_coll_tuned_allgatherv_intra_dec_fixed (sbuf, scount, sdtype,
rbuf, rcounts, rdispls, rdtype,
comm, module);
case (1):
return ompi_coll_tuned_allgatherv_intra_basic_default (sbuf, scount, sdtype,
rbuf, rcounts, rdispls, rdtype,
comm, module);
case (2):
return ompi_coll_tuned_allgatherv_intra_bruck (sbuf, scount, sdtype,
rbuf, rcounts, rdispls, rdtype,
comm, module);
case (3):
return ompi_coll_tuned_allgatherv_intra_ring (sbuf, scount, sdtype,
rbuf, rcounts, rdispls, rdtype,
comm, module);
case (4):
return ompi_coll_tuned_allgatherv_intra_neighborexchange (sbuf, scount, sdtype,
rbuf, rcounts, rdispls, rdtype,
comm, module);
case (5):
return ompi_coll_tuned_allgatherv_intra_two_procs (sbuf, scount, sdtype,
rbuf, rcounts, rdispls, rdtype,
comm, module);
default:
OPAL_OUTPUT((ompi_coll_tuned_stream,
"coll:tuned:allgatherv_intra_do_forced attempt to select algorithm %d when only 0-%d is valid?",
data->user_forced[ALLGATHERV].algorithm,
ompi_coll_tuned_forced_max_algorithms[ALLGATHERV]));
return (MPI_ERR_ARG);
} /* switch */
}
int ompi_coll_tuned_allgatherv_intra_do_this(void *sbuf, int scount,
struct ompi_datatype_t *sdtype,
void *rbuf, int *rcounts,
int *rdispls,
struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module,
int algorithm, int faninout,
int segsize)
{
OPAL_OUTPUT((ompi_coll_tuned_stream,
"coll:tuned:allgatherv_intra_do_this selected algorithm %d topo faninout %d segsize %d",
algorithm, faninout, segsize));
switch (algorithm) {
case (0):
return ompi_coll_tuned_allgatherv_intra_dec_fixed(sbuf, scount, sdtype,
rbuf, rcounts, rdispls, rdtype,
comm, module);
case (1):
return ompi_coll_tuned_allgatherv_intra_basic_default(sbuf, scount, sdtype,
rbuf, rcounts, rdispls, rdtype,
comm, module);
case (2):
return ompi_coll_tuned_allgatherv_intra_bruck(sbuf, scount, sdtype,
rbuf, rcounts, rdispls, rdtype,
comm, module);
case (3):
return ompi_coll_tuned_allgatherv_intra_ring(sbuf, scount, sdtype,
rbuf, rcounts, rdispls, rdtype,
comm, module);
case (4):
return ompi_coll_tuned_allgatherv_intra_neighborexchange(sbuf, scount, sdtype,
rbuf, rcounts, rdispls, rdtype,
comm, module);
case (5):
return ompi_coll_tuned_allgatherv_intra_two_procs (sbuf, scount, sdtype,
rbuf, rcounts, rdispls, rdtype,
comm, module);
default:
OPAL_OUTPUT((ompi_coll_tuned_stream,
"coll:tuned:allgatherv_intra_do_this attempt to select algorithm %d when only 0-%d is valid?",
algorithm,
ompi_coll_tuned_forced_max_algorithms[ALLGATHERV]));
return (MPI_ERR_ARG);
} /* switch */
}

View file

@@ -3,7 +3,7 @@
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2014 The University of Tennessee and The University
+ * Copyright (c) 2004-2015 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@@ -31,33 +31,15 @@
#include "ompi/mca/coll/base/coll_tags.h"
#include "ompi/mca/pml/pml.h"
#include "ompi/op/op.h"
-#include "coll_tuned.h"
-#include "coll_tuned_topo.h"
-#include "coll_tuned_util.h"
+#include "ompi/mca/coll/base/coll_base_functions.h"
+#include "coll_base_topo.h"
+#include "coll_base_util.h"
/* allreduce algorithm variables */
static int coll_tuned_allreduce_algorithm_count = 5;
static int coll_tuned_allreduce_forced_algorithm = 0;
static int coll_tuned_allreduce_segment_size = 0;
static int coll_tuned_allreduce_tree_fanout;
static int coll_tuned_allreduce_chain_fanout;
/* valid values for coll_tuned_allreduce_forced_algorithm */
static mca_base_var_enum_value_t allreduce_algorithms[] = {
{0, "ignore"},
{1, "basic_linear"},
{2, "nonoverlapping"},
{3, "recursive_doubling"},
{4, "ring"},
{5, "segmented_ring"},
{0, NULL}
};
/*
- * ompi_coll_tuned_allreduce_intra_nonoverlapping
+ * ompi_coll_base_allreduce_intra_nonoverlapping
 *
 * This function just calls a reduce followed by a broadcast
- * both called functions are tuned but they complete sequentially,
+ * both called functions are base but they complete sequentially,
 * i.e. no additional overlapping
 * meaning if the number of segments used is greater than the topo depth
 * then once the first segment of data is fully 'reduced' it is not broadcast
@@ -65,7 +47,7 @@ static mca_base_var_enum_value_t allreduce_algorithms[] = {
 *
 */
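The description above is just a composition of two collectives. A minimal sketch of the same idea at the public MPI API level (not the internal coll entry points used here):

#include <mpi.h>
#include <stdio.h>

/* allreduce as reduce-to-root followed by broadcast: correct, but the two
   phases run strictly one after the other, exactly as described above */
static int allreduce_nonoverlapping(const void *sbuf, void *rbuf, int count,
                                    MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
{
    int rank, err;
    MPI_Comm_rank(comm, &rank);
    if (MPI_IN_PLACE == sbuf) {           /* input already sits in rbuf */
        if (0 == rank)
            err = MPI_Reduce(MPI_IN_PLACE, rbuf, count, dtype, op, 0, comm);
        else
            err = MPI_Reduce(rbuf, NULL, count, dtype, op, 0, comm);
    } else {
        err = MPI_Reduce(sbuf, rbuf, count, dtype, op, 0, comm);
    }
    if (MPI_SUCCESS != err) return err;
    return MPI_Bcast(rbuf, count, dtype, 0, comm);  /* fan the result back out */
}

int main(int argc, char **argv)
{
    int x, sum;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &x);    /* use the rank itself as input */
    allreduce_nonoverlapping(&x, &sum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    printf("rank %d: sum = %d\n", x, sum);
    MPI_Finalize();
    return 0;
}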
int
-ompi_coll_tuned_allreduce_intra_nonoverlapping(void *sbuf, void *rbuf, int count,
+ompi_coll_base_allreduce_intra_nonoverlapping(void *sbuf, void *rbuf, int count,
                                              struct ompi_datatype_t *dtype,
                                              struct ompi_op_t *op,
                                              struct ompi_communicator_t *comm,
@@ -75,7 +57,7 @@ ompi_coll_tuned_allreduce_intra_nonoverlapping(void *sbuf, void *rbuf, int count
    rank = ompi_comm_rank(comm);
-    OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:allreduce_intra_nonoverlapping rank %d", rank));
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:allreduce_intra_nonoverlapping rank %d", rank));
    /* Reduce to 0 and broadcast. */
@@ -100,7 +82,7 @@ ompi_coll_tuned_allreduce_intra_nonoverlapping(void *sbuf, void *rbuf, int count
}
/*
- * ompi_coll_tuned_allreduce_intra_recursivedoubling
+ * ompi_coll_base_allreduce_intra_recursivedoubling
 *
 * Function:     Recursive doubling algorithm for allreduce operation
 * Accepts:      Same as MPI_Allreduce()
@@ -141,7 +123,7 @@ ompi_coll_tuned_allreduce_intra_nonoverlapping(void *sbuf, void *rbuf, int count
 *
 */
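A sketch of the exchange schedule for the power-of-two case; the pre/post folding steps that the full implementation uses for non-power-of-two sizes are omitted here:

#include <stdio.h>

int main(void)
{
    const int size = 8, rank = 5;   /* power-of-two communicator size only */

    /* log2(size) rounds; each round pairs ranks differing in one bit and
       combines the received data into the local result */
    for (int distance = 1; distance < size; distance <<= 1) {
        int remote = rank ^ distance;
        printf("exchange with rank %d, then apply the reduction locally\n",
               remote);
    }
    return 0;
}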
int
-ompi_coll_tuned_allreduce_intra_recursivedoubling(void *sbuf, void *rbuf,
+ompi_coll_base_allreduce_intra_recursivedoubling(void *sbuf, void *rbuf,
                                                  int count,
                                                  struct ompi_datatype_t *dtype,
                                                  struct ompi_op_t *op,
@@ -157,8 +139,8 @@ ompi_coll_tuned_allreduce_intra_recursivedoubling(void *sbuf, void *rbuf,
    size = ompi_comm_size(comm);
    rank = ompi_comm_rank(comm);
-    OPAL_OUTPUT((ompi_coll_tuned_stream,
-                 "coll:tuned:allreduce_intra_recursivedoubling rank %d", rank));
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
+                 "coll:base:allreduce_intra_recursivedoubling rank %d", rank));
    /* Special case for size == 1 */
    if (1 == size) {
@@ -287,14 +269,14 @@ ompi_coll_tuned_allreduce_intra_recursivedoubling(void *sbuf, void *rbuf,
    return MPI_SUCCESS;
 error_hndl:
-    OPAL_OUTPUT((ompi_coll_tuned_stream, "%s:%4d\tRank %d Error occurred %d\n",
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "%s:%4d\tRank %d Error occurred %d\n",
                 __FILE__, line, rank, ret));
    if (NULL != inplacebuf) free(inplacebuf);
    return ret;
}
/*
- * ompi_coll_tuned_allreduce_intra_ring
+ * ompi_coll_base_allreduce_intra_ring
 *
 * Function:     Ring algorithm for allreduce operation
 * Accepts:      Same as MPI_Allreduce()
@@ -358,7 +340,7 @@ ompi_coll_tuned_allreduce_intra_recursivedoubling(void *sbuf, void *rbuf,
 *
 */
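The ring algorithm splits count elements into size blocks whose sizes differ by at most one; that is what COLL_BASE_COMPUTE_BLOCKCOUNT (used below) computes, per the "early/late" comment in the code. A standalone sketch with a worked example (count = 10 over 4 blocks gives 3, 3, 2, 2 with split_rank = 2):

#include <stdio.h>

int main(void)
{
    const int count = 10, size = 4;  /* example: 10 elements over 4 blocks */
    int early = count / size, late = count / size;
    int split = count % size;        /* blocks [0, split) get one extra element */
    if (0 != split) early += 1;

    for (int b = 0; b < size; ++b)
        printf("block %d holds %d elements\n", b, (b < split) ? early : late);
    return 0;
}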
int
-ompi_coll_tuned_allreduce_intra_ring(void *sbuf, void *rbuf, int count,
+ompi_coll_base_allreduce_intra_ring(void *sbuf, void *rbuf, int count,
                                    struct ompi_datatype_t *dtype,
                                    struct ompi_op_t *op,
                                    struct ompi_communicator_t *comm,
@@ -375,8 +357,8 @@ ompi_coll_tuned_allreduce_intra_ring(void *sbuf, void *rbuf, int count,
    size = ompi_comm_size(comm);
    rank = ompi_comm_rank(comm);
-    OPAL_OUTPUT((ompi_coll_tuned_stream,
-                 "coll:tuned:allreduce_intra_ring rank %d, count %d", rank, count));
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
+                 "coll:base:allreduce_intra_ring rank %d, count %d", rank, count));
    /* Special case for size == 1 */
    if (1 == size) {
@@ -389,8 +371,8 @@ ompi_coll_tuned_allreduce_intra_ring(void *sbuf, void *rbuf, int count,
    /* Special case for count less than size - use recursive doubling */
    if (count < size) {
-        OPAL_OUTPUT((ompi_coll_tuned_stream, "coll:tuned:allreduce_ring rank %d/%d, count %d, switching to recursive doubling", rank, size, count));
-        return (ompi_coll_tuned_allreduce_intra_recursivedoubling(sbuf, rbuf,
+        OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "coll:base:allreduce_ring rank %d/%d, count %d, switching to recursive doubling", rank, size, count));
+        return (ompi_coll_base_allreduce_intra_recursivedoubling(sbuf, rbuf,
                                                                 count,
                                                                 dtype, op,
                                                                 comm, module));
@@ -411,7 +393,7 @@ ompi_coll_tuned_allreduce_intra_ring(void *sbuf, void *rbuf, int count,
       blocks (split_rank) .. (size - 1) are "late".
       Early blocks are at most 1 element larger than the late ones.
    */
-    COLL_TUNED_COMPUTE_BLOCKCOUNT( count, size, split_rank,
+    COLL_BASE_COMPUTE_BLOCKCOUNT( count, size, split_rank,
                                  early_segcount, late_segcount );
    max_segcount = early_segcount;
    max_real_segsize = true_extent + (max_segcount - 1) * extent;
@@ -531,7 +513,7 @@ ompi_coll_tuned_allreduce_intra_ring(void *sbuf, void *rbuf, int count,
    tmprecv = (char*)rbuf + (ptrdiff_t)recv_block_offset * extent;
    tmpsend = (char*)rbuf + (ptrdiff_t)send_block_offset * extent;
-    ret = ompi_coll_tuned_sendrecv(tmpsend, block_count, dtype, send_to,
+    ret = ompi_coll_base_sendrecv(tmpsend, block_count, dtype, send_to,
                                  MCA_COLL_BASE_TAG_ALLREDUCE,
                                  tmprecv, max_segcount, dtype, recv_from,
                                  MCA_COLL_BASE_TAG_ALLREDUCE,
@@ -546,7 +528,7 @@ ompi_coll_tuned_allreduce_intra_ring(void *sbuf, void *rbuf, int count,
    return MPI_SUCCESS;
 error_hndl:
-    OPAL_OUTPUT((ompi_coll_tuned_stream, "%s:%4d\tRank %d Error occurred %d\n",
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "%s:%4d\tRank %d Error occurred %d\n",
                 __FILE__, line, rank, ret));
    if (NULL != inbuf[0]) free(inbuf[0]);
    if (NULL != inbuf[1]) free(inbuf[1]);
@@ -554,7 +536,7 @@ ompi_coll_tuned_allreduce_intra_ring(void *sbuf, void *rbuf, int count,
}
/*
- * ompi_coll_tuned_allreduce_intra_ring_segmented
+ * ompi_coll_base_allreduce_intra_ring_segmented
 *
 * Function:     Pipelined ring algorithm for allreduce operation
 * Accepts:      Same as MPI_Allreduce(), segment size
@@ -633,7 +615,7 @@ ompi_coll_tuned_allreduce_intra_ring(void *sbuf, void *rbuf, int count,
 *
 */
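COLL_BASE_COMPUTED_SEGCOUNT (used below) turns a segment size in bytes into a per-segment element count. A simplified sketch of that conversion (the real macro also rounds on the byte residual; plain truncation here is an assumption for brevity):

#include <stdio.h>

int main(void)
{
    const size_t typelng = 8;      /* e.g. sizeof(double) */
    const int count = 1000;        /* total elements */
    const size_t segsize = 1024;   /* requested segment size in bytes */

    int segcount = count;          /* default: no segmentation */
    if (segsize >= typelng && segsize < typelng * (size_t)count)
        segcount = (int)(segsize / typelng);   /* elements per segment */

    printf("segcount = %d (%d segments of <= %zu bytes)\n",
           segcount, (count + segcount - 1) / segcount,
           (size_t)segcount * typelng);
    return 0;
}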
int
-ompi_coll_tuned_allreduce_intra_ring_segmented(void *sbuf, void *rbuf, int count,
+ompi_coll_base_allreduce_intra_ring_segmented(void *sbuf, void *rbuf, int count,
                                              struct ompi_datatype_t *dtype,
                                              struct ompi_op_t *op,
                                              struct ompi_communicator_t *comm,
@@ -652,8 +634,8 @@ ompi_coll_tuned_allreduce_intra_ring_segmented(void *sbuf, void *rbuf, int count
    size = ompi_comm_size(comm);
    rank = ompi_comm_rank(comm);
-    OPAL_OUTPUT((ompi_coll_tuned_stream,
-                 "coll:tuned:allreduce_intra_ring_segmented rank %d, count %d", rank, count));
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
+                 "coll:base:allreduce_intra_ring_segmented rank %d, count %d", rank, count));
    /* Special case for size == 1 */
    if (1 == size) {
@@ -672,12 +654,12 @@ ompi_coll_tuned_allreduce_intra_ring_segmented(void *sbuf, void *rbuf, int count
    ret = ompi_datatype_type_size( dtype, &typelng);
    if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
    segcount = count;
-    COLL_TUNED_COMPUTED_SEGCOUNT(segsize, typelng, segcount)
+    COLL_BASE_COMPUTED_SEGCOUNT(segsize, typelng, segcount)
    /* Special case for count less than size * segcount - use regular ring */
    if (count < (size * segcount)) {
-        OPAL_OUTPUT((ompi_coll_tuned_stream, "coll:tuned:allreduce_ring_segmented rank %d/%d, count %d, switching to regular ring", rank, size, count));
-        return (ompi_coll_tuned_allreduce_intra_ring(sbuf, rbuf, count, dtype, op,
+        OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "coll:base:allreduce_ring_segmented rank %d/%d, count %d, switching to regular ring", rank, size, count));
+        return (ompi_coll_base_allreduce_intra_ring(sbuf, rbuf, count, dtype, op,
                                                    comm, module));
    }
@@ -697,9 +679,9 @@ ompi_coll_tuned_allreduce_intra_ring_segmented(void *sbuf, void *rbuf, int count
       Note, these blocks will be split into num_phases segments,
       out of the largest one will have max_segcount elements.
    */
-    COLL_TUNED_COMPUTE_BLOCKCOUNT( count, size, split_rank,
+    COLL_BASE_COMPUTE_BLOCKCOUNT( count, size, split_rank,
                                  early_blockcount, late_blockcount );
-    COLL_TUNED_COMPUTE_BLOCKCOUNT( early_blockcount, num_phases, inbi,
+    COLL_BASE_COMPUTE_BLOCKCOUNT( early_blockcount, num_phases, inbi,
                                  max_segcount, k);
    max_real_segsize = true_extent + (ptrdiff_t)(max_segcount - 1) * extent;
@@ -754,7 +736,7 @@ ompi_coll_tuned_allreduce_intra_ring_segmented(void *sbuf, void *rbuf, int count
                  ((ptrdiff_t)rank * (ptrdiff_t)early_blockcount) :
                  ((ptrdiff_t)rank * (ptrdiff_t)late_blockcount + split_rank));
    block_count = ((rank < split_rank)? early_blockcount : late_blockcount);
-    COLL_TUNED_COMPUTE_BLOCKCOUNT(block_count, num_phases, split_phase,
+    COLL_BASE_COMPUTE_BLOCKCOUNT(block_count, num_phases, split_phase,
                                  early_phase_segcount, late_phase_segcount)
    phase_count = ((phase < split_phase)?
                   (early_phase_segcount) : (late_phase_segcount));
@@ -790,7 +772,7 @@ ompi_coll_tuned_allreduce_intra_ring_segmented(void *sbuf, void *rbuf, int count
                  ((ptrdiff_t)prevblock * (ptrdiff_t)late_blockcount + split_rank));
    block_count = ((prevblock < split_rank)?
                   early_blockcount : late_blockcount);
-    COLL_TUNED_COMPUTE_BLOCKCOUNT(block_count, num_phases, split_phase,
+    COLL_BASE_COMPUTE_BLOCKCOUNT(block_count, num_phases, split_phase,
                                  early_phase_segcount, late_phase_segcount)
    phase_count = ((phase < split_phase)?
                   (early_phase_segcount) : (late_phase_segcount));
@@ -819,7 +801,7 @@ ompi_coll_tuned_allreduce_intra_ring_segmented(void *sbuf, void *rbuf, int count
                  ((ptrdiff_t)recv_from * (ptrdiff_t)late_blockcount + split_rank));
    block_count = ((recv_from < split_rank)?
                   early_blockcount : late_blockcount);
-    COLL_TUNED_COMPUTE_BLOCKCOUNT(block_count, num_phases, split_phase,
+    COLL_BASE_COMPUTE_BLOCKCOUNT(block_count, num_phases, split_phase,
                                  early_phase_segcount, late_phase_segcount)
    phase_count = ((phase < split_phase)?
                   (early_phase_segcount) : (late_phase_segcount));
@@ -850,7 +832,7 @@ ompi_coll_tuned_allreduce_intra_ring_segmented(void *sbuf, void *rbuf, int count
    tmprecv = (char*)rbuf + (ptrdiff_t)recv_block_offset * extent;
    tmpsend = (char*)rbuf + (ptrdiff_t)send_block_offset * extent;
-    ret = ompi_coll_tuned_sendrecv(tmpsend, block_count, dtype, send_to,
+    ret = ompi_coll_base_sendrecv(tmpsend, block_count, dtype, send_to,
                                  MCA_COLL_BASE_TAG_ALLREDUCE,
                                  tmprecv, early_blockcount, dtype, recv_from,
                                  MCA_COLL_BASE_TAG_ALLREDUCE,
@@ -865,7 +847,7 @@ ompi_coll_tuned_allreduce_intra_ring_segmented(void *sbuf, void *rbuf, int count
    return MPI_SUCCESS;
 error_hndl:
-    OPAL_OUTPUT((ompi_coll_tuned_stream, "%s:%4d\tRank %d Error occurred %d\n",
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "%s:%4d\tRank %d Error occurred %d\n",
                 __FILE__, line, rank, ret));
    if (NULL != inbuf[0]) free(inbuf[0]);
    if (NULL != inbuf[1]) free(inbuf[1]);
@@ -876,7 +858,7 @@ ompi_coll_tuned_allreduce_intra_ring_segmented(void *sbuf, void *rbuf, int count
 * Linear functions are copied from the BASIC coll module
 * they do not segment the message and are simple implementations
 * but for some small number of nodes and/or small data sizes they
- * are just as fast as tuned/tree based segmenting operations
+ * are just as fast as base/tree based segmenting operations
 * and as such may be selected by the decision functions
 * These are copied into this module due to the way we select modules
 * in V1. i.e. in V2 we will handle this differently and so will not
@@ -895,7 +877,7 @@ ompi_coll_tuned_allreduce_intra_ring_segmented(void *sbuf, void *rbuf, int count
 * Returns:    - MPI_SUCCESS or error code
 */
int
-ompi_coll_tuned_allreduce_intra_basic_linear(void *sbuf, void *rbuf, int count,
+ompi_coll_base_allreduce_intra_basic_linear(void *sbuf, void *rbuf, int count,
                                            struct ompi_datatype_t *dtype,
                                            struct ompi_op_t *op,
                                            struct ompi_communicator_t *comm,
@@ -905,158 +887,28 @@ ompi_coll_tuned_allreduce_intra_basic_linear(void *sbuf, void *rbuf, int count,
    rank = ompi_comm_rank(comm);
-    OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:allreduce_intra_basic_linear rank %d", rank));
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:allreduce_intra_basic_linear rank %d", rank));
    /* Reduce to 0 and broadcast. */
    if (MPI_IN_PLACE == sbuf) {
        if (0 == rank) {
-            err = ompi_coll_tuned_reduce_intra_basic_linear (MPI_IN_PLACE, rbuf, count, dtype,
+            err = ompi_coll_base_reduce_intra_basic_linear (MPI_IN_PLACE, rbuf, count, dtype,
                                                            op, 0, comm, module);
        } else {
-            err = ompi_coll_tuned_reduce_intra_basic_linear(rbuf, NULL, count, dtype,
+            err = ompi_coll_base_reduce_intra_basic_linear(rbuf, NULL, count, dtype,
                                                           op, 0, comm, module);
        }
    } else {
-        err = ompi_coll_tuned_reduce_intra_basic_linear(sbuf, rbuf, count, dtype,
+        err = ompi_coll_base_reduce_intra_basic_linear(sbuf, rbuf, count, dtype,
                                                       op, 0, comm, module);
    }
    if (MPI_SUCCESS != err) {
        return err;
    }
-    return ompi_coll_tuned_bcast_intra_basic_linear(rbuf, count, dtype, 0, comm, module);
+    return ompi_coll_base_bcast_intra_basic_linear(rbuf, count, dtype, 0, comm, module);
}
/* copied function (with appropriate renaming) ends here */
/* The following are used by dynamic and forced rules */
/* publish details of each algorithm and if its forced/fixed/locked in */
/* as you add methods/algorithms you must update this and the query/map routines */
/* this routine is called by the component only */
/* this makes sure that the mca parameters are set to their initial values and perms */
/* module does not call this they call the forced_getvalues routine instead */
int ompi_coll_tuned_allreduce_intra_check_forced_init (coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices)
{
mca_base_var_enum_t *new_enum;
ompi_coll_tuned_forced_max_algorithms[ALLREDUCE] = coll_tuned_allreduce_algorithm_count;
(void) mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"allreduce_algorithm_count",
"Number of allreduce algorithms available",
MCA_BASE_VAR_TYPE_INT, NULL, 0,
MCA_BASE_VAR_FLAG_DEFAULT_ONLY,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_CONSTANT,
&coll_tuned_allreduce_algorithm_count);
/* MPI_T: This variable should eventually be bound to a communicator */
coll_tuned_allreduce_forced_algorithm = 0;
(void) mca_base_var_enum_create("coll_tuned_allreduce_algorithms", allreduce_algorithms, &new_enum);
mca_param_indices->algorithm_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"allreduce_algorithm",
"Which allreduce algorithm is used. Can be locked down to any of: 0 ignore, 1 basic linear, 2 nonoverlapping (tuned reduce + tuned bcast), 3 recursive doubling, 4 ring, 5 segmented ring",
MCA_BASE_VAR_TYPE_INT, new_enum, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_allreduce_forced_algorithm);
OBJ_RELEASE(new_enum);
if (mca_param_indices->algorithm_param_index < 0) {
return mca_param_indices->algorithm_param_index;
}
coll_tuned_allreduce_segment_size = 0;
mca_param_indices->segsize_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"allreduce_algorithm_segmentsize",
"Segment size in bytes used by default for allreduce algorithms. Only has meaning if algorithm is forced and supports segmenting. 0 bytes means no segmentation.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_allreduce_segment_size);
coll_tuned_allreduce_tree_fanout = ompi_coll_tuned_init_tree_fanout; /* get system wide default */
mca_param_indices->tree_fanout_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"allreduce_algorithm_tree_fanout",
"Fanout for n-tree used for allreduce algorithms. Only has meaning if algorithm is forced and supports n-tree topo based operation.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_allreduce_tree_fanout);
coll_tuned_allreduce_chain_fanout = ompi_coll_tuned_init_chain_fanout; /* get system wide default */
mca_param_indices->chain_fanout_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"allreduce_algorithm_chain_fanout",
"Fanout for chains used for allreduce algorithms. Only has meaning if algorithm is forced and supports chain topo based operation.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_allreduce_chain_fanout);
return (MPI_SUCCESS);
}
int ompi_coll_tuned_allreduce_intra_do_forced(void *sbuf, void *rbuf, int count,
struct ompi_datatype_t *dtype,
struct ompi_op_t *op,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data;
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:allreduce_intra_do_forced selected algorithm %d, segment size %d",
data->user_forced[ALLREDUCE].algorithm,
data->user_forced[ALLREDUCE].segsize));
switch (data->user_forced[ALLREDUCE].algorithm) {
case (0): return ompi_coll_tuned_allreduce_intra_dec_fixed (sbuf, rbuf, count, dtype, op, comm, module);
case (1): return ompi_coll_tuned_allreduce_intra_basic_linear (sbuf, rbuf, count, dtype, op, comm, module);
case (2): return ompi_coll_tuned_allreduce_intra_nonoverlapping (sbuf, rbuf, count, dtype, op, comm, module);
case (3): return ompi_coll_tuned_allreduce_intra_recursivedoubling (sbuf, rbuf, count, dtype, op, comm, module);
case (4): return ompi_coll_tuned_allreduce_intra_ring (sbuf, rbuf, count, dtype, op, comm, module);
case (5): return ompi_coll_tuned_allreduce_intra_ring_segmented (sbuf, rbuf, count, dtype, op, comm, module, data->user_forced[ALLREDUCE].segsize);
default:
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:allreduce_intra_do_forced attempt to select algorithm %d when only 0-%d is valid?",
data->user_forced[ALLREDUCE].algorithm,
ompi_coll_tuned_forced_max_algorithms[ALLREDUCE]));
return (MPI_ERR_ARG);
} /* switch */
}
int ompi_coll_tuned_allreduce_intra_do_this(void *sbuf, void *rbuf, int count,
struct ompi_datatype_t *dtype,
struct ompi_op_t *op,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module,
int algorithm, int faninout, int segsize)
{
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:allreduce_intra_do_this algorithm %d topo fan in/out %d segsize %d",
algorithm, faninout, segsize));
switch (algorithm) {
case (0): return ompi_coll_tuned_allreduce_intra_dec_fixed (sbuf, rbuf, count, dtype, op, comm, module);
case (1): return ompi_coll_tuned_allreduce_intra_basic_linear (sbuf, rbuf, count, dtype, op, comm, module);
case (2): return ompi_coll_tuned_allreduce_intra_nonoverlapping (sbuf, rbuf, count, dtype, op, comm, module);
case (3): return ompi_coll_tuned_allreduce_intra_recursivedoubling (sbuf, rbuf, count, dtype, op, comm, module);
case (4): return ompi_coll_tuned_allreduce_intra_ring (sbuf, rbuf, count, dtype, op, comm, module);
case (5): return ompi_coll_tuned_allreduce_intra_ring_segmented (sbuf, rbuf, count, dtype, op, comm, module, segsize);
default:
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:allreduce_intra_do_this attempt to select algorithm %d when only 0-%d is valid?",
algorithm, ompi_coll_tuned_forced_max_algorithms[ALLREDUCE]));
return (MPI_ERR_ARG);
} /* switch */
}

View file

@@ -3,7 +3,7 @@
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
- * Copyright (c) 2004-2012 The University of Tennessee and The University
+ * Copyright (c) 2004-2015 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@@ -30,37 +30,18 @@
#include "ompi/mca/coll/coll.h"
#include "ompi/mca/coll/base/coll_tags.h"
#include "ompi/mca/pml/pml.h"
-#include "coll_tuned.h"
-#include "coll_tuned_topo.h"
-#include "coll_tuned_util.h"
+#include "ompi/mca/coll/base/coll_base_functions.h"
+#include "coll_base_topo.h"
+#include "coll_base_util.h"
/* alltoall algorithm variables */
static int coll_tuned_alltoall_algorithm_count = 5;
static int coll_tuned_alltoall_forced_algorithm = 0;
static int coll_tuned_alltoall_segment_size = 0;
static int coll_tuned_alltoall_max_requests;
static int coll_tuned_alltoall_tree_fanout;
static int coll_tuned_alltoall_chain_fanout;
/* valid values for coll_tuned_alltoall_forced_algorithm */
static mca_base_var_enum_value_t alltoall_algorithms[] = {
{0, "ignore"},
{1, "linear"},
{2, "pairwise"},
{3, "modified_bruck"},
{4, "linear_sync"},
{5, "two_proc"},
{0, NULL}
};
/* MPI_IN_PLACE all to all algorithm. TODO: implement a better one. */
static int
-mca_coll_tuned_alltoall_intra_basic_inplace(void *rbuf, int rcount,
+mca_coll_base_alltoall_intra_basic_inplace(void *rbuf, int rcount,
                                            struct ompi_datatype_t *rdtype,
                                            struct ompi_communicator_t *comm,
                                            mca_coll_base_module_t *module)
{
-    mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module;
+    mca_coll_base_module_t *base_module = (mca_coll_base_module_t*) module;
    int i, j, size, rank, err=MPI_SUCCESS;
    MPI_Request *preq;
    char *tmp_buffer;
@@ -91,7 +72,7 @@ mca_coll_tuned_alltoall_intra_basic_inplace(void *rbuf, int rcount,
    for (i = 0 ; i < size ; ++i) {
        for (j = i+1 ; j < size ; ++j) {
            /* Initiate all send/recv to/from others. */
-            preq = tuned_module->tuned_data->mcct_reqs;
+            preq = base_module->base_data->mcct_reqs;
            if (i == rank) {
                /* Copy the data into the temporary buffer */
@@ -128,11 +109,8 @@ mca_coll_tuned_alltoall_intra_basic_inplace(void *rbuf, int rcount,
            }
            /* Wait for the requests to complete */
-            err = ompi_request_wait_all (2, tuned_module->tuned_data->mcct_reqs, MPI_STATUSES_IGNORE);
+            err = ompi_request_wait_all (2, base_module->base_data->mcct_reqs, MPI_STATUSES_IGNORE);
            if (MPI_SUCCESS != err) { goto error_hndl; }
-            /* Free the requests. */
-            mca_coll_tuned_free_reqs(tuned_module->tuned_data->mcct_reqs, 2);
        }
    }
@@ -145,7 +123,7 @@ mca_coll_tuned_alltoall_intra_basic_inplace(void *rbuf, int rcount,
    return err;
}
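The in-place algorithm above visits every rank pair (i, j) and, whenever this process is one end of the pair, exchanges the corresponding block through the temporary buffer so the receive can safely overwrite rbuf. A rank-local sketch of that schedule (inferred from the visible loop structure):

#include <stdio.h>

int main(void)
{
    const int size = 4, rank = 1;

    for (int i = 0; i < size; ++i) {
        for (int j = i + 1; j < size; ++j) {
            if (i == rank)        /* stash block j in tmp, send it, recv into rbuf block j */
                printf("exchange block %d with rank %d via tmp buffer\n", j, j);
            else if (j == rank)   /* symmetric case: this rank is the other end */
                printf("exchange block %d with rank %d via tmp buffer\n", i, i);
            /* pairs not involving this rank cost it nothing */
        }
    }
    return 0;
}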
-int ompi_coll_tuned_alltoall_intra_pairwise(void *sbuf, int scount,
+int ompi_coll_base_alltoall_intra_pairwise(void *sbuf, int scount,
                                            struct ompi_datatype_t *sdtype,
                                            void* rbuf, int rcount,
                                            struct ompi_datatype_t *rdtype,
@@ -157,15 +135,15 @@ int ompi_coll_tuned_alltoall_intra_pairwise(void *sbuf, int scount,
    ptrdiff_t lb, sext, rext;
    if (MPI_IN_PLACE == sbuf) {
-        return mca_coll_tuned_alltoall_intra_basic_inplace (rbuf, rcount, rdtype,
+        return mca_coll_base_alltoall_intra_basic_inplace (rbuf, rcount, rdtype,
                                                            comm, module);
    }
    size = ompi_comm_size(comm);
    rank = ompi_comm_rank(comm);
-    OPAL_OUTPUT((ompi_coll_tuned_stream,
-                 "coll:tuned:alltoall_intra_pairwise rank %d", rank));
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
+                 "coll:base:alltoall_intra_pairwise rank %d", rank));
    err = ompi_datatype_get_extent (sdtype, &lb, &sext);
    if (err != MPI_SUCCESS) { line = __LINE__; goto err_hndl; }
@@ -185,7 +163,7 @@ int ompi_coll_tuned_alltoall_intra_pairwise(void *sbuf, int scount,
    tmprecv = (char*)rbuf + (ptrdiff_t)recvfrom * rext * (ptrdiff_t)rcount;
    /* send and receive */
-    err = ompi_coll_tuned_sendrecv( tmpsend, scount, sdtype, sendto,
+    err = ompi_coll_base_sendrecv( tmpsend, scount, sdtype, sendto,
                                   MCA_COLL_BASE_TAG_ALLTOALL,
                                   tmprecv, rcount, rdtype, recvfrom,
                                   MCA_COLL_BASE_TAG_ALLTOALL,
@@ -196,14 +174,14 @@ int ompi_coll_tuned_alltoall_intra_pairwise(void *sbuf, int scount,
    return MPI_SUCCESS;
 err_hndl:
-    OPAL_OUTPUT((ompi_coll_tuned_stream,
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
                 "%s:%4d\tError occurred %d, rank %2d", __FILE__, line,
                 err, rank));
    return err;
}
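In the pairwise exchange, each step gives every rank exactly one send peer and one receive peer, so every step is a clean point-to-point exchange. A sketch of the peer arithmetic (assumed from the standard pairwise schedule; the peer assignment itself sits outside the visible hunks):

#include <stdio.h>

int main(void)
{
    const int size = 5, rank = 2;

    for (int step = 1; step < size + 1; ++step) {
        int sendto   = (rank + step) % size;         /* peer we send to */
        int recvfrom = (rank - step + size) % size;  /* peer we receive from */
        /* the final step pairs the rank with itself: the local copy */
        printf("step %d: send to %d, recv from %d\n", step, sendto, recvfrom);
    }
    return 0;
}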
-int ompi_coll_tuned_alltoall_intra_bruck(void *sbuf, int scount,
+int ompi_coll_base_alltoall_intra_bruck(void *sbuf, int scount,
                                         struct ompi_datatype_t *sdtype,
                                         void* rbuf, int rcount,
                                         struct ompi_datatype_t *rdtype,
@@ -216,20 +194,20 @@ int ompi_coll_tuned_alltoall_intra_bruck(void *sbuf, int scount,
    ptrdiff_t rlb, slb, tlb, sext, rext, tsext;
    struct ompi_datatype_t *new_ddt;
#ifdef blahblah
-    mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module;
-    mca_coll_tuned_comm_t *data = tuned_module->tuned_data;
+    mca_coll_base_module_t *base_module = (mca_coll_base_module_t*) module;
+    mca_coll_base_comm_t *data = base_module->base_data;
#endif
    if (MPI_IN_PLACE == sbuf) {
-        return mca_coll_tuned_alltoall_intra_basic_inplace (rbuf, rcount, rdtype,
+        return mca_coll_base_alltoall_intra_basic_inplace (rbuf, rcount, rdtype,
                                                            comm, module);
    }
    size = ompi_comm_size(comm);
    rank = ompi_comm_rank(comm);
-    OPAL_OUTPUT((ompi_coll_tuned_stream,
-                 "coll:tuned:alltoall_intra_bruck rank %d", rank));
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
+                 "coll:base:alltoall_intra_bruck rank %d", rank));
    err = ompi_datatype_get_extent (sdtype, &slb, &sext);
    if (err != MPI_SUCCESS) { line = __LINE__; goto err_hndl; }
@@ -307,7 +285,7 @@ int ompi_coll_tuned_alltoall_intra_bruck(void *sbuf, int scount,
    if (err != MPI_SUCCESS) { line = __LINE__; goto err_hndl; }
    /* Sendreceive */
-    err = ompi_coll_tuned_sendrecv ( tmpbuf, 1, new_ddt, sendto,
+    err = ompi_coll_base_sendrecv ( tmpbuf, 1, new_ddt, sendto,
                                    MCA_COLL_BASE_TAG_ALLTOALL,
                                    rbuf, 1, new_ddt, recvfrom,
                                    MCA_COLL_BASE_TAG_ALLTOALL,
@@ -341,7 +319,7 @@ int ompi_coll_tuned_alltoall_intra_bruck(void *sbuf, int scount,
    return OMPI_SUCCESS;
 err_hndl:
-    OPAL_OUTPUT((ompi_coll_tuned_stream,
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
                 "%s:%4d\tError occurred %d, rank %2d", __FILE__, line, err,
                 rank));
    if (tmpbuf != NULL) free(tmpbuf_free);
@@ -367,7 +345,7 @@ int ompi_coll_tuned_alltoall_intra_bruck(void *sbuf, int scount,
 *  - wait for any request to complete
 *  - replace that request by the new one of the same type.
 */
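The linear-sync variant caps the number of outstanding requests: it pre-posts a window of sends/receives, then replaces each completed request with a new one, exactly as the bullet points above say. A standalone sketch of that windowed loop (plain C; issue and wait_any are hypothetical stand-ins for the PML isend/irecv and wait-any calls):

#include <stdio.h>

#define TOTAL  10   /* messages to issue */
#define WINDOW  4   /* max outstanding, cf. max_outstanding_reqs */

static void issue(int i)   { printf("issue request %d\n", i); }
static int  wait_any(void) { static int done = 0; return done++; } /* hypothetical */

int main(void)
{
    int issued = 0;

    for (; issued < WINDOW && issued < TOTAL; ++issued)  /* prime the window */
        issue(issued);

    while (issued < TOTAL) {            /* refill: one new request per completion */
        printf("request %d completed\n", wait_any());
        issue(issued++);
    }
    for (int i = TOTAL - WINDOW; i < TOTAL; ++i)         /* drain the remainder */
        printf("request %d completed (drain)\n", wait_any());
    return 0;
}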
-int ompi_coll_tuned_alltoall_intra_linear_sync(void *sbuf, int scount,
+int ompi_coll_base_alltoall_intra_linear_sync(void *sbuf, int scount,
                                               struct ompi_datatype_t *sdtype,
                                               void* rbuf, int rcount,
                                               struct ompi_datatype_t *rdtype,
@@ -382,7 +360,7 @@ int ompi_coll_tuned_alltoall_intra_linear_sync(void *sbuf, int scount,
    ompi_request_t **reqs = NULL;
    if (MPI_IN_PLACE == sbuf) {
-        return mca_coll_tuned_alltoall_intra_basic_inplace (rbuf, rcount, rdtype,
+        return mca_coll_base_alltoall_intra_basic_inplace (rbuf, rcount, rdtype,
                                                            comm, module);
    }
@@ -391,8 +369,8 @@ int ompi_coll_tuned_alltoall_intra_linear_sync(void *sbuf, int scount,
    size = ompi_comm_size(comm);
    rank = ompi_comm_rank(comm);
-    OPAL_OUTPUT((ompi_coll_tuned_stream,
-                 "ompi_coll_tuned_alltoall_intra_linear_sync rank %d", rank));
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
+                 "ompi_coll_base_alltoall_intra_linear_sync rank %d", rank));
    error = ompi_datatype_get_extent(sdtype, &slb, &sext);
    if (OMPI_SUCCESS != error) {
@@ -506,7 +484,7 @@ int ompi_coll_tuned_alltoall_intra_linear_sync(void *sbuf, int scount,
    return MPI_SUCCESS;
 error_hndl:
-    OPAL_OUTPUT((ompi_coll_tuned_stream,
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
                 "%s:%4d\tError occurred %d, rank %2d", __FILE__, line, error,
                 rank));
    if (NULL != reqs) free(reqs);
@@ -514,7 +492,7 @@ int ompi_coll_tuned_alltoall_intra_linear_sync(void *sbuf, int scount,
}
-int ompi_coll_tuned_alltoall_intra_two_procs(void *sbuf, int scount,
+int ompi_coll_base_alltoall_intra_two_procs(void *sbuf, int scount,
                                             struct ompi_datatype_t *sdtype,
                                             void* rbuf, int rcount,
                                             struct ompi_datatype_t *rdtype,
@@ -526,14 +504,14 @@ int ompi_coll_tuned_alltoall_intra_two_procs(void *sbuf, int scount,
    ptrdiff_t sext, rext, lb;
    if (MPI_IN_PLACE == sbuf) {
-        return mca_coll_tuned_alltoall_intra_basic_inplace (rbuf, rcount, rdtype,
+        return mca_coll_base_alltoall_intra_basic_inplace (rbuf, rcount, rdtype,
                                                            comm, module);
    }
    rank = ompi_comm_rank(comm);
-    OPAL_OUTPUT((ompi_coll_tuned_stream,
-                 "ompi_coll_tuned_alltoall_intra_two_procs rank %d", rank));
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
+                 "ompi_coll_base_alltoall_intra_two_procs rank %d", rank));
    err = ompi_datatype_get_extent (sdtype, &lb, &sext);
    if (err != MPI_SUCCESS) { line = __LINE__; goto err_hndl; }
@@ -548,7 +526,7 @@ int ompi_coll_tuned_alltoall_intra_two_procs(void *sbuf, int scount,
    tmprecv = (char*)rbuf + (ptrdiff_t)remote * rext * (ptrdiff_t)rcount;
    /* send and receive */
-    err = ompi_coll_tuned_sendrecv ( tmpsend, scount, sdtype, remote,
+    err = ompi_coll_base_sendrecv ( tmpsend, scount, sdtype, remote,
                                    MCA_COLL_BASE_TAG_ALLTOALL,
                                    tmprecv, rcount, rdtype, remote,
                                    MCA_COLL_BASE_TAG_ALLTOALL,
@@ -566,7 +544,7 @@ int ompi_coll_tuned_alltoall_intra_two_procs(void *sbuf, int scount,
    return MPI_SUCCESS;
 err_hndl:
-    OPAL_OUTPUT((ompi_coll_tuned_stream,
+    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
                 "%s:%4d\tError occurred %d, rank %2d", __FILE__, line, err,
                 rank));
    return err;
@@ -578,7 +556,7 @@ int ompi_coll_tuned_alltoall_intra_two_procs(void *sbuf, int scount,
 * Linear functions are copied from the BASIC coll module
 * they do not segment the message and are simple implementations
 * but for some small number of nodes and/or small data sizes they
- * are just as fast as tuned/tree based segmenting operations
+ * are just as fast as base/tree based segmenting operations
 * and as such may be selected by the decision functions
 * These are copied into this module due to the way we select modules
 * in V1. i.e. in V2 we will handle this differently and so will not
@@ -588,7 +566,7 @@ int ompi_coll_tuned_alltoall_intra_two_procs(void *sbuf, int scount,
/* copied function (with appropriate renaming) starts here */
int ompi_coll_tuned_alltoall_intra_basic_linear(void *sbuf, int scount, int ompi_coll_base_alltoall_intra_basic_linear(void *sbuf, int scount,
struct ompi_datatype_t *sdtype, struct ompi_datatype_t *sdtype,
void* rbuf, int rcount, void* rbuf, int rcount,
struct ompi_datatype_t *rdtype, struct ompi_datatype_t *rdtype,
@ -599,11 +577,11 @@ int ompi_coll_tuned_alltoall_intra_basic_linear(void *sbuf, int scount,
char *psnd, *prcv; char *psnd, *prcv;
MPI_Aint lb, sndinc, rcvinc; MPI_Aint lb, sndinc, rcvinc;
ompi_request_t **req, **sreq, **rreq; ompi_request_t **req, **sreq, **rreq;
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module; mca_coll_base_module_t *base_module = (mca_coll_base_module_t*) module;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data; mca_coll_base_comm_t *data = base_module->base_data;
if (MPI_IN_PLACE == sbuf) { if (MPI_IN_PLACE == sbuf) {
return mca_coll_tuned_alltoall_intra_basic_inplace (rbuf, rcount, rdtype, return mca_coll_base_alltoall_intra_basic_inplace (rbuf, rcount, rdtype,
comm, module); comm, module);
} }
@ -612,8 +590,8 @@ int ompi_coll_tuned_alltoall_intra_basic_linear(void *sbuf, int scount,
size = ompi_comm_size(comm); size = ompi_comm_size(comm);
rank = ompi_comm_rank(comm); rank = ompi_comm_rank(comm);
OPAL_OUTPUT((ompi_coll_tuned_stream, OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"ompi_coll_tuned_alltoall_intra_basic_linear rank %d", rank)); "ompi_coll_base_alltoall_intra_basic_linear rank %d", rank));
err = ompi_datatype_get_extent(sdtype, &lb, &sndinc); err = ompi_datatype_get_extent(sdtype, &lb, &sndinc);
@ -661,7 +639,7 @@ int ompi_coll_tuned_alltoall_intra_basic_linear(void *sbuf, int scount,
(prcv + (ptrdiff_t)i * rcvinc, rcount, rdtype, i, (prcv + (ptrdiff_t)i * rcvinc, rcount, rdtype, i,
MCA_COLL_BASE_TAG_ALLTOALL, comm, rreq)); MCA_COLL_BASE_TAG_ALLTOALL, comm, rreq));
if (MPI_SUCCESS != err) { if (MPI_SUCCESS != err) {
ompi_coll_tuned_free_reqs(req, rreq - req); ompi_coll_base_free_reqs(req, rreq - req);
return err; return err;
} }
} }
@ -678,7 +656,7 @@ int ompi_coll_tuned_alltoall_intra_basic_linear(void *sbuf, int scount,
MCA_COLL_BASE_TAG_ALLTOALL, MCA_COLL_BASE_TAG_ALLTOALL,
MCA_PML_BASE_SEND_STANDARD, comm, sreq)); MCA_PML_BASE_SEND_STANDARD, comm, sreq));
if (MPI_SUCCESS != err) { if (MPI_SUCCESS != err) {
ompi_coll_tuned_free_reqs(req, sreq - req); ompi_coll_base_free_reqs(req, sreq - req);
return err; return err;
} }
} }
@ -698,165 +676,10 @@ int ompi_coll_tuned_alltoall_intra_basic_linear(void *sbuf, int scount,
err = ompi_request_wait_all(nreqs, req, MPI_STATUSES_IGNORE); err = ompi_request_wait_all(nreqs, req, MPI_STATUSES_IGNORE);
/* Free the reqs */ /* Free the reqs */
ompi_coll_base_free_reqs(req, nreqs);
ompi_coll_tuned_free_reqs(req, nreqs);
/* All done */ /* All done */
return err; return err;
} }
/* copied function (with appropriate renaming) ends here */ /* copied function (with appropriate renaming) ends here */
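A minimal stand-alone sketch of the pattern the linear algorithm uses, written against the public MPI API rather than the internal PML macros (the function name, fixed MPI_CHAR blocks, and simplified error paths are illustrative only, not part of the component):

    #include <mpi.h>
    #include <stdlib.h>

    /* Basic linear alltoall: post one receive and one send per peer up
     * front, then wait for everything at once.  No segmentation, no
     * topology -- which is why it wins for small messages/node counts. */
    static int linear_alltoall_sketch(char *sbuf, char *rbuf,
                                      int blocklen, MPI_Comm comm)
    {
        int i, size, err;
        MPI_Request *reqs;

        MPI_Comm_size(comm, &size);
        reqs = malloc(2 * size * sizeof(MPI_Request));
        if (NULL == reqs) return MPI_ERR_NO_MEM;

        for (i = 0; i < size; i++) {      /* one receive per peer */
            err = MPI_Irecv(rbuf + (size_t)i * blocklen, blocklen, MPI_CHAR,
                            i, 0, comm, &reqs[i]);
            if (MPI_SUCCESS != err) { free(reqs); return err; }
        }
        for (i = 0; i < size; i++) {      /* one send per peer */
            err = MPI_Isend(sbuf + (size_t)i * blocklen, blocklen, MPI_CHAR,
                            i, 0, comm, &reqs[size + i]);
            if (MPI_SUCCESS != err) { free(reqs); return err; }
        }
        err = MPI_Waitall(2 * size, reqs, MPI_STATUSES_IGNORE);
        free(reqs);
        return err;
    }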
/* The following are used by dynamic and forced rules */
/* publish details of each algorithm and if its forced/fixed/locked in */
/* as you add methods/algorithms you must update this and the query/map routines */
/* this routine is called by the component only */
/* this makes sure that the mca parameters are set to their initial values and perms */
/* module does not call this they call the forced_getvalues routine instead */
int ompi_coll_tuned_alltoall_intra_check_forced_init (coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices)
{
mca_base_var_enum_t*new_enum;
ompi_coll_tuned_forced_max_algorithms[ALLTOALL] = coll_tuned_alltoall_algorithm_count;
(void) mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"alltoall_algorithm_count",
"Number of alltoall algorithms available",
MCA_BASE_VAR_TYPE_INT, NULL, 0,
MCA_BASE_VAR_FLAG_DEFAULT_ONLY,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_CONSTANT,
&coll_tuned_alltoall_algorithm_count);
/* MPI_T: This variable should eventually be bound to a communicator */
coll_tuned_alltoall_forced_algorithm = 0;
(void) mca_base_var_enum_create("coll_tuned_alltoall_algorithms", alltoall_algorithms, &new_enum);
mca_param_indices->algorithm_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"alltoall_algorithm",
"Which alltoall algorithm is used. Can be locked down to choice of: 0 ignore, 1 basic linear, 2 pairwise, 3: modified bruck, 4: linear with sync, 5:two proc only.",
MCA_BASE_VAR_TYPE_INT, new_enum, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_alltoall_forced_algorithm);
OBJ_RELEASE(new_enum);
if (mca_param_indices->algorithm_param_index < 0) {
return mca_param_indices->algorithm_param_index;
}
coll_tuned_alltoall_segment_size = 0;
mca_param_indices->segsize_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"alltoall_algorithm_segmentsize",
"Segment size in bytes used by default for alltoall algorithms. Only has meaning if algorithm is forced and supports segmenting. 0 bytes means no segmentation.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_alltoall_segment_size);
coll_tuned_alltoall_tree_fanout = ompi_coll_tuned_init_tree_fanout; /* get system wide default */
mca_param_indices->tree_fanout_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"alltoall_algorithm_tree_fanout",
"Fanout for n-tree used for alltoall algorithms. Only has meaning if algorithm is forced and supports n-tree topo based operation.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_alltoall_tree_fanout);
coll_tuned_alltoall_chain_fanout = ompi_coll_tuned_init_chain_fanout; /* get system wide default */
mca_param_indices->chain_fanout_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"alltoall_algorithm_chain_fanout",
"Fanout for chains used for alltoall algorithms. Only has meaning if algorithm is forced and supports chain topo based operation.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_alltoall_chain_fanout);
coll_tuned_alltoall_max_requests = 0; /* no limit for alltoall by default */
mca_param_indices->max_requests_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"alltoall_algorithm_max_requests",
"Maximum number of outstanding send or recv requests. Only has meaning for synchronized algorithms.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_alltoall_max_requests);
if (mca_param_indices->max_requests_param_index < 0) {
return mca_param_indices->max_requests_param_index;
}
if (coll_tuned_alltoall_max_requests < 0) {
if( 0 == ompi_comm_rank( MPI_COMM_WORLD ) ) {
opal_output( 0, "Maximum outstanding requests must be positive number greater than 1. Switching to system level default %d \n",
ompi_coll_tuned_init_max_requests );
}
coll_tuned_alltoall_max_requests = 0;
}
return (MPI_SUCCESS);
}
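Once registered, these become ordinary MCA variables: a user who wants to pin the algorithm can run, for example, `mpirun --mca coll_tuned_use_dynamic_rules 1 --mca coll_tuned_alltoall_algorithm 2 ./app` to lock alltoall to the pairwise exchange (value 2 in the enum above), and `ompi_info --param coll tuned` lists the same variables with the help strings registered here. (Typical usage, not verified against every release; the variable prefix follows from the component name.)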
int ompi_coll_tuned_alltoall_intra_do_forced(void *sbuf, int scount,
struct ompi_datatype_t *sdtype,
void* rbuf, int rcount,
struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data;
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:alltoall_intra_do_forced selected algorithm %d",
data->user_forced[ALLTOALL].algorithm));
switch (data->user_forced[ALLTOALL].algorithm) {
case (0): return ompi_coll_tuned_alltoall_intra_dec_fixed (sbuf, scount, sdtype, rbuf, rcount, rdtype, comm, module);
case (1): return ompi_coll_tuned_alltoall_intra_basic_linear (sbuf, scount, sdtype, rbuf, rcount, rdtype, comm, module);
case (2): return ompi_coll_tuned_alltoall_intra_pairwise (sbuf, scount, sdtype, rbuf, rcount, rdtype, comm, module);
case (3): return ompi_coll_tuned_alltoall_intra_bruck (sbuf, scount, sdtype, rbuf, rcount, rdtype, comm, module);
case (4): return ompi_coll_tuned_alltoall_intra_linear_sync (sbuf, scount, sdtype, rbuf, rcount, rdtype, comm, module, data->user_forced[ALLTOALL].max_requests);
case (5): return ompi_coll_tuned_alltoall_intra_two_procs (sbuf, scount, sdtype, rbuf, rcount, rdtype, comm, module);
default:
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:alltoall_intra_do_forced attempt to select algorithm %d when only 0-%d is valid?",
data->user_forced[ALLTOALL].algorithm, ompi_coll_tuned_forced_max_algorithms[ALLTOALL]));
return (MPI_ERR_ARG);
} /* switch */
}
int ompi_coll_tuned_alltoall_intra_do_this(void *sbuf, int scount,
struct ompi_datatype_t *sdtype,
void* rbuf, int rcount,
struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module,
int algorithm, int faninout, int segsize,
int max_requests)
{
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:alltoall_intra_do_this selected algorithm %d topo faninout %d segsize %d",
algorithm, faninout, segsize));
switch (algorithm) {
case (0): return ompi_coll_tuned_alltoall_intra_dec_fixed (sbuf, scount, sdtype, rbuf, rcount, rdtype, comm, module);
case (1): return ompi_coll_tuned_alltoall_intra_basic_linear (sbuf, scount, sdtype, rbuf, rcount, rdtype, comm, module);
case (2): return ompi_coll_tuned_alltoall_intra_pairwise (sbuf, scount, sdtype, rbuf, rcount, rdtype, comm, module);
case (3): return ompi_coll_tuned_alltoall_intra_bruck (sbuf, scount, sdtype, rbuf, rcount, rdtype, comm, module);
case (4): return ompi_coll_tuned_alltoall_intra_linear_sync (sbuf, scount, sdtype, rbuf, rcount, rdtype, comm, module, max_requests);
case (5): return ompi_coll_tuned_alltoall_intra_two_procs (sbuf, scount, sdtype, rbuf, rcount, rdtype, comm, module);
default:
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:alltoall_intra_do_this attempt to select algorithm %d when only 0-%d is valid?",
algorithm, ompi_coll_tuned_forced_max_algorithms[ALLTOALL]));
return (MPI_ERR_ARG);
} /* switch */
}


@ -3,7 +3,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology * University Research and Technology
* Corporation. All rights reserved. * Corporation. All rights reserved.
* Copyright (c) 2004-2012 The University of Tennessee and The University * Copyright (c) 2004-2015 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights * of Tennessee Research Foundation. All rights
* reserved. * reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -32,29 +32,17 @@
#include "ompi/mca/coll/coll.h" #include "ompi/mca/coll/coll.h"
#include "ompi/mca/coll/base/coll_tags.h" #include "ompi/mca/coll/base/coll_tags.h"
#include "ompi/mca/pml/pml.h" #include "ompi/mca/pml/pml.h"
#include "coll_tuned.h" #include "ompi/mca/coll/base/coll_base_functions.h"
#include "coll_tuned_topo.h" #include "coll_base_topo.h"
#include "coll_tuned_util.h" #include "coll_base_util.h"
/* alltoallv algorithm variables */
static int coll_tuned_alltoallv_algorithm_count = 2;
static int coll_tuned_alltoallv_forced_algorithm = 0;
/* valid values for coll_tuned_alltoallv_forced_algorithm */
static mca_base_var_enum_value_t alltoallv_algorithms[] = {
{0, "ignore"},
{1, "basic_linear"},
{2, "pairwise"},
{0, NULL}
};
static int static int
mca_coll_tuned_alltoallv_intra_basic_inplace(void *rbuf, const int *rcounts, const int *rdisps, mca_coll_base_alltoallv_intra_basic_inplace(void *rbuf, const int *rcounts, const int *rdisps,
struct ompi_datatype_t *rdtype, struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, struct ompi_communicator_t *comm,
mca_coll_base_module_t *module) mca_coll_base_module_t *module)
{ {
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module; mca_coll_base_module_t *base_module = (mca_coll_base_module_t*) module;
int i, j, size, rank, err=MPI_SUCCESS; int i, j, size, rank, err=MPI_SUCCESS;
MPI_Request *preq; MPI_Request *preq;
char *tmp_buffer; char *tmp_buffer;
@ -90,7 +78,7 @@ mca_coll_tuned_alltoallv_intra_basic_inplace(void *rbuf, const int *rcounts, con
for (i = 0 ; i < size ; ++i) { for (i = 0 ; i < size ; ++i) {
for (j = i+1 ; j < size ; ++j) { for (j = i+1 ; j < size ; ++j) {
/* Initiate all send/recv to/from others. */ /* Initiate all send/recv to/from others. */
preq = tuned_module->tuned_data->mcct_reqs; preq = base_module->base_data->mcct_reqs;
if (i == rank && rcounts[j]) { if (i == rank && rcounts[j]) {
/* Copy the data into the temporary buffer */ /* Copy the data into the temporary buffer */
@ -127,11 +115,8 @@ mca_coll_tuned_alltoallv_intra_basic_inplace(void *rbuf, const int *rcounts, con
} }
/* Wait for the requests to complete */ /* Wait for the requests to complete */
err = ompi_request_wait_all (2, tuned_module->tuned_data->mcct_reqs, MPI_STATUSES_IGNORE); err = ompi_request_wait_all (2, base_module->base_data->mcct_reqs, MPI_STATUSES_IGNORE);
if (MPI_SUCCESS != err) { goto error_hndl; } if (MPI_SUCCESS != err) { goto error_hndl; }
/* Free the requests. */
mca_coll_tuned_free_reqs(tuned_module->tuned_data->mcct_reqs, 2);
} }
} }
@ -145,7 +130,7 @@ mca_coll_tuned_alltoallv_intra_basic_inplace(void *rbuf, const int *rcounts, con
} }
int int
ompi_coll_tuned_alltoallv_intra_pairwise(void *sbuf, int *scounts, int *sdisps, ompi_coll_base_alltoallv_intra_pairwise(void *sbuf, int *scounts, int *sdisps,
struct ompi_datatype_t *sdtype, struct ompi_datatype_t *sdtype,
void* rbuf, int *rcounts, int *rdisps, void* rbuf, int *rcounts, int *rdisps,
struct ompi_datatype_t *rdtype, struct ompi_datatype_t *rdtype,
@ -157,15 +142,15 @@ ompi_coll_tuned_alltoallv_intra_pairwise(void *sbuf, int *scounts, int *sdisps,
ptrdiff_t sext, rext; ptrdiff_t sext, rext;
if (MPI_IN_PLACE == sbuf) { if (MPI_IN_PLACE == sbuf) {
return mca_coll_tuned_alltoallv_intra_basic_inplace (rbuf, rcounts, rdisps, return mca_coll_base_alltoallv_intra_basic_inplace (rbuf, rcounts, rdisps,
rdtype, comm, module); rdtype, comm, module);
} }
size = ompi_comm_size(comm); size = ompi_comm_size(comm);
rank = ompi_comm_rank(comm); rank = ompi_comm_rank(comm);
OPAL_OUTPUT((ompi_coll_tuned_stream, OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"coll:tuned:alltoallv_intra_pairwise rank %d", rank)); "coll:base:alltoallv_intra_pairwise rank %d", rank));
ompi_datatype_type_extent(sdtype, &sext); ompi_datatype_type_extent(sdtype, &sext);
ompi_datatype_type_extent(rdtype, &rext); ompi_datatype_type_extent(rdtype, &rext);
@ -182,7 +167,7 @@ ompi_coll_tuned_alltoallv_intra_pairwise(void *sbuf, int *scounts, int *sdisps,
prcv = (char*)rbuf + (ptrdiff_t)rdisps[recvfrom] * rext; prcv = (char*)rbuf + (ptrdiff_t)rdisps[recvfrom] * rext;
/* send and receive */ /* send and receive */
err = ompi_coll_tuned_sendrecv( psnd, scounts[sendto], sdtype, sendto, err = ompi_coll_base_sendrecv( psnd, scounts[sendto], sdtype, sendto,
MCA_COLL_BASE_TAG_ALLTOALLV, MCA_COLL_BASE_TAG_ALLTOALLV,
prcv, rcounts[recvfrom], rdtype, recvfrom, prcv, rcounts[recvfrom], rdtype, recvfrom,
MCA_COLL_BASE_TAG_ALLTOALLV, MCA_COLL_BASE_TAG_ALLTOALLV,
@ -193,23 +178,22 @@ ompi_coll_tuned_alltoallv_intra_pairwise(void *sbuf, int *scounts, int *sdisps,
return MPI_SUCCESS; return MPI_SUCCESS;
err_hndl: err_hndl:
OPAL_OUTPUT((ompi_coll_tuned_stream, OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"%s:%4d\tError occurred %d, rank %2d at step %d", __FILE__, line, "%s:%4d\tError occurred %d, rank %2d at step %d", __FILE__, line,
err, rank, step)); err, rank, step));
return err; return err;
} }
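The schedule is easier to see outside the diff. A sketch of the same pairwise pattern with uniform block sizes (the v-variant merely swaps in per-peer counts and displacements; name and error handling are illustrative):

    #include <mpi.h>
    #include <stddef.h>

    /* Pairwise exchange: in step s, send to (rank+s) mod size and
     * receive from (rank-s) mod size, so each step forms a disjoint
     * set of point-to-point pairs across the communicator. */
    static int pairwise_alltoall_sketch(char *sbuf, char *rbuf,
                                        int blocklen, MPI_Comm comm)
    {
        int step, rank, size, err;

        MPI_Comm_size(comm, &size);
        MPI_Comm_rank(comm, &rank);
        for (step = 0; step < size; step++) {
            int sendto   = (rank + step) % size;
            int recvfrom = (rank + size - step) % size;
            err = MPI_Sendrecv(sbuf + (size_t)sendto * blocklen, blocklen,
                               MPI_CHAR, sendto, 0,
                               rbuf + (size_t)recvfrom * blocklen, blocklen,
                               MPI_CHAR, recvfrom, 0,
                               comm, MPI_STATUS_IGNORE);
            if (MPI_SUCCESS != err) return err;
        }
        return MPI_SUCCESS;
    }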
/* /**
* Linear functions are copied from the basic coll module. For * Linear functions are copied from the basic coll module. For
* some small number of nodes and/or small data sizes they are just as * some small number of nodes and/or small data sizes they are just as
* fast as tuned/tree based segmenting operations and as such may be * fast as base/tree based segmenting operations and as such may be
* selected by the decision functions. These are copied into this module * selected by the decision functions. These are copied into this module
* due to the way we select modules in V1. i.e. in V2 we will handle this * due to the way we select modules in V1. i.e. in V2 we will handle this
* differently and so will not have to duplicate code. * differently and so will not have to duplicate code.
* GEF Oct05 after asking Jeff.
*/ */
int int
ompi_coll_tuned_alltoallv_intra_basic_linear(void *sbuf, int *scounts, int *sdisps, ompi_coll_base_alltoallv_intra_basic_linear(void *sbuf, int *scounts, int *sdisps,
struct ompi_datatype_t *sdtype, struct ompi_datatype_t *sdtype,
void *rbuf, int *rcounts, int *rdisps, void *rbuf, int *rcounts, int *rdisps,
struct ompi_datatype_t *rdtype, struct ompi_datatype_t *rdtype,
@ -220,19 +204,19 @@ ompi_coll_tuned_alltoallv_intra_basic_linear(void *sbuf, int *scounts, int *sdis
char *psnd, *prcv; char *psnd, *prcv;
ptrdiff_t sext, rext; ptrdiff_t sext, rext;
MPI_Request *preq; MPI_Request *preq;
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module; mca_coll_base_module_t *base_module = (mca_coll_base_module_t*) module;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data; mca_coll_base_comm_t *data = base_module->base_data;
if (MPI_IN_PLACE == sbuf) { if (MPI_IN_PLACE == sbuf) {
return mca_coll_tuned_alltoallv_intra_basic_inplace (rbuf, rcounts, rdisps, return mca_coll_base_alltoallv_intra_basic_inplace (rbuf, rcounts, rdisps,
rdtype, comm, module); rdtype, comm, module);
} }
size = ompi_comm_size(comm); size = ompi_comm_size(comm);
rank = ompi_comm_rank(comm); rank = ompi_comm_rank(comm);
OPAL_OUTPUT((ompi_coll_tuned_stream, OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"coll:tuned:alltoallv_intra_basic_linear rank %d", rank)); "coll:base:alltoallv_intra_basic_linear rank %d", rank));
ompi_datatype_type_extent(sdtype, &sext); ompi_datatype_type_extent(sdtype, &sext);
ompi_datatype_type_extent(rdtype, &rext); ompi_datatype_type_extent(rdtype, &rext);
@ -269,7 +253,7 @@ ompi_coll_tuned_alltoallv_intra_basic_linear(void *sbuf, int *scounts, int *sdis
preq++)); preq++));
++nreqs; ++nreqs;
if (MPI_SUCCESS != err) { if (MPI_SUCCESS != err) {
ompi_coll_tuned_free_reqs(data->mcct_reqs, nreqs); ompi_coll_base_free_reqs(data->mcct_reqs, nreqs);
return err; return err;
} }
} }
@ -287,7 +271,7 @@ ompi_coll_tuned_alltoallv_intra_basic_linear(void *sbuf, int *scounts, int *sdis
preq++)); preq++));
++nreqs; ++nreqs;
if (MPI_SUCCESS != err) { if (MPI_SUCCESS != err) {
ompi_coll_tuned_free_reqs(data->mcct_reqs, nreqs); ompi_coll_base_free_reqs(data->mcct_reqs, nreqs);
return err; return err;
} }
} }
@ -305,128 +289,7 @@ ompi_coll_tuned_alltoallv_intra_basic_linear(void *sbuf, int *scounts, int *sdis
MPI_STATUSES_IGNORE); MPI_STATUSES_IGNORE);
/* Free the requests. */ /* Free the requests. */
ompi_coll_tuned_free_reqs(data->mcct_reqs, nreqs); ompi_coll_base_free_reqs(data->mcct_reqs, nreqs);
return err; return err;
} }
/*
* The following are used by dynamic and forced rules. Publish
* details of each algorithm and if its forced/fixed/locked in as you add
* methods/algorithms you must update this and the query/map routines.
* This routine is called by the component only. This makes sure that
* the mca parameters are set to their initial values and perms.
* Module does not call this. They call the forced_getvalues routine
* instead.
*/
int ompi_coll_tuned_alltoallv_intra_check_forced_init(coll_tuned_force_algorithm_mca_param_indices_t
*mca_param_indices)
{
mca_base_var_enum_t *new_enum;
ompi_coll_tuned_forced_max_algorithms[ALLTOALLV] = coll_tuned_alltoallv_algorithm_count;
(void) mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"alltoallv_algorithm_count",
"Number of alltoallv algorithms available",
MCA_BASE_VAR_TYPE_INT, NULL, 0,
MCA_BASE_VAR_FLAG_DEFAULT_ONLY,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_CONSTANT,
&coll_tuned_alltoallv_algorithm_count);
/* MPI_T: This variable should eventually be bound to a communicator */
coll_tuned_alltoallv_forced_algorithm = 0;
(void) mca_base_var_enum_create("coll_tuned_alltoallv_algorithms", alltoallv_algorithms, &new_enum);
mca_param_indices->algorithm_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"alltoallv_algorithm",
"Which alltoallv algorithm is used. "
"Can be locked down to choice of: 0 ignore, "
"1 basic linear, 2 pairwise.",
MCA_BASE_VAR_TYPE_INT, new_enum, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_alltoallv_forced_algorithm);
OBJ_RELEASE(new_enum);
if (mca_param_indices->algorithm_param_index < 0) {
return mca_param_indices->algorithm_param_index;
}
return (MPI_SUCCESS);
}
int ompi_coll_tuned_alltoallv_intra_do_forced(void *sbuf, int *scounts, int *sdisps,
struct ompi_datatype_t *sdtype,
void* rbuf, int *rcounts, int *rdisps,
struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data;
OPAL_OUTPUT((ompi_coll_tuned_stream,
"coll:tuned:alltoallv_intra_do_forced selected algorithm %d",
data->user_forced[ALLTOALLV].algorithm));
switch (data->user_forced[ALLTOALLV].algorithm) {
case (0):
return ompi_coll_tuned_alltoallv_intra_dec_fixed(sbuf, scounts, sdisps, sdtype,
rbuf, rcounts, rdisps, rdtype,
comm, module);
case (1):
return ompi_coll_tuned_alltoallv_intra_basic_linear(sbuf, scounts, sdisps, sdtype,
rbuf, rcounts, rdisps, rdtype,
comm, module);
case (2):
return ompi_coll_tuned_alltoallv_intra_pairwise(sbuf, scounts, sdisps, sdtype,
rbuf, rcounts, rdisps, rdtype,
comm, module);
default:
OPAL_OUTPUT((ompi_coll_tuned_stream,
"coll:tuned:alltoallv_intra_do_forced attempt to "
"select algorithm %d when only 0-%d is valid.",
data->user_forced[ALLTOALLV].algorithm,
ompi_coll_tuned_forced_max_algorithms[ALLTOALLV]));
return (MPI_ERR_ARG);
}
}
/* If the user selects dynamic rules and specifies the algorithm to
* use, then this function is called. */
int ompi_coll_tuned_alltoallv_intra_do_this(void *sbuf, int *scounts, int *sdisps,
struct ompi_datatype_t *sdtype,
void* rbuf, int *rcounts, int *rdisps,
struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module,
int algorithm)
{
OPAL_OUTPUT((ompi_coll_tuned_stream,
"coll:tuned:alltoallv_intra_do_this selected algorithm %d ",
algorithm));
switch (algorithm) {
case (0):
return ompi_coll_tuned_alltoallv_intra_dec_fixed(sbuf, scounts, sdisps, sdtype,
rbuf, rcounts, rdisps, rdtype,
comm, module);
case (1):
return ompi_coll_tuned_alltoallv_intra_basic_linear(sbuf, scounts, sdisps, sdtype,
rbuf, rcounts, rdisps, rdtype,
comm, module);
case (2):
return ompi_coll_tuned_alltoallv_intra_pairwise(sbuf, scounts, sdisps, sdtype,
rbuf, rcounts, rdisps, rdtype,
comm, module);
default:
OPAL_OUTPUT((ompi_coll_tuned_stream,
"coll:tuned:alltoall_intra_do_this attempt to select "
"algorithm %d when only 0-%d is valid.",
algorithm, ompi_coll_tuned_forced_max_algorithms[ALLTOALLV]));
return (MPI_ERR_ARG);
}
}


@ -3,7 +3,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology * University Research and Technology
* Corporation. All rights reserved. * Corporation. All rights reserved.
* Copyright (c) 2004-2014 The University of Tennessee and The University * Copyright (c) 2004-2015 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights * of Tennessee Research Foundation. All rights
* reserved. * reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -31,25 +31,9 @@
#include "ompi/mca/coll/coll.h" #include "ompi/mca/coll/coll.h"
#include "ompi/mca/coll/base/coll_tags.h" #include "ompi/mca/coll/base/coll_tags.h"
#include "ompi/mca/pml/pml.h" #include "ompi/mca/pml/pml.h"
#include "coll_tuned.h" #include "ompi/mca/coll/base/coll_base_functions.h"
#include "coll_tuned_topo.h" #include "coll_base_topo.h"
#include "coll_tuned_util.h" #include "coll_base_util.h"
/* barrier algorithm variables */
static int coll_tuned_barrier_algorithm_count = 6;
static int coll_tuned_barrier_forced_algorithm = 0;
/* valid values for coll_tuned_barrier_forced_algorithm */
static mca_base_var_enum_value_t barrier_algorithms[] = {
{0, "ignore"},
{1, "linear"},
{2, "double_ring"},
{3, "recursive_doubling"},
{4, "bruck"},
{5, "two_proc"},
{6, "tree"},
{0, NULL}
};
/** /**
* A quick version of the MPI_Sendreceive implemented for the barrier. * A quick version of the MPI_Sendreceive implemented for the barrier.
@ -57,7 +41,7 @@ static mca_base_var_enum_value_t barrier_algorithms[] = {
* signal a two peer synchronization. * signal a two peer synchronization.
*/ */
static inline int static inline int
ompi_coll_tuned_sendrecv_zero(int dest, int stag, ompi_coll_base_sendrecv_zero(int dest, int stag,
int source, int rtag, int source, int rtag,
MPI_Comm comm) MPI_Comm comm)
@ -87,8 +71,8 @@ ompi_coll_tuned_sendrecv_zero(int dest, int stag,
err_index = 1; err_index = 1;
} }
err = statuses[err_index].MPI_ERROR; err = statuses[err_index].MPI_ERROR;
OPAL_OUTPUT ((ompi_coll_tuned_stream, "%s:%d: Error %d occurred in the %s" OPAL_OUTPUT ((ompi_coll_base_framework.framework_output, "%s:%d: Error %d occurred in the %s"
" stage of ompi_coll_tuned_sendrecv_zero\n", " stage of ompi_coll_base_sendrecv_zero\n",
__FILE__, line, err, (0 == err_index ? "receive" : "send"))); __FILE__, line, err, (0 == err_index ? "receive" : "send")));
return err; return err;
} }
@ -100,7 +84,7 @@ ompi_coll_tuned_sendrecv_zero(int dest, int stag,
/* Error discovered during the posting of the irecv or isend, /* Error discovered during the posting of the irecv or isend,
* and no status is available. * and no status is available.
*/ */
OPAL_OUTPUT ((ompi_coll_tuned_stream, "%s:%d: Error %d occurred\n", OPAL_OUTPUT ((ompi_coll_base_framework.framework_output, "%s:%d: Error %d occurred\n",
__FILE__, line, err)); __FILE__, line, err));
return err; return err;
} }
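Because a barrier carries no payload, the helper above only needs the completion semantics of a message exchange: my receive completing proves the peer reached its matching call. An equivalent using only the public API (name illustrative):

    #include <mpi.h>

    /* Zero-byte exchange: completion of the receive side proves the
     * peer posted its send, which is all a barrier needs. */
    static int sendrecv_zero_sketch(int peer, int tag, MPI_Comm comm)
    {
        return MPI_Sendrecv(NULL, 0, MPI_BYTE, peer, tag,
                            NULL, 0, MPI_BYTE, peer, tag,
                            comm, MPI_STATUS_IGNORE);
    }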
@ -124,7 +108,7 @@ ompi_coll_tuned_sendrecv_zero(int dest, int stag,
* synchronous guarantee made by last ring of sends are synchronous * *
* *
*/ */
int ompi_coll_tuned_barrier_intra_doublering(struct ompi_communicator_t *comm, int ompi_coll_base_barrier_intra_doublering(struct ompi_communicator_t *comm,
mca_coll_base_module_t *module) mca_coll_base_module_t *module)
{ {
int rank, size, err = 0, line = 0, left, right; int rank, size, err = 0, line = 0, left, right;
@ -132,7 +116,7 @@ int ompi_coll_tuned_barrier_intra_doublering(struct ompi_communicator_t *comm,
rank = ompi_comm_rank(comm); rank = ompi_comm_rank(comm);
size = ompi_comm_size(comm); size = ompi_comm_size(comm);
OPAL_OUTPUT((ompi_coll_tuned_stream,"ompi_coll_tuned_barrier_intra_doublering rank %d", rank)); OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"ompi_coll_base_barrier_intra_doublering rank %d", rank));
left = ((rank-1)%size); left = ((rank-1)%size);
right = ((rank+1)%size); right = ((rank+1)%size);
@ -183,7 +167,7 @@ int ompi_coll_tuned_barrier_intra_doublering(struct ompi_communicator_t *comm,
return MPI_SUCCESS; return MPI_SUCCESS;
err_hndl: err_hndl:
OPAL_OUTPUT((ompi_coll_tuned_stream,"%s:%4d\tError occurred %d, rank %2d", OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"%s:%4d\tError occurred %d, rank %2d",
__FILE__, line, err, rank)); __FILE__, line, err, rank));
return err; return err;
} }
@ -193,15 +177,15 @@ int ompi_coll_tuned_barrier_intra_doublering(struct ompi_communicator_t *comm,
* To make synchronous, uses sync sends and sync sendrecvs * To make synchronous, uses sync sends and sync sendrecvs
*/ */
int ompi_coll_tuned_barrier_intra_recursivedoubling(struct ompi_communicator_t *comm, int ompi_coll_base_barrier_intra_recursivedoubling(struct ompi_communicator_t *comm,
mca_coll_base_module_t *module) mca_coll_base_module_t *module)
{ {
int rank, size, adjsize, err, line, mask, remote; int rank, size, adjsize, err, line, mask, remote;
rank = ompi_comm_rank(comm); rank = ompi_comm_rank(comm);
size = ompi_comm_size(comm); size = ompi_comm_size(comm);
OPAL_OUTPUT((ompi_coll_tuned_stream, OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"ompi_coll_tuned_barrier_intra_recursivedoubling rank %d", "ompi_coll_base_barrier_intra_recursivedoubling rank %d",
rank)); rank));
/* do nearest power of 2 less than size calc */ /* do nearest power of 2 less than size calc */
@ -213,7 +197,7 @@ int ompi_coll_tuned_barrier_intra_recursivedoubling(struct ompi_communicator_t *
if (rank >= adjsize) { if (rank >= adjsize) {
/* send message to lower ranked node */ /* send message to lower ranked node */
remote = rank - adjsize; remote = rank - adjsize;
err = ompi_coll_tuned_sendrecv_zero(remote, MCA_COLL_BASE_TAG_BARRIER, err = ompi_coll_base_sendrecv_zero(remote, MCA_COLL_BASE_TAG_BARRIER,
remote, MCA_COLL_BASE_TAG_BARRIER, remote, MCA_COLL_BASE_TAG_BARRIER,
comm); comm);
if (err != MPI_SUCCESS) { line = __LINE__; goto err_hndl;} if (err != MPI_SUCCESS) { line = __LINE__; goto err_hndl;}
@ -238,7 +222,7 @@ int ompi_coll_tuned_barrier_intra_recursivedoubling(struct ompi_communicator_t *
if (remote >= adjsize) continue; if (remote >= adjsize) continue;
/* post receive from the remote node */ /* post receive from the remote node */
err = ompi_coll_tuned_sendrecv_zero(remote, MCA_COLL_BASE_TAG_BARRIER, err = ompi_coll_base_sendrecv_zero(remote, MCA_COLL_BASE_TAG_BARRIER,
remote, MCA_COLL_BASE_TAG_BARRIER, remote, MCA_COLL_BASE_TAG_BARRIER,
comm); comm);
if (err != MPI_SUCCESS) { line = __LINE__; goto err_hndl;} if (err != MPI_SUCCESS) { line = __LINE__; goto err_hndl;}
@ -261,7 +245,7 @@ int ompi_coll_tuned_barrier_intra_recursivedoubling(struct ompi_communicator_t *
return MPI_SUCCESS; return MPI_SUCCESS;
err_hndl: err_hndl:
OPAL_OUTPUT((ompi_coll_tuned_stream,"%s:%4d\tError occurred %d, rank %2d", OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"%s:%4d\tError occurred %d, rank %2d",
__FILE__, line, err, rank)); __FILE__, line, err, rank));
return err; return err;
} }
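The pairing rule in the main loop is just rank XOR 2^k; stripped of the pre/post phases that fold the ranks beyond the nearest power of two into the exchange, it reduces to this sketch (power-of-two communicator size assumed):

    #include <mpi.h>

    /* Recursive doubling: in round k, exchange a zero-byte message with
     * rank XOR 2^k.  After log2(size) rounds every rank has transitively
     * heard from all others. */
    static int recursive_doubling_barrier_sketch(MPI_Comm comm)
    {
        int rank, size, mask, err;

        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);   /* assumed to be a power of two */
        for (mask = 1; mask < size; mask <<= 1) {
            int remote = rank ^ mask;
            err = MPI_Sendrecv(NULL, 0, MPI_BYTE, remote, 0,
                               NULL, 0, MPI_BYTE, remote, 0,
                               comm, MPI_STATUS_IGNORE);
            if (MPI_SUCCESS != err) return err;
        }
        return MPI_SUCCESS;
    }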
@ -271,15 +255,15 @@ int ompi_coll_tuned_barrier_intra_recursivedoubling(struct ompi_communicator_t *
* To make synchronous, uses sync sends and sync sendrecvs * To make synchronous, uses sync sends and sync sendrecvs
*/ */
int ompi_coll_tuned_barrier_intra_bruck(struct ompi_communicator_t *comm, int ompi_coll_base_barrier_intra_bruck(struct ompi_communicator_t *comm,
mca_coll_base_module_t *module) mca_coll_base_module_t *module)
{ {
int rank, size, distance, to, from, err, line = 0; int rank, size, distance, to, from, err, line = 0;
rank = ompi_comm_rank(comm); rank = ompi_comm_rank(comm);
size = ompi_comm_size(comm); size = ompi_comm_size(comm);
OPAL_OUTPUT((ompi_coll_tuned_stream, OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"ompi_coll_tuned_barrier_intra_bruck rank %d", rank)); "ompi_coll_base_barrier_intra_bruck rank %d", rank));
/* exchange data with rank-2^k and rank+2^k */ /* exchange data with rank-2^k and rank+2^k */
for (distance = 1; distance < size; distance <<= 1) { for (distance = 1; distance < size; distance <<= 1) {
@ -287,7 +271,7 @@ int ompi_coll_tuned_barrier_intra_bruck(struct ompi_communicator_t *comm,
to = (rank + distance) % size; to = (rank + distance) % size;
/* send message to lower ranked node */ /* send message to lower ranked node */
err = ompi_coll_tuned_sendrecv_zero(to, MCA_COLL_BASE_TAG_BARRIER, err = ompi_coll_base_sendrecv_zero(to, MCA_COLL_BASE_TAG_BARRIER,
from, MCA_COLL_BASE_TAG_BARRIER, from, MCA_COLL_BASE_TAG_BARRIER,
comm); comm);
if (err != MPI_SUCCESS) { line = __LINE__; goto err_hndl;} if (err != MPI_SUCCESS) { line = __LINE__; goto err_hndl;}
@ -296,7 +280,7 @@ int ompi_coll_tuned_barrier_intra_bruck(struct ompi_communicator_t *comm,
return MPI_SUCCESS; return MPI_SUCCESS;
err_hndl: err_hndl:
OPAL_OUTPUT((ompi_coll_tuned_stream,"%s:%4d\tError occurred %d, rank %2d", OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"%s:%4d\tError occurred %d, rank %2d",
__FILE__, line, err, rank)); __FILE__, line, err, rank));
return err; return err;
} }
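Unlike recursive doubling, Bruck's send and receive partners differ in each round, so no power-of-two adjustment is needed; the whole algorithm is essentially this loop (sketch):

    #include <mpi.h>

    /* Bruck dissemination barrier: in round k, send to (rank + 2^k) mod
     * size and receive from (rank - 2^k) mod size; ceil(log2(size))
     * rounds suffice for any communicator size. */
    static int bruck_barrier_sketch(MPI_Comm comm)
    {
        int rank, size, distance, err;

        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);
        for (distance = 1; distance < size; distance <<= 1) {
            int to   = (rank + distance) % size;
            int from = (rank + size - distance) % size;
            err = MPI_Sendrecv(NULL, 0, MPI_BYTE, to, 0,
                               NULL, 0, MPI_BYTE, from, 0,
                               comm, MPI_STATUS_IGNORE);
            if (MPI_SUCCESS != err) return err;
        }
        return MPI_SUCCESS;
    }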
@ -306,17 +290,17 @@ int ompi_coll_tuned_barrier_intra_bruck(struct ompi_communicator_t *comm,
* To make synchronous, uses sync sends and sync sendrecvs * To make synchronous, uses sync sends and sync sendrecvs
*/ */
/* special case for two processes */ /* special case for two processes */
int ompi_coll_tuned_barrier_intra_two_procs(struct ompi_communicator_t *comm, int ompi_coll_base_barrier_intra_two_procs(struct ompi_communicator_t *comm,
mca_coll_base_module_t *module) mca_coll_base_module_t *module)
{ {
int remote, err; int remote, err;
remote = ompi_comm_rank(comm); remote = ompi_comm_rank(comm);
OPAL_OUTPUT((ompi_coll_tuned_stream, OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"ompi_coll_tuned_barrier_intra_two_procs rank %d", remote)); "ompi_coll_base_barrier_intra_two_procs rank %d", remote));
remote = (remote + 1) & 0x1; remote = (remote + 1) & 0x1;
err = ompi_coll_tuned_sendrecv_zero(remote, MCA_COLL_BASE_TAG_BARRIER, err = ompi_coll_base_sendrecv_zero(remote, MCA_COLL_BASE_TAG_BARRIER,
remote, MCA_COLL_BASE_TAG_BARRIER, remote, MCA_COLL_BASE_TAG_BARRIER,
comm); comm);
return (err); return (err);
@ -327,7 +311,7 @@ int ompi_coll_tuned_barrier_intra_two_procs(struct ompi_communicator_t *comm,
* Linear functions are copied from the BASIC coll module * Linear functions are copied from the BASIC coll module
* they do not segment the message and are simple implementations * they do not segment the message and are simple implementations
* but for some small number of nodes and/or small data sizes they * but for some small number of nodes and/or small data sizes they
* are just as fast as tuned/tree based segmenting operations * are just as fast as base/tree based segmenting operations
* and as such may be selected by the decision functions * and as such may be selected by the decision functions
* These are copied into this module due to the way we select modules * These are copied into this module due to the way we select modules
* in V1. i.e. in V2 we will handle this differently and so will not * in V1. i.e. in V2 we will handle this differently and so will not
@ -337,7 +321,7 @@ int ompi_coll_tuned_barrier_intra_two_procs(struct ompi_communicator_t *comm,
/* copied function (with appropriate renaming) starts here */ /* copied function (with appropriate renaming) starts here */
static int ompi_coll_tuned_barrier_intra_basic_linear(struct ompi_communicator_t *comm, static int ompi_coll_base_barrier_intra_basic_linear(struct ompi_communicator_t *comm,
mca_coll_base_module_t *module) mca_coll_base_module_t *module)
{ {
int i, err, rank, size; int i, err, rank, size;
@ -402,15 +386,15 @@ static int ompi_coll_tuned_barrier_intra_basic_linear(struct ompi_communicator_t
* Another recursive doubling type algorithm, but in this case * Another recursive doubling type algorithm, but in this case
* we go up the tree and back down the tree. * we go up the tree and back down the tree.
*/ */
int ompi_coll_tuned_barrier_intra_tree(struct ompi_communicator_t *comm, int ompi_coll_base_barrier_intra_tree(struct ompi_communicator_t *comm,
mca_coll_base_module_t *module) mca_coll_base_module_t *module)
{ {
int rank, size, depth, err, jump, partner; int rank, size, depth, err, jump, partner;
rank = ompi_comm_rank(comm); rank = ompi_comm_rank(comm);
size = ompi_comm_size(comm); size = ompi_comm_size(comm);
OPAL_OUTPUT((ompi_coll_tuned_stream, OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"ompi_coll_tuned_barrier_intra_tree %d", "ompi_coll_base_barrier_intra_tree %d",
rank)); rank));
/* Find the nearest power of 2 of the communicator size. */ /* Find the nearest power of 2 of the communicator size. */
@ -457,101 +441,3 @@ int ompi_coll_tuned_barrier_intra_tree(struct ompi_communicator_t *comm,
return MPI_SUCCESS; return MPI_SUCCESS;
} }
/* The following are used by dynamic and forced rules */
/* publish details of each algorithm and if its forced/fixed/locked in */
/* as you add methods/algorithms you must update this and the query/map */
/* routines */
/* this routine is called by the component only */
/* this makes sure that the mca parameters are set to their initial values */
/* and perms */
/* module does not call this they call the forced_getvalues routine instead */
int ompi_coll_tuned_barrier_intra_check_forced_init (coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices)
{
mca_base_var_enum_t *new_enum;
ompi_coll_tuned_forced_max_algorithms[BARRIER] = coll_tuned_barrier_algorithm_count;
(void) mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"barrier_algorithm_count",
"Number of barrier algorithms available",
MCA_BASE_VAR_TYPE_INT, NULL, 0,
MCA_BASE_VAR_FLAG_DEFAULT_ONLY,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_CONSTANT,
&coll_tuned_barrier_algorithm_count);
/* MPI_T: This variable should eventually be bound to a communicator */
coll_tuned_barrier_forced_algorithm = 0;
(void) mca_base_var_enum_create("coll_tuned_barrier_algorithms", barrier_algorithms, &new_enum);
mca_param_indices->algorithm_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"barrier_algorithm",
"Which barrier algorithm is used. Can be locked down to choice of: 0 ignore, 1 linear, 2 double ring, 3: recursive doubling 4: bruck, 5: two proc only, 6: tree",
MCA_BASE_VAR_TYPE_INT, new_enum, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_barrier_forced_algorithm);
OBJ_RELEASE(new_enum);
if (mca_param_indices->algorithm_param_index < 0) {
return mca_param_indices->algorithm_param_index;
}
return (MPI_SUCCESS);
}
int ompi_coll_tuned_barrier_intra_do_forced(struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data;
OPAL_OUTPUT((ompi_coll_tuned_stream,
"coll:tuned:barrier_intra_do_forced selected algorithm %d",
data->user_forced[BARRIER].algorithm));
switch (data->user_forced[BARRIER].algorithm) {
case (0): return ompi_coll_tuned_barrier_intra_dec_fixed (comm, module);
case (1): return ompi_coll_tuned_barrier_intra_basic_linear (comm, module);
case (2): return ompi_coll_tuned_barrier_intra_doublering (comm, module);
case (3): return ompi_coll_tuned_barrier_intra_recursivedoubling (comm, module);
case (4): return ompi_coll_tuned_barrier_intra_bruck (comm, module);
case (5): return ompi_coll_tuned_barrier_intra_two_procs (comm, module);
case (6): return ompi_coll_tuned_barrier_intra_tree (comm, module);
default:
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:barrier_intra_do_forced attempt to select algorithm %d when only 0-%d is valid?",
data->user_forced[BARRIER].algorithm,
ompi_coll_tuned_forced_max_algorithms[BARRIER]));
return (MPI_ERR_ARG);
} /* switch */
}
int ompi_coll_tuned_barrier_intra_do_this (struct ompi_communicator_t *comm,
mca_coll_base_module_t *module,
int algorithm, int faninout, int segsize)
{
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:barrier_intra_do_this selected algorithm %d topo fanin/out%d", algorithm, faninout));
switch (algorithm) {
case (0): return ompi_coll_tuned_barrier_intra_dec_fixed (comm, module);
case (1): return ompi_coll_tuned_barrier_intra_basic_linear (comm, module);
case (2): return ompi_coll_tuned_barrier_intra_doublering (comm, module);
case (3): return ompi_coll_tuned_barrier_intra_recursivedoubling (comm, module);
case (4): return ompi_coll_tuned_barrier_intra_bruck (comm, module);
case (5): return ompi_coll_tuned_barrier_intra_two_procs (comm, module);
case (6): return ompi_coll_tuned_barrier_intra_tree (comm, module);
default:
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:barrier_intra_do_this attempt to select algorithm %d when only 0-%d is valid?",
algorithm, ompi_coll_tuned_forced_max_algorithms[BARRIER]));
return (MPI_ERR_ARG);
} /* switch */
}


@ -3,7 +3,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology * University Research and Technology
* Corporation. All rights reserved. * Corporation. All rights reserved.
* Copyright (c) 2004-2012 The University of Tennessee and The University * Copyright (c) 2004-2015 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights * of Tennessee Research Foundation. All rights
* reserved. * reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -27,31 +27,12 @@
#include "ompi/mca/coll/coll.h" #include "ompi/mca/coll/coll.h"
#include "ompi/mca/coll/base/coll_tags.h" #include "ompi/mca/coll/base/coll_tags.h"
#include "ompi/mca/pml/pml.h" #include "ompi/mca/pml/pml.h"
#include "coll_tuned.h" #include "ompi/mca/coll/base/coll_base_functions.h"
#include "coll_tuned_topo.h" #include "coll_base_topo.h"
#include "coll_tuned_util.h" #include "coll_base_util.h"
/* bcast algorithm variables */
static int coll_tuned_bcast_algorithm_count = 6;
static int coll_tuned_bcast_forced_algorithm = 0;
static int coll_tuned_bcast_segment_size = 0;
static int coll_tuned_bcast_tree_fanout;
static int coll_tuned_bcast_chain_fanout;
/* valid values for coll_tuned_bcast_forced_algorithm */
static mca_base_var_enum_value_t bcast_algorithms[] = {
{0, "ignore"},
{1, "basic_linear"},
{2, "chain"},
{3, "pipeline"},
{4, "split_binary_tree"},
{5, "binary_tree"},
{6, "binomial"},
{0, NULL}
};
int int
ompi_coll_tuned_bcast_intra_generic( void* buffer, ompi_coll_base_bcast_intra_generic( void* buffer,
int original_count, int original_count,
struct ompi_datatype_t* datatype, struct ompi_datatype_t* datatype,
int root, int root,
@ -67,7 +48,7 @@ ompi_coll_tuned_bcast_intra_generic( void* buffer,
char *tmpbuf; char *tmpbuf;
ptrdiff_t extent, lb; ptrdiff_t extent, lb;
ompi_request_t *recv_reqs[2] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL}; ompi_request_t *recv_reqs[2] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL};
#if !defined(COLL_TUNED_BCAST_USE_BLOCKING) #if !defined(COLL_BASE_BCAST_USE_BLOCKING)
ompi_request_t **send_reqs = NULL; ompi_request_t **send_reqs = NULL;
#endif #endif
@ -83,7 +64,7 @@ ompi_coll_tuned_bcast_intra_generic( void* buffer,
/* Set the buffer pointers */ /* Set the buffer pointers */
tmpbuf = (char *) buffer; tmpbuf = (char *) buffer;
#if !defined(COLL_TUNED_BCAST_USE_BLOCKING) #if !defined(COLL_BASE_BCAST_USE_BLOCKING)
if( tree->tree_nextsize != 0 ) { if( tree->tree_nextsize != 0 ) {
send_reqs = (ompi_request_t**)malloc( (ptrdiff_t)tree->tree_nextsize * send_reqs = (ompi_request_t**)malloc( (ptrdiff_t)tree->tree_nextsize *
sizeof(ompi_request_t*) ); sizeof(ompi_request_t*) );
@ -103,7 +84,7 @@ ompi_coll_tuned_bcast_intra_generic( void* buffer,
sendcount = original_count - segindex * count_by_segment; sendcount = original_count - segindex * count_by_segment;
} }
for( i = 0; i < tree->tree_nextsize; i++ ) { for( i = 0; i < tree->tree_nextsize; i++ ) {
#if defined(COLL_TUNED_BCAST_USE_BLOCKING) #if defined(COLL_BASE_BCAST_USE_BLOCKING)
err = MCA_PML_CALL(send(tmpbuf, sendcount, datatype, err = MCA_PML_CALL(send(tmpbuf, sendcount, datatype,
tree->tree_next[i], tree->tree_next[i],
MCA_COLL_BASE_TAG_BCAST, MCA_COLL_BASE_TAG_BCAST,
@ -114,16 +95,16 @@ ompi_coll_tuned_bcast_intra_generic( void* buffer,
MCA_COLL_BASE_TAG_BCAST, MCA_COLL_BASE_TAG_BCAST,
MCA_PML_BASE_SEND_STANDARD, comm, MCA_PML_BASE_SEND_STANDARD, comm,
&send_reqs[i])); &send_reqs[i]));
#endif /* COLL_TUNED_BCAST_USE_BLOCKING */ #endif /* COLL_BASE_BCAST_USE_BLOCKING */
if (err != MPI_SUCCESS) { line = __LINE__; goto error_hndl; } if (err != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
} }
#if !defined(COLL_TUNED_BCAST_USE_BLOCKING) #if !defined(COLL_BASE_BCAST_USE_BLOCKING)
/* complete the sends before starting the next sends */ /* complete the sends before starting the next sends */
err = ompi_request_wait_all( tree->tree_nextsize, send_reqs, err = ompi_request_wait_all( tree->tree_nextsize, send_reqs,
MPI_STATUSES_IGNORE ); MPI_STATUSES_IGNORE );
if (err != MPI_SUCCESS) { line = __LINE__; goto error_hndl; } if (err != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
#endif /* not COLL_TUNED_BCAST_USE_BLOCKING */ #endif /* not COLL_BASE_BCAST_USE_BLOCKING */
/* update tmp buffer */ /* update tmp buffer */
tmpbuf += realsegsize; tmpbuf += realsegsize;
@ -167,7 +148,7 @@ ompi_coll_tuned_bcast_intra_generic( void* buffer,
if (err != MPI_SUCCESS) { line = __LINE__; goto error_hndl; } if (err != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
for( i = 0; i < tree->tree_nextsize; i++ ) { for( i = 0; i < tree->tree_nextsize; i++ ) {
#if defined(COLL_TUNED_BCAST_USE_BLOCKING) #if defined(COLL_BASE_BCAST_USE_BLOCKING)
err = MCA_PML_CALL(send(tmpbuf, count_by_segment, datatype, err = MCA_PML_CALL(send(tmpbuf, count_by_segment, datatype,
tree->tree_next[i], tree->tree_next[i],
MCA_COLL_BASE_TAG_BCAST, MCA_COLL_BASE_TAG_BCAST,
@ -178,16 +159,16 @@ ompi_coll_tuned_bcast_intra_generic( void* buffer,
MCA_COLL_BASE_TAG_BCAST, MCA_COLL_BASE_TAG_BCAST,
MCA_PML_BASE_SEND_STANDARD, comm, MCA_PML_BASE_SEND_STANDARD, comm,
&send_reqs[i])); &send_reqs[i]));
#endif /* COLL_TUNED_BCAST_USE_BLOCKING */ #endif /* COLL_BASE_BCAST_USE_BLOCKING */
if (err != MPI_SUCCESS) { line = __LINE__; goto error_hndl; } if (err != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
} }
#if !defined(COLL_TUNED_BCAST_USE_BLOCKING) #if !defined(COLL_BASE_BCAST_USE_BLOCKING)
/* complete the sends before starting the next iteration */ /* complete the sends before starting the next iteration */
err = ompi_request_wait_all( tree->tree_nextsize, send_reqs, err = ompi_request_wait_all( tree->tree_nextsize, send_reqs,
MPI_STATUSES_IGNORE ); MPI_STATUSES_IGNORE );
if (err != MPI_SUCCESS) { line = __LINE__; goto error_hndl; } if (err != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
#endif /* COLL_TUNED_BCAST_USE_BLOCKING */ #endif /* COLL_BASE_BCAST_USE_BLOCKING */
/* Update the receive buffer */ /* Update the receive buffer */
tmpbuf += realsegsize; tmpbuf += realsegsize;
@ -199,7 +180,7 @@ ompi_coll_tuned_bcast_intra_generic( void* buffer,
if (err != MPI_SUCCESS) { line = __LINE__; goto error_hndl; } if (err != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
sendcount = original_count - (ptrdiff_t)(num_segments - 1) * count_by_segment; sendcount = original_count - (ptrdiff_t)(num_segments - 1) * count_by_segment;
for( i = 0; i < tree->tree_nextsize; i++ ) { for( i = 0; i < tree->tree_nextsize; i++ ) {
#if defined(COLL_TUNED_BCAST_USE_BLOCKING) #if defined(COLL_BASE_BCAST_USE_BLOCKING)
err = MCA_PML_CALL(send(tmpbuf, sendcount, datatype, err = MCA_PML_CALL(send(tmpbuf, sendcount, datatype,
tree->tree_next[i], tree->tree_next[i],
MCA_COLL_BASE_TAG_BCAST, MCA_COLL_BASE_TAG_BCAST,
@ -210,15 +191,15 @@ ompi_coll_tuned_bcast_intra_generic( void* buffer,
MCA_COLL_BASE_TAG_BCAST, MCA_COLL_BASE_TAG_BCAST,
MCA_PML_BASE_SEND_STANDARD, comm, MCA_PML_BASE_SEND_STANDARD, comm,
&send_reqs[i])); &send_reqs[i]));
#endif /* COLL_TUNED_BCAST_USE_BLOCKING */ #endif /* COLL_BASE_BCAST_USE_BLOCKING */
if (err != MPI_SUCCESS) { line = __LINE__; goto error_hndl; } if (err != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
} }
#if !defined(COLL_TUNED_BCAST_USE_BLOCKING) #if !defined(COLL_BASE_BCAST_USE_BLOCKING)
err = ompi_request_wait_all( tree->tree_nextsize, send_reqs, err = ompi_request_wait_all( tree->tree_nextsize, send_reqs,
MPI_STATUSES_IGNORE ); MPI_STATUSES_IGNORE );
if (err != MPI_SUCCESS) { line = __LINE__; goto error_hndl; } if (err != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
#endif /* COLL_TUNED_BCAST_USE_BLOCKING */ #endif /* COLL_BASE_BCAST_USE_BLOCKING */
} }
/* Leaf nodes */ /* Leaf nodes */
@ -255,23 +236,23 @@ ompi_coll_tuned_bcast_intra_generic( void* buffer,
if (err != MPI_SUCCESS) { line = __LINE__; goto error_hndl; } if (err != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
} }
#if !defined(COLL_TUNED_BCAST_USE_BLOCKING) #if !defined(COLL_BASE_BCAST_USE_BLOCKING)
if( NULL != send_reqs ) free(send_reqs); if( NULL != send_reqs ) free(send_reqs);
#endif #endif
return (MPI_SUCCESS); return (MPI_SUCCESS);
error_hndl: error_hndl:
OPAL_OUTPUT( (ompi_coll_tuned_stream,"%s:%4d\tError occurred %d, rank %2d", OPAL_OUTPUT( (ompi_coll_base_framework.framework_output,"%s:%4d\tError occurred %d, rank %2d",
__FILE__, line, err, rank) ); __FILE__, line, err, rank) );
#if !defined(COLL_TUNED_BCAST_USE_BLOCKING) #if !defined(COLL_BASE_BCAST_USE_BLOCKING)
if( NULL != send_reqs ) free(send_reqs); if( NULL != send_reqs ) free(send_reqs);
#endif #endif
return (err); return (err);
} }
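The generic routine drives whatever tree is cached on the communicator; the segmenting idea is clearest in the degenerate chain case. A compact sketch with blocking calls (segcount > 0 assumed, names illustrative; the real code posts non-blocking sends so a rank can receive segment s+1 while still forwarding segment s):

    #include <mpi.h>

    /* Pipelined chain bcast: cut the message into segments and forward
     * each one down the chain root -> root+1 -> ... -> root-1 (mod size).
     * Sends/recvs to MPI_PROC_NULL are no-ops, which handles the ends. */
    static int chain_bcast_sketch(char *buf, int count, int segcount,
                                  int root, MPI_Comm comm)
    {
        int rank, size, err, done = 0;

        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);
        int parent = (rank == root) ? MPI_PROC_NULL
                                    : (rank + size - 1) % size;
        int child  = ((rank + 1) % size == root) ? MPI_PROC_NULL
                                                 : (rank + 1) % size;
        while (done < count) {
            int seg = (count - done < segcount) ? count - done : segcount;
            err = MPI_Recv(buf + done, seg, MPI_CHAR, parent, 0,
                           comm, MPI_STATUS_IGNORE);
            if (MPI_SUCCESS != err) return err;
            err = MPI_Send(buf + done, seg, MPI_CHAR, child, 0, comm);
            if (MPI_SUCCESS != err) return err;
            done += seg;
        }
        return MPI_SUCCESS;
    }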
int int
ompi_coll_tuned_bcast_intra_bintree ( void* buffer, ompi_coll_base_bcast_intra_bintree ( void* buffer,
int count, int count,
struct ompi_datatype_t* datatype, struct ompi_datatype_t* datatype,
int root, int root,
@ -281,26 +262,25 @@ ompi_coll_tuned_bcast_intra_bintree ( void* buffer,
{ {
int segcount = count; int segcount = count;
size_t typelng; size_t typelng;
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module; mca_coll_base_comm_t *data = module->base_data;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data;
COLL_TUNED_UPDATE_BINTREE( comm, tuned_module, root ); COLL_BASE_UPDATE_BINTREE( comm, module, root );
/** /**
* Determine number of elements sent per operation. * Determine number of elements sent per operation.
*/ */
ompi_datatype_type_size( datatype, &typelng ); ompi_datatype_type_size( datatype, &typelng );
COLL_TUNED_COMPUTED_SEGCOUNT( segsize, typelng, segcount ); COLL_BASE_COMPUTED_SEGCOUNT( segsize, typelng, segcount );
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:bcast_intra_binary rank %d ss %5d typelng %lu segcount %d", OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:bcast_intra_binary rank %d ss %5d typelng %lu segcount %d",
ompi_comm_rank(comm), segsize, (unsigned long)typelng, segcount)); ompi_comm_rank(comm), segsize, (unsigned long)typelng, segcount));
return ompi_coll_tuned_bcast_intra_generic( buffer, count, datatype, root, comm, module, return ompi_coll_base_bcast_intra_generic( buffer, count, datatype, root, comm, module,
segcount, data->cached_bintree ); segcount, data->cached_bintree );
} }
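The COMPUTED_SEGCOUNT step above turns the byte-oriented segsize into a count of elements per segment. Roughly: with segsize = 8192 bytes and an 8-byte datatype (typelng = 8), each segment carries 1024 elements, so a 10000-element broadcast is pipelined as nine full segments plus a 784-element tail, which is exactly the sendcount adjustment visible in the generic routine above.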
int int
ompi_coll_tuned_bcast_intra_pipeline( void* buffer, ompi_coll_base_bcast_intra_pipeline( void* buffer,
int count, int count,
struct ompi_datatype_t* datatype, struct ompi_datatype_t* datatype,
int root, int root,
@ -310,26 +290,25 @@ ompi_coll_tuned_bcast_intra_pipeline( void* buffer,
{ {
int segcount = count; int segcount = count;
size_t typelng; size_t typelng;
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module; mca_coll_base_comm_t *data = module->base_data;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data;
COLL_TUNED_UPDATE_PIPELINE( comm, tuned_module, root ); COLL_BASE_UPDATE_PIPELINE( comm, module, root );
/** /**
* Determine number of elements sent per operation. * Determine number of elements sent per operation.
*/ */
ompi_datatype_type_size( datatype, &typelng ); ompi_datatype_type_size( datatype, &typelng );
COLL_TUNED_COMPUTED_SEGCOUNT( segsize, typelng, segcount ); COLL_BASE_COMPUTED_SEGCOUNT( segsize, typelng, segcount );
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:bcast_intra_pipeline rank %d ss %5d typelng %lu segcount %d", OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:bcast_intra_pipeline rank %d ss %5d typelng %lu segcount %d",
ompi_comm_rank(comm), segsize, (unsigned long)typelng, segcount)); ompi_comm_rank(comm), segsize, (unsigned long)typelng, segcount));
return ompi_coll_tuned_bcast_intra_generic( buffer, count, datatype, root, comm, module, return ompi_coll_base_bcast_intra_generic( buffer, count, datatype, root, comm, module,
segcount, data->cached_pipeline ); segcount, data->cached_pipeline );
} }
int int
ompi_coll_tuned_bcast_intra_chain( void* buffer, ompi_coll_base_bcast_intra_chain( void* buffer,
int count, int count,
struct ompi_datatype_t* datatype, struct ompi_datatype_t* datatype,
int root, int root,
@ -339,26 +318,25 @@ ompi_coll_tuned_bcast_intra_chain( void* buffer,
{ {
int segcount = count; int segcount = count;
size_t typelng; size_t typelng;
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module; mca_coll_base_comm_t *data = module->base_data;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data;
COLL_TUNED_UPDATE_CHAIN( comm, tuned_module, root, chains ); COLL_BASE_UPDATE_CHAIN( comm, module, root, chains );
/** /**
* Determine number of elements sent per operation. * Determine number of elements sent per operation.
*/ */
ompi_datatype_type_size( datatype, &typelng ); ompi_datatype_type_size( datatype, &typelng );
COLL_TUNED_COMPUTED_SEGCOUNT( segsize, typelng, segcount ); COLL_BASE_COMPUTED_SEGCOUNT( segsize, typelng, segcount );
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:bcast_intra_chain rank %d fo %d ss %5d typelng %lu segcount %d", OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:bcast_intra_chain rank %d fo %d ss %5d typelng %lu segcount %d",
ompi_comm_rank(comm), chains, segsize, (unsigned long)typelng, segcount)); ompi_comm_rank(comm), chains, segsize, (unsigned long)typelng, segcount));
return ompi_coll_tuned_bcast_intra_generic( buffer, count, datatype, root, comm, module, return ompi_coll_base_bcast_intra_generic( buffer, count, datatype, root, comm, module,
segcount, data->cached_chain ); segcount, data->cached_chain );
} }
int int
ompi_coll_tuned_bcast_intra_binomial( void* buffer, ompi_coll_base_bcast_intra_binomial( void* buffer,
int count, int count,
struct ompi_datatype_t* datatype, struct ompi_datatype_t* datatype,
int root, int root,
@ -368,26 +346,25 @@ ompi_coll_tuned_bcast_intra_binomial( void* buffer,
{ {
int segcount = count; int segcount = count;
size_t typelng; size_t typelng;
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module; mca_coll_base_comm_t *data = module->base_data;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data;
COLL_TUNED_UPDATE_BMTREE( comm, tuned_module, root ); COLL_BASE_UPDATE_BMTREE( comm, module, root );
/** /**
* Determine number of elements sent per operation. * Determine number of elements sent per operation.
*/ */
ompi_datatype_type_size( datatype, &typelng ); ompi_datatype_type_size( datatype, &typelng );
COLL_TUNED_COMPUTED_SEGCOUNT( segsize, typelng, segcount ); COLL_BASE_COMPUTED_SEGCOUNT( segsize, typelng, segcount );
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:bcast_intra_binomial rank %d ss %5d typelng %lu segcount %d", OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:bcast_intra_binomial rank %d ss %5d typelng %lu segcount %d",
ompi_comm_rank(comm), segsize, (unsigned long)typelng, segcount)); ompi_comm_rank(comm), segsize, (unsigned long)typelng, segcount));
return ompi_coll_tuned_bcast_intra_generic( buffer, count, datatype, root, comm, module, return ompi_coll_base_bcast_intra_generic( buffer, count, datatype, root, comm, module,
segcount, data->cached_bmtree ); segcount, data->cached_bmtree );
} }
int int
ompi_coll_tuned_bcast_intra_split_bintree ( void* buffer, ompi_coll_base_bcast_intra_split_bintree ( void* buffer,
int count, int count,
struct ompi_datatype_t* datatype, struct ompi_datatype_t* datatype,
int root, int root,
@ -405,20 +382,19 @@ ompi_coll_tuned_bcast_intra_split_bintree ( void* buffer,
ptrdiff_t type_extent, lb; ptrdiff_t type_extent, lb;
ompi_request_t *base_req, *new_req; ompi_request_t *base_req, *new_req;
ompi_coll_tree_t *tree; ompi_coll_tree_t *tree;
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module; mca_coll_base_comm_t *data = module->base_data;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data;
size = ompi_comm_size(comm); size = ompi_comm_size(comm);
rank = ompi_comm_rank(comm); rank = ompi_comm_rank(comm);
OPAL_OUTPUT((ompi_coll_tuned_stream,"ompi_coll_tuned_bcast_intra_split_bintree rank %d root %d ss %5d", rank, root, segsize)); OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"ompi_coll_base_bcast_intra_split_bintree rank %d root %d ss %5d", rank, root, segsize));
if (size == 1) { if (size == 1) {
return MPI_SUCCESS; return MPI_SUCCESS;
} }
/* setup the binary tree topology. */ /* setup the binary tree topology. */
COLL_TUNED_UPDATE_BINTREE( comm, tuned_module, root ); COLL_BASE_UPDATE_BINTREE( comm, module, root );
tree = data->cached_bintree; tree = data->cached_bintree;
err = ompi_datatype_type_size( datatype, &type_size ); err = ompi_datatype_type_size( datatype, &type_size );
@ -450,7 +426,7 @@ ompi_coll_tuned_bcast_intra_split_bintree ( void* buffer,
(segsize > ((ptrdiff_t)counts[0] * type_size)) || (segsize > ((ptrdiff_t)counts[0] * type_size)) ||
(segsize > ((ptrdiff_t)counts[1] * type_size)) ) { (segsize > ((ptrdiff_t)counts[1] * type_size)) ) {
/* call linear version here ! */ /* call linear version here ! */
return (ompi_coll_tuned_bcast_intra_chain ( buffer, count, datatype, return (ompi_coll_base_bcast_intra_chain ( buffer, count, datatype,
root, comm, module, root, comm, module,
segsize, 1 )); segsize, 1 ));
} }
@ -593,7 +569,7 @@ ompi_coll_tuned_bcast_intra_split_bintree ( void* buffer,
if ( (size%2) != 0 && rank != root) { if ( (size%2) != 0 && rank != root) {
err = ompi_coll_tuned_sendrecv( tmpbuf[lr], counts[lr], datatype, err = ompi_coll_base_sendrecv( tmpbuf[lr], counts[lr], datatype,
pair, MCA_COLL_BASE_TAG_BCAST, pair, MCA_COLL_BASE_TAG_BCAST,
tmpbuf[(lr+1)%2], counts[(lr+1)%2], datatype, tmpbuf[(lr+1)%2], counts[(lr+1)%2], datatype,
pair, MCA_COLL_BASE_TAG_BCAST, pair, MCA_COLL_BASE_TAG_BCAST,
@ -617,7 +593,7 @@ ompi_coll_tuned_bcast_intra_split_bintree ( void* buffer,
} }
/* everyone else exchanges buffers */ /* everyone else exchanges buffers */
else { else {
err = ompi_coll_tuned_sendrecv( tmpbuf[lr], counts[lr], datatype, err = ompi_coll_base_sendrecv( tmpbuf[lr], counts[lr], datatype,
pair, MCA_COLL_BASE_TAG_BCAST, pair, MCA_COLL_BASE_TAG_BCAST,
tmpbuf[(lr+1)%2], counts[(lr+1)%2], datatype, tmpbuf[(lr+1)%2], counts[(lr+1)%2], datatype,
pair, MCA_COLL_BASE_TAG_BCAST, pair, MCA_COLL_BASE_TAG_BCAST,
@ -628,7 +604,7 @@ ompi_coll_tuned_bcast_intra_split_bintree ( void* buffer,
return (MPI_SUCCESS); return (MPI_SUCCESS);
error_hndl: error_hndl:
OPAL_OUTPUT((ompi_coll_tuned_stream,"%s:%4d\tError occurred %d, rank %2d", __FILE__,line,err,rank)); OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"%s:%4d\tError occurred %d, rank %2d", __FILE__,line,err,rank));
return (err); return (err);
} }
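The split variant cuts the message roughly in half (counts[0] + counts[1] == count, with any remainder assigned to one side), streams each half down one subtree of the binary tree, and then, as the sendrecv exchange above shows, pairs each rank with a peer holding the other half so both finish with the full buffer; the odd communicator-size branch covers the rank left without a partner.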
@ -637,7 +613,7 @@ ompi_coll_tuned_bcast_intra_split_bintree ( void* buffer,
* Linear functions are copied from the BASIC coll module * Linear functions are copied from the BASIC coll module
* they do not segment the message and are simple implementations * they do not segment the message and are simple implementations
* but for some small number of nodes and/or small data sizes they * but for some small number of nodes and/or small data sizes they
* are just as fast as tuned/tree based segmenting operations * are just as fast as base/tree based segmenting operations
* and as such may be selected by the decision functions * and as such may be selected by the decision functions
* These are copied into this module due to the way we select modules * These are copied into this module due to the way we select modules
* in V1. i.e. in V2 we will handle this differently and so will not * in V1. i.e. in V2 we will handle this differently and so will not
@ -655,21 +631,20 @@ ompi_coll_tuned_bcast_intra_split_bintree ( void* buffer,
* Returns: - MPI_SUCCESS or error code * Returns: - MPI_SUCCESS or error code
*/ */
int int
ompi_coll_tuned_bcast_intra_basic_linear (void *buff, int count, ompi_coll_base_bcast_intra_basic_linear (void *buff, int count,
struct ompi_datatype_t *datatype, int root, struct ompi_datatype_t *datatype, int root,
struct ompi_communicator_t *comm, struct ompi_communicator_t *comm,
mca_coll_base_module_t *module) mca_coll_base_module_t *module)
{ {
int i, size, rank, err; int i, size, rank, err;
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module; mca_coll_base_comm_t *data = module->base_data;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data;
ompi_request_t **preq, **reqs = data->mcct_reqs; ompi_request_t **preq, **reqs = data->mcct_reqs;
size = ompi_comm_size(comm); size = ompi_comm_size(comm);
rank = ompi_comm_rank(comm); rank = ompi_comm_rank(comm);
OPAL_OUTPUT((ompi_coll_tuned_stream,"ompi_coll_tuned_bcast_intra_basic_linear rank %d root %d", rank, root)); OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"ompi_coll_base_bcast_intra_basic_linear rank %d root %d", rank, root));
/* Non-root receive the data. */ /* Non-root receive the data. */
@ -710,148 +685,11 @@ ompi_coll_tuned_bcast_intra_basic_linear (void *buff, int count,
err = ompi_request_wait_all(i, reqs, MPI_STATUSES_IGNORE); err = ompi_request_wait_all(i, reqs, MPI_STATUSES_IGNORE);
/* Free the reqs */ /* Free the reqs */
ompi_coll_base_free_reqs(reqs, i);
ompi_coll_tuned_free_reqs(reqs, i);
/* All done */ /* All done */
return err; return err;
} }
/* copied function (with appropriate renaming) ends here */ /* copied function (with appropriate renaming) ends here */
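/* Editor's note, not part of the commit: the renamed entry points keep the
 * standard coll-module signature, so a caller that already holds the
 * communicator and the selected module can invoke them directly. Minimal
 * hypothetical sketch (the demo_* name and the value 42 are illustrative
 * only; error handling elided): */
#include "ompi/mca/coll/base/coll_base_functions.h"

static int demo_bcast_linear(struct ompi_communicator_t *comm,
                             mca_coll_base_module_t *module)
{
    int value = 42;   /* rank 0's value is propagated to all other ranks */
    return ompi_coll_base_bcast_intra_basic_linear(&value, 1, MPI_INT,
                                                   0 /* root */, comm, module);
}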
/* The following are used by dynamic and forced rules */
/* publish details of each algorithm and if its forced/fixed/locked in */
/* as you add methods/algorithms you must update this and the query/map routines */
/* this routine is called by the component only */
/* this makes sure that the mca parameters are set to their initial values and perms */
/* module does not call this they call the forced_getvalues routine instead */
int ompi_coll_tuned_bcast_intra_check_forced_init (coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices)
{
mca_base_var_enum_t *new_enum;
ompi_coll_tuned_forced_max_algorithms[BCAST] = coll_tuned_bcast_algorithm_count;
(void) mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"bcast_algorithm_count",
"Number of bcast algorithms available",
MCA_BASE_VAR_TYPE_INT, NULL, 0,
MCA_BASE_VAR_FLAG_DEFAULT_ONLY,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_CONSTANT,
&coll_tuned_bcast_algorithm_count);
/* MPI_T: This variable should eventually be bound to a communicator */
coll_tuned_bcast_forced_algorithm = 0;
(void) mca_base_var_enum_create("coll_tuned_bcast_algorithms", bcast_algorithms, &new_enum);
mca_param_indices->algorithm_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"bcast_algorithm",
"Which bcast algorithm is used. Can be locked down to choice of: 0 ignore, 1 basic linear, 2 chain, 3: pipeline, 4: split binary tree, 5: binary tree, 6: binomial tree.",
MCA_BASE_VAR_TYPE_INT, new_enum, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_bcast_forced_algorithm);
OBJ_RELEASE(new_enum);
if (mca_param_indices->algorithm_param_index < 0) {
return mca_param_indices->algorithm_param_index;
}
coll_tuned_bcast_segment_size = 0;
mca_param_indices->segsize_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"bcast_algorithm_segmentsize",
"Segment size in bytes used by default for bcast algorithms. Only has meaning if algorithm is forced and supports segmenting. 0 bytes means no segmentation.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_bcast_segment_size);
coll_tuned_bcast_tree_fanout = ompi_coll_tuned_init_tree_fanout; /* get system wide default */
mca_param_indices->tree_fanout_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"bcast_algorithm_tree_fanout",
"Fanout for n-tree used for bcast algorithms. Only has meaning if algorithm is forced and supports n-tree topo based operation.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_bcast_tree_fanout);
coll_tuned_bcast_chain_fanout = ompi_coll_tuned_init_chain_fanout; /* get system wide default */
mca_param_indices->chain_fanout_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"bcast_algorithm_chain_fanout",
"Fanout for chains used for bcast algorithms. Only has meaning if algorithm is forced and supports chain topo based operation.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_bcast_chain_fanout);
return (MPI_SUCCESS);
}
int ompi_coll_tuned_bcast_intra_do_forced(void *buf, int count,
struct ompi_datatype_t *dtype,
int root,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data;
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:bcast_intra_do_forced algorithm %d",
data->user_forced[BCAST].algorithm));
switch (data->user_forced[BCAST].algorithm) {
case (0): return ompi_coll_tuned_bcast_intra_dec_fixed( buf, count, dtype, root, comm, module );
case (1): return ompi_coll_tuned_bcast_intra_basic_linear( buf, count, dtype, root, comm, module );
case (2): return ompi_coll_tuned_bcast_intra_chain( buf, count, dtype, root, comm, module,
data->user_forced[BCAST].segsize,
data->user_forced[BCAST].chain_fanout );
case (3): return ompi_coll_tuned_bcast_intra_pipeline( buf, count, dtype, root, comm, module,
data->user_forced[BCAST].segsize );
case (4): return ompi_coll_tuned_bcast_intra_split_bintree( buf, count, dtype, root, comm, module,
data->user_forced[BCAST].segsize );
case (5): return ompi_coll_tuned_bcast_intra_bintree( buf, count, dtype, root, comm, module,
data->user_forced[BCAST].segsize );
case (6): return ompi_coll_tuned_bcast_intra_binomial( buf, count, dtype, root, comm, module,
data->user_forced[BCAST].segsize );
default:
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:bcast_intra_do_forced attempt to select algorithm %d when only 0-%d is valid?",
data->user_forced[BCAST].algorithm, ompi_coll_tuned_forced_max_algorithms[BCAST]));
} /* switch */
return (MPI_ERR_ARG);
}
int ompi_coll_tuned_bcast_intra_do_this(void *buf, int count,
struct ompi_datatype_t *dtype,
int root,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module,
int algorithm, int faninout, int segsize)
{
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:bcast_intra_do_this algorithm %d topo faninout %d segsize %d",
algorithm, faninout, segsize));
switch (algorithm) {
case (0): return ompi_coll_tuned_bcast_intra_dec_fixed( buf, count, dtype, root, comm, module );
case (1): return ompi_coll_tuned_bcast_intra_basic_linear( buf, count, dtype, root, comm, module );
case (2): return ompi_coll_tuned_bcast_intra_chain( buf, count, dtype, root, comm, module, segsize, faninout );
case (3): return ompi_coll_tuned_bcast_intra_pipeline( buf, count, dtype, root, comm, module, segsize );
case (4): return ompi_coll_tuned_bcast_intra_split_bintree( buf, count, dtype, root, comm, module, segsize );
case (5): return ompi_coll_tuned_bcast_intra_bintree( buf, count, dtype, root, comm, module, segsize );
case (6): return ompi_coll_tuned_bcast_intra_binomial( buf, count, dtype, root, comm, module, segsize );
default:
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:bcast_intra_do_this attempt to select algorithm %d when only 0-%d is valid?",
algorithm, ompi_coll_tuned_forced_max_algorithms[BCAST]));
} /* switch */
return (MPI_ERR_ARG);
}


@ -3,7 +3,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology * University Research and Technology
* Corporation. All rights reserved. * Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University * Copyright (c) 2004-2015 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights * of Tennessee Research Foundation. All rights
* reserved. * reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -33,6 +33,7 @@
#include "ompi/mca/coll/coll.h" #include "ompi/mca/coll/coll.h"
#include "ompi/mca/coll/base/base.h" #include "ompi/mca/coll/base/base.h"
#include "ompi/mca/coll/base/coll_base_functions.h"
/* /*
* The following file was created by configure. It contains extern * The following file was created by configure. It contains extern
@ -49,10 +50,55 @@ static void coll_base_module_construct(mca_coll_base_module_t *m)
/* zero out all functions */ /* zero out all functions */
memset ((char *) m + sizeof (m->super), 0, sizeof (*m) - sizeof (m->super)); memset ((char *) m + sizeof (m->super), 0, sizeof (*m) - sizeof (m->super));
m->coll_module_disable = NULL; m->coll_module_disable = NULL;
m->base_data = NULL;
}
static void
coll_base_module_destruct(mca_coll_base_module_t *module)
{
mca_coll_base_comm_t* data = module->base_data;
if (NULL != data) {
if( NULL != data->mcct_reqs ) {
for( int i = 0; i < data->mcct_num_reqs; ++i ) {
if( MPI_REQUEST_NULL != data->mcct_reqs[i] )
ompi_request_free(&data->mcct_reqs[i]);
}
free(data->mcct_reqs);
data->mcct_reqs = NULL;
data->mcct_num_reqs = 0;
}
assert(0 == data->mcct_num_reqs);
/* free any cached information that has been allocated */
if (data->cached_ntree) { /* destroy general tree if defined */
ompi_coll_base_topo_destroy_tree (&data->cached_ntree);
}
if (data->cached_bintree) { /* destroy bintree if defined */
ompi_coll_base_topo_destroy_tree (&data->cached_bintree);
}
if (data->cached_bmtree) { /* destroy bmtree if defined */
ompi_coll_base_topo_destroy_tree (&data->cached_bmtree);
}
if (data->cached_in_order_bmtree) { /* destroy bmtree if defined */
ompi_coll_base_topo_destroy_tree (&data->cached_in_order_bmtree);
}
if (data->cached_chain) { /* destroy general chain if defined */
ompi_coll_base_topo_destroy_tree (&data->cached_chain);
}
if (data->cached_pipeline) { /* destroy pipeline if defined */
ompi_coll_base_topo_destroy_tree (&data->cached_pipeline);
}
if (data->cached_in_order_bintree) { /* destroy in order bintree if defined */
ompi_coll_base_topo_destroy_tree (&data->cached_in_order_bintree);
}
free(data);
}
} }
OBJ_CLASS_INSTANCE(mca_coll_base_module_t, opal_object_t, OBJ_CLASS_INSTANCE(mca_coll_base_module_t, opal_object_t,
coll_base_module_construct, NULL); coll_base_module_construct, coll_base_module_destruct);
MCA_BASE_FRAMEWORK_DECLARE(ompi, coll, "Collectives", NULL, NULL, NULL, MCA_BASE_FRAMEWORK_DECLARE(ompi, coll, "Collectives", NULL, NULL, NULL,
mca_coll_base_static_components, 0); mca_coll_base_static_components, 0);

ompi/mca/coll/base/coll_base_functions.h (new file, 341 lines)

@ -0,0 +1,341 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2015 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2008 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef MCA_COLL_BASE_EXPORT_H
#define MCA_COLL_BASE_EXPORT_H
#include "ompi_config.h"
#include "ompi/mca/coll/base/base.h"
#include "opal/mca/mca.h"
#include "ompi/mca/coll/coll.h"
#include "ompi/request/request.h"
/* need to include our own topo prototypes so we can malloc data on the comm correctly */
#include "coll_base_topo.h"
/* some fixed value index vars to simplify certain operations */
typedef enum COLLTYPE {
ALLGATHER = 0, /* 0 */
ALLGATHERV, /* 1 */
ALLREDUCE, /* 2 */
ALLTOALL, /* 3 */
ALLTOALLV, /* 4 */
ALLTOALLW, /* 5 */
BARRIER, /* 6 */
BCAST, /* 7 */
EXSCAN, /* 8 */
GATHER, /* 9 */
GATHERV, /* 10 */
REDUCE, /* 11 */
REDUCESCATTER, /* 12 */
SCAN, /* 13 */
SCATTER, /* 14 */
SCATTERV, /* 15 */
COLLCOUNT /* 16 end counter; keep it as the last element */
} COLLTYPE_T;
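/* Editor's sketch, not in the original header: because the enum is dense and
 * COLLCOUNT closes the list, the values double as indices into per-collective
 * tables. Hypothetical example: */
static int demo_algorithm_count[COLLCOUNT];    /* sized by the end counter */

static void demo_init_counts(void)
{
    demo_algorithm_count[BCAST]  = 6;  /* the six bcast variants declared below */
    demo_algorithm_count[GATHER] = 3;  /* the three gather variants declared below */
}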
/* defined arg lists to simplify auto inclusion of user overriding decision functions */
#define ALLGATHER_ARGS void *sbuf, int scount, struct ompi_datatype_t *sdtype, void *rbuf, int rcount, struct ompi_datatype_t *rdtype, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define ALLGATHERV_ARGS void *sbuf, int scount, struct ompi_datatype_t *sdtype, void * rbuf, int *rcounts, int *disps, struct ompi_datatype_t *rdtype, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define ALLREDUCE_ARGS void *sbuf, void *rbuf, int count, struct ompi_datatype_t *dtype, struct ompi_op_t *op, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define ALLTOALL_ARGS void *sbuf, int scount, struct ompi_datatype_t *sdtype, void* rbuf, int rcount, struct ompi_datatype_t *rdtype, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define ALLTOALLV_ARGS void *sbuf, int *scounts, int *sdisps, struct ompi_datatype_t *sdtype, void *rbuf, int *rcounts, int *rdisps, struct ompi_datatype_t *rdtype, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define ALLTOALLW_ARGS void *sbuf, int *scounts, int *sdisps, struct ompi_datatype_t **sdtypes, void *rbuf, int *rcounts, int *rdisps, struct ompi_datatype_t **rdtypes, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define BARRIER_ARGS struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define BCAST_ARGS void *buff, int count, struct ompi_datatype_t *datatype, int root, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define EXSCAN_ARGS void *sbuf, void *rbuf, int count, struct ompi_datatype_t *dtype, struct ompi_op_t *op, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define GATHER_ARGS void *sbuf, int scount, struct ompi_datatype_t *sdtype, void *rbuf, int rcount, struct ompi_datatype_t *rdtype, int root, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define GATHERV_ARGS void *sbuf, int scount, struct ompi_datatype_t *sdtype, void *rbuf, int *rcounts, int *disps, struct ompi_datatype_t *rdtype, int root, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define REDUCE_ARGS void *sbuf, void* rbuf, int count, struct ompi_datatype_t *dtype, struct ompi_op_t *op, int root, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define REDUCESCATTER_ARGS void *sbuf, void *rbuf, int *rcounts, struct ompi_datatype_t *dtype, struct ompi_op_t *op, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define SCAN_ARGS void *sbuf, void *rbuf, int count, struct ompi_datatype_t *dtype, struct ompi_op_t *op, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define SCATTER_ARGS void *sbuf, int scount, struct ompi_datatype_t *sdtype, void *rbuf, int rcount, struct ompi_datatype_t *rdtype, int root, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
#define SCATTERV_ARGS void *sbuf, int *scounts, int *disps, struct ompi_datatype_t *sdtype, void* rbuf, int rcount, struct ompi_datatype_t *rdtype, int root, struct ompi_communicator_t *comm, mca_coll_base_module_t *module
/* end defined arg lists to simplify auto inclusion of user overriding decision functions */
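/* Editor's sketch, not in the original header: each *_ARGS macro expands to
 * the canonical MPI argument list, so every algorithm variant of a collective
 * shares one signature and only appends its own tuning parameters. For
 * example, a hypothetical declaration
 *
 *     int ompi_coll_demo_bcast(BCAST_ARGS, uint32_t segsize);
 *
 * is equivalent, after preprocessing, to
 *
 *     int ompi_coll_demo_bcast(void *buff, int count,
 *                              struct ompi_datatype_t *datatype, int root,
 *                              struct ompi_communicator_t *comm,
 *                              mca_coll_base_module_t *module,
 *                              uint32_t segsize);
 */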
BEGIN_C_DECLS
/* All Gather */
int ompi_coll_base_allgather_intra_bruck(ALLGATHER_ARGS);
int ompi_coll_base_allgather_intra_recursivedoubling(ALLGATHER_ARGS);
int ompi_coll_base_allgather_intra_ring(ALLGATHER_ARGS);
int ompi_coll_base_allgather_intra_neighborexchange(ALLGATHER_ARGS);
int ompi_coll_base_allgather_intra_basic_linear(ALLGATHER_ARGS);
int ompi_coll_base_allgather_intra_two_procs(ALLGATHER_ARGS);
/* All GatherV */
int ompi_coll_base_allgatherv_intra_bruck(ALLGATHERV_ARGS);
int ompi_coll_base_allgatherv_intra_ring(ALLGATHERV_ARGS);
int ompi_coll_base_allgatherv_intra_neighborexchange(ALLGATHERV_ARGS);
int ompi_coll_base_allgatherv_intra_basic_default(ALLGATHERV_ARGS);
int ompi_coll_base_allgatherv_intra_two_procs(ALLGATHERV_ARGS);
/* All Reduce */
int ompi_coll_base_allreduce_intra_nonoverlapping(ALLREDUCE_ARGS);
int ompi_coll_base_allreduce_intra_recursivedoubling(ALLREDUCE_ARGS);
int ompi_coll_base_allreduce_intra_ring(ALLREDUCE_ARGS);
int ompi_coll_base_allreduce_intra_ring_segmented(ALLREDUCE_ARGS, uint32_t segsize);
int ompi_coll_base_allreduce_intra_basic_linear(ALLREDUCE_ARGS);
/* AlltoAll */
int ompi_coll_base_alltoall_intra_pairwise(ALLTOALL_ARGS);
int ompi_coll_base_alltoall_intra_bruck(ALLTOALL_ARGS);
int ompi_coll_base_alltoall_intra_basic_linear(ALLTOALL_ARGS);
int ompi_coll_base_alltoall_intra_linear_sync(ALLTOALL_ARGS, int max_requests);
int ompi_coll_base_alltoall_intra_two_procs(ALLTOALL_ARGS);
/* AlltoAllV */
int ompi_coll_base_alltoallv_intra_pairwise(ALLTOALLV_ARGS);
int ompi_coll_base_alltoallv_intra_basic_linear(ALLTOALLV_ARGS);
/* AlltoAllW */
/* Barrier */
int ompi_coll_base_barrier_intra_doublering(BARRIER_ARGS);
int ompi_coll_base_barrier_intra_recursivedoubling(BARRIER_ARGS);
int ompi_coll_base_barrier_intra_bruck(BARRIER_ARGS);
int ompi_coll_base_barrier_intra_two_procs(BARRIER_ARGS);
int ompi_coll_base_barrier_intra_linear(BARRIER_ARGS);
int ompi_coll_base_barrier_intra_tree(BARRIER_ARGS);
/* Bcast */
int ompi_coll_base_bcast_intra_basic_linear(BCAST_ARGS);
int ompi_coll_base_bcast_intra_chain(BCAST_ARGS, uint32_t segsize, int32_t chains);
int ompi_coll_base_bcast_intra_pipeline(BCAST_ARGS, uint32_t segsize);
int ompi_coll_base_bcast_intra_binomial(BCAST_ARGS, uint32_t segsize);
int ompi_coll_base_bcast_intra_bintree(BCAST_ARGS, uint32_t segsize);
int ompi_coll_base_bcast_intra_split_bintree(BCAST_ARGS, uint32_t segsize);
/* Exscan */
/* Gather */
int ompi_coll_base_gather_intra_basic_linear(GATHER_ARGS);
int ompi_coll_base_gather_intra_binomial(GATHER_ARGS);
int ompi_coll_base_gather_intra_linear_sync(GATHER_ARGS, int first_segment_size);
/* GatherV */
/* Reduce */
int ompi_coll_base_reduce_intra_basic_linear(REDUCE_ARGS);
int ompi_coll_base_reduce_intra_chain(REDUCE_ARGS, uint32_t segsize, int fanout, int max_outstanding_reqs );
int ompi_coll_base_reduce_intra_pipeline(REDUCE_ARGS, uint32_t segsize, int max_outstanding_reqs );
int ompi_coll_base_reduce_intra_binary(REDUCE_ARGS, uint32_t segsize, int max_outstanding_reqs );
int ompi_coll_base_reduce_intra_binomial(REDUCE_ARGS, uint32_t segsize, int max_outstanding_reqs );
int ompi_coll_base_reduce_intra_in_order_binary(REDUCE_ARGS, uint32_t segsize, int max_outstanding_reqs );
/* Reduce_scatter */
int ompi_coll_base_reduce_scatter_intra_nonoverlapping(REDUCESCATTER_ARGS);
int ompi_coll_base_reduce_scatter_intra_basic_recursivehalving(REDUCESCATTER_ARGS);
int ompi_coll_base_reduce_scatter_intra_ring(REDUCESCATTER_ARGS);
/* Scan */
/* Scatter */
int ompi_coll_base_scatter_intra_basic_linear(SCATTER_ARGS);
int ompi_coll_base_scatter_intra_binomial(SCATTER_ARGS);
/* ScatterV */
END_C_DECLS
#define COLL_BASE_UPDATE_BINTREE( OMPI_COMM, BASE_MODULE, ROOT ) \
do { \
mca_coll_base_comm_t* coll_comm = (BASE_MODULE)->base_data; \
if( !( (coll_comm->cached_bintree) \
&& (coll_comm->cached_bintree_root == (ROOT)) ) ) { \
if( coll_comm->cached_bintree ) { /* destroy previous binomial if defined */ \
ompi_coll_base_topo_destroy_tree( &(coll_comm->cached_bintree) ); \
} \
coll_comm->cached_bintree = ompi_coll_base_topo_build_tree(2,(OMPI_COMM),(ROOT)); \
coll_comm->cached_bintree_root = (ROOT); \
} \
} while (0)
#define COLL_BASE_UPDATE_BMTREE( OMPI_COMM, BASE_MODULE, ROOT ) \
do { \
mca_coll_base_comm_t* coll_comm = (BASE_MODULE)->base_data; \
if( !( (coll_comm->cached_bmtree) \
&& (coll_comm->cached_bmtree_root == (ROOT)) ) ) { \
if( coll_comm->cached_bmtree ) { /* destroy previous binomial if defined */ \
ompi_coll_base_topo_destroy_tree( &(coll_comm->cached_bmtree) ); \
} \
coll_comm->cached_bmtree = ompi_coll_base_topo_build_bmtree( (OMPI_COMM), (ROOT) ); \
coll_comm->cached_bmtree_root = (ROOT); \
} \
} while (0)
#define COLL_BASE_UPDATE_IN_ORDER_BMTREE( OMPI_COMM, BASE_MODULE, ROOT ) \
do { \
mca_coll_base_comm_t* coll_comm = (BASE_MODULE)->base_data; \
if( !( (coll_comm->cached_in_order_bmtree) \
&& (coll_comm->cached_in_order_bmtree_root == (ROOT)) ) ) { \
if( coll_comm->cached_in_order_bmtree ) { /* destroy previous binomial if defined */ \
ompi_coll_base_topo_destroy_tree( &(coll_comm->cached_in_order_bmtree) ); \
} \
coll_comm->cached_in_order_bmtree = ompi_coll_base_topo_build_in_order_bmtree( (OMPI_COMM), (ROOT) ); \
coll_comm->cached_in_order_bmtree_root = (ROOT); \
} \
} while (0)
#define COLL_BASE_UPDATE_PIPELINE( OMPI_COMM, BASE_MODULE, ROOT ) \
do { \
mca_coll_base_comm_t* coll_comm = (BASE_MODULE)->base_data; \
if( !( (coll_comm->cached_pipeline) \
&& (coll_comm->cached_pipeline_root == (ROOT)) ) ) { \
if (coll_comm->cached_pipeline) { /* destroy previous pipeline if defined */ \
ompi_coll_base_topo_destroy_tree( &(coll_comm->cached_pipeline) ); \
} \
coll_comm->cached_pipeline = ompi_coll_base_topo_build_chain( 1, (OMPI_COMM), (ROOT) ); \
coll_comm->cached_pipeline_root = (ROOT); \
} \
} while (0)
#define COLL_BASE_UPDATE_CHAIN( OMPI_COMM, BASE_MODULE, ROOT, FANOUT ) \
do { \
mca_coll_base_comm_t* coll_comm = (BASE_MODULE)->base_data; \
if( !( (coll_comm->cached_chain) \
&& (coll_comm->cached_chain_root == (ROOT)) \
&& (coll_comm->cached_chain_fanout == (FANOUT)) ) ) { \
if( coll_comm->cached_chain) { /* destroy previous chain if defined */ \
ompi_coll_base_topo_destroy_tree( &(coll_comm->cached_chain) ); \
} \
coll_comm->cached_chain = ompi_coll_base_topo_build_chain((FANOUT), (OMPI_COMM), (ROOT)); \
coll_comm->cached_chain_root = (ROOT); \
coll_comm->cached_chain_fanout = (FANOUT); \
} \
} while (0)
#define COLL_BASE_UPDATE_IN_ORDER_BINTREE( OMPI_COMM, BASE_MODULE ) \
do { \
mca_coll_base_comm_t* coll_comm = (BASE_MODULE)->base_data; \
if( !(coll_comm->cached_in_order_bintree) ) { \
/* In-order binary tree topology is defined by communicator size */ \
/* Thus, there is no need to destroy anything */ \
coll_comm->cached_in_order_bintree = \
ompi_coll_base_topo_build_in_order_bintree((OMPI_COMM)); \
} \
} while (0)
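/* Editor's sketch, not in the original header: the update macros above share
 * one pattern: rebuild the topology only when the cached parameters differ,
 * and hang the result off the module's base_data. A hypothetical consumer,
 * using the tree_next/tree_nextsize child links of ompi_coll_tree_t (error
 * handling elided): */
static void demo_visit_bintree_children(struct ompi_communicator_t *comm,
                                        mca_coll_base_module_t *module,
                                        int root)
{
    COLL_BASE_UPDATE_BINTREE(comm, module, root);  /* no-op if already cached */
    ompi_coll_tree_t *tree = module->base_data->cached_bintree;

    for (int i = 0; i < tree->tree_nextsize; i++) {
        /* tree->tree_next[i] is the rank of the i-th child of this process */
    }
}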
/**
* This macro gives a generic way to compute the best count of
* the segment (i.e. the number of complete datatypes that
* can fit in the specified SEGSIZE). Beware, when this macro
* is called, the SEGCOUNT should be initialized to the count as
* expected by the collective call.
*/
#define COLL_BASE_COMPUTED_SEGCOUNT(SEGSIZE, TYPELNG, SEGCOUNT) \
if( ((SEGSIZE) >= (TYPELNG)) && \
((SEGSIZE) < ((TYPELNG) * (SEGCOUNT))) ) { \
size_t residual; \
(SEGCOUNT) = (int)((SEGSIZE) / (TYPELNG)); \
residual = (SEGSIZE) - (SEGCOUNT) * (TYPELNG); \
if( residual > ((TYPELNG) >> 1) ) \
(SEGCOUNT)++; \
} \
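/* Editor's worked example with hypothetical values, not in the original
 * header: a 48-byte datatype, a 1024-byte segment limit, and a collective
 * count of 1000. */
static void demo_segcount(void)
{
    size_t segsize = 1024, typelng = 48;
    int segcount = 1000;          /* starts as the count the collective got */

    COLL_BASE_COMPUTED_SEGCOUNT(segsize, typelng, segcount);
    /* 1024 / 48 = 21 whole datatypes (1008 bytes); the 16-byte residual is
     * below half a datatype (24 bytes), so segcount stays 21. A residual
     * larger than 24 bytes would round it up to 22. */
    (void)segcount;
}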
/**
* This macro gives a generic way to compute well-distributed block counts
* when the count and number of blocks are fixed.
* Macro returns "early-block" count, "late-block" count, and "split-index"
* which is the block at which we switch from "early-block" count to
* the "late-block" count.
* count = split_index * early_block_count +
* (block_count - split_index) * late_block_count
* We do not perform ANY error checks - make sure that the input values
* make sense (e.g. count > num_blocks).
*/
#define COLL_BASE_COMPUTE_BLOCKCOUNT( COUNT, NUM_BLOCKS, SPLIT_INDEX, \
EARLY_BLOCK_COUNT, LATE_BLOCK_COUNT ) \
EARLY_BLOCK_COUNT = LATE_BLOCK_COUNT = COUNT / NUM_BLOCKS; \
SPLIT_INDEX = COUNT % NUM_BLOCKS; \
if (0 != SPLIT_INDEX) { \
EARLY_BLOCK_COUNT = EARLY_BLOCK_COUNT + 1; \
} \
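/* Editor's worked example with hypothetical values, not in the original
 * header: splitting count = 10 across 4 blocks. */
static void demo_blockcount(void)
{
    int count = 10, num_blocks = 4;
    int split_index, early_block_count, late_block_count;

    COLL_BASE_COMPUTE_BLOCKCOUNT(count, num_blocks, split_index,
                                 early_block_count, late_block_count);
    /* Result: early_block_count = 3, late_block_count = 2, split_index = 2;
     * check: 2 * 3 + (4 - 2) * 2 == 10, matching the identity above. */
    (void)split_index; (void)early_block_count; (void)late_block_count;
}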
/*
* Data structure for hanging data off the communicator
* i.e. per module instance
*/
struct mca_coll_base_comm_t {
opal_object_t super;
/* standard data for requests and PML usage */
/* Precreate space for requests
* Note this does not affect basic,
* but in the wrong context it can confuse a debugger;
* this is controlled by an MCA param
*/
ompi_request_t **mcct_reqs;
int mcct_num_reqs;
/*
* base topo information caching per communicator
*
* for each communicator we cache the topo information so we can
* reuse it without regenerating; if the root [or fanout] changes,
* we regenerate and recache this information
*/
/* general tree with n fan out */
ompi_coll_tree_t *cached_ntree;
int cached_ntree_root;
int cached_ntree_fanout;
/* binary tree */
ompi_coll_tree_t *cached_bintree;
int cached_bintree_root;
/* binomial tree */
ompi_coll_tree_t *cached_bmtree;
int cached_bmtree_root;
/* in-order binomial tree */
ompi_coll_tree_t *cached_in_order_bmtree;
int cached_in_order_bmtree_root;
/* chained tree (fanout followed by pipelines) */
ompi_coll_tree_t *cached_chain;
int cached_chain_root;
int cached_chain_fanout;
/* pipeline */
ompi_coll_tree_t *cached_pipeline;
int cached_pipeline_root;
/* in-order binary tree (root of the in-order binary tree is rank 0) */
ompi_coll_tree_t *cached_in_order_bintree;
};
typedef struct mca_coll_base_comm_t mca_coll_base_comm_t;
OMPI_DECLSPEC OBJ_CLASS_DECLARATION(mca_coll_base_comm_t);
static inline void ompi_coll_base_free_reqs(ompi_request_t **reqs, int count)
{
int i;
for (i = 0; i < count; ++i)
ompi_request_free(&reqs[i]);
}
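/* Editor's note, not in the original header: this helper frees each slot
 * unconditionally, whereas the module destructor added earlier in this commit
 * first checks every slot against MPI_REQUEST_NULL before calling
 * ompi_request_free(). Callers should therefore pass only the prefix of the
 * array that actually holds live requests. */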
#endif /* MCA_COLL_BASE_EXPORT_H */


@ -3,7 +3,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology * University Research and Technology
* Corporation. All rights reserved. * Corporation. All rights reserved.
* Copyright (c) 2004-2014 The University of Tennessee and The University * Copyright (c) 2004-2015 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights * of Tennessee Research Foundation. All rights
* reserved. * reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -30,30 +30,14 @@
#include "ompi/mca/coll/coll.h" #include "ompi/mca/coll/coll.h"
#include "ompi/mca/coll/base/coll_tags.h" #include "ompi/mca/coll/base/coll_tags.h"
#include "ompi/mca/pml/pml.h" #include "ompi/mca/pml/pml.h"
#include "coll_tuned.h" #include "ompi/mca/coll/base/coll_base_functions.h"
#include "coll_tuned_topo.h" #include "coll_base_topo.h"
#include "coll_tuned_util.h" #include "coll_base_util.h"
/* gather algorithm variables */
static int coll_tuned_gather_algorithm_count = 3;
static int coll_tuned_gather_forced_algorithm = 0;
static int coll_tuned_gather_segment_size = 0;
static int coll_tuned_gather_tree_fanout;
static int coll_tuned_gather_chain_fanout;
/* valid values for coll_tuned_gather_forced_algorithm */
static mca_base_var_enum_value_t gather_algorithms[] = {
{0, "ignore"},
{1, "basic_linear"},
{2, "binomial"},
{3, "linear_sync"},
{0, NULL}
};
/* Todo: gather_intra_generic, gather_intra_binary, gather_intra_chain, /* Todo: gather_intra_generic, gather_intra_binary, gather_intra_chain,
* gather_intra_pipeline, segmentation? */ * gather_intra_pipeline, segmentation? */
int int
ompi_coll_tuned_gather_intra_binomial(void *sbuf, int scount, ompi_coll_base_gather_intra_binomial(void *sbuf, int scount,
struct ompi_datatype_t *sdtype, struct ompi_datatype_t *sdtype,
void *rbuf, int rcount, void *rbuf, int rcount,
struct ompi_datatype_t *rdtype, struct ompi_datatype_t *rdtype,
@ -67,17 +51,17 @@ ompi_coll_tuned_gather_intra_binomial(void *sbuf, int scount,
MPI_Status status; MPI_Status status;
MPI_Aint sextent, slb, strue_lb, strue_extent; MPI_Aint sextent, slb, strue_lb, strue_extent;
MPI_Aint rextent, rlb, rtrue_lb, rtrue_extent; MPI_Aint rextent, rlb, rtrue_lb, rtrue_extent;
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module; mca_coll_base_module_t *base_module = (mca_coll_base_module_t*) module;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data; mca_coll_base_comm_t *data = base_module->base_data;
size = ompi_comm_size(comm); size = ompi_comm_size(comm);
rank = ompi_comm_rank(comm); rank = ompi_comm_rank(comm);
OPAL_OUTPUT((ompi_coll_tuned_stream, OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"ompi_coll_tuned_gather_intra_binomial rank %d", rank)); "ompi_coll_base_gather_intra_binomial rank %d", rank));
/* create the binomial tree */ /* create the binomial tree */
COLL_TUNED_UPDATE_IN_ORDER_BMTREE( comm, tuned_module, root ); COLL_BASE_UPDATE_IN_ORDER_BMTREE( comm, base_module, root );
bmtree = data->cached_in_order_bmtree; bmtree = data->cached_in_order_bmtree;
ompi_datatype_get_extent(sdtype, &slb, &sextent); ompi_datatype_get_extent(sdtype, &slb, &sextent);
@ -157,8 +141,8 @@ ompi_coll_tuned_gather_intra_binomial(void *sbuf, int scount,
mycount = size - vkid; mycount = size - vkid;
mycount *= rcount; mycount *= rcount;
OPAL_OUTPUT((ompi_coll_tuned_stream, OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"ompi_coll_tuned_gather_intra_binomial rank %d recv %d mycount = %d", "ompi_coll_base_gather_intra_binomial rank %d recv %d mycount = %d",
rank, bmtree->tree_next[i], mycount)); rank, bmtree->tree_next[i], mycount));
err = MCA_PML_CALL(recv(ptmp + total_recv*rextent, (ptrdiff_t)rcount * size - total_recv, rdtype, err = MCA_PML_CALL(recv(ptmp + total_recv*rextent, (ptrdiff_t)rcount * size - total_recv, rdtype,
@ -172,8 +156,8 @@ ompi_coll_tuned_gather_intra_binomial(void *sbuf, int scount,
if (rank != root) { if (rank != root) {
/* all nodes except root send to parents */ /* all nodes except root send to parents */
OPAL_OUTPUT((ompi_coll_tuned_stream, OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"ompi_coll_tuned_gather_intra_binomial rank %d send %d count %d\n", "ompi_coll_base_gather_intra_binomial rank %d send %d count %d\n",
rank, bmtree->tree_prev, total_recv)); rank, bmtree->tree_prev, total_recv));
err = MCA_PML_CALL(send(ptmp, total_recv, sdtype, err = MCA_PML_CALL(send(ptmp, total_recv, sdtype,
@ -207,7 +191,7 @@ ompi_coll_tuned_gather_intra_binomial(void *sbuf, int scount,
if (NULL != tempbuf) if (NULL != tempbuf)
free(tempbuf); free(tempbuf);
OPAL_OUTPUT((ompi_coll_tuned_stream, "%s:%4d\tError occurred %d, rank %2d", OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "%s:%4d\tError occurred %d, rank %2d",
__FILE__, line, err, rank)); __FILE__, line, err, rank));
return err; return err;
} }
@ -220,7 +204,7 @@ ompi_coll_tuned_gather_intra_binomial(void *sbuf, int scount,
* Returns: - MPI_SUCCESS or error code * Returns: - MPI_SUCCESS or error code
*/ */
int int
ompi_coll_tuned_gather_intra_linear_sync(void *sbuf, int scount, ompi_coll_base_gather_intra_linear_sync(void *sbuf, int scount,
struct ompi_datatype_t *sdtype, struct ompi_datatype_t *sdtype,
void *rbuf, int rcount, void *rbuf, int rcount,
struct ompi_datatype_t *rdtype, struct ompi_datatype_t *rdtype,
@ -237,8 +221,8 @@ ompi_coll_tuned_gather_intra_linear_sync(void *sbuf, int scount,
size = ompi_comm_size(comm); size = ompi_comm_size(comm);
rank = ompi_comm_rank(comm); rank = ompi_comm_rank(comm);
OPAL_OUTPUT((ompi_coll_tuned_stream, OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"ompi_coll_tuned_gather_intra_linear_sync rank %d, segment %d", rank, first_segment_size)); "ompi_coll_base_gather_intra_linear_sync rank %d, segment %d", rank, first_segment_size));
if (rank != root) { if (rank != root) {
/* Non-root processes: /* Non-root processes:
@ -250,7 +234,7 @@ ompi_coll_tuned_gather_intra_linear_sync(void *sbuf, int scount,
ompi_datatype_type_size(sdtype, &typelng); ompi_datatype_type_size(sdtype, &typelng);
ompi_datatype_get_extent(sdtype, &lb, &extent); ompi_datatype_get_extent(sdtype, &lb, &extent);
first_segment_count = scount; first_segment_count = scount;
COLL_TUNED_COMPUTED_SEGCOUNT( (size_t) first_segment_size, typelng, COLL_BASE_COMPUTED_SEGCOUNT( (size_t) first_segment_size, typelng,
first_segment_count ); first_segment_count );
ret = MCA_PML_CALL(recv(sbuf, 0, MPI_BYTE, root, ret = MCA_PML_CALL(recv(sbuf, 0, MPI_BYTE, root,
@ -288,7 +272,7 @@ ompi_coll_tuned_gather_intra_linear_sync(void *sbuf, int scount,
ompi_datatype_type_size(rdtype, &typelng); ompi_datatype_type_size(rdtype, &typelng);
ompi_datatype_get_extent(rdtype, &lb, &extent); ompi_datatype_get_extent(rdtype, &lb, &extent);
first_segment_count = rcount; first_segment_count = rcount;
COLL_TUNED_COMPUTED_SEGCOUNT( (size_t)first_segment_size, typelng, COLL_BASE_COMPUTED_SEGCOUNT( (size_t)first_segment_size, typelng,
first_segment_count ); first_segment_count );
ptmp = (char *) rbuf; ptmp = (char *) rbuf;
@ -346,7 +330,7 @@ ompi_coll_tuned_gather_intra_linear_sync(void *sbuf, int scount,
if (NULL != reqs) { if (NULL != reqs) {
free(reqs); free(reqs);
} }
OPAL_OUTPUT (( ompi_coll_tuned_stream, OPAL_OUTPUT (( ompi_coll_base_framework.framework_output,
"ERROR_HNDL: node %d file %s line %d error %d\n", "ERROR_HNDL: node %d file %s line %d error %d\n",
rank, __FILE__, line, ret )); rank, __FILE__, line, ret ));
return ret; return ret;
@ -356,12 +340,12 @@ ompi_coll_tuned_gather_intra_linear_sync(void *sbuf, int scount,
* Linear functions are copied from the BASIC coll module * Linear functions are copied from the BASIC coll module
* they do not segment the message and are simple implementations * they do not segment the message and are simple implementations
* but for some small number of nodes and/or small data sizes they * but for some small number of nodes and/or small data sizes they
* are just as fast as tuned/tree based segmenting operations * are just as fast as base/tree based segmenting operations
* and as such may be selected by the decision functions * and as such may be selected by the decision functions
* These are copied into this module due to the way we select modules * These are copied into this module due to the way we select modules
* in V1. i.e. in V2 we will handle this differently and so will not * in V1. i.e. in V2 we will handle this differently and so will not
* have to duplicate code. * have to duplicate code.
* JPG following the examples from other coll_tuned implementations. Dec06. * JPG following the examples from other coll_base implementations. Dec06.
*/ */
/* copied function (with appropriate renaming) starts here */ /* copied function (with appropriate renaming) starts here */
@ -373,7 +357,7 @@ ompi_coll_tuned_gather_intra_linear_sync(void *sbuf, int scount,
* Returns: - MPI_SUCCESS or error code * Returns: - MPI_SUCCESS or error code
*/ */
int int
ompi_coll_tuned_gather_intra_basic_linear(void *sbuf, int scount, ompi_coll_base_gather_intra_basic_linear(void *sbuf, int scount,
struct ompi_datatype_t *sdtype, struct ompi_datatype_t *sdtype,
void *rbuf, int rcount, void *rbuf, int rcount,
struct ompi_datatype_t *rdtype, struct ompi_datatype_t *rdtype,
@ -389,8 +373,8 @@ ompi_coll_tuned_gather_intra_basic_linear(void *sbuf, int scount,
rank = ompi_comm_rank(comm); rank = ompi_comm_rank(comm);
/* Everyone but root sends data and returns. */ /* Everyone but root sends data and returns. */
OPAL_OUTPUT((ompi_coll_tuned_stream, OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"ompi_coll_tuned_gather_intra_basic_linear rank %d", rank)); "ompi_coll_base_gather_intra_basic_linear rank %d", rank));
if (rank != root) { if (rank != root) {
return MCA_PML_CALL(send(sbuf, scount, sdtype, root, return MCA_PML_CALL(send(sbuf, scount, sdtype, root,
@ -427,164 +411,3 @@ ompi_coll_tuned_gather_intra_basic_linear(void *sbuf, int scount,
/* copied function (with appropriate renaming) ends here */ /* copied function (with appropriate renaming) ends here */
/* The following are used by dynamic and forced rules */
/* publish details of each algorithm and if its forced/fixed/locked in */
/* as you add methods/algorithms you must update this and the query/map
routines */
/* this routine is called by the component only */
/* this makes sure that the mca parameters are set to their initial values
and perms */
/* module does not call this they call the forced_getvalues routine instead */
int
ompi_coll_tuned_gather_intra_check_forced_init(coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices)
{
mca_base_var_enum_t *new_enum;
ompi_coll_tuned_forced_max_algorithms[GATHER] = coll_tuned_gather_algorithm_count;
(void) mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"gather_algorithm_count",
"Number of gather algorithms available",
MCA_BASE_VAR_TYPE_INT, NULL, 0,
MCA_BASE_VAR_FLAG_DEFAULT_ONLY,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_CONSTANT,
&coll_tuned_gather_algorithm_count);
/* MPI_T: This variable should eventually be bound to a communicator */
coll_tuned_gather_forced_algorithm = 0;
(void) mca_base_var_enum_create("coll_tuned_gather_algorithms", gather_algorithms, &new_enum);
mca_param_indices->algorithm_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"gather_algorithm",
"Which gather algorithm is used. Can be locked down to choice of: 0 ignore, 1 basic linear, 2 binomial, 3 linear with synchronization.",
MCA_BASE_VAR_TYPE_INT, new_enum, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_gather_forced_algorithm);
OBJ_RELEASE(new_enum);
if (mca_param_indices->algorithm_param_index < 0) {
return mca_param_indices->algorithm_param_index;
}
coll_tuned_gather_segment_size = 0;
mca_param_indices->segsize_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"gather_algorithm_segmentsize",
"Segment size in bytes used by default for gather algorithms. Only has meaning if algorithm is forced and supports segmenting. 0 bytes means no segmentation. Currently, available algorithms do not support segmentation.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_gather_segment_size);
coll_tuned_gather_tree_fanout = ompi_coll_tuned_init_tree_fanout; /* get system wide default */
mca_param_indices->tree_fanout_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"gather_algorithm_tree_fanout",
"Fanout for n-tree used for gather algorithms. Only has meaning if algorithm is forced and supports n-tree topo based operation. Currently, available algorithms do not support n-tree topologies.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_gather_tree_fanout);
coll_tuned_gather_chain_fanout = ompi_coll_tuned_init_chain_fanout; /* get system wide default */
mca_param_indices->chain_fanout_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"gather_algorithm_chain_fanout",
"Fanout for chains used for gather algorithms. Only has meaning if algorithm is forced and supports chain topo based operation. Currently, available algorithms do not support chain topologies.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_gather_chain_fanout);
return (MPI_SUCCESS);
}
int
ompi_coll_tuned_gather_intra_do_forced(void *sbuf, int scount,
struct ompi_datatype_t *sdtype,
void* rbuf, int rcount,
struct ompi_datatype_t *rdtype,
int root,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data;
OPAL_OUTPUT((ompi_coll_tuned_stream,
"coll:tuned:gather_intra_do_forced selected algorithm %d",
data->user_forced[GATHER].algorithm));
switch (data->user_forced[GATHER].algorithm) {
case (0):
return ompi_coll_tuned_gather_intra_dec_fixed (sbuf, scount, sdtype,
rbuf, rcount, rdtype,
root, comm, module);
case (1):
return ompi_coll_tuned_gather_intra_basic_linear (sbuf, scount, sdtype,
rbuf, rcount, rdtype,
root, comm, module);
case (2):
return ompi_coll_tuned_gather_intra_binomial(sbuf, scount, sdtype,
rbuf, rcount, rdtype,
root, comm, module);
case (3):
return ompi_coll_tuned_gather_intra_linear_sync (sbuf, scount, sdtype,
rbuf, rcount, rdtype,
root, comm, module,
data->user_forced[GATHER].segsize);
default:
OPAL_OUTPUT((ompi_coll_tuned_stream,
"coll:tuned:gather_intra_do_forced attempt to select algorithm %d when only 0-%d is valid?",
data->user_forced[GATHER].algorithm,
ompi_coll_tuned_forced_max_algorithms[GATHER]));
return (MPI_ERR_ARG);
} /* switch */
}
int
ompi_coll_tuned_gather_intra_do_this(void *sbuf, int scount,
struct ompi_datatype_t *sdtype,
void* rbuf, int rcount,
struct ompi_datatype_t *rdtype,
int root,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module,
int algorithm, int faninout, int segsize)
{
OPAL_OUTPUT((ompi_coll_tuned_stream,
"coll:tuned:gather_intra_do_this selected algorithm %d topo faninout %d segsize %d",
algorithm, faninout, segsize));
switch (algorithm) {
case (0):
return ompi_coll_tuned_gather_intra_dec_fixed (sbuf, scount, sdtype,
rbuf, rcount, rdtype,
root, comm, module);
case (1):
return ompi_coll_tuned_gather_intra_basic_linear (sbuf, scount, sdtype,
rbuf, rcount, rdtype,
root, comm, module);
case (2):
return ompi_coll_tuned_gather_intra_binomial(sbuf, scount, sdtype,
rbuf, rcount, rdtype,
root, comm, module);
case (3):
return ompi_coll_tuned_gather_intra_linear_sync (sbuf, scount, sdtype,
rbuf, rcount, rdtype,
root, comm, module,
segsize);
default:
OPAL_OUTPUT((ompi_coll_tuned_stream,
"coll:tuned:gather_intra_do_this attempt to select algorithm %d when only 0-%d is valid?",
algorithm,
ompi_coll_tuned_forced_max_algorithms[GATHER]));
return (MPI_ERR_ARG);
} /* switch */
}


@ -3,7 +3,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology * University Research and Technology
* Corporation. All rights reserved. * Corporation. All rights reserved.
* Copyright (c) 2004-2014 The University of Tennessee and The University * Copyright (c) 2004-2015 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights * of Tennessee Research Foundation. All rights
* reserved. * reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -31,28 +31,8 @@
#include "ompi/mca/coll/base/coll_tags.h" #include "ompi/mca/coll/base/coll_tags.h"
#include "ompi/mca/pml/pml.h" #include "ompi/mca/pml/pml.h"
#include "ompi/op/op.h" #include "ompi/op/op.h"
#include "coll_tuned.h" #include "ompi/mca/coll/base/coll_base_functions.h"
#include "coll_tuned_topo.h" #include "coll_base_topo.h"
/* reduce algorithm variables */
static int coll_tuned_reduce_algorithm_count = 6;
static int coll_tuned_reduce_forced_algorithm = 0;
static int coll_tuned_reduce_segment_size = 0;
static int coll_tuned_reduce_max_requests;
static int coll_tuned_reduce_tree_fanout;
static int coll_tuned_reduce_chain_fanout;
/* valid values for coll_tuned_reduce_forced_algorithm */
static mca_base_var_enum_value_t reduce_algorithms[] = {
{0, "ignore"},
{1, "linear"},
{2, "chain"},
{3, "pipeline"},
{4, "binary"},
{5, "binomial"},
{6, "in-order_binary"},
{0, NULL}
};
/** /**
* This is a generic implementation of the reduce protocol. It uses the tree * This is a generic implementation of the reduce protocol. It uses the tree
@ -65,7 +45,7 @@ static mca_base_var_enum_value_t reduce_algorithms[] = {
* for the first block: thus we must copy sendbuf to accumbuf on intermediate * for the first block: thus we must copy sendbuf to accumbuf on intermediate
* to keep the optimized loop happy. * to keep the optimized loop happy.
*/ */
int ompi_coll_tuned_reduce_generic( void* sendbuf, void* recvbuf, int original_count, int ompi_coll_base_reduce_generic( void* sendbuf, void* recvbuf, int original_count,
ompi_datatype_t* datatype, ompi_op_t* op, ompi_datatype_t* datatype, ompi_op_t* op,
int root, ompi_communicator_t* comm, int root, ompi_communicator_t* comm,
mca_coll_base_module_t *module, mca_coll_base_module_t *module,
@ -95,7 +75,7 @@ int ompi_coll_tuned_reduce_generic( void* sendbuf, void* recvbuf, int original_c
sendtmpbuf = (char *)recvbuf; sendtmpbuf = (char *)recvbuf;
} }
OPAL_OUTPUT((ompi_coll_tuned_stream, "coll:tuned:reduce_generic count %d, msg size %ld, segsize %ld, max_requests %d", OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "coll:base:reduce_generic count %d, msg size %ld, segsize %ld, max_requests %d",
original_count, (unsigned long)((ptrdiff_t)num_segments * (ptrdiff_t)segment_increment), original_count, (unsigned long)((ptrdiff_t)num_segments * (ptrdiff_t)segment_increment),
(unsigned long)segment_increment, max_outstanding_reqs)); (unsigned long)segment_increment, max_outstanding_reqs));
@ -353,7 +333,7 @@ int ompi_coll_tuned_reduce_generic( void* sendbuf, void* recvbuf, int original_c
return OMPI_SUCCESS; return OMPI_SUCCESS;
error_hndl: /* error handler */ error_hndl: /* error handler */
OPAL_OUTPUT (( ompi_coll_tuned_stream, OPAL_OUTPUT (( ompi_coll_base_framework.framework_output,
"ERROR_HNDL: node %d file %s line %d error %d\n", "ERROR_HNDL: node %d file %s line %d error %d\n",
rank, __FILE__, line, ret )); rank, __FILE__, line, ret ));
if( inbuf_free[0] != NULL ) free(inbuf_free[0]); if( inbuf_free[0] != NULL ) free(inbuf_free[0]);
@ -369,7 +349,7 @@ int ompi_coll_tuned_reduce_generic( void* sendbuf, void* recvbuf, int original_c
meaning that at least one datatype must fit in the segment ! meaning that at least one datatype must fit in the segment !
*/ */
int ompi_coll_tuned_reduce_intra_chain( void *sendbuf, void *recvbuf, int count, int ompi_coll_base_reduce_intra_chain( void *sendbuf, void *recvbuf, int count,
ompi_datatype_t* datatype, ompi_datatype_t* datatype,
ompi_op_t* op, int root, ompi_op_t* op, int root,
ompi_communicator_t* comm, ompi_communicator_t* comm,
@ -379,27 +359,27 @@ int ompi_coll_tuned_reduce_intra_chain( void *sendbuf, void *recvbuf, int count,
{ {
int segcount = count; int segcount = count;
size_t typelng; size_t typelng;
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module; mca_coll_base_module_t *base_module = (mca_coll_base_module_t*) module;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data; mca_coll_base_comm_t *data = base_module->base_data;
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:reduce_intra_chain rank %d fo %d ss %5d", ompi_comm_rank(comm), fanout, segsize)); OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:reduce_intra_chain rank %d fo %d ss %5d", ompi_comm_rank(comm), fanout, segsize));
COLL_TUNED_UPDATE_CHAIN( comm, tuned_module, root, fanout ); COLL_BASE_UPDATE_CHAIN( comm, base_module, root, fanout );
/** /**
* Determine number of segments and number of elements * Determine number of segments and number of elements
* sent per operation * sent per operation
*/ */
ompi_datatype_type_size( datatype, &typelng ); ompi_datatype_type_size( datatype, &typelng );
COLL_TUNED_COMPUTED_SEGCOUNT( segsize, typelng, segcount ); COLL_BASE_COMPUTED_SEGCOUNT( segsize, typelng, segcount );
return ompi_coll_tuned_reduce_generic( sendbuf, recvbuf, count, datatype, return ompi_coll_base_reduce_generic( sendbuf, recvbuf, count, datatype,
op, root, comm, module, op, root, comm, module,
data->cached_chain, data->cached_chain,
segcount, max_outstanding_reqs ); segcount, max_outstanding_reqs );
} }
int ompi_coll_tuned_reduce_intra_pipeline( void *sendbuf, void *recvbuf, int ompi_coll_base_reduce_intra_pipeline( void *sendbuf, void *recvbuf,
int count, ompi_datatype_t* datatype, int count, ompi_datatype_t* datatype,
ompi_op_t* op, int root, ompi_op_t* op, int root,
ompi_communicator_t* comm, ompi_communicator_t* comm,
@ -409,28 +389,28 @@ int ompi_coll_tuned_reduce_intra_pipeline( void *sendbuf, void *recvbuf,
{ {
int segcount = count; int segcount = count;
size_t typelng; size_t typelng;
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module; mca_coll_base_module_t *base_module = (mca_coll_base_module_t*) module;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data; mca_coll_base_comm_t *data = base_module->base_data;
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:reduce_intra_pipeline rank %d ss %5d", OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:reduce_intra_pipeline rank %d ss %5d",
ompi_comm_rank(comm), segsize)); ompi_comm_rank(comm), segsize));
COLL_TUNED_UPDATE_PIPELINE( comm, tuned_module, root ); COLL_BASE_UPDATE_PIPELINE( comm, base_module, root );
/** /**
* Determine number of segments and number of elements * Determine number of segments and number of elements
* sent per operation * sent per operation
*/ */
ompi_datatype_type_size( datatype, &typelng ); ompi_datatype_type_size( datatype, &typelng );
COLL_TUNED_COMPUTED_SEGCOUNT( segsize, typelng, segcount ); COLL_BASE_COMPUTED_SEGCOUNT( segsize, typelng, segcount );
return ompi_coll_tuned_reduce_generic( sendbuf, recvbuf, count, datatype, return ompi_coll_base_reduce_generic( sendbuf, recvbuf, count, datatype,
op, root, comm, module, op, root, comm, module,
data->cached_pipeline, data->cached_pipeline,
segcount, max_outstanding_reqs ); segcount, max_outstanding_reqs );
} }
int ompi_coll_tuned_reduce_intra_binary( void *sendbuf, void *recvbuf, int ompi_coll_base_reduce_intra_binary( void *sendbuf, void *recvbuf,
int count, ompi_datatype_t* datatype, int count, ompi_datatype_t* datatype,
ompi_op_t* op, int root, ompi_op_t* op, int root,
ompi_communicator_t* comm, ompi_communicator_t* comm,
@ -440,28 +420,28 @@ int ompi_coll_tuned_reduce_intra_binary( void *sendbuf, void *recvbuf,
{ {
int segcount = count; int segcount = count;
size_t typelng; size_t typelng;
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module; mca_coll_base_module_t *base_module = (mca_coll_base_module_t*) module;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data; mca_coll_base_comm_t *data = base_module->base_data;
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:reduce_intra_binary rank %d ss %5d", OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:reduce_intra_binary rank %d ss %5d",
ompi_comm_rank(comm), segsize)); ompi_comm_rank(comm), segsize));
COLL_TUNED_UPDATE_BINTREE( comm, tuned_module, root ); COLL_BASE_UPDATE_BINTREE( comm, base_module, root );
/** /**
* Determine number of segments and number of elements * Determine number of segments and number of elements
* sent per operation * sent per operation
*/ */
ompi_datatype_type_size( datatype, &typelng ); ompi_datatype_type_size( datatype, &typelng );
COLL_TUNED_COMPUTED_SEGCOUNT( segsize, typelng, segcount ); COLL_BASE_COMPUTED_SEGCOUNT( segsize, typelng, segcount );
return ompi_coll_tuned_reduce_generic( sendbuf, recvbuf, count, datatype, return ompi_coll_base_reduce_generic( sendbuf, recvbuf, count, datatype,
op, root, comm, module, op, root, comm, module,
data->cached_bintree, data->cached_bintree,
segcount, max_outstanding_reqs ); segcount, max_outstanding_reqs );
} }
int ompi_coll_tuned_reduce_intra_binomial( void *sendbuf, void *recvbuf, int ompi_coll_base_reduce_intra_binomial( void *sendbuf, void *recvbuf,
int count, ompi_datatype_t* datatype, int count, ompi_datatype_t* datatype,
ompi_op_t* op, int root, ompi_op_t* op, int root,
ompi_communicator_t* comm, ompi_communicator_t* comm,
@ -471,22 +451,22 @@ int ompi_coll_tuned_reduce_intra_binomial( void *sendbuf, void *recvbuf,
{ {
int segcount = count; int segcount = count;
size_t typelng; size_t typelng;
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module; mca_coll_base_module_t *base_module = (mca_coll_base_module_t*) module;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data; mca_coll_base_comm_t *data = base_module->base_data;
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:reduce_intra_binomial rank %d ss %5d", OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:reduce_intra_binomial rank %d ss %5d",
ompi_comm_rank(comm), segsize)); ompi_comm_rank(comm), segsize));
COLL_TUNED_UPDATE_IN_ORDER_BMTREE( comm, tuned_module, root ); COLL_BASE_UPDATE_IN_ORDER_BMTREE( comm, base_module, root );
/** /**
* Determine number of segments and number of elements * Determine number of segments and number of elements
* sent per operation * sent per operation
*/ */
ompi_datatype_type_size( datatype, &typelng ); ompi_datatype_type_size( datatype, &typelng );
COLL_TUNED_COMPUTED_SEGCOUNT( segsize, typelng, segcount ); COLL_BASE_COMPUTED_SEGCOUNT( segsize, typelng, segcount );
return ompi_coll_tuned_reduce_generic( sendbuf, recvbuf, count, datatype, return ompi_coll_base_reduce_generic( sendbuf, recvbuf, count, datatype,
op, root, comm, module, op, root, comm, module,
data->cached_in_order_bmtree, data->cached_in_order_bmtree,
segcount, max_outstanding_reqs ); segcount, max_outstanding_reqs );
@ -499,7 +479,7 @@ int ompi_coll_tuned_reduce_intra_binomial( void *sendbuf, void *recvbuf,
* Accepts: same as MPI_Reduce() * Accepts: same as MPI_Reduce()
* Returns: MPI_SUCCESS or error code * Returns: MPI_SUCCESS or error code
*/ */
int ompi_coll_tuned_reduce_intra_in_order_binary( void *sendbuf, void *recvbuf, int ompi_coll_base_reduce_intra_in_order_binary( void *sendbuf, void *recvbuf,
int count, int count,
ompi_datatype_t* datatype, ompi_datatype_t* datatype,
ompi_op_t* op, int root, ompi_op_t* op, int root,
@ -511,22 +491,22 @@ int ompi_coll_tuned_reduce_intra_in_order_binary( void *sendbuf, void *recvbuf,
int ret, rank, size, io_root, segcount = count; int ret, rank, size, io_root, segcount = count;
void *use_this_sendbuf = NULL, *use_this_recvbuf = NULL; void *use_this_sendbuf = NULL, *use_this_recvbuf = NULL;
size_t typelng; size_t typelng;
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module; mca_coll_base_module_t *base_module = (mca_coll_base_module_t*) module;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data; mca_coll_base_comm_t *data = base_module->base_data;
rank = ompi_comm_rank(comm); rank = ompi_comm_rank(comm);
size = ompi_comm_size(comm); size = ompi_comm_size(comm);
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:reduce_intra_in_order_binary rank %d ss %5d", OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:reduce_intra_in_order_binary rank %d ss %5d",
rank, segsize)); rank, segsize));
COLL_TUNED_UPDATE_IN_ORDER_BINTREE( comm, tuned_module ); COLL_BASE_UPDATE_IN_ORDER_BINTREE( comm, base_module );
/** /**
* Determine number of segments and number of elements * Determine number of segments and number of elements
* sent per operation * sent per operation
*/ */
ompi_datatype_type_size( datatype, &typelng ); ompi_datatype_type_size( datatype, &typelng );
COLL_TUNED_COMPUTED_SEGCOUNT( segsize, typelng, segcount ); COLL_BASE_COMPUTED_SEGCOUNT( segsize, typelng, segcount );
/* An in-order binary tree must use root (size-1) to preserve the order of /* An in-order binary tree must use root (size-1) to preserve the order of
operations. Thus, if root is not rank (size - 1), then we must handle operations. Thus, if root is not rank (size - 1), then we must handle
@ -564,7 +544,7 @@ int ompi_coll_tuned_reduce_intra_in_order_binary( void *sendbuf, void *recvbuf,
} }
/* Use generic reduce with in-order binary tree topology and io_root */ /* Use generic reduce with in-order binary tree topology and io_root */
ret = ompi_coll_tuned_reduce_generic( use_this_sendbuf, use_this_recvbuf, count, datatype, ret = ompi_coll_base_reduce_generic( use_this_sendbuf, use_this_recvbuf, count, datatype,
op, io_root, comm, module, op, io_root, comm, module,
data->cached_in_order_bintree, data->cached_in_order_bintree,
segcount, max_outstanding_reqs ); segcount, max_outstanding_reqs );
@ -599,7 +579,7 @@ int ompi_coll_tuned_reduce_intra_in_order_binary( void *sendbuf, void *recvbuf,
* Linear functions are copied from the BASIC coll module * Linear functions are copied from the BASIC coll module
* they do not segment the message and are simple implementations * they do not segment the message and are simple implementations
* but for some small number of nodes and/or small data sizes they * but for some small number of nodes and/or small data sizes they
* are just as fast as tuned/tree based segmenting operations * are just as fast as base/tree based segmenting operations
* and as such may be selected by the decision functions * and as such may be selected by the decision functions
* These are copied into this module due to the way we select modules * These are copied into this module due to the way we select modules
* in V1. i.e. in V2 we will handle this differently and so will not * in V1. i.e. in V2 we will handle this differently and so will not
@ -617,7 +597,7 @@ int ompi_coll_tuned_reduce_intra_in_order_binary( void *sendbuf, void *recvbuf,
* Returns: - MPI_SUCCESS or error code * Returns: - MPI_SUCCESS or error code
*/ */
int int
ompi_coll_tuned_reduce_intra_basic_linear(void *sbuf, void *rbuf, int count, ompi_coll_base_reduce_intra_basic_linear(void *sbuf, void *rbuf, int count,
struct ompi_datatype_t *dtype, struct ompi_datatype_t *dtype,
struct ompi_op_t *op, struct ompi_op_t *op,
int root, int root,
@ -634,7 +614,7 @@ ompi_coll_tuned_reduce_intra_basic_linear(void *sbuf, void *rbuf, int count,
rank = ompi_comm_rank(comm); rank = ompi_comm_rank(comm);
size = ompi_comm_size(comm); size = ompi_comm_size(comm);
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:reduce_intra_basic_linear rank %d", rank)); OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:reduce_intra_basic_linear rank %d", rank));
/* If not root, send data to the root. */ /* If not root, send data to the root. */
@ -724,185 +704,3 @@ ompi_coll_tuned_reduce_intra_basic_linear(void *sbuf, void *rbuf, int count,
} }
/* copied function (with appropriate renaming) ends here */ /* copied function (with appropriate renaming) ends here */
/**
* The following are used by dynamic and forced rules
*
* publish details of each algorithm and whether it is forced/fixed/locked in
* as you add methods/algorithms you must update this and the query/map routines
*
* this routine is called by the component only;
* it makes sure that the mca parameters are set to their initial values and perms.
* Modules do not call this: they call the forced_getvalues routine instead.
*/
int ompi_coll_tuned_reduce_intra_check_forced_init (coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices)
{
mca_base_var_enum_t *new_enum;
ompi_coll_tuned_forced_max_algorithms[REDUCE] = coll_tuned_reduce_algorithm_count;
(void) mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"reduce_algorithm_count",
"Number of reduce algorithms available",
MCA_BASE_VAR_TYPE_INT, NULL, 0,
MCA_BASE_VAR_FLAG_DEFAULT_ONLY,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_CONSTANT,
&coll_tuned_reduce_algorithm_count);
/* MPI_T: This variable should eventually be bound to a communicator */
coll_tuned_reduce_forced_algorithm = 0;
(void) mca_base_var_enum_create("coll_tuned_reduce_algorithms", reduce_algorithms, &new_enum);
mca_param_indices->algorithm_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"reduce_algorithm",
"Which reduce algorithm is used. Can be locked down to choice of: 0 ignore, 1 linear, 2 chain, 3 pipeline, 4 binary, 5 binomial, 6 in-order binary",
MCA_BASE_VAR_TYPE_INT, new_enum, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_reduce_forced_algorithm);
OBJ_RELEASE(new_enum);
if (mca_param_indices->algorithm_param_index < 0) {
return mca_param_indices->algorithm_param_index;
}
coll_tuned_reduce_segment_size = 0;
mca_param_indices->segsize_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"reduce_algorithm_segmentsize",
"Segment size in bytes used by default for reduce algorithms. Only has meaning if algorithm is forced and supports segmenting. 0 bytes means no segmentation.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_reduce_segment_size);
coll_tuned_reduce_tree_fanout = ompi_coll_tuned_init_tree_fanout; /* get system wide default */
mca_param_indices->tree_fanout_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"reduce_algorithm_tree_fanout",
"Fanout for n-tree used for reduce algorithms. Only has meaning if algorithm is forced and supports n-tree topo based operation.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_reduce_tree_fanout);
coll_tuned_reduce_chain_fanout = ompi_coll_tuned_init_chain_fanout; /* get system wide default */
mca_param_indices->chain_fanout_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"reduce_algorithm_chain_fanout",
"Fanout for chains used for reduce algorithms. Only has meaning if algorithm is forced and supports chain topo based operation.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_reduce_chain_fanout);
coll_tuned_reduce_max_requests = 0; /* no limit for reduce by default */
mca_param_indices->max_requests_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"reduce_algorithm_max_requests",
"Maximum number of outstanding send requests on leaf nodes. 0 means no limit.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_reduce_max_requests);
if (mca_param_indices->max_requests_param_index < 0) {
return mca_param_indices->max_requests_param_index;
}
if (coll_tuned_reduce_max_requests < 0) {
if( 0 == ompi_comm_rank( MPI_COMM_WORLD ) ) {
opal_output( 0, "Maximum outstanding requests must be a positive number or 0. Initializing to 0 (no limit).\n" );
}
coll_tuned_reduce_max_requests = 0;
}
return (MPI_SUCCESS);
}
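For reference, the parameters registered above are reachable from outside the library through Open MPI's usual OMPI_MCA_<name> environment convention. A hedged usage sketch (values are illustrative, and it assumes the forced-algorithm path also requires coll_tuned_use_dynamic_rules to be enabled and that the environment is set before MPI_Init reads the MCA parameters):

    #include <stdlib.h>
    #include <mpi.h>

    int main(int argc, char **argv)
    {
        /* must happen before MPI_Init, which reads MCA parameters */
        setenv("OMPI_MCA_coll_tuned_use_dynamic_rules", "1", 1);
        setenv("OMPI_MCA_coll_tuned_reduce_algorithm", "6", 1); /* in-order binary */

        MPI_Init(&argc, &argv);
        /* ... MPI_Reduce calls issued here use the forced algorithm ... */
        MPI_Finalize();
        return 0;
    }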
int ompi_coll_tuned_reduce_intra_do_forced(void *sbuf, void* rbuf, int count,
struct ompi_datatype_t *dtype,
struct ompi_op_t *op, int root,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data;
const int segsize = data->user_forced[REDUCE].segsize;
const int chain_fanout = data->user_forced[REDUCE].chain_fanout;
const int max_requests = data->user_forced[REDUCE].max_requests;
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:reduce_intra_do_forced selected algorithm %d",
data->user_forced[REDUCE].algorithm));
switch (data->user_forced[REDUCE].algorithm) {
case (0): return ompi_coll_tuned_reduce_intra_dec_fixed (sbuf, rbuf, count, dtype,
op, root, comm, module);
case (1): return ompi_coll_tuned_reduce_intra_basic_linear (sbuf, rbuf, count, dtype,
op, root, comm, module);
case (2): return ompi_coll_tuned_reduce_intra_chain (sbuf, rbuf, count, dtype,
op, root, comm, module,
segsize, chain_fanout, max_requests);
case (3): return ompi_coll_tuned_reduce_intra_pipeline (sbuf, rbuf, count, dtype,
op, root, comm, module,
segsize, max_requests);
case (4): return ompi_coll_tuned_reduce_intra_binary (sbuf, rbuf, count, dtype,
op, root, comm, module,
segsize, max_requests);
case (5): return ompi_coll_tuned_reduce_intra_binomial (sbuf, rbuf, count, dtype,
op, root, comm, module,
segsize, max_requests);
case (6): return ompi_coll_tuned_reduce_intra_in_order_binary(sbuf, rbuf, count, dtype,
op, root, comm, module,
segsize, max_requests);
default:
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:reduce_intra_do_forced attempt to select algorithm %d when only 0-%d is valid?",
data->user_forced[REDUCE].algorithm, ompi_coll_tuned_forced_max_algorithms[REDUCE]));
return (MPI_ERR_ARG);
} /* switch */
}
int ompi_coll_tuned_reduce_intra_do_this(void *sbuf, void* rbuf, int count,
struct ompi_datatype_t *dtype,
struct ompi_op_t *op, int root,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module,
int algorithm, int faninout,
int segsize, int max_requests )
{
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:reduce_intra_do_this selected algorithm %d topo faninout %d segsize %d",
algorithm, faninout, segsize));
switch (algorithm) {
case (0): return ompi_coll_tuned_reduce_intra_dec_fixed (sbuf, rbuf, count, dtype,
op, root, comm, module);
case (1): return ompi_coll_tuned_reduce_intra_basic_linear (sbuf, rbuf, count, dtype,
op, root, comm, module);
case (2): return ompi_coll_tuned_reduce_intra_chain (sbuf, rbuf, count, dtype,
op, root, comm, module,
segsize, faninout, max_requests);
case (3): return ompi_coll_tuned_reduce_intra_pipeline (sbuf, rbuf, count, dtype,
op, root, comm, module,
segsize, max_requests);
case (4): return ompi_coll_tuned_reduce_intra_binary (sbuf, rbuf, count, dtype,
op, root, comm, module,
segsize, max_requests);
case (5): return ompi_coll_tuned_reduce_intra_binomial (sbuf, rbuf, count, dtype,
op, root, comm, module,
segsize, max_requests);
case (6): return ompi_coll_tuned_reduce_intra_in_order_binary(sbuf, rbuf, count, dtype,
op, root, comm, module,
segsize, max_requests);
default:
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:reduce_intra_do_this attempt to select algorithm %d when only 0-%d is valid?",
algorithm, ompi_coll_tuned_forced_max_algorithms[REDUCE]));
return (MPI_ERR_ARG);
} /* switch */
}

View file

@ -3,7 +3,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology * University Research and Technology
* Corporation. All rights reserved. * Corporation. All rights reserved.
* Copyright (c) 2004-2014 The University of Tennessee and The University * Copyright (c) 2004-2015 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights * of Tennessee Research Foundation. All rights
* reserved. * reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -32,32 +32,16 @@
#include "ompi/mca/coll/base/coll_tags.h" #include "ompi/mca/coll/base/coll_tags.h"
#include "ompi/mca/pml/pml.h" #include "ompi/mca/pml/pml.h"
#include "ompi/op/op.h" #include "ompi/op/op.h"
#include "coll_tuned.h" #include "ompi/mca/coll/base/coll_base_functions.h"
#include "coll_tuned_topo.h" #include "coll_base_topo.h"
/* reduce_scatter algorithm variables */
static int coll_tuned_reduce_scatter_algorithm_count = 2;
static int coll_tuned_reduce_scatter_forced_algorithm = 0;
static int coll_tuned_reduce_scatter_segment_size = 0;
static int coll_tuned_reduce_scatter_tree_fanout;
static int coll_tuned_reduce_scatter_chain_fanout;
/* valid values for coll_tuned_reduce_scatter_forced_algorithm */
static mca_base_var_enum_value_t reduce_scatter_algorithms[] = {
{0, "ignore"},
{1, "non-overlapping"},
{2, "recursive_halfing"},
{3, "ring"},
{0, NULL}
};
/******************************************************************************* /*******************************************************************************
* ompi_coll_tuned_reduce_scatter_intra_nonoverlapping * ompi_coll_base_reduce_scatter_intra_nonoverlapping
* *
* This function just calls a reduce to rank 0, followed by an * This function just calls a reduce to rank 0, followed by an
* appropriate scatterv call. * appropriate scatterv call.
*/ */
int ompi_coll_tuned_reduce_scatter_intra_nonoverlapping(void *sbuf, void *rbuf, int ompi_coll_base_reduce_scatter_intra_nonoverlapping(void *sbuf, void *rbuf,
int *rcounts, int *rcounts,
struct ompi_datatype_t *dtype, struct ompi_datatype_t *dtype,
struct ompi_op_t *op, struct ompi_op_t *op,
@ -71,7 +55,7 @@ int ompi_coll_tuned_reduce_scatter_intra_nonoverlapping(void *sbuf, void *rbuf,
rank = ompi_comm_rank(comm); rank = ompi_comm_rank(comm);
size = ompi_comm_size(comm); size = ompi_comm_size(comm);
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:reduce_scatter_intra_nonoverlapping, rank %d", rank)); OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:reduce_scatter_intra_nonoverlapping, rank %d", rank));
for (i = 0, total_count = 0; i < size; i++) { total_count += rcounts[i]; } for (i = 0, total_count = 0; i < size; i++) { total_count += rcounts[i]; }
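Since the function body is mostly elided by the hunks, here is a self-contained sketch of the same non-overlapping semantics expressed with public MPI calls (reduce to rank 0, then scatterv the per-rank counts); it ignores MPI_IN_PLACE handling and assumes a zero lower bound on the datatype:

    #include <mpi.h>
    #include <stdlib.h>

    static int reduce_scatter_nonoverlapping(const void *sbuf, void *rbuf,
                                             const int *rcounts, MPI_Datatype dtype,
                                             MPI_Op op, MPI_Comm comm)
    {
        int i, rank, size, total = 0, err;
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);
        for (i = 0; i < size; i++) total += rcounts[i];

        int *displs = malloc(size * sizeof(int));
        displs[0] = 0;
        for (i = 1; i < size; i++) displs[i] = displs[i - 1] + rcounts[i - 1];

        void *tmp = NULL;
        if (0 == rank) {              /* only the root holds the full vector */
            MPI_Aint lb, extent;
            MPI_Type_get_extent(dtype, &lb, &extent);
            tmp = malloc((size_t)total * extent);
        }

        err = MPI_Reduce(sbuf, tmp, total, dtype, op, 0, comm);
        if (MPI_SUCCESS == err)
            err = MPI_Scatterv(tmp, rcounts, displs, dtype,
                               rbuf, rcounts[rank], dtype, 0, comm);
        free(displs);
        free(tmp);
        return err;
    }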
@ -138,7 +122,7 @@ int ompi_coll_tuned_reduce_scatter_intra_nonoverlapping(void *sbuf, void *rbuf,
* Limitation: - Works only for commutative operations. * Limitation: - Works only for commutative operations.
*/ */
int int
ompi_coll_tuned_reduce_scatter_intra_basic_recursivehalving(void *sbuf, ompi_coll_base_reduce_scatter_intra_basic_recursivehalving(void *sbuf,
void *rbuf, void *rbuf,
int *rcounts, int *rcounts,
struct ompi_datatype_t *dtype, struct ompi_datatype_t *dtype,
@ -156,7 +140,7 @@ ompi_coll_tuned_reduce_scatter_intra_basic_recursivehalving(void *sbuf,
rank = ompi_comm_rank(comm); rank = ompi_comm_rank(comm);
size = ompi_comm_size(comm); size = ompi_comm_size(comm);
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:reduce_scatter_intra_basic_recursivehalving, rank %d", rank)); OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:reduce_scatter_intra_basic_recursivehalving, rank %d", rank));
/* Find displacements and the like */ /* Find displacements and the like */
disps = (int*) malloc(sizeof(int) * size); disps = (int*) malloc(sizeof(int) * size);
@ -404,7 +388,7 @@ ompi_coll_tuned_reduce_scatter_intra_basic_recursivehalving(void *sbuf,
/* /*
* ompi_coll_tuned_reduce_scatter_intra_ring * ompi_coll_base_reduce_scatter_intra_ring
* *
* Function: Ring algorithm for reduce_scatter operation * Function: Ring algorithm for reduce_scatter operation
* Accepts: Same as MPI_Reduce_scatter() * Accepts: Same as MPI_Reduce_scatter()
@ -463,7 +447,7 @@ ompi_coll_tuned_reduce_scatter_intra_basic_recursivehalving(void *sbuf,
* *
*/ */
int int
ompi_coll_tuned_reduce_scatter_intra_ring(void *sbuf, void *rbuf, int *rcounts, ompi_coll_base_reduce_scatter_intra_ring(void *sbuf, void *rbuf, int *rcounts,
struct ompi_datatype_t *dtype, struct ompi_datatype_t *dtype,
struct ompi_op_t *op, struct ompi_op_t *op,
struct ompi_communicator_t *comm, struct ompi_communicator_t *comm,
@ -480,8 +464,8 @@ ompi_coll_tuned_reduce_scatter_intra_ring(void *sbuf, void *rbuf, int *rcounts,
size = ompi_comm_size(comm); size = ompi_comm_size(comm);
rank = ompi_comm_rank(comm); rank = ompi_comm_rank(comm);
OPAL_OUTPUT((ompi_coll_tuned_stream, OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"coll:tuned:reduce_scatter_intra_ring rank %d, size %d", "coll:base:reduce_scatter_intra_ring rank %d, size %d",
rank, size)); rank, size));
/* Determine the maximum number of elements per node, /* Determine the maximum number of elements per node,
@ -626,7 +610,7 @@ ompi_coll_tuned_reduce_scatter_intra_ring(void *sbuf, void *rbuf, int *rcounts,
return MPI_SUCCESS; return MPI_SUCCESS;
error_hndl: error_hndl:
OPAL_OUTPUT((ompi_coll_tuned_stream, "%s:%4d\tRank %d Error occurred %d\n", OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "%s:%4d\tRank %d Error occurred %d\n",
__FILE__, line, rank, ret)); __FILE__, line, rank, ret));
if (NULL != displs) free(displs); if (NULL != displs) free(displs);
if (NULL != accumbuf_free) free(accumbuf_free); if (NULL != accumbuf_free) free(accumbuf_free);
@ -634,139 +618,3 @@ ompi_coll_tuned_reduce_scatter_intra_ring(void *sbuf, void *rbuf, int *rcounts,
if (NULL != inbuf_free[1]) free(inbuf_free[1]); if (NULL != inbuf_free[1]) free(inbuf_free[1]);
return ret; return ret;
} }
/**
* The following are used by dynamic and forced rules
*
* publish details of each algorithm and whether it is forced/fixed/locked in
* as you add methods/algorithms you must update this and the query/map routines
*
* this routine is called by the component only;
* it makes sure that the mca parameters are set to their initial values and perms.
* Modules do not call this: they call the forced_getvalues routine instead.
*/
int ompi_coll_tuned_reduce_scatter_intra_check_forced_init (coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices)
{
mca_base_var_enum_t *new_enum;
ompi_coll_tuned_forced_max_algorithms[REDUCESCATTER] = coll_tuned_reduce_scatter_algorithm_count;
(void) mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"reduce_scatter_algorithm_count",
"Number of reduce_scatter algorithms available",
MCA_BASE_VAR_TYPE_INT, NULL, 0,
MCA_BASE_VAR_FLAG_DEFAULT_ONLY,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_CONSTANT,
&coll_tuned_reduce_scatter_algorithm_count);
/* MPI_T: This variable should eventually be bound to a communicator */
coll_tuned_reduce_scatter_forced_algorithm = 0;
(void) mca_base_var_enum_create("coll_tuned_reduce_scatter_algorithms", reduce_scatter_algorithms, &new_enum);
mca_param_indices->algorithm_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"reduce_scatter_algorithm",
"Which reduce reduce_scatter algorithm is used. Can be locked down to choice of: 0 ignore, 1 non-overlapping (Reduce + Scatterv), 2 recursive halving, 3 ring",
MCA_BASE_VAR_TYPE_INT, new_enum, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_reduce_scatter_forced_algorithm);
OBJ_RELEASE(new_enum);
if (mca_param_indices->algorithm_param_index < 0) {
return mca_param_indices->algorithm_param_index;
}
coll_tuned_reduce_scatter_segment_size = 0;
mca_param_indices->segsize_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"reduce_scatter_algorithm_segmentsize",
"Segment size in bytes used by default for reduce_scatter algorithms. Only has meaning if algorithm is forced and supports segmenting. 0 bytes means no segmentation.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_reduce_scatter_segment_size);
coll_tuned_reduce_scatter_tree_fanout = ompi_coll_tuned_init_tree_fanout; /* get system wide default */
mca_param_indices->tree_fanout_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"reduce_scatter_algorithm_tree_fanout",
"Fanout for n-tree used for reduce_scatter algorithms. Only has meaning if algorithm is forced and supports n-tree topo based operation.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_reduce_scatter_tree_fanout);
coll_tuned_reduce_scatter_chain_fanout = ompi_coll_tuned_init_chain_fanout; /* get system wide default */
mca_param_indices->chain_fanout_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"reduce_scatter_algorithm_chain_fanout",
"Fanout for chains used for reduce_scatter algorithms. Only has meaning if algorithm is forced and supports chain topo based operation.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_reduce_scatter_chain_fanout);
return (MPI_SUCCESS);
}
int ompi_coll_tuned_reduce_scatter_intra_do_forced(void *sbuf, void* rbuf,
int *rcounts,
struct ompi_datatype_t *dtype,
struct ompi_op_t *op,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data;
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:reduce_scatter_intra_do_forced selected algorithm %d",
data->user_forced[REDUCESCATTER].algorithm));
switch (data->user_forced[REDUCESCATTER].algorithm) {
case (0): return ompi_coll_tuned_reduce_scatter_intra_dec_fixed (sbuf, rbuf, rcounts,
dtype, op, comm, module);
case (1): return ompi_coll_tuned_reduce_scatter_intra_nonoverlapping(sbuf, rbuf, rcounts,
dtype, op, comm, module);
case (2): return ompi_coll_tuned_reduce_scatter_intra_basic_recursivehalving(sbuf, rbuf, rcounts,
dtype, op, comm, module);
case (3): return ompi_coll_tuned_reduce_scatter_intra_ring (sbuf, rbuf, rcounts,
dtype, op, comm, module);
default:
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:reduce_scatter_intra_do_forced attempt to select algorithm %d when only 0-%d is valid?",
data->user_forced[REDUCESCATTER].algorithm, ompi_coll_tuned_forced_max_algorithms[REDUCESCATTER]));
return (MPI_ERR_ARG);
} /* switch */
}
int ompi_coll_tuned_reduce_scatter_intra_do_this(void *sbuf, void* rbuf,
int *rcounts,
struct ompi_datatype_t *dtype,
struct ompi_op_t *op,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module,
int algorithm, int faninout, int segsize)
{
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:reduce_scatter_intra_do_this selected algorithm %d topo faninout %d segsize %d",
algorithm, faninout, segsize));
switch (algorithm) {
case (0): return ompi_coll_tuned_reduce_scatter_intra_dec_fixed (sbuf, rbuf, rcounts,
dtype, op, comm, module);
case (1): return ompi_coll_tuned_reduce_scatter_intra_nonoverlapping(sbuf, rbuf, rcounts,
dtype, op, comm, module);
case (2): return ompi_coll_tuned_reduce_scatter_intra_basic_recursivehalving(sbuf, rbuf, rcounts,
dtype, op, comm, module);
case (3): return ompi_coll_tuned_reduce_scatter_intra_ring (sbuf, rbuf, rcounts,
dtype, op, comm, module);
default:
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:reduce_scatter_intra_do_this attempt to select algorithm %d when only 0-%d is valid?",
algorithm, ompi_coll_tuned_forced_max_algorithms[REDUCESCATTER]));
return (MPI_ERR_ARG);
} /* switch */
}

View file

@ -3,7 +3,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology * University Research and Technology
* Corporation. All rights reserved. * Corporation. All rights reserved.
* Copyright (c) 2004-2014 The University of Tennessee and The University * Copyright (c) 2004-2015 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights * of Tennessee Research Foundation. All rights
* reserved. * reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -28,27 +28,12 @@
#include "ompi/mca/coll/coll.h" #include "ompi/mca/coll/coll.h"
#include "ompi/mca/coll/base/coll_tags.h" #include "ompi/mca/coll/base/coll_tags.h"
#include "ompi/mca/pml/pml.h" #include "ompi/mca/pml/pml.h"
#include "coll_tuned.h" #include "ompi/mca/coll/base/coll_base_functions.h"
#include "coll_tuned_topo.h" #include "coll_base_topo.h"
#include "coll_tuned_util.h" #include "coll_base_util.h"
/* scatter algorithm variables */
static int coll_tuned_scatter_algorithm_count = 2;
static int coll_tuned_scatter_forced_algorithm = 0;
static int coll_tuned_scatter_segment_size = 0;
static int coll_tuned_scatter_tree_fanout;
static int coll_tuned_scatter_chain_fanout;
/* valid values for coll_tuned_scatter_forced_algorithm */
static mca_base_var_enum_value_t scatter_algorithms[] = {
{0, "ignore"},
{1, "basic_linear"},
{2, "binomial"},
{0, NULL}
};
int int
ompi_coll_tuned_scatter_intra_binomial(void *sbuf, int scount, ompi_coll_base_scatter_intra_binomial(void *sbuf, int scount,
struct ompi_datatype_t *sdtype, struct ompi_datatype_t *sdtype,
void *rbuf, int rcount, void *rbuf, int rcount,
struct ompi_datatype_t *rdtype, struct ompi_datatype_t *rdtype,
@ -62,17 +47,17 @@ ompi_coll_tuned_scatter_intra_binomial(void *sbuf, int scount,
MPI_Status status; MPI_Status status;
MPI_Aint sextent, slb, strue_lb, strue_extent; MPI_Aint sextent, slb, strue_lb, strue_extent;
MPI_Aint rextent, rlb, rtrue_lb, rtrue_extent; MPI_Aint rextent, rlb, rtrue_lb, rtrue_extent;
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module; mca_coll_base_module_t *base_module = (mca_coll_base_module_t*) module;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data; mca_coll_base_comm_t *data = base_module->base_data;
size = ompi_comm_size(comm); size = ompi_comm_size(comm);
rank = ompi_comm_rank(comm); rank = ompi_comm_rank(comm);
OPAL_OUTPUT((ompi_coll_tuned_stream, OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"ompi_coll_tuned_scatter_intra_binomial rank %d", rank)); "ompi_coll_base_scatter_intra_binomial rank %d", rank));
/* create the binomial tree */ /* create the binomial tree */
COLL_TUNED_UPDATE_IN_ORDER_BMTREE( comm, tuned_module, root ); COLL_BASE_UPDATE_IN_ORDER_BMTREE( comm, base_module, root );
bmtree = data->cached_in_order_bmtree; bmtree = data->cached_in_order_bmtree;
ompi_datatype_get_extent(sdtype, &slb, &sextent); ompi_datatype_get_extent(sdtype, &slb, &sextent);
@ -182,7 +167,7 @@ ompi_coll_tuned_scatter_intra_binomial(void *sbuf, int scount,
if (NULL != tempbuf) if (NULL != tempbuf)
free(tempbuf); free(tempbuf);
OPAL_OUTPUT((ompi_coll_tuned_stream, "%s:%4d\tError occurred %d, rank %2d", OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "%s:%4d\tError occurred %d, rank %2d",
__FILE__, line, err, rank)); __FILE__, line, err, rank));
return err; return err;
} }
@ -191,12 +176,12 @@ ompi_coll_tuned_scatter_intra_binomial(void *sbuf, int scount,
* Linear functions are copied from the BASIC coll module * Linear functions are copied from the BASIC coll module
* they do not segment the message and are simple implementations * they do not segment the message and are simple implementations
* but for some small number of nodes and/or small data sizes they * but for some small number of nodes and/or small data sizes they
* are just as fast as tuned/tree based segmenting operations * are just as fast as base/tree based segmenting operations
* and as such may be selected by the decision functions * and as such may be selected by the decision functions
* These are copied into this module due to the way we select modules * These are copied into this module due to the way we select modules
* in V1. i.e. in V2 we will handle this differently and so will not * in V1. i.e. in V2 we will handle this differently and so will not
* have to duplicate code. * have to duplicate code.
* JPG following the examples from other coll_tuned implementations. Dec06. * JPG following the examples from other coll_base implementations. Dec06.
*/ */
/* copied function (with appropriate renaming) starts here */ /* copied function (with appropriate renaming) starts here */
@ -208,7 +193,7 @@ ompi_coll_tuned_scatter_intra_binomial(void *sbuf, int scount,
* Returns: - MPI_SUCCESS or error code * Returns: - MPI_SUCCESS or error code
*/ */
int int
ompi_coll_tuned_scatter_intra_basic_linear(void *sbuf, int scount, ompi_coll_base_scatter_intra_basic_linear(void *sbuf, int scount,
struct ompi_datatype_t *sdtype, struct ompi_datatype_t *sdtype,
void *rbuf, int rcount, void *rbuf, int rcount,
struct ompi_datatype_t *rdtype, struct ompi_datatype_t *rdtype,
@ -269,153 +254,3 @@ ompi_coll_tuned_scatter_intra_basic_linear(void *sbuf, int scount,
/* copied function (with appropriate renaming) ends here */ /* copied function (with appropriate renaming) ends here */
/* The following are used by dynamic and forced rules */
/* publish details of each algorithm and whether it is forced/fixed/locked in */
/* as you add methods/algorithms you must update this and the query/map
   routines */
/* this routine is called by the component only */
/* it makes sure that the mca parameters are set to their initial values
   and perms */
/* modules do not call this: they call the forced_getvalues routine instead */
int
ompi_coll_tuned_scatter_intra_check_forced_init(coll_tuned_force_algorithm_mca_param_indices_t *mca_param_indices)
{
mca_base_var_enum_t *new_enum;
ompi_coll_tuned_forced_max_algorithms[SCATTER] = coll_tuned_scatter_algorithm_count;
(void) mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"scatter_algorithm_count",
"Number of scatter algorithms available",
MCA_BASE_VAR_TYPE_INT, NULL, 0,
MCA_BASE_VAR_FLAG_DEFAULT_ONLY,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_CONSTANT,
&coll_tuned_scatter_algorithm_count);
/* MPI_T: This variable should eventually be bound to a communicator */
coll_tuned_scatter_forced_algorithm = 0;
(void) mca_base_var_enum_create("coll_tuned_scatter_algorithms", scatter_algorithms, &new_enum);
mca_param_indices->algorithm_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"scatter_algorithm",
"Which scatter algorithm is used. Can be locked down to choice of: 0 ignore, 1 basic linear, 2 binomial.",
MCA_BASE_VAR_TYPE_INT, new_enum, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_scatter_forced_algorithm);
OBJ_RELEASE(new_enum);
if (mca_param_indices->algorithm_param_index < 0) {
return mca_param_indices->algorithm_param_index;
}
coll_tuned_scatter_segment_size = 0;
mca_param_indices->segsize_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"scatter_algorithm_segmentsize",
"Segment size in bytes used by default for scatter algorithms. Only has meaning if algorithm is forced and supports segmenting. 0 bytes means no segmentation. Currently, available algorithms do not support segmentation.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_scatter_segment_size);
coll_tuned_scatter_tree_fanout = ompi_coll_tuned_init_tree_fanout; /* get system wide default */
mca_param_indices->tree_fanout_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"scatter_algorithm_tree_fanout",
"Fanout for n-tree used for scatter algorithms. Only has meaning if algorithm is forced and supports n-tree topo based operation. Currently, available algorithms do not support n-tree topologies.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_scatter_tree_fanout);
coll_tuned_scatter_chain_fanout = ompi_coll_tuned_init_chain_fanout; /* get system wide default */
mca_param_indices->chain_fanout_param_index =
mca_base_component_var_register(&mca_coll_tuned_component.super.collm_version,
"scatter_algorithm_chain_fanout",
"Fanout for chains used for scatter algorithms. Only has meaning if algorithm is forced and supports chain topo based operation. Currently, available algorithms do not support chain topologies.",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_5,
MCA_BASE_VAR_SCOPE_READONLY,
&coll_tuned_scatter_chain_fanout);
return (MPI_SUCCESS);
}
int
ompi_coll_tuned_scatter_intra_do_forced(void *sbuf, int scount,
struct ompi_datatype_t *sdtype,
void* rbuf, int rcount,
struct ompi_datatype_t *rdtype,
int root,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module;
mca_coll_tuned_comm_t *data = tuned_module->tuned_data;
OPAL_OUTPUT((ompi_coll_tuned_stream,
"coll:tuned:scatter_intra_do_forced selected algorithm %d",
data->user_forced[SCATTER].algorithm));
switch (data->user_forced[SCATTER].algorithm) {
case (0):
return ompi_coll_tuned_scatter_intra_dec_fixed (sbuf, scount, sdtype,
rbuf, rcount, rdtype,
root, comm, module);
case (1):
return ompi_coll_tuned_scatter_intra_basic_linear (sbuf, scount, sdtype,
rbuf, rcount, rdtype,
root, comm, module);
case (2):
return ompi_coll_tuned_scatter_intra_binomial(sbuf, scount, sdtype,
rbuf, rcount, rdtype,
root, comm, module);
default:
OPAL_OUTPUT((ompi_coll_tuned_stream,
"coll:tuned:scatter_intra_do_forced attempt to select algorithm %d when only 0-%d is valid?",
data->user_forced[SCATTER].algorithm,
ompi_coll_tuned_forced_max_algorithms[SCATTER]));
return (MPI_ERR_ARG);
} /* switch */
}
int
ompi_coll_tuned_scatter_intra_do_this(void *sbuf, int scount,
struct ompi_datatype_t *sdtype,
void* rbuf, int rcount,
struct ompi_datatype_t *rdtype,
int root,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module,
int algorithm, int faninout, int segsize)
{
OPAL_OUTPUT((ompi_coll_tuned_stream,
"coll:tuned:scatter_intra_do_this selected algorithm %d topo faninout %d segsize %d",
algorithm, faninout, segsize));
switch (algorithm) {
case (0):
return ompi_coll_tuned_scatter_intra_dec_fixed (sbuf, scount, sdtype,
rbuf, rcount, rdtype,
root, comm, module);
case (1):
return ompi_coll_tuned_scatter_intra_basic_linear (sbuf, scount, sdtype,
rbuf, rcount, rdtype,
root, comm, module);
case (2):
return ompi_coll_tuned_scatter_intra_binomial(sbuf, scount, sdtype,
rbuf, rcount, rdtype,
root, comm, module);
default:
OPAL_OUTPUT((ompi_coll_tuned_stream,
"coll:tuned:scatter_intra_do_this attempt to select algorithm %d when only 0-%d is valid?",
algorithm,
ompi_coll_tuned_forced_max_algorithms[SCATTER]));
return (MPI_ERR_ARG);
} /* switch */
}

View file

@ -25,8 +25,8 @@
#include "ompi/constants.h" #include "ompi/constants.h"
#include "ompi/communicator/communicator.h" #include "ompi/communicator/communicator.h"
#include "ompi/mca/coll/base/coll_tags.h" #include "ompi/mca/coll/base/coll_tags.h"
#include "coll_tuned.h" #include "ompi/mca/coll/base/coll_base_functions.h"
#include "coll_tuned_topo.h" #include "coll_base_topo.h"
/* /*
* Some static helpers. * Some static helpers.
@ -75,7 +75,7 @@ static int calculate_num_nodes_up_to_level( int fanout, int level )
*/ */
ompi_coll_tree_t* ompi_coll_tree_t*
ompi_coll_tuned_topo_build_tree( int fanout, ompi_coll_base_topo_build_tree( int fanout,
struct ompi_communicator_t* comm, struct ompi_communicator_t* comm,
int root ) int root )
{ {
@ -85,14 +85,14 @@ ompi_coll_tuned_topo_build_tree( int fanout,
int slimit; /* total number of nodes on levels above me */ int slimit; /* total number of nodes on levels above me */
ompi_coll_tree_t* tree; ompi_coll_tree_t* tree;
OPAL_OUTPUT((ompi_coll_tuned_stream, "coll:tuned:topo_build_tree Building fo %d rt %d", fanout, root)); OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "coll:base:topo_build_tree Building fo %d rt %d", fanout, root));
if (fanout<1) { if (fanout<1) {
OPAL_OUTPUT((ompi_coll_tuned_stream, "coll:tuned:topo_build_tree invalid fanout %d", fanout)); OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "coll:base:topo_build_tree invalid fanout %d", fanout));
return NULL; return NULL;
} }
if (fanout>MAXTREEFANOUT) { if (fanout>MAXTREEFANOUT) {
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:topo_build_tree invalid fanout %d bigger than max %d", fanout, MAXTREEFANOUT)); OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:topo_build_tree invalid fanout %d bigger than max %d", fanout, MAXTREEFANOUT));
return NULL; return NULL;
} }
@ -104,7 +104,7 @@ ompi_coll_tuned_topo_build_tree( int fanout,
tree = (ompi_coll_tree_t*)malloc(sizeof(ompi_coll_tree_t)); tree = (ompi_coll_tree_t*)malloc(sizeof(ompi_coll_tree_t));
if (!tree) { if (!tree) {
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:topo_build_tree PANIC::out of memory")); OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:topo_build_tree PANIC::out of memory"));
return NULL; return NULL;
} }
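The shape being cached here can be summarized with ordinary k-ary tree arithmetic on root-shifted ranks. The sketch below (helper name hypothetical) illustrates that shape only; it is not the level-based construction code this file uses via calculate_num_nodes_up_to_level:

    /* parent/children of `rank` in a fanout-ary tree rooted at `root`
       (illustrative; `children` must hold at least `fanout` entries) */
    static void kary_relatives(int rank, int size, int root, int fanout,
                               int *parent, int *children, int *nchildren)
    {
        int srank = (rank - root + size) % size;  /* shifted rank: root maps to 0 */
        *parent = (0 == srank) ? -1
                : ((srank - 1) / fanout + root) % size;
        *nchildren = 0;
        for (int i = 1; i <= fanout; i++) {
            int child = srank * fanout + i;
            if (child < size)
                children[(*nchildren)++] = (child + root) % size;
        }
    }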
@ -189,7 +189,7 @@ ompi_coll_tuned_topo_build_tree( int fanout,
* 4 0 * 4 0
*/ */
ompi_coll_tree_t* ompi_coll_tree_t*
ompi_coll_tuned_topo_build_in_order_bintree( struct ompi_communicator_t* comm ) ompi_coll_base_topo_build_in_order_bintree( struct ompi_communicator_t* comm )
{ {
int rank, size, myrank, rightsize, delta, parent, lchild, rchild; int rank, size, myrank, rightsize, delta, parent, lchild, rchild;
ompi_coll_tree_t* tree; ompi_coll_tree_t* tree;
@ -202,8 +202,8 @@ ompi_coll_tuned_topo_build_in_order_bintree( struct ompi_communicator_t* comm )
tree = (ompi_coll_tree_t*)malloc(sizeof(ompi_coll_tree_t)); tree = (ompi_coll_tree_t*)malloc(sizeof(ompi_coll_tree_t));
if (!tree) { if (!tree) {
OPAL_OUTPUT((ompi_coll_tuned_stream, OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"coll:tuned:topo_build_tree PANIC::out of memory")); "coll:base:topo_build_tree PANIC::out of memory"));
return NULL; return NULL;
} }
@ -220,8 +220,8 @@ ompi_coll_tuned_topo_build_in_order_bintree( struct ompi_communicator_t* comm )
tree->tree_nextsize = 0; tree->tree_nextsize = 0;
tree->tree_next[0] = -1; tree->tree_next[0] = -1;
tree->tree_next[1] = -1; tree->tree_next[1] = -1;
OPAL_OUTPUT((ompi_coll_tuned_stream, OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"coll:tuned:topo_build_in_order_tree Building fo %d rt %d", "coll:base:topo_build_in_order_tree Building fo %d rt %d",
tree->tree_fanout, tree->tree_root)); tree->tree_fanout, tree->tree_root));
/* /*
@ -294,7 +294,7 @@ ompi_coll_tuned_topo_build_in_order_bintree( struct ompi_communicator_t* comm )
return tree; return tree;
} }
int ompi_coll_tuned_topo_destroy_tree( ompi_coll_tree_t** tree ) int ompi_coll_base_topo_destroy_tree( ompi_coll_tree_t** tree )
{ {
ompi_coll_tree_t *ptr; ompi_coll_tree_t *ptr;
@ -323,13 +323,13 @@ int ompi_coll_tuned_topo_destroy_tree( ompi_coll_tree_t** tree )
* 7 * 7
*/ */
ompi_coll_tree_t* ompi_coll_tree_t*
ompi_coll_tuned_topo_build_bmtree( struct ompi_communicator_t* comm, ompi_coll_base_topo_build_bmtree( struct ompi_communicator_t* comm,
int root ) int root )
{ {
int childs = 0, rank, size, mask = 1, index, remote, i; int childs = 0, rank, size, mask = 1, index, remote, i;
ompi_coll_tree_t *bmtree; ompi_coll_tree_t *bmtree;
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:topo:build_bmtree rt %d", root)); OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:topo:build_bmtree rt %d", root));
/* /*
* Get size and rank of the process in this communicator * Get size and rank of the process in this communicator
@ -341,7 +341,7 @@ ompi_coll_tuned_topo_build_bmtree( struct ompi_communicator_t* comm,
bmtree = (ompi_coll_tree_t*)malloc(sizeof(ompi_coll_tree_t)); bmtree = (ompi_coll_tree_t*)malloc(sizeof(ompi_coll_tree_t));
if (!bmtree) { if (!bmtree) {
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:topo:build_bmtree PANIC out of memory")); OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:topo:build_bmtree PANIC out of memory"));
return NULL; return NULL;
} }
@ -372,7 +372,7 @@ ompi_coll_tuned_topo_build_bmtree( struct ompi_communicator_t* comm,
remote += root; remote += root;
if( remote >= size ) remote -= size; if( remote >= size ) remote -= size;
if (childs==MAXTREEFANOUT) { if (childs==MAXTREEFANOUT) {
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:topo:build_bmtree max fanout incorrect %d needed %d", MAXTREEFANOUT, childs)); OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:topo:build_bmtree max fanout incorrect %d needed %d", MAXTREEFANOUT, childs));
free(bmtree); free(bmtree);
return NULL; return NULL;
} }
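The mask loop above is the classic binomial-tree walk. As a compact, hedged illustration of the relationship it computes (helper name hypothetical, and without the MAXTREEFANOUT cap the real builder enforces):

    /* parent/children of `rank` in a binomial tree rooted at `root` */
    static void bmtree_relatives(int rank, int size, int root,
                                 int *parent, int *children, int *nchildren)
    {
        int vrank = (rank - root + size) % size;  /* virtual rank: root maps to 0 */
        *parent = -1;
        *nchildren = 0;
        for (int mask = 1; mask < size; mask <<= 1) {
            if (vrank & mask) {                   /* lowest set bit gives the parent */
                *parent = ((vrank ^ mask) + root) % size;
                break;
            }
            int child = vrank | mask;
            if (child < size)
                children[(*nchildren)++] = (child + root) % size;
        }
    }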
@ -400,13 +400,13 @@ ompi_coll_tuned_topo_build_bmtree( struct ompi_communicator_t* comm,
* 7 * 7
*/ */
ompi_coll_tree_t* ompi_coll_tree_t*
ompi_coll_tuned_topo_build_in_order_bmtree( struct ompi_communicator_t* comm, ompi_coll_base_topo_build_in_order_bmtree( struct ompi_communicator_t* comm,
int root ) int root )
{ {
int childs = 0, rank, vrank, size, mask = 1, remote, i; int childs = 0, rank, vrank, size, mask = 1, remote, i;
ompi_coll_tree_t *bmtree; ompi_coll_tree_t *bmtree;
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:topo:build_in_order_bmtree rt %d", root)); OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:topo:build_in_order_bmtree rt %d", root));
/* /*
* Get size and rank of the process in this communicator * Get size and rank of the process in this communicator
@ -418,7 +418,7 @@ ompi_coll_tuned_topo_build_in_order_bmtree( struct ompi_communicator_t* comm,
bmtree = (ompi_coll_tree_t*)malloc(sizeof(ompi_coll_tree_t)); bmtree = (ompi_coll_tree_t*)malloc(sizeof(ompi_coll_tree_t));
if (!bmtree) { if (!bmtree) {
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:topo:build_bmtree PANIC out of memory")); OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:topo:build_bmtree PANIC out of memory"));
return NULL; return NULL;
} }
@ -442,8 +442,8 @@ ompi_coll_tuned_topo_build_in_order_bmtree( struct ompi_communicator_t* comm,
bmtree->tree_next[childs] = (remote + root) % size; bmtree->tree_next[childs] = (remote + root) % size;
childs++; childs++;
if (childs==MAXTREEFANOUT) { if (childs==MAXTREEFANOUT) {
OPAL_OUTPUT((ompi_coll_tuned_stream, OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
"coll:tuned:topo:build_bmtree max fanout incorrect %d needed %d", "coll:base:topo:build_bmtree max fanout incorrect %d needed %d",
MAXTREEFANOUT, childs)); MAXTREEFANOUT, childs));
free (bmtree); free (bmtree);
return NULL; return NULL;
@ -459,14 +459,14 @@ ompi_coll_tuned_topo_build_in_order_bmtree( struct ompi_communicator_t* comm,
ompi_coll_tree_t* ompi_coll_tree_t*
ompi_coll_tuned_topo_build_chain( int fanout, ompi_coll_base_topo_build_chain( int fanout,
struct ompi_communicator_t* comm, struct ompi_communicator_t* comm,
int root ) int root )
{ {
int i, maxchainlen, mark, head, len, rank, size, srank /* shifted rank */; int i, maxchainlen, mark, head, len, rank, size, srank /* shifted rank */;
ompi_coll_tree_t *chain; ompi_coll_tree_t *chain;
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:topo:build_chain fo %d rt %d", fanout, root)); OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:topo:build_chain fo %d rt %d", fanout, root));
/* /*
* Get size and rank of the process in this communicator * Get size and rank of the process in this communicator
@ -475,11 +475,11 @@ ompi_coll_tuned_topo_build_chain( int fanout,
rank = ompi_comm_rank(comm); rank = ompi_comm_rank(comm);
if( fanout < 1 ) { if( fanout < 1 ) {
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:topo:build_chain WARNING invalid fanout of ZERO, forcing to 1 (pipeline)!")); OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:topo:build_chain WARNING invalid fanout of ZERO, forcing to 1 (pipeline)!"));
fanout = 1; fanout = 1;
} }
if (fanout>MAXTREEFANOUT) { if (fanout>MAXTREEFANOUT) {
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:topo:build_chain WARNING invalid fanout %d bigger than max %d, forcing to max!", fanout, MAXTREEFANOUT)); OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:topo:build_chain WARNING invalid fanout %d bigger than max %d, forcing to max!", fanout, MAXTREEFANOUT));
fanout = MAXTREEFANOUT; fanout = MAXTREEFANOUT;
} }
@ -488,7 +488,7 @@ ompi_coll_tuned_topo_build_chain( int fanout,
*/ */
chain = (ompi_coll_tree_t*)malloc( sizeof(ompi_coll_tree_t) ); chain = (ompi_coll_tree_t*)malloc( sizeof(ompi_coll_tree_t) );
if (!chain) { if (!chain) {
OPAL_OUTPUT((ompi_coll_tuned_stream,"coll:tuned:topo:build_chain PANIC out of memory")); OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"coll:base:topo:build_chain PANIC out of memory"));
fflush(stdout); fflush(stdout);
return NULL; return NULL;
} }
@ -603,17 +603,62 @@ ompi_coll_tuned_topo_build_chain( int fanout,
return chain; return chain;
} }
int ompi_coll_tuned_topo_dump_tree (ompi_coll_tree_t* tree, int rank) int ompi_coll_base_topo_dump_tree (ompi_coll_tree_t* tree, int rank)
{ {
int i; int i;
OPAL_OUTPUT((ompi_coll_tuned_stream, "coll:tuned:topo:topo_dump_tree %1d tree root %d" OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "coll:base:topo:topo_dump_tree %1d tree root %d"
" fanout %d BM %1d nextsize %d prev %d", " fanout %d BM %1d nextsize %d prev %d",
rank, tree->tree_root, tree->tree_fanout, tree->tree_bmtree, rank, tree->tree_root, tree->tree_fanout, tree->tree_bmtree,
tree->tree_nextsize, tree->tree_prev)); tree->tree_nextsize, tree->tree_prev));
if( tree->tree_nextsize ) { if( tree->tree_nextsize ) {
for( i = 0; i < tree->tree_nextsize; i++ ) for( i = 0; i < tree->tree_nextsize; i++ )
OPAL_OUTPUT((ompi_coll_tuned_stream,"[%1d] %d", i, tree->tree_next[i])); OPAL_OUTPUT((ompi_coll_base_framework.framework_output,"[%1d] %d", i, tree->tree_next[i]));
} }
return (0); return (0);
} }
mca_coll_base_comm_t* ompi_coll_base_topo_construct( mca_coll_base_comm_t* data )
{
if( NULL == data ) {
data = (mca_coll_base_comm_t*)calloc(1, sizeof(mca_coll_base_comm_t));
}
return data;
}
void ompi_coll_base_topo_destruct( mca_coll_base_comm_t* data )
{
if(NULL == data) return;
#if OPAL_ENABLE_DEBUG
/* Reset the reqs to NULL/0 -- they'll be freed as part of freeing
the general c_coll_selected_data */
data->mcct_reqs = NULL;
data->mcct_num_reqs = 0;
#endif
/* free any cached information that has been allocated */
if (data->cached_ntree) { /* destroy general tree if defined */
ompi_coll_base_topo_destroy_tree (&data->cached_ntree);
}
if (data->cached_bintree) { /* destroy bintree if defined */
ompi_coll_base_topo_destroy_tree (&data->cached_bintree);
}
if (data->cached_bmtree) { /* destroy bmtree if defined */
ompi_coll_base_topo_destroy_tree (&data->cached_bmtree);
}
if (data->cached_in_order_bmtree) { /* destroy in-order bmtree if defined */
ompi_coll_base_topo_destroy_tree (&data->cached_in_order_bmtree);
}
if (data->cached_chain) { /* destroy general chain if defined */
ompi_coll_base_topo_destroy_tree (&data->cached_chain);
}
if (data->cached_pipeline) { /* destroy pipeline if defined */
ompi_coll_base_topo_destroy_tree (&data->cached_pipeline);
}
if (data->cached_in_order_bintree) { /* destroy in order bintree if defined */
ompi_coll_base_topo_destroy_tree (&data->cached_in_order_bintree);
}
free(data);
}
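A hedged pairing sketch for the new construct/destruct helpers: a component would allocate the shared cache when its module is enabled on a communicator and release it when disabled. The hook names and placement below are assumptions for illustration, not the commit's own wiring:

    /* hypothetical module enable/disable hooks */
    static int my_module_enable(mca_coll_base_module_t *module,
                                struct ompi_communicator_t *comm)
    {
        module->base_data = ompi_coll_base_topo_construct(NULL);
        return (NULL == module->base_data)
               ? OMPI_ERR_OUT_OF_RESOURCE : OMPI_SUCCESS;
    }

    static int my_module_disable(mca_coll_base_module_t *module,
                                 struct ompi_communicator_t *comm)
    {
        ompi_coll_base_topo_destruct(module->base_data);  /* frees cached trees */
        module->base_data = NULL;
        return OMPI_SUCCESS;
    }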

View file

@ -16,8 +16,8 @@
* $HEADER$ * $HEADER$
*/ */
#ifndef MCA_COLL_TUNED_TOPO_H_HAS_BEEN_INCLUDED #ifndef MCA_COLL_BASE_TOPO_H_HAS_BEEN_INCLUDED
#define MCA_COLL_TUNED_TOPO_H_HAS_BEEN_INCLUDED #define MCA_COLL_BASE_TOPO_H_HAS_BEEN_INCLUDED
#include "ompi_config.h" #include "ompi_config.h"
@ -35,29 +35,28 @@ typedef struct ompi_coll_tree_t {
} ompi_coll_tree_t; } ompi_coll_tree_t;
ompi_coll_tree_t* ompi_coll_tree_t*
ompi_coll_tuned_topo_build_tree( int fanout, ompi_coll_base_topo_build_tree( int fanout,
struct ompi_communicator_t* com, struct ompi_communicator_t* com,
int root ); int root );
ompi_coll_tree_t* ompi_coll_tree_t*
ompi_coll_tuned_topo_build_in_order_bintree( struct ompi_communicator_t* comm ); ompi_coll_base_topo_build_in_order_bintree( struct ompi_communicator_t* comm );
ompi_coll_tree_t* ompi_coll_tree_t*
ompi_coll_tuned_topo_build_bmtree( struct ompi_communicator_t* comm, ompi_coll_base_topo_build_bmtree( struct ompi_communicator_t* comm,
int root ); int root );
ompi_coll_tree_t* ompi_coll_tree_t*
ompi_coll_tuned_topo_build_in_order_bmtree( struct ompi_communicator_t* comm, ompi_coll_base_topo_build_in_order_bmtree( struct ompi_communicator_t* comm,
int root ); int root );
ompi_coll_tree_t* ompi_coll_tree_t*
ompi_coll_tuned_topo_build_chain( int fanout, ompi_coll_base_topo_build_chain( int fanout,
struct ompi_communicator_t* com, struct ompi_communicator_t* com,
int root ); int root );
int ompi_coll_tuned_topo_destroy_tree( ompi_coll_tree_t** tree ); int ompi_coll_base_topo_destroy_tree( ompi_coll_tree_t** tree );
/* debugging stuff, will be removed later */ /* debugging stuff, will be removed later */
int ompi_coll_tuned_topo_dump_tree (ompi_coll_tree_t* tree, int rank); int ompi_coll_base_topo_dump_tree (ompi_coll_tree_t* tree, int rank);
END_C_DECLS END_C_DECLS
#endif /* MCA_COLL_TUNED_TOPO_H_HAS_BEEN_INCLUDED */ #endif /* MCA_COLL_BASE_TOPO_H_HAS_BEEN_INCLUDED */

View file

@ -2,7 +2,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology * University Research and Technology
* Corporation. All rights reserved. * Corporation. All rights reserved.
* Copyright (c) 2004-2014 The University of Tennessee and The University * Copyright (c) 2004-2015 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights * of Tennessee Research Foundation. All rights
* reserved. * reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -19,17 +19,17 @@
*/ */
#include "ompi_config.h" #include "ompi_config.h"
#include "coll_tuned.h"
#include "mpi.h" #include "mpi.h"
#include "ompi/constants.h" #include "ompi/constants.h"
#include "ompi/datatype/ompi_datatype.h" #include "ompi/datatype/ompi_datatype.h"
#include "ompi/communicator/communicator.h" #include "ompi/communicator/communicator.h"
#include "ompi/mca/coll/base/coll_tags.h" #include "ompi/mca/coll/base/coll_tags.h"
#include "ompi/mca/coll/base/coll_base_functions.h"
#include "ompi/mca/pml/pml.h" #include "ompi/mca/pml/pml.h"
#include "coll_tuned_util.h" #include "coll_base_util.h"
int ompi_coll_tuned_sendrecv_nonzero_actual( void* sendbuf, size_t scount, int ompi_coll_base_sendrecv_nonzero_actual( void* sendbuf, size_t scount,
ompi_datatype_t* sdatatype, ompi_datatype_t* sdatatype,
int dest, int stag, int dest, int stag,
void* recvbuf, size_t rcount, void* recvbuf, size_t rcount,
@ -91,14 +91,14 @@ int ompi_coll_tuned_sendrecv_nonzero_actual( void* sendbuf, size_t scount,
*status = statuses[err_index]; *status = statuses[err_index];
} }
err = statuses[err_index].MPI_ERROR; err = statuses[err_index].MPI_ERROR;
OPAL_OUTPUT ((ompi_coll_tuned_stream, "%s:%d: Error %d occurred in the %s" OPAL_OUTPUT ((ompi_coll_base_framework.framework_output, "%s:%d: Error %d occurred in the %s"
" stage of ompi_coll_tuned_sendrecv_zero\n", " stage of ompi_coll_base_sendrecv_zero\n",
__FILE__, line, err, (0 == err_index ? "receive" : "send"))); __FILE__, line, err, (0 == err_index ? "receive" : "send")));
} else { } else {
/* Error discovered during the posting of the irecv or isend, /* Error discovered during the posting of the irecv or isend,
* and no status is available. * and no status is available.
*/ */
OPAL_OUTPUT ((ompi_coll_tuned_stream, "%s:%d: Error %d occurred\n", OPAL_OUTPUT ((ompi_coll_base_framework.framework_output, "%s:%d: Error %d occurred\n",
__FILE__, line, err)); __FILE__, line, err));
if (MPI_STATUS_IGNORE != status) { if (MPI_STATUS_IGNORE != status) {
status->MPI_ERROR = err; status->MPI_ERROR = err;

View file

@ -18,8 +18,8 @@
* $HEADER$ * $HEADER$
*/ */
#ifndef MCA_COLL_TUNED_UTIL_EXPORT_H #ifndef MCA_COLL_BASE_UTIL_EXPORT_H
#define MCA_COLL_TUNED_UTIL_EXPORT_H #define MCA_COLL_BASE_UTIL_EXPORT_H
#include "ompi_config.h" #include "ompi_config.h"
@ -36,7 +36,7 @@ BEGIN_C_DECLS
* If one of the communications results in a zero-byte message the * If one of the communications results in a zero-byte message the
* communication is ignored, and no message will cross to the peer. * communication is ignored, and no message will cross to the peer.
*/ */
int ompi_coll_tuned_sendrecv_nonzero_actual( void* sendbuf, size_t scount, int ompi_coll_base_sendrecv_nonzero_actual( void* sendbuf, size_t scount,
ompi_datatype_t* sdatatype, ompi_datatype_t* sdatatype,
int dest, int stag, int dest, int stag,
void* recvbuf, size_t rcount, void* recvbuf, size_t rcount,
@ -53,7 +53,7 @@ int ompi_coll_tuned_sendrecv_nonzero_actual( void* sendbuf, size_t scount,
* communications. * communications.
*/ */
static inline int static inline int
ompi_coll_tuned_sendrecv( void* sendbuf, size_t scount, ompi_datatype_t* sdatatype, ompi_coll_base_sendrecv( void* sendbuf, size_t scount, ompi_datatype_t* sdatatype,
int dest, int stag, int dest, int stag,
void* recvbuf, size_t rcount, ompi_datatype_t* rdatatype, void* recvbuf, size_t rcount, ompi_datatype_t* rdatatype,
int source, int rtag, int source, int rtag,
@ -64,13 +64,13 @@ ompi_coll_tuned_sendrecv( void* sendbuf, size_t scount, ompi_datatype_t* sdataty
return (int) ompi_datatype_sndrcv(sendbuf, (int32_t) scount, sdatatype, return (int) ompi_datatype_sndrcv(sendbuf, (int32_t) scount, sdatatype,
recvbuf, (int32_t) rcount, rdatatype); recvbuf, (int32_t) rcount, rdatatype);
} }
return ompi_coll_tuned_sendrecv_nonzero_actual (sendbuf, scount, sdatatype, return ompi_coll_base_sendrecv_nonzero_actual (sendbuf, scount, sdatatype,
dest, stag, dest, stag,
recvbuf, rcount, rdatatype, recvbuf, rcount, rdatatype,
source, rtag, comm, status); source, rtag, comm, status);
} }
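The zero-byte behavior documented above can also be reproduced with public MPI calls. This stand-in (not the helper's actual nonblocking irecv/isend implementation) suppresses the empty side by redirecting it to MPI_PROC_NULL, so no message crosses to the peer:

    #include <mpi.h>

    static int sendrecv_nonzero(void *sbuf, int scount, MPI_Datatype sdt,
                                int dest, int stag,
                                void *rbuf, int rcount, MPI_Datatype rdt,
                                int source, int rtag,
                                MPI_Comm comm, MPI_Status *status)
    {
        if (0 == scount) dest   = MPI_PROC_NULL;  /* nothing to send */
        if (0 == rcount) source = MPI_PROC_NULL;  /* nothing to receive */
        return MPI_Sendrecv(sbuf, scount, sdt, dest, stag,
                            rbuf, rcount, rdt, source, rtag, comm, status);
    }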
END_C_DECLS END_C_DECLS
#endif /* MCA_COLL_TUNED_UTIL_EXPORT_H */ #endif /* MCA_COLL_BASE_UTIL_EXPORT_H */

View file

@ -470,6 +470,9 @@ struct mca_coll_base_module_2_1_0_t {
be used for the given communicator */ be used for the given communicator */
mca_coll_base_module_disable_1_1_0_fn_t coll_module_disable; mca_coll_base_module_disable_1_1_0_fn_t coll_module_disable;
/** Data storage for all the algorithms defined in the base. Should
not be used by other modules */
struct mca_coll_base_comm_t* base_data;
}; };
typedef struct mca_coll_base_module_2_1_0_t mca_coll_base_module_2_1_0_t; typedef struct mca_coll_base_module_2_1_0_t mca_coll_base_module_2_1_0_t;
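The access pattern enabled by the new field matches the casts used throughout this commit: algorithms downcast to the base module type, then reach the per-communicator cache through base_data. A minimal sketch, assuming the coll_base headers are in scope:

    /* fetch a tree cached for this communicator, or NULL if none yet */
    static ompi_coll_tree_t *cached_bmtree(mca_coll_base_module_t *module)
    {
        mca_coll_base_comm_t *data = module->base_data;
        return (NULL == data) ? NULL : data->cached_in_order_bmtree;
    }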