openmpi/ompi/mpi/c/ibcast.c
David Eberius d377a6b6f4 Added Software-based Performance Counters driver code along with several counters.
This code is the implementation of Software-based Performance Counters as described in the paper 'Using Software-Based Performance Counters to Expose Low-Level Open MPI Performance Information' in EuroMPI/USA '17 (http://icl.cs.utk.edu/news_pub/submissions/software-performance-counters.pdf).  More practical usage information can be found here: https://github.com/davideberius/ompi/wiki/How-to-Use-Software-Based-Performance-Counters-(SPCs)-in-Open-MPI.

All software event functions are wrapped in macros that become no-ops when SOFTWARE_EVENTS_ENABLE is not defined.  The internal timer units have been changed to cycles to avoid division operations, which were a large source of overhead, as discussed in the paper.  Added a --with-spc configure option to enable SPCs in the Open MPI build; this defines SOFTWARE_EVENTS_ENABLE.  Added an MCA parameter, mpi_spc_enable, for turning on specific counters.  Added an MCA parameter, mpi_spc_dump_enabled, for turning the dumping of SPC counters in MPI_Finalize on and off.  Added an SPC test and example.
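As a minimal sketch of how the counters might be exercised (a hypothetical program, not the test added by this commit; it assumes Open MPI was configured with --with-spc and is launched with something like mpirun --mca mpi_spc_enable OMPI_SPC_IBCAST --mca mpi_spc_dump_enabled true, where the exact counter-name syntax accepted by mpi_spc_enable is documented in the wiki linked above):

/* spc_ibcast.c - hypothetical example, not the test shipped with this
 * commit.  Each MPI_Ibcast below should increment the OMPI_SPC_IBCAST
 * counter; with mpi_spc_dump_enabled set, the per-process totals are
 * dumped in MPI_Finalize. */
#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, value = 0;
    MPI_Request req;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Issue ten nonblocking broadcasts; each is one counter event. */
    for (int i = 0; i < 10; i++) {
        if (0 == rank) value = i;
        MPI_Ibcast(&value, 1, MPI_INT, 0, MPI_COMM_WORLD, &req);
        MPI_Wait(&req, MPI_STATUS_IGNORE);
    }

    MPI_Finalize();   /* SPC dump happens here when enabled */
    return 0;
}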

Signed-off-by: David Eberius <deberius@vols.utk.edu>
2018-06-11 22:48:16 -04:00

/*
 * Copyright (c) 2012      Oak Ridge National Laboratory. All rights reserved.
 * Copyright (c) 2015      Research Organization for Information Science
 *                         and Technology (RIST). All rights reserved.
 * Copyright (c) 2017-2018 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#include "ompi/runtime/ompi_spc.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_Ibcast = PMPI_Ibcast
#endif
#define MPI_Ibcast PMPI_Ibcast
#endif
static const char FUNC_NAME[] = "MPI_Ibcast";
int MPI_Ibcast(void *buffer, int count, MPI_Datatype datatype,
               int root, MPI_Comm comm, MPI_Request *request)
{
    int err;

    /* Bump the software-based performance counter for MPI_Ibcast */
    SPC_RECORD(OMPI_SPC_IBCAST, 1);

    MEMCHECKER(
        memchecker_datatype(datatype);
        memchecker_call(&opal_memchecker_base_isdefined, buffer, count, datatype);
        memchecker_comm(comm);
    );

    if (MPI_PARAM_CHECK) {
        err = MPI_SUCCESS;
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_comm_invalid(comm)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
                                          FUNC_NAME);
        }

        /* Errors for all ranks */
        OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, count);
        OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
        if (MPI_IN_PLACE == buffer) {
            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
        }

        /* Errors for intracommunicators */
        if (OMPI_COMM_IS_INTRA(comm)) {
            if ((root >= ompi_comm_size(comm)) || (root < 0)) {
                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
            }
        }

        /* Errors for intercommunicators */
        else {
            if (! ((root >= 0 && root < ompi_comm_remote_size(comm)) ||
                   MPI_ROOT == root || MPI_PROC_NULL == root)) {
                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
            }
        }
    }

    OPAL_CR_ENTER_LIBRARY();

    /* Invoke the coll component to perform the back-end operation */
    err = comm->c_coll->coll_ibcast(buffer, count, datatype, root, comm,
                                    request,
                                    comm->c_coll->coll_ibcast_module);
    OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
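
The OMPI_BUILD_MPI_PROFILING block near the top of the file is what lets tools interpose on this routine: MPI_Ibcast is a weak alias for PMPI_Ibcast, so a profiling library can supply its own strong MPI_Ibcast and still reach the real implementation through the PMPI name. A minimal sketch of such an interposer (hypothetical tool code, not part of Open MPI):

/* Hypothetical PMPI interposer: counts MPI_Ibcast calls from outside the
 * library, the traditional alternative to the built-in SPC counters. */
#include <mpi.h>
#include <stdio.h>

static long ibcast_calls = 0;

int MPI_Ibcast(void *buffer, int count, MPI_Datatype datatype,
               int root, MPI_Comm comm, MPI_Request *request)
{
    ibcast_calls++;
    return PMPI_Ibcast(buffer, count, datatype, root, comm, request);
}

int MPI_Finalize(void)
{
    printf("MPI_Ibcast was called %ld times\n", ibcast_calls);
    return PMPI_Finalize();
}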
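
The paper linked in the commit message describes exposing SPC data through the MPI_T performance-variable interface. As a hedged sketch under that assumption (the pvar names the SPCs register are not shown in this file, so the "spc" substring match below is illustrative), a program can enumerate pvars with standard MPI_T calls:

/* Hypothetical MPI_T consumer: lists performance variables whose names
 * mention "spc".  Whether the SPC counters appear, and under what names,
 * depends on the build; the enumeration itself is standard MPI_T. */
#include <mpi.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
    int provided, num_pvar;

    MPI_Init(&argc, &argv);
    MPI_T_init_thread(MPI_THREAD_SINGLE, &provided);
    MPI_T_pvar_get_num(&num_pvar);

    for (int i = 0; i < num_pvar; i++) {
        char name[256], desc[256];
        int name_len = sizeof(name), desc_len = sizeof(desc);
        int verbosity, var_class, bind, readonly, continuous, atomic;
        MPI_Datatype dtype;
        MPI_T_enum enumtype;

        if (MPI_SUCCESS == MPI_T_pvar_get_info(i, name, &name_len,
                                               &verbosity, &var_class,
                                               &dtype, &enumtype, desc,
                                               &desc_len, &bind, &readonly,
                                               &continuous, &atomic)
            && NULL != strstr(name, "spc")) {
            printf("pvar %d: %s - %s\n", i, name, desc);
        }
    }

    MPI_T_finalize();
    MPI_Finalize();
    return 0;
}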