
Merge pull request #5413 from ggouaillardet/topic/pcollreq

mpiext/pcollreq: check subroutine parameters and add profiling symbols
Author: Gilles Gouaillardet, 2018-07-14 15:02:02 +09:00; committed by GitHub
Parents: 9831145020 61b3308871
Commit: 99e0f96155
No key matching this signature was found. GPG key ID: 4AEE18F83AFDEB23
42 changed files: 3237 additions and 195 deletions
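The heart of this change is the set of MPIX_*_init functions added below: they create persistent collective requests that are set up once and then started any number of times. As a usage illustration (a minimal sketch, assuming an Open MPI build with this extension enabled so that <mpi-ext.h> declares the MPIX_ prototypes):

#include <stdlib.h>
#include <mpi.h>
#include <mpi-ext.h>

int main(int argc, char **argv)
{
    int rank, size, val;
    MPI_Request req;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    int *gathered = malloc(size * sizeof(int));

    /* Bind the arguments once... */
    MPIX_Allgather_init(&val, 1, MPI_INT, gathered, 1, MPI_INT,
                        MPI_COMM_WORLD, MPI_INFO_NULL, &req);

    /* ...then start and complete the same request repeatedly. */
    for (int iter = 0; iter < 10; iter++) {
        val = rank + iter;
        MPI_Start(&req);
        MPI_Wait(&req, MPI_STATUS_IGNORE);
    }

    MPI_Request_free(&req);
    free(gathered);
    MPI_Finalize();
    return 0;
}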

ompi/mpi/c/alltoallv.c

@@ -13,7 +13,7 @@
  * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
  *                         reserved.
- * Copyright (c) 2014-2015 Research Organization for Information Science
+ * Copyright (c) 2014-2018 Research Organization for Information Science
  *                         and Technology (RIST). All rights reserved.
  * $COPYRIGHT$
  *
@@ -31,6 +31,7 @@
 #include "ompi/errhandler/errhandler.h"
 #include "ompi/datatype/ompi_datatype.h"
 #include "ompi/memchecker.h"
+#include "ompi/runtime/ompi_spc.h"
 
 #if OMPI_BUILD_MPI_PROFILING
 #if OPAL_HAVE_WEAK_SYMBOLS
@@ -49,6 +50,8 @@ int MPI_Alltoallv(const void *sendbuf, const int sendcounts[],
 {
     int i, size, err;
 
+    SPC_RECORD(OMPI_SPC_ALLTOALLV, 1);
+
     MEMCHECKER(
         ptrdiff_t recv_ext;
         ptrdiff_t send_ext;
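The added SPC_RECORD call (repeated in each hunk below with the matching counter name) bumps one of Open MPI's software performance counters every time the routine is entered. As a rough illustration of the technique only -- this is not OMPI's actual SPC implementation -- such an event-counter macro can be sketched as:

#include <stdatomic.h>

/* Hypothetical counter table and macro, for illustration. */
enum { SPC_EXAMPLE_ALLTOALLV, SPC_EXAMPLE_MAX };
static _Atomic unsigned long spc_counters[SPC_EXAMPLE_MAX];

#ifdef ENABLE_SPC
#define SPC_EXAMPLE_RECORD(idx, n) \
    atomic_fetch_add(&spc_counters[(idx)], (unsigned long)(n))
#else
#define SPC_EXAMPLE_RECORD(idx, n) ((void)0) /* compiles away when disabled */
#endif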

ompi/mpi/c/alltoallw.c

@@ -13,7 +13,7 @@
  * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
  *                         reserved.
- * Copyright (c) 2014-2015 Research Organization for Information Science
+ * Copyright (c) 2014-2018 Research Organization for Information Science
  *                         and Technology (RIST). All rights reserved.
  * $COPYRIGHT$
  *
@@ -31,6 +31,7 @@
 #include "ompi/errhandler/errhandler.h"
 #include "ompi/datatype/ompi_datatype.h"
 #include "ompi/memchecker.h"
+#include "ompi/runtime/ompi_spc.h"
 
 #if OMPI_BUILD_MPI_PROFILING
 #if OPAL_HAVE_WEAK_SYMBOLS
@@ -49,6 +50,8 @@ int MPI_Alltoallw(const void *sendbuf, const int sendcounts[],
 {
     int i, size, err;
 
+    SPC_RECORD(OMPI_SPC_ALLTOALLW, 1);
+
     MEMCHECKER(
         ptrdiff_t recv_ext;
         ptrdiff_t send_ext;

ompi/mpi/c/exscan.c

@@ -12,7 +12,7 @@
  * All rights reserved.
  * Copyright (c) 2013 Los Alamos National Security, LLC. All rights
  *                         reserved.
- * Copyright (c) 2015 Research Organization for Information Science
+ * Copyright (c) 2015-2018 Research Organization for Information Science
  *                         and Technology (RIST). All rights reserved.
  * $COPYRIGHT$
  *
@@ -30,6 +30,7 @@
 #include "ompi/datatype/ompi_datatype.h"
 #include "ompi/op/op.h"
 #include "ompi/memchecker.h"
+#include "ompi/runtime/ompi_spc.h"
 
 #if OMPI_BUILD_MPI_PROFILING
 #if OPAL_HAVE_WEAK_SYMBOLS
@@ -46,6 +47,8 @@ int MPI_Exscan(const void *sendbuf, void *recvbuf, int count,
 {
     int err;
 
+    SPC_RECORD(OMPI_SPC_EXSCAN, 1);
+
     MEMCHECKER(
         memchecker_datatype(datatype);
         memchecker_call(&opal_memchecker_base_isdefined, sendbuf, count, datatype);

ompi/mpi/c/iexscan.c

@@ -12,7 +12,7 @@
  * All rights reserved.
  * Copyright (c) 2013 Los Alamos National Security, LLC. All rights
  *                         reserved.
- * Copyright (c) 2015 Research Organization for Information Science
+ * Copyright (c) 2015-2018 Research Organization for Information Science
  *                         and Technology (RIST). All rights reserved.
  * $COPYRIGHT$
  *
@@ -30,6 +30,7 @@
 #include "ompi/datatype/ompi_datatype.h"
 #include "ompi/op/op.h"
 #include "ompi/memchecker.h"
+#include "ompi/runtime/ompi_spc.h"
 
 #if OMPI_BUILD_MPI_PROFILING
 #if OPAL_HAVE_WEAK_SYMBOLS
@@ -46,6 +47,8 @@ int MPI_Iexscan(const void *sendbuf, void *recvbuf, int count,
 {
     int err;
 
+    SPC_RECORD(OMPI_SPC_IEXSCAN, 1);
+
     MEMCHECKER(
         memchecker_datatype(datatype);
         memchecker_call(&opal_memchecker_base_isdefined, sendbuf, count, datatype);

ompi/mpi/c/igatherv.c

@@ -13,8 +13,8 @@
  * Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
  *                         reserved.
- * Copyright (c) 2015-2016 Research Organization for Information Science
+ * Copyright (c) 2015-2018 Research Organization for Information Science
  *                         and Technology (RIST). All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -30,6 +30,7 @@
 #include "ompi/errhandler/errhandler.h"
 #include "ompi/datatype/ompi_datatype.h"
 #include "ompi/memchecker.h"
+#include "ompi/runtime/ompi_spc.h"
 
 #if OMPI_BUILD_MPI_PROFILING
 #if OPAL_HAVE_WEAK_SYMBOLS
@@ -47,6 +48,8 @@ int MPI_Igatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
 {
     int i, size, err;
 
+    SPC_RECORD(OMPI_SPC_IGATHERV, 1);
+
     MEMCHECKER(
         ptrdiff_t ext;

ompi/mpi/c/ineighbor_allgather.c

@@ -14,7 +14,7 @@
  * Copyright (c) 2012 Oak Rigde National Laboratory. All rights reserved.
  * Copyright (c) 2013 Los Alamos National Security, LLC. All rights
  *                         reserved.
- * Copyright (c) 2015-2017 Research Organization for Information Science
+ * Copyright (c) 2015-2018 Research Organization for Information Science
  *                         and Technology (RIST). All rights reserved.
  * Copyright (c) 2017 IBM Corporation. All rights reserved.
  * $COPYRIGHT$
@@ -35,6 +35,7 @@
 #include "ompi/memchecker.h"
 #include "ompi/mca/topo/topo.h"
 #include "ompi/mca/topo/base/base.h"
+#include "ompi/runtime/ompi_spc.h"
 
 #if OMPI_BUILD_MPI_PROFILING
 #if OPAL_HAVE_WEAK_SYMBOLS
@@ -52,6 +53,8 @@ int MPI_Ineighbor_allgather(const void *sendbuf, int sendcount, MPI_Datatype sen
 {
     int err;
 
+    SPC_RECORD(OMPI_SPC_INEIGHBOR_ALLGATHER, 1);
+
     MEMCHECKER(
         int rank;
         ptrdiff_t ext;

ompi/mpi/c/ineighbor_allgatherv.c

@@ -14,7 +14,7 @@
  * Copyright (c) 2012 Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
  *                         reserved.
- * Copyright (c) 2015-2017 Research Organization for Information Science
+ * Copyright (c) 2015-2018 Research Organization for Information Science
  *                         and Technology (RIST). All rights reserved.
  * Copyright (c) 2017 IBM Corporation. All rights reserved.
  * $COPYRIGHT$
@@ -35,6 +35,7 @@
 #include "ompi/memchecker.h"
 #include "ompi/mca/topo/topo.h"
 #include "ompi/mca/topo/base/base.h"
+#include "ompi/runtime/ompi_spc.h"
 
 #if OMPI_BUILD_MPI_PROFILING
 #if OPAL_HAVE_WEAK_SYMBOLS
@@ -52,6 +53,8 @@ int MPI_Ineighbor_allgatherv(const void *sendbuf, int sendcount, MPI_Datatype se
 {
     int i, size, err;
 
+    SPC_RECORD(OMPI_SPC_INEIGHBOR_ALLGATHERV, 1);
+
     MEMCHECKER(
         int rank;
         ptrdiff_t ext;

ompi/mpi/c/ineighbor_alltoall.c

@@ -14,7 +14,7 @@
  * Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved.
  * Copyright (c) 2013 Los Alamos National Security, LLC. All rights
  *                         reserved.
- * Copyright (c) 2014-2015 Research Organization for Information Science
+ * Copyright (c) 2014-2018 Research Organization for Information Science
  *                         and Technology (RIST). All rights reserved.
  * Copyright (c) 2017 IBM Corporation. All rights reserved.
  * $COPYRIGHT$
@@ -35,6 +35,7 @@
 #include "ompi/memchecker.h"
 #include "ompi/mca/topo/topo.h"
 #include "ompi/mca/topo/base/base.h"
+#include "ompi/runtime/ompi_spc.h"
 
 #if OMPI_BUILD_MPI_PROFILING
 #if OPAL_HAVE_WEAK_SYMBOLS
@@ -53,6 +54,8 @@ int MPI_Ineighbor_alltoall(const void *sendbuf, int sendcount, MPI_Datatype send
     size_t sendtype_size, recvtype_size;
     int err;
 
+    SPC_RECORD(OMPI_SPC_INEIGHBOR_ALLTOALL, 1);
+
     MEMCHECKER(
         memchecker_comm(comm);
         if (MPI_IN_PLACE != sendbuf) {

ompi/mpi/c/ineighbor_alltoallv.c

@@ -13,7 +13,7 @@
  * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2012-2017 Los Alamos National Security, LLC. All rights
  *                         reserved.
- * Copyright (c) 2014-2015 Research Organization for Information Science
+ * Copyright (c) 2014-2018 Research Organization for Information Science
  *                         and Technology (RIST). All rights reserved.
  * Copyright (c) 2017 IBM Corporation. All rights reserved.
  * $COPYRIGHT$
@@ -34,6 +34,7 @@
 #include "ompi/memchecker.h"
 #include "ompi/mca/topo/topo.h"
 #include "ompi/mca/topo/base/base.h"
+#include "ompi/runtime/ompi_spc.h"
 
 #if OMPI_BUILD_MPI_PROFILING
 #if OPAL_HAVE_WEAK_SYMBOLS
@@ -53,6 +54,8 @@ int MPI_Ineighbor_alltoallv(const void *sendbuf, const int sendcounts[], const i
     int i, err;
     int indegree, outdegree;
 
+    SPC_RECORD(OMPI_SPC_INEIGHBOR_ALLTOALLV, 1);
+
     MEMCHECKER(
         ptrdiff_t recv_ext;
         ptrdiff_t send_ext;

ompi/mpi/c/ineighbor_alltoallw.c

@@ -13,7 +13,7 @@
  * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2012-2017 Los Alamos National Security, LLC. All rights
  *                         reserved.
- * Copyright (c) 2014-2015 Research Organization for Information Science
+ * Copyright (c) 2014-2018 Research Organization for Information Science
  *                         and Technology (RIST). All rights reserved.
  * Copyright (c) 2017 IBM Corporation. All rights reserved.
  * $COPYRIGHT$
@@ -34,6 +34,7 @@
 #include "ompi/memchecker.h"
 #include "ompi/mca/topo/topo.h"
 #include "ompi/mca/topo/base/base.h"
+#include "ompi/runtime/ompi_spc.h"
 
 #if OMPI_BUILD_MPI_PROFILING
 #if OPAL_HAVE_WEAK_SYMBOLS
@@ -53,6 +54,8 @@ int MPI_Ineighbor_alltoallw(const void *sendbuf, const int sendcounts[], const M
     int i, err;
     int indegree, outdegree;
 
+    SPC_RECORD(OMPI_SPC_INEIGHBOR_ALLTOALLW, 1);
+
     MEMCHECKER(
         ptrdiff_t recv_ext;
         ptrdiff_t send_ext;

ompi/mpi/c/reduce_scatter.c

@@ -13,7 +13,7 @@
  * Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
  *                         reserved.
- * Copyright (c) 2015 Research Organization for Information Science
+ * Copyright (c) 2015-2018 Research Organization for Information Science
  *                         and Technology (RIST). All rights reserved.
  * $COPYRIGHT$
  *
@@ -31,6 +31,7 @@
 #include "ompi/datatype/ompi_datatype.h"
 #include "ompi/op/op.h"
 #include "ompi/memchecker.h"
+#include "ompi/runtime/ompi_spc.h"
 
 #if OMPI_BUILD_MPI_PROFILING
 #if OPAL_HAVE_WEAK_SYMBOLS
@@ -47,6 +48,8 @@ int MPI_Reduce_scatter(const void *sendbuf, void *recvbuf, const int recvcounts[
 {
     int i, err, size, count;
 
+    SPC_RECORD(OMPI_SPC_REDUCE_SCATTER, 1);
+
     MEMCHECKER(
         int rank;

ompi/mpi/c/reduce_scatter_block.c

@@ -14,7 +14,7 @@
  * Copyright (c) 2012 Oak Ridge National Labs. All rights reserved.
  * Copyright (c) 2013 Los Alamos National Security, LLC. All rights
  *                         reserved.
- * Copyright (c) 2015 Research Organization for Information Science
+ * Copyright (c) 2015-2018 Research Organization for Information Science
  *                         and Technology (RIST). All rights reserved.
  * $COPYRIGHT$
  *
@@ -32,6 +32,7 @@
 #include "ompi/datatype/ompi_datatype.h"
 #include "ompi/op/op.h"
 #include "ompi/memchecker.h"
+#include "ompi/runtime/ompi_spc.h"
 
 #if OMPI_BUILD_MPI_PROFILING
 #if OPAL_HAVE_WEAK_SYMBOLS
@@ -48,6 +49,8 @@ int MPI_Reduce_scatter_block(const void *sendbuf, void *recvbuf, int recvcount,
 {
     int err;
 
+    SPC_RECORD(OMPI_SPC_REDUCE_SCATTER_BLOCK, 1);
+
     MEMCHECKER(
         memchecker_comm(comm);
         memchecker_datatype(datatype);

ompi/mpiext/pcollreq/c/Makefile.am

@@ -1,5 +1,7 @@
 #
 # Copyright (c) 2017 FUJITSU LIMITED. All rights reserved.
+# Copyright (c) 2018 Research Organization for Information Science
+#                    and Technology (RIST). All rights reserved.
 # $COPYRIGHT$
 #
 # Additional copyrights may follow
@@ -7,13 +9,16 @@
 # $HEADER$
 #
 
-# We must set these #defines so that the inner OMPI MPI prototype
-# header files do the Right Thing.
-AM_CPPFLAGS = -DOMPI_PROFILE_LAYER=0 -DOMPI_COMPILING_FORTRAN_WRAPPERS=1
+SUBDIRS = profile
+
+# OMPI_BUILD_MPI_PROFILING is enabled when we want our generated MPI_* symbols
+# to be replaced by PMPI_*.
+# In this directory, we need it to be 0
+AM_CPPFLAGS = -DOMPI_BUILD_MPI_PROFILING=0
 
 include $(top_srcdir)/Makefile.ompi-rules
 
+# Convenience libtool library that will be slurped up into libmpi.la.
 noinst_LTLIBRARIES = libmpiext_pcollreq_c.la
 
 # This is where the top-level header file (that is included in
@@ -28,8 +33,34 @@ ompi_HEADERS = mpiext_pcollreq_c.h
 # conventions.
 libmpiext_pcollreq_c_la_SOURCES = \
         $(ompi_HEADERS) \
-        mpiext_pcollreq.c
+        mpiext_pcollreq_c.c
+libmpiext_pcollreq_c_la_LDFLAGS = -module -avoid-version
+
+if BUILD_MPI_BINDINGS_LAYER
+libmpiext_pcollreq_c_la_SOURCES += \
+        allgather_init.c \
+        allgatherv_init.c \
+        allreduce_init.c \
+        alltoall_init.c \
+        alltoallv_init.c \
+        alltoallw_init.c \
+        barrier_init.c \
+        bcast_init.c \
+        exscan_init.c \
+        gather_init.c \
+        gatherv_init.c \
+        reduce_init.c \
+        reduce_scatter_block_init.c \
+        reduce_scatter_init.c \
+        scan_init.c \
+        scatter_init.c \
+        scatterv_init.c \
+        \
+        neighbor_allgather_init.c \
+        neighbor_allgatherv_init.c \
+        neighbor_alltoall_init.c \
+        neighbor_alltoallv_init.c \
+        neighbor_alltoallw_init.c
+endif
 
 # Man page installation
 nodist_man_MANS = \
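The SUBDIRS = profile / OMPI_BUILD_MPI_PROFILING split means every binding in this extension is compiled twice: once here with the macro set to 0, producing the MPIX_* entry points, and once under profile/ with it set to 1, where the #define in each source file renames the function body to PMPIX_* and, when the toolchain supports it, #pragma weak re-exports the MPIX_* name as a weak alias that a profiling tool can override. A self-contained sketch of that aliasing trick (illustrative names, not OMPI code):

/* The real implementation lives under the profiling name. */
int Pexample_call(int x)
{
    return x + 1;
}

/* Weak alias: ordinary callers reach Pexample_call() through
 * example_call(), but a tool may supply its own strong
 * example_call() that wraps the P-version -- the same scheme
 * as MPI_/PMPI_ (here, MPIX_/PMPIX_). */
#pragma weak example_call = Pexample_call
int example_call(int x);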

ompi/mpiext/pcollreq/c/allgather_init.c (new file)

@@ -0,0 +1,108 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2018 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2015-2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#include "ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.h"
#include "ompi/runtime/ompi_spc.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPIX_Allgather_init = PMPIX_Allgather_init
#endif
#define MPIX_Allgather_init PMPIX_Allgather_init
#endif
static const char FUNC_NAME[] = "MPIX_Allgather_init";
int MPIX_Allgather_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm, MPI_Info info, MPI_Request *request)
{
int err;
SPC_RECORD(OMPI_SPC_ALLGATHER_INIT, 1);
MEMCHECKER(
int rank;
ptrdiff_t ext;
rank = ompi_comm_rank(comm);
ompi_datatype_type_extent(recvtype, &ext);
memchecker_datatype(recvtype);
memchecker_comm(comm);
/* check whether the actual send buffer is defined. */
if (MPI_IN_PLACE == sendbuf) {
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(recvbuf)+rank*ext,
recvcount, recvtype);
} else {
memchecker_datatype(sendtype);
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
}
/* check whether the receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks on both
intracommunicators and intercommunicators */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
err = MPI_ERR_TYPE;
} else if (recvcount < 0) {
err = MPI_ERR_COUNT;
} else if ((MPI_IN_PLACE == sendbuf && OMPI_COMM_IS_INTER(comm)) ||
MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
} else if (MPI_IN_PLACE != sendbuf) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll->coll_allgather_init(sendbuf, sendcount, sendtype,
recvbuf, recvcount, recvtype, comm,
info, request, comm->c_coll->coll_allgather_init_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpiext/pcollreq/c/allgatherv_init.c (new file)

@@ -0,0 +1,133 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2018 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2010 University of Houston. All rights reserved.
* Copyright (c) 2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2015-2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#include "ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.h"
#include "ompi/runtime/ompi_spc.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPIX_Allgatherv_init = PMPIX_Allgatherv_init
#endif
#define MPIX_Allgatherv_init PMPIX_Allgatherv_init
#endif
static const char FUNC_NAME[] = "MPIX_Allgatherv_init";
int MPIX_Allgatherv_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, const int recvcounts[], const int displs[],
MPI_Datatype recvtype, MPI_Comm comm,
MPI_Info info, MPI_Request *request)
{
int i, size, err;
SPC_RECORD(OMPI_SPC_ALLGATHERV_INIT, 1);
MEMCHECKER(
int rank;
ptrdiff_t ext;
rank = ompi_comm_rank(comm);
size = ompi_comm_size(comm);
ompi_datatype_type_extent(recvtype, &ext);
memchecker_datatype(recvtype);
memchecker_comm (comm);
/* check whether the receive buffer is addressable. */
for (i = 0; i < size; i++) {
memchecker_call(&opal_memchecker_base_isaddressable,
(char *)(recvbuf)+displs[i]*ext,
recvcounts[i], recvtype);
}
/* check whether the actual send buffer is defined. */
if (MPI_IN_PLACE == sendbuf) {
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(recvbuf)+displs[rank]*ext,
recvcounts[rank], recvtype);
} else {
memchecker_datatype(sendtype);
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
}
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks on both
intracommunicators and intercommunicators */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if ((MPI_IN_PLACE == sendbuf && OMPI_COMM_IS_INTER(comm)) ||
MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
}
if (MPI_IN_PLACE != sendbuf) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
/* We always define the remote group to be the same as the local
group in the case of an intracommunicator, so it's safe to
get the size of the remote group here for both intra- and
intercommunicators */
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
}
}
if (NULL == displs) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_BUFFER, FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll->coll_allgatherv_init(sendbuf, sendcount, sendtype,
recvbuf, recvcounts, displs,
recvtype, comm, info, request,
comm->c_coll->coll_allgatherv_init_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpiext/pcollreq/c/allreduce_init.c (new file)

@@ -0,0 +1,122 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2018 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2015-2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2016 IBM Corporation. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/op/op.h"
#include "ompi/memchecker.h"
#include "ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.h"
#include "ompi/runtime/ompi_spc.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPIX_Allreduce_init = PMPIX_Allreduce_init
#endif
#define MPIX_Allreduce_init PMPIX_Allreduce_init
#endif
static const char FUNC_NAME[] = "MPIX_Allreduce_init";
int MPIX_Allreduce_init(const void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm,
MPI_Info info, MPI_Request *request)
{
int err;
SPC_RECORD(OMPI_SPC_ALLREDUCE_INIT, 1);
MEMCHECKER(
memchecker_datatype(datatype);
memchecker_comm(comm);
/* check whether receive buffer is defined. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, count, datatype);
/* check whether the actual send buffer is defined. */
if (MPI_IN_PLACE == sendbuf) {
memchecker_call(&opal_memchecker_base_isdefined, recvbuf, count, datatype);
} else {
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, count, datatype);
}
);
if (MPI_PARAM_CHECK) {
char *msg;
/* Unrooted operation -- same checks for all ranks on both
intracommunicators and intercommunicators */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if (MPI_OP_NULL == op) {
err = MPI_ERR_OP;
} else if (!ompi_op_is_valid(op, datatype, &msg, FUNC_NAME)) {
int ret = OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, msg);
free(msg);
return ret;
} else if ((MPI_IN_PLACE == sendbuf && OMPI_COMM_IS_INTER(comm)) ||
MPI_IN_PLACE == recvbuf ) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_BUFFER,
FUNC_NAME);
} else if( (sendbuf == recvbuf) &&
(MPI_BOTTOM != sendbuf) &&
(count > 1) ) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_BUFFER,
FUNC_NAME);
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, count);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
/* MPI standard says that reductions have to have a count of at least 1,
* but some benchmarks (e.g., IMB) calls this function with a count of 0.
* So handle that case.
*/
if (0 == count) {
*request = &ompi_request_empty;
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
OBJ_RETAIN(op);
err = comm->c_coll->coll_allreduce_init(sendbuf, recvbuf, count, datatype,
op, comm, info, request, comm->c_coll->coll_allreduce_init_module);
OBJ_RELEASE(op);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
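Note the count == 0 fast path above: rather than building a schedule for an empty reduction, the function hands back ompi_request_empty, which is already complete. From the caller's perspective (a sketch, not a guaranteed idiom; sbuf and rbuf are hypothetical):

MPI_Request req;
MPIX_Allreduce_init(sbuf, rbuf, 0, MPI_INT, MPI_SUM,
                    MPI_COMM_WORLD, MPI_INFO_NULL, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE); /* already complete: returns at once */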

ompi/mpiext/pcollreq/c/alltoall_init.c (new file)

@@ -0,0 +1,106 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2018 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2014-2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#include "ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.h"
#include "ompi/runtime/ompi_spc.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPIX_Alltoall_init = PMPIX_Alltoall_init
#endif
#define MPIX_Alltoall_init PMPIX_Alltoall_init
#endif
static const char FUNC_NAME[] = "MPIX_Alltoall_init";
int MPIX_Alltoall_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm, MPI_Info info, MPI_Request *request)
{
size_t sendtype_size, recvtype_size;
int err;
SPC_RECORD(OMPI_SPC_ALLTOALL_INIT, 1);
MEMCHECKER(
memchecker_comm(comm);
if (MPI_IN_PLACE != sendbuf) {
memchecker_datatype(sendtype);
memchecker_call(&opal_memchecker_base_isdefined, (void *)sendbuf, sendcount, sendtype);
}
memchecker_datatype(recvtype);
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks on both
intracommunicators and intercommunicators */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if ((MPI_IN_PLACE == sendbuf && OMPI_COMM_IS_INTER(comm)) ||
MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
FUNC_NAME);
} else {
if (MPI_IN_PLACE != sendbuf) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
OMPI_CHECK_DATATYPE_FOR_RECV(err, recvtype, recvcount);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
if (MPI_IN_PLACE != sendbuf && !OMPI_COMM_IS_INTER(comm)) {
ompi_datatype_type_size(sendtype, &sendtype_size);
ompi_datatype_type_size(recvtype, &recvtype_size);
if ((sendtype_size*sendcount) != (recvtype_size*recvcount)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TRUNCATE, FUNC_NAME);
}
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll->coll_alltoall_init(sendbuf, sendcount, sendtype,
recvbuf, recvcount, recvtype, comm, info,
request, comm->c_coll->coll_alltoall_init_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpiext/pcollreq/c/alltoallv_init.c (new file)

@@ -0,0 +1,136 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2018 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2014-2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#include "ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.h"
#include "ompi/runtime/ompi_spc.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPIX_Alltoallv_init = PMPIX_Alltoallv_init
#endif
#define MPIX_Alltoallv_init PMPIX_Alltoallv_init
#endif
static const char FUNC_NAME[] = "MPIX_Alltoallv_init";
int MPIX_Alltoallv_init(const void *sendbuf, const int sendcounts[], const int sdispls[],
MPI_Datatype sendtype, void *recvbuf, const int recvcounts[],
const int rdispls[], MPI_Datatype recvtype, MPI_Comm comm,
MPI_Info info, MPI_Request *request)
{
int i, size, err;
SPC_RECORD(OMPI_SPC_ALLTOALLV_INIT, 1);
MEMCHECKER(
ptrdiff_t recv_ext;
ptrdiff_t send_ext;
memchecker_comm(comm);
if (MPI_IN_PLACE != sendbuf) {
memchecker_datatype(sendtype);
ompi_datatype_type_extent(sendtype, &send_ext);
}
memchecker_datatype(recvtype);
ompi_datatype_type_extent(recvtype, &recv_ext);
size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
for ( i = 0; i < size; i++ ) {
if (MPI_IN_PLACE != sendbuf) {
/* check if send chunks are defined. */
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(sendbuf)+sdispls[i]*send_ext,
sendcounts[i], sendtype);
}
/* check if receive chunks are addressable. */
memchecker_call(&opal_memchecker_base_isaddressable,
(char *)(recvbuf)+rdispls[i]*recv_ext,
recvcounts[i], recvtype);
}
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
if (MPI_IN_PLACE == sendbuf) {
sendcounts = recvcounts;
sdispls = rdispls;
sendtype = recvtype;
}
if ((NULL == sendcounts) || (NULL == sdispls) ||
(NULL == recvcounts) || (NULL == rdispls) ||
(MPI_IN_PLACE == sendbuf && OMPI_COMM_IS_INTER(comm)) ||
MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
for (i = 0; i < size; ++i) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
OMPI_CHECK_DATATYPE_FOR_RECV(err, recvtype, recvcounts[i]);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
if (MPI_IN_PLACE != sendbuf && !OMPI_COMM_IS_INTER(comm)) {
int me = ompi_comm_rank(comm);
size_t sendtype_size, recvtype_size;
ompi_datatype_type_size(sendtype, &sendtype_size);
ompi_datatype_type_size(recvtype, &recvtype_size);
if ((sendtype_size*sendcounts[me]) != (recvtype_size*recvcounts[me])) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TRUNCATE, FUNC_NAME);
}
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll->coll_alltoallv_init(sendbuf, sendcounts, sdispls,
sendtype, recvbuf, recvcounts, rdispls,
recvtype, comm, info, request, comm->c_coll->coll_alltoallv_init_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
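One detail of the parameter check above is the MPI_IN_PLACE substitution: the send-side arguments are replaced by the receive-side ones before validation, so an in-place caller can legitimately pass placeholder send arguments. A hedged sketch (intracommunicator only; rbuf, rcounts and rdispls are hypothetical):

MPI_Request req;
MPIX_Alltoallv_init(MPI_IN_PLACE, NULL, NULL, MPI_DATATYPE_NULL,
                    rbuf, rcounts, rdispls, MPI_INT,
                    MPI_COMM_WORLD, MPI_INFO_NULL, &req);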

ompi/mpiext/pcollreq/c/alltoallw_init.c (new file)

@@ -0,0 +1,133 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2018 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2014-2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#include "ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.h"
#include "ompi/runtime/ompi_spc.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPIX_Alltoallw_init = PMPIX_Alltoallw_init
#endif
#define MPIX_Alltoallw_init PMPIX_Alltoallw_init
#endif
static const char FUNC_NAME[] = "MPIX_Alltoallw_init";
int MPIX_Alltoallw_init(const void *sendbuf, const int sendcounts[], const int sdispls[],
const MPI_Datatype sendtypes[], void *recvbuf, const int recvcounts[],
const int rdispls[], const MPI_Datatype recvtypes[], MPI_Comm comm,
MPI_Info info, MPI_Request *request)
{
int i, size, err;
SPC_RECORD(OMPI_SPC_ALLTOALLW_INIT, 1);
MEMCHECKER(
ptrdiff_t recv_ext;
ptrdiff_t send_ext;
memchecker_comm(comm);
size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
for ( i = 0; i < size; i++ ) {
if (MPI_IN_PLACE != sendbuf) {
memchecker_datatype(sendtypes[i]);
ompi_datatype_type_extent(sendtypes[i], &send_ext);
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(sendbuf)+sdispls[i]*send_ext,
sendcounts[i], sendtypes[i]);
}
memchecker_datatype(recvtypes[i]);
ompi_datatype_type_extent(recvtypes[i], &recv_ext);
memchecker_call(&opal_memchecker_base_isaddressable,
(char *)(recvbuf)+rdispls[i]*recv_ext,
recvcounts[i], recvtypes[i]);
}
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
if (MPI_IN_PLACE == sendbuf) {
sendcounts = recvcounts;
sdispls = rdispls;
sendtypes = recvtypes;
}
if ((NULL == sendcounts) || (NULL == sdispls) || (NULL == sendtypes) ||
(NULL == recvcounts) || (NULL == rdispls) || (NULL == recvtypes) ||
(MPI_IN_PLACE == sendbuf && OMPI_COMM_IS_INTER(comm)) ||
MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
size = OMPI_COMM_IS_INTER(comm)?ompi_comm_remote_size(comm):ompi_comm_size(comm);
for (i = 0; i < size; ++i) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtypes[i], sendcounts[i]);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
OMPI_CHECK_DATATYPE_FOR_RECV(err, recvtypes[i], recvcounts[i]);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
if (MPI_IN_PLACE != sendbuf && !OMPI_COMM_IS_INTER(comm)) {
int me = ompi_comm_rank(comm);
size_t sendtype_size, recvtype_size;
ompi_datatype_type_size(sendtypes[me], &sendtype_size);
ompi_datatype_type_size(recvtypes[me], &recvtype_size);
if ((sendtype_size*sendcounts[me]) != (recvtype_size*recvcounts[me])) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TRUNCATE, FUNC_NAME);
}
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll->coll_alltoallw_init(sendbuf, sendcounts, sdispls,
sendtypes, recvbuf, recvcounts,
rdispls, recvtypes, comm, info, request,
comm->c_coll->coll_alltoallw_init_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpiext/pcollreq/c/barrier_init.c (new file)

@@ -0,0 +1,69 @@
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2018 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2012 Oak Rigde National Laboratory. All rights reserved.
* Copyright (c) 2015-2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#include "ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.h"
#include "ompi/runtime/ompi_spc.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPIX_Barrier_init = PMPIX_Barrier_init
#endif
#define MPIX_Barrier_init PMPIX_Barrier_init
#endif
static const char FUNC_NAME[] = "MPIX_Barrier_init";
int MPIX_Barrier_init(MPI_Comm comm, MPI_Info info, MPI_Request *request)
{
int err = MPI_SUCCESS;
SPC_RECORD(OMPI_SPC_BARRIER_INIT, 1);
MEMCHECKER(
memchecker_comm(comm);
);
/* Error checking */
if (MPI_PARAM_CHECK) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
err = comm->c_coll->coll_barrier_init(comm, info, request, comm->c_coll->coll_barrier_init_module);
/* All done */
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpiext/pcollreq/c/bcast_init.c (new file)

@@ -0,0 +1,91 @@
/*
* Copyright (c) 2012 Oak Rigde National Laboratory. All rights reserved.
* Copyright (c) 2015-2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2017-2018 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#include "ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.h"
#include "ompi/runtime/ompi_spc.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPIX_Bcast_init = PMPIX_Bcast_init
#endif
#define MPIX_Bcast_init PMPIX_Bcast_init
#endif
static const char FUNC_NAME[] = "MPIX_Bcast_init";
int MPIX_Bcast_init(void *buffer, int count, MPI_Datatype datatype,
int root, MPI_Comm comm, MPI_Info info, MPI_Request *request)
{
int err;
SPC_RECORD(OMPI_SPC_BCAST_INIT, 1);
MEMCHECKER(
memchecker_datatype(datatype);
memchecker_call(&opal_memchecker_base_isdefined, buffer, count, datatype);
memchecker_comm(comm);
);
if (MPI_PARAM_CHECK) {
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
/* Errors for all ranks */
OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, count);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
if (MPI_IN_PLACE == buffer) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
/* Errors for intracommunicators */
if (OMPI_COMM_IS_INTRA(comm)) {
if ((root >= ompi_comm_size(comm)) || (root < 0)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
}
}
/* Errors for intercommunicators */
else {
if (! ((root >= 0 && root < ompi_comm_remote_size(comm)) ||
MPI_ROOT == root || MPI_PROC_NULL == root)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
}
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll->coll_bcast_init(buffer, count, datatype, root, comm,
info, request,
comm->c_coll->coll_bcast_init_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
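The root check in the intercommunicator branch encodes the usual convention: in the group that owns the data the root passes MPI_ROOT and its peers pass MPI_PROC_NULL, while every process in the other group passes the root's rank within the root group. A sketch (intercomm, in_root_group, i_am_root and remote_root are hypothetical):

MPI_Request req;
int root_arg = in_root_group ? (i_am_root ? MPI_ROOT : MPI_PROC_NULL)
                             : remote_root;
MPIX_Bcast_init(buf, n, MPI_INT, root_arg, intercomm, MPI_INFO_NULL, &req);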

ompi/mpiext/pcollreq/c/exscan_init.c (new file)

@@ -0,0 +1,93 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2017 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2015-2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/op/op.h"
#include "ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.h"
#include "ompi/memchecker.h"
#include "ompi/runtime/ompi_spc.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPIX_Exscan_init = PMPIX_Exscan_init
#endif
#define MPIX_Exscan_init PMPIX_Exscan_init
#endif
static const char FUNC_NAME[] = "MPIX_Exscan_init";
int MPIX_Exscan_init(const void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm,
MPI_Info info, MPI_Request *request)
{
int err;
SPC_RECORD(OMPI_SPC_EXSCAN_INIT, 1);
MEMCHECKER(
memchecker_datatype(datatype);
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, count, datatype);
memchecker_comm(comm);
);
if (MPI_PARAM_CHECK) {
char *msg;
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
/* Unrooted operation -- same checks for intracommunicators
and intercommunicators */
else if (MPI_OP_NULL == op) {
err = MPI_ERR_OP;
} else if (!ompi_op_is_valid(op, datatype, &msg, FUNC_NAME)) {
int ret = OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, msg);
free(msg);
return ret;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, count);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
OBJ_RETAIN(op);
err = comm->c_coll->coll_exscan_init(sendbuf, recvbuf, count,
datatype, op, comm, info, request,
comm->c_coll->coll_exscan_init_module);
OBJ_RELEASE(op);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpiext/pcollreq/c/gather_init.c (new file)

@@ -0,0 +1,178 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2018 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2008 University of Houston. All rights reserved.
* Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2015-2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#include "ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.h"
#include "ompi/runtime/ompi_spc.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPIX_Gather_init = PMPIX_Gather_init
#endif
#define MPIX_Gather_init PMPIX_Gather_init
#endif
static const char FUNC_NAME[] = "MPIX_Gather_init";
int MPIX_Gather_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
int root, MPI_Comm comm, MPI_Info info, MPI_Request *request)
{
int err;
SPC_RECORD(OMPI_SPC_GATHER_INIT, 1);
MEMCHECKER(
int rank;
ptrdiff_t ext;
rank = ompi_comm_rank(comm);
ompi_datatype_type_extent(recvtype, &ext);
memchecker_comm(comm);
if(OMPI_COMM_IS_INTRA(comm)) {
if(ompi_comm_rank(comm) == root) {
/* check whether root's send buffer is defined. */
if (MPI_IN_PLACE == sendbuf) {
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(recvbuf)+rank*ext,
recvcount, recvtype);
} else {
memchecker_datatype(sendtype);
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
}
memchecker_datatype(recvtype);
/* check whether root's receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
} else {
memchecker_datatype(sendtype);
/* check whether send buffer is defined on other processes. */
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
}
} else {
if (MPI_ROOT == root) {
memchecker_datatype(recvtype);
/* check whether root's receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
} else if (MPI_PROC_NULL != root) {
memchecker_datatype(sendtype);
/* check whether send buffer is defined. */
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
}
}
);
if (MPI_PARAM_CHECK) {
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if ((ompi_comm_rank(comm) != root && MPI_IN_PLACE == sendbuf) ||
(ompi_comm_rank(comm) == root && MPI_IN_PLACE == recvbuf)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
/* Errors for intracommunicators */
if (OMPI_COMM_IS_INTRA(comm)) {
/* Errors for all ranks */
if ((root >= ompi_comm_size(comm)) || (root < 0)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
}
if (MPI_IN_PLACE != sendbuf) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
/* Errors for the root. Some of these could have been
combined into compound if statements above, but since
this whole section can be compiled out (or turned off at
run time) for efficiency, it's more clear to separate
them out into individual tests. */
if (ompi_comm_rank(comm) == root) {
if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
}
if (recvcount < 0) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
}
}
}
/* Errors for intercommunicators */
else {
if (! ((root >= 0 && root < ompi_comm_remote_size(comm)) ||
MPI_ROOT == root || MPI_PROC_NULL == root)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
}
/* Errors for the senders */
if (MPI_ROOT != root && MPI_PROC_NULL != root) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
/* Errors for the root. Ditto on the comment above -- these
error checks could have been combined above, but let's
make the code easier to read. */
else if (MPI_ROOT == root) {
if (recvcount < 0) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
}
if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
}
}
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll->coll_gather_init(sendbuf, sendcount, sendtype, recvbuf,
recvcount, recvtype, root, comm, info, request,
comm->c_coll->coll_gather_init_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
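As the checks above show, MPI_IN_PLACE is accepted only for sendbuf at the root (and never for recvbuf), in which case the root's contribution is taken from its own slot of the receive buffer and the send count/type are ignored. A hedged sketch of the in-place pattern (buf, n and root are hypothetical):

MPI_Request req;
if (rank == root) {
    MPIX_Gather_init(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL,
                     buf, n, MPI_INT, root, comm, MPI_INFO_NULL, &req);
} else {
    MPIX_Gather_init(buf, n, MPI_INT, NULL, 0, MPI_DATATYPE_NULL,
                     root, comm, MPI_INFO_NULL, &req);
}
MPI_Start(&req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
MPI_Request_free(&req);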

ompi/mpiext/pcollreq/c/gatherv_init.c (new file)

@@ -0,0 +1,203 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2017 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2015-2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.h"
#include "ompi/memchecker.h"
#include "ompi/runtime/ompi_spc.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPIX_Gatherv_init = PMPIX_Gatherv_init
#endif
#define MPIX_Gatherv_init PMPIX_Gatherv_init
#endif
static const char FUNC_NAME[] = "MPIX_Gatherv_init";
int MPIX_Gatherv_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, const int recvcounts[], const int displs[],
MPI_Datatype recvtype, int root, MPI_Comm comm,
MPI_Info info, MPI_Request *request)
{
int i, size, err;
SPC_RECORD(OMPI_SPC_GATHERV_INIT, 1);
MEMCHECKER(
ptrdiff_t ext;
size = ompi_comm_remote_size(comm);
ompi_datatype_type_extent(recvtype, &ext);
memchecker_comm(comm);
if(OMPI_COMM_IS_INTRA(comm)) {
if(ompi_comm_rank(comm) == root) {
/* check whether root's send buffer is defined. */
if (MPI_IN_PLACE == sendbuf) {
for (i = 0; i < size; i++) {
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(recvbuf)+displs[i]*ext,
recvcounts[i], recvtype);
}
} else {
memchecker_datatype(sendtype);
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
}
memchecker_datatype(recvtype);
/* check whether root's receive buffer is addressable. */
for (i = 0; i < size; i++) {
memchecker_call(&opal_memchecker_base_isaddressable,
(char *)(recvbuf)+displs[i]*ext,
recvcounts[i], recvtype);
}
} else {
memchecker_datatype(sendtype);
/* check whether send buffer is defined on other processes. */
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
}
} else {
if (MPI_ROOT == root) {
memchecker_datatype(recvtype);
/* check whether root's receive buffer is addressable. */
for (i = 0; i < size; i++) {
memchecker_call(&opal_memchecker_base_isaddressable,
(char *)(recvbuf)+displs[i]*ext,
recvcounts[i], recvtype);
}
} else if (MPI_PROC_NULL != root) {
memchecker_datatype(sendtype);
/* check whether send buffer is defined. */
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
}
}
);
if (MPI_PARAM_CHECK) {
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if ((ompi_comm_rank(comm) != root && MPI_IN_PLACE == sendbuf) ||
(ompi_comm_rank(comm) == root && MPI_IN_PLACE == recvbuf)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
/* Errors for intracommunicators */
if (OMPI_COMM_IS_INTRA(comm)) {
/* Errors for all ranks */
if ((root >= ompi_comm_size(comm)) || (root < 0)) {
err = MPI_ERR_ROOT;
} else if (MPI_IN_PLACE != sendbuf) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
/* Errors for the root. Some of these could have been
combined into compound if statements above, but since
this whole section can be compiled out (or turned off at
run time) for efficiency, it's more clear to separate
them out into individual tests. */
if (ompi_comm_rank(comm) == root) {
if (NULL == displs) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
if (NULL == recvcounts) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
}
size = ompi_comm_size(comm);
for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
}
}
}
}
/* Errors for intercommunicators */
else {
if (! ((root >= 0 && root < ompi_comm_remote_size(comm)) ||
MPI_ROOT == root || MPI_PROC_NULL == root)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
}
/* Errors for the senders */
if (MPI_ROOT != root && MPI_PROC_NULL != root) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
/* Errors for the root. Ditto on the comment above -- these
error checks could have been combined above, but let's
make the code easier to read. */
else if (MPI_ROOT == root) {
if (NULL == displs) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
if (NULL == recvcounts) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
}
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
}
}
}
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll->coll_gatherv_init(sendbuf, sendcount, sendtype, recvbuf,
recvcounts, displs, recvtype,
root, comm, info, request,
comm->c_coll->coll_gatherv_init_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
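
/* With the weak-symbol block above, MPIX_Gatherv_init becomes a weak alias for
 * PMPIX_Gatherv_init, so a profiling layer can interpose exactly as with the
 * standard PMPI interface. A hedged sketch of such a tool-side wrapper (the
 * bookkeeping is hypothetical): the tool supplies the strong definition, the
 * application's MPIX_ call resolves to it, and the wrapper forwards to the
 * library's PMPIX_ entry point. */
int MPIX_Gatherv_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
                      void *recvbuf, const int recvcounts[], const int displs[],
                      MPI_Datatype recvtype, int root, MPI_Comm comm,
                      MPI_Info info, MPI_Request *request)
{
    /* e.g., count calls, record timestamps, ... */
    return PMPIX_Gatherv_init(sendbuf, sendcount, sendtype, recvbuf,
                              recvcounts, displs, recvtype, root, comm,
                              info, request);
}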

@@ -1,174 +0,0 @@
/*
* Copyright (c) 2017 FUJITSU LIMITED. All rights reserved.
* Copyright (c) 2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*
*/
#include "ompi_config.h"
#include "ompi/mca/coll/coll.h"
#include "ompi/mca/coll/base/coll_base_functions.h"
#include "ompi/communicator/communicator.h"
#include "mpiext_pcollreq_c.h"
#define INFO_REQ_ARGS ompi_info_t *info, ompi_request_t **request
int MPIX_Allgather_init(ALLGATHER_BASE_ARGS, INFO_REQ_ARGS)
{
return comm->c_coll->coll_allgather_init(
ALLGATHER_BASE_ARG_NAMES, info, request,
comm->c_coll->coll_allgather_init_module);
}
int MPIX_Allgatherv_init(ALLGATHERV_BASE_ARGS, INFO_REQ_ARGS)
{
return comm->c_coll->coll_allgatherv_init(
ALLGATHERV_BASE_ARG_NAMES, info, request,
comm->c_coll->coll_allgatherv_init_module);
}
int MPIX_Allreduce_init(ALLREDUCE_BASE_ARGS, INFO_REQ_ARGS)
{
return comm->c_coll->coll_allreduce_init(
ALLREDUCE_BASE_ARG_NAMES, info, request,
comm->c_coll->coll_allreduce_init_module);
}
int MPIX_Alltoall_init(ALLTOALL_BASE_ARGS, INFO_REQ_ARGS)
{
return comm->c_coll->coll_alltoall_init(
ALLTOALL_BASE_ARG_NAMES, info, request,
comm->c_coll->coll_alltoall_init_module);
}
int MPIX_Alltoallv_init(ALLTOALLV_BASE_ARGS, INFO_REQ_ARGS)
{
return comm->c_coll->coll_alltoallv_init(
ALLTOALLV_BASE_ARG_NAMES, info, request,
comm->c_coll->coll_alltoallv_init_module);
}
int MPIX_Alltoallw_init(ALLTOALLW_BASE_ARGS, INFO_REQ_ARGS)
{
return comm->c_coll->coll_alltoallw_init(
ALLTOALLW_BASE_ARG_NAMES, info, request,
comm->c_coll->coll_alltoallw_init_module);
}
int MPIX_Barrier_init(BARRIER_BASE_ARGS, INFO_REQ_ARGS)
{
return comm->c_coll->coll_barrier_init(
BARRIER_BASE_ARG_NAMES, info, request,
comm->c_coll->coll_barrier_init_module);
}
int MPIX_Bcast_init(BCAST_BASE_ARGS, INFO_REQ_ARGS)
{
return comm->c_coll->coll_bcast_init(
BCAST_BASE_ARG_NAMES, info, request,
comm->c_coll->coll_bcast_init_module);
}
int MPIX_Exscan_init(EXSCAN_BASE_ARGS, INFO_REQ_ARGS)
{
return comm->c_coll->coll_exscan_init(
EXSCAN_BASE_ARG_NAMES, info, request,
comm->c_coll->coll_exscan_init_module);
}
int MPIX_Gather_init(GATHER_BASE_ARGS, INFO_REQ_ARGS)
{
return comm->c_coll->coll_gather_init(
GATHER_BASE_ARG_NAMES, info, request,
comm->c_coll->coll_gather_init_module);
}
int MPIX_Gatherv_init(GATHERV_BASE_ARGS, INFO_REQ_ARGS)
{
return comm->c_coll->coll_gatherv_init(
GATHERV_BASE_ARG_NAMES, info, request,
comm->c_coll->coll_gatherv_init_module);
}
int MPIX_Reduce_init(REDUCE_BASE_ARGS, INFO_REQ_ARGS)
{
return comm->c_coll->coll_reduce_init(
REDUCE_BASE_ARG_NAMES, info, request,
comm->c_coll->coll_reduce_init_module);
}
int MPIX_Reduce_scatter_init(REDUCESCATTER_BASE_ARGS, INFO_REQ_ARGS)
{
return comm->c_coll->coll_reduce_scatter_init(
REDUCESCATTER_BASE_ARG_NAMES, info, request,
comm->c_coll->coll_reduce_scatter_init_module);
}
int MPIX_Reduce_scatter_block_init(REDUCESCATTERBLOCK_BASE_ARGS, INFO_REQ_ARGS)
{
return comm->c_coll->coll_reduce_scatter_block_init(
REDUCESCATTERBLOCK_BASE_ARG_NAMES, info, request,
comm->c_coll->coll_reduce_scatter_block_init_module);
}
int MPIX_Scan_init(SCAN_BASE_ARGS, INFO_REQ_ARGS)
{
return comm->c_coll->coll_scan_init(
SCAN_BASE_ARG_NAMES, info, request,
comm->c_coll->coll_scan_init_module);
}
int MPIX_Scatter_init(SCATTER_BASE_ARGS, INFO_REQ_ARGS)
{
return comm->c_coll->coll_scatter_init(
SCATTER_BASE_ARG_NAMES, info, request,
comm->c_coll->coll_scatter_init_module);
}
int MPIX_Scatterv_init(SCATTERV_BASE_ARGS, INFO_REQ_ARGS)
{
return comm->c_coll->coll_scatterv_init(
SCATTERV_BASE_ARG_NAMES, info, request,
comm->c_coll->coll_scatterv_init_module);
}
int MPIX_Neighbor_allgather_init(NEIGHBOR_ALLGATHER_BASE_ARGS, INFO_REQ_ARGS)
{
return comm->c_coll->coll_neighbor_allgather_init(
NEIGHBOR_ALLGATHER_BASE_ARG_NAMES, info, request,
comm->c_coll->coll_neighbor_allgather_init_module);
}
int MPIX_Neighbor_allgatherv_init(NEIGHBOR_ALLGATHERV_BASE_ARGS, INFO_REQ_ARGS)
{
return comm->c_coll->coll_neighbor_allgatherv_init(
NEIGHBOR_ALLGATHERV_BASE_ARG_NAMES, info, request,
comm->c_coll->coll_neighbor_allgatherv_init_module);
}
int MPIX_Neighbor_alltoall_init(NEIGHBOR_ALLTOALL_BASE_ARGS, INFO_REQ_ARGS)
{
return comm->c_coll->coll_neighbor_alltoall_init(
NEIGHBOR_ALLTOALL_BASE_ARG_NAMES, info, request,
comm->c_coll->coll_neighbor_alltoall_init_module);
}
int MPIX_Neighbor_alltoallv_init(NEIGHBOR_ALLTOALLV_BASE_ARGS, INFO_REQ_ARGS)
{
return comm->c_coll->coll_neighbor_alltoallv_init(
NEIGHBOR_ALLTOALLV_BASE_ARG_NAMES, info, request,
comm->c_coll->coll_neighbor_alltoallv_init_module);
}
int MPIX_Neighbor_alltoallw_init(NEIGHBOR_ALLTOALLW_BASE_ARGS, INFO_REQ_ARGS)
{
return comm->c_coll->coll_neighbor_alltoallw_init(
NEIGHBOR_ALLTOALLW_BASE_ARG_NAMES, info, request,
comm->c_coll->coll_neighbor_alltoallw_init_module);
}

ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.c (new file, 15 lines)
@@ -0,0 +1,15 @@
/*
* Copyright (c) 2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*
*/
/* A single dummy symbol that keeps this otherwise empty translation unit
 * non-empty; empty objects can trigger warnings from some linkers and
 * archivers. */
void mpiext_pcollreq_dummy(void);

void mpiext_pcollreq_dummy(void)
{
}

@@ -1,5 +1,7 @@
/*
 * Copyright (c) 2017 FUJITSU LIMITED. All rights reserved.
 * Copyright (c) 2018 Research Organization for Information Science
 *                    and Technology (RIST). All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@@ -31,3 +33,30 @@ OMPI_DECLSPEC int MPIX_Neighbor_allgatherv_init(const void *sendbuf, int sendcou
OMPI_DECLSPEC int MPIX_Neighbor_alltoall_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int MPIX_Neighbor_alltoallv_init(const void *sendbuf, const int sendcounts[], const int sdispls[], MPI_Datatype sendtype, void *recvbuf, const int recvcounts[], const int rdispls[], MPI_Datatype recvtype, MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int MPIX_Neighbor_alltoallw_init(const void *sendbuf, const int sendcounts[], const MPI_Aint sdispls[], const MPI_Datatype sendtypes[], void *recvbuf, const int recvcounts[], const MPI_Aint rdispls[], const MPI_Datatype recvtypes[], MPI_Comm comm, MPI_Info info, MPI_Request *request);
/*
* Profiling MPI API
*/
OMPI_DECLSPEC int PMPIX_Allgather_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int PMPIX_Allgatherv_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, const int recvcounts[], const int displs[], MPI_Datatype recvtype, MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int PMPIX_Allreduce_init(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int PMPIX_Alltoall_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int PMPIX_Alltoallv_init(const void *sendbuf, const int sendcounts[], const int sdispls[], MPI_Datatype sendtype, void *recvbuf, const int recvcounts[], const int rdispls[], MPI_Datatype recvtype, MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int PMPIX_Alltoallw_init(const void *sendbuf, const int sendcounts[], const int sdispls[], const MPI_Datatype sendtypes[], void *recvbuf, const int recvcounts[], const int rdispls[], const MPI_Datatype recvtypes[], MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int PMPIX_Barrier_init(MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int PMPIX_Bcast_init(void *buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int PMPIX_Exscan_init(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int PMPIX_Gather_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int PMPIX_Gatherv_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, const int recvcounts[], const int displs[], MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int PMPIX_Reduce_init(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int PMPIX_Reduce_scatter_init(const void *sendbuf, void *recvbuf, const int recvcounts[], MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int PMPIX_Reduce_scatter_block_init(const void *sendbuf, void *recvbuf, int recvcount, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int PMPIX_Scan_init(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int PMPIX_Scatter_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int PMPIX_Scatterv_init(const void *sendbuf, const int sendcounts[], const int displs[], MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int PMPIX_Neighbor_allgather_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int PMPIX_Neighbor_allgatherv_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, const int recvcounts[], const int displs[], MPI_Datatype recvtype, MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int PMPIX_Neighbor_alltoall_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int PMPIX_Neighbor_alltoallv_init(const void *sendbuf, const int sendcounts[], const int sdispls[], MPI_Datatype sendtype, void *recvbuf, const int recvcounts[], const int rdispls[], MPI_Datatype recvtype, MPI_Comm comm, MPI_Info info, MPI_Request *request);
OMPI_DECLSPEC int PMPIX_Neighbor_alltoallw_init(const void *sendbuf, const int sendcounts[], const MPI_Aint sdispls[], const MPI_Datatype sendtypes[], void *recvbuf, const int recvcounts[], const MPI_Aint rdispls[], const MPI_Datatype recvtypes[], MPI_Comm comm, MPI_Info info, MPI_Request *request);
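/* Applications get both the MPIX_ and PMPIX_ prototypes through <mpi-ext.h>.
 * Because the extension may be absent from a given build, portable code can
 * guard the calls; a sketch assuming the conventional per-extension feature
 * macro OMPI_HAVE_MPI_EXT_PCOLLREQ: */
#include <mpi.h>
#if defined(OPEN_MPI)
#include <mpi-ext.h>
#endif

static int barrier_init_if_available(MPI_Comm comm, MPI_Request *req)
{
#if defined(OMPI_HAVE_MPI_EXT_PCOLLREQ) && OMPI_HAVE_MPI_EXT_PCOLLREQ
    return MPIX_Barrier_init(comm, MPI_INFO_NULL, req);
#else
    *req = MPI_REQUEST_NULL;   /* extension not available in this build */
    return MPI_ERR_UNSUPPORTED_OPERATION;
#endif
}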

@@ -0,0 +1,130 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2017 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2015-2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2017 IBM Corporation. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#include "ompi/mca/topo/topo.h"
#include "ompi/mca/topo/base/base.h"
#include "ompi/runtime/ompi_spc.h"
#include "ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPIX_Neighbor_allgather_init = PMPIX_Neighbor_allgather_init
#endif
#define MPIX_Neighbor_allgather_init PMPIX_Neighbor_allgather_init
#endif
static const char FUNC_NAME[] = "MPIX_Neighbor_allgather_init";
int MPIX_Neighbor_allgather_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm, MPI_Info info, MPI_Request *request)
{
int err;
SPC_RECORD(OMPI_SPC_NEIGHBOR_ALLGATHER_INIT, 1);
MEMCHECKER(
int rank;
ptrdiff_t ext;
rank = ompi_comm_rank(comm);
ompi_datatype_type_extent(recvtype, &ext);
memchecker_datatype(recvtype);
memchecker_comm(comm);
/* check whether the actual send buffer is defined. */
if (MPI_IN_PLACE != sendbuf) {
memchecker_datatype(sendtype);
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
}
/* check whether the receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks on both
intracommunicators and intercommunicators */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm) || OMPI_COMM_IS_INTER(comm)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
        } else if (! OMPI_COMM_IS_TOPO(comm)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_TOPOLOGY, FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
err = MPI_ERR_TYPE;
} else if (recvcount < 0) {
err = MPI_ERR_COUNT;
} else if (MPI_IN_PLACE == sendbuf || MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
if( OMPI_COMM_IS_CART(comm) ) {
const mca_topo_base_comm_cart_2_2_0_t *cart = comm->c_topo->mtc.cart;
if( 0 > cart->ndims ) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
}
else if( OMPI_COMM_IS_GRAPH(comm) ) {
int degree;
mca_topo_base_graph_neighbors_count(comm, ompi_comm_rank(comm), &degree);
if( 0 > degree ) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
}
else if( OMPI_COMM_IS_DIST_GRAPH(comm) ) {
const mca_topo_base_comm_dist_graph_2_2_0_t *dist_graph = comm->c_topo->mtc.dist_graph;
int indegree = dist_graph->indegree;
int outdegree = dist_graph->outdegree;
if( indegree < 0 || outdegree < 0 ) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll->coll_neighbor_allgather_init(sendbuf, sendcount, sendtype, recvbuf,
recvcount, recvtype, comm, info, request,
comm->c_coll->coll_neighbor_allgather_init_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
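
/* A hedged usage sketch for the routine above: build a periodic 1-D Cartesian
 * communicator (so every rank has exactly two neighbors) and gather one int
 * from each of them. All names and sizes are hypothetical. */
static void ring_neighbor_allgather(MPI_Comm comm)
{
    int nprocs, me, nbrs[2];
    int dims[1] = {0}, periods[1] = {1};
    MPI_Comm cart;
    MPI_Request req;

    MPI_Comm_size(comm, &nprocs);
    MPI_Dims_create(nprocs, 1, dims);
    MPI_Cart_create(comm, 1, dims, periods, 0, &cart);
    MPI_Comm_rank(cart, &me);

    /* each rank receives 1 int per neighbor; indegree is 2 on a periodic ring */
    MPIX_Neighbor_allgather_init(&me, 1, MPI_INT, nbrs, 1, MPI_INT,
                                 cart, MPI_INFO_NULL, &req);
    MPI_Start(&req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);
    MPI_Request_free(&req);
    MPI_Comm_free(&cart);
}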

@@ -0,0 +1,154 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2017 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2010 University of Houston. All rights reserved.
* Copyright (c) 2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2015-2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2017 IBM Corporation. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#include "ompi/mca/topo/topo.h"
#include "ompi/mca/topo/base/base.h"
#include "ompi/runtime/ompi_spc.h"
#include "ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPIX_Neighbor_allgatherv_init = PMPIX_Neighbor_allgatherv_init
#endif
#define MPIX_Neighbor_allgatherv_init PMPIX_Neighbor_allgatherv_init
#endif
static const char FUNC_NAME[] = "MPIX_Neighbor_allgatherv_init";
int MPIX_Neighbor_allgatherv_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, const int recvcounts[], const int displs[],
MPI_Datatype recvtype, MPI_Comm comm,
MPI_Info info, MPI_Request *request)
{
int i, size, err;
SPC_RECORD(OMPI_SPC_NEIGHBOR_ALLGATHERV_INIT, 1);
MEMCHECKER(
int rank;
ptrdiff_t ext;
rank = ompi_comm_rank(comm);
size = ompi_comm_size(comm);
ompi_datatype_type_extent(recvtype, &ext);
memchecker_datatype(recvtype);
memchecker_comm (comm);
/* check whether the receive buffer is addressable. */
for (i = 0; i < size; i++) {
memchecker_call(&opal_memchecker_base_isaddressable,
(char *)(recvbuf)+displs[i]*ext,
recvcounts[i], recvtype);
}
/* check whether the actual send buffer is defined. */
if (MPI_IN_PLACE != sendbuf) {
memchecker_datatype(sendtype);
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
}
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks on both
intracommunicators and intercommunicators */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm) || OMPI_COMM_IS_INTER(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if (! OMPI_COMM_IS_TOPO(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_TOPOLOGY,
FUNC_NAME);
} else if (MPI_IN_PLACE == sendbuf || MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
}
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
/* We always define the remote group to be the same as the local
group in the case of an intracommunicator, so it's safe to
get the size of the remote group here for both intra- and
intercommunicators */
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
}
}
if (NULL == displs) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_BUFFER, FUNC_NAME);
}
if( OMPI_COMM_IS_CART(comm) ) {
const mca_topo_base_comm_cart_2_2_0_t *cart = comm->c_topo->mtc.cart;
if( 0 > cart->ndims ) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
}
else if( OMPI_COMM_IS_GRAPH(comm) ) {
int degree;
mca_topo_base_graph_neighbors_count(comm, ompi_comm_rank(comm), &degree);
if( 0 > degree ) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
}
else if( OMPI_COMM_IS_DIST_GRAPH(comm) ) {
const mca_topo_base_comm_dist_graph_2_2_0_t *dist_graph = comm->c_topo->mtc.dist_graph;
int indegree = dist_graph->indegree;
int outdegree = dist_graph->outdegree;
if( indegree < 0 || outdegree < 0 ) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll->coll_neighbor_allgatherv_init(sendbuf, sendcount, sendtype,
recvbuf, (int *) recvcounts, (int *) displs,
recvtype, comm, info, request,
comm->c_coll->coll_neighbor_allgatherv_init_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

@@ -0,0 +1,130 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2017 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2014-2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2017 IBM Corporation. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#include "ompi/mca/topo/topo.h"
#include "ompi/mca/topo/base/base.h"
#include "ompi/runtime/ompi_spc.h"
#include "ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPIX_Neighbor_alltoall_init = PMPIX_Neighbor_alltoall_init
#endif
#define MPIX_Neighbor_alltoall_init PMPIX_Neighbor_alltoall_init
#endif
static const char FUNC_NAME[] = "MPIX_Neighbor_alltoall_init";
int MPIX_Neighbor_alltoall_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm, MPI_Info info, MPI_Request *request)
{
size_t sendtype_size, recvtype_size;
int err;
SPC_RECORD(OMPI_SPC_NEIGHBOR_ALLTOALL_INIT, 1);
MEMCHECKER(
memchecker_comm(comm);
if (MPI_IN_PLACE != sendbuf) {
memchecker_datatype(sendtype);
memchecker_call(&opal_memchecker_base_isdefined, (void *)sendbuf, sendcount, sendtype);
}
memchecker_datatype(recvtype);
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks on both
intracommunicators and intercommunicators */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm) || OMPI_COMM_IS_INTER(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if (! OMPI_COMM_IS_TOPO(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_TOPOLOGY,
FUNC_NAME);
} else if (MPI_IN_PLACE == sendbuf || MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG,
FUNC_NAME);
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
OMPI_CHECK_DATATYPE_FOR_RECV(err, recvtype, recvcount);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
ompi_datatype_type_size(sendtype, &sendtype_size);
ompi_datatype_type_size(recvtype, &recvtype_size);
if ((sendtype_size*sendcount) != (recvtype_size*recvcount)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TRUNCATE, FUNC_NAME);
}
if( OMPI_COMM_IS_CART(comm) ) {
const mca_topo_base_comm_cart_2_2_0_t *cart = comm->c_topo->mtc.cart;
if( 0 > cart->ndims ) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
}
else if( OMPI_COMM_IS_GRAPH(comm) ) {
int degree;
mca_topo_base_graph_neighbors_count(comm, ompi_comm_rank(comm), &degree);
if( 0 > degree ) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
}
else if( OMPI_COMM_IS_DIST_GRAPH(comm) ) {
const mca_topo_base_comm_dist_graph_2_2_0_t *dist_graph = comm->c_topo->mtc.dist_graph;
int indegree = dist_graph->indegree;
int outdegree = dist_graph->outdegree;
if( indegree < 0 || outdegree < 0 ) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll->coll_neighbor_alltoall_init(sendbuf, sendcount, sendtype,
recvbuf, recvcount, recvtype, comm,
info, request,
comm->c_coll->coll_neighbor_alltoall_init_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
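
/* Worked example of the sendtype_size*sendcount == recvtype_size*recvcount
 * rule enforced above (hypothetical values): each pairwise exchange must move
 * the same number of bytes in both directions.
 *   send 4 MPI_INT    -> 4 * 4 = 16 bytes per neighbor
 *   recv 2 MPI_DOUBLE -> 2 * 8 = 16 bytes per neighbor   -> accepted
 *   recv 3 MPI_INT    -> 3 * 4 = 12 bytes per neighbor   -> MPI_ERR_TRUNCATE */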

@@ -0,0 +1,154 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2017 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2017 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2014-2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2017 IBM Corporation. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#include "ompi/mca/topo/topo.h"
#include "ompi/mca/topo/base/base.h"
#include "ompi/runtime/ompi_spc.h"
#include "ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPIX_Neighbor_alltoallv_init = PMPIX_Neighbor_alltoallv_init
#endif
#define MPIX_Neighbor_alltoallv_init PMPIX_Neighbor_alltoallv_init
#endif
static const char FUNC_NAME[] = "MPIX_Neighbor_alltoallv_init";
int MPIX_Neighbor_alltoallv_init(const void *sendbuf, const int sendcounts[], const int sdispls[],
MPI_Datatype sendtype, void *recvbuf, const int recvcounts[],
const int rdispls[], MPI_Datatype recvtype, MPI_Comm comm,
MPI_Info info, MPI_Request *request)
{
int i, err;
int indegree, outdegree;
SPC_RECORD(OMPI_SPC_NEIGHBOR_ALLTOALLV_INIT, 1);
MEMCHECKER(
ptrdiff_t recv_ext;
ptrdiff_t send_ext;
memchecker_comm(comm);
        if (MPI_IN_PLACE != sendbuf) {
            memchecker_datatype(sendtype);
            ompi_datatype_type_extent(sendtype, &send_ext);
        }
        memchecker_datatype(recvtype);
        ompi_datatype_type_extent(recvtype, &recv_ext);
err = mca_topo_base_neighbor_count (comm, &indegree, &outdegree);
if (MPI_SUCCESS == err) {
if (MPI_IN_PLACE != sendbuf) {
for ( i = 0; i < outdegree; i++ ) {
/* check if send chunks are defined. */
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(sendbuf)+sdispls[i]*send_ext,
sendcounts[i], sendtype);
}
}
for ( i = 0; i < indegree; i++ ) {
/* check if receive chunks are addressable. */
memchecker_call(&opal_memchecker_base_isaddressable,
(char *)(recvbuf)+rdispls[i]*recv_ext,
recvcounts[i], recvtype);
}
}
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm) || OMPI_COMM_IS_INTER(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if (! OMPI_COMM_IS_TOPO(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_TOPOLOGY,
FUNC_NAME);
} else if ((NULL == sendcounts) || (NULL == sdispls) ||
(NULL == recvcounts) || (NULL == rdispls) ||
MPI_IN_PLACE == sendbuf || MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
err = mca_topo_base_neighbor_count (comm, &indegree, &outdegree);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
for (i = 0; i < outdegree; ++i) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
for (i = 0; i < indegree; ++i) {
OMPI_CHECK_DATATYPE_FOR_RECV(err, recvtype, recvcounts[i]);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
if( OMPI_COMM_IS_CART(comm) ) {
const mca_topo_base_comm_cart_2_2_0_t *cart = comm->c_topo->mtc.cart;
if( 0 > cart->ndims ) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
}
else if( OMPI_COMM_IS_GRAPH(comm) ) {
int degree;
mca_topo_base_graph_neighbors_count(comm, ompi_comm_rank(comm), &degree);
if( 0 > degree ) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
}
else if( OMPI_COMM_IS_DIST_GRAPH(comm) ) {
const mca_topo_base_comm_dist_graph_2_2_0_t *dist_graph = comm->c_topo->mtc.dist_graph;
indegree = dist_graph->indegree;
outdegree = dist_graph->outdegree;
if( indegree < 0 || outdegree < 0 ) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll->coll_neighbor_alltoallv_init(sendbuf, sendcounts, sdispls,
sendtype, recvbuf, recvcounts, rdispls,
recvtype, comm, info, request,
comm->c_coll->coll_neighbor_alltoallv_init_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
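
/* A hedged sketch of driving the routine above from a distributed-graph
 * topology: each rank sends one int to rank+1 and receives one from rank-1
 * (a non-periodic chain). The single-element count/displacement arrays are
 * hypothetical. */
static void chain_neighbor_alltoallv(MPI_Comm comm)
{
    int me, nprocs, one = 1, zero = 0, sendval, recvval;
    MPI_Comm graph;
    MPI_Request req;

    MPI_Comm_rank(comm, &me);
    MPI_Comm_size(comm, &nprocs);
    int src = me - 1, dst = me + 1;
    int indeg = (src >= 0) ? 1 : 0;
    int outdeg = (dst < nprocs) ? 1 : 0;
    MPI_Dist_graph_create_adjacent(comm, indeg, &src, MPI_UNWEIGHTED,
                                   outdeg, &dst, MPI_UNWEIGHTED,
                                   MPI_INFO_NULL, 0, &graph);
    sendval = me;
    MPIX_Neighbor_alltoallv_init(&sendval, &one, &zero, MPI_INT,
                                 &recvval, &one, &zero, MPI_INT,
                                 graph, MPI_INFO_NULL, &req);
    MPI_Start(&req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);
    MPI_Request_free(&req);
    MPI_Comm_free(&graph);
}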

@@ -0,0 +1,154 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2017 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2017 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2014-2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2017 IBM Corporation. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#include "ompi/mca/topo/topo.h"
#include "ompi/mca/topo/base/base.h"
#include "ompi/runtime/ompi_spc.h"
#include "ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPIX_Neighbor_alltoallw_init = PMPIX_Neighbor_alltoallw_init
#endif
#define MPIX_Neighbor_alltoallw_init PMPIX_Neighbor_alltoallw_init
#endif
static const char FUNC_NAME[] = "MPIX_Neighbor_alltoallw_init";
int MPIX_Neighbor_alltoallw_init(const void *sendbuf, const int sendcounts[], const MPI_Aint sdispls[],
const MPI_Datatype sendtypes[], void *recvbuf, const int recvcounts[],
const MPI_Aint rdispls[], const MPI_Datatype recvtypes[], MPI_Comm comm,
MPI_Info info, MPI_Request *request)
{
int i, err;
int indegree, outdegree;
SPC_RECORD(OMPI_SPC_NEIGHBOR_ALLTOALLW_INIT, 1);
MEMCHECKER(
ptrdiff_t recv_ext;
ptrdiff_t send_ext;
memchecker_comm(comm);
err = mca_topo_base_neighbor_count (comm, &indegree, &outdegree);
if (MPI_SUCCESS == err) {
if (MPI_IN_PLACE != sendbuf) {
for ( i = 0; i < outdegree; i++ ) {
memchecker_datatype(sendtypes[i]);
ompi_datatype_type_extent(sendtypes[i], &send_ext);
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(sendbuf)+sdispls[i]*send_ext,
sendcounts[i], sendtypes[i]);
}
}
for ( i = 0; i < indegree; i++ ) {
memchecker_datatype(recvtypes[i]);
ompi_datatype_type_extent(recvtypes[i], &recv_ext);
memchecker_call(&opal_memchecker_base_isaddressable,
(char *)(recvbuf)+rdispls[i]*recv_ext,
recvcounts[i], recvtypes[i]);
}
}
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm) || OMPI_COMM_IS_INTER(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if (! OMPI_COMM_IS_TOPO(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_TOPOLOGY,
FUNC_NAME);
}
if ((NULL == sendcounts) || (NULL == sdispls) || (NULL == sendtypes) ||
(NULL == recvcounts) || (NULL == rdispls) || (NULL == recvtypes) ||
MPI_IN_PLACE == sendbuf || MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
err = mca_topo_base_neighbor_count (comm, &indegree, &outdegree);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
for (i = 0; i < outdegree; ++i) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtypes[i], sendcounts[i]);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
for (i = 0; i < indegree; ++i) {
OMPI_CHECK_DATATYPE_FOR_RECV(err, recvtypes[i], recvcounts[i]);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
if( OMPI_COMM_IS_CART(comm) ) {
const mca_topo_base_comm_cart_2_2_0_t *cart = comm->c_topo->mtc.cart;
if( 0 > cart->ndims ) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
}
else if( OMPI_COMM_IS_GRAPH(comm) ) {
int degree;
mca_topo_base_graph_neighbors_count(comm, ompi_comm_rank(comm), &degree);
if( 0 > degree ) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
}
else if( OMPI_COMM_IS_DIST_GRAPH(comm) ) {
const mca_topo_base_comm_dist_graph_2_2_0_t *dist_graph = comm->c_topo->mtc.dist_graph;
indegree = dist_graph->indegree;
outdegree = dist_graph->outdegree;
if( indegree < 0 || outdegree < 0 ) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll->coll_neighbor_alltoallw_init(sendbuf, sendcounts, sdispls, sendtypes,
recvbuf, recvcounts, rdispls, recvtypes, comm,
info, request,
comm->c_coll->coll_neighbor_alltoallw_init_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpiext/pcollreq/c/profile/Makefile.am (new file, 66 lines)
@@ -0,0 +1,66 @@
#
# Copyright (c) 2017 FUJITSU LIMITED. All rights reserved.
# Copyright (c) 2018 Research Organization for Information Science
# and Technology (RIST). All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
# OMPI_BUILD_MPI_PROFILING is enabled when we want the generated MPIX_* symbols
# to be replaced by PMPIX_*.
# In this directory, we need it to be 1.
AM_CPPFLAGS = -DOMPI_BUILD_MPI_PROFILING=1
include $(top_srcdir)/Makefile.ompi-rules
# Convenience libtool library that will be slurped up into libmpi.la.
noinst_LTLIBRARIES = libpmpiext_pcollreq_c.la
# This is where the top-level header file (that is included in
# <mpi-ext.h>) must be installed.
ompidir = $(ompiincludedir)/ompi/mpiext/pcollreq/c
# This is the header file that is installed.
ompi_HEADERS = pmpiext_pcollreq_c.h
# Sources for the convenience libtool library. Other than the one
# header file, all source files in the extension have no file naming
# conventions.
nodist_libpmpiext_pcollreq_c_la_SOURCES = \
$(ompi_HEADERS) \
pallgather_init.c \
pallgatherv_init.c \
pallreduce_init.c \
palltoall_init.c \
palltoallv_init.c \
palltoallw_init.c \
pbarrier_init.c \
pbcast_init.c \
pexscan_init.c \
pgather_init.c \
pgatherv_init.c \
preduce_init.c \
preduce_scatter_block_init.c \
preduce_scatter_init.c \
pscan_init.c \
pscatter_init.c \
pscatterv_init.c \
\
pneighbor_allgather_init.c \
pneighbor_allgatherv_init.c \
pneighbor_alltoall_init.c \
pneighbor_alltoallv_init.c \
pneighbor_alltoallw_init.c
#
# Sym link in the sources from the real MPI directory
#
$(nodist_libpmpiext_pcollreq_c_la_SOURCES):
$(OMPI_V_LN_S) if test ! -r $@ ; then \
pname=`echo $@ | cut -b '2-'` ; \
$(LN_S) $(top_srcdir)/ompi/mpiext/pcollreq/c/$$pname $@ ; \
fi

ompi/mpiext/pcollreq/c/reduce_init.c (new file, 147 lines)
@@ -0,0 +1,147 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2018 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2015-2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2016 IBM Corporation. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/op/op.h"
#include "ompi/memchecker.h"
#include "ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.h"
#include "ompi/runtime/ompi_spc.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPIX_Reduce_init = PMPIX_Reduce_init
#endif
#define MPIX_Reduce_init PMPIX_Reduce_init
#endif
static const char FUNC_NAME[] = "MPIX_Reduce_init";
int MPIX_Reduce_init(const void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm,
MPI_Info info, MPI_Request *request)
{
int err;
SPC_RECORD(OMPI_SPC_REDUCE_INIT, 1);
MEMCHECKER(
memchecker_datatype(datatype);
memchecker_comm(comm);
if(OMPI_COMM_IS_INTRA(comm)) {
if(ompi_comm_rank(comm) == root) {
/* check whether root's send buffer is defined. */
if (MPI_IN_PLACE == sendbuf) {
memchecker_call(&opal_memchecker_base_isdefined, recvbuf, count, datatype);
} else {
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, count, datatype);
}
/* check whether root's receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, count, datatype);
} else {
/* check whether send buffer is defined on other processes. */
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, count, datatype);
}
} else {
if (MPI_ROOT == root) {
/* check whether root's receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, count, datatype);
} else if (MPI_PROC_NULL != root) {
/* check whether send buffer is defined. */
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, count, datatype);
}
}
);
if (MPI_PARAM_CHECK) {
char *msg;
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
/* Checks for all ranks */
else if (MPI_OP_NULL == op || NULL == op) {
err = MPI_ERR_OP;
} else if (!ompi_op_is_valid(op, datatype, &msg, FUNC_NAME)) {
int ret = OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, msg);
free(msg);
return ret;
} else if ((ompi_comm_rank(comm) != root && MPI_IN_PLACE == sendbuf) ||
(ompi_comm_rank(comm) == root && ((MPI_IN_PLACE == recvbuf) || (sendbuf == recvbuf)))) {
err = MPI_ERR_ARG;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, count);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
/* Intercommunicator errors */
if (!OMPI_COMM_IS_INTRA(comm)) {
if (! ((root >= 0 && root < ompi_comm_remote_size(comm)) ||
MPI_ROOT == root || MPI_PROC_NULL == root)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
}
}
/* Intracommunicator errors */
else {
if (root < 0 || root >= ompi_comm_size(comm)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
}
}
}
/* The MPI standard says that reductions have to have a count of at least 1,
 * but some benchmarks (e.g., IMB) call this function with a count of 0.
 * So handle that case.
 */
if (0 == count) {
*request = &ompi_request_empty;
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
OBJ_RETAIN(op);
err = comm->c_coll->coll_reduce_init(sendbuf, recvbuf, count,
datatype, op, root, comm, info, request,
comm->c_coll->coll_reduce_init_module);
OBJ_RELEASE(op);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
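
/* For context: the buffers passed to MPIX_Reduce_init are bound when the
 * request is created, so the same reduction can be restarted after refilling
 * the send buffer. A hypothetical sketch: */
static void repeated_sum(double *in, double *out, int n, MPI_Comm comm)
{
    MPI_Request req;
    MPIX_Reduce_init(in, out, n, MPI_DOUBLE, MPI_SUM, 0, comm,
                     MPI_INFO_NULL, &req);
    for (int step = 0; step < 100; ++step) {
        /* refill in[0..n-1]; the pointers themselves must stay unchanged */
        MPI_Start(&req);
        MPI_Wait(&req, MPI_STATUS_IGNORE);
    }
    MPI_Request_free(&req);
}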

@@ -0,0 +1,110 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2018 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012 Oak Ridge National Labs. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2015-2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/op/op.h"
#include "ompi/memchecker.h"
#include "ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.h"
#include "ompi/runtime/ompi_spc.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPIX_Reduce_scatter_block_init = PMPIX_Reduce_scatter_block_init
#endif
#define MPIX_Reduce_scatter_block_init PMPIX_Reduce_scatter_block_init
#endif
static const char FUNC_NAME[] = "MPIX_Reduce_scatter_block_init";
int MPIX_Reduce_scatter_block_init(const void *sendbuf, void *recvbuf, int recvcount,
MPI_Datatype datatype, MPI_Op op,
MPI_Comm comm, MPI_Info info, MPI_Request *request)
{
int err;
SPC_RECORD(OMPI_SPC_REDUCE_SCATTER_BLOCK_INIT, 1);
MEMCHECKER(
memchecker_comm(comm);
memchecker_datatype(datatype);
/* check whether the receive buffer of the current process is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf,
recvcount, datatype);
/* check whether the actual send buffer is defined. */
if(MPI_IN_PLACE == sendbuf) {
memchecker_call(&opal_memchecker_base_isdefined, recvbuf, recvcount, datatype);
} else {
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, recvcount, datatype);
}
);
if (MPI_PARAM_CHECK) {
char *msg;
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
/* Unrooted operation; same checks for all ranks on both
intracommunicators and intercommunicators */
else if (MPI_OP_NULL == op || NULL == op) {
err = MPI_ERR_OP;
} else if (!ompi_op_is_valid(op, datatype, &msg, FUNC_NAME)) {
int ret = OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, msg);
free(msg);
return ret;
} else if (MPI_IN_PLACE == recvbuf) {
err = MPI_ERR_ARG;
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, recvcount);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
OBJ_RETAIN(op);
err = comm->c_coll->coll_reduce_scatter_block_init(sendbuf, recvbuf, recvcount,
datatype, op, comm, info, request,
comm->c_coll->coll_reduce_scatter_block_init_module);
OBJ_RELEASE(op);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
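
/* To make the recvcount semantics above concrete, a worked example with
 * hypothetical sizes: with comm_size = 3, recvcount = 2 and MPI_INT, every
 * rank's sendbuf holds 3 * 2 = 6 ints; the vectors are reduced element-wise,
 * and rank r receives elements [2r, 2r+1] of the result into its recvbuf. */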

ompi/mpiext/pcollreq/c/reduce_scatter_init.c (new file, 143 lines)
@@ -0,0 +1,143 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2018 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2015-2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2016 IBM Corporation. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/op/op.h"
#include "ompi/memchecker.h"
#include "ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.h"
#include "ompi/runtime/ompi_spc.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPIX_Reduce_scatter_init = PMPIX_Reduce_scatter_init
#endif
#define MPIX_Reduce_scatter_init PMPIX_Reduce_scatter_init
#endif
static const char FUNC_NAME[] = "MPIX_Reduce_scatter_init";
int MPIX_Reduce_scatter_init(const void *sendbuf, void *recvbuf, const int recvcounts[],
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Info info, MPI_Request *request)
{
int i, err, size, count;
SPC_RECORD(OMPI_SPC_REDUCE_SCATTER_INIT, 1);
MEMCHECKER(
int rank;
int count;
size = ompi_comm_size(comm);
rank = ompi_comm_rank(comm);
        for (count = i = 0; i < size; ++i) {
            count += recvcounts[i];
        }
memchecker_comm(comm);
memchecker_datatype(datatype);
/* check whether the receive buffer of the current process is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf,
recvcounts[rank], datatype);
/* check whether the actual send buffer is defined. */
if(MPI_IN_PLACE == sendbuf) {
memchecker_call(&opal_memchecker_base_isdefined, recvbuf, count, datatype);
} else {
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, count, datatype);
}
);
if (MPI_PARAM_CHECK) {
char *msg;
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
/* Unrooted operation; same checks for all ranks on both
intracommunicators and intercommunicators */
else if (MPI_OP_NULL == op || NULL == op) {
err = MPI_ERR_OP;
} else if (!ompi_op_is_valid(op, datatype, &msg, FUNC_NAME)) {
int ret = OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, msg);
free(msg);
return ret;
} else if (NULL == recvcounts) {
err = MPI_ERR_COUNT;
} else if (MPI_IN_PLACE == recvbuf) {
err = MPI_ERR_ARG;
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
/* Based on the standard each group has to provide the same total
number of elements, so the size of the recvcounts array depends
on the number of participants in the local group. */
size = ompi_comm_size(comm);
for (i = 0; i < size; ++i) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, recvcounts[i]);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
}
    /* The MPI standard says that reductions have to have a count of at least 1,
     * but some benchmarks (e.g., IMB) call this function with a count of 0.
     * So handle that case.
     */
size = ompi_comm_size(comm);
for (count = i = 0; i < size; ++i) {
if (0 == recvcounts[i]) {
++count;
}
}
if (size == count) {
*request = &ompi_request_empty;
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
OBJ_RETAIN(op);
err = comm->c_coll->coll_reduce_scatter_init(sendbuf, recvbuf, recvcounts,
datatype, op, comm, info, request,
comm->c_coll->coll_reduce_scatter_init_module);
OBJ_RELEASE(op);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpiext/pcollreq/c/scan_init.c (new file, 108 lines)
@@ -0,0 +1,108 @@
@ -0,0 +1,108 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2018 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2015-2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/op/op.h"
#include "ompi/memchecker.h"
#include "ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.h"
#include "ompi/runtime/ompi_spc.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPIX_Scan_init = PMPIX_Scan_init
#endif
#define MPIX_Scan_init PMPIX_Scan_init
#endif
static const char FUNC_NAME[] = "MPIX_Scan_init";
int MPIX_Scan_init(const void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm,
MPI_Info info, MPI_Request *request)
{
int err;
SPC_RECORD(OMPI_SPC_SCAN_INIT, 1);
MEMCHECKER(
memchecker_datatype(datatype);
memchecker_comm(comm);
if (MPI_IN_PLACE != sendbuf) {
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, count, datatype);
} else {
memchecker_call(&opal_memchecker_base_isdefined, recvbuf, count, datatype);
}
);
if (MPI_PARAM_CHECK) {
char *msg;
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
/* No intercommunicators allowed! (MPI does not define
MPI_SCAN on intercommunicators) */
else if (OMPI_COMM_IS_INTER(comm)) {
err = MPI_ERR_COMM;
}
/* Unrooted operation; checks for all ranks */
else if (MPI_OP_NULL == op || NULL == op) {
err = MPI_ERR_OP;
} else if (MPI_IN_PLACE == recvbuf) {
err = MPI_ERR_ARG;
} else if (!ompi_op_is_valid(op, datatype, &msg, FUNC_NAME)) {
int ret = OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, msg);
free(msg);
return ret;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, count);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Call the coll component to actually set up the persistent scan */
OBJ_RETAIN(op);
err = comm->c_coll->coll_scan_init(sendbuf, recvbuf, count,
datatype, op, comm,
info, request,
comm->c_coll->coll_scan_init_module);
OBJ_RELEASE(op);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpiext/pcollreq/c/scatter_init.c (new file, 161 lines)
@@ -0,0 +1,161 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2018 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2008 University of Houston. All rights reserved.
* Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2015-2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#include "ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.h"
#include "ompi/runtime/ompi_spc.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPIX_Scatter_init = PMPIX_Scatter_init
#endif
#define MPIX_Scatter_init PMPIX_Scatter_init
#endif
static const char FUNC_NAME[] = "MPIX_Scatter_init";
int MPIX_Scatter_init(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
int root, MPI_Comm comm, MPI_Info info, MPI_Request *request)
{
int err;
SPC_RECORD(OMPI_SPC_SCATTER_INIT, 1);
MEMCHECKER(
memchecker_comm(comm);
if(OMPI_COMM_IS_INTRA(comm)) {
if(ompi_comm_rank(comm) == root) {
memchecker_datatype(sendtype);
/* check whether root's send buffer is defined. */
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
if(MPI_IN_PLACE != recvbuf) {
memchecker_datatype(recvtype);
/* check whether receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
}
} else {
memchecker_datatype(recvtype);
/* check whether receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
}
} else {
if(MPI_ROOT == root) {
memchecker_datatype(sendtype);
/* check whether root's send buffer is defined. */
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
} else if (MPI_PROC_NULL != root) {
memchecker_datatype(recvtype);
/* check whether receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
}
}
);
if (MPI_PARAM_CHECK) {
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if ((ompi_comm_rank(comm) != root && MPI_IN_PLACE == recvbuf) ||
(ompi_comm_rank(comm) == root && MPI_IN_PLACE == sendbuf)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
/* Errors for intracommunicators */
if (OMPI_COMM_IS_INTRA(comm)) {
/* Errors for all ranks */
if ((root >= ompi_comm_size(comm)) || (root < 0)) {
err = MPI_ERR_ROOT;
} else if (MPI_IN_PLACE != recvbuf) {
if (recvcount < 0) {
err = MPI_ERR_COUNT;
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
err = MPI_ERR_TYPE;
}
}
/* Errors for the root. Some of these could have been
combined into compound if statements above, but since
this whole section can be compiled out (or turned off at
run time) for efficiency, it's more clear to separate
them out into individual tests. */
else if (ompi_comm_rank(comm) == root) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
/* Errors for intercommunicators */
else {
if (! ((root >= 0 && root < ompi_comm_remote_size(comm)) ||
MPI_ROOT == root || MPI_PROC_NULL == root)) {
err = MPI_ERR_ROOT;
}
/* Errors for the receivers */
else if (MPI_ROOT != root && MPI_PROC_NULL != root) {
if (recvcount < 0) {
err = MPI_ERR_COUNT;
} else if (MPI_DATATYPE_NULL == recvtype) {
err = MPI_ERR_TYPE;
}
}
/* Errors for the root. Ditto on the comment above -- these
error checks could have been combined above, but let's
make the code easier to read. */
else if (MPI_ROOT == root) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll->coll_scatter_init(sendbuf, sendcount, sendtype, recvbuf,
recvcount, recvtype, root, comm, info, request,
comm->c_coll->coll_scatter_init_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
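The MPI_IN_PLACE checks above encode the usual scatter rule: only the root may pass MPI_IN_PLACE, and only as recvbuf, in which case the root's block simply stays in sendbuf. A sketch of a conforming call sequence — illustrative only, not from the patch, with the 64-element buffer an arbitrary assumption:

#include <stdio.h>
#include <mpi.h>
#include <mpi-ext.h>

int main(int argc, char **argv)
{
    int rank, size, recv = -1;
    int sendbuf[64];              /* assumes size <= 64 for brevity */
    MPI_Request req;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (0 == rank) {
        for (int i = 0; i < size; i++) sendbuf[i] = 100 + i;
        /* Root keeps its own block in sendbuf instead of copying it out. */
        MPIX_Scatter_init(sendbuf, 1, MPI_INT, MPI_IN_PLACE, 1, MPI_INT,
                          0, MPI_COMM_WORLD, MPI_INFO_NULL, &req);
    } else {
        /* Send arguments are ignored on non-root ranks. */
        MPIX_Scatter_init(NULL, 0, MPI_INT, &recv, 1, MPI_INT,
                          0, MPI_COMM_WORLD, MPI_INFO_NULL, &req);
    }

    MPI_Start(&req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);
    if (0 != rank) printf("rank %d received %d\n", rank, recv);

    MPI_Request_free(&req);
    MPI_Finalize();
    return 0;
}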

ompi/mpiext/pcollreq/c/scatterv_init.c (new file)
@@ -0,0 +1,201 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2018 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2015-2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#include "ompi/mpiext/pcollreq/c/mpiext_pcollreq_c.h"
#include "ompi/runtime/ompi_spc.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPIX_Scatterv_init = PMPIX_Scatterv_init
#endif
#define MPIX_Scatterv_init PMPIX_Scatterv_init
#endif
static const char FUNC_NAME[] = "MPIX_Scatterv_init";
int MPIX_Scatterv_init(const void *sendbuf, const int sendcounts[], const int displs[],
MPI_Datatype sendtype, void *recvbuf, int recvcount,
MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Info info, MPI_Request *request)
{
int i, size, err;
SPC_RECORD(OMPI_SPC_SCATTERV_INIT, 1);
MEMCHECKER(
ptrdiff_t ext;
size = ompi_comm_remote_size(comm);
ompi_datatype_type_extent(recvtype, &ext);
memchecker_comm(comm);
if(OMPI_COMM_IS_INTRA(comm)) {
if(ompi_comm_rank(comm) == root) {
memchecker_datatype(sendtype);
/* check whether root's send buffer is defined. */
for (i = 0; i < size; i++) {
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(sendbuf)+displs[i]*ext,
sendcounts[i], sendtype);
}
if(MPI_IN_PLACE != recvbuf) {
memchecker_datatype(recvtype);
/* check whether receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
}
} else {
memchecker_datatype(recvtype);
/* check whether receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
}
} else {
if(MPI_ROOT == root) {
memchecker_datatype(sendtype);
/* check whether root's send buffer is defined. */
for (i = 0; i < size; i++) {
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(sendbuf)+displs[i]*ext,
sendcounts[i], sendtype);
}
} else if (MPI_PROC_NULL != root) {
/* check whether receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
}
}
);
if (MPI_PARAM_CHECK) {
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if ((ompi_comm_rank(comm) != root && MPI_IN_PLACE == recvbuf) ||
(ompi_comm_rank(comm) == root && MPI_IN_PLACE == sendbuf)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
/* Errors for intracommunicators */
if (OMPI_COMM_IS_INTRA(comm)) {
/* Errors for all ranks */
if ((root >= ompi_comm_size(comm)) || (root < 0)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
}
if (MPI_IN_PLACE != recvbuf) {
if (recvcount < 0) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT,
FUNC_NAME);
}
if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE,
FUNC_NAME);
}
}
/* Errors for the root. Some of these could have been
combined into compound if statements above, but since
this whole section can be compiled out (or turned off at
run time) for efficiency, it's more clear to separate
them out into individual tests. */
if (ompi_comm_rank(comm) == root) {
if (NULL == displs) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
if (NULL == sendcounts) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
}
size = ompi_comm_size(comm);
for (i = 0; i < size; ++i) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
}
}
/* Errors for intercommunicators */
else {
if (! ((root >= 0 && root < ompi_comm_remote_size(comm)) ||
MPI_ROOT == root || MPI_PROC_NULL == root)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
}
/* Errors for the receivers */
if (MPI_ROOT != root && MPI_PROC_NULL != root) {
if (recvcount < 0) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
}
if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
}
}
/* Errors for the root. Ditto on the comment above -- these
error checks could have been combined above, but let's
make the code easier to read. */
else if (MPI_ROOT == root) {
if (NULL == displs) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
if (NULL == sendcounts) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
}
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
}
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll->coll_scatterv_init(sendbuf, sendcounts, displs,
sendtype, recvbuf, recvcount, recvtype, root, comm,
info, request, comm->c_coll->coll_scatterv_init_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
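Unlike the fixed-count variant, the checks above validate sendcounts[i] for every rank, so the root must supply both layout arrays in full. A sketch of building them — illustrative only, not from the patch (rank i receives i+1 integers):

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <mpi-ext.h>

int main(int argc, char **argv)
{
    int rank, size;
    MPI_Request req;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int *sendbuf = NULL, *sendcounts = NULL, *displs = NULL;
    int recvcount = rank + 1;
    int *recvbuf = malloc(recvcount * sizeof(int));

    if (0 == rank) {                  /* only the root needs the layout arrays */
        sendcounts = malloc(size * sizeof(int));
        displs     = malloc(size * sizeof(int));
        int total = 0;
        for (int i = 0; i < size; i++) {
            sendcounts[i] = i + 1;
            displs[i]     = total;    /* each block starts where the last ended */
            total        += sendcounts[i];
        }
        sendbuf = malloc(total * sizeof(int));
        for (int i = 0; i < total; i++) sendbuf[i] = i;
    }

    MPIX_Scatterv_init(sendbuf, sendcounts, displs, MPI_INT,
                       recvbuf, recvcount, MPI_INT,
                       0, MPI_COMM_WORLD, MPI_INFO_NULL, &req);
    MPI_Start(&req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);
    printf("rank %d got %d ints starting with %d\n", rank, recvcount, recvbuf[0]);

    MPI_Request_free(&req);
    free(recvbuf); free(sendbuf); free(sendcounts); free(displs);
    MPI_Finalize();
    return 0;
}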

ompi/mpiext/pcollreq/configure.m4
@@ -1,6 +1,8 @@
 # -*- shell-script -*-
 #
 # Copyright (c) 2017 FUJITSU LIMITED. All rights reserved.
+# Copyright (c) 2018 Research Organization for Information Science
+#                    and Technology (RIST). All rights reserved.
 # $COPYRIGHT$
 #
 # Additional copyrights may follow
@@ -11,8 +13,11 @@
 # OMPI_MPIEXT_pcollreq_CONFIG([action-if-found], [action-if-not-found])
 # -----------------------------------------------------------
 AC_DEFUN([OMPI_MPIEXT_pcollreq_CONFIG],[
-    AC_CONFIG_FILES([ompi/mpiext/pcollreq/Makefile])
-    AC_CONFIG_FILES([ompi/mpiext/pcollreq/c/Makefile])
+    AC_CONFIG_FILES([
+        ompi/mpiext/pcollreq/Makefile
+        ompi/mpiext/pcollreq/c/Makefile
+        ompi/mpiext/pcollreq/c/profile/Makefile
+    ])
     AS_IF([test "$ENABLE_pcollreq" = "1" || \
            test "$ENABLE_EXT_ALL" = "1"],

ompi/runtime/ompi_spc.c
@@ -4,6 +4,8 @@
 * reserved.
 *
 * Copyright (c) 2018 Cisco Systems, Inc. All rights reserved
+* Copyright (c) 2018 Research Organization for Information Science
+* and Technology (RIST). All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@@ -52,34 +54,67 @@ static ompi_spc_event_t ompi_spc_events_names[OMPI_SPC_NUM_COUNTERS] = {
 SET_COUNTER_ARRAY(OMPI_SPC_IPROBE, "The number of times MPI_Iprobe was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_BCAST, "The number of times MPI_Bcast was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_IBCAST, "The number of times MPI_Ibcast was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_BCAST_INIT, "The number of times MPIX_Bcast_init was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_REDUCE, "The number of times MPI_Reduce was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_REDUCE_SCATTER, "The number of times MPI_Reduce_scatter was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_REDUCE_SCATTER_BLOCK, "The number of times MPI_Reduce_scatter_block was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_IREDUCE, "The number of times MPI_Ireduce was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_IREDUCE_SCATTER, "The number of times MPI_Ireduce_scatter was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_IREDUCE_SCATTER_BLOCK, "The number of times MPI_Ireduce_scatter_block was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_REDUCE_INIT, "The number of times MPIX_Reduce_init was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_REDUCE_SCATTER_INIT, "The number of times MPIX_Reduce_scatter_init was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_REDUCE_SCATTER_BLOCK_INIT, "The number of times MPIX_Reduce_scatter_block_init was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_ALLREDUCE, "The number of times MPI_Allreduce was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_IALLREDUCE, "The number of times MPI_Iallreduce was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_ALLREDUCE_INIT, "The number of times MPIX_Allreduce_init was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_SCAN, "The number of times MPI_Scan was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_EXSCAN, "The number of times MPI_Exscan was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_ISCAN, "The number of times MPI_Iscan was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_IEXSCAN, "The number of times MPI_Iexscan was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_SCAN_INIT, "The number of times MPIX_Scan_init was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_EXSCAN_INIT, "The number of times MPIX_Exscan_init was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_SCATTER, "The number of times MPI_Scatter was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_SCATTERV, "The number of times MPI_Scatterv was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_ISCATTER, "The number of times MPI_Iscatter was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_ISCATTERV, "The number of times MPI_Iscatterv was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_SCATTER_INIT, "The number of times MPIX_Scatter_init was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_SCATTERV_INIT, "The number of times MPIX_Scatterv_init was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_GATHER, "The number of times MPI_Gather was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_GATHERV, "The number of times MPI_Gatherv was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_IGATHER, "The number of times MPI_Igather was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_IGATHERV, "The number of times MPI_Igatherv was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_GATHER_INIT, "The number of times MPIX_Gather_init was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_GATHERV_INIT, "The number of times MPIX_Gatherv_init was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_ALLTOALL, "The number of times MPI_Alltoall was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_ALLTOALLV, "The number of times MPI_Alltoallv was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_ALLTOALLW, "The number of times MPI_Alltoallw was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_IALLTOALL, "The number of times MPI_Ialltoall was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_IALLTOALLV, "The number of times MPI_Ialltoallv was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_IALLTOALLW, "The number of times MPI_Ialltoallw was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_ALLTOALL_INIT, "The number of times MPIX_Alltoall_init was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_ALLTOALLV_INIT, "The number of times MPIX_Alltoallv_init was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_ALLTOALLW_INIT, "The number of times MPIX_Alltoallw_init was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_NEIGHBOR_ALLTOALL, "The number of times MPI_Neighbor_alltoall was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_NEIGHBOR_ALLTOALLV, "The number of times MPI_Neighbor_alltoallv was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_NEIGHBOR_ALLTOALLW, "The number of times MPI_Neighbor_alltoallw was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_INEIGHBOR_ALLTOALL, "The number of times MPI_Ineighbor_alltoall was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_INEIGHBOR_ALLTOALLV, "The number of times MPI_Ineighbor_alltoallv was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_INEIGHBOR_ALLTOALLW, "The number of times MPI_Ineighbor_alltoallw was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_NEIGHBOR_ALLTOALL_INIT, "The number of times MPIX_Neighbor_alltoall_init was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_NEIGHBOR_ALLTOALLV_INIT, "The number of times MPIX_Neighbor_alltoallv_init was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_NEIGHBOR_ALLTOALLW_INIT, "The number of times MPIX_Neighbor_alltoallw_init was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_ALLGATHER, "The number of times MPI_Allgather was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_ALLGATHERV, "The number of times MPI_Allgatherv was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_IALLGATHER, "The number of times MPI_Iallgather was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_IALLGATHERV, "The number of times MPI_Iallgatherv was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_ALLGATHER_INIT, "The number of times MPIX_Allgather_init was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_ALLGATHERV_INIT, "The number of times MPIX_Allgatherv_init was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_NEIGHBOR_ALLGATHER, "The number of times MPI_Neighbor_allgather was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_NEIGHBOR_ALLGATHERV, "The number of times MPI_Neighbor_allgatherv was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_INEIGHBOR_ALLGATHER, "The number of times MPI_Ineighbor_allgather was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_INEIGHBOR_ALLGATHERV, "The number of times MPI_Ineighbor_allgatherv was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_NEIGHBOR_ALLGATHER_INIT, "The number of times MPIX_Neighbor_allgather_init was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_NEIGHBOR_ALLGATHERV_INIT, "The number of times MPIX_Neighbor_allgatherv_init was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_TEST, "The number of times MPI_Test was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_TESTALL, "The number of times MPI_Testall was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_TESTANY, "The number of times MPI_Testany was called."),
@@ -90,6 +125,7 @@ static ompi_spc_event_t ompi_spc_events_names[OMPI_SPC_NUM_COUNTERS] = {
 SET_COUNTER_ARRAY(OMPI_SPC_WAITSOME, "The number of times MPI_Waitsome was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_BARRIER, "The number of times MPI_Barrier was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_IBARRIER, "The number of times MPI_Ibarrier was called."),
+SET_COUNTER_ARRAY(OMPI_SPC_BARRIER_INIT, "The number of times MPIX_Barrier_init was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_WTIME, "The number of times MPI_Wtime was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_CANCEL, "The number of times MPI_Cancel was called."),
 SET_COUNTER_ARRAY(OMPI_SPC_BYTES_RECEIVED_USER, "The number of bytes received by the user through point-to-point communications. Note: Includes bytes transferred using internal RMA operations."),
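These counters are surfaced through the MPI_T tools interface as performance variables. A sketch of how a tool might discover them — the exact pvar names are an Open MPI implementation detail, so this searches for "spc"/"SPC" rather than hard-coding a name, and counters may additionally need to be enabled at run time (e.g. via the mpi_spc_attach MCA parameter) before they record anything:

#include <stdio.h>
#include <string.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    int provided, num, rank;

    MPI_Init(&argc, &argv);
    MPI_T_init_thread(MPI_THREAD_SINGLE, &provided);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Walk the pvar list and print anything that looks like an SPC. */
    MPI_T_pvar_get_num(&num);
    for (int i = 0; 0 == rank && i < num; i++) {
        char name[256], desc[256];
        int name_len = sizeof(name), desc_len = sizeof(desc);
        int verbosity, var_class, bind, readonly, continuous, atomic;
        MPI_Datatype dt;
        MPI_T_enum et;

        MPI_T_pvar_get_info(i, name, &name_len, &verbosity, &var_class,
                            &dt, &et, desc, &desc_len,
                            &bind, &readonly, &continuous, &atomic);
        if (NULL != strstr(name, "spc") || NULL != strstr(name, "SPC")) {
            printf("%s: %s\n", name, desc);
        }
    }

    MPI_T_finalize();
    MPI_Finalize();
    return 0;
}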

ompi/runtime/ompi_spc.h
@@ -73,34 +73,67 @@ typedef enum ompi_spc_counters {
 OMPI_SPC_IPROBE,
 OMPI_SPC_BCAST,
 OMPI_SPC_IBCAST,
+OMPI_SPC_BCAST_INIT,
 OMPI_SPC_REDUCE,
+OMPI_SPC_REDUCE_SCATTER,
+OMPI_SPC_REDUCE_SCATTER_BLOCK,
 OMPI_SPC_IREDUCE,
 OMPI_SPC_IREDUCE_SCATTER,
 OMPI_SPC_IREDUCE_SCATTER_BLOCK,
+OMPI_SPC_REDUCE_INIT,
+OMPI_SPC_REDUCE_SCATTER_INIT,
+OMPI_SPC_REDUCE_SCATTER_BLOCK_INIT,
 OMPI_SPC_ALLREDUCE,
 OMPI_SPC_IALLREDUCE,
+OMPI_SPC_ALLREDUCE_INIT,
 OMPI_SPC_SCAN,
+OMPI_SPC_EXSCAN,
 OMPI_SPC_ISCAN,
+OMPI_SPC_IEXSCAN,
+OMPI_SPC_SCAN_INIT,
+OMPI_SPC_EXSCAN_INIT,
 OMPI_SPC_SCATTER,
 OMPI_SPC_SCATTERV,
 OMPI_SPC_ISCATTER,
 OMPI_SPC_ISCATTERV,
+OMPI_SPC_SCATTER_INIT,
+OMPI_SPC_SCATTERV_INIT,
 OMPI_SPC_GATHER,
 OMPI_SPC_GATHERV,
 OMPI_SPC_IGATHER,
+OMPI_SPC_IGATHERV,
+OMPI_SPC_GATHER_INIT,
+OMPI_SPC_GATHERV_INIT,
 OMPI_SPC_ALLTOALL,
+OMPI_SPC_ALLTOALLV,
+OMPI_SPC_ALLTOALLW,
 OMPI_SPC_IALLTOALL,
 OMPI_SPC_IALLTOALLV,
 OMPI_SPC_IALLTOALLW,
+OMPI_SPC_ALLTOALL_INIT,
+OMPI_SPC_ALLTOALLV_INIT,
+OMPI_SPC_ALLTOALLW_INIT,
 OMPI_SPC_NEIGHBOR_ALLTOALL,
 OMPI_SPC_NEIGHBOR_ALLTOALLV,
 OMPI_SPC_NEIGHBOR_ALLTOALLW,
+OMPI_SPC_INEIGHBOR_ALLTOALL,
+OMPI_SPC_INEIGHBOR_ALLTOALLV,
+OMPI_SPC_INEIGHBOR_ALLTOALLW,
+OMPI_SPC_NEIGHBOR_ALLTOALL_INIT,
+OMPI_SPC_NEIGHBOR_ALLTOALLV_INIT,
+OMPI_SPC_NEIGHBOR_ALLTOALLW_INIT,
 OMPI_SPC_ALLGATHER,
 OMPI_SPC_ALLGATHERV,
 OMPI_SPC_IALLGATHER,
 OMPI_SPC_IALLGATHERV,
+OMPI_SPC_ALLGATHER_INIT,
+OMPI_SPC_ALLGATHERV_INIT,
 OMPI_SPC_NEIGHBOR_ALLGATHER,
 OMPI_SPC_NEIGHBOR_ALLGATHERV,
+OMPI_SPC_INEIGHBOR_ALLGATHER,
+OMPI_SPC_INEIGHBOR_ALLGATHERV,
+OMPI_SPC_NEIGHBOR_ALLGATHER_INIT,
+OMPI_SPC_NEIGHBOR_ALLGATHERV_INIT,
 OMPI_SPC_TEST,
 OMPI_SPC_TESTALL,
 OMPI_SPC_TESTANY,
@@ -111,6 +144,7 @@ typedef enum ompi_spc_counters {
 OMPI_SPC_WAITSOME,
 OMPI_SPC_BARRIER,
 OMPI_SPC_IBARRIER,
+OMPI_SPC_BARRIER_INIT,
 OMPI_SPC_WTIME,
 OMPI_SPC_CANCEL,
 OMPI_SPC_BYTES_RECEIVED_USER,
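Read together with the ompi_spc.c hunk above, this shows the three places each counter touches. A summary of the pattern, with OMPI_SPC_FOO a hypothetical name rather than anything in the patch:

/* 1. ompi/runtime/ompi_spc.h -- add an enum value ahead of the
 *    terminating OMPI_SPC_NUM_COUNTERS value:
 *        OMPI_SPC_FOO,
 *
 * 2. ompi/runtime/ompi_spc.c -- register its description at the same
 *    relative position in ompi_spc_events_names:
 *        SET_COUNTER_ARRAY(OMPI_SPC_FOO, "The number of times MPIX_Foo was called."),
 *
 * 3. the C binding -- bump the counter on entry, exactly as the new
 *    MPIX_*_init bindings in this patch do:
 *        SPC_RECORD(OMPI_SPC_FOO, 1);
 */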