openmpi/ompi/mpi/c/scan.c
Rainer Keller 6c5532072a - Split the datatype engine into two parts: an MPI-specific part in
   OMPI and a language-agnostic part in OPAL. The convertor is moved
   entirely into OPAL. This offers several benefits, as described in RFC
   http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
   namely:
    - Fewer basic types (int* and float* types, boolean, and wchar).
    - A naming scheme fixed to match OMPI nomenclature.
    - Usability outside of the OMPI layer.
 - Because simple OPAL types are fixed, their information is completely
   known at compile time and can therefore be constified.
 - With fewer datatypes (22), the sizes of the bit-field types can be
   reduced from 64 to 32 bits, allowing the opal_datatype structure to
   be reorganized, eliminating holes and keeping the data required by
   the convertor (upon send/recv) in one cacheline (see the sketch
   after this list). This has implications for the convertor data
   structure and other parts of the code.
 - Several performance tests have been run; the NetPIPE latency does
   not change with this patch on Linux/x86-64 on the smoky cluster.
 - Extensive tests have been done to verify correctness (no new
   regressions) using:
   1. mpi_test_suite on Linux/x86-64 using clean ompi-trunk and
      ompi-ddt:
      a. running both trunk and ompi-ddt resulted in no differences
         (except that MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB now run
         correctly).
      b. with --enable-memchecker and running under valgrind (one
         buglet found in the test suite when run with static builds;
         fix committed).
   2. IBM test suite on Linux/x86-64 using clean ompi-trunk and
      ompi-ddt: all passed (except for the dynamic/ tests, which
      failed, as on trunk/MTT).
   3. compilation and usage of HDF5 tests on Jaguar using the PGI and
      PathScale compilers.
   4. compilation and usage on SiCortex.
 - Please note that for the heterogeneous case (-m32 compiled
   binaries/ompi), neither the ompi-trunk nor the ompi-ddt branch
   launches successfully.
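
   As a rough illustration of the cacheline point above (field names
   invented for illustration; this is not the actual opal_datatype
   layout), 32-bit flag and bitmap fields keep a descriptor's hot
   data well inside a single 64-byte cacheline:

      #include <stdint.h>
      #include <stddef.h>

      /* Hypothetical packed descriptor: with only 22 predefined basic
       * types, a 32-bit bitmap covers them all, and the fields the
       * convertor needs on send/recv total just 32 bytes. */
      struct example_dt_desc {
          uint32_t  flags;     /* contiguous, predefined, committed, ... */
          uint32_t  bdt_used;  /* bitmap: one bit per basic type used */
          size_t    size;      /* total packed size in bytes */
          ptrdiff_t true_lb;   /* true lower bound of the data */
          ptrdiff_t true_ub;   /* true upper bound of the data */
      };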

This commit was SVN r21641.
2009-07-13 04:56:31 +00:00


/*
 * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2006      Cisco Systems, Inc.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/op/op.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Scan = PMPI_Scan
#endif

#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif

static const char FUNC_NAME[] = "MPI_Scan";
int MPI_Scan(void *sendbuf, void *recvbuf, int count,
             MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
    int err;

    MEMCHECKER(
        memchecker_datatype(datatype);
        memchecker_comm(comm);
        if (MPI_IN_PLACE != sendbuf) {
            memchecker_call(&opal_memchecker_base_isdefined, sendbuf, count, datatype);
        } else {
            memchecker_call(&opal_memchecker_base_isdefined, recvbuf, count, datatype);
        }
    );
    if (MPI_PARAM_CHECK) {
        char *msg;
        err = MPI_SUCCESS;
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_comm_invalid(comm)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
                                          FUNC_NAME);
        }

        /* No intercommunicators allowed! (MPI does not define
           MPI_SCAN on intercommunicators) */

        else if (OMPI_COMM_IS_INTER(comm)) {
            err = MPI_ERR_COMM;
        }

        /* Unrooted operation; checks for all ranks */

        else if (MPI_OP_NULL == op || NULL == op) {
            err = MPI_ERR_OP;
        } else if (MPI_IN_PLACE == recvbuf) {
            err = MPI_ERR_ARG;
        } else if (!ompi_op_is_valid(op, datatype, &msg, FUNC_NAME)) {
            int ret = OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, msg);
            free(msg);
            return ret;
        } else {
            OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, count);
        }
        OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
    }
    /* Do we need to do anything? (MPI says that reductions have to
       have a count of at least 1, but at least IMB calls reduce with
       a count of 0 -- blah!) */

    if (0 == count) {
        return MPI_SUCCESS;
    }

    OPAL_CR_ENTER_LIBRARY();

    /* Call the coll component to actually perform the scan */

    OBJ_RETAIN(op);
    err = comm->c_coll.coll_scan(sendbuf, recvbuf, count,
                                 datatype, op, comm,
                                 comm->c_coll.coll_scan_module);
    OBJ_RELEASE(op);
    OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
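
For reference, a minimal standalone usage sketch (not part of this
file): each rank contributes one integer, and the inclusive prefix
reduction leaves rank i holding the sum 0 + 1 + ... + i.

   #include <stdio.h>
   #include <mpi.h>

   int main(int argc, char **argv)
   {
       int rank, prefix;

       MPI_Init(&argc, &argv);
       MPI_Comm_rank(MPI_COMM_WORLD, &rank);

       /* Inclusive prefix sum over all ranks in MPI_COMM_WORLD */
       MPI_Scan(&rank, &prefix, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

       printf("rank %d: inclusive prefix sum = %d\n", rank, prefix);

       MPI_Finalize();
       return 0;
   }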