openmpi/ompi/mpi/c/get_elements.c
Rainer Keller 6c5532072a - Split the datatype engine into two parts: an MPI-specific part in
   OMPI and a language-agnostic part in OPAL. The convertor is completely
   moved into OPAL.  This offers several benefits as described in RFC
   http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
   namely:
    - Fewer basic types (int* and float* types, boolean and wchar)
    - Fixing the naming scheme to ompi nomenclature.
    - Usability outside of the ompi layer.
 - Due to the fixed nature of the simple opal types, their information is
   completely known at compile time and is therefore constified.
 - With fewer datatypes (22), the actual sizes of bit-field types may be
   reduced from 64 to 32 bits, allowing the opal_datatype structure to be
   reorganized, eliminating holes and keeping the data required by the
   convertor (upon send/recv) in one cacheline.
   This has implications for the convertor data structure and other parts
   of the code.
 - Several performance tests have been run; netpipe latency does not
   change with this patch on Linux/x86-64 on the smoky cluster.
 - Extensive tests have been done to verify correctness (no new
   regressions) using:
   1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
      a. running both trunk and ompi-ddt resulted in no differences
         (except that MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB now run
         correctly).
      b. with --enable-memchecker and running under valgrind (one buglet,
         found in the test suite when run with static linking, committed)
   2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
      all passed (except for the dynamic/ tests, which fail as on trunk/MTT)
   3. compilation and usage of HDF5 tests on Jaguar using the PGI and
      PathScale compilers.
   4. compilation and usage on Scicortex.
 - Please note that for the heterogeneous case (-m32 compiled
   binaries/ompi), neither the ompi-trunk nor the ompi-ddt branch would
   successfully launch.

This commit was SVN r21641.
2009-07-13 04:56:31 +00:00
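The split described in the commit message is visible in the code below: the MPI-level datatype reaches its language-agnostic OPAL description through datatype->super.btypes[...], i.e. the OMPI datatype embeds the OPAL datatype as its first member. The following is only a minimal sketch of that layering; apart from the super/btypes access pattern taken from the listing, every name, field, and size here is hypothetical and not the real OMPI/OPAL declaration.

/* Sketch only: the real definitions live in the OPAL and OMPI datatype
 * headers; apart from 'super' and 'btypes', which appear in the listing
 * below, every field name and size here is illustrative. */
#include <stddef.h>
#include <stdint.h>

#define SKETCH_MAX_PREDEFINED 25            /* assumed number of basic types */

typedef struct opal_datatype_sketch {
    uint32_t flags;                         /* bit fields narrowed to 32 bits ...      */
    uint32_t id;
    size_t   size;                          /* ... so convertor-critical data packs    */
    uint32_t btypes[SKETCH_MAX_PREDEFINED]; /* count of each basic type in the type    */
} opal_datatype_sketch;

typedef struct ompi_datatype_sketch {
    opal_datatype_sketch super;             /* language-agnostic part, embedded first  */
    int                  f_to_c_index;      /* MPI-specific bookkeeping (illustrative) */
    char                 name[64];
} ompi_datatype_sketch;

/* MPI-level code can then reach the OPAL description directly, much as
 * MPI_Get_elements below does with datatype->super.btypes[i]: */
static int count_basic_elements(const ompi_datatype_sketch *dt)
{
    int i, total = 0;
    for (i = 0; i < SKETCH_MAX_PREDEFINED; i++)
        total += (int)dt->super.btypes[i];
    return total;
}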


/*
 * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Get_elements = PMPI_Get_elements
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Get_elements";
int MPI_Get_elements(MPI_Status *status, MPI_Datatype datatype, int *count)
{
    int i;
    size_t size;

    OPAL_CR_NOOP_PROGRESS();

    MEMCHECKER(
        if (status != MPI_STATUSES_IGNORE) {
            /*
             * Before checking the complete status, we need to reset the definedness
             * of the MPI_ERROR-field (single-completion calls wait/test).
             */
            opal_memchecker_base_mem_defined(&status->MPI_ERROR, sizeof(int));
            memchecker_status(status);
            memchecker_datatype(datatype);
        }
    );

    if (MPI_PARAM_CHECK) {
        int err = MPI_SUCCESS;
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (NULL == status || MPI_STATUSES_IGNORE == status ||
            MPI_STATUS_IGNORE == status || NULL == count) {
            err = MPI_ERR_ARG;
        } else if (NULL == datatype || MPI_DATATYPE_NULL == datatype) {
            err = MPI_ERR_TYPE;
        } else {
            OMPI_CHECK_DATATYPE_FOR_RECV(err, datatype, 1);
        }
        OMPI_ERRHANDLER_CHECK(err, MPI_COMM_WORLD, err, FUNC_NAME);
    }

    *count = 0;
    if( ompi_datatype_type_size( datatype, &size ) == MPI_SUCCESS ) {
        if( size == 0 ) {
            /* If the size of the datatype is zero let's return a count of zero */
            return MPI_SUCCESS;
        }
        *count = (int)(status->_count / size);
        size = status->_count - (*count) * size;
        /* if basic type we should return the same result as MPI_Get_count */
        if( ompi_datatype_is_predefined(datatype) ) {
            if( size != 0 ) {
                *count = MPI_UNDEFINED;
            }
            return MPI_SUCCESS;
        }
        if( (*count) != 0 ) {
            int total;  /* count the basic elements in the datatype */
            for( i = 4, total = 0; i < OMPI_DATATYPE_MAX_PREDEFINED; i++ )
                total += datatype->super.btypes[i];
            *count = total * (*count);
        }
        if( size > 0 ) {
            if( (i = ompi_datatype_get_element_count( datatype, size )) != -1 )
                *count += i;
            else
                *count = MPI_UNDEFINED;
        }
        return MPI_SUCCESS;
    }
    return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME);
}
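
For context on what the function above computes: MPI_Get_count reports how many whole datatypes were received, while MPI_Get_elements reports how many basic elements arrived, which can differ for derived types when the message covers only part of a type. The program below is an illustrative usage sketch using only standard MPI calls; the datatype, ranks, and counts are chosen for the example and are not taken from the file above.

/* Illustrative only: contrasts MPI_Get_count and MPI_Get_elements for a
 * derived type.  Rank 0 sends 3 ints; rank 1 receives them as a 2-int
 * contiguous type, so the message is one full type plus a partial one. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, buf[4] = {0, 1, 2, 3};
    MPI_Datatype pair;
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Type_contiguous(2, MPI_INT, &pair);
    MPI_Type_commit(&pair);

    if (rank == 0) {
        MPI_Send(buf, 3, MPI_INT, 1, 0, MPI_COMM_WORLD);   /* 3 ints = 1.5 pairs */
    } else if (rank == 1) {
        int count, elements;
        MPI_Recv(buf, 2, pair, 0, 0, MPI_COMM_WORLD, &status);
        MPI_Get_count(&status, pair, &count);        /* MPI_UNDEFINED: not a whole
                                                        number of 'pair' types    */
        MPI_Get_elements(&status, pair, &elements);  /* 3: basic ints received    */
        printf("count=%d elements=%d\n", count, elements);
    }

    MPI_Type_free(&pair);
    MPI_Finalize();
    return 0;
}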