openmpi/ompi/mca/coll/inter/coll_inter_scatter.c
Rainer Keller 6c5532072a - Split the datatype engine into two parts: an MPI-specific part in
   OMPI and a language-agnostic part in OPAL. The convertor is completely
   moved into OPAL. This offers several benefits, as described in RFC
   http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
   namely:
    - Fewer basic types (int* and float* types, boolean and wchar)
    - Naming scheme fixed to match ompi nomenclature.
    - Usability outside of the ompi layer.
 - Due to the fixed nature of the simple opal types, their information is
   completely known at compile time and is therefore constified.
 - With fewer datatypes (22), the actual sizes of bit-field types may be
   reduced from 64 to 32 bits, allowing the opal_datatype structure to be
   reorganized, eliminating holes and keeping the data required by the
   convertor (upon send/recv) in one cacheline (a toy struct-packing
   sketch follows this commit message).
   This has implications for the convertor data structure and other parts
   of the code.
 - Several performance tests have been run; the netpipe latency does not
   change with this patch on Linux/x86-64 on the Smoky cluster.
 - Extensive tests have been done to verify correctness (no new
   regressions) using:
   1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
      a. running both trunk and ompi-ddt resulted in no differences
         (except that MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB now run
         correctly).
      b. with --enable-memchecker and running under valgrind (one buglet
         found in the test suite when run with static builds, committed).
   2. IBM test suite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
      all passed (except the dynamic/ tests, which fail on trunk/MTT as
      well).
   3. compilation and usage of HDF5 tests on Jaguar using the PGI and
      PathScale compilers.
   4. compilation and usage on SiCortex.
 - Please note that for the heterogeneous case (-m32 compiled
   binaries/ompi), neither the ompi-trunk nor the ompi-ddt branch would
   launch successfully.

This commit was SVN r21641.
2009-07-13 04:56:31 +00:00
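
The cacheline point in the commit message is easiest to see with a toy struct-packing sketch. The structs and field names below are made up for illustration only; they are not the actual opal_datatype layout. They merely show how shrinking a 64-bit member to 32 bits and keeping members tightly ordered removes alignment holes and shrinks the structure.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical layout before the change: the 64-bit member forces
 * 8-byte alignment, leaving a 4-byte hole after 'flags' and 6 bytes
 * of tail padding. */
struct before {
    uint32_t flags;      /* 4 bytes, then 4 bytes of padding */
    uint64_t bitfield;   /* 8 bytes, 8-byte aligned */
    uint16_t align;      /* 2 bytes, then 6 bytes of tail padding */
};

/* Hypothetical layout after the change: the bit-field fits in 32 bits,
 * so the structure packs with 4-byte alignment and no interior holes. */
struct after {
    uint32_t flags;      /* 4 bytes */
    uint32_t bitfield;   /* 4 bytes */
    uint16_t align;      /* 2 bytes, then 2 bytes of tail padding */
};

int main(void)
{
    /* On a typical x86-64/LP64 system this prints 24 vs. 12 bytes. */
    printf("before: %zu bytes, after: %zu bytes\n",
           sizeof(struct before), sizeof(struct after));
    return 0;
}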

99 lines
3.0 KiB
C

/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2006-2008 University of Houston.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#include "ompi_config.h"
#include "coll_inter.h"
#include "mpi.h"
#include "ompi/constants.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/mca/coll/coll.h"
#include "ompi/mca/coll/base/coll_tags.h"
#include "ompi/mca/pml/pml.h"

/*
 * scatter_inter
 *
 * Function: - scatter operation
 * Accepts:  - same arguments as MPI_Scatter()
 * Returns:  - MPI_SUCCESS or error code
 */
int
mca_coll_inter_scatter_inter(void *sbuf, int scount,
                             struct ompi_datatype_t *sdtype,
                             void *rbuf, int rcount,
                             struct ompi_datatype_t *rdtype,
                             int root, struct ompi_communicator_t *comm,
                             mca_coll_base_module_t *module)
{
    int rank, size, size_local, err;
    char *ptmp = NULL;
    ptrdiff_t lb, incr;

    /* Initialize */
    rank = ompi_comm_rank(comm);
    size = ompi_comm_remote_size(comm);

    if (MPI_PROC_NULL == root) {
        /* do nothing */
        err = OMPI_SUCCESS;
    } else if (MPI_ROOT != root) {
        /* First process receives the data from root */
        if (0 == rank) {
            err = ompi_datatype_get_extent(rdtype, &lb, &incr);
            if (OMPI_SUCCESS != err) {
                return OMPI_ERROR;
            }
            incr *= rcount;
            size_local = ompi_comm_size(comm->c_local_comm);
            ptmp = (char*)malloc(size_local * incr);
            if (NULL == ptmp) {
                return OMPI_ERR_OUT_OF_RESOURCE;
            }
            err = MCA_PML_CALL(recv(ptmp, rcount*size_local, rdtype,
                                    root, MCA_COLL_BASE_TAG_SCATTER,
                                    comm, MPI_STATUS_IGNORE));
            if (OMPI_SUCCESS != err) {
                return err;
            }
        }
        /* Perform the scatter locally with the first process as root */
        err = comm->c_local_comm->c_coll.coll_scatter(ptmp, rcount, rdtype,
                                                      rbuf, rcount, rdtype,
                                                      0, comm->c_local_comm,
                                                      comm->c_local_comm->c_coll.coll_scatter_module);
        if (NULL != ptmp) {
            free(ptmp);
        }
    } else {
        /* Root sends data to the first process in the remote group */
        err = MCA_PML_CALL(send(sbuf, scount*size, sdtype, 0,
                                MCA_COLL_BASE_TAG_SCATTER,
                                MCA_PML_BASE_SEND_STANDARD, comm));
        if (OMPI_SUCCESS != err) {
            return err;
        }
    }

    return err;
}
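
For context on the root handling in the function above (MPI_PROC_NULL, MPI_ROOT, or the rank of the root in the remote group), the stand-alone sketch below shows how a caller typically invokes MPI_Scatter on an intercommunicator, which is the call this module ends up servicing. It is not part of this file; scatter_over_intercomm and i_am_in_root_group are hypothetical names, and the intercommunicator is assumed to have been created elsewhere (e.g. with MPI_Intercomm_create).

#include <mpi.h>
#include <stdlib.h>

/* Hypothetical helper: scatter one int to each process of the remote
 * group.  'intercomm' is assumed to be a valid intercommunicator and
 * 'i_am_in_root_group' tells which side holds the data. */
static void scatter_over_intercomm(MPI_Comm intercomm, int i_am_in_root_group)
{
    int rank, remote_size, recvval = 0;
    int *sendbuf = NULL;

    MPI_Comm_rank(intercomm, &rank);
    MPI_Comm_remote_size(intercomm, &remote_size);

    if (i_am_in_root_group) {
        /* Exactly one process of the root group passes MPI_ROOT and
         * supplies the send buffer; the others pass MPI_PROC_NULL and
         * their buffer arguments are ignored. */
        int root_arg = (0 == rank) ? MPI_ROOT : MPI_PROC_NULL;
        if (0 == rank) {
            sendbuf = malloc(remote_size * sizeof(int));
            for (int i = 0; i < remote_size; ++i) {
                sendbuf[i] = i;
            }
        }
        MPI_Scatter(sendbuf, 1, MPI_INT, NULL, 0, MPI_INT,
                    root_arg, intercomm);
        free(sendbuf);
    } else {
        /* Every process of the non-root group passes the rank of the
         * root within the remote group and receives one element. */
        MPI_Scatter(NULL, 0, MPI_INT, &recvval, 1, MPI_INT,
                    0, intercomm);
    }
}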