/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2006 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2008      University of Houston. All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "ompi_config.h"
#include "coll_hierarch.h"

#include <stdio.h>

#include "mpi.h"
#include "ompi/communicator/communicator.h"
#include "ompi/op/op.h"

#include "ompi/mca/coll/base/base.h"
#include "ompi/mca/coll/base/coll_tags.h"

#include "ompi/mca/bml/base/base.h"
#include "ompi/mca/pml/pml.h"

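/* NOTE: The *_tmp routines in this file are simple linear collectives
 * implemented directly on top of PML point-to-point operations. Given
 * their names and their fixed use of rank 0 as the root in allreduce
 * and allgather, they appear to be stand-in implementations for use
 * while the hierarchy data of this component is being set up; this is
 * an inference from the code, not documented behavior. */

/* Allreduce: a reduce to rank 0 followed by a broadcast from rank 0. */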
int mca_coll_hierarch_allreduce_tmp(void *sbuf, void *rbuf, int count,
                                    struct ompi_datatype_t *dtype,
                                    struct ompi_op_t *op,
                                    struct ompi_communicator_t *comm)
{
    int ret;

    ret = mca_coll_hierarch_reduce_tmp ( sbuf, rbuf, count, dtype, op, 0, comm);
    if ( OMPI_SUCCESS != ret ) {
        return ret;
    }
    ret = mca_coll_hierarch_bcast_tmp ( rbuf, count, dtype, 0, comm);
    return ret;
}

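/* Allgather: a gather to rank 0 followed by a broadcast of the
 * assembled buffer (size * rcount elements) from rank 0. */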
int mca_coll_hierarch_allgather_tmp(void *sbuf, int scount,
                                    struct ompi_datatype_t *sdtype,
                                    void *rbuf, int rcount,
                                    struct ompi_datatype_t *rdtype,
                                    struct ompi_communicator_t *comm)
{
    int ret;
    int size = ompi_comm_size (comm);

    ret = mca_coll_hierarch_gather_tmp ( sbuf, scount, sdtype, rbuf, rcount,
                                         rdtype, 0, comm);
    if ( OMPI_SUCCESS != ret ) {
        return ret;
    }
    ret = mca_coll_hierarch_bcast_tmp ( rbuf, rcount*size, rdtype, 0, comm);
    return ret;
}

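/* Broadcast: linear algorithm. Every non-root rank posts a single
 * matching recv; the root posts one standard send per non-root rank,
 * i.e. size-1 sequential sends at the root. */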
int mca_coll_hierarch_bcast_tmp ( void *buf, int count, struct ompi_datatype_t *dtype,
                                  int root, struct ompi_communicator_t *comm)
{
    int err = OMPI_SUCCESS;
    int rank = ompi_comm_rank ( comm );

    if ( rank != root ) {
        err = MCA_PML_CALL(recv(buf, count, dtype, root,
                                MCA_COLL_BASE_TAG_BCAST,
                                comm, MPI_STATUS_IGNORE));
        if ( OMPI_SUCCESS != err ) {
            return err;
        }
    }
    else {
        int i;
        int size = ompi_comm_size ( comm );

        for ( i=0; i<size; i++ ) {
            if ( i == root ) {
                continue;
            }
            err = MCA_PML_CALL(send(buf, count, dtype, i,
                                    MCA_COLL_BASE_TAG_BCAST,
                                    MCA_PML_BASE_SEND_STANDARD, comm));
            if ( OMPI_SUCCESS != err ) {
                return err;
            }
        }
    }

    return err;
}

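/* Reduce: linear algorithm. Non-root ranks send their contribution to
 * the root. The root copies its own data into rbuf, then receives the
 * contribution of every other rank into a scratch buffer and folds it
 * into rbuf with ompi_op_reduce. The reduction proceeds from rank
 * size-1 down to 0, which may matter for non-commutative operations. */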
int mca_coll_hierarch_reduce_tmp(void *sbuf, void *rbuf, int count,
                                 struct ompi_datatype_t *dtype,
                                 struct ompi_op_t *op,
                                 int root, struct ompi_communicator_t *comm)
{
    int i, err, size;
    char *pml_buffer = NULL;
    ptrdiff_t extent, lb;
    int rank = ompi_comm_rank(comm);

    /* If not root, send data to the root. */
    if (rank != root) {
        err = MCA_PML_CALL(send(sbuf, count, dtype, root,
                                MCA_COLL_BASE_TAG_REDUCE,
                                MCA_PML_BASE_SEND_STANDARD, comm));
        return err;
    }

    size = ompi_comm_size(comm);

    ompi_datatype_get_extent(dtype, &lb, &extent);
    pml_buffer = (char*)malloc(count * extent);
    if (NULL == pml_buffer) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    err = ompi_datatype_copy_content_same_ddt(dtype, count, (char*)rbuf, (char*)sbuf);
    if (MPI_SUCCESS != err) {
        goto exit;
    }

    /* Loop receiving and calling reduction function (C or Fortran). */
    for (i = size - 1; i >= 0; --i) {
        if (rank == i) {
            continue;
        } else {
            err = MCA_PML_CALL(recv(pml_buffer, count, dtype, i,
                                    MCA_COLL_BASE_TAG_REDUCE, comm,
                                    MPI_STATUS_IGNORE));
            if (MPI_SUCCESS != err) {
                goto exit;
            }
        }

        /* Perform the reduction */
        ompi_op_reduce(op, pml_buffer, rbuf, count, dtype);
    }

 exit:
    if (NULL != pml_buffer) {
        free(pml_buffer);
    }
    return err;
}

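/* Gather: linear algorithm. Non-root ranks send scount elements to the
 * root. The root receives each remote contribution directly into its
 * slot of rbuf (rank i lands at offset i * rcount * extent) and copies
 * its own data locally with ompi_datatype_sndrcv, unless MPI_IN_PLACE
 * was given. */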
int mca_coll_hierarch_gather_tmp(void *sbuf, int scount,
                                 struct ompi_datatype_t *sdtype,
                                 void *rbuf, int rcount,
                                 struct ompi_datatype_t *rdtype,
                                 int root, struct ompi_communicator_t *comm)
{
    int i;
    int err;
    int rank;
    int size;
    char *ptmp;
    MPI_Aint incr;
    MPI_Aint extent;
    MPI_Aint lb;

    size = ompi_comm_size(comm);
    rank = ompi_comm_rank(comm);

    /* Everyone but root sends data and returns. */
    if (rank != root) {
        return MCA_PML_CALL(send(sbuf, scount, sdtype, root,
                                 MCA_COLL_BASE_TAG_GATHER,
                                 MCA_PML_BASE_SEND_STANDARD, comm));
    }

    /* I am the root, loop receiving the data. */
    ompi_datatype_get_extent(rdtype, &lb, &extent);
    incr = extent * rcount;
    for (i = 0, ptmp = (char *) rbuf; i < size; ++i, ptmp += incr) {
        if (i == rank) {
            if (MPI_IN_PLACE != sbuf) {
                err = ompi_datatype_sndrcv(sbuf, scount, sdtype,
                                           ptmp, rcount, rdtype);
            } else {
                err = MPI_SUCCESS;
            }
        } else {
            err = MCA_PML_CALL(recv(ptmp, rcount, rdtype, i,
                                    MCA_COLL_BASE_TAG_GATHER,
                                    comm, MPI_STATUS_IGNORE));
        }
        if (MPI_SUCCESS != err) {
            return err;
        }
    }

    /* All done */
    return MPI_SUCCESS;
}