/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2006 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
|
|
|
|
|
2004-06-07 19:33:53 +04:00
|
|
|
#include "ompi_config.h"
|
2004-02-15 00:26:30 +03:00
|
|
|
#include "coll_basic.h"
|
2004-01-12 00:26:55 +03:00
|
|
|
|
|
|
|
#include <stdio.h>
|
|
|
|
|
|
|
|
#include "mpi.h"
|
2006-02-12 04:33:29 +03:00
|
|
|
#include "ompi/constants.h"
|
2005-09-13 00:21:53 +04:00
|
|
|
#include "ompi/mca/coll/coll.h"
|
|
|
|
#include "ompi/mca/coll/base/coll_tags.h"
|
2009-03-04 20:06:51 +03:00
|
|
|
#include "ompi/mca/pml/pml.h"
|
2005-09-13 00:21:53 +04:00
|
|
|
#include "ompi/op/op.h"
|
2004-01-12 00:26:55 +03:00
|
|
|
|
|
|
|
/*
|
2004-06-29 04:02:25 +04:00
|
|
|
* reduce_lin_intra
|
2004-01-12 00:26:55 +03:00
|
|
|
*
|
|
|
|
* Function: - reduction using O(N) algorithm
|
|
|
|
* Accepts: - same as MPI_Reduce()
|
|
|
|
* Returns: - MPI_SUCCESS or error code
|
|
|
|
*/
|
2005-08-10 14:51:42 +04:00
|
|
|
int
|
|
|
|
mca_coll_basic_reduce_lin_intra(void *sbuf, void *rbuf, int count,
|
2005-08-10 21:53:43 +04:00
|
|
|
struct ompi_datatype_t *dtype,
|
|
|
|
struct ompi_op_t *op,
|
2007-08-19 07:37:49 +04:00
|
|
|
int root, struct ompi_communicator_t *comm,
|
2008-07-29 02:40:57 +04:00
|
|
|
mca_coll_base_module_t *module)
|
2004-01-12 00:26:55 +03:00
|
|
|
{
|
2006-10-18 00:20:58 +04:00
|
|
|
int i, rank, err, size;
|
|
|
|
ptrdiff_t true_lb, true_extent, lb, extent;
|
2005-08-10 14:51:42 +04:00
|
|
|
char *free_buffer = NULL;
|
|
|
|
char *pml_buffer = NULL;
|
2005-09-14 00:06:54 +04:00
|
|
|
char *inplace_temp = NULL;
|
2005-08-10 14:51:42 +04:00
|
|
|
char *inbuf;
|
|
|
|
|
|
|
|
/* Initialize */
|
|
|
|
|
|
|
|
rank = ompi_comm_rank(comm);
|
|
|
|
size = ompi_comm_size(comm);
|
|
|
|
|
|
|
|
/* If not root, send data to the root. */
|
|
|
|
|
|
|
|
if (rank != root) {
|
2005-08-10 21:53:43 +04:00
|
|
|
err = MCA_PML_CALL(send(sbuf, count, dtype, root,
|
|
|
|
MCA_COLL_BASE_TAG_REDUCE,
|
|
|
|
MCA_PML_BASE_SEND_STANDARD, comm));
|
|
|
|
return err;
|
2004-06-29 04:02:25 +04:00
|
|
|
}
|
2005-08-10 14:51:42 +04:00
|
|
|
|
|
|
|
/* Root receives and reduces messages. Allocate buffer to receive
|
|
|
|
* messages. This comment applies to all collectives in this basic
|
|
|
|
* module where we allocate a temporary buffer. For the next few
|
|
|
|
* lines of code, it's tremendously complicated how we decided that
|
|
|
|
* this was the Right Thing to do. Sit back and enjoy. And prepare
|
|
|
|
* to have your mind warped. :-)
|
|
|
|
*
|
|
|
|
* Recall some definitions (I always get these backwards, so I'm
|
|
|
|
* going to put them here):
|
|
|
|
*
|
|
|
|
* extent: the length from the lower bound to the upper bound -- may
|
|
|
|
* be considerably larger than the buffer required to hold the data
|
|
|
|
* (or smaller! But it's easiest to think about when it's larger).
|
|
|
|
*
|
|
|
|
* true extent: the exact number of bytes required to hold the data
|
|
|
|
* in the layout pattern in the datatype.
|
|
|
|
*
|
|
|
|
* For example, consider the following buffer (just talking about
|
|
|
|
* LB, extent, and true extent -- extrapolate for UB; i.e., assume
|
|
|
|
* the UB equals exactly where the data ends):
|
|
|
|
*
|
|
|
|
* A B C
|
|
|
|
* --------------------------------------------------------
|
|
|
|
* | | |
|
|
|
|
* --------------------------------------------------------
|
|
|
|
*
|
|
|
|
* There are multiple cases:
|
|
|
|
*
|
|
|
|
* 1. A is what we give to MPI_Send (and friends), and A is where
|
|
|
|
* the data starts, and C is where the data ends. In this case:
|
|
|
|
*
|
|
|
|
* - extent: C-A
|
|
|
|
* - true extent: C-A
|
|
|
|
* - LB: 0
|
|
|
|
*
|
|
|
|
* A C
|
|
|
|
* --------------------------------------------------------
|
|
|
|
* | |
|
|
|
|
* --------------------------------------------------------
|
|
|
|
* <=======================extent=========================>
|
|
|
|
* <======================true extent=====================>
|
|
|
|
*
|
|
|
|
* 2. A is what we give to MPI_Send (and friends), B is where the
|
|
|
|
* data starts, and C is where the data ends. In this case:
|
|
|
|
*
|
|
|
|
* - extent: C-A
|
|
|
|
* - true extent: C-B
|
|
|
|
* - LB: positive
|
|
|
|
*
|
|
|
|
* A B C
|
|
|
|
* --------------------------------------------------------
|
|
|
|
* | | User buffer |
|
|
|
|
* --------------------------------------------------------
|
|
|
|
* <=======================extent=========================>
|
|
|
|
* <===============true extent=============>
|
|
|
|
*
|
|
|
|
* 3. B is what we give to MPI_Send (and friends), A is where the
|
|
|
|
* data starts, and C is where the data ends. In this case:
|
|
|
|
*
|
|
|
|
* - extent: C-A
|
|
|
|
* - true extent: C-A
|
|
|
|
* - LB: negative
|
|
|
|
*
|
|
|
|
* A B C
|
|
|
|
* --------------------------------------------------------
|
|
|
|
* | | User buffer |
|
|
|
|
* --------------------------------------------------------
|
|
|
|
* <=======================extent=========================>
|
|
|
|
* <======================true extent=====================>
|
|
|
|
*
|
|
|
|
* 4. MPI_BOTTOM is what we give to MPI_Send (and friends), B is
|
|
|
|
* where the data starts, and C is where the data ends. In this
|
|
|
|
* case:
|
|
|
|
*
|
|
|
|
* - extent: C-MPI_BOTTOM
|
|
|
|
* - true extent: C-B
|
|
|
|
* - LB: [potentially very large] positive
|
|
|
|
*
|
|
|
|
* MPI_BOTTOM B C
|
|
|
|
* --------------------------------------------------------
|
|
|
|
* | | User buffer |
|
|
|
|
* --------------------------------------------------------
|
|
|
|
* <=======================extent=========================>
|
|
|
|
* <===============true extent=============>
|
|
|
|
*
|
|
|
|
* So in all cases, for a temporary buffer, all we need to malloc()
|
|
|
|
* is a buffer of size true_extent. We therefore need to know two
|
|
|
|
* pointer values: what value to give to MPI_Send (and friends) and
|
|
|
|
* what value to give to free(), because they might not be the same.
|
|
|
|
*
|
|
|
|
* Clearly, what we give to free() is exactly what was returned from
|
|
|
|
* malloc(). That part is easy. :-)
|
|
|
|
*
|
|
|
|
* What we give to MPI_Send (and friends) is a bit more complicated.
|
|
|
|
* Let's take the 4 cases from above:
|
|
|
|
*
|
|
|
|
* 1. If A is what we give to MPI_Send and A is where the data
|
|
|
|
* starts, then clearly we give to MPI_Send what we got back from
|
|
|
|
* malloc().
|
|
|
|
*
|
|
|
|
* 2. If B is what we get back from malloc, but we give A to
|
|
|
|
* MPI_Send, then the buffer range [A,B) represents "dead space"
|
|
|
|
* -- no data will be put there. So it's safe to give B-LB to
|
|
|
|
* MPI_Send. More specifically, the LB is positive, so B-LB is
|
|
|
|
* actually A.
|
|
|
|
*
|
|
|
|
* 3. If A is what we get back from malloc, and B is what we give to
|
|
|
|
* MPI_Send, then the LB is negative, so A-LB will actually equal
|
|
|
|
* B.
|
|
|
|
*
|
|
|
|
* 4. Although this seems like the weirdest case, it's actually
|
|
|
|
* quite similar to case #2 -- the pointer we give to MPI_Send is
|
|
|
|
* smaller than the pointer we got back from malloc().
|
|
|
|
*
|
|
|
|
* Hence, in all cases, we give (return_from_malloc - LB) to MPI_Send.
|
|
|
|
*
|
|
|
|
* This works fine and dandy if we only have (count==1), which we
|
|
|
|
* rarely do. ;-) So we really need to allocate (true_extent +
|
|
|
|
* ((count - 1) * extent)) to get enough space for the rest. This may
|
|
|
|
* be more than is necessary, but it's ok.
|
|
|
|
*
|
|
|
|
* Simple, no? :-)
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, commited)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on Scicortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
ompi_datatype_get_extent(dtype, &lb, &extent);
|
|
|
|
ompi_datatype_get_true_extent(dtype, &true_lb, &true_extent);
|
2005-09-14 00:06:54 +04:00
|
|
|
|
|
|
|
if (MPI_IN_PLACE == sbuf) {
|
|
|
|
sbuf = rbuf;
|
2006-08-24 20:38:08 +04:00
|
|
|
inplace_temp = (char*)malloc(true_extent + (count - 1) * extent);
|
2005-09-14 00:06:54 +04:00
|
|
|
if (NULL == inplace_temp) {
|
|
|
|
return OMPI_ERR_OUT_OF_RESOURCE;
|
|
|
|
}
|
|
|
|
rbuf = inplace_temp - lb;
|
|
|
|
}
|
2005-08-10 21:53:43 +04:00
|
|
|
|
2005-09-14 00:06:54 +04:00
|
|
|
if (size > 1) {
|
2006-08-24 20:38:08 +04:00
|
|
|
free_buffer = (char*)malloc(true_extent + (count - 1) * extent);
|
2005-08-10 21:53:43 +04:00
|
|
|
if (NULL == free_buffer) {
|
|
|
|
return OMPI_ERR_OUT_OF_RESOURCE;
|
|
|
|
}
|
|
|
|
pml_buffer = free_buffer - lb;
|
2004-06-29 04:02:25 +04:00
|
|
|
}
|
2004-01-12 00:26:55 +03:00
|
|
|
|
2005-08-10 14:51:42 +04:00
|
|
|
/* Initialize the receive buffer. */
|
2004-01-12 00:26:55 +03:00
|
|
|
|
2005-08-10 14:51:42 +04:00
|
|
|
if (rank == (size - 1)) {
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, commited)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on Scicortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
err = ompi_datatype_copy_content_same_ddt(dtype, count, (char*)rbuf, (char*)sbuf);
|
2004-01-12 00:26:55 +03:00
|
|
|
} else {
|
2005-08-10 21:53:43 +04:00
|
|
|
err = MCA_PML_CALL(recv(rbuf, count, dtype, size - 1,
|
|
|
|
MCA_COLL_BASE_TAG_REDUCE, comm,
|
|
|
|
MPI_STATUS_IGNORE));
|
2005-08-10 14:51:42 +04:00
|
|
|
}
|
|
|
|
if (MPI_SUCCESS != err) {
|
2005-08-10 21:53:43 +04:00
|
|
|
if (NULL != free_buffer) {
|
|
|
|
free(free_buffer);
|
|
|
|
}
|
|
|
|
return err;
|
2004-01-12 00:26:55 +03:00
|
|
|
}
|
|
|
|
|
2005-08-10 14:51:42 +04:00
|
|
|
/* Loop receiving and calling reduction function (C or Fortran). */
|
|
|
|
|
|
|
|
for (i = size - 2; i >= 0; --i) {
|
2005-08-10 21:53:43 +04:00
|
|
|
if (rank == i) {
|
2006-08-24 20:38:08 +04:00
|
|
|
inbuf = (char*)sbuf;
|
2005-08-10 21:53:43 +04:00
|
|
|
} else {
|
|
|
|
err = MCA_PML_CALL(recv(pml_buffer, count, dtype, i,
|
|
|
|
MCA_COLL_BASE_TAG_REDUCE, comm,
|
|
|
|
MPI_STATUS_IGNORE));
|
|
|
|
if (MPI_SUCCESS != err) {
|
|
|
|
if (NULL != free_buffer) {
|
|
|
|
free(free_buffer);
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
inbuf = pml_buffer;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Perform the reduction */
|
|
|
|
|
|
|
|
ompi_op_reduce(op, inbuf, rbuf, count, dtype);
|
2005-08-10 14:51:42 +04:00
|
|
|
}
|
2004-01-12 00:26:55 +03:00
|
|
|
|
2005-09-14 00:06:54 +04:00
|
|
|
if (NULL != inplace_temp) {
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, commited)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on Scicortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
err = ompi_datatype_copy_content_same_ddt(dtype, count, (char*)sbuf, inplace_temp);
|
2005-09-14 00:06:54 +04:00
|
|
|
free(inplace_temp);
|
|
|
|
}
|
2005-08-10 14:51:42 +04:00
|
|
|
if (NULL != free_buffer) {
|
2005-08-10 21:53:43 +04:00
|
|
|
free(free_buffer);
|
2005-08-10 14:51:42 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* All done */
|
|
|
|
|
|
|
|
return MPI_SUCCESS;
|
2004-01-12 00:26:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
2004-06-29 04:02:25 +04:00
|
|
|
* reduce_log_intra
|
2004-01-12 00:26:55 +03:00
|
|
|
*
|
|
|
|
* Function: - reduction using O(log N) algorithm
|
|
|
|
* Accepts: - same as MPI_Reduce()
|
|
|
|
* Returns: - MPI_SUCCESS or error code
|
2007-02-26 22:15:37 +03:00
|
|
|
*
|
|
|
|
*
|
|
|
|
* Performing reduction on each dimension of the hypercube.
|
|
|
|
* An example for 8 procs (dimensions = 3):
|
|
|
|
*
|
|
|
|
* Stage 1, reduce on X dimension, 1 -> 0, 3 -> 2, 5 -> 4, 7 -> 6
|
|
|
|
*
|
|
|
|
* 6----<---7 proc_0: 0+1
|
|
|
|
* /| /| proc_1: 1
|
|
|
|
* / | / | proc_2: 2+3
|
|
|
|
* / | / | proc_3: 3
|
|
|
|
* 4----<---5 | proc_4: 4+5
|
|
|
|
* | 2--< |---3 proc_5: 5
|
|
|
|
* | / | / proc_6: 6+7
|
|
|
|
* | / | / proc_7: 7
|
|
|
|
* |/ |/
|
|
|
|
* 0----<---1
|
|
|
|
*
|
|
|
|
* Stage 2, reduce on Y dimension, 2 -> 0, 6 -> 4
|
|
|
|
*
|
|
|
|
* 6--------7 proc_0: 0+1+2+3
|
|
|
|
* /| /| proc_1: 1
|
|
|
|
* v | / | proc_2: 2+3
|
|
|
|
* / | / | proc_3: 3
|
|
|
|
* 4--------5 | proc_4: 4+5+6+7
|
|
|
|
* | 2--- |---3 proc_5: 5
|
|
|
|
* | / | / proc_6: 6+7
|
|
|
|
* | v | / proc_7: 7
|
|
|
|
* |/ |/
|
|
|
|
* 0--------1
|
|
|
|
*
|
|
|
|
* Stage 3, reduce on Z dimension, 4 -> 0
|
|
|
|
*
|
|
|
|
* 6--------7 proc_0: 0+1+2+3+4+5+6+7
|
|
|
|
* /| /| proc_1: 1
|
|
|
|
* / | / | proc_2: 2+3
|
|
|
|
* / | / | proc_3: 3
|
|
|
|
* 4--------5 | proc_4: 4+5+6+7
|
|
|
|
* | 2--- |---3 proc_5: 5
|
|
|
|
* v / | / proc_6: 6+7
|
|
|
|
* | / | / proc_7: 7
|
|
|
|
* |/ |/
|
|
|
|
* 0--------1
|
|
|
|
*
|
|
|
|
*
|
2004-01-12 00:26:55 +03:00
|
|
|
*/
|
2005-08-10 14:51:42 +04:00
|
|
|
int
|
|
|
|
mca_coll_basic_reduce_log_intra(void *sbuf, void *rbuf, int count,
|
2005-08-10 21:53:43 +04:00
|
|
|
struct ompi_datatype_t *dtype,
|
|
|
|
struct ompi_op_t *op,
|
2007-08-19 07:37:49 +04:00
|
|
|
int root, struct ompi_communicator_t *comm,
|
2008-07-29 02:40:57 +04:00
|
|
|
mca_coll_base_module_t *module)
|
2004-01-12 00:26:55 +03:00
|
|
|
{
|
2006-10-18 00:20:58 +04:00
|
|
|
int i, size, rank, vrank;
|
|
|
|
int err, peer, dim, mask;
|
|
|
|
ptrdiff_t true_lb, true_extent, lb, extent;
|
2005-08-10 14:51:42 +04:00
|
|
|
char *free_buffer = NULL;
|
|
|
|
char *free_rbuf = NULL;
|
|
|
|
char *pml_buffer = NULL;
|
2005-09-28 19:12:05 +04:00
|
|
|
char *snd_buffer = NULL;
|
2006-08-24 20:38:08 +04:00
|
|
|
char *rcv_buffer = (char*)rbuf;
|
2005-09-28 01:51:55 +04:00
|
|
|
char *inplace_temp = NULL;
|
2005-08-10 14:51:42 +04:00
|
|
|
|
|
|
|
/* JMS Codearound for now -- if the operations is not communative,
|
|
|
|
* just call the linear algorithm. Need to talk to Edgar / George
|
|
|
|
* about fixing this algorithm here to work with non-communative
|
|
|
|
* operations. */
|
|
|
|
|
|
|
|
if (!ompi_op_is_commute(op)) {
|
2005-08-10 21:53:43 +04:00
|
|
|
return mca_coll_basic_reduce_lin_intra(sbuf, rbuf, count, dtype,
|
2007-08-19 07:37:49 +04:00
|
|
|
op, root, comm, module);
|
2005-08-10 14:51:42 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Some variables */
|
|
|
|
size = ompi_comm_size(comm);
|
|
|
|
rank = ompi_comm_rank(comm);
|
|
|
|
vrank = ompi_op_is_commute(op) ? (rank - root + size) % size : rank;
|
|
|
|
dim = comm->c_cube_dim;
|
|
|
|
|
|
|
|
/* Allocate the incoming and resulting message buffers. See lengthy
|
|
|
|
* rationale above. */
|
|
|
|
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, commited)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on Scicortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
ompi_datatype_get_extent(dtype, &lb, &extent);
|
|
|
|
ompi_datatype_get_true_extent(dtype, &true_lb, &true_extent);
|
2005-09-14 00:06:54 +04:00
|
|
|
|
2006-08-24 20:38:08 +04:00
|
|
|
free_buffer = (char*)malloc(true_extent + (count - 1) * extent);
|
2005-09-14 00:06:54 +04:00
|
|
|
if (NULL == free_buffer) {
|
|
|
|
return OMPI_ERR_OUT_OF_RESOURCE;
|
|
|
|
}
|
|
|
|
|
|
|
|
pml_buffer = free_buffer - lb;
|
|
|
|
/* read the comment about commutative operations (few lines down
|
|
|
|
* the page) */
|
|
|
|
if (ompi_op_is_commute(op)) {
|
|
|
|
rcv_buffer = pml_buffer;
|
|
|
|
}
|
|
|
|
|
2005-09-28 01:51:55 +04:00
|
|
|
/* Allocate sendbuf in case the MPI_IN_PLACE option has been used. See lengthy
|
|
|
|
* rationale above. */
|
|
|
|
|
|
|
|
if (MPI_IN_PLACE == sbuf) {
|
2006-08-24 20:38:08 +04:00
|
|
|
inplace_temp = (char*)malloc(true_extent + (count - 1) * extent);
|
2005-09-28 01:51:55 +04:00
|
|
|
if (NULL == inplace_temp) {
|
2009-05-05 17:54:55 +04:00
|
|
|
err = OMPI_ERR_OUT_OF_RESOURCE;
|
|
|
|
goto cleanup_and_return;
|
2005-09-28 01:51:55 +04:00
|
|
|
}
|
|
|
|
sbuf = inplace_temp - lb;
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, commited)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on Scicortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
err = ompi_datatype_copy_content_same_ddt(dtype, count, (char*)sbuf, (char*)rbuf);
|
2005-09-28 01:51:55 +04:00
|
|
|
}
|
2006-08-24 20:38:08 +04:00
|
|
|
snd_buffer = (char*)sbuf;
|
2005-09-28 01:51:55 +04:00
|
|
|
|
2005-09-14 00:06:54 +04:00
|
|
|
if (rank != root && 0 == (vrank & 1)) {
|
|
|
|
/* root is the only one required to provide a valid rbuf.
|
|
|
|
* Assume rbuf is invalid for all other ranks, so fix it up
|
|
|
|
* here to be valid on all non-leaf ranks */
|
2006-08-24 20:38:08 +04:00
|
|
|
free_rbuf = (char*)malloc(true_extent + (count - 1) * extent);
|
2005-09-14 00:06:54 +04:00
|
|
|
if (NULL == free_rbuf) {
|
2009-05-05 17:54:55 +04:00
|
|
|
err = OMPI_ERR_OUT_OF_RESOURCE;
|
|
|
|
goto cleanup_and_return;
|
2005-08-10 21:53:43 +04:00
|
|
|
}
|
2005-09-14 00:06:54 +04:00
|
|
|
rbuf = free_rbuf - lb;
|
2005-08-10 14:51:42 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Loop over cube dimensions. High processes send to low ones in the
|
|
|
|
* dimension. */
|
|
|
|
|
|
|
|
for (i = 0, mask = 1; i < dim; ++i, mask <<= 1) {
|
|
|
|
|
2005-08-10 21:53:43 +04:00
|
|
|
/* A high-proc sends to low-proc and stops. */
|
|
|
|
if (vrank & mask) {
|
|
|
|
peer = vrank & ~mask;
|
|
|
|
if (ompi_op_is_commute(op)) {
|
|
|
|
peer = (peer + root) % size;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = MCA_PML_CALL(send(snd_buffer, count,
|
|
|
|
dtype, peer, MCA_COLL_BASE_TAG_REDUCE,
|
|
|
|
MCA_PML_BASE_SEND_STANDARD, comm));
|
|
|
|
if (MPI_SUCCESS != err) {
|
2009-05-05 17:54:55 +04:00
|
|
|
goto cleanup_and_return;
|
2005-08-10 21:53:43 +04:00
|
|
|
}
|
2006-08-24 20:38:08 +04:00
|
|
|
snd_buffer = (char*)rbuf;
|
2005-08-10 21:53:43 +04:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* A low-proc receives, reduces, and moves to a higher
|
|
|
|
* dimension. */
|
|
|
|
|
|
|
|
else {
|
|
|
|
peer = vrank | mask;
|
|
|
|
if (peer >= size) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (ompi_op_is_commute(op)) {
|
|
|
|
peer = (peer + root) % size;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Most of the time (all except the first one for commutative
|
|
|
|
* operations) we receive in the user provided buffer
|
|
|
|
* (rbuf). But the exception is here to allow us to dont have
|
|
|
|
* to copy from the sbuf to a temporary location. If the
|
|
|
|
* operation is commutative we dont care in which order we
|
|
|
|
* apply the operation, so for the first time we can receive
|
|
|
|
* the data in the pml_buffer and then apply to operation
|
|
|
|
* between this buffer and the user provided data. */
|
|
|
|
|
|
|
|
err = MCA_PML_CALL(recv(rcv_buffer, count, dtype, peer,
|
|
|
|
MCA_COLL_BASE_TAG_REDUCE, comm,
|
|
|
|
MPI_STATUS_IGNORE));
|
|
|
|
if (MPI_SUCCESS != err) {
|
2009-05-05 17:54:55 +04:00
|
|
|
goto cleanup_and_return;
|
2005-08-10 21:53:43 +04:00
|
|
|
}
|
|
|
|
/* Perform the operation. The target is always the user
|
|
|
|
* provided buffer We do the operation only if we receive it
|
|
|
|
* not in the user buffer */
|
|
|
|
if (snd_buffer != sbuf) {
|
|
|
|
/* the target buffer is the locally allocated one */
|
|
|
|
ompi_op_reduce(op, rcv_buffer, pml_buffer, count, dtype);
|
|
|
|
} else {
|
|
|
|
/* If we're commutative, we don't care about the order of
|
|
|
|
* operations and we can just reduce the operations now.
|
|
|
|
* If we are not commutative, we have to copy the send
|
|
|
|
* buffer into a temp buffer (pml_buffer) and then reduce
|
|
|
|
* what we just received against it. */
|
|
|
|
if (!ompi_op_is_commute(op)) {
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, commited)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on Scicortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
ompi_datatype_copy_content_same_ddt(dtype, count, pml_buffer,
|
2006-08-24 20:38:08 +04:00
|
|
|
(char*)sbuf);
|
2005-08-10 21:53:43 +04:00
|
|
|
ompi_op_reduce(op, rbuf, pml_buffer, count, dtype);
|
|
|
|
} else {
|
|
|
|
ompi_op_reduce(op, sbuf, pml_buffer, count, dtype);
|
|
|
|
}
|
|
|
|
/* now we have to send the buffer containing the computed data */
|
|
|
|
snd_buffer = pml_buffer;
|
|
|
|
/* starting from now we always receive in the user
|
|
|
|
* provided buffer */
|
2006-08-24 20:38:08 +04:00
|
|
|
rcv_buffer = (char*)rbuf;
|
2005-08-10 21:53:43 +04:00
|
|
|
}
|
|
|
|
}
|
2005-08-10 14:51:42 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Get the result to the root if needed. */
|
|
|
|
err = MPI_SUCCESS;
|
|
|
|
if (0 == vrank) {
|
2005-08-10 21:53:43 +04:00
|
|
|
if (root == rank) {
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, commited)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on Scicortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
ompi_datatype_copy_content_same_ddt(dtype, count, (char*)rbuf, snd_buffer);
|
2005-08-10 21:53:43 +04:00
|
|
|
} else {
|
|
|
|
err = MCA_PML_CALL(send(snd_buffer, count,
|
|
|
|
dtype, root, MCA_COLL_BASE_TAG_REDUCE,
|
|
|
|
MCA_PML_BASE_SEND_STANDARD, comm));
|
|
|
|
}
|
2005-08-10 14:51:42 +04:00
|
|
|
} else if (rank == root) {
|
2005-08-10 21:53:43 +04:00
|
|
|
err = MCA_PML_CALL(recv(rcv_buffer, count, dtype, 0,
|
|
|
|
MCA_COLL_BASE_TAG_REDUCE,
|
|
|
|
comm, MPI_STATUS_IGNORE));
|
|
|
|
if (rcv_buffer != rbuf) {
|
|
|
|
ompi_op_reduce(op, rcv_buffer, rbuf, count, dtype);
|
|
|
|
}
|
2005-08-10 14:51:42 +04:00
|
|
|
}
|
|
|
|
|
2009-05-05 17:54:55 +04:00
|
|
|
cleanup_and_return:
|
2005-09-28 01:51:55 +04:00
|
|
|
if (NULL != inplace_temp) {
|
|
|
|
free(inplace_temp);
|
|
|
|
}
|
2005-08-10 14:51:42 +04:00
|
|
|
if (NULL != free_buffer) {
|
2005-08-10 21:53:43 +04:00
|
|
|
free(free_buffer);
|
2005-08-10 14:51:42 +04:00
|
|
|
}
|
|
|
|
if (NULL != free_rbuf) {
|
2005-08-10 21:53:43 +04:00
|
|
|
free(free_rbuf);
|
2005-08-10 14:51:42 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* All done */
|
|
|
|
|
|
|
|
return err;
|
2004-06-29 04:02:25 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* reduce_lin_inter
|
|
|
|
*
|
|
|
|
* Function: - reduction using O(N) algorithm
|
|
|
|
* Accepts: - same as MPI_Reduce()
|
|
|
|
* Returns: - MPI_SUCCESS or error code
|
|
|
|
*/
|
2005-08-10 14:51:42 +04:00
|
|
|
int
|
|
|
|
mca_coll_basic_reduce_lin_inter(void *sbuf, void *rbuf, int count,
|
2005-08-10 21:53:43 +04:00
|
|
|
struct ompi_datatype_t *dtype,
|
|
|
|
struct ompi_op_t *op,
|
2007-08-19 07:37:49 +04:00
|
|
|
int root, struct ompi_communicator_t *comm,
|
2008-07-29 02:40:57 +04:00
|
|
|
mca_coll_base_module_t *module)
|
2004-06-29 04:02:25 +04:00
|
|
|
{
|
2008-08-06 18:07:20 +04:00
|
|
|
int i, err, size;
|
2006-10-18 00:20:58 +04:00
|
|
|
ptrdiff_t true_lb, true_extent, lb, extent;
|
2005-08-10 14:51:42 +04:00
|
|
|
char *free_buffer = NULL;
|
|
|
|
char *pml_buffer = NULL;
|
|
|
|
|
|
|
|
/* Initialize */
|
|
|
|
size = ompi_comm_remote_size(comm);
|
|
|
|
|
|
|
|
if (MPI_PROC_NULL == root) {
|
2005-08-10 21:53:43 +04:00
|
|
|
/* do nothing */
|
|
|
|
err = OMPI_SUCCESS;
|
2005-08-10 14:51:42 +04:00
|
|
|
} else if (MPI_ROOT != root) {
|
2005-08-10 21:53:43 +04:00
|
|
|
/* If not root, send data to the root. */
|
|
|
|
err = MCA_PML_CALL(send(sbuf, count, dtype, root,
|
|
|
|
MCA_COLL_BASE_TAG_REDUCE,
|
|
|
|
MCA_PML_BASE_SEND_STANDARD, comm));
|
2005-08-10 14:51:42 +04:00
|
|
|
} else {
|
2005-08-10 21:53:43 +04:00
|
|
|
/* Root receives and reduces messages */
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, commited)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on Scicortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
ompi_datatype_get_extent(dtype, &lb, &extent);
|
|
|
|
ompi_datatype_get_true_extent(dtype, &true_lb, &true_extent);
|
2005-08-10 21:53:43 +04:00
|
|
|
|
2006-08-24 20:38:08 +04:00
|
|
|
free_buffer = (char*)malloc(true_extent + (count - 1) * extent);
|
2005-08-10 21:53:43 +04:00
|
|
|
if (NULL == free_buffer) {
|
|
|
|
return OMPI_ERR_OUT_OF_RESOURCE;
|
|
|
|
}
|
|
|
|
pml_buffer = free_buffer - lb;
|
|
|
|
|
|
|
|
|
|
|
|
/* Initialize the receive buffer. */
|
|
|
|
err = MCA_PML_CALL(recv(rbuf, count, dtype, 0,
|
|
|
|
MCA_COLL_BASE_TAG_REDUCE, comm,
|
|
|
|
MPI_STATUS_IGNORE));
|
|
|
|
if (MPI_SUCCESS != err) {
|
|
|
|
if (NULL != free_buffer) {
|
|
|
|
free(free_buffer);
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Loop receiving and calling reduction function (C or Fortran). */
|
|
|
|
for (i = 1; i < size; i++) {
|
|
|
|
err = MCA_PML_CALL(recv(pml_buffer, count, dtype, i,
|
|
|
|
MCA_COLL_BASE_TAG_REDUCE, comm,
|
|
|
|
MPI_STATUS_IGNORE));
|
|
|
|
if (MPI_SUCCESS != err) {
|
|
|
|
if (NULL != free_buffer) {
|
|
|
|
free(free_buffer);
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Perform the reduction */
|
|
|
|
ompi_op_reduce(op, pml_buffer, rbuf, count, dtype);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (NULL != free_buffer) {
|
|
|
|
free(free_buffer);
|
|
|
|
}
|
2005-08-10 14:51:42 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* All done */
|
|
|
|
return err;
|
2004-06-29 04:02:25 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * reduce_log_inter
 *
 * Function: - reduction using O(log N) algorithm (NOT implemented:
 *             this entry point is a stub)
 * Accepts: - same as MPI_Reduce()
 * Returns: - OMPI_ERR_NOT_IMPLEMENTED (always)
 */
int
mca_coll_basic_reduce_log_inter(void *sbuf, void *rbuf, int count,
                                struct ompi_datatype_t *dtype,
                                struct ompi_op_t *op,
                                int root, struct ompi_communicator_t *comm,
                                mca_coll_base_module_t *module)
{
    /* Logarithmic inter-communicator reduce has never been written;
     * callers must fall back to the linear variant. */
    return OMPI_ERR_NOT_IMPLEMENTED;
}
|