/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University.
 *                         All rights reserved.
 * Copyright (c) 2004-2006 The Trustees of the University of Tennessee.
 *                         All rights reserved.
 * Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2007      Los Alamos National Security, LLC.  All rights
 *                         reserved.
 * Copyright (c) 2009      Sun Microsystems, Inc.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

/*
 * utility functions for dealing with remote datatype and op structures
 */
|
|
|
|
|
|
|
|
#include "ompi_config.h"
|
|
|
|
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, commited)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on Scicortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
#include "opal/datatype/opal_convertor.h"
|
|
|
|
#include "opal/datatype/opal_convertor_internal.h"
|
|
|
|
#include "opal/datatype/opal_datatype_prototypes.h"
|
|
|
|
|
2007-07-14 00:46:12 +04:00
|
|
|
#include "ompi/op/op.h"
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, commited)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on Scicortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
#include "ompi/datatype/ompi_datatype.h"
|
|
|
|
#include "ompi/datatype/ompi_datatype_internal.h"
|
2007-07-14 00:46:12 +04:00
|
|
|
|
|
|
|
#include "osc_base_obj_convert.h"
|
2008-04-16 17:29:55 +04:00
|
|
|
#include "ompi/memchecker.h"
|
2007-07-14 00:46:12 +04:00
|
|
|
|
2007-07-16 20:29:51 +04:00
|
|
|
int
|
|
|
|
ompi_osc_base_get_primitive_type_info(ompi_datatype_t *datatype,
|
|
|
|
ompi_datatype_t **prim_datatype,
|
|
|
|
uint32_t *prim_count)
|
|
|
|
{
|
2011-02-25 23:43:17 +03:00
|
|
|
ompi_datatype_t *primitive_datatype = NULL;
|
|
|
|
size_t datatype_size, primitive_size, primitive_count;
|
2007-07-16 20:29:51 +04:00
|
|
|
|
2011-02-25 23:43:17 +03:00
|
|
|
primitive_datatype = ompi_datatype_get_single_predefined_type_from_args(datatype);
|
|
|
|
if( NULL == primitive_datatype ) {
|
|
|
|
*prim_count = 0;
|
|
|
|
return OMPI_SUCCESS;
|
2007-07-16 20:29:51 +04:00
|
|
|
}
|
2011-02-25 23:43:17 +03:00
|
|
|
ompi_datatype_type_size( datatype, &datatype_size );
|
|
|
|
ompi_datatype_type_size( primitive_datatype, &primitive_size );
|
|
|
|
primitive_count = datatype_size / primitive_size;
|
|
|
|
#if OPAL_ENABLE_DEBUG
|
|
|
|
assert( 0 == (datatype_size % primitive_size) );
|
|
|
|
#endif /* OPAL_ENABLE_DEBUG */
|
|
|
|
|
|
|
|
/* We now have the count as a size_t, convert it to an uint32_t */
|
2007-07-16 20:29:51 +04:00
|
|
|
*prim_datatype = primitive_datatype;
|
2011-02-25 23:43:17 +03:00
|
|
|
*prim_count = (uint32_t)primitive_count;
|
2007-07-16 20:29:51 +04:00
|
|
|
|
|
|
|
return OMPI_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-07-14 00:46:12 +04:00
|
|
|
struct ompi_osc_base_convertor_t {
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, commited)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on Scicortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
opal_convertor_t convertor;
|
2007-07-14 00:46:12 +04:00
|
|
|
ompi_op_t *op;
|
|
|
|
ompi_datatype_t *datatype;
|
|
|
|
};
|
|
|
|
typedef struct ompi_osc_base_convertor_t ompi_osc_base_convertor_t;
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, commited)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on Scicortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
static OBJ_CLASS_INSTANCE(ompi_osc_base_convertor_t, opal_convertor_t, NULL, NULL);
|
2007-07-14 00:46:12 +04:00
|
|
|
|
2007-07-14 01:26:12 +04:00
|
|
|
#define COPY_TYPE( TYPENAME, TYPE, COUNT ) \
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, commited)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on Scicortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
static int copy_##TYPENAME( opal_convertor_t *pConvertor, uint32_t count, \
|
2007-07-14 00:46:12 +04:00
|
|
|
char* from, size_t from_len, ptrdiff_t from_extent, \
|
2007-07-14 01:26:12 +04:00
|
|
|
char* to, size_t to_len, ptrdiff_t to_extent, \
|
|
|
|
ptrdiff_t *advance) \
|
|
|
|
{ \
|
|
|
|
size_t remote_TYPE_size = sizeof(TYPE) * (COUNT); /* TODO */ \
|
|
|
|
size_t local_TYPE_size = (COUNT) * sizeof(TYPE); \
|
|
|
|
ompi_osc_base_convertor_t *osc_convertor = \
|
|
|
|
(ompi_osc_base_convertor_t*) pConvertor; \
|
|
|
|
\
|
|
|
|
if( (from_extent == (ptrdiff_t)local_TYPE_size) && \
|
|
|
|
(to_extent == (ptrdiff_t)remote_TYPE_size) ) { \
|
|
|
|
ompi_op_reduce(osc_convertor->op, from, to, count, osc_convertor->datatype); \
|
|
|
|
} else { \
|
|
|
|
uint32_t i; \
|
|
|
|
for( i = 0; i < count; i++ ) { \
|
|
|
|
ompi_op_reduce(osc_convertor->op, from, to, 1, osc_convertor->datatype); \
|
|
|
|
to += to_extent; \
|
|
|
|
from += from_extent; \
|
|
|
|
} \
|
|
|
|
} \
|
|
|
|
*advance = count * from_extent; \
|
|
|
|
return count; \
|
2007-07-14 00:46:12 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* set up copy functions for the basic C MPI data types */
|
|
|
|
COPY_TYPE( char, char, 1 )
|
|
|
|
COPY_TYPE( short, short, 1 )
|
|
|
|
COPY_TYPE( int, int, 1 )
|
|
|
|
COPY_TYPE( long, long, 1 )
|
|
|
|
COPY_TYPE( long_long, long long, 1 )
|
|
|
|
COPY_TYPE( float, float, 1 )
|
|
|
|
COPY_TYPE( double, double, 1 )
|
|
|
|
COPY_TYPE( long_double, long double, 1 )
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, commited)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on Scicortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
COPY_TYPE( complex_float, ompi_mpi_cxx_cplex, 1 )
|
|
|
|
COPY_TYPE( complex_double, ompi_mpi_cxx_dblcplex, 1 )
|
|
|
|
COPY_TYPE( complex_long_double, ompi_mpi_cxx_ldblcplex, 1 )
|
2007-07-14 00:46:12 +04:00
|
|
|
|
|
|
|
/* table of predefined copy functions - one for each MPI type */
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, commited)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on Scicortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
/* XXX TODO Adapt to new layout */
|
|
|
|
static conversion_fct_t ompi_osc_base_copy_functions[OMPI_DATATYPE_MAX_PREDEFINED] = {
|
2007-07-14 00:46:12 +04:00
|
|
|
(conversion_fct_t)NULL, /* DT_LOOP */
|
|
|
|
(conversion_fct_t)NULL, /* DT_END_LOOP */
|
|
|
|
(conversion_fct_t)NULL, /* DT_LB */
|
|
|
|
(conversion_fct_t)NULL, /* DT_UB */
|
|
|
|
(conversion_fct_t)copy_char, /* DT_CHAR */
|
|
|
|
(conversion_fct_t)copy_char, /* DT_CHARACTER */
|
|
|
|
(conversion_fct_t)copy_char, /* DT_UNSIGNED_CHAR */
|
|
|
|
(conversion_fct_t)copy_char, /* DT_SIGNED_CHAR */
|
|
|
|
(conversion_fct_t)copy_char, /* DT_BYTE */
|
|
|
|
(conversion_fct_t)copy_short, /* DT_SHORT */
|
|
|
|
(conversion_fct_t)copy_short, /* DT_UNSIGNED_SHORT */
|
|
|
|
(conversion_fct_t)copy_int, /* DT_INT */
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, commited)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on Scicortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
(conversion_fct_t)copy_int, /* DT_UNSIGNED */
|
2007-07-14 00:46:12 +04:00
|
|
|
(conversion_fct_t)copy_long, /* DT_LONG */
|
|
|
|
(conversion_fct_t)copy_long, /* DT_UNSIGNED_LONG */
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, commited)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on Scicortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
(conversion_fct_t)copy_long_long, /* DT_LONG_LONG */
|
2007-07-14 00:46:12 +04:00
|
|
|
(conversion_fct_t)copy_long_long, /* DT_UNSIGNED_LONG_LONG */
|
|
|
|
(conversion_fct_t)copy_float, /* DT_FLOAT */
|
|
|
|
(conversion_fct_t)copy_double, /* DT_DOUBLE */
|
|
|
|
(conversion_fct_t)copy_long_double, /* DT_LONG_DOUBLE */
|
|
|
|
(conversion_fct_t)NULL, /* DT_PACKED */
|
|
|
|
(conversion_fct_t)NULL, /* DT_WCHAR */
|
|
|
|
#if SIZEOF_BOOL == SIZEOF_CHAR
|
|
|
|
(conversion_fct_t)copy_char, /* DT_CXX_BOOL */
|
|
|
|
#elif SIZEOF_BOOL == SIZEOF_SHORT
|
|
|
|
(conversion_fct_t)copy_short, /* DT_CXX_BOOL */
|
|
|
|
#elif SIZEOF_BOOL == SIZEOF_INT
|
|
|
|
(conversion_fct_t)copy_int, /* DT_CXX_BOOL */
|
|
|
|
#elif SIZEOF_BOOL == SIZEOF_LONG
|
|
|
|
(conversion_fct_t)copy_long, /* DT_CXX_BOOL */
|
|
|
|
#else
|
|
|
|
(conversion_fct_t)NULL, /* DT_CXX_BOOL */
|
|
|
|
#endif
|
2009-06-01 23:02:34 +04:00
|
|
|
#if OMPI_SIZEOF_FORTRAN_LOGICAL == SIZEOF_CHAR
|
2007-07-14 00:46:12 +04:00
|
|
|
(conversion_fct_t)copy_char, /* DT_LOGIC */
|
2009-06-01 23:02:34 +04:00
|
|
|
#elif OMPI_SIZEOF_FORTRAN_LOGICAL == SIZEOF_SHORT
|
2007-07-14 00:46:12 +04:00
|
|
|
(conversion_fct_t)copy_short, /* DT_LOGIC */
|
2009-06-01 23:02:34 +04:00
|
|
|
#elif OMPI_SIZEOF_FORTRAN_LOGICAL == SIZEOF_INT
|
2007-07-14 00:46:12 +04:00
|
|
|
(conversion_fct_t)copy_int, /* DT_LOGIC */
|
2009-06-01 23:02:34 +04:00
|
|
|
#elif OMPI_SIZEOF_FORTRAN_LOGICAL == SIZEOF_LONG
|
2007-07-14 00:46:12 +04:00
|
|
|
(conversion_fct_t)copy_long, /* DT_LOGIC */
|
|
|
|
#else
|
|
|
|
(conversion_fct_t)NULL, /* DT_LOGIC */
|
|
|
|
#endif
|
|
|
|
(conversion_fct_t)copy_int, /* DT_INTEGER */
|
|
|
|
(conversion_fct_t)copy_float, /* DT_REAL */
|
|
|
|
(conversion_fct_t)copy_double, /* DT_DBLPREC */
|
|
|
|
(conversion_fct_t)copy_complex_float, /* DT_COMPLEX_FLOAT */
|
|
|
|
(conversion_fct_t)copy_complex_double, /* DT_COMPLEX_DOUBLE */
|
|
|
|
(conversion_fct_t)copy_complex_long_double, /* DT_COMPLEX_LONG_DOUBLE */
|
|
|
|
(conversion_fct_t)NULL, /* DT_2INT */
|
|
|
|
(conversion_fct_t)NULL, /* DT_2INTEGER */
|
|
|
|
(conversion_fct_t)NULL, /* DT_2REAL */
|
|
|
|
(conversion_fct_t)NULL, /* DT_2DBLPREC */
|
|
|
|
(conversion_fct_t)NULL, /* DT_2COMPLEX */
|
|
|
|
(conversion_fct_t)NULL, /* DT_2DOUBLE_COMPLEX */
|
|
|
|
(conversion_fct_t)NULL, /* DT_FLOAT_INT */
|
|
|
|
(conversion_fct_t)NULL, /* DT_DOUBLE_INT */
|
|
|
|
(conversion_fct_t)NULL, /* DT_LONG_DOUBLE_INT */
|
|
|
|
(conversion_fct_t)NULL, /* DT_LONG_INT */
|
|
|
|
(conversion_fct_t)NULL, /* DT_SHORT_INT */
|
|
|
|
(conversion_fct_t)NULL, /* DT_UNAVAILABLE */
|
|
|
|
};
|
|
|
|
|
|
|
|
int
|
|
|
|
ompi_osc_base_process_op(void *outbuf,
|
|
|
|
void *inbuf,
|
|
|
|
size_t inbuflen,
|
|
|
|
struct ompi_datatype_t *datatype,
|
|
|
|
int count,
|
|
|
|
ompi_op_t *op)
|
|
|
|
{
|
2009-02-24 20:17:33 +03:00
|
|
|
if (op == &ompi_mpi_op_replace.op) {
|
2007-07-14 00:46:12 +04:00
|
|
|
return OMPI_ERR_NOT_SUPPORTED;
|
|
|
|
}
|
|
|
|
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, commited)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on Scicortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
if (ompi_datatype_is_predefined(datatype)) {
|
2007-07-14 00:46:12 +04:00
|
|
|
ompi_op_reduce(op, inbuf, outbuf, count, datatype);
|
|
|
|
} else {
|
|
|
|
struct ompi_datatype_t *primitive_datatype = NULL;
|
|
|
|
ompi_osc_base_convertor_t convertor;
|
|
|
|
struct iovec iov;
|
|
|
|
uint32_t iov_count = 1;
|
|
|
|
size_t max_data;
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, commited)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on Scicortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
struct opal_convertor_master_t master = {NULL, 0, 0, 0, {0, }, NULL};
|
2007-07-14 00:46:12 +04:00
|
|
|
|
2011-02-25 23:43:17 +03:00
|
|
|
primitive_datatype = ompi_datatype_get_single_predefined_type_from_args(datatype);
|
2007-07-14 00:46:12 +04:00
|
|
|
|
|
|
|
/* create convertor */
|
|
|
|
OBJ_CONSTRUCT(&convertor, ompi_osc_base_convertor_t);
|
|
|
|
convertor.op = op;
|
|
|
|
convertor.datatype = primitive_datatype;
|
|
|
|
|
|
|
|
/* initialize convertor */
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, committed)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on SiCortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
opal_convertor_copy_and_prepare_for_recv(ompi_proc_local()->proc_convertor,
|
|
|
|
&(datatype->super),
|
2007-07-14 00:46:12 +04:00
|
|
|
count,
|
|
|
|
outbuf,
|
|
|
|
0,
|
|
|
|
&convertor.convertor);
|
|
|
|
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, committed)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on SiCortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
memcpy(&master, convertor.convertor.master, sizeof(struct opal_convertor_master_t));
|
2007-07-14 00:46:12 +04:00
|
|
|
master.next = convertor.convertor.master;
|
|
|
|
master.pFunctions = (conversion_fct_t*) &ompi_osc_base_copy_functions;
|
|
|
|
convertor.convertor.master = &master;
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, committed)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on SiCortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
convertor.convertor.fAdvance = opal_unpack_general;
|
2007-07-14 00:46:12 +04:00
|
|
|
|
2008-04-16 17:29:55 +04:00
|
|
|
iov.iov_len = inbuflen;
|
2007-07-14 00:46:12 +04:00
|
|
|
iov.iov_base = (IOVBASE_TYPE*) inbuf;
|
2008-04-16 17:29:55 +04:00
|
|
|
max_data = iov.iov_len;
|
|
|
|
MEMCHECKER(
|
|
|
|
memchecker_convertor_call(&opal_memchecker_base_mem_defined,
|
|
|
|
&convertor.convertor);
|
|
|
|
);
|
- Split the datatype engine into two parts: an MPI specific part in
OMPI
and a language agnostic part in OPAL. The convertor is completely
moved into OPAL. This offers several benefits as described in RFC
http://www.open-mpi.org/community/lists/devel/2009/07/6387.php
namely:
- Fewer basic types (int* and float* types, boolean and wchar
- Fixing naming scheme to ompi-nomenclature.
- Usability outside of the ompi-layer.
- Due to the fixed nature of simple opal types, their information is
completely
known at compile time and therefore constified
- With fewer datatypes (22), the actual sizes of bit-field types may be
reduced
from 64 to 32 bits, allowing reorganizing the opal_datatype
structure, eliminating holes and keeping data required in convertor
(upon send/recv) in one cacheline...
This has implications to the convertor-datastructure and other parts
of the code.
- Several performance tests have been run, the netpipe latency does not
change with
this patch on Linux/x86-64 on the smoky cluster.
- Extensive tests have been done to verify correctness (no new
regressions) using:
1. mpi_test_suite on linux/x86-64 using clean ompi-trunk and
ompi-ddt:
a. running both trunk and ompi-ddt resulted in no differences
(except for MPI_SHORT_INT and MPI_TYPE_MIX_LB_UB do now run
correctly).
b. with --enable-memchecker and running under valgrind (one buglet
when run with static found in test-suite, committed)
2. ibm testsuite on linux/x86-64 using clean ompi-trunk and ompi-ddt:
all passed (except for the dynamic/ tests failed!! as trunk/MTT)
3. compilation and usage of HDF5 tests on Jaguar using PGI and
PathScale compilers.
4. compilation and usage on SiCortex.
- Please note, that for the heterogeneous case, (-m32 compiled
binaries/ompi), neither
ompi-trunk, nor ompi-ddt branch would successfully launch.
This commit was SVN r21641.
2009-07-13 08:56:31 +04:00
|
|
|
opal_convertor_unpack(&convertor.convertor,
|
2007-07-14 00:46:12 +04:00
|
|
|
&iov,
|
|
|
|
&iov_count,
|
|
|
|
&max_data);
|
2008-04-16 17:29:55 +04:00
|
|
|
MEMCHECKER(
|
|
|
|
memchecker_convertor_call(&opal_memchecker_base_mem_noaccess,
|
|
|
|
&convertor.convertor);
|
|
|
|
);
|
2007-07-14 00:46:12 +04:00
|
|
|
OBJ_DESTRUCT(&convertor);
|
|
|
|
}
|
|
|
|
|
|
|
|
return OMPI_SUCCESS;
|
|
|
|
}
|
2014-02-25 21:36:43 +04:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
int
|
|
|
|
ompi_osc_base_sndrcv_op(void *origin,
|
|
|
|
int32_t origin_count,
|
|
|
|
struct ompi_datatype_t *origin_dt,
|
|
|
|
void *target,
|
|
|
|
int32_t target_count,
|
|
|
|
struct ompi_datatype_t *target_dt,
|
|
|
|
ompi_op_t *op)
|
|
|
|
{
|
|
|
|
if (ompi_datatype_is_predefined(origin_dt) && origin_dt == target_dt) {
|
|
|
|
ompi_op_reduce(op, origin, target, origin_count, origin_dt);
|
|
|
|
} else {
|
|
|
|
ompi_osc_base_convertor_t recv_convertor;
|
|
|
|
opal_convertor_t send_convertor;
|
|
|
|
struct iovec iov;
|
|
|
|
uint32_t iov_count = 1;
|
|
|
|
size_t max_data;
|
|
|
|
int completed, length;
|
|
|
|
struct opal_convertor_master_t master = {NULL, 0, 0, 0, {0, }, NULL};
|
|
|
|
|
|
|
|
/* initialize send convertor */
|
|
|
|
OBJ_CONSTRUCT(&send_convertor, opal_convertor_t);
|
|
|
|
opal_convertor_copy_and_prepare_for_send(ompi_proc_local()->proc_convertor,
|
|
|
|
&(origin_dt->super), origin_count, origin, 0,
|
|
|
|
&send_convertor);
|
|
|
|
|
|
|
|
/* initialize recv convertor */
|
|
|
|
OBJ_CONSTRUCT(&recv_convertor, ompi_osc_base_convertor_t);
|
|
|
|
recv_convertor.op = op;
|
|
|
|
recv_convertor.datatype = ompi_datatype_get_single_predefined_type_from_args(target_dt);
|
|
|
|
opal_convertor_copy_and_prepare_for_recv(ompi_proc_local()->proc_convertor,
|
|
|
|
&(target_dt->super), target_count,
|
|
|
|
target, 0, &recv_convertor.convertor);
|
|
|
|
|
|
|
|
memcpy(&master, recv_convertor.convertor.master, sizeof(struct opal_convertor_master_t));
|
|
|
|
master.next = recv_convertor.convertor.master;
|
|
|
|
master.pFunctions = (conversion_fct_t*) &ompi_osc_base_copy_functions;
|
|
|
|
recv_convertor.convertor.master = &master;
|
|
|
|
recv_convertor.convertor.fAdvance = opal_unpack_general;
|
|
|
|
|
|
|
|
/* copy */
|
|
|
|
iov.iov_len = length = 64 * 1024;
|
|
|
|
iov.iov_base = (IOVBASE_TYPE*)malloc( length * sizeof(char) );
|
|
|
|
|
|
|
|
completed = 0;
|
|
|
|
while(0 == completed) {
|
|
|
|
iov.iov_len = length;
|
|
|
|
iov_count = 1;
|
|
|
|
max_data = length;
|
|
|
|
completed |= opal_convertor_pack( &send_convertor, &iov, &iov_count, &max_data );
|
|
|
|
completed |= opal_convertor_unpack( &recv_convertor.convertor, &iov, &iov_count, &max_data );
|
|
|
|
}
|
|
|
|
free( iov.iov_base );
|
|
|
|
OBJ_DESTRUCT( &send_convertor );
|
|
|
|
OBJ_DESTRUCT( &recv_convertor );
|
|
|
|
}
|
|
|
|
|
|
|
|
return OMPI_SUCCESS;
|
|
|
|
}
|