/*
 * Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2006 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "ompi_config.h"

#include "opal/prefetch.h"

#include "ompi/communicator/communicator.h"
#include "opal/datatype/opal_convertor.h"

#include "mtl_mx.h"
#include "mtl_mx_types.h"
#include "mtl_mx_request.h"
#include "ompi/mca/mtl/base/mtl_base_datatype.h"

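/*
 * Blocking send over the Myrinet MX MTL: pack the user datatype into a
 * contiguous MX segment, post the matching mx_issend()/mx_isend(), then
 * poll mx_test() in place until the request completes.
 */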
int
ompi_mtl_mx_send(struct mca_mtl_base_module_t* mtl,
                 struct ompi_communicator_t* comm,
                 int dest,
                 int tag,
                 struct opal_convertor_t *convertor,
                 mca_pml_base_send_mode_t mode)
{
    mx_return_t mx_return;
    uint64_t match_bits;
    mca_mtl_mx_request_t mtl_mx_request;
    size_t length;
    mx_status_t mx_status;
    uint32_t result;
    ompi_proc_t* ompi_proc = ompi_comm_peer_lookup( comm, dest );
    mca_mtl_mx_endpoint_t* mx_endpoint = (mca_mtl_mx_endpoint_t*)
        ompi_proc->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_MTL];
    char* where;

    assert(mtl == &ompi_mtl_mx.super);

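    /* The MX match value packs the communicator context id, the sender's
       rank, and the MPI tag into one 64-bit quantity that the receiver
       matches against. */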
    MX_SET_SEND_BITS(match_bits, comm->c_contextid, comm->c_my_rank, tag);

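    /* Flatten the (possibly non-contiguous) user buffer into a single
       contiguous segment; free_after is set when an intermediate pack
       buffer had to be allocated. */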
    ompi_mtl_datatype_pack(convertor,
                           &mtl_mx_request.mx_segment[0].segment_ptr,
                           &length,
                           &mtl_mx_request.free_after);

    mtl_mx_request.mx_segment[0].segment_length = length;
    mtl_mx_request.convertor = convertor;
    mtl_mx_request.type = OMPI_MTL_MX_ISEND;

    OPAL_OUTPUT_VERBOSE((50, ompi_mtl_base_framework.framework_output,
                         "issend bits: 0x%016" PRIx64 "\n",
                         match_bits));

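    /* MPI synchronous mode must not complete locally until the receive is
       matched, which is exactly the semantic of mx_issend(); every other
       send mode can use the eager mx_isend(). */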
    if (mode == MCA_PML_BASE_SEND_SYNCHRONOUS) {
        mx_return = mx_issend( ompi_mtl_mx.mx_endpoint,
                               mtl_mx_request.mx_segment,
                               1,
                               mx_endpoint->mx_peer_addr,
                               match_bits,
                               &mtl_mx_request,
                               &mtl_mx_request.mx_request );
        where = "mx_issend";
    } else {
        mx_return = mx_isend( ompi_mtl_mx.mx_endpoint,
                              mtl_mx_request.mx_segment,
                              1,
                              mx_endpoint->mx_peer_addr,
                              match_bits,
                              &mtl_mx_request,
                              &mtl_mx_request.mx_request );
        where = "mx_isend";
    }

    if (OPAL_UNLIKELY(mx_return != MX_SUCCESS)) {
        char peer_name[MX_MAX_HOSTNAME_LEN];
        if (MX_SUCCESS != mx_nic_id_to_hostname( mx_endpoint->mx_peer->nic_id, peer_name )) {
            sprintf( peer_name, "unknown %lx nic_id", (long)mx_endpoint->mx_peer->nic_id );
        }
        opal_output(ompi_mtl_base_framework.framework_output,
                    "Error in %s (error %s) sending to %s\n",
                    where, mx_strerror(mx_return), peer_name);

        /* Free buffer if needed */
        if (mtl_mx_request.free_after) {
            free(mtl_mx_request.mx_segment[0].segment_ptr);
        }
        return OMPI_ERROR;
    }
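    /* This send is blocking, so busy-poll mx_test() until the request
       completes; mx_test() never blocks, it just probes the request. */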
    do {
        mx_return = mx_test(ompi_mtl_mx.mx_endpoint,
                            &mtl_mx_request.mx_request,
                            &mx_status,
                            &result);
        if (OPAL_UNLIKELY(mx_return != MX_SUCCESS)) {
            opal_output(ompi_mtl_base_framework.framework_output,
                        "Error in mx_test (error %s)\n", mx_strerror(mx_return));
            abort();
        }
        if (OPAL_UNLIKELY(result && mx_status.code != MX_STATUS_SUCCESS)) {
            opal_output(ompi_mtl_base_framework.framework_output,
                        "Error in ompi_mtl_mx_send, mx_test returned something other than MX_STATUS_SUCCESS: mx_status.code = %d.\n",
                        mx_status.code);
            abort();
        }
    } while (!result);

    /* Free buffer if needed */
    if (mtl_mx_request.free_after) {
        free(mtl_mx_request.mx_segment[0].segment_ptr);
    }

    return OMPI_SUCCESS;
}
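/*
 * Non-blocking send over the Myrinet MX MTL.  Same setup as
 * ompi_mtl_mx_send(), but the request state lives in the caller-provided
 * mtl_request and completion is reported asynchronously through the MTL
 * progress path rather than polled for here.
 */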
int
ompi_mtl_mx_isend(struct mca_mtl_base_module_t* mtl,
                  struct ompi_communicator_t* comm,
                  int dest,
                  int tag,
                  struct opal_convertor_t *convertor,
                  mca_pml_base_send_mode_t mode,
                  bool blocking,
                  mca_mtl_request_t * mtl_request)
{
    mx_return_t mx_return;
    uint64_t match_bits;
    mca_mtl_mx_request_t * mtl_mx_request = (mca_mtl_mx_request_t*) mtl_request;
    size_t length;
    ompi_proc_t* ompi_proc = ompi_comm_peer_lookup( comm, dest );
    mca_mtl_mx_endpoint_t* mx_endpoint = (mca_mtl_mx_endpoint_t*)
        ompi_proc->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_MTL];
    char* where;

    assert(mtl == &ompi_mtl_mx.super);

    MX_SET_SEND_BITS(match_bits, comm->c_contextid, comm->c_my_rank, tag);

    ompi_mtl_datatype_pack(convertor,
                           &mtl_mx_request->mx_segment[0].segment_ptr,
                           &length,
                           &mtl_mx_request->free_after);

    mtl_mx_request->mx_segment[0].segment_length = length;
    mtl_mx_request->convertor = convertor;
    mtl_mx_request->type = OMPI_MTL_MX_ISEND;

    OPAL_OUTPUT_VERBOSE((50, ompi_mtl_base_framework.framework_output,
                         "issend bits: 0x%016" PRIx64 "\n", match_bits));

    if (mode == MCA_PML_BASE_SEND_SYNCHRONOUS) {
        mx_return = mx_issend( ompi_mtl_mx.mx_endpoint,
                               mtl_mx_request->mx_segment,
                               1,
                               mx_endpoint->mx_peer_addr,
                               match_bits,
                               mtl_mx_request,
                               &mtl_mx_request->mx_request );
        where = "mx_issend";
    } else {
        mx_return = mx_isend( ompi_mtl_mx.mx_endpoint,
                              mtl_mx_request->mx_segment,
                              1,
                              mx_endpoint->mx_peer_addr,
                              match_bits,
                              mtl_mx_request,
                              &mtl_mx_request->mx_request );
        where = "mx_isend";
    }

    if (OPAL_UNLIKELY(mx_return != MX_SUCCESS)) {
        char peer_name[MX_MAX_HOSTNAME_LEN];
        if (MX_SUCCESS != mx_nic_id_to_hostname( mx_endpoint->mx_peer->nic_id, peer_name )) {
            sprintf( peer_name, "unknown %lx nic_id", (long)mx_endpoint->mx_peer->nic_id );
        }
        opal_output(ompi_mtl_base_framework.framework_output,
                    "Error in %s (error %s) sending to %s\n",
                    where, mx_strerror(mx_return), peer_name);

        /* Free the pack buffer if one was allocated; without this the
           error path leaks it, unlike the same path in ompi_mtl_mx_send(). */
        if (mtl_mx_request->free_after) {
            free(mtl_mx_request->mx_segment[0].segment_ptr);
        }
        return OMPI_ERROR;
    }

    return OMPI_SUCCESS;
}