/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2010 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2006 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#ifndef PML_CM_SENDREQ_H
#define PML_CM_SENDREQ_H

#include "pml_cm_request.h"
#include "ompi/mca/pml/base/pml_base_sendreq.h"
#include "ompi/mca/pml/base/pml_base_bsend.h"
#include "ompi/mca/pml/pml.h"
#include "ompi/mca/mtl/mtl.h"
#include "opal/prefetch.h"


struct mca_pml_cm_send_request_t {
    mca_pml_cm_request_t req_base;
    mca_pml_base_send_mode_t req_send_mode;
};
typedef struct mca_pml_cm_send_request_t mca_pml_cm_send_request_t;
OBJ_CLASS_DECLARATION(mca_pml_cm_send_request_t);


struct mca_pml_cm_thin_send_request_t {
    mca_pml_cm_send_request_t req_send;
    mca_mtl_request_t req_mtl;  /**< the mtl specific memory. This field should be the last in the struct */
};
typedef struct mca_pml_cm_thin_send_request_t mca_pml_cm_thin_send_request_t;
OBJ_CLASS_DECLARATION(mca_pml_cm_thin_send_request_t);


struct mca_pml_cm_hvy_send_request_t {
    mca_pml_cm_send_request_t req_send;
    void *req_addr;             /**< pointer to application buffer */
    size_t req_count;           /**< count of user datatype elements */
    int32_t req_peer;           /**< peer process - rank w/in this communicator */
    int32_t req_tag;            /**< user defined tag */
    void *req_buff;             /**< pointer to send buffer - may not be application buffer */
    bool req_blocking;
    mca_mtl_request_t req_mtl;  /**< the mtl specific memory. This field should be the last in the struct */
};
typedef struct mca_pml_cm_hvy_send_request_t mca_pml_cm_hvy_send_request_t;
OBJ_CLASS_DECLARATION(mca_pml_cm_hvy_send_request_t);
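
/*
 * Note on layout: req_mtl must remain the last field in both request
 * variants.  The MTL keeps its per-request state immediately behind the
 * PML request, so the free list backing these requests has to be sized for
 * the larger variant plus the MTL's advertised request size.  A rough,
 * illustrative sketch of that sizing (the exact free-list setup call made
 * by the pml_cm component may differ):
 *
 *     size_t elem_size = sizeof(mca_pml_cm_hvy_send_request_t)
 *                      + ompi_mtl->mtl_request_size;
 *     // every element drawn from mca_pml_base_send_requests is at least
 *     // elem_size bytes, so &sendreq->req_mtl is valid MTL scratch space
 */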


#define MCA_PML_CM_THIN_SEND_REQUEST_ALLOC(sendreq, comm, dst, \
                                           ompi_proc, rc) \
    do { \
        ompi_free_list_item_t* item; \
        ompi_proc = ompi_comm_peer_lookup( comm, dst ); \
        \
        if(OPAL_UNLIKELY(NULL == ompi_proc)) { \
            rc = OMPI_ERR_OUT_OF_RESOURCE; \
            sendreq = NULL; \
        } else { \
            rc = OMPI_SUCCESS; \
            OMPI_FREE_LIST_WAIT(&mca_pml_base_send_requests, \
                                item, rc); \
            sendreq = (mca_pml_cm_thin_send_request_t*)item; \
            sendreq->req_send.req_base.req_pml_type = MCA_PML_CM_REQUEST_SEND_THIN; \
            sendreq->req_mtl.ompi_req = (ompi_request_t*) sendreq; \
            sendreq->req_mtl.completion_callback = mca_pml_cm_send_request_completion; \
        } \
    } while(0)


#define MCA_PML_CM_HVY_SEND_REQUEST_ALLOC(sendreq, comm, dst, \
                                          ompi_proc, rc) \
    { \
        ompi_free_list_item_t* item; \
        ompi_proc = ompi_comm_peer_lookup( comm, dst ); \
        if(OPAL_UNLIKELY(NULL == ompi_proc)) { \
            rc = OMPI_ERR_OUT_OF_RESOURCE; \
            sendreq = NULL; \
        } else { \
            rc = OMPI_SUCCESS; \
            OMPI_FREE_LIST_WAIT(&mca_pml_base_send_requests, \
                                item, rc); \
            sendreq = (mca_pml_cm_hvy_send_request_t*)item; \
            sendreq->req_send.req_base.req_pml_type = MCA_PML_CM_REQUEST_SEND_HEAVY; \
            sendreq->req_mtl.ompi_req = (ompi_request_t*) sendreq; \
            sendreq->req_mtl.completion_callback = mca_pml_cm_send_request_completion; \
        } \
    }
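
/*
 * Both ALLOC macros draw from the single mca_pml_base_send_requests free
 * list, so thin and heavy requests must fit in the same element (see the
 * sizing note above).  OMPI_FREE_LIST_WAIT blocks until an element is
 * available, so once the peer lookup succeeds the allocation itself does
 * not fail.  Wiring req_mtl.completion_callback here is what later lets the
 * MTL complete a request without knowing which flavor it is.
 */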


#define MCA_PML_CM_SEND_REQUEST_INIT_COMMON(req_send, \
                                            ompi_proc, \
                                            comm, \
                                            tag, \
                                            datatype, \
                                            sendmode, \
                                            buf, \
                                            count) \
    { \
        OBJ_RETAIN(comm); \
        OBJ_RETAIN(datatype); \
        (req_send)->req_base.req_comm = comm; \
        (req_send)->req_base.req_datatype = datatype; \
        opal_convertor_copy_and_prepare_for_send( \
            ompi_proc->proc_convertor, \
            &(datatype->super), \
            count, \
            buf, \
            0, \
            &(req_send)->req_base.req_convertor ); \
        (req_send)->req_base.req_ompi.req_mpi_object.comm = comm; \
        (req_send)->req_base.req_ompi.req_status.MPI_SOURCE = \
            comm->c_my_rank; \
        (req_send)->req_base.req_ompi.req_status.MPI_TAG = tag; \
        (req_send)->req_base.req_ompi.req_status._ucount = count; \
        (req_send)->req_send_mode = sendmode; \
        (req_send)->req_base.req_free_called = false; \
    }
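
/*
 * INIT_COMMON retains the communicator and datatype (the matching
 * OBJ_RELEASE calls are in the *_SEND_REQUEST_RETURN macros below) and
 * prepares the convertor against the peer's proc_convertor, so packing is
 * done correctly even for heterogeneous peers.  The MPI status fields are
 * pre-filled here because a send request's status does not change after
 * initialization.
 */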


#define MCA_PML_CM_HVY_SEND_REQUEST_INIT( sendreq, \
                                          ompi_proc, \
                                          comm, \
                                          tag, \
                                          dst, \
                                          datatype, \
                                          sendmode, \
                                          persistent, \
                                          blocking, \
                                          buf, \
                                          count) \
    do { \
        OMPI_REQUEST_INIT(&(sendreq->req_send.req_base.req_ompi), \
                          persistent); \
        sendreq->req_tag = tag; \
        sendreq->req_peer = dst; \
        sendreq->req_addr = buf; \
        sendreq->req_count = count; \
        MCA_PML_CM_SEND_REQUEST_INIT_COMMON( (&sendreq->req_send), \
                                             ompi_proc, \
                                             comm, \
                                             tag, \
                                             datatype, \
                                             sendmode, \
                                             buf, \
                                             count); \
        opal_convertor_get_packed_size( \
            &sendreq->req_send.req_base.req_convertor, \
            &sendreq->req_count ); \
        \
        sendreq->req_blocking = blocking; \
        sendreq->req_send.req_base.req_pml_complete = \
            (persistent ? true:false); \
    } while(0)
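
/*
 * Note: after opal_convertor_get_packed_size() above, req_count no longer
 * holds the element count passed in by the caller; it holds the packed
 * message size in bytes.  MCA_PML_CM_HVY_SEND_REQUEST_BSEND_ALLOC below
 * relies on that when it sizes the buffered-send staging buffer.
 */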


#define MCA_PML_CM_THIN_SEND_REQUEST_INIT( sendreq, \
                                           ompi_proc, \
                                           comm, \
                                           tag, \
                                           dst, \
                                           datatype, \
                                           sendmode, \
                                           buf, \
                                           count) \
    do { \
        OMPI_REQUEST_INIT(&(sendreq->req_send.req_base.req_ompi), \
                          false); \
        MCA_PML_CM_SEND_REQUEST_INIT_COMMON( (&sendreq->req_send), \
                                             ompi_proc, \
                                             comm, \
                                             tag, \
                                             datatype, \
                                             sendmode, \
                                             buf, \
                                             count); \
        sendreq->req_send.req_base.req_pml_complete = false; \
    } while(0)


#define MCA_PML_CM_SEND_REQUEST_START_SETUP(req_send) \
    do { \
        (req_send)->req_base.req_pml_complete = false; \
        (req_send)->req_base.req_ompi.req_complete = false; \
        (req_send)->req_base.req_ompi.req_state = \
            OMPI_REQUEST_ACTIVE; \
        (req_send)->req_base.req_ompi.req_status._cancelled = 0; \
    } while (0)


#define MCA_PML_CM_THIN_SEND_REQUEST_START(sendreq, \
                                           comm, \
                                           tag, \
                                           dst, \
                                           sendmode, \
                                           blocking, \
                                           ret) \
    do { \
        MCA_PML_CM_SEND_REQUEST_START_SETUP(&(sendreq)->req_send); \
        ret = OMPI_MTL_CALL(isend(ompi_mtl, \
                                  comm, \
                                  dst, \
                                  tag, \
                                  &sendreq->req_send.req_base.req_convertor, \
                                  sendmode, \
                                  blocking, \
                                  &sendreq->req_mtl)); \
    } while (0)
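
/*
 * Illustrative sketch of how the pml_cm isend path is expected to drive the
 * thin-request macros end to end.  Variable names are hypothetical and error
 * handling is trimmed; the authoritative entry points live in the pml_cm
 * source files.
 *
 *     mca_pml_cm_thin_send_request_t *sendreq;
 *     ompi_proc_t *ompi_proc;
 *     int ret;
 *
 *     MCA_PML_CM_THIN_SEND_REQUEST_ALLOC(sendreq, comm, dst, ompi_proc, ret);
 *     if (OMPI_SUCCESS != ret) return ret;
 *
 *     MCA_PML_CM_THIN_SEND_REQUEST_INIT(sendreq, ompi_proc, comm, tag, dst,
 *                                       datatype, sendmode, buf, count);
 *     MCA_PML_CM_THIN_SEND_REQUEST_START(sendreq, comm, tag, dst,
 *                                        sendmode, false, ret);
 *     if (OMPI_SUCCESS == ret) {
 *         *request = (ompi_request_t *) sendreq;   // hand back to MPI layer
 *     }
 */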


#define MCA_PML_CM_HVY_SEND_REQUEST_BSEND_ALLOC(sendreq, ret) \
    do { \
        struct iovec iov; \
        unsigned int iov_count; \
        size_t max_data; \
        \
        if(sendreq->req_count > 0) { \
            sendreq->req_buff = \
                mca_pml_base_bsend_request_alloc_buf(sendreq->req_count); \
            if (NULL == sendreq->req_buff) { \
                ret = MPI_ERR_BUFFER; \
            } else { \
                iov.iov_base = (IOVBASE_TYPE*)sendreq->req_buff; \
                max_data = iov.iov_len = sendreq->req_count; \
                iov_count = 1; \
                opal_convertor_pack( &sendreq->req_send.req_base.req_convertor, \
                                     &iov, \
                                     &iov_count, \
                                     &max_data ); \
                opal_convertor_prepare_for_send( &sendreq->req_send.req_base.req_convertor, \
                                                 &(ompi_mpi_packed.dt.super), \
                                                 max_data, sendreq->req_buff ); \
            } \
        } \
    } while(0);
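
/*
 * For buffered-mode sends the user's data is packed into a staging buffer
 * obtained from the base bsend allocator, so the application buffer may be
 * reused as soon as the buffered send call returns.  The convertor is then
 * re-prepared over the packed buffer as raw MPI_PACKED bytes, which is what
 * the MTL actually transmits.
 */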


#define MCA_PML_CM_HVY_SEND_REQUEST_START(sendreq, ret) \
    do { \
        ret = OMPI_SUCCESS; \
        MCA_PML_CM_SEND_REQUEST_START_SETUP(&(sendreq)->req_send); \
        if (sendreq->req_send.req_send_mode == MCA_PML_BASE_SEND_BUFFERED) { \
            MCA_PML_CM_HVY_SEND_REQUEST_BSEND_ALLOC(sendreq, ret); \
        } \
        if (OMPI_SUCCESS == ret) { \
            ret = OMPI_MTL_CALL(isend(ompi_mtl, \
                                      sendreq->req_send.req_base.req_comm, \
                                      sendreq->req_peer, \
                                      sendreq->req_tag, \
                                      &sendreq->req_send.req_base.req_convertor, \
                                      sendreq->req_send.req_send_mode, \
                                      sendreq->req_blocking, \
                                      &sendreq->req_mtl)); \
            if(OMPI_SUCCESS == ret && \
               sendreq->req_send.req_send_mode == MCA_PML_BASE_SEND_BUFFERED) { \
                sendreq->req_send.req_base.req_ompi.req_status.MPI_ERROR = 0; \
                ompi_request_complete(&(sendreq)->req_send.req_base.req_ompi, true); \
            } \
        } \
    } while (0)
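
/*
 * Buffered-mode requests are marked complete at the MPI level right after
 * the hand-off to the MTL: the data already lives in the bsend staging
 * buffer, so the user-visible request need not wait for the wire transfer.
 * The later MTL completion callback then only releases PML resources via
 * MCA_PML_CM_HVY_SEND_REQUEST_PML_COMPLETE.
 */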


/*
 * The PML has completed a send request. Note that this request
 * may have been orphaned by the user or have already completed
 * at the MPI level.
 * This macro will never be called directly from the upper level, as it should
 * only be an internal call to the PML.
 */
#define MCA_PML_CM_HVY_SEND_REQUEST_PML_COMPLETE(sendreq) \
    do { \
        assert( false == sendreq->req_send.req_base.req_pml_complete ); \
        \
        if (sendreq->req_send.req_send_mode == MCA_PML_BASE_SEND_BUFFERED && \
            sendreq->req_count > 0 ) { \
            mca_pml_base_bsend_request_free(sendreq->req_buff); \
        } \
        \
        OPAL_THREAD_LOCK(&ompi_request_lock); \
        if( false == sendreq->req_send.req_base.req_ompi.req_complete ) { \
            /* Should only be called for long messages (maybe synchronous) */ \
            ompi_request_complete(&(sendreq->req_send.req_base.req_ompi), true); \
        } \
        sendreq->req_send.req_base.req_pml_complete = true; \
        \
        if( sendreq->req_send.req_base.req_free_called ) { \
            MCA_PML_CM_HVY_SEND_REQUEST_RETURN( sendreq ); \
        } else { \
            if(sendreq->req_send.req_base.req_ompi.req_persistent) { \
                /* rewind convertor */ \
                size_t offset = 0; \
                opal_convertor_set_position(&sendreq->req_send.req_base.req_convertor, \
                                            &offset); \
            } \
        } \
        OPAL_THREAD_UNLOCK(&ompi_request_lock); \
    } while (0)
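
/*
 * Persistent requests are not returned to the free list here: the convertor
 * is rewound to offset 0 so that a subsequent MPI_Start can reuse the same
 * request and pack the user buffer again from the beginning.
 */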


/*
 * Release resources associated with a request
 */
#define MCA_PML_CM_HVY_SEND_REQUEST_RETURN(sendreq) \
    { \
        /* Let the base handle the reference counts */ \
        OBJ_RELEASE(sendreq->req_send.req_base.req_datatype); \
        OBJ_RELEASE(sendreq->req_send.req_base.req_comm); \
        OMPI_REQUEST_FINI(&sendreq->req_send.req_base.req_ompi); \
        opal_convertor_cleanup( &(sendreq->req_send.req_base.req_convertor) ); \
        OMPI_FREE_LIST_RETURN( &mca_pml_base_send_requests, \
                               (ompi_free_list_item_t*)sendreq); \
    }


/*
 * The PML has completed a send request. Note that this request
 * may have been orphaned by the user or have already completed
 * at the MPI level.
 * This macro will never be called directly from the upper level, as it should
 * only be an internal call to the PML.
 */
#define MCA_PML_CM_THIN_SEND_REQUEST_PML_COMPLETE(sendreq) \
    do { \
        assert( false == sendreq->req_send.req_base.req_pml_complete ); \
        \
        OPAL_THREAD_LOCK(&ompi_request_lock); \
        if( false == sendreq->req_send.req_base.req_ompi.req_complete ) { \
            /* Should only be called for long messages (maybe synchronous) */ \
            ompi_request_complete(&(sendreq->req_send.req_base.req_ompi), true); \
        } \
        sendreq->req_send.req_base.req_pml_complete = true; \
        \
        if( sendreq->req_send.req_base.req_free_called ) { \
            MCA_PML_CM_THIN_SEND_REQUEST_RETURN( sendreq ); \
        } \
        OPAL_THREAD_UNLOCK(&ompi_request_lock); \
    } while (0)


/*
 * Release resources associated with a request
 */
#define MCA_PML_CM_THIN_SEND_REQUEST_RETURN(sendreq) \
    { \
        /* Let the base handle the reference counts */ \
        OBJ_RELEASE(sendreq->req_send.req_base.req_datatype); \
        OBJ_RELEASE(sendreq->req_send.req_base.req_comm); \
        OMPI_REQUEST_FINI(&sendreq->req_send.req_base.req_ompi); \
        opal_convertor_cleanup( &(sendreq->req_send.req_base.req_convertor) ); \
        OMPI_FREE_LIST_RETURN( &mca_pml_base_send_requests, \
                               (ompi_free_list_item_t*)sendreq); \
    }


extern void
mca_pml_cm_send_request_completion(struct mca_mtl_request_t *mtl_request);
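
/*
 * mca_pml_cm_send_request_completion() is the function registered as
 * req_mtl's completion_callback by the ALLOC macros above.  A simplified
 * sketch of what its implementation is expected to do (the authoritative
 * version lives in the pml_cm source files, not in this header):
 *
 *     void mca_pml_cm_send_request_completion(struct mca_mtl_request_t *mtl_request)
 *     {
 *         mca_pml_cm_send_request_t *base =
 *             (mca_pml_cm_send_request_t *) mtl_request->ompi_req;
 *         if (MCA_PML_CM_REQUEST_SEND_THIN == base->req_base.req_pml_type) {
 *             MCA_PML_CM_THIN_SEND_REQUEST_PML_COMPLETE(
 *                 ((mca_pml_cm_thin_send_request_t *) base));
 *         } else {
 *             MCA_PML_CM_HVY_SEND_REQUEST_PML_COMPLETE(
 *                 ((mca_pml_cm_hvy_send_request_t *) base));
 *         }
 *     }
 */
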
#endif