/*
 * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2013 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "ompi_config.h"

#include <string.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>

#include "opal/class/opal_bitmap.h"
#include "opal/datatype/opal_convertor.h"
#include "opal/sys/atomic.h"
#include "ompi/mca/btl/btl.h"
#include "ompi/mca/mpool/base/base.h"
#include "btl_self.h"
#include "btl_self_frag.h"
#include "ompi/proc/proc.h"
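
/*
 * The loopback (self) BTL module.  The size limits (eager limit, max send
 * size, pipeline parameters) are left at zero here; they are expected to
 * be filled in from MCA parameters during component initialization.
 */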
mca_btl_base_module_t mca_btl_self = {
    &mca_btl_self_component.super,
    0, /* btl_eager_limit */
    0, /* btl_rndv_eager_limit */
    0, /* btl_max_send_size */
    0, /* btl_rdma_pipeline_send_length */
    0, /* btl_rdma_pipeline_frag_size */
    0, /* btl_min_rdma_pipeline_size */
    0, /* btl_exclusivity */
    0, /* btl_latency */
    0, /* btl_bandwidth */
    0, /* btl flags */
    0, /* btl segment size */
    mca_btl_self_add_procs,
    mca_btl_self_del_procs,
    NULL,
    mca_btl_self_finalize,
    mca_btl_self_alloc,
    mca_btl_self_free,
    mca_btl_self_prepare_src,
    mca_btl_self_prepare_dst,
    mca_btl_self_send,
    NULL, /* send immediate */
    mca_btl_self_rdma, /* put */
    mca_btl_self_rdma, /* get */
    mca_btl_base_dump,
    NULL, /* mpool */
    NULL, /* register error cb */
    mca_btl_self_ft_event
};
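
/**
 * PML->BTL notification of a set of new procs.  Only the local process
 * itself is reachable through this BTL, so at most one bit is set in the
 * reachability bitmap.
 */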
int mca_btl_self_add_procs( struct mca_btl_base_module_t* btl,
                            size_t nprocs,
                            struct ompi_proc_t **procs,
                            struct mca_btl_base_endpoint_t **peers,
                            opal_bitmap_t* reachability )
{
    int i;

    for( i = 0; i < (int)nprocs; i++ ) {
        if( procs[i] == ompi_proc_local_proc ) {
            opal_bitmap_set_bit( reachability, i );
            break; /* there will always be only one ... */
        }
    }
    return OMPI_SUCCESS;
}
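
/**
 * PML->BTL notification that procs are being removed.  The self BTL keeps
 * no per-peer state, so there is nothing to release here.
 */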
int mca_btl_self_del_procs( struct mca_btl_base_module_t* btl,
                            size_t nprocs,
                            struct ompi_proc_t **procs,
                            struct mca_btl_base_endpoint_t **peers )
{
    return OMPI_SUCCESS;
}


/**
 * MCA->BTL Clean up any resources held by the BTL module
 * before the module is unloaded.
 *
 * @param btl (IN) BTL module.
 *
 * Prior to unloading a BTL module, the MCA framework will call
 * the BTL finalize method of the module.  Any resources held by
 * the BTL should be released and, if required, the memory corresponding
 * to the BTL module freed.
 */
int mca_btl_self_finalize(struct mca_btl_base_module_t* btl)
{
    return OMPI_SUCCESS;
}

/**
 * Allocate a segment.
 *
 * @param btl (IN)  BTL module
 * @param size (IN) Requested segment size.
 */
mca_btl_base_descriptor_t* mca_btl_self_alloc(
    struct mca_btl_base_module_t* btl,
    struct mca_btl_base_endpoint_t* endpoint,
    uint8_t order,
    size_t size,
    uint32_t flags)
{
    mca_btl_self_frag_t* frag = NULL;

    if(size <= mca_btl_self.btl_eager_limit) {
        MCA_BTL_SELF_FRAG_ALLOC_EAGER(frag);
    } else if (size <= btl->btl_max_send_size) {
        MCA_BTL_SELF_FRAG_ALLOC_SEND(frag);
    }
    if( OPAL_UNLIKELY(NULL == frag) ) {
        return NULL;
    }

    frag->segment.seg_len = size;
    frag->base.des_flags = flags;
    frag->base.des_src = &(frag->segment);
    frag->base.des_src_cnt = 1;
    return (mca_btl_base_descriptor_t*)frag;
}

/**
 * Return a segment allocated by this BTL.
 *
 * @param btl (IN)     BTL module
 * @param segment (IN) Allocated segment.
 */
int mca_btl_self_free( struct mca_btl_base_module_t* btl,
                       mca_btl_base_descriptor_t* des )
{
    mca_btl_self_frag_t* frag = (mca_btl_self_frag_t*)des;

    frag->base.des_src = NULL;
    frag->base.des_src_cnt = 0;
    frag->base.des_dst = NULL;
    frag->base.des_dst_cnt = 0;

    if(frag->size == mca_btl_self.btl_eager_limit) {
        MCA_BTL_SELF_FRAG_RETURN_EAGER(frag);
    } else if (frag->size == mca_btl_self.btl_max_send_size) {
        MCA_BTL_SELF_FRAG_RETURN_SEND(frag);
    } else {
        MCA_BTL_SELF_FRAG_RETURN_RDMA(frag);
    }
    return OMPI_SUCCESS;
}
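
/*
 * Illustrative sketch (not compiled): roughly how an upper layer could
 * obtain and release a descriptor from this module.  The NULL endpoint,
 * the 64-byte size, and the zero flags are placeholder values chosen for
 * the example, not values mandated by the BTL interface.
 */
#if 0
static void mca_btl_self_alloc_free_example(void)
{
    /* request a small eager fragment from the self BTL */
    mca_btl_base_descriptor_t* des =
        mca_btl_self_alloc(&mca_btl_self, NULL, MCA_BTL_NO_ORDER, 64, 0);
    if( NULL != des ) {
        /* ... the payload would be written into des->des_src[0] here ... */
        mca_btl_self_free(&mca_btl_self, des);
    }
}
#endif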

/**
 * Prepare data for send/put
 *
 * @param btl (IN) BTL module
 */
struct mca_btl_base_descriptor_t*
mca_btl_self_prepare_src( struct mca_btl_base_module_t* btl,
                          struct mca_btl_base_endpoint_t* endpoint,
                          mca_mpool_base_registration_t* registration,
                          struct opal_convertor_t* convertor,
                          uint8_t order,
                          size_t reserve,
                          size_t* size,
                          uint32_t flags )
{
    mca_btl_self_frag_t* frag;
    struct iovec iov;
    uint32_t iov_count = 1;
    size_t max_data = *size;
    int rc;

    /* Non-contiguous data, a requested reserve (header space), or data
     * below the max send size: pack into the fragment's own buffer. */
    if( opal_convertor_need_buffers(convertor) ||
        max_data < mca_btl_self.btl_max_send_size ||
        reserve != 0 ) {

        MCA_BTL_SELF_FRAG_ALLOC_SEND(frag);
        if(OPAL_UNLIKELY(NULL == frag)) {
            return NULL;
        }

        if(reserve + max_data > frag->size) {
            max_data = frag->size - reserve;
        }
        iov.iov_len = max_data;
        iov.iov_base = (IOVBASE_TYPE*)((unsigned char*)(frag+1) + reserve);

        rc = opal_convertor_pack(convertor, &iov, &iov_count, &max_data );
        if(rc < 0) {
            MCA_BTL_SELF_FRAG_RETURN_SEND(frag);
            return NULL;
        }
        frag->segment.seg_addr.pval = frag+1;
        frag->segment.seg_len = reserve + max_data;
        *size = max_data;
    } else {
        MCA_BTL_SELF_FRAG_ALLOC_RDMA(frag);
        if(OPAL_UNLIKELY(NULL == frag)) {
            return NULL;
        }
        iov.iov_len = max_data;
        iov.iov_base = NULL;

        /* the convertor should return the offset into the user's buffer */
        rc = opal_convertor_pack(convertor, &iov, &iov_count, &max_data );
        if(rc < 0) {
            MCA_BTL_SELF_FRAG_RETURN_RDMA(frag);
            return NULL;
        }
        frag->segment.seg_addr.lval = (uint64_t)(uintptr_t) iov.iov_base;
        frag->segment.seg_len = max_data;
        *size = max_data;
    }
    frag->base.des_flags = flags;
    frag->base.des_src = &frag->segment;
    frag->base.des_src_cnt = 1;

    return &frag->base;
}

/**
 * Prepare data for receive.
 */
struct mca_btl_base_descriptor_t*
mca_btl_self_prepare_dst( struct mca_btl_base_module_t* btl,
                          struct mca_btl_base_endpoint_t* endpoint,
                          mca_mpool_base_registration_t* registration,
                          struct opal_convertor_t* convertor,
                          uint8_t order,
                          size_t reserve,
                          size_t* size,
                          uint32_t flags )
{
    mca_btl_self_frag_t* frag;
    size_t max_data = *size;
    void *ptr;

    MCA_BTL_SELF_FRAG_ALLOC_RDMA(frag);
    if(OPAL_UNLIKELY(NULL == frag)) {
        return NULL;
    }

    /* set up the descriptor to point directly at the user buffer */
    opal_convertor_get_current_pointer( convertor, &ptr );
    frag->segment.seg_addr.lval = (uint64_t)(uintptr_t) ptr;

    frag->segment.seg_len = reserve + max_data;
    frag->base.des_dst = &frag->segment;
    frag->base.des_dst_cnt = 1;
    frag->base.des_flags = flags;
    return &frag->base;
}

/**
 * Initiate a send to the peer.
 *
 * @param btl (IN)  BTL module
 * @param peer (IN) BTL peer addressing
 */
int mca_btl_self_send( struct mca_btl_base_module_t* btl,
                       struct mca_btl_base_endpoint_t* endpoint,
                       struct mca_btl_base_descriptor_t* des,
                       mca_btl_base_tag_t tag )
{
    mca_btl_active_message_callback_t* reg;
    int btl_ownership = (des->des_flags & MCA_BTL_DES_FLAGS_BTL_OWNERSHIP);

    /**
     * The destination segments have to be set before the upcall and
     * reset afterwards.
     */
    des->des_dst = des->des_src;
    des->des_dst_cnt = des->des_src_cnt;
    /* upcall */
    reg = mca_btl_base_active_message_trigger + tag;
    reg->cbfunc( btl, tag, des, reg->cbdata );
    des->des_dst = NULL;
    des->des_dst_cnt = 0;
    /* send completion */
    if( des->des_flags & MCA_BTL_DES_SEND_ALWAYS_CALLBACK ) {
        des->des_cbfunc( btl, endpoint, des, OMPI_SUCCESS );
    }
    if( btl_ownership ) {
        mca_btl_self_free( btl, des );
    }
    return 1;
}
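
/*
 * Illustrative sketch (not compiled): a minimal eager send through this
 * module.  It assumes a callback has already been registered for `tag` in
 * mca_btl_base_active_message_trigger, and that the fragment's segment
 * address is set up by the fragment constructor; the NULL endpoint and the
 * payload are placeholder values for the example only.
 */
#if 0
static int mca_btl_self_send_example(mca_btl_base_tag_t tag)
{
    const char payload[] = "hello";
    mca_btl_base_descriptor_t* des =
        mca_btl_self_alloc(&mca_btl_self, NULL, MCA_BTL_NO_ORDER,
                           sizeof(payload), MCA_BTL_DES_FLAGS_BTL_OWNERSHIP);
    if( NULL == des ) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    memcpy(des->des_src[0].seg_addr.pval, payload, sizeof(payload));
    /* ownership was passed to the BTL, so the descriptor is released on
     * completion inside mca_btl_self_send() */
    return mca_btl_self_send(&mca_btl_self, NULL, des, tag);
}
#endif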

/**
 * Initiate a put or a get to/from the peer; this routine is registered
 * as both the put and the get entry point of this module.
 *
 * @param btl (IN)  BTL module
 * @param peer (IN) BTL peer addressing
 */
int mca_btl_self_rdma( struct mca_btl_base_module_t* btl,
                       struct mca_btl_base_endpoint_t* endpoint,
                       struct mca_btl_base_descriptor_t* des )
{
    mca_btl_base_segment_t* src = des->des_src;
    mca_btl_base_segment_t* dst = des->des_dst;
    size_t src_cnt = des->des_src_cnt;
    size_t dst_cnt = des->des_dst_cnt;
    unsigned char* src_addr = (unsigned char *)(uintptr_t) src->seg_addr.lval;
    size_t src_len = src->seg_len;
    unsigned char* dst_addr = (unsigned char *)(uintptr_t) dst->seg_addr.lval;
    size_t dst_len = dst->seg_len;
    int btl_ownership = (des->des_flags & MCA_BTL_DES_FLAGS_BTL_OWNERSHIP);

    /* walk the source and destination segment lists in lock step,
     * copying as much as the two current segments allow */
    while(src_len && dst_len) {

        if(src_len == dst_len) {
            memcpy(dst_addr, src_addr, src_len);

            /* advance src */
            if(--src_cnt != 0) {
                src++;
                src_addr = (unsigned char*)src->seg_addr.pval;
                src_len = src->seg_len;
            } else {
                src_len = 0;
            }

            /* advance dst */
            if(--dst_cnt != 0) {
                dst++;
                dst_addr = (unsigned char*)dst->seg_addr.pval;
                dst_len = dst->seg_len;
            } else {
                dst_len = 0;
            }
        } else {
            size_t bytes = src_len < dst_len ? src_len : dst_len;
            memcpy(dst_addr, src_addr, bytes);

            /* advance src */
            src_len -= bytes;
            if(src_len == 0) {
                if(--src_cnt != 0) {
                    src++;
                    src_addr = (unsigned char*)src->seg_addr.pval;
                    src_len = src->seg_len;
                }
            } else {
                src_addr += bytes;
            }

            /* advance dst */
            dst_len -= bytes;
            if(dst_len == 0) {
                if(--dst_cnt != 0) {
                    dst++;
                    dst_addr = (unsigned char*)dst->seg_addr.pval;
                    dst_len = dst->seg_len;
                }
            } else {
                dst_addr += bytes;
            }
        }
    }

    /* rdma completion */
    des->des_cbfunc( btl, endpoint, des, OMPI_SUCCESS );
    if( btl_ownership ) {
        mca_btl_self_free( btl, des );
    }
    return OMPI_SUCCESS;
}
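
/**
 * Fault-tolerance event handler.  The self BTL holds no state that needs
 * special action at checkpoint, continue, restart, or termination, so
 * every case is a no-op.
 */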
int mca_btl_self_ft_event(int state)
{
    if(OPAL_CRS_CHECKPOINT == state) {
        ;
    }
    else if(OPAL_CRS_CONTINUE == state) {
        ;
    }
    else if(OPAL_CRS_RESTART == state) {
        ;
    }
    else if(OPAL_CRS_TERM == state ) {
        ;
    }
    else {
        ;
    }

    return OMPI_SUCCESS;
}