openmpi/ompi/mca/osc/rdma/osc_rdma_replyreq.c


/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University.
 *                         All rights reserved.
 * Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
 *                         All rights reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2010      Cisco Systems, Inc.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "ompi_config.h"
#include "osc_rdma_replyreq.h"
#include "opal/class/opal_list.h"
#include "opal/datatype/opal_convertor.h"
int
ompi_osc_rdma_replyreq_alloc_init(ompi_osc_rdma_module_t *module,
                                  int origin,
                                  ompi_ptr_t origin_request,
                                  OPAL_PTRDIFF_TYPE target_displacement,
                                  int target_count,
                                  struct ompi_datatype_t *datatype,
                                  ompi_osc_rdma_replyreq_t **replyreq)
{
    int ret;
    void *target_addr = (unsigned char*) module->m_win->w_baseptr +
        (target_displacement * module->m_win->w_disp_unit);

    /* allocate a replyreq */
    ret = ompi_osc_rdma_replyreq_alloc(module,
                                       origin,
                                       replyreq);
    if (OMPI_SUCCESS != ret) return ret;

    /* initialize local side of replyreq */
    ret = ompi_osc_rdma_replyreq_init_target(*replyreq,
                                             target_addr,
                                             target_count,
                                             datatype);
    if (OMPI_SUCCESS != ret) {
        ompi_osc_rdma_replyreq_free(*replyreq);
        return ret;
    }

    /* initialize remote side of replyreq */
    ret = ompi_osc_rdma_replyreq_init_origin(*replyreq,
                                             origin_request);
    if (OMPI_SUCCESS != ret) {
        ompi_osc_rdma_replyreq_free(*replyreq);
        return ret;
    }

    return OMPI_SUCCESS;
}
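
/* Object construction: initialize the embedded target convertor as an
 * opal_convertor_t whenever a replyreq object is created. */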
static void ompi_osc_rdma_replyreq_construct(ompi_osc_rdma_replyreq_t *replyreq)
{
    OBJ_CONSTRUCT(&(replyreq->rep_target_convertor), opal_convertor_t);
}
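
/* Object destruction: tear down the embedded target convertor. */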
static void ompi_osc_rdma_replyreq_destruct(ompi_osc_rdma_replyreq_t *replyreq)
{
    OBJ_DESTRUCT(&(replyreq->rep_target_convertor));
}
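
/* Register the replyreq class with the OPAL object system.  Replyreqs
 * derive from opal_list_item_t so they can be placed on opal_list_t
 * lists (see the opal_list.h include above). */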
OBJ_CLASS_INSTANCE(ompi_osc_rdma_replyreq_t, opal_list_item_t,
                   ompi_osc_rdma_replyreq_construct,
                   ompi_osc_rdma_replyreq_destruct);
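
/*
 * Illustrative call pattern only (not taken from this file): a module
 * servicing a remote get might build a replyreq roughly as follows,
 * where every name except the functions defined above is hypothetical:
 *
 *     ompi_osc_rdma_replyreq_t *replyreq;
 *     int ret = ompi_osc_rdma_replyreq_alloc_init(module, origin_rank,
 *                                                 origin_request,
 *                                                 displacement, count,
 *                                                 datatype, &replyreq);
 *     if (OMPI_SUCCESS != ret) return ret;
 */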