/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University.
 *                         All rights reserved.
 * Copyright (c) 2004-2006 The Trustees of the University of Tennessee.
 *                         All rights reserved.
 * Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2007      Los Alamos National Security, LLC.  All rights
 *                         reserved.
 * Copyright (c) 2009-2011 Oracle and/or its affiliates.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "ompi_config.h"

#include "osc_rdma.h"
#include "osc_rdma_sendreq.h"
#include "osc_rdma_header.h"
#include "osc_rdma_data_move.h"
#include "osc_rdma_obj_convert.h"

#include "opal/util/arch.h"
#include "opal/util/output.h"
#include "opal/sys/atomic.h"
#include "opal/align.h"
#include "ompi/mca/pml/pml.h"
#include "ompi/mca/bml/bml.h"
#include "ompi/mca/bml/base/base.h"
#include "ompi/mca/btl/btl.h"
#include "ompi/mca/osc/base/base.h"
#include "ompi/mca/osc/base/osc_base_obj_convert.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/op/op.h"
#include "ompi/memchecker.h"

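/* Generate the next tag used to match a long put/get/accumulate
 * payload with the receive posted on the remote side.  The counter
 * wraps at mca_pml.pml_max_tag and is updated with an atomic
 * compare-and-swap when available, otherwise under the module lock. */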
static inline int32_t
create_send_tag(ompi_osc_rdma_module_t *module)
{
#if OPAL_ENABLE_MULTI_THREADS && OPAL_HAVE_ATOMIC_CMPSET_32
    int32_t newval, oldval;
    do {
        oldval = module->m_tag_counter;
        newval = (oldval + 1) % mca_pml.pml_max_tag;
    } while (0 == opal_atomic_cmpset_32(&module->m_tag_counter, oldval, newval));
    return newval;
#else
    int32_t ret;
    /* no compare and swap - have to lock the module */
    OPAL_THREAD_LOCK(&module->m_lock);
    module->m_tag_counter = (module->m_tag_counter + 1) % mca_pml.pml_max_tag;
    ret = module->m_tag_counter;
    OPAL_THREAD_UNLOCK(&module->m_lock);
    return ret;
#endif
}

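/* Note completion of one incoming message: decrement
 * m_num_pending_in and, once the count reaches zero, finish any
 * pending passive-target unlock and wake threads waiting on the
 * module condition variable. */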
static inline void
inmsg_mark_complete(ompi_osc_rdma_module_t *module)
{
    int32_t count;
    bool need_unlock = false;

    OPAL_THREAD_LOCK(&module->m_lock);
    count = (module->m_num_pending_in -= 1);
    if ((0 != module->m_lock_status) &&
        (opal_list_get_size(&module->m_unlocks_pending) != 0)) {
        need_unlock = true;
    }
    OPAL_THREAD_UNLOCK(&module->m_lock);

    if (0 == count) {
        if (need_unlock) ompi_osc_rdma_passive_unlock_complete(module);
        opal_condition_broadcast(&module->m_cond);
    }
}

/**********************************************************************
 *
 * Multi-buffer support
 *
 **********************************************************************/
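/* Close out the coalescing buffer queued for 'rank': append the
 * MULTI_END marker header, hand the descriptor to the BML for
 * sending, and reset the per-peer pending-buffer bookkeeping. */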
static int
send_multi_buffer(ompi_osc_rdma_module_t *module, int rank)
{
    ompi_osc_rdma_base_header_t *header = (ompi_osc_rdma_base_header_t*)
        ((char*) module->m_pending_buffers[rank].descriptor->des_src[0].seg_addr.pval +
         module->m_pending_buffers[rank].descriptor->des_src[0].seg_len);

    header->hdr_type = OMPI_OSC_RDMA_HDR_MULTI_END;
    header->hdr_flags = 0;

    module->m_pending_buffers[rank].descriptor->des_src[0].seg_len +=
        sizeof(ompi_osc_rdma_base_header_t);
    mca_bml_base_send(module->m_pending_buffers[rank].bml_btl,
                      module->m_pending_buffers[rank].descriptor,
                      MCA_BTL_TAG_OSC_RDMA);

    module->m_pending_buffers[rank].descriptor = NULL;
    module->m_pending_buffers[rank].bml_btl = NULL;
    module->m_pending_buffers[rank].remain_len = 0;

    return OMPI_SUCCESS;
}

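/* Push out every partially filled coalescing buffer so that all
 * queued control/data fragments actually go on the wire. */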
int
ompi_osc_rdma_flush(ompi_osc_rdma_module_t *module)
{
    int i;

    for (i = 0 ; i < ompi_comm_size(module->m_comm) ; ++i) {
        if (module->m_pending_buffers[i].descriptor != NULL) {
            send_multi_buffer(module, i);
        }
    }

    return OMPI_SUCCESS;
}

/**********************************************************************
 *
 * RDMA data transfers (put / get)
 *
 **********************************************************************/
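/* Completion callback for a direct btl_put/btl_get issued by
 * ompi_osc_rdma_sendreq_rdma().  Updates the outstanding-operation
 * counters, releases the descriptor and sendreq, and wakes any
 * thread waiting for the pending counts to drain. */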
static void
rdma_cb(struct mca_btl_base_module_t* btl,
        struct mca_btl_base_endpoint_t* endpoint,
        struct mca_btl_base_descriptor_t* descriptor,
        int status)
{
    ompi_osc_rdma_sendreq_t *sendreq =
        (ompi_osc_rdma_sendreq_t*) descriptor->des_cbdata;
    int32_t out_count, rdma_count;

    assert(OMPI_SUCCESS == status);

    OPAL_THREAD_LOCK(&sendreq->req_module->m_lock);
    out_count = (sendreq->req_module->m_num_pending_out -= 1);
    rdma_count = (sendreq->req_module->m_rdma_num_pending -= 1);
    OPAL_THREAD_UNLOCK(&sendreq->req_module->m_lock);

    btl->btl_free(btl, descriptor);
    ompi_osc_rdma_sendreq_free(sendreq);

    if ((0 == out_count) || (0 == rdma_count)) {
        opal_condition_broadcast(&sendreq->req_module->m_cond);
    }
}

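/* Try to satisfy a contiguous put/get directly with BTL-level RDMA.
 * A BTL is chosen round-robin from the peer's RDMA-capable BTLs and
 * the remote segment is built from the peer's exposed base address,
 * the target displacement, and the registration key.  Returns
 * OMPI_ERR_NOT_SUPPORTED when the peer has no RDMA-capable BTL so
 * the caller can fall back to the send/receive path. */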
static int
ompi_osc_rdma_sendreq_rdma(ompi_osc_rdma_module_t *module,
                           ompi_osc_rdma_sendreq_t *sendreq)
{
    mca_btl_base_descriptor_t* descriptor;
    ompi_osc_rdma_btl_t *rdma_btl = NULL;
    mca_btl_base_module_t* btl;
    size_t size = sendreq->req_origin_bytes_packed;
    int index, target, ret;

    target = sendreq->req_target_rank;

    if (module->m_peer_info[target].peer_num_btls > 0) {

        index = ++(module->m_peer_info[target].peer_index_btls);
        if (index >= module->m_peer_info[target].peer_num_btls) {
            module->m_peer_info[target].peer_index_btls = 0;
            index = 0;
        }

        rdma_btl = &(module->m_peer_info[target].peer_btls[index]);
        btl = rdma_btl->bml_btl->btl;

        if (sendreq->req_type == OMPI_OSC_RDMA_PUT) {
            mca_bml_base_prepare_src(rdma_btl->bml_btl, NULL,
                                     &sendreq->req_origin_convertor, rdma_btl->rdma_order,
                                     0, &size, 0, &descriptor);

            assert(NULL != descriptor);

            descriptor->des_dst = sendreq->remote_segs;
            descriptor->des_dst_cnt = 1;
            descriptor->des_dst[0].seg_addr.lval =
                module->m_peer_info[target].peer_base +
                ((unsigned long)sendreq->req_target_disp * module->m_win->w_disp_unit);
            descriptor->des_dst[0].seg_len =
                sendreq->req_origin_bytes_packed;
            descriptor->des_dst[0].seg_key.key64[0] =
                rdma_btl->peer_seg_key;
#if 0
            opal_output(0, "putting to %d: 0x%lx(%d), %d, %d",
                        target, descriptor->des_dst[0].seg_addr.lval,
                        descriptor->des_dst[0].seg_len,
                        rdma_btl->rdma_order,
                        descriptor->order);
#endif
            descriptor->des_cbdata = sendreq;
            descriptor->des_cbfunc = rdma_cb;

            ret = btl->btl_put(btl, rdma_btl->bml_btl->btl_endpoint,
                               descriptor);
        } else {
            mca_bml_base_prepare_dst(rdma_btl->bml_btl,
                                     NULL, &sendreq->req_origin_convertor, rdma_btl->rdma_order,
                                     0, &size, 0, &descriptor);

            assert(NULL != descriptor);

            descriptor->des_src = sendreq->remote_segs;
            descriptor->des_src_cnt = 1;
            descriptor->des_src[0].seg_addr.lval =
                module->m_peer_info[target].peer_base +
                ((unsigned long)sendreq->req_target_disp * module->m_win->w_disp_unit);
            descriptor->des_src[0].seg_len =
                sendreq->req_origin_bytes_packed;
            descriptor->des_src[0].seg_key.key64[0] =
                rdma_btl->peer_seg_key;

            descriptor->des_cbdata = sendreq;
            descriptor->des_cbfunc = rdma_cb;

            ret = btl->btl_get(btl, rdma_btl->bml_btl->btl_endpoint,
                               descriptor);
        }
        rdma_btl->rdma_order = descriptor->order;

        if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
            return ret;
        } else {
            OPAL_THREAD_LOCK(&module->m_lock);
            rdma_btl->num_sent++;
            sendreq->req_module->m_rdma_num_pending += 1;
            OPAL_THREAD_UNLOCK(&module->m_lock);
        }
    } else {
        return OMPI_ERR_NOT_SUPPORTED;
    }

    return OMPI_SUCCESS;
}

/**********************************************************************
 *
 * Sending a sendreq to target
 *
 **********************************************************************/
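/* Completion callback for the PML isend that carries a long put or
 * accumulate payload.  Decrements the pending-out count, releases
 * the long request and sendreq, and wakes waiters when the count
 * reaches zero. */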
static int
ompi_osc_rdma_sendreq_send_long_cb(ompi_request_t *request)
{
    ompi_osc_rdma_longreq_t *longreq =
        (ompi_osc_rdma_longreq_t*) request->req_complete_cb_data;
    ompi_osc_rdma_sendreq_t *sendreq = longreq->req_basereq.req_sendreq;
    int32_t count;

    OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                         "%d completed long sendreq to %d",
                         ompi_comm_rank(sendreq->req_module->m_comm),
                         sendreq->req_target_rank));

    OPAL_THREAD_LOCK(&sendreq->req_module->m_lock);
    count = (sendreq->req_module->m_num_pending_out -= 1);
    OPAL_THREAD_UNLOCK(&sendreq->req_module->m_lock);

    ompi_osc_rdma_longreq_free(longreq);
    ompi_osc_rdma_sendreq_free(sendreq);

    if (0 == count) opal_condition_broadcast(&sendreq->req_module->m_cond);

    ompi_request_free(&request);
    return OMPI_SUCCESS;
}

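/* BTL completion callback for a control fragment.  The fragment may
 * contain several packed headers (multi-buffer case), so each header
 * is examined in turn: eagerly sent messages are finished here,
 * while long messages trigger the matching PML isend of the payload.
 * After the descriptor is released, any sendreqs that were queued
 * for lack of resources are restarted. */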
static void
ompi_osc_rdma_sendreq_send_cb(struct mca_btl_base_module_t* btl,
                              struct mca_btl_base_endpoint_t *endpoint,
                              struct mca_btl_base_descriptor_t* descriptor,
                              int status)
{
    ompi_osc_rdma_send_header_t *header =
        (ompi_osc_rdma_send_header_t*) descriptor->des_src[0].seg_addr.pval;
    ompi_osc_rdma_sendreq_t *sendreq = NULL;
    ompi_osc_rdma_module_t *module = NULL;
    int32_t count;
    bool done = false;

    if (OMPI_SUCCESS != status) {
        /* requeue and return */
        /* BWB - FIX ME - figure out where to put this bad boy */
        abort();
        return;
    }

    if (header->hdr_base.hdr_type == OMPI_OSC_RDMA_HDR_MULTI_END) {
        done = true;
    }

    while (!done) {
        sendreq = (ompi_osc_rdma_sendreq_t*) header->hdr_origin_sendreq.pval;
        module = sendreq->req_module;

        /* have to look at header, and not the sendreq because in the
           case of get, it's possible that the sendreq has been freed
           already (if the remote side replies before we get our send
           completion callback) and already allocated to another
           request.  We don't wait for this completion before exiting
           a synchronization point in the case of get, as we really
           don't care when it completes - only when the data
           arrives. */
        if (OMPI_OSC_RDMA_HDR_GET != header->hdr_base.hdr_type) {
#if !defined(WORDS_BIGENDIAN) && OPAL_ENABLE_HETEROGENEOUS_SUPPORT
            if (header->hdr_base.hdr_flags & OMPI_OSC_RDMA_HDR_FLAG_NBO) {
                OMPI_OSC_RDMA_SEND_HDR_NTOH(*header);
            }
#endif
            /* do we need to post a send? */
            if (header->hdr_msg_length != 0) {
                /* sendreq is done.  Mark it as so and get out of here */
                OPAL_THREAD_LOCK(&sendreq->req_module->m_lock);
                count = sendreq->req_module->m_num_pending_out -= 1;
                OPAL_THREAD_UNLOCK(&sendreq->req_module->m_lock);
                ompi_osc_rdma_sendreq_free(sendreq);
                if (0 == count) {
                    opal_condition_broadcast(&sendreq->req_module->m_cond);
                }
            } else {
                ompi_osc_rdma_longreq_t *longreq;
                ompi_osc_rdma_longreq_alloc(&longreq);

                longreq->req_basereq.req_sendreq = sendreq;

                OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                                     "%d starting long sendreq to %d (%d)",
                                     ompi_comm_rank(sendreq->req_module->m_comm),
                                     sendreq->req_target_rank,
                                     header->hdr_origin_tag));

                ompi_osc_rdma_component_isend(sendreq->req_origin_convertor.pBaseBuf,
                                              sendreq->req_origin_convertor.count,
                                              sendreq->req_origin_datatype,
                                              sendreq->req_target_rank,
                                              header->hdr_origin_tag,
                                              sendreq->req_module->m_comm,
                                              &(longreq->request),
                                              ompi_osc_rdma_sendreq_send_long_cb,
                                              longreq);
            }
        } else {
            ompi_osc_rdma_sendreq_free(sendreq);
        }

        if (0 == (header->hdr_base.hdr_flags & OMPI_OSC_RDMA_HDR_FLAG_MULTI)) {
            done = true;
        } else {
            /* Find starting point for next header.  Note that the last part
             * added in to compute the starting point for the next header is
             * extra padding that may have been inserted. */
            header = (ompi_osc_rdma_send_header_t*)
                (((char*) header) +
                 sizeof(ompi_osc_rdma_send_header_t) +
                 ompi_datatype_pack_description_length(sendreq->req_target_datatype) +
                 header->hdr_msg_length +
                 (header->hdr_base.hdr_flags & OMPI_OSC_RDMA_HDR_FLAG_ALIGN_MASK));

            if (header->hdr_base.hdr_type == OMPI_OSC_RDMA_HDR_MULTI_END) {
                done = true;
            }
        }
    }

    /* release the descriptor and sendreq */
    btl->btl_free(btl, descriptor);

    if (opal_list_get_size(&module->m_queued_sendreqs) > 0) {
        opal_list_item_t *item;
        int ret, i, len;

        len = opal_list_get_size(&module->m_queued_sendreqs);
        OPAL_OUTPUT_VERBOSE((40, ompi_osc_base_output,
                             "%d items in restart queue",
                             len));
        for (i = 0 ; i < len ; ++i) {
            OPAL_THREAD_LOCK(&module->m_lock);
            item = opal_list_remove_first(&module->m_queued_sendreqs);
            OPAL_THREAD_UNLOCK(&module->m_lock);
            if (NULL == item) break;

            ret = ompi_osc_rdma_sendreq_send(module, (ompi_osc_rdma_sendreq_t*) item);
            if (OMPI_SUCCESS != ret) {
                OPAL_THREAD_LOCK(&module->m_lock);
                opal_list_append(&(module->m_queued_sendreqs), item);
                OPAL_THREAD_UNLOCK(&module->m_lock);
            }
        }

        /* flush so things actually get sent out and resources restored */
        ompi_osc_rdma_flush(module);
    }
}

/* create the initial fragment, pack header, datatype, and payload (if
   size fits) and send */
int
ompi_osc_rdma_sendreq_send(ompi_osc_rdma_module_t *module,
                           ompi_osc_rdma_sendreq_t *sendreq)
{
    int ret = OMPI_SUCCESS;
    mca_bml_base_endpoint_t *endpoint = NULL;
    mca_bml_base_btl_t *bml_btl = NULL;
    mca_btl_base_module_t* btl = NULL;
    mca_btl_base_descriptor_t *descriptor = NULL;
    ompi_osc_rdma_send_header_t *header = NULL;
    size_t written_data = 0;
    size_t offset;
    size_t needed_len = sizeof(ompi_osc_rdma_send_header_t);
    const void *packed_ddt;
    size_t packed_ddt_len, remain;

    if ((module->m_eager_send_active) &&
        (module->m_use_rdma) &&
        (ompi_datatype_is_contiguous_memory_layout(sendreq->req_target_datatype,
                                                   sendreq->req_target_count)) &&
        (!opal_convertor_need_buffers(&sendreq->req_origin_convertor)) &&
        (sendreq->req_type != OMPI_OSC_RDMA_ACC)) {
        ret = ompi_osc_rdma_sendreq_rdma(module, sendreq);
        if (OPAL_LIKELY(OMPI_SUCCESS == ret)) return ret;
    }

    /* we always need to send the ddt */
    packed_ddt_len = ompi_datatype_pack_description_length(sendreq->req_target_datatype);
    needed_len += packed_ddt_len;
    if (OMPI_OSC_RDMA_GET != sendreq->req_type) {
        needed_len += sendreq->req_origin_bytes_packed;
    }

    /* Reuse the buffer if:
     * - The whole message will fit
     * - The header and datatype will fit AND the payload would be long anyway
     * Note that if the datatype is too big for an eager, we'll fall
     * through and return an error out of the new buffer case */
    if ((module->m_pending_buffers[sendreq->req_target_rank].remain_len >= needed_len) ||
        ((sizeof(ompi_osc_rdma_send_header_t) + packed_ddt_len <
          module->m_pending_buffers[sendreq->req_target_rank].remain_len) &&
         (needed_len > module->m_pending_buffers[sendreq->req_target_rank].bml_btl->btl->btl_eager_limit))) {
        bml_btl = module->m_pending_buffers[sendreq->req_target_rank].bml_btl;
        descriptor = module->m_pending_buffers[sendreq->req_target_rank].descriptor;
        remain = module->m_pending_buffers[sendreq->req_target_rank].remain_len;
    } else {
        /* send the existing buffer */
        if (module->m_pending_buffers[sendreq->req_target_rank].descriptor) {
            send_multi_buffer(module, sendreq->req_target_rank);
        }

        /* get a buffer... */
        endpoint = (mca_bml_base_endpoint_t*) sendreq->req_target_proc->proc_bml;
        bml_btl = mca_bml_base_btl_array_get_next(&endpoint->btl_eager);
        btl = bml_btl->btl;
        mca_bml_base_alloc(bml_btl, &descriptor, MCA_BTL_NO_ORDER,
                           module->m_use_buffers ? btl->btl_eager_limit :
                           needed_len < btl->btl_eager_limit ? needed_len :
                           btl->btl_eager_limit, MCA_BTL_DES_FLAGS_PRIORITY | MCA_BTL_DES_SEND_ALWAYS_CALLBACK);
        if (NULL == descriptor) {
            ret = OMPI_ERR_TEMP_OUT_OF_RESOURCE;
            goto cleanup;
        }

        /* verify at least enough space for the header and packed datatype */
        if (descriptor->des_src[0].seg_len < sizeof(ompi_osc_rdma_send_header_t) + packed_ddt_len) {
            ret = MPI_ERR_TRUNCATE;
            goto cleanup;
        }

        /* setup descriptor */
        descriptor->des_cbfunc = ompi_osc_rdma_sendreq_send_cb;

        module->m_pending_buffers[sendreq->req_target_rank].bml_btl = bml_btl;
        module->m_pending_buffers[sendreq->req_target_rank].descriptor = descriptor;
        module->m_pending_buffers[sendreq->req_target_rank].remain_len = descriptor->des_src[0].seg_len - sizeof(ompi_osc_rdma_base_header_t);
        remain = module->m_pending_buffers[sendreq->req_target_rank].remain_len;
        descriptor->des_src[0].seg_len = 0;
    }

    /* pack header */
    header = (ompi_osc_rdma_send_header_t*)
        ((char*) descriptor->des_src[0].seg_addr.pval + descriptor->des_src[0].seg_len);
    written_data += sizeof(ompi_osc_rdma_send_header_t);
    header->hdr_base.hdr_flags = 0;
    header->hdr_windx = ompi_comm_get_cid(sendreq->req_module->m_comm);
    header->hdr_origin = ompi_comm_rank(sendreq->req_module->m_comm);
    header->hdr_origin_sendreq.pval = (void*) sendreq;
    header->hdr_origin_tag = 0;
    header->hdr_target_disp = sendreq->req_target_disp;
    header->hdr_target_count = sendreq->req_target_count;

    switch (sendreq->req_type) {
    case OMPI_OSC_RDMA_PUT:
        header->hdr_base.hdr_type = OMPI_OSC_RDMA_HDR_PUT;
#if OPAL_ENABLE_MEM_DEBUG
        header->hdr_target_op = 0;
#endif
        break;

    case OMPI_OSC_RDMA_ACC:
        header->hdr_base.hdr_type = OMPI_OSC_RDMA_HDR_ACC;
        header->hdr_target_op = sendreq->req_op_id;
        break;

    case OMPI_OSC_RDMA_GET:
        header->hdr_base.hdr_type = OMPI_OSC_RDMA_HDR_GET;
#if OPAL_ENABLE_MEM_DEBUG
        header->hdr_target_op = 0;
#endif
        sendreq->req_refcount++;
        break;
    }

    /* Set datatype id and / or pack datatype */
    ret = ompi_datatype_get_pack_description(sendreq->req_target_datatype, &packed_ddt);
    if (OMPI_SUCCESS != ret) goto cleanup;
    memcpy((unsigned char*) descriptor->des_src[0].seg_addr.pval + descriptor->des_src[0].seg_len + written_data,
           packed_ddt, packed_ddt_len);
    written_data += packed_ddt_len;

    if (OMPI_OSC_RDMA_GET != sendreq->req_type) {
        /* if sending data and it fits, pack payload */
        if (remain >= written_data + sendreq->req_origin_bytes_packed) {
            struct iovec iov;
            uint32_t iov_count = 1;
            size_t max_data = sendreq->req_origin_bytes_packed;

            iov.iov_len = max_data;
            iov.iov_base = (IOVBASE_TYPE*)((unsigned char*) descriptor->des_src[0].seg_addr.pval + descriptor->des_src[0].seg_len + written_data);

            ret = opal_convertor_pack(&sendreq->req_origin_convertor, &iov, &iov_count,
                                      &max_data );
            if (ret < 0) {
                ret = OMPI_ERR_FATAL;
                goto cleanup;
            }

            written_data += max_data;
            descriptor->des_src[0].seg_len += written_data;

            header->hdr_msg_length = sendreq->req_origin_bytes_packed;
        } else {
            descriptor->des_src[0].seg_len += written_data;

            header->hdr_msg_length = 0;
            header->hdr_origin_tag = create_send_tag(module);
        }
    } else {
        descriptor->des_src[0].seg_len += written_data;
        header->hdr_msg_length = 0;
    }
    module->m_pending_buffers[sendreq->req_target_rank].remain_len -= written_data;

    if (module->m_use_buffers) {
        header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_MULTI;

        /* When putting multiple messages in a single buffer, the
         * starting point for the next message needs to be aligned with
         * pointer addresses.  Therefore, the pointer, amount written
         * and space remaining are adjusted forward so that the
         * starting position for the next message is aligned properly.
         * The amount of this alignment is embedded in the hdr_flags
         * field so the callback completion and receiving side can
         * also know how much to move the pointer to find the starting
         * point of the next header.  This strict alignment is
         * required by certain platforms like SPARC.  Without it,
         * bus errors can occur.  Keeping things aligned also may
         * offer some performance improvements on other platforms.
         */
        offset = OPAL_ALIGN_PAD_AMOUNT(descriptor->des_src[0].seg_len, sizeof(uint64_t));
        if (0 != offset) {
            header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_ALIGN_MASK & offset;
            descriptor->des_src[0].seg_len += offset;
            written_data += offset;
            module->m_pending_buffers[sendreq->req_target_rank].remain_len -= offset;
        }

#ifdef WORDS_BIGENDIAN
        header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;
#elif OPAL_ENABLE_HETEROGENEOUS_SUPPORT
        if (sendreq->req_target_proc->proc_arch & OPAL_ARCH_ISBIGENDIAN) {
            header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;
            OMPI_OSC_RDMA_SEND_HDR_HTON(*header);
        }
#endif

        if (module->m_pending_buffers[sendreq->req_target_rank].remain_len <
            sizeof(ompi_osc_rdma_send_header_t) + 128) {
            /* not enough space left - send now */
            ret = send_multi_buffer(module, sendreq->req_target_rank);
        } else {
            ret = OMPI_SUCCESS;
        }

        goto done;
    } else {
#ifdef WORDS_BIGENDIAN
        header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;
#elif OPAL_ENABLE_HETEROGENEOUS_SUPPORT
        if (sendreq->req_target_proc->proc_arch & OPAL_ARCH_ISBIGENDIAN) {
            header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;
            OMPI_OSC_RDMA_SEND_HDR_HTON(*header);
        }
#endif

        /* send fragment */
        OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                             "%d sending sendreq to %d",
                             ompi_comm_rank(sendreq->req_module->m_comm),
                             sendreq->req_target_rank));

        module->m_pending_buffers[sendreq->req_target_rank].bml_btl = NULL;
        module->m_pending_buffers[sendreq->req_target_rank].descriptor = NULL;
        module->m_pending_buffers[sendreq->req_target_rank].remain_len = 0;

        ret = mca_bml_base_send(bml_btl, descriptor, MCA_BTL_TAG_OSC_RDMA);
        if (1 == ret) ret = OMPI_SUCCESS;
        goto done;
    }

 cleanup:
    if (descriptor != NULL) {
        mca_bml_base_free(bml_btl, descriptor);
    }

 done:
    return ret;
}

/**********************************************************************
 *
 * Sending a replyreq back to origin
 *
 **********************************************************************/
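/* Completion callback for the PML isend carrying a long get reply.
 * Marks the incoming message complete and releases the long request
 * and replyreq. */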
static int
ompi_osc_rdma_replyreq_send_long_cb(ompi_request_t *request)
{
    ompi_osc_rdma_longreq_t *longreq =
        (ompi_osc_rdma_longreq_t*) request->req_complete_cb_data;
    ompi_osc_rdma_replyreq_t *replyreq = longreq->req_basereq.req_replyreq;

    inmsg_mark_complete(replyreq->rep_module);

    ompi_osc_rdma_longreq_free(longreq);
    ompi_osc_rdma_replyreq_free(replyreq);

    ompi_request_free(&request);

    return OMPI_SUCCESS;
}

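/* BTL completion callback for a get-reply fragment.  If the reply
 * payload was sent eagerly the replyreq is finished here; otherwise
 * the payload is pushed with a PML isend matched by the tag carried
 * in the reply header. */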
static void
ompi_osc_rdma_replyreq_send_cb(struct mca_btl_base_module_t* btl,
                               struct mca_btl_base_endpoint_t *endpoint,
                               struct mca_btl_base_descriptor_t* descriptor,
                               int status)
{
    ompi_osc_rdma_replyreq_t *replyreq =
        (ompi_osc_rdma_replyreq_t*) descriptor->des_cbdata;
    ompi_osc_rdma_reply_header_t *header =
        (ompi_osc_rdma_reply_header_t*) descriptor->des_src[0].seg_addr.pval;

    if (OMPI_SUCCESS != status) {
        /* requeue and return */
        /* BWB - FIX ME - figure out where to put this bad boy */
        abort();
        return;
    }

#if !defined(WORDS_BIGENDIAN) && OPAL_ENABLE_HETEROGENEOUS_SUPPORT
    if (header->hdr_base.hdr_flags & OMPI_OSC_RDMA_HDR_FLAG_NBO) {
        OMPI_OSC_RDMA_REPLY_HDR_NTOH(*header);
    }
#endif

    /* do we need to post a send? */
    if (header->hdr_msg_length != 0) {
        /* replyreq is done.  Mark it as so and get out of here */
        inmsg_mark_complete(replyreq->rep_module);
        ompi_osc_rdma_replyreq_free(replyreq);
    } else {
        ompi_osc_rdma_longreq_t *longreq;
        ompi_osc_rdma_longreq_alloc(&longreq);
        longreq->req_basereq.req_replyreq = replyreq;

        ompi_osc_rdma_component_isend(replyreq->rep_target_convertor.pBaseBuf,
                                      replyreq->rep_target_convertor.count,
                                      replyreq->rep_target_datatype,
                                      replyreq->rep_origin_rank,
                                      header->hdr_target_tag,
                                      replyreq->rep_module->m_comm,
                                      &(longreq->request),
                                      ompi_osc_rdma_replyreq_send_long_cb,
                                      longreq);
    }

    /* release the descriptor and replyreq */
    btl->btl_free(btl, descriptor);
}

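/* Send a reply (the data for a get) back to the origin.  The reply
 * header and, when it fits in an eager fragment, the packed payload
 * are sent in a single BTL fragment; otherwise only the header goes
 * out and the payload follows as a long message. */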
int
ompi_osc_rdma_replyreq_send(ompi_osc_rdma_module_t *module,
                            ompi_osc_rdma_replyreq_t *replyreq)
{
    int ret = OMPI_SUCCESS;
    mca_bml_base_endpoint_t *endpoint = NULL;
    mca_bml_base_btl_t *bml_btl = NULL;
    mca_btl_base_descriptor_t *descriptor = NULL;
    ompi_osc_rdma_reply_header_t *header = NULL;
    size_t written_data = 0;

    /* Get a BTL and a fragment to go with it */
    endpoint = (mca_bml_base_endpoint_t*) replyreq->rep_origin_proc->proc_bml;
    bml_btl = mca_bml_base_btl_array_get_next(&endpoint->btl_eager);
    mca_bml_base_alloc(bml_btl, &descriptor, MCA_BTL_NO_ORDER,
                       bml_btl->btl->btl_eager_limit, MCA_BTL_DES_FLAGS_PRIORITY | MCA_BTL_DES_SEND_ALWAYS_CALLBACK);
    if (NULL == descriptor) {
        ret = OMPI_ERR_TEMP_OUT_OF_RESOURCE;
        goto cleanup;
    }

    /* verify at least enough space for header */
    if (descriptor->des_src[0].seg_len < sizeof(ompi_osc_rdma_reply_header_t)) {
        ret = OMPI_ERR_OUT_OF_RESOURCE;
        goto cleanup;
    }

    /* setup descriptor */
    descriptor->des_cbfunc = ompi_osc_rdma_replyreq_send_cb;
    descriptor->des_cbdata = (void*) replyreq;

    /* pack header */
    header = (ompi_osc_rdma_reply_header_t*) descriptor->des_src[0].seg_addr.pval;
    written_data += sizeof(ompi_osc_rdma_reply_header_t);
    header->hdr_base.hdr_type = OMPI_OSC_RDMA_HDR_REPLY;
    header->hdr_base.hdr_flags = 0;
    header->hdr_origin_sendreq = replyreq->rep_origin_sendreq;
    header->hdr_target_tag = 0;

    /* if sending data fits, pack payload */
    if (descriptor->des_src[0].seg_len >=
        written_data + replyreq->rep_target_bytes_packed) {
        struct iovec iov;
        uint32_t iov_count = 1;
        size_t max_data = replyreq->rep_target_bytes_packed;

        iov.iov_len = max_data;
        iov.iov_base = (IOVBASE_TYPE*)((unsigned char*) descriptor->des_src[0].seg_addr.pval + written_data);

        MEMCHECKER(
            memchecker_convertor_call(&opal_memchecker_base_mem_defined,
                                      &replyreq->rep_target_convertor);
        );
        ret = opal_convertor_pack(&replyreq->rep_target_convertor, &iov, &iov_count,
                                  &max_data );
        MEMCHECKER(
            memchecker_convertor_call(&opal_memchecker_base_mem_noaccess,
                                      &replyreq->rep_target_convertor);
        );

        if (ret < 0) {
            ret = OMPI_ERR_FATAL;
            goto cleanup;
        }

        assert(max_data == replyreq->rep_target_bytes_packed);
        written_data += max_data;
        descriptor->des_src[0].seg_len = written_data;

        header->hdr_msg_length = replyreq->rep_target_bytes_packed;
    } else {
        header->hdr_msg_length = 0;
        header->hdr_target_tag = create_send_tag(module);
    }

#ifdef WORDS_BIGENDIAN
    header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;
#elif OPAL_ENABLE_HETEROGENEOUS_SUPPORT
    if (replyreq->rep_origin_proc->proc_arch & OPAL_ARCH_ISBIGENDIAN) {
        header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;
        OMPI_OSC_RDMA_REPLY_HDR_HTON(*header);
    }
#endif

    /* send fragment */
    ret = mca_bml_base_send(bml_btl, descriptor, MCA_BTL_TAG_OSC_RDMA);
    if (1 == ret) ret = OMPI_SUCCESS;
    goto done;

 cleanup:
    if (descriptor != NULL) {
        mca_bml_base_free(bml_btl, descriptor);
    }

 done:
    return ret;
}

/**********************************************************************
 *
 * Receive a put on the target side
 *
 **********************************************************************/
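/* Completion callback for the receive of a long put payload.
 * Releases the datatype reference taken when the receive was set up,
 * marks the incoming message complete, and frees the long request. */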
static int
ompi_osc_rdma_sendreq_recv_put_long_cb(ompi_request_t *request)
{
    ompi_osc_rdma_longreq_t *longreq =
        (ompi_osc_rdma_longreq_t*) request->req_complete_cb_data;

    OBJ_RELEASE(longreq->req_datatype);

    OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
                         "%d finished receiving long put message",
                         ompi_comm_rank(longreq->req_module->m_comm)));

    inmsg_mark_complete(longreq->req_module);
    ompi_osc_rdma_longreq_free(longreq);

    ompi_request_free(&request);

    return OMPI_SUCCESS;
}

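/* Apply an incoming put to the local window.  The target address is
 * computed from the displacement carried in the header, and the
 * origin's datatype is recreated from the packed description in the
 * incoming buffer; eagerly delivered payloads (hdr_msg_length > 0)
 * are unpacked through a temporary convertor. */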
int
ompi_osc_rdma_sendreq_recv_put(ompi_osc_rdma_module_t *module,
                               ompi_osc_rdma_send_header_t *header,
                               void **inbuf)
{
    int ret = OMPI_SUCCESS;
    void *target = (unsigned char*) module->m_win->w_baseptr +
        ((unsigned long)header->hdr_target_disp * module->m_win->w_disp_unit);
    ompi_proc_t *proc = ompi_comm_peer_lookup( module->m_comm, header->hdr_origin );
    struct ompi_datatype_t *datatype =
        ompi_osc_base_datatype_create(proc, inbuf);

    if (NULL == datatype) {
        opal_output(ompi_osc_base_output,
                    "Error recreating datatype.  Aborting.");
        ompi_mpi_abort(module->m_comm, 1, false);
    }

    if (header->hdr_msg_length > 0) {
        opal_convertor_t convertor;
        struct iovec iov;
        uint32_t iov_count = 1;
        size_t max_data;
        ompi_proc_t *proc;

        /* create convertor */
        OBJ_CONSTRUCT(&convertor, opal_convertor_t);

        /* initialize convertor */
        proc = ompi_comm_peer_lookup(module->m_comm, header->hdr_origin);
|
2009-07-13 08:56:31 +04:00
|
|
|
opal_convertor_copy_and_prepare_for_recv(proc->proc_convertor,
|
|
|
|
&(datatype->super),
|
2006-07-18 02:08:55 +04:00
|
|
|
header->hdr_target_count,
|
|
|
|
target,
|
|
|
|
0,
|
|
|
|
&convertor);
|
2008-04-16 17:29:55 +04:00
|
|
|
iov.iov_len = header->hdr_msg_length;
|
2007-07-06 01:40:06 +04:00
|
|
|
iov.iov_base = (IOVBASE_TYPE*)*inbuf;
|
2008-04-16 17:29:55 +04:00
|
|
|
max_data = iov.iov_len;
|
|
|
|
MEMCHECKER(
|
|
|
|
memchecker_convertor_call(&opal_memchecker_base_mem_defined, &convertor);
|
|
|
|
);
|
2009-07-13 08:56:31 +04:00
|
|
|
opal_convertor_unpack(&convertor,
|
2006-07-18 02:08:55 +04:00
|
|
|
&iov,
|
|
|
|
&iov_count,
|
2006-10-27 03:11:26 +04:00
|
|
|
&max_data );
|
2008-04-16 17:29:55 +04:00
|
|
|
MEMCHECKER(
|
|
|
|
memchecker_convertor_call(&opal_memchecker_base_mem_noaccess, &convertor);
|
|
|
|
);
|
2006-07-18 02:08:55 +04:00
|
|
|
OBJ_DESTRUCT(&convertor);
|
|
|
|
OBJ_RELEASE(datatype);
|
2007-05-24 19:41:24 +04:00
|
|
|
inmsg_mark_complete(module);
|
2007-07-06 01:40:06 +04:00
|
|
|
*inbuf = ((char*) *inbuf) + header->hdr_msg_length;
|
|
|
|
|
2008-06-09 18:53:58 +04:00
|
|
|
OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
|
2007-07-06 01:40:06 +04:00
|
|
|
"%d received put message from %d",
|
2007-07-11 21:16:06 +04:00
|
|
|
ompi_comm_rank(module->m_comm),
|
2007-07-06 01:40:06 +04:00
|
|
|
header->hdr_origin));
|
|
|
|
|
2006-07-18 02:08:55 +04:00
|
|
|
} else {
|
2007-07-06 01:40:06 +04:00
|
|
|
ompi_osc_rdma_longreq_t *longreq;
|
|
|
|
ompi_osc_rdma_longreq_alloc(&longreq);
|
|
|
|
longreq->req_datatype = datatype;
|
|
|
|
longreq->req_module = module;
|
2006-07-18 02:08:55 +04:00
|
|
|
|
2011-07-01 00:05:16 +04:00
|
|
|
ompi_osc_rdma_component_irecv(target,
|
|
|
|
header->hdr_target_count,
|
|
|
|
datatype,
|
|
|
|
header->hdr_origin,
|
|
|
|
header->hdr_origin_tag,
|
|
|
|
module->m_comm,
|
|
|
|
&(longreq->request),
|
|
|
|
ompi_osc_rdma_sendreq_recv_put_long_cb,
|
|
|
|
longreq);
|
2006-07-18 02:08:55 +04:00
|
|
|
|
2008-06-09 18:53:58 +04:00
|
|
|
OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
|
2007-07-06 01:40:06 +04:00
|
|
|
"%d started long recv put message from %d (%d)",
|
2007-07-11 21:16:06 +04:00
|
|
|
ompi_comm_rank(module->m_comm),
|
2007-07-06 01:40:06 +04:00
|
|
|
header->hdr_origin,
|
|
|
|
header->hdr_origin_tag));
|
2006-07-18 02:08:55 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/**********************************************************************
|
|
|
|
*
|
|
|
|
* Receive an accumulate on the target side
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
2007-07-14 00:46:12 +04:00
|
|
|
|
|
|
|
|
2011-07-01 00:05:16 +04:00
|
|
|
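/* Completion callback for the irecv posted by
 * ompi_osc_rdma_sendreq_recv_accum for a long accumulate.  The payload
 * was received into a temporary buffer just past the saved header;
 * under the module's accumulate lock it is either unpacked straight
 * into the window (MPI_REPLACE) or applied via
 * ompi_osc_base_process_op, after which the buffer, datatype, op, and
 * requests are released. */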
static int
|
|
|
|
ompi_osc_rdma_sendreq_recv_accum_long_cb(ompi_request_t *request)
|
2006-07-18 02:08:55 +04:00
|
|
|
{
|
2011-07-01 00:05:16 +04:00
|
|
|
ompi_osc_rdma_longreq_t *longreq =
|
|
|
|
(ompi_osc_rdma_longreq_t*) request->req_complete_cb_data;
|
|
|
|
ompi_osc_rdma_send_header_t *header = longreq->req_basereq.req_sendhdr;
|
2006-07-18 02:08:55 +04:00
|
|
|
void *payload = (void*) (header + 1);
|
|
|
|
int ret;
|
2007-07-14 00:46:12 +04:00
|
|
|
ompi_osc_rdma_module_t *module = longreq->req_module;
|
|
|
|
unsigned char *target_buffer =
|
|
|
|
(unsigned char*) module->m_win->w_baseptr +
|
2008-02-07 21:45:35 +03:00
|
|
|
((unsigned long)header->hdr_target_disp * module->m_win->w_disp_unit);
|
2006-07-18 02:08:55 +04:00
|
|
|
|
|
|
|
/* lock the window for accumulates */
|
2007-05-24 19:41:24 +04:00
|
|
|
OPAL_THREAD_LOCK(&longreq->req_module->m_acc_lock);
|
2006-07-18 02:08:55 +04:00
|
|
|
|
2009-02-24 20:17:33 +03:00
|
|
|
if (longreq->req_op == &ompi_mpi_op_replace.op) {
|
2009-07-13 08:56:31 +04:00
|
|
|
opal_convertor_t convertor;
|
2007-07-14 00:46:12 +04:00
|
|
|
struct iovec iov;
|
|
|
|
uint32_t iov_count = 1;
|
|
|
|
size_t max_data;
|
|
|
|
|
|
|
|
/* create convertor */
|
2009-07-13 08:56:31 +04:00
|
|
|
OBJ_CONSTRUCT(&convertor, opal_convertor_t);
|
2007-07-14 00:46:12 +04:00
|
|
|
|
|
|
|
/* initialize convertor */
|
2009-07-13 08:56:31 +04:00
|
|
|
opal_convertor_copy_and_prepare_for_recv(ompi_proc_local()->proc_convertor,
|
|
|
|
&(longreq->req_datatype->super),
|
2007-07-14 00:46:12 +04:00
|
|
|
header->hdr_target_count,
|
|
|
|
target_buffer,
|
|
|
|
0,
|
|
|
|
&convertor);
|
|
|
|
|
|
|
|
iov.iov_len = header->hdr_msg_length;
|
|
|
|
iov.iov_base = (IOVBASE_TYPE*) payload;
|
|
|
|
max_data = iov.iov_len;
|
2008-04-16 17:29:55 +04:00
|
|
|
MEMCHECKER(
|
|
|
|
memchecker_convertor_call(&opal_memchecker_base_mem_defined,
|
|
|
|
&convertor);
|
|
|
|
);
|
2009-07-13 08:56:31 +04:00
|
|
|
opal_convertor_unpack(&convertor,
|
2007-07-14 00:46:12 +04:00
|
|
|
&iov,
|
|
|
|
&iov_count,
|
|
|
|
&max_data);
|
2008-04-16 17:29:55 +04:00
|
|
|
MEMCHECKER(
|
|
|
|
memchecker_convertor_call(&opal_memchecker_base_mem_noaccess,
|
|
|
|
&convertor);
|
|
|
|
);
|
2007-07-14 00:46:12 +04:00
|
|
|
OBJ_DESTRUCT(&convertor);
|
|
|
|
} else {
|
|
|
|
/* copy the data from the temporary buffer into the user window */
|
|
|
|
ret = ompi_osc_base_process_op(target_buffer,
|
|
|
|
payload,
|
|
|
|
header->hdr_msg_length,
|
|
|
|
longreq->req_datatype,
|
|
|
|
header->hdr_target_count,
|
|
|
|
longreq->req_op);
|
|
|
|
}
|
2006-07-18 02:08:55 +04:00
|
|
|
|
|
|
|
/* unlock the window for accumulates */
|
2007-05-24 19:41:24 +04:00
|
|
|
OPAL_THREAD_UNLOCK(&longreq->req_module->m_acc_lock);
|
2006-07-18 02:08:55 +04:00
|
|
|
|
2008-06-09 18:53:58 +04:00
|
|
|
OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
|
2007-06-22 02:24:40 +04:00
|
|
|
"%d finished receiving long accum message from %d",
|
2007-07-11 21:16:06 +04:00
|
|
|
ompi_comm_rank(longreq->req_module->m_comm),
|
2007-06-22 02:24:40 +04:00
|
|
|
header->hdr_origin));
|
2006-07-18 02:08:55 +04:00
|
|
|
|
|
|
|
/* free the temp buffer */
|
2011-07-01 00:05:16 +04:00
|
|
|
free(longreq->req_basereq.req_sendhdr);
|
2006-07-18 02:08:55 +04:00
|
|
|
|
|
|
|
/* Release datatype & op */
|
|
|
|
OBJ_RELEASE(longreq->req_datatype);
|
|
|
|
OBJ_RELEASE(longreq->req_op);
|
|
|
|
|
2007-05-24 19:41:24 +04:00
|
|
|
inmsg_mark_complete(longreq->req_module);
|
2006-07-18 02:08:55 +04:00
|
|
|
|
|
|
|
ompi_osc_rdma_longreq_free(longreq);
|
2011-07-01 00:05:16 +04:00
|
|
|
|
|
|
|
ompi_request_free(&request);
|
|
|
|
|
|
|
|
return OMPI_SUCCESS;
|
2006-07-18 02:08:55 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
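/* Target-side handler for an incoming accumulate.  A short message is
 * applied immediately under the accumulate lock: MPI_REPLACE unpacks
 * through a convertor into the window, other ops go through
 * ompi_osc_base_process_op (converting into a temporary buffer first
 * when the origin's architecture differs).  A long message allocates a
 * temporary buffer behind a copy of the header, posts an irecv for the
 * primitive elements, and defers the work to
 * ompi_osc_rdma_sendreq_recv_accum_long_cb. */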
int
|
|
|
|
ompi_osc_rdma_sendreq_recv_accum(ompi_osc_rdma_module_t *module,
|
|
|
|
ompi_osc_rdma_send_header_t *header,
|
2007-07-06 01:40:06 +04:00
|
|
|
void **payload)
|
2006-07-18 02:08:55 +04:00
|
|
|
{
|
|
|
|
int ret = OMPI_SUCCESS;
|
2007-07-14 00:46:12 +04:00
|
|
|
struct ompi_op_t *op = ompi_osc_base_op_create(header->hdr_target_op);
|
2007-05-24 19:41:24 +04:00
|
|
|
ompi_proc_t *proc = ompi_comm_peer_lookup( module->m_comm, header->hdr_origin );
|
2006-07-18 02:08:55 +04:00
|
|
|
struct ompi_datatype_t *datatype =
|
2007-07-14 00:46:12 +04:00
|
|
|
ompi_osc_base_datatype_create(proc, payload);
|
2006-07-18 02:08:55 +04:00
|
|
|
|
2007-05-31 00:32:02 +04:00
|
|
|
if (NULL == datatype) {
|
2008-06-09 18:53:58 +04:00
|
|
|
opal_output(ompi_osc_base_output,
|
2007-05-31 00:32:02 +04:00
|
|
|
"Error recreating datatype. Aborting.");
|
|
|
|
ompi_mpi_abort(module->m_comm, 1, false);
|
|
|
|
}
|
|
|
|
|
2006-07-18 02:08:55 +04:00
|
|
|
if (header->hdr_msg_length > 0) {
|
2007-07-14 00:46:12 +04:00
|
|
|
unsigned char *target_buffer;
|
|
|
|
|
|
|
|
target_buffer = (unsigned char*) module->m_win->w_baseptr +
|
2008-02-07 21:45:35 +03:00
|
|
|
((unsigned long)header->hdr_target_disp * module->m_win->w_disp_unit);
|
2007-07-14 00:46:12 +04:00
|
|
|
|
2006-07-18 02:08:55 +04:00
|
|
|
/* lock the window for accumulates */
|
2007-05-24 19:41:24 +04:00
|
|
|
OPAL_THREAD_LOCK(&module->m_acc_lock);
|
2006-07-18 02:08:55 +04:00
|
|
|
|
2009-02-24 20:17:33 +03:00
|
|
|
if (op == &ompi_mpi_op_replace.op) {
|
2009-07-13 08:56:31 +04:00
|
|
|
opal_convertor_t convertor;
|
2007-07-14 00:46:12 +04:00
|
|
|
struct iovec iov;
|
|
|
|
uint32_t iov_count = 1;
|
|
|
|
size_t max_data;
|
|
|
|
|
|
|
|
/* create convertor */
|
2009-07-13 08:56:31 +04:00
|
|
|
OBJ_CONSTRUCT(&convertor, opal_convertor_t);
|
2007-07-14 00:46:12 +04:00
|
|
|
|
|
|
|
/* initialize convertor */
|
2009-07-13 08:56:31 +04:00
|
|
|
opal_convertor_copy_and_prepare_for_recv(proc->proc_convertor,
|
|
|
|
&(datatype->super),
|
2007-07-14 00:46:12 +04:00
|
|
|
header->hdr_target_count,
|
|
|
|
target_buffer,
|
|
|
|
0,
|
|
|
|
&convertor);
|
|
|
|
|
2008-04-16 17:29:55 +04:00
|
|
|
iov.iov_len = header->hdr_msg_length;
|
2007-07-14 00:46:12 +04:00
|
|
|
iov.iov_base = (IOVBASE_TYPE*)*payload;
|
2008-04-16 17:29:55 +04:00
|
|
|
max_data = iov.iov_len;
|
|
|
|
MEMCHECKER(
|
|
|
|
memchecker_convertor_call(&opal_memchecker_base_mem_defined, &convertor);
|
|
|
|
);
|
2009-07-13 08:56:31 +04:00
|
|
|
opal_convertor_unpack(&convertor,
|
2007-07-14 00:46:12 +04:00
|
|
|
&iov,
|
|
|
|
&iov_count,
|
|
|
|
&max_data);
|
2008-04-16 17:29:55 +04:00
|
|
|
MEMCHECKER(
|
|
|
|
memchecker_convertor_call(&opal_memchecker_base_mem_noaccess, &convertor);
|
|
|
|
);
|
2007-07-14 00:46:12 +04:00
|
|
|
OBJ_DESTRUCT(&convertor);
|
|
|
|
} else {
|
|
|
|
void *buffer = NULL;
|
|
|
|
|
2009-05-07 00:11:28 +04:00
|
|
|
#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT
|
2007-07-14 00:46:12 +04:00
|
|
|
if (proc->proc_arch != ompi_proc_local()->proc_arch) {
|
2009-07-13 08:56:31 +04:00
|
|
|
opal_convertor_t convertor;
|
2007-07-14 00:46:12 +04:00
|
|
|
struct iovec iov;
|
|
|
|
uint32_t iov_count = 1;
|
|
|
|
size_t max_data;
|
2009-07-13 08:56:31 +04:00
|
|
|
ompi_datatype_t *primitive_datatype = NULL;
|
2007-07-14 00:46:12 +04:00
|
|
|
uint32_t primitive_count;
|
|
|
|
size_t buflen;
|
|
|
|
|
|
|
|
ompi_osc_base_get_primitive_type_info(datatype, &primitive_datatype, &primitive_count);
|
|
|
|
primitive_count *= header->hdr_target_count;
|
|
|
|
|
|
|
|
/* figure out how big a buffer we need */
|
2009-07-13 08:56:31 +04:00
|
|
|
ompi_datatype_type_size(primitive_datatype, &buflen);
|
2007-07-14 00:46:12 +04:00
|
|
|
buflen *= primitive_count;
|
|
|
|
|
|
|
|
/* create convertor */
|
2009-07-13 08:56:31 +04:00
|
|
|
OBJ_CONSTRUCT(&convertor, opal_convertor_t);
|
2007-07-14 00:46:12 +04:00
|
|
|
|
|
|
|
buffer = (void*) malloc(buflen);
|
|
|
|
|
|
|
|
/* initialize convertor */
|
2009-07-13 08:56:31 +04:00
|
|
|
opal_convertor_copy_and_prepare_for_recv(proc->proc_convertor,
|
|
|
|
&(primitive_datatype->super),
|
2007-07-14 00:46:12 +04:00
|
|
|
primitive_count,
|
|
|
|
buffer,
|
|
|
|
0,
|
|
|
|
&convertor);
|
|
|
|
|
2008-04-16 17:29:55 +04:00
|
|
|
iov.iov_len = header->hdr_msg_length;
|
2007-07-14 00:46:12 +04:00
|
|
|
iov.iov_base = (IOVBASE_TYPE*)*payload;
|
2008-04-16 17:29:55 +04:00
|
|
|
max_data = iov.iov_len;
|
|
|
|
MEMCHECKER(
|
|
|
|
memchecker_convertor_call(&opal_memchecker_base_mem_defined, &convertor);
|
|
|
|
);
|
2009-07-13 08:56:31 +04:00
|
|
|
opal_convertor_unpack(&convertor,
|
2007-07-14 00:46:12 +04:00
|
|
|
&iov,
|
|
|
|
&iov_count,
|
|
|
|
&max_data);
|
2008-04-16 17:29:55 +04:00
|
|
|
MEMCHECKER(
|
|
|
|
memchecker_convertor_call(&opal_memchecker_base_mem_noaccess, &convertor);
|
|
|
|
);
|
2007-07-14 00:46:12 +04:00
|
|
|
OBJ_DESTRUCT(&convertor);
|
|
|
|
} else {
|
|
|
|
buffer = *payload;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
buffer = *payload;
|
|
|
|
#endif
|
|
|
|
/* copy the data from the temporary buffer into the user window */
|
|
|
|
ret = ompi_osc_base_process_op(target_buffer,
|
|
|
|
buffer,
|
|
|
|
header->hdr_msg_length,
|
|
|
|
datatype,
|
|
|
|
header->hdr_target_count,
|
|
|
|
op);
|
|
|
|
|
2009-05-07 00:11:28 +04:00
|
|
|
#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT
|
2007-07-14 00:46:12 +04:00
|
|
|
if (proc->proc_arch != ompi_proc_local()->proc_arch) {
|
|
|
|
if (NULL != buffer) free(buffer);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
2006-07-18 02:08:55 +04:00
|
|
|
|
|
|
|
/* unlock the window for accumulates */
|
2007-05-24 19:41:24 +04:00
|
|
|
OPAL_THREAD_UNLOCK(&module->m_acc_lock);
|
2006-07-18 02:08:55 +04:00
|
|
|
|
|
|
|
/* Release datatype & op */
|
|
|
|
OBJ_RELEASE(datatype);
|
|
|
|
OBJ_RELEASE(op);
|
|
|
|
|
2007-05-24 19:41:24 +04:00
|
|
|
inmsg_mark_complete(module);
|
2006-07-18 02:08:55 +04:00
|
|
|
|
2008-06-09 18:53:58 +04:00
|
|
|
OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
|
2007-06-22 02:24:40 +04:00
|
|
|
"%d received accum message from %d",
|
2007-07-11 21:16:06 +04:00
|
|
|
ompi_comm_rank(module->m_comm),
|
2007-06-22 02:24:40 +04:00
|
|
|
header->hdr_origin));
|
2007-07-06 01:40:06 +04:00
|
|
|
*payload = ((char*) *payload) + header->hdr_msg_length;
|
|
|
|
|
2006-07-18 02:08:55 +04:00
|
|
|
} else {
|
|
|
|
ompi_osc_rdma_longreq_t *longreq;
|
|
|
|
size_t buflen;
|
2007-07-14 00:46:12 +04:00
|
|
|
struct ompi_datatype_t *primitive_datatype = NULL;
|
|
|
|
uint32_t primitive_count;
|
|
|
|
|
|
|
|
/* get underlying type... */
|
|
|
|
ompi_osc_base_get_primitive_type_info(datatype, &primitive_datatype, &primitive_count);
|
|
|
|
primitive_count *= header->hdr_target_count;
|
2006-07-18 02:08:55 +04:00
|
|
|
|
|
|
|
/* figure out how big a buffer we need */
|
2009-07-13 08:56:31 +04:00
|
|
|
ompi_datatype_type_size(primitive_datatype, &buflen);
|
2007-07-14 00:46:12 +04:00
|
|
|
buflen *= primitive_count;
|
2006-07-18 02:08:55 +04:00
|
|
|
|
|
|
|
/* get a longreq and fill it in */
|
|
|
|
ompi_osc_rdma_longreq_alloc(&longreq);
|
|
|
|
|
|
|
|
longreq->req_datatype = datatype;
|
|
|
|
longreq->req_op = op;
|
|
|
|
longreq->req_module = module;
|
|
|
|
|
|
|
|
/* allocate a buffer to receive into ... */
|
2011-07-06 12:08:53 +04:00
|
|
|
longreq->req_basereq.req_sendhdr = (ompi_osc_rdma_send_header_t *) malloc(buflen + sizeof(ompi_osc_rdma_send_header_t));
|
2006-07-18 02:08:55 +04:00
|
|
|
|
2011-07-01 00:05:16 +04:00
|
|
|
if (NULL == longreq->req_basereq.req_sendhdr) return OMPI_ERR_TEMP_OUT_OF_RESOURCE;
|
2006-07-18 02:08:55 +04:00
|
|
|
/* fill in tmp header */
|
2011-07-01 00:05:16 +04:00
|
|
|
memcpy(longreq->req_basereq.req_sendhdr, header,
|
2006-07-18 02:08:55 +04:00
|
|
|
sizeof(ompi_osc_rdma_send_header_t));
|
2011-07-01 00:05:16 +04:00
|
|
|
longreq->req_basereq.req_sendhdr->hdr_msg_length = buflen;
|
|
|
|
|
|
|
|
ompi_osc_rdma_component_irecv(longreq->req_basereq.req_sendhdr + 1,
|
|
|
|
primitive_count,
|
|
|
|
primitive_datatype,
|
|
|
|
header->hdr_origin,
|
|
|
|
header->hdr_origin_tag,
|
|
|
|
module->m_comm,
|
|
|
|
&(longreq->request),
|
|
|
|
ompi_osc_rdma_sendreq_recv_accum_long_cb,
|
|
|
|
longreq);
|
2006-07-18 02:08:55 +04:00
|
|
|
|
2008-06-09 18:53:58 +04:00
|
|
|
OPAL_OUTPUT_VERBOSE((50, ompi_osc_base_output,
|
2007-06-22 02:24:40 +04:00
|
|
|
"%d started long recv accum message from %d (%d)",
|
2007-07-11 21:16:06 +04:00
|
|
|
ompi_comm_rank(module->m_comm),
|
2007-06-22 02:24:40 +04:00
|
|
|
header->hdr_origin,
|
|
|
|
header->hdr_origin_tag));
|
2006-07-18 02:08:55 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**********************************************************************
|
|
|
|
*
|
|
|
|
* Receive a get reply on the origin side
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
2011-07-01 00:05:16 +04:00
|
|
|
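/* Completion callback for the long get-reply irecv on the origin:
 * decrements the module's pending-out count, frees the long request
 * and the sendreq, and wakes any waiter once the count drops to
 * zero. */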
static int
|
|
|
|
ompi_osc_rdma_replyreq_recv_long_cb(ompi_request_t *request)
|
2006-07-18 02:08:55 +04:00
|
|
|
{
|
2011-07-01 00:05:16 +04:00
|
|
|
ompi_osc_rdma_longreq_t *longreq =
|
|
|
|
(ompi_osc_rdma_longreq_t*) request->req_complete_cb_data;
|
2006-07-18 02:08:55 +04:00
|
|
|
ompi_osc_rdma_sendreq_t *sendreq =
|
2011-07-01 00:05:16 +04:00
|
|
|
(ompi_osc_rdma_sendreq_t*) longreq->req_basereq.req_sendreq;
|
2007-05-24 19:41:24 +04:00
|
|
|
int32_t count;
|
2006-07-18 02:08:55 +04:00
|
|
|
|
2007-05-24 19:41:24 +04:00
|
|
|
OPAL_THREAD_LOCK(&sendreq->req_module->m_lock);
|
|
|
|
count = (sendreq->req_module->m_num_pending_out -= 1);
|
|
|
|
OPAL_THREAD_UNLOCK(&sendreq->req_module->m_lock);
|
2006-07-18 02:08:55 +04:00
|
|
|
|
|
|
|
ompi_osc_rdma_longreq_free(longreq);
|
|
|
|
ompi_osc_rdma_sendreq_free(sendreq);
|
2007-05-24 19:41:24 +04:00
|
|
|
|
|
|
|
if (0 == count) opal_condition_broadcast(&sendreq->req_module->m_cond);
|
2011-07-01 00:05:16 +04:00
|
|
|
|
|
|
|
ompi_request_free(&request);
|
|
|
|
|
|
|
|
return OMPI_SUCCESS;
|
2006-07-18 02:08:55 +04:00
|
|
|
}
|
|
|
|
|
2007-05-24 19:41:24 +04:00
|
|
|
|
2006-07-18 02:08:55 +04:00
|
|
|
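/* Origin-side handler for a get reply.  A short reply is unpacked
 * through the sendreq's origin convertor into the user buffer; a long
 * reply posts an irecv into the origin buffer and lets
 * ompi_osc_rdma_replyreq_recv_long_cb retire the sendreq. */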
int
|
|
|
|
ompi_osc_rdma_replyreq_recv(ompi_osc_rdma_module_t *module,
|
|
|
|
ompi_osc_rdma_sendreq_t *sendreq,
|
|
|
|
ompi_osc_rdma_reply_header_t *header,
|
2007-07-06 01:40:06 +04:00
|
|
|
void **payload)
|
2006-07-18 02:08:55 +04:00
|
|
|
{
|
|
|
|
int ret = OMPI_SUCCESS;
|
|
|
|
|
|
|
|
/* receive into user buffer */
|
|
|
|
if (header->hdr_msg_length > 0) {
|
|
|
|
/* short message. woo! */
|
|
|
|
|
|
|
|
struct iovec iov;
|
|
|
|
uint32_t iov_count = 1;
|
|
|
|
size_t max_data;
|
2007-05-24 19:41:24 +04:00
|
|
|
int32_t count;
|
2006-07-18 02:08:55 +04:00
|
|
|
|
2008-04-16 17:29:55 +04:00
|
|
|
iov.iov_len = header->hdr_msg_length;
|
2007-07-06 01:40:06 +04:00
|
|
|
iov.iov_base = (IOVBASE_TYPE*)*payload;
|
2008-04-16 17:29:55 +04:00
|
|
|
max_data = iov.iov_len;
|
|
|
|
MEMCHECKER(
|
|
|
|
memchecker_convertor_call(&opal_memchecker_base_mem_defined,
|
|
|
|
&sendreq->req_origin_convertor);
|
|
|
|
);
|
2009-07-13 08:56:31 +04:00
|
|
|
opal_convertor_unpack(&sendreq->req_origin_convertor,
|
2006-07-18 02:08:55 +04:00
|
|
|
&iov,
|
|
|
|
&iov_count,
|
2006-10-27 03:11:26 +04:00
|
|
|
&max_data );
|
2008-04-16 17:29:55 +04:00
|
|
|
MEMCHECKER(
|
|
|
|
memchecker_convertor_call(&opal_memchecker_base_mem_noaccess,
|
|
|
|
&sendreq->req_origin_convertor);
|
|
|
|
);
|
2006-07-18 02:08:55 +04:00
|
|
|
|
2007-05-24 19:41:24 +04:00
|
|
|
count = sendreq->req_module->m_num_pending_out -= 1;
|
2006-07-18 02:08:55 +04:00
|
|
|
ompi_osc_rdma_sendreq_free(sendreq);
|
2007-07-06 01:40:06 +04:00
|
|
|
*payload = ((char*) *payload) + header->hdr_msg_length;
|
|
|
|
|
2007-05-24 19:41:24 +04:00
|
|
|
if (0 == count) opal_condition_broadcast(&sendreq->req_module->m_cond);
|
2006-07-18 02:08:55 +04:00
|
|
|
} else {
|
|
|
|
ompi_osc_rdma_longreq_t *longreq;
|
|
|
|
ompi_osc_rdma_longreq_alloc(&longreq);
|
|
|
|
|
2011-07-01 00:05:16 +04:00
|
|
|
longreq->req_basereq.req_sendreq = sendreq;
|
2006-07-18 02:08:55 +04:00
|
|
|
longreq->req_module = module;
|
|
|
|
|
2011-07-01 00:05:16 +04:00
|
|
|
ret = ompi_osc_rdma_component_irecv(sendreq->req_origin_convertor.pBaseBuf,
|
|
|
|
sendreq->req_origin_convertor.count,
|
|
|
|
sendreq->req_origin_datatype,
|
|
|
|
sendreq->req_target_rank,
|
|
|
|
header->hdr_target_tag,
|
|
|
|
module->m_comm,
|
|
|
|
&(longreq->request),
|
|
|
|
ompi_osc_rdma_replyreq_recv_long_cb,
|
|
|
|
longreq);
|
2006-07-18 02:08:55 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**********************************************************************
|
|
|
|
*
|
|
|
|
* Control message communication
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static void
|
|
|
|
ompi_osc_rdma_control_send_cb(struct mca_btl_base_module_t* btl,
|
|
|
|
struct mca_btl_base_endpoint_t *endpoint,
|
|
|
|
struct mca_btl_base_descriptor_t* descriptor,
|
|
|
|
int status)
|
|
|
|
{
|
2007-07-05 07:32:32 +04:00
|
|
|
ompi_osc_rdma_control_header_t *header = NULL;
|
|
|
|
|
|
|
|
header = (ompi_osc_rdma_control_header_t*) descriptor->des_src[0].seg_addr.pval;
|
|
|
|
|
2006-07-18 02:08:55 +04:00
|
|
|
/* release the descriptor and sendreq */
|
|
|
|
btl->btl_free(btl, descriptor);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
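/* Send a small control message (type plus two 32-bit values) to the
 * given peer over its eager BTL.  The header is converted to network
 * byte order for big-endian peers when heterogeneous support is
 * enabled; the descriptor is released in ompi_osc_rdma_control_send_cb
 * once the BTL signals completion. */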
int
ompi_osc_rdma_control_send(ompi_osc_rdma_module_t *module,
                           ompi_proc_t *proc,
                           uint8_t type, int32_t value0, int32_t value1)
{
    int ret = OMPI_SUCCESS;
    mca_bml_base_endpoint_t *endpoint = NULL;
    mca_bml_base_btl_t *bml_btl = NULL;
    mca_btl_base_descriptor_t *descriptor = NULL;
    ompi_osc_rdma_control_header_t *header = NULL;

    /* Get a BTL and a fragment to go with it */
    endpoint = (mca_bml_base_endpoint_t*) proc->proc_bml;
    bml_btl = mca_bml_base_btl_array_get_next(&endpoint->btl_eager);
    mca_bml_base_alloc(bml_btl, &descriptor, MCA_BTL_NO_ORDER,
                       sizeof(ompi_osc_rdma_control_header_t),
                       MCA_BTL_DES_FLAGS_PRIORITY | MCA_BTL_DES_SEND_ALWAYS_CALLBACK);
    if (NULL == descriptor) {
        ret = OMPI_ERR_TEMP_OUT_OF_RESOURCE;
        goto cleanup;
    }

    /* verify at least enough space for header */
    if (descriptor->des_src[0].seg_len < sizeof(ompi_osc_rdma_control_header_t)) {
        ret = OMPI_ERR_OUT_OF_RESOURCE;
        goto cleanup;
    }

    /* setup descriptor */
    descriptor->des_cbfunc = ompi_osc_rdma_control_send_cb;
    descriptor->des_cbdata = NULL;
    descriptor->des_src[0].seg_len = sizeof(ompi_osc_rdma_control_header_t);

    /* pack header */
    header = (ompi_osc_rdma_control_header_t*) descriptor->des_src[0].seg_addr.pval;
    header->hdr_base.hdr_type = type;
    header->hdr_base.hdr_flags = 0;
    header->hdr_value[0] = value0;
    header->hdr_value[1] = value1;
    header->hdr_windx = ompi_comm_get_cid(module->m_comm);

#ifdef WORDS_BIGENDIAN
    header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;
#elif OPAL_ENABLE_HETEROGENEOUS_SUPPORT
    if (proc->proc_arch & OPAL_ARCH_ISBIGENDIAN) {
        header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;
        OMPI_OSC_RDMA_CONTROL_HDR_HTON(*header);
    }
#endif

    /* send fragment */
    ret = mca_bml_base_send(bml_btl, descriptor, MCA_BTL_TAG_OSC_RDMA);
    if (1 == ret) ret = OMPI_SUCCESS;
    goto done;

 cleanup:
    if (descriptor != NULL) {
        mca_bml_base_free(bml_btl, descriptor);
    }

 done:
    return ret;
}
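
/*
 * Minimal usage sketch for ompi_osc_rdma_control_send().  Illustration only:
 * the header type OMPI_OSC_RDMA_HDR_COMPLETE and the use of
 * ompi_comm_peer_lookup() to resolve the target's ompi_proc_t are assumptions
 * about the surrounding component, not code taken from it.
 */
#if 0
static int
example_send_complete_msg(ompi_osc_rdma_module_t *module,
                          int target_rank, int32_t num_sent)
{
    /* resolve the peer process in the window's communicator */
    ompi_proc_t *proc = ompi_comm_peer_lookup(module->m_comm, target_rank);

    /* value0 is used here to carry the number of messages the target should
       expect from this origin; value1 is unused in this sketch */
    return ompi_osc_rdma_control_send(module, proc,
                                      OMPI_OSC_RDMA_HDR_COMPLETE,
                                      num_sent, 0);
}
#endif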
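
/*
 * Acknowledge completion of direct RDMA transfers on one BTL.  The header is
 * tagged OMPI_OSC_RDMA_HDR_RDMA_COMPLETE, is sent on the same BTL and with
 * the same ordering channel (rdma_btl->rdma_order) as the RDMA operations it
 * acknowledges, and carries rdma_btl->num_sent in hdr_value[0], presumably so
 * the target knows how many RDMA operations to account for before treating
 * the epoch as complete.
 */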
int
ompi_osc_rdma_rdma_ack_send(ompi_osc_rdma_module_t *module,
                            ompi_proc_t *proc,
                            ompi_osc_rdma_btl_t *rdma_btl)
{
    int ret = OMPI_SUCCESS;
    mca_bml_base_btl_t *bml_btl = rdma_btl->bml_btl;
    mca_btl_base_descriptor_t *descriptor = NULL;
    ompi_osc_rdma_control_header_t *header = NULL;

    /* Get a BTL and a fragment to go with it */
    mca_bml_base_alloc(bml_btl, &descriptor, rdma_btl->rdma_order,
                       sizeof(ompi_osc_rdma_control_header_t),
                       MCA_BTL_DES_FLAGS_PRIORITY | MCA_BTL_DES_SEND_ALWAYS_CALLBACK);
    if (NULL == descriptor) {
        ret = OMPI_ERR_TEMP_OUT_OF_RESOURCE;
        goto cleanup;
    }

    /* verify at least enough space for header */
    if (descriptor->des_src[0].seg_len < sizeof(ompi_osc_rdma_control_header_t)) {
        ret = OMPI_ERR_OUT_OF_RESOURCE;
        goto cleanup;
    }

    /* setup descriptor */
    descriptor->des_cbfunc = ompi_osc_rdma_control_send_cb;
    descriptor->des_cbdata = NULL;
    descriptor->des_src[0].seg_len = sizeof(ompi_osc_rdma_control_header_t);

    /* pack header */
    header = (ompi_osc_rdma_control_header_t*) descriptor->des_src[0].seg_addr.pval;
    header->hdr_base.hdr_type = OMPI_OSC_RDMA_HDR_RDMA_COMPLETE;
    header->hdr_base.hdr_flags = 0;
    header->hdr_value[0] = rdma_btl->num_sent;
    header->hdr_value[1] = 0;
    header->hdr_windx = ompi_comm_get_cid(module->m_comm);

#ifdef WORDS_BIGENDIAN
    header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;
#elif OPAL_ENABLE_HETEROGENEOUS_SUPPORT
    if (proc->proc_arch & OPAL_ARCH_ISBIGENDIAN) {
        header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;
        OMPI_OSC_RDMA_CONTROL_HDR_HTON(*header);
    }
#endif

    assert(header->hdr_base.hdr_flags == 0);

    /* send fragment */
    ret = mca_bml_base_send(bml_btl, descriptor, MCA_BTL_TAG_OSC_RDMA);
    if (1 == ret) ret = OMPI_SUCCESS;
    goto done;

 cleanup:
    if (descriptor != NULL) {
        mca_bml_base_free(bml_btl, descriptor);
    }

 done:
    return ret;
}