openmpi/ompi/mca/osc/rdma/osc_rdma_data_move.c
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2006 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "osc_rdma.h"
#include "osc_rdma_sendreq.h"
#include "osc_rdma_header.h"
#include "osc_rdma_data_move.h"
#include "osc_rdma_obj_convert.h"
#include "orte/util/output.h"
#include "opal/util/arch.h"
#include "opal/sys/atomic.h"
#include "ompi/mca/bml/bml.h"
#include "ompi/mca/bml/base/base.h"
#include "ompi/mca/btl/btl.h"
#include "ompi/mca/osc/base/base.h"
#include "ompi/mca/osc/base/osc_base_obj_convert.h"
#include "ompi/datatype/datatype.h"
#include "ompi/memchecker.h"
static inline int32_t
create_send_tag(ompi_osc_rdma_module_t *module)
{
#if OMPI_HAVE_THREAD_SUPPORT && OPAL_HAVE_ATOMIC_CMPSET_32
int32_t newval, oldval;
do {
oldval = module->m_tag_counter;
newval = (oldval + 1) % mca_pml.pml_max_tag;
} while (0 == opal_atomic_cmpset_32(&module->m_tag_counter, oldval, newval));
return newval;
#else
int32_t ret;
/* no compare and swap - have to lock the module */
OPAL_THREAD_LOCK(&module->m_lock);
module->m_tag_counter = (module->m_tag_counter + 1) % mca_pml.pml_max_tag;
ret = module->m_tag_counter;
OPAL_THREAD_UNLOCK(&module->m_lock);
return ret;
#endif
}
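/* mark one incoming message as complete.  When the last pending
incoming message finishes, complete any pending passive-side unlock
and wake threads waiting on the module condition variable. */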
static inline void
inmsg_mark_complete(ompi_osc_rdma_module_t *module)
{
int32_t count;
bool need_unlock = false;
OPAL_THREAD_LOCK(&module->m_lock);
count = (module->m_num_pending_in -= 1);
if ((0 != module->m_lock_status) &&
(opal_list_get_size(&module->m_unlocks_pending) != 0)) {
need_unlock = true;
}
OPAL_THREAD_UNLOCK(&module->m_lock);
if (0 == count) {
if (need_unlock) ompi_osc_rdma_passive_unlock_complete(module);
opal_condition_broadcast(&module->m_cond);
}
}
/**********************************************************************
*
* Multi-buffer support
*
**********************************************************************/
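/* terminate the coalescing buffer pending for the given rank with a
MULTI_END header and hand it to the BML for sending */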
static int
send_multi_buffer(ompi_osc_rdma_module_t *module, int rank)
{
ompi_osc_rdma_base_header_t *header = (ompi_osc_rdma_base_header_t*)
((char*) module->m_pending_buffers[rank].descriptor->des_src[0].seg_addr.pval +
module->m_pending_buffers[rank].descriptor->des_src[0].seg_len);
header->hdr_type = OMPI_OSC_RDMA_HDR_MULTI_END;
header->hdr_flags = 0;
module->m_pending_buffers[rank].descriptor->des_src[0].seg_len +=
sizeof(ompi_osc_rdma_base_header_t);
mca_bml_base_send(module->m_pending_buffers[rank].bml_btl,
module->m_pending_buffers[rank].descriptor,
MCA_BTL_TAG_OSC_RDMA);
module->m_pending_buffers[rank].descriptor = NULL;
module->m_pending_buffers[rank].bml_btl = NULL;
module->m_pending_buffers[rank].remain_len = 0;
return OMPI_SUCCESS;
}
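/* send every per-peer coalescing buffer that still has data queued */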
int
ompi_osc_rdma_flush(ompi_osc_rdma_module_t *module)
{
int i;
for (i = 0 ; i < ompi_comm_size(module->m_comm) ; ++i) {
if (module->m_pending_buffers[i].descriptor != NULL) {
send_multi_buffer(module, i);
}
}
return OMPI_SUCCESS;
}
/**********************************************************************
*
* RDMA data transfers (put / get)
*
**********************************************************************/
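/* completion callback for a BTL put/get started in
ompi_osc_rdma_sendreq_rdma(): update the pending counters, release
the descriptor and send request, and wake any waiters */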
static void
rdma_cb(struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t* endpoint,
struct mca_btl_base_descriptor_t* descriptor,
int status)
{
ompi_osc_rdma_sendreq_t *sendreq =
(ompi_osc_rdma_sendreq_t*) descriptor->des_cbdata;
ompi_osc_rdma_module_t *module = sendreq->req_module;
int32_t out_count, rdma_count;
assert(OMPI_SUCCESS == status);
OPAL_THREAD_LOCK(&module->m_lock);
out_count = (module->m_num_pending_out -= 1);
rdma_count = (module->m_rdma_num_pending -= 1);
OPAL_THREAD_UNLOCK(&module->m_lock);
btl->btl_free(btl, descriptor);
ompi_osc_rdma_sendreq_free(sendreq);
/* sendreq has been freed - signal through the cached module pointer */
if ((0 == out_count) || (0 == rdma_count)) {
opal_condition_broadcast(&module->m_cond);
}
}
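/* try to move a send request directly with BTL put/get, cycling
round-robin through the RDMA-capable BTLs known for the target
peer.  Returns OMPI_ERR_NOT_SUPPORTED if the peer has none. */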
static int
ompi_osc_rdma_sendreq_rdma(ompi_osc_rdma_module_t *module,
ompi_osc_rdma_sendreq_t *sendreq)
{
mca_btl_base_descriptor_t* descriptor;
size_t size = sendreq->req_origin_bytes_packed;
ompi_osc_rdma_btl_t *rdma_btl = NULL;
int index, target, ret;
target = sendreq->req_target_rank;
if (module->m_peer_info[target].peer_num_btls > 0) {
index = ++(module->m_peer_info[target].peer_index_btls);
if (index >= module->m_peer_info[target].peer_num_btls) {
module->m_peer_info[target].peer_index_btls = 0;
index = 0;
}
rdma_btl = &(module->m_peer_info[target].peer_btls[index]);
if (sendreq->req_type == OMPI_OSC_RDMA_PUT) {
mca_bml_base_prepare_src(rdma_btl->bml_btl, NULL,
&sendreq->req_origin_convertor, rdma_btl->rdma_order,
0, &size, 0, &descriptor);
assert(NULL != descriptor);
descriptor->des_dst = sendreq->remote_segs;
descriptor->des_dst_cnt = 1;
descriptor->des_dst[0].seg_addr.lval =
module->m_peer_info[target].peer_base +
((unsigned long)sendreq->req_target_disp * module->m_win->w_disp_unit);
descriptor->des_dst[0].seg_len =
sendreq->req_origin_bytes_packed;
descriptor->des_dst[0].seg_key.key64 =
rdma_btl->peer_seg_key;
#if 0
orte_output(0, "putting to %d: 0x%lx(%d), %d, %d",
target, descriptor->des_dst[0].seg_addr.lval,
descriptor->des_dst[0].seg_len,
rdma_btl->rdma_order,
descriptor->order);
#endif
descriptor->des_cbdata = sendreq;
descriptor->des_cbfunc = rdma_cb;
ret = rdma_btl->bml_btl->
btl_put(rdma_btl->bml_btl->btl,
rdma_btl->bml_btl->btl_endpoint,
descriptor);
} else {
mca_bml_base_prepare_dst(rdma_btl->bml_btl,
NULL, &sendreq->req_origin_convertor, rdma_btl->rdma_order,
0, &size, 0, &descriptor);
assert(NULL != descriptor);
descriptor->des_src = sendreq->remote_segs;
descriptor->des_src_cnt = 1;
descriptor->des_src[0].seg_addr.lval =
module->m_peer_info[target].peer_base +
((unsigned long)sendreq->req_target_disp * module->m_win->w_disp_unit);
descriptor->des_src[0].seg_len =
sendreq->req_origin_bytes_packed;
descriptor->des_src[0].seg_key.key64 =
rdma_btl->peer_seg_key;
descriptor->des_cbdata = sendreq;
descriptor->des_cbfunc = rdma_cb;
ret = rdma_btl->bml_btl->
btl_get(rdma_btl->bml_btl->btl,
rdma_btl->bml_btl->btl_endpoint,
descriptor);
}
rdma_btl->rdma_order = descriptor->order;
if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
return ret;
} else {
OPAL_THREAD_LOCK(&module->m_lock);
rdma_btl->num_sent++;
sendreq->req_module->m_rdma_num_pending += 1;
OPAL_THREAD_UNLOCK(&module->m_lock);
}
} else {
return OMPI_ERR_NOT_SUPPORTED;
}
return OMPI_SUCCESS;
}
/**********************************************************************
*
* Sending a sendreq to target
*
**********************************************************************/
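/* completion callback for the PML send of a long message's payload */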
static void
ompi_osc_rdma_sendreq_send_long_cb(ompi_osc_rdma_longreq_t *longreq)
{
ompi_osc_rdma_sendreq_t *sendreq =
(ompi_osc_rdma_sendreq_t*) longreq->cbdata;
ompi_osc_rdma_module_t *module = sendreq->req_module;
int32_t count;
ORTE_OUTPUT_VERBOSE((50, ompi_osc_base_output,
"%d completed long sendreq to %d",
ompi_comm_rank(sendreq->req_module->m_comm),
sendreq->req_target_rank));
OPAL_THREAD_LOCK(&module->m_lock);
count = (module->m_num_pending_out -= 1);
OPAL_THREAD_UNLOCK(&module->m_lock);
ompi_osc_rdma_longreq_free(longreq);
ompi_osc_rdma_sendreq_free(sendreq);
/* sendreq has been freed - signal through the cached module pointer */
if (0 == count) opal_condition_broadcast(&module->m_cond);
}
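/* completion callback for an eager fragment, which may carry several
coalesced send requests; walks the packed headers, finishing eager
requests and starting the PML sends for long ones */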
static void
ompi_osc_rdma_sendreq_send_cb(struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t *endpoint,
struct mca_btl_base_descriptor_t* descriptor,
int status)
{
ompi_osc_rdma_send_header_t *header =
(ompi_osc_rdma_send_header_t*) descriptor->des_src[0].seg_addr.pval;
ompi_osc_rdma_sendreq_t *sendreq = NULL;
ompi_osc_rdma_module_t *module = NULL;
int32_t count;
size_t ddt_len;
bool done = false;
if (OMPI_SUCCESS != status) {
/* requeue and return */
/* BWB - FIX ME - figure out where to put this bad boy */
abort();
return;
}
if (header->hdr_base.hdr_type == OMPI_OSC_RDMA_HDR_MULTI_END) {
done = true;
}
while (!done) {
sendreq = (ompi_osc_rdma_sendreq_t*) header->hdr_origin_sendreq.pval;
module = sendreq->req_module;
/* capture the packed datatype length now; the sendreq may already
have been freed by the time we advance to the next header at the
bottom of this loop */
ddt_len = ompi_ddt_pack_description_length(sendreq->req_target_datatype);
/* have to look at header, and not the sendreq because in the
case of get, it's possible that the sendreq has been freed
already (if the remote side replies before we get our send
completion callback) and already allocated to another
request. We don't wait for this completion before exiting
a synchronization point in the case of get, as we really
don't care when it completes - only when the data
arrives. */
if (OMPI_OSC_RDMA_HDR_GET != header->hdr_base.hdr_type) {
#if !defined(WORDS_BIGENDIAN) && OMPI_ENABLE_HETEROGENEOUS_SUPPORT
if (header->hdr_base.hdr_flags & OMPI_OSC_RDMA_HDR_FLAG_NBO) {
OMPI_OSC_RDMA_SEND_HDR_NTOH(*header);
}
#endif
/* do we need to post a send? */
if (header->hdr_msg_length != 0) {
/* sendreq is done. Mark it as so and get out of here */
OPAL_THREAD_LOCK(&module->m_lock);
count = (module->m_num_pending_out -= 1);
OPAL_THREAD_UNLOCK(&module->m_lock);
ompi_osc_rdma_sendreq_free(sendreq);
if (0 == count) {
opal_condition_broadcast(&module->m_cond);
}
} else {
ompi_osc_rdma_longreq_t *longreq;
ompi_osc_rdma_longreq_alloc(&longreq);
longreq->cbfunc = ompi_osc_rdma_sendreq_send_long_cb;
longreq->cbdata = sendreq;
ORTE_OUTPUT_VERBOSE((50, ompi_osc_base_output,
"%d starting long sendreq to %d (%d)",
ompi_comm_rank(sendreq->req_module->m_comm),
sendreq->req_target_rank,
header->hdr_origin_tag));
mca_pml.pml_isend(sendreq->req_origin_convertor.pBaseBuf,
sendreq->req_origin_convertor.count,
sendreq->req_origin_datatype,
sendreq->req_target_rank,
header->hdr_origin_tag,
MCA_PML_BASE_SEND_STANDARD,
sendreq->req_module->m_comm,
&(longreq->request));
/* put the send request in the waiting list */
OPAL_THREAD_LOCK(&mca_osc_rdma_component.c_lock);
opal_list_append(&mca_osc_rdma_component.c_pending_requests,
&(longreq->super.super));
OPAL_THREAD_UNLOCK(&mca_osc_rdma_component.c_lock);
}
} else {
ompi_osc_rdma_sendreq_free(sendreq);
}
if (0 == (header->hdr_base.hdr_flags & OMPI_OSC_RDMA_HDR_FLAG_MULTI)) {
done = true;
} else {
header = (ompi_osc_rdma_send_header_t*)
(((char*) header) +
sizeof(ompi_osc_rdma_send_header_t) +
ddt_len +
header->hdr_msg_length);
if (header->hdr_base.hdr_type == OMPI_OSC_RDMA_HDR_MULTI_END) {
done = true;
}
}
}
/* release the descriptor and sendreq */
btl->btl_free(btl, descriptor);
while (opal_list_get_size(&module->m_queued_sendreqs)) {
opal_list_item_t *item;
int ret;
OPAL_THREAD_LOCK(&module->m_lock);
item = opal_list_remove_first(&module->m_queued_sendreqs);
OPAL_THREAD_UNLOCK(&module->m_lock);
if (NULL == item) break;
ret = ompi_osc_rdma_sendreq_send(module, (ompi_osc_rdma_sendreq_t*) item);
if (OMPI_SUCCESS != ret) {
OPAL_THREAD_LOCK(&module->m_lock);
opal_list_append(&(module->m_queued_sendreqs), item);
OPAL_THREAD_UNLOCK(&module->m_lock);
break;
}
}
}
/* create the initial fragment, pack header, datatype, and payload (if
size fits) and send */
int
ompi_osc_rdma_sendreq_send(ompi_osc_rdma_module_t *module,
ompi_osc_rdma_sendreq_t *sendreq)
{
int ret = OMPI_SUCCESS;
mca_bml_base_endpoint_t *endpoint = NULL;
mca_bml_base_btl_t *bml_btl = NULL;
mca_btl_base_descriptor_t *descriptor = NULL;
ompi_osc_rdma_send_header_t *header = NULL;
size_t written_data = 0;
size_t needed_len = sizeof(ompi_osc_rdma_send_header_t);
const void *packed_ddt;
size_t packed_ddt_len, remain;
if ((module->m_eager_send_active) &&
(module->m_use_rdma) &&
(ompi_ddt_is_contiguous_memory_layout(sendreq->req_target_datatype,
sendreq->req_target_count)) &&
(!ompi_convertor_need_buffers(&sendreq->req_origin_convertor)) &&
(sendreq->req_type != OMPI_OSC_RDMA_ACC)) {
ret = ompi_osc_rdma_sendreq_rdma(module, sendreq);
if (OPAL_LIKELY(OMPI_SUCCESS == ret)) return ret;
}
/* we always need to send the ddt */
packed_ddt_len = ompi_ddt_pack_description_length(sendreq->req_target_datatype);
needed_len += packed_ddt_len;
if (OMPI_OSC_RDMA_GET != sendreq->req_type) {
needed_len += sendreq->req_origin_bytes_packed;
}
/* see if we already have a buffer */
if ((module->m_pending_buffers[sendreq->req_target_rank].remain_len >=
sizeof(ompi_osc_rdma_send_header_t) + sendreq->req_origin_bytes_packed) ||
(0 < module->m_pending_buffers[sendreq->req_target_rank].remain_len &&
sendreq->req_origin_bytes_packed > 2048)) {
bml_btl = module->m_pending_buffers[sendreq->req_target_rank].bml_btl;
descriptor = module->m_pending_buffers[sendreq->req_target_rank].descriptor;
remain = module->m_pending_buffers[sendreq->req_target_rank].remain_len;
} else {
/* send the existing buffer */
if (module->m_pending_buffers[sendreq->req_target_rank].descriptor) {
send_multi_buffer(module, sendreq->req_target_rank);
}
assert(OMPI_SUCCESS == ret);
/* get a buffer... */
endpoint = (mca_bml_base_endpoint_t*) sendreq->req_target_proc->proc_bml;
bml_btl = mca_bml_base_btl_array_get_next(&endpoint->btl_eager);
mca_bml_base_alloc(bml_btl, &descriptor, MCA_BTL_NO_ORDER,
module->m_use_buffers ? bml_btl->btl_eager_limit :
needed_len < bml_btl->btl_eager_limit ? needed_len :
bml_btl->btl_eager_limit, MCA_BTL_DES_FLAGS_PRIORITY | MCA_BTL_DES_SEND_ALWAYS_CALLBACK);
if (NULL == descriptor) {
ret = OMPI_ERR_TEMP_OUT_OF_RESOURCE;
goto cleanup;
}
/* verify at least enough space for header */
if (descriptor->des_src[0].seg_len < sizeof(ompi_osc_rdma_send_header_t)) {
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto cleanup;
}
/* setup descriptor */
descriptor->des_cbfunc = ompi_osc_rdma_sendreq_send_cb;
/* preserve the SEND_ALWAYS_CALLBACK flag requested at alloc time */
descriptor->des_flags = MCA_BTL_DES_FLAGS_PRIORITY | MCA_BTL_DES_SEND_ALWAYS_CALLBACK;
module->m_pending_buffers[sendreq->req_target_rank].bml_btl = bml_btl;
module->m_pending_buffers[sendreq->req_target_rank].descriptor = descriptor;
module->m_pending_buffers[sendreq->req_target_rank].remain_len = descriptor->des_src[0].seg_len - sizeof(ompi_osc_rdma_base_header_t);
remain = module->m_pending_buffers[sendreq->req_target_rank].remain_len;
descriptor->des_src[0].seg_len = 0;
}
/* pack header */
header = (ompi_osc_rdma_send_header_t*)
((char*) descriptor->des_src[0].seg_addr.pval + descriptor->des_src[0].seg_len);
written_data += sizeof(ompi_osc_rdma_send_header_t);
header->hdr_base.hdr_flags = 0;
header->hdr_windx = ompi_comm_get_cid(sendreq->req_module->m_comm);
header->hdr_origin = ompi_comm_rank(sendreq->req_module->m_comm);
header->hdr_origin_sendreq.pval = (void*) sendreq;
header->hdr_origin_tag = 0;
header->hdr_target_disp = sendreq->req_target_disp;
header->hdr_target_count = sendreq->req_target_count;
switch (sendreq->req_type) {
case OMPI_OSC_RDMA_PUT:
header->hdr_base.hdr_type = OMPI_OSC_RDMA_HDR_PUT;
#if OMPI_ENABLE_MEM_DEBUG
header->hdr_target_op = 0;
#endif
break;
case OMPI_OSC_RDMA_ACC:
header->hdr_base.hdr_type = OMPI_OSC_RDMA_HDR_ACC;
header->hdr_target_op = sendreq->req_op_id;
break;
case OMPI_OSC_RDMA_GET:
header->hdr_base.hdr_type = OMPI_OSC_RDMA_HDR_GET;
#if OMPI_ENABLE_MEM_DEBUG
header->hdr_target_op = 0;
#endif
sendreq->req_refcount++;
break;
}
/* Set datatype id and / or pack datatype */
ret = ompi_ddt_get_pack_description(sendreq->req_target_datatype, &packed_ddt);
if (OMPI_SUCCESS != ret) goto cleanup;
memcpy((unsigned char*) descriptor->des_src[0].seg_addr.pval + descriptor->des_src[0].seg_len + written_data,
packed_ddt, packed_ddt_len);
written_data += packed_ddt_len;
if (OMPI_OSC_RDMA_GET != sendreq->req_type) {
/* if sending data and it fits, pack payload */
if (remain >= written_data + sendreq->req_origin_bytes_packed) {
struct iovec iov;
uint32_t iov_count = 1;
size_t max_data = sendreq->req_origin_bytes_packed;
iov.iov_len = max_data;
iov.iov_base = (IOVBASE_TYPE*)((unsigned char*) descriptor->des_src[0].seg_addr.pval + descriptor->des_src[0].seg_len + written_data);
ret = ompi_convertor_pack(&sendreq->req_origin_convertor, &iov, &iov_count,
&max_data );
if (ret < 0) {
ret = OMPI_ERR_FATAL;
goto cleanup;
}
written_data += max_data;
descriptor->des_src[0].seg_len += written_data;
header->hdr_msg_length = sendreq->req_origin_bytes_packed;
} else {
descriptor->des_src[0].seg_len += written_data;
header->hdr_msg_length = 0;
header->hdr_origin_tag = create_send_tag(module);
}
} else {
descriptor->des_src[0].seg_len += written_data;
header->hdr_msg_length = 0;
}
module->m_pending_buffers[sendreq->req_target_rank].remain_len -= written_data;
if (module->m_use_buffers) {
header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_MULTI;
#ifdef WORDS_BIGENDIAN
header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;
#elif OMPI_ENABLE_HETEROGENEOUS_SUPPORT
if (sendreq->req_target_proc->proc_arch & OPAL_ARCH_ISBIGENDIAN) {
header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;
OMPI_OSC_RDMA_SEND_HDR_HTON(*header);
}
#endif
if (module->m_pending_buffers[sendreq->req_target_rank].remain_len <
sizeof(ompi_osc_rdma_send_header_t) + 128) {
/* not enough space left - send now */
ret = send_multi_buffer(module, sendreq->req_target_rank);
} else {
ret = OMPI_SUCCESS;
}
goto done;
} else {
#ifdef WORDS_BIGENDIAN
header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;
#elif OMPI_ENABLE_HETEROGENEOUS_SUPPORT
if (sendreq->req_target_proc->proc_arch & OPAL_ARCH_ISBIGENDIAN) {
header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;
OMPI_OSC_RDMA_SEND_HDR_HTON(*header);
}
#endif
/* send fragment */
ORTE_OUTPUT_VERBOSE((50, ompi_osc_base_output,
"%d sending sendreq to %d",
ompi_comm_rank(sendreq->req_module->m_comm),
sendreq->req_target_rank));
module->m_pending_buffers[sendreq->req_target_rank].bml_btl = NULL;
module->m_pending_buffers[sendreq->req_target_rank].descriptor = NULL;
module->m_pending_buffers[sendreq->req_target_rank].remain_len = 0;
ret = mca_bml_base_send(bml_btl, descriptor, MCA_BTL_TAG_OSC_RDMA);
goto done;
}
cleanup:
if (descriptor != NULL) {
mca_bml_base_free(bml_btl, descriptor);
}
done:
return ret;
}
/**********************************************************************
*
* Sending a replyreq back to origin
*
**********************************************************************/
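/* completion callback for the PML send of a long reply's payload */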
static void
ompi_osc_rdma_replyreq_send_long_cb(ompi_osc_rdma_longreq_t *longreq)
{
ompi_osc_rdma_replyreq_t *replyreq =
(ompi_osc_rdma_replyreq_t*) longreq->cbdata;
inmsg_mark_complete(replyreq->rep_module);
ompi_osc_rdma_longreq_free(longreq);
ompi_osc_rdma_replyreq_free(replyreq);
}
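/* completion callback for the eager fragment carrying a get reply
back to the origin; starts the PML send of the payload if it did
not fit in the fragment */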
static void
ompi_osc_rdma_replyreq_send_cb(struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t *endpoint,
struct mca_btl_base_descriptor_t* descriptor,
int status)
{
ompi_osc_rdma_replyreq_t *replyreq =
(ompi_osc_rdma_replyreq_t*) descriptor->des_cbdata;
ompi_osc_rdma_reply_header_t *header =
(ompi_osc_rdma_reply_header_t*) descriptor->des_src[0].seg_addr.pval;
if (OMPI_SUCCESS != status) {
/* requeue and return */
/* BWB - FIX ME - figure out where to put this bad boy */
abort();
return;
}
#if !defined(WORDS_BIGENDIAN) && OMPI_ENABLE_HETEROGENEOUS_SUPPORT
if (header->hdr_base.hdr_flags & OMPI_OSC_RDMA_HDR_FLAG_NBO) {
OMPI_OSC_RDMA_REPLY_HDR_NTOH(*header);
}
#endif
/* do we need to post a send? */
if (header->hdr_msg_length != 0) {
/* sendreq is done. Mark it as so and get out of here */
inmsg_mark_complete(replyreq->rep_module);
ompi_osc_rdma_replyreq_free(replyreq);
} else {
ompi_osc_rdma_longreq_t *longreq;
ompi_osc_rdma_longreq_alloc(&longreq);
longreq->cbfunc = ompi_osc_rdma_replyreq_send_long_cb;
longreq->cbdata = replyreq;
mca_pml.pml_isend(replyreq->rep_target_convertor.pBaseBuf,
replyreq->rep_target_convertor.count,
replyreq->rep_target_datatype,
replyreq->rep_origin_rank,
header->hdr_target_tag,
MCA_PML_BASE_SEND_STANDARD,
replyreq->rep_module->m_comm,
&(longreq->request));
/* put the send request in the waiting list */
OPAL_THREAD_LOCK(&mca_osc_rdma_component.c_lock);
opal_list_append(&mca_osc_rdma_component.c_pending_requests,
&(longreq->super.super));
OPAL_THREAD_UNLOCK(&mca_osc_rdma_component.c_lock);
}
/* release the descriptor and replyreq */
btl->btl_free(btl, descriptor);
}
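/* send the reply for a get back to the origin, packing the payload
into the eager fragment when it fits and otherwise tagging the
header so the payload follows as a long PML send */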
int
ompi_osc_rdma_replyreq_send(ompi_osc_rdma_module_t *module,
ompi_osc_rdma_replyreq_t *replyreq)
{
int ret = OMPI_SUCCESS;
mca_bml_base_endpoint_t *endpoint = NULL;
mca_bml_base_btl_t *bml_btl = NULL;
mca_btl_base_descriptor_t *descriptor = NULL;
ompi_osc_rdma_reply_header_t *header = NULL;
size_t written_data = 0;
/* Get a BTL and a fragment to go with it */
endpoint = (mca_bml_base_endpoint_t*) replyreq->rep_origin_proc->proc_bml;
bml_btl = mca_bml_base_btl_array_get_next(&endpoint->btl_eager);
mca_bml_base_alloc(bml_btl, &descriptor, MCA_BTL_NO_ORDER,
bml_btl->btl_eager_limit, MCA_BTL_DES_FLAGS_PRIORITY | MCA_BTL_DES_SEND_ALWAYS_CALLBACK);
if (NULL == descriptor) {
ret = OMPI_ERR_TEMP_OUT_OF_RESOURCE;
goto cleanup;
}
/* verify at least enough space for header */
if (descriptor->des_src[0].seg_len < sizeof(ompi_osc_rdma_reply_header_t)) {
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto cleanup;
}
/* setup descriptor */
descriptor->des_cbfunc = ompi_osc_rdma_replyreq_send_cb;
descriptor->des_cbdata = (void*) replyreq;
/* preserve the SEND_ALWAYS_CALLBACK flag requested at alloc time */
descriptor->des_flags = MCA_BTL_DES_FLAGS_PRIORITY | MCA_BTL_DES_SEND_ALWAYS_CALLBACK;
/* pack header */
header = (ompi_osc_rdma_reply_header_t*) descriptor->des_src[0].seg_addr.pval;
written_data += sizeof(ompi_osc_rdma_reply_header_t);
header->hdr_base.hdr_type = OMPI_OSC_RDMA_HDR_REPLY;
header->hdr_base.hdr_flags = 0;
header->hdr_origin_sendreq = replyreq->rep_origin_sendreq;
header->hdr_target_tag = 0;
/* if sending data fits, pack payload */
if (descriptor->des_src[0].seg_len >=
written_data + replyreq->rep_target_bytes_packed) {
struct iovec iov;
uint32_t iov_count = 1;
size_t max_data = replyreq->rep_target_bytes_packed;
iov.iov_len = max_data;
iov.iov_base = (IOVBASE_TYPE*)((unsigned char*) descriptor->des_src[0].seg_addr.pval + written_data);
MEMCHECKER(
memchecker_convertor_call(&opal_memchecker_base_mem_defined,
&replyreq->rep_target_convertor);
);
ret = ompi_convertor_pack(&replyreq->rep_target_convertor, &iov, &iov_count,
&max_data );
MEMCHECKER(
memchecker_convertor_call(&opal_memchecker_base_mem_noaccess,
&replyreq->rep_target_convertor);
);
if (ret < 0) {
ret = OMPI_ERR_FATAL;
goto cleanup;
}
assert(max_data == replyreq->rep_target_bytes_packed);
written_data += max_data;
descriptor->des_src[0].seg_len = written_data;
header->hdr_msg_length = replyreq->rep_target_bytes_packed;
} else {
header->hdr_msg_length = 0;
header->hdr_target_tag = create_send_tag(module);
}
#ifdef WORDS_BIGENDIAN
header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;
#elif OMPI_ENABLE_HETEROGENEOUS_SUPPORT
if (replyreq->rep_origin_proc->proc_arch & OPAL_ARCH_ISBIGENDIAN) {
header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;
OMPI_OSC_RDMA_REPLY_HDR_HTON(*header);
}
#endif
/* send fragment */
ret = mca_bml_base_send(bml_btl, descriptor, MCA_BTL_TAG_OSC_RDMA);
goto done;
cleanup:
if (descriptor != NULL) {
mca_bml_base_free(bml_btl, descriptor);
}
done:
return ret;
}
/**********************************************************************
*
* Receive a put on the target side
*
**********************************************************************/
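/* completion callback for the PML receive of a long put's payload */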
static void
ompi_osc_rdma_sendreq_recv_put_long_cb(ompi_osc_rdma_longreq_t *longreq)
{
ompi_osc_rdma_module_t *module = longreq->req_module;
OBJ_RELEASE(longreq->req_datatype);
ompi_osc_rdma_longreq_free(longreq);
ORTE_OUTPUT_VERBOSE((50, ompi_osc_base_output,
"%d finished receiving long put message",
ompi_comm_rank(module->m_comm)));
inmsg_mark_complete(module);
}
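/* apply a received put to the target window: recreate the origin's
datatype from its packed description and, for eager messages,
unpack the payload straight from the incoming buffer */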
int
ompi_osc_rdma_sendreq_recv_put(ompi_osc_rdma_module_t *module,
ompi_osc_rdma_send_header_t *header,
void **inbuf)
{
int ret = OMPI_SUCCESS;
void *target = (unsigned char*) module->m_win->w_baseptr +
((unsigned long)header->hdr_target_disp * module->m_win->w_disp_unit);
ompi_proc_t *proc = ompi_comm_peer_lookup( module->m_comm, header->hdr_origin );
struct ompi_datatype_t *datatype =
ompi_osc_base_datatype_create(proc, inbuf);
if (NULL == datatype) {
orte_output(ompi_osc_base_output,
"Error recreating datatype. Aborting.");
ompi_mpi_abort(module->m_comm, 1, false);
}
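/* A put arrives either as a short message, with the data packed
 * inline after the header, or as a long message, where only the
 * header travels eagerly and the data follows in a separate PML
 * send, matched below by hdr_origin_tag. */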
if (header->hdr_msg_length > 0) {
ompi_convertor_t convertor;
struct iovec iov;
uint32_t iov_count = 1;
size_t max_data;
ompi_proc_t *proc;
/* create convertor */
OBJ_CONSTRUCT(&convertor, ompi_convertor_t);
/* initialize convertor */
proc = ompi_comm_peer_lookup(module->m_comm, header->hdr_origin);
ompi_convertor_copy_and_prepare_for_recv(proc->proc_convertor,
datatype,
header->hdr_target_count,
target,
0,
&convertor);
iov.iov_len = header->hdr_msg_length;
iov.iov_base = (IOVBASE_TYPE*)*inbuf;
max_data = iov.iov_len;
MEMCHECKER(
memchecker_convertor_call(&opal_memchecker_base_mem_defined, &convertor);
);
ompi_convertor_unpack(&convertor,
&iov,
&iov_count,
&max_data );
MEMCHECKER(
memchecker_convertor_call(&opal_memchecker_base_mem_noaccess, &convertor);
);
OBJ_DESTRUCT(&convertor);
OBJ_RELEASE(datatype);
inmsg_mark_complete(module);
*inbuf = ((char*) *inbuf) + header->hdr_msg_length;
ORTE_OUTPUT_VERBOSE((50, ompi_osc_base_output,
"%d received put message from %d",
ompi_comm_rank(module->m_comm),
header->hdr_origin));
} else {
ompi_osc_rdma_longreq_t *longreq;
ompi_osc_rdma_longreq_alloc(&longreq);
longreq->cbfunc = ompi_osc_rdma_sendreq_recv_put_long_cb;
longreq->cbdata = NULL;
longreq->req_datatype = datatype;
longreq->req_module = module;
ret = mca_pml.pml_irecv(target,
header->hdr_target_count,
datatype,
header->hdr_origin,
header->hdr_origin_tag,
module->m_comm,
&(longreq->request));
ORTE_OUTPUT_VERBOSE((50, ompi_osc_base_output,
"%d started long recv put message from %d (%d)",
ompi_comm_rank(module->m_comm),
header->hdr_origin,
header->hdr_origin_tag));
/* put the send request in the waiting list */
OPAL_THREAD_LOCK(&mca_osc_rdma_component.c_lock);
opal_list_append(&mca_osc_rdma_component.c_pending_requests,
&(longreq->super.super));
OPAL_THREAD_UNLOCK(&mca_osc_rdma_component.c_lock);
}
return ret;
}
/**********************************************************************
*
* Receive an accumulate on the target side
*
**********************************************************************/
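/* Completion callback for a long accumulate: by now the payload
 * sits in the temporary buffer (header followed by data) that
 * ompi_osc_rdma_sendreq_recv_accum() allocated, so it can be
 * applied to the window under the accumulate lock. */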
static void
ompi_osc_rdma_sendreq_recv_accum_long_cb(ompi_osc_rdma_longreq_t *longreq)
{
ompi_osc_rdma_send_header_t *header =
(ompi_osc_rdma_send_header_t*) longreq->cbdata;
void *payload = (void*) (header + 1);
int ret;
ompi_osc_rdma_module_t *module = longreq->req_module;
unsigned char *target_buffer =
(unsigned char*) module->m_win->w_baseptr +
((unsigned long)header->hdr_target_disp * module->m_win->w_disp_unit);
/* lock the window for accumulates */
OPAL_THREAD_LOCK(&longreq->req_module->m_acc_lock);
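/* MPI_REPLACE is a plain unpack into the window; every other op
 * reads both operands, so it is applied element-wise by
 * ompi_osc_base_process_op() below. */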
if (longreq->req_op == &ompi_mpi_op_replace) {
ompi_convertor_t convertor;
struct iovec iov;
uint32_t iov_count = 1;
size_t max_data;
/* create convertor */
OBJ_CONSTRUCT(&convertor, ompi_convertor_t);
/* initialize convertor */
ompi_convertor_copy_and_prepare_for_recv(ompi_proc_local()->proc_convertor,
longreq->req_datatype,
header->hdr_target_count,
target_buffer,
0,
&convertor);
iov.iov_len = header->hdr_msg_length;
iov.iov_base = (IOVBASE_TYPE*) payload;
max_data = iov.iov_len;
MEMCHECKER(
memchecker_convertor_call(&opal_memchecker_base_mem_defined,
&convertor);
);
ompi_convertor_unpack(&convertor,
&iov,
&iov_count,
&max_data);
MEMCHECKER(
memchecker_convertor_call(&opal_memchecker_base_mem_noaccess,
&convertor);
);
OBJ_DESTRUCT(&convertor);
} else {
/* copy the data from the temporary buffer into the user window */
ret = ompi_osc_base_process_op(target_buffer,
payload,
header->hdr_msg_length,
longreq->req_datatype,
header->hdr_target_count,
longreq->req_op);
}
/* unlock the window for accumulates */
OPAL_THREAD_UNLOCK(&longreq->req_module->m_acc_lock);
ORTE_OUTPUT_VERBOSE((50, ompi_osc_base_output,
"%d finished receiving long accum message from %d",
ompi_comm_rank(longreq->req_module->m_comm),
header->hdr_origin));
/* free the temp buffer */
free(longreq->cbdata);
/* Release datatype & op */
OBJ_RELEASE(longreq->req_datatype);
OBJ_RELEASE(longreq->req_op);
inmsg_mark_complete(longreq->req_module);
ompi_osc_rdma_longreq_free(longreq);
}
int
ompi_osc_rdma_sendreq_recv_accum(ompi_osc_rdma_module_t *module,
ompi_osc_rdma_send_header_t *header,
void **payload)
{
int ret = OMPI_SUCCESS;
struct ompi_op_t *op = ompi_osc_base_op_create(header->hdr_target_op);
ompi_proc_t *proc = ompi_comm_peer_lookup( module->m_comm, header->hdr_origin );
struct ompi_datatype_t *datatype =
ompi_osc_base_datatype_create(proc, payload);
if (NULL == datatype) {
orte_output(ompi_osc_base_output,
"Error recreating datatype. Aborting.");
ompi_mpi_abort(module->m_comm, 1, false);
}
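/* As with put, a short accumulate carries its data inline after
 * the header and is applied immediately; a long accumulate is
 * received into a temporary buffer first (see below). */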
if (header->hdr_msg_length > 0) {
unsigned char *target_buffer;
target_buffer = (unsigned char*) module->m_win->w_baseptr +
((unsigned long)header->hdr_target_disp * module->m_win->w_disp_unit);
/* lock the window for accumulates */
OPAL_THREAD_LOCK(&module->m_acc_lock);
if (op == &ompi_mpi_op_replace) {
ompi_convertor_t convertor;
struct iovec iov;
uint32_t iov_count = 1;
size_t max_data;
/* create convertor */
OBJ_CONSTRUCT(&convertor, ompi_convertor_t);
/* initialize convertor */
ompi_convertor_copy_and_prepare_for_recv(proc->proc_convertor,
datatype,
header->hdr_target_count,
target_buffer,
0,
&convertor);
iov.iov_len = header->hdr_msg_length;
iov.iov_base = (IOVBASE_TYPE*)*payload;
max_data = iov.iov_len;
MEMCHECKER(
memchecker_convertor_call(&opal_memchecker_base_mem_defined, &convertor);
);
ompi_convertor_unpack(&convertor,
&iov,
&iov_count,
&max_data);
MEMCHECKER(
memchecker_convertor_call(&opal_memchecker_base_mem_noaccess, &convertor);
);
OBJ_DESTRUCT(&convertor);
} else {
void *buffer = NULL;
#if OMPI_ENABLE_HETEROGENEOUS_SUPPORT
if (proc->proc_arch != ompi_proc_local()->proc_arch) {
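/* When origin and target architectures differ, the packed payload
 * must first be unpacked into the local representation of the
 * underlying primitive type before the op can be applied. */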
ompi_convertor_t convertor;
struct iovec iov;
uint32_t iov_count = 1;
size_t max_data;
struct ompi_datatype_t *primitive_datatype = NULL;
uint32_t primitive_count;
size_t buflen;
ompi_osc_base_get_primitive_type_info(datatype, &primitive_datatype, &primitive_count);
primitive_count *= header->hdr_target_count;
/* figure out how big a buffer we need */
ompi_ddt_type_size(primitive_datatype, &buflen);
buflen *= primitive_count;
/* allocate a temporary buffer in the local data representation */
buffer = (void*) malloc(buflen);
if (NULL == buffer) {
OPAL_THREAD_UNLOCK(&module->m_acc_lock);
OBJ_RELEASE(datatype);
OBJ_RELEASE(op);
return OMPI_ERR_TEMP_OUT_OF_RESOURCE;
}
/* create convertor */
OBJ_CONSTRUCT(&convertor, ompi_convertor_t);
/* initialize convertor */
ompi_convertor_copy_and_prepare_for_recv(proc->proc_convertor,
primitive_datatype,
primitive_count,
buffer,
0,
&convertor);
iov.iov_len = header->hdr_msg_length;
iov.iov_base = (IOVBASE_TYPE*)*payload;
max_data = iov.iov_len;
MEMCHECKER(
memchecker_convertor_call(&opal_memchecker_base_mem_defined, &convertor);
);
ompi_convertor_unpack(&convertor,
&iov,
&iov_count,
&max_data);
MEMCHECKER(
memchecker_convertor_call(&opal_memchecker_base_mem_noaccess, &convertor);
);
OBJ_DESTRUCT(&convertor);
} else {
buffer = *payload;
}
#else
buffer = *payload;
#endif
/* copy the data from the temporary buffer into the user window */
ret = ompi_osc_base_process_op(target_buffer,
buffer,
header->hdr_msg_length,
datatype,
header->hdr_target_count,
op);
#if OMPI_ENABLE_HETEROGENEOUS_SUPPORT
if (proc->proc_arch != ompi_proc_local()->proc_arch) {
if (NULL != buffer) free(buffer);
}
#endif
}
/* unlock the window for accumulates */
OPAL_THREAD_UNLOCK(&module->m_acc_lock);
/* Release datatype & op */
OBJ_RELEASE(datatype);
OBJ_RELEASE(op);
inmsg_mark_complete(module);
ORTE_OUTPUT_VERBOSE((50, ompi_osc_base_output,
"%d received accum message from %d",
ompi_comm_rank(module->m_comm),
header->hdr_origin));
*payload = ((char*) *payload) + header->hdr_msg_length;
} else {
ompi_osc_rdma_longreq_t *longreq;
size_t buflen;
struct ompi_datatype_t *primitive_datatype = NULL;
uint32_t primitive_count;
/* get underlying type... */
ompi_osc_base_get_primitive_type_info(datatype, &primitive_datatype, &primitive_count);
primitive_count *= header->hdr_target_count;
/* figure out how big a buffer we need */
ompi_ddt_type_size(primitive_datatype, &buflen);
buflen *= primitive_count;
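/* The long accumulate is received into a contiguous temporary
 * buffer of primitive elements rather than directly into the
 * window: the op has to read both operands, so the incoming data
 * cannot simply be scattered into place by the datatype engine. */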
/* get a longreq and fill it in */
ompi_osc_rdma_longreq_alloc(&longreq);
longreq->cbfunc = ompi_osc_rdma_sendreq_recv_accum_long_cb;
longreq->req_datatype = datatype;
longreq->req_op = op;
longreq->req_module = module;
/* allocate a buffer to receive into ... */
longreq->cbdata = malloc(buflen + sizeof(ompi_osc_rdma_send_header_t));
if (NULL == longreq->cbdata) return OMPI_ERR_TEMP_OUT_OF_RESOURCE;
/* fill in tmp header */
memcpy(longreq->cbdata, header,
sizeof(ompi_osc_rdma_send_header_t));
((ompi_osc_rdma_send_header_t*) longreq->cbdata)->hdr_msg_length = buflen;
ret = mca_pml.pml_irecv(((char*) longreq->cbdata) + sizeof(ompi_osc_rdma_send_header_t),
primitive_count,
primitive_datatype,
header->hdr_origin,
header->hdr_origin_tag,
module->m_comm,
&(longreq->request));
ORTE_OUTPUT_VERBOSE((50, ompi_osc_base_output,
"%d started long recv accum message from %d (%d)",
ompi_comm_rank(module->m_comm),
header->hdr_origin,
header->hdr_origin_tag));
/* put the send request in the waiting list */
OPAL_THREAD_LOCK(&mca_osc_rdma_component.c_lock);
opal_list_append(&mca_osc_rdma_component.c_pending_requests,
&(longreq->super.super));
OPAL_THREAD_UNLOCK(&mca_osc_rdma_component.c_lock);
}
return ret;
}
/**********************************************************************
*
* Receive a get reply on the origin side
*
**********************************************************************/
static void
ompi_osc_rdma_replyreq_recv_long_cb(ompi_osc_rdma_longreq_t *longreq)
{
ompi_osc_rdma_sendreq_t *sendreq =
(ompi_osc_rdma_sendreq_t*) longreq->cbdata;
/* cache the module pointer: the sendreq must not be touched
 * after it is freed below */
ompi_osc_rdma_module_t *module = sendreq->req_module;
int32_t count;
OPAL_THREAD_LOCK(&module->m_lock);
count = (module->m_num_pending_out -= 1);
OPAL_THREAD_UNLOCK(&module->m_lock);
ompi_osc_rdma_longreq_free(longreq);
ompi_osc_rdma_sendreq_free(sendreq);
if (0 == count) opal_condition_broadcast(&module->m_cond);
}
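/* Receive the reply to a get: short replies are unpacked inline
 * through the origin-side convertor, long replies repost a PML
 * receive into the original user buffer. */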
int
ompi_osc_rdma_replyreq_recv(ompi_osc_rdma_module_t *module,
ompi_osc_rdma_sendreq_t *sendreq,
ompi_osc_rdma_reply_header_t *header,
void **payload)
{
int ret = OMPI_SUCCESS;
/* receive into user buffer */
if (header->hdr_msg_length > 0) {
/* short message. woo! */
struct iovec iov;
uint32_t iov_count = 1;
size_t max_data;
int32_t count;
iov.iov_len = header->hdr_msg_length;
iov.iov_base = (IOVBASE_TYPE*)*payload;
max_data = iov.iov_len;
MEMCHECKER(
memchecker_convertor_call(&opal_memchecker_base_mem_defined,
&sendreq->req_origin_convertor);
);
ompi_convertor_unpack(&sendreq->req_origin_convertor,
&iov,
&iov_count,
&max_data );
MEMCHECKER(
memchecker_convertor_call(&opal_memchecker_base_mem_noaccess,
&sendreq->req_origin_convertor);
);
/* decrement the pending count under the module lock, matching the
 * long-message callback, and free the sendreq only afterwards */
OPAL_THREAD_LOCK(&module->m_lock);
count = (module->m_num_pending_out -= 1);
OPAL_THREAD_UNLOCK(&module->m_lock);
ompi_osc_rdma_sendreq_free(sendreq);
*payload = ((char*) *payload) + header->hdr_msg_length;
if (0 == count) opal_condition_broadcast(&module->m_cond);
} else {
ompi_osc_rdma_longreq_t *longreq;
ompi_osc_rdma_longreq_alloc(&longreq);
longreq->cbfunc = ompi_osc_rdma_replyreq_recv_long_cb;
longreq->cbdata = sendreq;
longreq->req_module = module;
/* BWB - FIX ME - George is going to kill me for this */
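/* The receive is reposted straight out of the origin convertor's
 * internal fields (pBaseBuf, count) instead of going through the
 * datatype engine's public interface -- hence the apology above. */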
ret = mca_pml.pml_irecv(sendreq->req_origin_convertor.pBaseBuf,
sendreq->req_origin_convertor.count,
sendreq->req_origin_datatype,
sendreq->req_target_rank,
header->hdr_target_tag,
module->m_comm,
&(longreq->request));
/* put the send request in the waiting list */
OPAL_THREAD_LOCK(&mca_osc_rdma_component.c_lock);
opal_list_append(&mca_osc_rdma_component.c_pending_requests,
&(longreq->super.super));
OPAL_THREAD_UNLOCK(&mca_osc_rdma_component.c_lock);
}
return ret;
}
/**********************************************************************
*
* Control message communication
*
**********************************************************************/
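/* Control messages are single-fragment, header-only sends: a type,
 * two 32-bit values whose meaning depends on the type, and the CID
 * of the window's communicator (hdr_windx) so the receiver can find
 * the right module. See osc_rdma_header.h for the exact layout. */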
static void
ompi_osc_rdma_control_send_cb(struct mca_btl_base_module_t* btl,
struct mca_btl_base_endpoint_t *endpoint,
struct mca_btl_base_descriptor_t* descriptor,
int status)
{
/* nothing in the header needs to be examined on completion; just
 * release the descriptor */
btl->btl_free(btl, descriptor);
}
int
ompi_osc_rdma_control_send(ompi_osc_rdma_module_t *module,
ompi_proc_t *proc,
uint8_t type, int32_t value0, int32_t value1)
{
int ret = OMPI_SUCCESS;
mca_bml_base_endpoint_t *endpoint = NULL;
mca_bml_base_btl_t *bml_btl = NULL;
mca_btl_base_descriptor_t *descriptor = NULL;
ompi_osc_rdma_control_header_t *header = NULL;
/* Get a BTL and a fragment to go with it */
endpoint = (mca_bml_base_endpoint_t*) proc->proc_bml;
bml_btl = mca_bml_base_btl_array_get_next(&endpoint->btl_eager);
mca_bml_base_alloc(bml_btl, &descriptor, MCA_BTL_NO_ORDER,
sizeof(ompi_osc_rdma_control_header_t),
MCA_BTL_DES_FLAGS_PRIORITY | MCA_BTL_DES_SEND_ALWAYS_CALLBACK);
if (NULL == descriptor) {
ret = OMPI_ERR_TEMP_OUT_OF_RESOURCE;
goto cleanup;
}
/* verify at least enough space for header */
if (descriptor->des_src[0].seg_len < sizeof(ompi_osc_rdma_control_header_t)) {
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto cleanup;
}
/* setup descriptor */
descriptor->des_cbfunc = ompi_osc_rdma_control_send_cb;
descriptor->des_cbdata = NULL;
/* the completion callback frees the descriptor, so it must always fire */
descriptor->des_flags = MCA_BTL_DES_FLAGS_PRIORITY |
MCA_BTL_DES_SEND_ALWAYS_CALLBACK;
descriptor->des_src[0].seg_len = sizeof(ompi_osc_rdma_control_header_t);
/* pack header */
header = (ompi_osc_rdma_control_header_t*) descriptor->des_src[0].seg_addr.pval;
header->hdr_base.hdr_type = type;
header->hdr_base.hdr_flags = 0;
header->hdr_value[0] = value0;
header->hdr_value[1] = value1;
header->hdr_windx = ompi_comm_get_cid(module->m_comm);
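/* Byte ordering: a big-endian sender just marks the header, which
 * is already in network byte order; a little-endian sender converts
 * only when the peer is big-endian, so homogeneous little-endian
 * jobs never pay for the swap. */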
#ifdef WORDS_BIGENDIAN
header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;
#elif OMPI_ENABLE_HETEROGENEOUS_SUPPORT
if (proc->proc_arch & OPAL_ARCH_ISBIGENDIAN) {
header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;
OMPI_OSC_RDMA_CONTROL_HDR_HTON(*header);
}
#endif
/* send fragment */
ret = mca_bml_base_send(bml_btl, descriptor, MCA_BTL_TAG_OSC_RDMA);
goto done;
cleanup:
if (descriptor != NULL) {
mca_bml_base_free(bml_btl, descriptor);
}
done:
return ret;
}
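/* Like ompi_osc_rdma_control_send(), except that the ack must travel
 * on the same BTL, and in the same order, as the RDMA operations it
 * completes, and it reports how many of them were sent. */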
int
ompi_osc_rdma_rdma_ack_send(ompi_osc_rdma_module_t *module,
ompi_proc_t *proc,
ompi_osc_rdma_btl_t *rdma_btl)
{
int ret = OMPI_SUCCESS;
mca_bml_base_btl_t *bml_btl = rdma_btl->bml_btl;
mca_btl_base_descriptor_t *descriptor = NULL;
ompi_osc_rdma_control_header_t *header = NULL;
/* Get a BTL and a fragment to go with it */
mca_bml_base_alloc(bml_btl, &descriptor, rdma_btl->rdma_order,
sizeof(ompi_osc_rdma_control_header_t),
MCA_BTL_DES_FLAGS_PRIORITY | MCA_BTL_DES_SEND_ALWAYS_CALLBACK);
if (NULL == descriptor) {
ret = OMPI_ERR_TEMP_OUT_OF_RESOURCE;
goto cleanup;
}
/* verify at least enough space for header */
if (descriptor->des_src[0].seg_len < sizeof(ompi_osc_rdma_control_header_t)) {
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto cleanup;
}
/* setup descriptor */
descriptor->des_cbfunc = ompi_osc_rdma_control_send_cb;
descriptor->des_cbdata = NULL;
/* the completion callback frees the descriptor, so it must always fire */
descriptor->des_flags = MCA_BTL_DES_FLAGS_PRIORITY |
MCA_BTL_DES_SEND_ALWAYS_CALLBACK;
descriptor->des_src[0].seg_len = sizeof(ompi_osc_rdma_control_header_t);
/* pack header */
header = (ompi_osc_rdma_control_header_t*) descriptor->des_src[0].seg_addr.pval;
header->hdr_base.hdr_type = OMPI_OSC_RDMA_HDR_RDMA_COMPLETE;
header->hdr_base.hdr_flags = 0;
header->hdr_value[0] = rdma_btl->num_sent;
header->hdr_value[1] = 0;
header->hdr_windx = ompi_comm_get_cid(module->m_comm);
#ifdef WORDS_BIGENDIAN
header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;
#elif OMPI_ENABLE_HETEROGENEOUS_SUPPORT
if (proc->proc_arch & OPAL_ARCH_ISBIGENDIAN) {
header->hdr_base.hdr_flags |= OMPI_OSC_RDMA_HDR_FLAG_NBO;
OMPI_OSC_RDMA_CONTROL_HDR_HTON(*header);
}
#endif
/* send fragment */
ret = mca_bml_base_send(bml_btl, descriptor, MCA_BTL_TAG_OSC_RDMA);
goto done;
cleanup:
if (descriptor != NULL) {
mca_bml_base_free(bml_btl, descriptor);
}
done:
return ret;
}