/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2014 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2007-2014 Los Alamos National Security, LLC.  All rights
 *                         reserved.
 * Copyright (c) 2014      Cisco Systems, Inc.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#include "ompi_config.h"

#if HAVE_ALLOCA_H
#include <alloca.h>    /* mca_pml_ob1_send() below places its send request on the stack */
#endif

#include "pml_ob1.h"
#include "pml_ob1_sendreq.h"
#include "pml_ob1_recvreq.h"
#include "ompi/peruse/peruse-internal.h"

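/*
 * Create, but do not start, a send request.  This is the PML half of
 * MPI_Send_init: the final 'true' argument to MCA_PML_OB1_SEND_REQUEST_INIT
 * marks the request as persistent, and the request is only started later
 * (e.g. by MPI_Start).
 */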
int mca_pml_ob1_isend_init(void *buf,
                           size_t count,
                           ompi_datatype_t * datatype,
                           int dst,
                           int tag,
                           mca_pml_base_send_mode_t sendmode,
                           ompi_communicator_t * comm,
                           ompi_request_t ** request)
{
    mca_pml_ob1_send_request_t *sendreq = NULL;
    MCA_PML_OB1_SEND_REQUEST_ALLOC(comm, dst, sendreq);
    if (NULL == sendreq)
        return OMPI_ERR_OUT_OF_RESOURCE;

    MCA_PML_OB1_SEND_REQUEST_INIT(sendreq,
                                  buf,
                                  count,
                                  datatype,
                                  dst, tag,
                                  comm, sendmode, true);

    PERUSE_TRACE_COMM_EVENT (PERUSE_COMM_REQ_ACTIVATE,
                             &(sendreq)->req_send.req_base,
                             PERUSE_SEND);

    *request = (ompi_request_t *) sendreq;
    return OMPI_SUCCESS;
}

/* try to get a small message out on to the wire quickly */
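/*
 * Returns the packed size of the message (>= 0) on success.  Returns an
 * OMPI error code (< 0) when the message cannot go out inline -- it is
 * larger than the 256-byte cutoff, or the selected BTL implements no
 * sendi (immediate send) method -- in which case the caller falls back
 * to a regular send request.
 */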
static inline int mca_pml_ob1_send_inline (void *buf, size_t count,
                                           ompi_datatype_t * datatype,
                                           int dst, int tag, int16_t seqn,
                                           ompi_proc_t *dst_proc, mca_bml_base_endpoint_t* endpoint,
                                           ompi_communicator_t * comm)
{
    mca_btl_base_descriptor_t *des = NULL;
    mca_pml_ob1_match_hdr_t match;
    mca_bml_base_btl_t *bml_btl;
    OPAL_PTRDIFF_TYPE lb, extent;
    opal_convertor_t convertor;
    size_t size = 0;
    int rc;

    bml_btl = mca_bml_base_btl_array_get_next(&endpoint->btl_eager);

    ompi_datatype_get_extent (datatype, &lb, &extent);

    /* inline sends are limited to small payloads (256 bytes here) and to
     * BTLs that implement the immediate-send (sendi) interface */
    if (OPAL_UNLIKELY((extent * count) > 256 || !bml_btl->btl->btl_sendi)) {
        return OMPI_ERR_NOT_AVAILABLE;
    }

    if (count > 0) {
        /* initialize just enough of the convertor to avoid a SEGV in opal_convertor_cleanup */
        OBJ_CONSTRUCT(&convertor, opal_convertor_t);

        /* We will create a convertor specialized for the remote
         * architecture and prepared with the datatype. */
        opal_convertor_copy_and_prepare_for_send (dst_proc->super.proc_convertor,
                                                  (const struct opal_datatype_t *) datatype,
                                                  count, buf, 0, &convertor);
        opal_convertor_get_packed_size (&convertor, &size);
    }

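    /* Fill in a MATCH header: everything the receiver needs to match this
     * fragment against a posted receive (communicator context, source rank,
     * tag, and the per-peer sequence number that preserves MPI ordering). */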
    match.hdr_common.hdr_flags = 0;
    match.hdr_common.hdr_type = MCA_PML_OB1_HDR_TYPE_MATCH;
    match.hdr_ctx = comm->c_contextid;
    match.hdr_src = comm->c_my_rank;
    match.hdr_tag = tag;
    match.hdr_seq = seqn;

    ob1_hdr_hton(&match, MCA_PML_OB1_HDR_TYPE_MATCH, dst_proc);

    /* try to send immediately */
    rc = mca_bml_base_sendi (bml_btl, &convertor, &match, OMPI_PML_OB1_MATCH_HDR_LEN,
                             size, MCA_BTL_NO_ORDER, MCA_BTL_DES_FLAGS_PRIORITY | MCA_BTL_DES_FLAGS_BTL_OWNERSHIP,
                             MCA_PML_OB1_HDR_TYPE_MATCH, &des);
    if (count > 0) {
        opal_convertor_cleanup (&convertor);
    }

    if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
        if (des) {
            mca_bml_base_free (bml_btl, des);
        }

        return rc;
    }

    return (int) size;
}

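/*
 * Non-blocking send: the PML entry point reached by MPI_Isend (and
 * friends) when ob1 is the selected PML.  Small, non-synchronous
 * messages are first tried on the inline fast path above; anything
 * else gets a full send request.
 */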
int mca_pml_ob1_isend(void *buf,
                      size_t count,
                      ompi_datatype_t * datatype,
                      int dst,
                      int tag,
                      mca_pml_base_send_mode_t sendmode,
                      ompi_communicator_t * comm,
                      ompi_request_t ** request)
{
    mca_pml_ob1_comm_t* ob1_comm = comm->c_pml_comm;
    mca_pml_ob1_send_request_t *sendreq = NULL;
    ompi_proc_t *dst_proc = ompi_comm_peer_lookup (comm, dst);
    mca_bml_base_endpoint_t* endpoint = (mca_bml_base_endpoint_t*)
        dst_proc->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_BML];
    int16_t seqn;
    int rc;

    /* mirror the unreachable-peer guard in mca_pml_ob1_send() below; without
     * it a NULL endpoint would be dereferenced on the inline path */
    if (OPAL_UNLIKELY(NULL == endpoint)) {
        return OMPI_ERR_UNREACH;
    }

    seqn = (uint16_t) OPAL_THREAD_ADD32(&ob1_comm->procs[dst].send_sequence, 1);

    if (MCA_PML_BASE_SEND_SYNCHRONOUS != sendmode) {
        rc = mca_pml_ob1_send_inline (buf, count, datatype, dst, tag, seqn, dst_proc,
                                      endpoint, comm);
        if (OPAL_LIKELY(0 <= rc)) {
            /* NTH: it is legal to return ompi_request_empty since the only valid
             * field in a send completion status is whether or not the send was
             * cancelled (which it can't be at this point anyway). */
            *request = &ompi_request_empty;
            return OMPI_SUCCESS;
        }
    }

    MCA_PML_OB1_SEND_REQUEST_ALLOC(comm, dst, sendreq);
    if (NULL == sendreq)
        return OMPI_ERR_OUT_OF_RESOURCE;

    MCA_PML_OB1_SEND_REQUEST_INIT(sendreq,
                                  buf,
                                  count,
                                  datatype,
                                  dst, tag,
                                  comm, sendmode, false);

    PERUSE_TRACE_COMM_EVENT (PERUSE_COMM_REQ_ACTIVATE,
                             &(sendreq)->req_send.req_base,
                             PERUSE_SEND);

    MCA_PML_OB1_SEND_REQUEST_START_W_SEQ(sendreq, endpoint, seqn, rc);
    *request = (ompi_request_t *) sendreq;
    return rc;
}

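/*
 * Blocking send: the PML entry point reached by MPI_Send.  Because the
 * request cannot outlive this call, it is placed on the stack (alloca)
 * instead of being drawn from the request free list.  Buffered sends are
 * the exception: they may complete after MPI_Send returns, so they are
 * routed through mca_pml_ob1_isend() to get a real heap-allocated request.
 */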
int mca_pml_ob1_send(void *buf,
                     size_t count,
                     ompi_datatype_t * datatype,
                     int dst,
                     int tag,
                     mca_pml_base_send_mode_t sendmode,
                     ompi_communicator_t * comm)
{
    mca_pml_ob1_comm_t* ob1_comm = comm->c_pml_comm;
    ompi_proc_t *dst_proc = ompi_comm_peer_lookup (comm, dst);
    mca_bml_base_endpoint_t* endpoint = (mca_bml_base_endpoint_t*)
        dst_proc->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_BML];
    mca_pml_ob1_send_request_t *sendreq =
        alloca(mca_pml_base_send_requests.fl_frag_size);
    int16_t seqn;
    int rc;

    if (OPAL_UNLIKELY(MCA_PML_BASE_SEND_BUFFERED == sendmode)) {
        /* large buffered sends *need* a real request so use isend instead */
        ompi_request_t *brequest;

        rc = mca_pml_ob1_isend (buf, count, datatype, dst, tag, sendmode, comm, &brequest);
        if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
            return rc;
        }

        /* free the request and return. don't care if it completes now */
        ompi_request_free (&brequest);
        return OMPI_SUCCESS;
    }

    if (OPAL_UNLIKELY(NULL == endpoint)) {
        return OMPI_ERR_UNREACH;
    }

    seqn = (uint16_t) OPAL_THREAD_ADD32(&ob1_comm->procs[dst].send_sequence, 1);

    if (MCA_PML_BASE_SEND_SYNCHRONOUS != sendmode) {
        rc = mca_pml_ob1_send_inline (buf, count, datatype, dst, tag, seqn, dst_proc,
                                      endpoint, comm);
        if (OPAL_LIKELY(0 <= rc)) {
            return OMPI_SUCCESS;
        }
    }

    /* construct the request in the stack space reserved above */
    OBJ_CONSTRUCT(sendreq, mca_pml_ob1_send_request_t);
    sendreq->req_send.req_base.req_proc = dst_proc;
    sendreq->src_des = NULL;

    MCA_PML_OB1_SEND_REQUEST_INIT(sendreq,
                                  buf,
                                  count,
                                  datatype,
                                  dst, tag,
                                  comm, sendmode, false);

    PERUSE_TRACE_COMM_EVENT (PERUSE_COMM_REQ_ACTIVATE,
                             &sendreq->req_send.req_base,
                             PERUSE_SEND);

    MCA_PML_OB1_SEND_REQUEST_START_W_SEQ(sendreq, endpoint, seqn, rc);
    if (rc != OMPI_SUCCESS) {
        return rc;
    }

    ompi_request_wait_completion(&sendreq->req_send.req_base.req_ompi);

    rc = sendreq->req_send.req_base.req_ompi.req_status.MPI_ERROR;
    MCA_PML_BASE_SEND_REQUEST_FINI(&sendreq->req_send);
    OBJ_DESTRUCT(sendreq);

    return rc;
}