
r30343 introduced the optimization of putting the OB1 sendreq and recvreq on the stack for blocking sends and receives. However, the requests did not contain enough storage for the data that is normally immediately ''after'' the request (e.g., BTL data). This commit changes these requests to be pointers and to use alloca() to get enough total space for the OB1 request and all the associated data. The change is smaller than it looks; most of it is just changing from "foo.bar" to "foo->bar" notation (etc.). Submitted by Jeff, reviewed by Nathan. But we want George to look at this (and get a little soak time on the trunk) before moving to v1.8. cmr=v1.8.2:reviewer=bosilca This commit was SVN r31806. The following SVN revision numbers were found above: r30343 --> open-mpi/ompi@2b57f4227e
229 lines
8.0 KiB
C
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2014 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2007-2014 Los Alamos National Security, LLC.  All rights
 *                         reserved.
 * Copyright (c) 2014      Cisco Systems, Inc.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
|
|
|
|
#include "ompi_config.h"
|
|
|
|
#include "pml_ob1.h"
|
|
#include "pml_ob1_sendreq.h"
|
|
#include "pml_ob1_recvreq.h"
|
|
#include "ompi/peruse/peruse-internal.h"
|
|
|
|
int mca_pml_ob1_isend_init(void *buf,
|
|
size_t count,
|
|
ompi_datatype_t * datatype,
|
|
int dst,
|
|
int tag,
|
|
mca_pml_base_send_mode_t sendmode,
|
|
ompi_communicator_t * comm,
|
|
ompi_request_t ** request)
|
|
{
|
|
mca_pml_ob1_send_request_t *sendreq = NULL;
|
|
MCA_PML_OB1_SEND_REQUEST_ALLOC(comm, dst, sendreq);
|
|
if (NULL == sendreq)
|
|
return OMPI_ERR_OUT_OF_RESOURCE;
|
|
|
|
MCA_PML_OB1_SEND_REQUEST_INIT(sendreq,
|
|
buf,
|
|
count,
|
|
datatype,
|
|
dst, tag,
|
|
comm, sendmode, true);
|
|
|
|
PERUSE_TRACE_COMM_EVENT (PERUSE_COMM_REQ_ACTIVATE,
|
|
&(sendreq)->req_send.req_base,
|
|
PERUSE_SEND);
|
|
|
|
*request = (ompi_request_t *) sendreq;
|
|
return OMPI_SUCCESS;
|
|
}
|
|
|
|
/* try to get a small message out on to the wire quickly */
|
|
/* try to get a small message out on to the wire quickly */
/*
 * Attempt an eager "send-immediate" through the first eager BTL without
 * allocating a PML send request.
 *
 * Returns the number of bytes packed (as a non-negative int) on success,
 * OMPI_ERR_NOT_AVAILABLE when the message is too large or the BTL has no
 * sendi function, or the error code from mca_bml_base_sendi on failure.
 * Callers treat any value >= 0 as success.
 */
static inline int mca_pml_ob1_send_inline (void *buf, size_t count,
                                           ompi_datatype_t * datatype,
                                           int dst, int tag, int16_t seqn,
                                           ompi_proc_t *dst_proc, mca_bml_base_endpoint_t* endpoint,
                                           ompi_communicator_t * comm)
{
    mca_btl_base_descriptor_t *des = NULL;
    mca_pml_ob1_match_hdr_t match;
    mca_bml_base_btl_t *bml_btl;
    OPAL_PTRDIFF_TYPE lb, extent;
    opal_convertor_t convertor;
    size_t size = 0;
    int rc;

    /* Round-robin over the eager BTLs attached to this endpoint. */
    bml_btl = mca_bml_base_btl_array_get_next(&endpoint->btl_eager);

    ompi_datatype_get_extent (datatype, &lb, &extent);

    /* Bail out if the payload exceeds the inline cutoff (256 bytes --
     * NOTE(review): presumably chosen to match typical eager-inline
     * limits; confirm) or the chosen BTL has no send-immediate hook. */
    if (OPAL_UNLIKELY((extent * count) > 256 || !bml_btl->btl->btl_sendi)) {
        return OMPI_ERR_NOT_AVAILABLE;
    }

    if (count > 0) {
        /* initialize just enough of the convertor to avoid a SEGV in opal_convertor_cleanup */
        OBJ_CONSTRUCT(&convertor, opal_convertor_t);

        /* We will create a convertor specialized for the */
        /* remote architecture and prepared with the datatype. */
        opal_convertor_copy_and_prepare_for_send (dst_proc->proc_convertor,
                                                  (const struct opal_datatype_t *) datatype,
                                                  count, buf, 0, &convertor);
        opal_convertor_get_packed_size (&convertor, &size);
    }
    /* NOTE(review): when count == 0 the convertor is never constructed but
     * its address is still passed to mca_bml_base_sendi below with size 0;
     * presumably sendi ignores the convertor for zero-byte payloads --
     * confirm against the BTL sendi contract. */

    /* Fill in the OB1 match header that precedes the payload. */
    match.hdr_common.hdr_flags = 0;
    match.hdr_common.hdr_type = MCA_PML_OB1_HDR_TYPE_MATCH;
    match.hdr_ctx = comm->c_contextid;
    match.hdr_src = comm->c_my_rank;
    match.hdr_tag = tag;
    match.hdr_seq = seqn;

    /* Convert the header to network byte order if the peer requires it. */
    ob1_hdr_hton(&match, MCA_PML_OB1_HDR_TYPE_MATCH, dst_proc);

    /* try to send immediately */
    rc = mca_bml_base_sendi (bml_btl, &convertor, &match, OMPI_PML_OB1_MATCH_HDR_LEN,
                             size, MCA_BTL_NO_ORDER, MCA_BTL_DES_FLAGS_PRIORITY | MCA_BTL_DES_FLAGS_BTL_OWNERSHIP,
                             MCA_PML_OB1_HDR_TYPE_MATCH, &des);
    if (count > 0) {
        /* Release convertor resources whether or not the send succeeded. */
        opal_convertor_cleanup (&convertor);
    }

    if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
        /* sendi may have handed back a descriptor it could not use;
         * return it to the BTL before propagating the error. */
        if (des) {
            mca_bml_base_free (bml_btl, des);
        }

        return rc;
    }

    /* Success: report the packed payload size (always >= 0). */
    return (int) size;
}
|
|
|
|
int mca_pml_ob1_isend(void *buf,
|
|
size_t count,
|
|
ompi_datatype_t * datatype,
|
|
int dst,
|
|
int tag,
|
|
mca_pml_base_send_mode_t sendmode,
|
|
ompi_communicator_t * comm,
|
|
ompi_request_t ** request)
|
|
{
|
|
int rc;
|
|
mca_pml_ob1_send_request_t *sendreq = NULL;
|
|
|
|
MCA_PML_OB1_SEND_REQUEST_ALLOC(comm, dst, sendreq);
|
|
if (NULL == sendreq)
|
|
return OMPI_ERR_OUT_OF_RESOURCE;
|
|
|
|
MCA_PML_OB1_SEND_REQUEST_INIT(sendreq,
|
|
buf,
|
|
count,
|
|
datatype,
|
|
dst, tag,
|
|
comm, sendmode, false);
|
|
|
|
PERUSE_TRACE_COMM_EVENT (PERUSE_COMM_REQ_ACTIVATE,
|
|
&(sendreq)->req_send.req_base,
|
|
PERUSE_SEND);
|
|
|
|
MCA_PML_OB1_SEND_REQUEST_START(sendreq, rc);
|
|
*request = (ompi_request_t *) sendreq;
|
|
return rc;
|
|
}
|
|
|
|
int mca_pml_ob1_send(void *buf,
|
|
size_t count,
|
|
ompi_datatype_t * datatype,
|
|
int dst,
|
|
int tag,
|
|
mca_pml_base_send_mode_t sendmode,
|
|
ompi_communicator_t * comm)
|
|
{
|
|
mca_pml_ob1_comm_t* ob1_comm = comm->c_pml_comm;
|
|
ompi_proc_t *dst_proc = ompi_comm_peer_lookup (comm, dst);
|
|
mca_bml_base_endpoint_t* endpoint = (mca_bml_base_endpoint_t*)
|
|
dst_proc->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_BML];
|
|
mca_pml_ob1_send_request_t *sendreq =
|
|
alloca(sizeof(mca_pml_ob1_send_request_t) +
|
|
(mca_pml_ob1.max_rdma_per_request - 1) *
|
|
sizeof(mca_pml_ob1_com_btl_t));
|
|
int16_t seqn;
|
|
int rc;
|
|
|
|
if (OPAL_UNLIKELY(MCA_PML_BASE_SEND_BUFFERED == sendmode)) {
|
|
/* large buffered sends *need* a real request so use isend instead */
|
|
ompi_request_t *brequest;
|
|
|
|
rc = mca_pml_ob1_isend (buf, count, datatype, dst, tag, sendmode, comm, &brequest);
|
|
if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
|
|
return rc;
|
|
}
|
|
|
|
/* free the request and return. don't care if it completes now */
|
|
ompi_request_free (&brequest);
|
|
return OMPI_SUCCESS;
|
|
}
|
|
|
|
if (OPAL_UNLIKELY(NULL == endpoint)) {
|
|
return OMPI_ERR_UNREACH;
|
|
}
|
|
|
|
seqn = (uint16_t) OPAL_THREAD_ADD32(&ob1_comm->procs[dst].send_sequence, 1);
|
|
|
|
if (MCA_PML_BASE_SEND_SYNCHRONOUS != sendmode) {
|
|
rc = mca_pml_ob1_send_inline (buf, count, datatype, dst, tag, seqn, dst_proc,
|
|
endpoint, comm);
|
|
if (OPAL_LIKELY(0 <= rc)) {
|
|
return OMPI_SUCCESS;
|
|
}
|
|
}
|
|
|
|
OBJ_CONSTRUCT(sendreq, mca_pml_ob1_send_request_t);
|
|
sendreq->req_send.req_base.req_proc = dst_proc;
|
|
sendreq->src_des = NULL;
|
|
|
|
MCA_PML_OB1_SEND_REQUEST_INIT(sendreq,
|
|
buf,
|
|
count,
|
|
datatype,
|
|
dst, tag,
|
|
comm, sendmode, false);
|
|
|
|
PERUSE_TRACE_COMM_EVENT (PERUSE_COMM_REQ_ACTIVATE,
|
|
&sendreq->req_send.req_base,
|
|
PERUSE_SEND);
|
|
|
|
MCA_PML_OB1_SEND_REQUEST_START_W_SEQ(sendreq, endpoint, seqn, rc);
|
|
if (rc != OMPI_SUCCESS) {
|
|
return rc;
|
|
}
|
|
|
|
ompi_request_wait_completion(&sendreq->req_send.req_base.req_ompi);
|
|
|
|
rc = sendreq->req_send.req_base.req_ompi.req_status.MPI_ERROR;
|
|
MCA_PML_BASE_SEND_REQUEST_FINI(&sendreq->req_send);
|
|
OBJ_DESTRUCT(sendreq);
|
|
|
|
return rc;
|
|
}
|