/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2013 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2008      UT-Battelle, LLC.  All rights reserved.
 * Copyright (c) 2010      Oracle and/or its affiliates.  All rights reserved.
 * Copyright (c) 2012      NVIDIA Corporation.  All rights reserved.
 * Copyright (c) 2012-2015 Los Alamos National Security, LLC. All rights
 *                         reserved.
 * Copyright (c) 2015      Cisco Systems, Inc.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "ompi_config.h"
#include "opal/prefetch.h"
#include "opal/mca/mpool/mpool.h"
#include "ompi/constants.h"
#include "ompi/mca/pml/pml.h"
#include "pml_ob1.h"
#include "pml_ob1_hdr.h"
#include "pml_ob1_sendreq.h"
#include "pml_ob1_rdmafrag.h"
#include "pml_ob1_recvreq.h"
#include "ompi/mca/bml/base/base.h"
#include "ompi/memchecker.h"

OBJ_CLASS_INSTANCE(mca_pml_ob1_send_range_t, opal_free_list_item_t,
                   NULL, NULL);
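
/*
 * NOTE (added): a send range is a free-list item describing a portion of
 * the message (offset + length) together with the set of BTLs that will
 * carry it; mca_pml_ob1_send_request_schedule_once() below consumes these
 * ranges when pipelining fragments.
 */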

void mca_pml_ob1_send_request_process_pending(mca_bml_base_btl_t *bml_btl)
{
    int rc, i, s = opal_list_get_size(&mca_pml_ob1.send_pending);

    /* advance pending requests */
    for(i = 0; i < s; i++) {
        mca_pml_ob1_send_pending_t pending_type = MCA_PML_OB1_SEND_PENDING_NONE;
        mca_pml_ob1_send_request_t* sendreq;
        mca_bml_base_btl_t *send_dst;

        sendreq = get_request_from_send_pending(&pending_type);
        if(OPAL_UNLIKELY(NULL == sendreq))
            break;

        switch(pending_type) {
        case MCA_PML_OB1_SEND_PENDING_SCHEDULE:
            rc = mca_pml_ob1_send_request_schedule_exclusive(sendreq);
            if(OMPI_ERR_OUT_OF_RESOURCE == rc) {
                return;
            }
            break;
        case MCA_PML_OB1_SEND_PENDING_START:
            send_dst = mca_bml_base_btl_array_find(
                &sendreq->req_endpoint->btl_eager, bml_btl->btl);
            if (NULL == send_dst) {
                /* Put request back onto pending list and try next one. */
                add_request_to_send_pending(sendreq,
                        MCA_PML_OB1_SEND_PENDING_START, true);
            } else {
                MCA_PML_OB1_SEND_REQUEST_RESET(sendreq);
                rc = mca_pml_ob1_send_request_start_btl(sendreq, send_dst);
                if (OMPI_ERR_OUT_OF_RESOURCE == rc) {
                    /* No more resources on this btl so prepend to the pending
                     * list to minimize reordering and give up for now. */
                    add_request_to_send_pending(sendreq,
                            MCA_PML_OB1_SEND_PENDING_START, false);
                    return;
                }
            }
            break;
        default:
            opal_output(0, "[%s:%d] wrong send request type\n",
                        __FILE__, __LINE__);
            break;
        }
    }
}

/*
 * The free call marks the final stage in a request life-cycle. Starting from
 * this point the request is completed at both the PML and user levels, and
 * can be reused for other p2p communications. Therefore, in the case of the
 * OB1 PML it should be added to the free request list.
 */
static int mca_pml_ob1_send_request_free(struct ompi_request_t** request)
{
    mca_pml_ob1_send_request_t* sendreq = *(mca_pml_ob1_send_request_t**)request;

    assert( false == sendreq->req_send.req_base.req_free_called );

    OPAL_THREAD_LOCK(&ompi_request_lock);
    sendreq->req_send.req_base.req_free_called = true;

    PERUSE_TRACE_COMM_EVENT( PERUSE_COMM_REQ_NOTIFY,
                             &(sendreq->req_send.req_base), PERUSE_SEND );

    if( true == sendreq->req_send.req_base.req_pml_complete ) {
        /* make the buffer defined when the request is completed,
           and before releasing the objects. */
        MEMCHECKER(
            memchecker_call(&opal_memchecker_base_mem_defined,
                            sendreq->req_send.req_base.req_addr,
                            sendreq->req_send.req_base.req_count,
                            sendreq->req_send.req_base.req_datatype);
        );

        MCA_PML_OB1_SEND_REQUEST_RETURN( sendreq );
    }

    OPAL_THREAD_UNLOCK(&ompi_request_lock);

    *request = MPI_REQUEST_NULL;
    return OMPI_SUCCESS;
}

static int mca_pml_ob1_send_request_cancel(struct ompi_request_t* request, int complete)
{
    /* we don't cancel send requests for now */
    return OMPI_SUCCESS;
}

static void mca_pml_ob1_send_request_construct(mca_pml_ob1_send_request_t* req)
{
    req->req_send.req_base.req_type = MCA_PML_REQUEST_SEND;
    req->req_send.req_base.req_ompi.req_free = mca_pml_ob1_send_request_free;
    req->req_send.req_base.req_ompi.req_cancel = mca_pml_ob1_send_request_cancel;
    req->req_rdma_cnt = 0;
    req->req_throttle_sends = false;
    req->rdma_frag = NULL;
    OBJ_CONSTRUCT(&req->req_send_ranges, opal_list_t);
    OBJ_CONSTRUCT(&req->req_send_range_lock, opal_mutex_t);
}

static void mca_pml_ob1_send_request_destruct(mca_pml_ob1_send_request_t* req)
{
    OBJ_DESTRUCT(&req->req_send_ranges);
    OBJ_DESTRUCT(&req->req_send_range_lock);
    if (req->rdma_frag) {
        MCA_PML_OB1_RDMA_FRAG_RETURN(req->rdma_frag);
        req->rdma_frag = NULL;
    }
}

OBJ_CLASS_INSTANCE( mca_pml_ob1_send_request_t,
                    mca_pml_base_send_request_t,
                    mca_pml_ob1_send_request_construct,
                    mca_pml_ob1_send_request_destruct );

/**
 * Completion of a short message - nothing left to schedule.
 */

static inline void
mca_pml_ob1_match_completion_free_request( mca_bml_base_btl_t* bml_btl,
                                           mca_pml_ob1_send_request_t* sendreq )
{
    if( sendreq->req_send.req_bytes_packed > 0 ) {
        PERUSE_TRACE_COMM_EVENT( PERUSE_COMM_REQ_XFER_BEGIN,
                                 &(sendreq->req_send.req_base), PERUSE_SEND );
    }

    /* signal request completion */
    send_request_pml_complete(sendreq);

    /* check for pending requests */
    MCA_PML_OB1_PROGRESS_PENDING(bml_btl);
}
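
/*
 * NOTE (added): BTL send-completion callback for an eagerly sent MATCH
 * fragment.  A failed completion is currently unrecoverable here (see the
 * "TSW - FIX" marker below), so the job is aborted; on success the work is
 * delegated to mca_pml_ob1_match_completion_free_request() above.
 */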
static void
mca_pml_ob1_match_completion_free( struct mca_btl_base_module_t* btl,
                                   struct mca_btl_base_endpoint_t* ep,
                                   struct mca_btl_base_descriptor_t* des,
                                   int status )
{
    mca_pml_ob1_send_request_t* sendreq = (mca_pml_ob1_send_request_t*)des->des_cbdata;
    mca_bml_base_btl_t* bml_btl = (mca_bml_base_btl_t*) des->des_context;

    /* check completion status */
    if( OPAL_UNLIKELY(OMPI_SUCCESS != status) ) {
        /* TSW - FIX */
        opal_output(0, "%s:%d FATAL", __FILE__, __LINE__);
        ompi_rte_abort(-1, NULL);
    }
    mca_pml_ob1_match_completion_free_request( bml_btl, sendreq );
}

static inline void
mca_pml_ob1_rndv_completion_request( mca_bml_base_btl_t* bml_btl,
                                     mca_pml_ob1_send_request_t* sendreq,
                                     size_t req_bytes_delivered )
{
    if( sendreq->req_send.req_bytes_packed > 0 ) {
        PERUSE_TRACE_COMM_EVENT( PERUSE_COMM_REQ_XFER_BEGIN,
                                 &(sendreq->req_send.req_base), PERUSE_SEND );
    }

    OPAL_THREAD_ADD_SIZE_T(&sendreq->req_bytes_delivered, req_bytes_delivered);

    /* advance the request */
    OPAL_THREAD_ADD32(&sendreq->req_state, -1);

    send_request_pml_complete_check(sendreq);

    /* check for pending requests */
    MCA_PML_OB1_PROGRESS_PENDING(bml_btl);
}

/*
 * Completion of the first fragment of a long message that
 * requires an acknowledgement
 */
static void
mca_pml_ob1_rndv_completion( mca_btl_base_module_t* btl,
                             struct mca_btl_base_endpoint_t* ep,
                             struct mca_btl_base_descriptor_t* des,
                             int status )
{
    mca_pml_ob1_send_request_t* sendreq = (mca_pml_ob1_send_request_t*)des->des_cbdata;
    mca_bml_base_btl_t* bml_btl = (mca_bml_base_btl_t*)des->des_context;
    size_t req_bytes_delivered;

    /* check completion status */
    if( OPAL_UNLIKELY(OMPI_SUCCESS != status) ) {
        /* TSW - FIX */
        opal_output(0, "%s:%d FATAL", __FILE__, __LINE__);
        ompi_rte_abort(-1, NULL);
    }

    /* count bytes of user data actually delivered. As the rndv completion only
     * happens in one thread, the increase of the req_bytes_delivered does not
     * have to be atomic.
     */
    req_bytes_delivered = mca_pml_ob1_compute_segment_length_base ((void *) des->des_segments,
                                                                   des->des_segment_count,
                                                                   sizeof(mca_pml_ob1_rendezvous_hdr_t));

    mca_pml_ob1_rndv_completion_request( bml_btl, sendreq, req_bytes_delivered );
}

/**
 * Completion of a get request.
 */
static void
mca_pml_ob1_rget_completion (mca_pml_ob1_rdma_frag_t *frag, int64_t rdma_length)
{
    mca_pml_ob1_send_request_t *sendreq = (mca_pml_ob1_send_request_t *) frag->rdma_req;
    mca_bml_base_btl_t *bml_btl = frag->rdma_bml;

    /* count bytes of user data actually delivered and check for request completion */
    if (OPAL_LIKELY(0 < rdma_length)) {
        OPAL_THREAD_ADD_SIZE_T(&sendreq->req_bytes_delivered, (size_t) rdma_length);
    }

    send_request_pml_complete_check(sendreq);

    MCA_PML_OB1_PROGRESS_PENDING(bml_btl);
}

/**
 * Completion of a control message - return resources.
 */
static void
mca_pml_ob1_send_ctl_completion( mca_btl_base_module_t* btl,
                                 struct mca_btl_base_endpoint_t* ep,
                                 struct mca_btl_base_descriptor_t* des,
                                 int status )
{
    mca_bml_base_btl_t* bml_btl = (mca_bml_base_btl_t*) des->des_context;

    /* check for pending requests */
    MCA_PML_OB1_PROGRESS_PENDING(bml_btl);
}

/**
 * Completion of additional fragments of a large message - may need
 * to schedule additional fragments.
 */
static void
mca_pml_ob1_frag_completion( mca_btl_base_module_t* btl,
                             struct mca_btl_base_endpoint_t* ep,
                             struct mca_btl_base_descriptor_t* des,
                             int status )
{
    mca_pml_ob1_send_request_t* sendreq = (mca_pml_ob1_send_request_t*)des->des_cbdata;
    mca_bml_base_btl_t* bml_btl = (mca_bml_base_btl_t*) des->des_context;
    size_t req_bytes_delivered;

    /* check completion status */
    if( OPAL_UNLIKELY(OMPI_SUCCESS != status) ) {
        /* TSW - FIX */
        opal_output(0, "%s:%d FATAL", __FILE__, __LINE__);
        ompi_rte_abort(-1, NULL);
    }

    /* count bytes of user data actually delivered */
    req_bytes_delivered = mca_pml_ob1_compute_segment_length_base ((void *) des->des_segments,
                                                                   des->des_segment_count,
                                                                   sizeof(mca_pml_ob1_frag_hdr_t));

    OPAL_THREAD_ADD_SIZE_T(&sendreq->req_pipeline_depth, -1);
    OPAL_THREAD_ADD_SIZE_T(&sendreq->req_bytes_delivered, req_bytes_delivered);

    if(send_request_pml_complete_check(sendreq) == false) {
        mca_pml_ob1_send_request_schedule(sendreq);
    }

    /* check for pending requests */
    MCA_PML_OB1_PROGRESS_PENDING(bml_btl);
}

#if OPAL_CUDA_SUPPORT /* CUDA_ASYNC_SEND */
/**
 * This function is called when the copy of the frag from the GPU buffer
 * to the internal buffer is complete. Used to support asynchronous
 * copies from GPU to host buffers. Now the data can be sent.
 */
static void
mca_pml_ob1_copy_frag_completion( mca_btl_base_module_t* btl,
                                  struct mca_btl_base_endpoint_t* ep,
                                  struct mca_btl_base_descriptor_t* des,
                                  int status )
{
    int rc;
    mca_bml_base_btl_t* bml_btl = (mca_bml_base_btl_t*) des->des_context;

    des->des_cbfunc = mca_pml_ob1_frag_completion;
    /* Reset the BTL ownership flag as the BTL can free the descriptor after completion. */
    des->des_flags |= MCA_BTL_DES_FLAGS_BTL_OWNERSHIP;
    OPAL_OUTPUT((-1, "copy_frag_completion FRAG frag=%p", (void *)des));
    /* Currently, we cannot support a failure in the send.  In the blocking
     * case, the counters tracking the fragments being sent are not adjusted
     * until the function returns success, so it handles the error by leaving
     * all the buffer counters intact.  In this case, it is too late so
     * we just abort.  In theory, a new queue could be created to hold this
     * fragment and then attempt to send it out on another BTL. */
    rc = mca_bml_base_send(bml_btl, des, MCA_PML_OB1_HDR_TYPE_FRAG);
    if(OPAL_UNLIKELY(rc < 0)) {
        opal_output(0, "%s:%d FATAL", __FILE__, __LINE__);
        ompi_rte_abort(-1, NULL);
    }
}
#endif /* OPAL_CUDA_SUPPORT */

/**
 * Buffer the entire message and mark as complete.
 */

int mca_pml_ob1_send_request_start_buffered(
    mca_pml_ob1_send_request_t* sendreq,
    mca_bml_base_btl_t* bml_btl,
    size_t size)
{
    mca_btl_base_descriptor_t* des;
    mca_btl_base_segment_t* segment;
    mca_pml_ob1_hdr_t* hdr;
    struct iovec iov;
    unsigned int iov_count;
    size_t max_data, req_bytes_delivered;
    int rc;

    /* allocate descriptor */
    mca_bml_base_alloc(bml_btl, &des,
                       MCA_BTL_NO_ORDER,
                       sizeof(mca_pml_ob1_rendezvous_hdr_t) + size,
                       MCA_BTL_DES_FLAGS_PRIORITY | MCA_BTL_DES_FLAGS_BTL_OWNERSHIP |
                       MCA_BTL_DES_FLAGS_SIGNAL);
    if( OPAL_UNLIKELY(NULL == des) ) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    segment = des->des_segments;

    /* pack the data into the BTL supplied buffer */
    iov.iov_base = (IOVBASE_TYPE*)((unsigned char*)segment->seg_addr.pval +
                                   sizeof(mca_pml_ob1_rendezvous_hdr_t));
    iov.iov_len = size;
    iov_count = 1;
    max_data = size;
    if((rc = opal_convertor_pack( &sendreq->req_send.req_base.req_convertor,
                                  &iov,
                                  &iov_count,
                                  &max_data)) < 0) {
        mca_bml_base_free(bml_btl, des);
        return rc;
    }
    req_bytes_delivered = max_data;

    /* build rendezvous header */
    hdr = (mca_pml_ob1_hdr_t*)segment->seg_addr.pval;
    mca_pml_ob1_rendezvous_hdr_prepare (&hdr->hdr_rndv, MCA_PML_OB1_HDR_TYPE_RNDV, 0,
                                        sendreq->req_send.req_base.req_comm->c_contextid,
                                        sendreq->req_send.req_base.req_comm->c_my_rank,
                                        sendreq->req_send.req_base.req_tag,
                                        (uint16_t)sendreq->req_send.req_base.req_sequence,
                                        sendreq->req_send.req_bytes_packed, sendreq);

    ob1_hdr_hton(hdr, MCA_PML_OB1_HDR_TYPE_RNDV, sendreq->req_send.req_base.req_proc);

    /* update lengths */
    segment->seg_len = sizeof(mca_pml_ob1_rendezvous_hdr_t) + max_data;

    des->des_cbfunc = mca_pml_ob1_rndv_completion;
    des->des_cbdata = sendreq;

    /* buffer the remainder of the message */
    rc = mca_pml_base_bsend_request_alloc((ompi_request_t*)sendreq);
    if( OPAL_UNLIKELY(OMPI_SUCCESS != rc) ) {
        mca_bml_base_free(bml_btl, des);
        return rc;
    }

    iov.iov_base = (IOVBASE_TYPE*)(((unsigned char*)sendreq->req_send.req_addr) + max_data);
    iov.iov_len = max_data = sendreq->req_send.req_bytes_packed - max_data;

    if((rc = opal_convertor_pack( &sendreq->req_send.req_base.req_convertor,
                                  &iov,
                                  &iov_count,
                                  &max_data)) < 0) {
        mca_bml_base_free(bml_btl, des);
        return rc;
    }

    /* re-init convertor for packed data */
    opal_convertor_prepare_for_send( &sendreq->req_send.req_base.req_convertor,
                                     &(ompi_mpi_byte.dt.super),
                                     sendreq->req_send.req_bytes_packed,
                                     sendreq->req_send.req_addr );

    /* wait for ack and completion */
    sendreq->req_state = 2;

    /* request is complete at mpi level */
    OPAL_THREAD_LOCK(&ompi_request_lock);
    MCA_PML_OB1_SEND_REQUEST_MPI_COMPLETE(sendreq, true);
    OPAL_THREAD_UNLOCK(&ompi_request_lock);
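
    /* NOTE (added): mca_bml_base_send() returns a negative value on error,
     * and by convention a positive value (1) when the BTL completed the
     * descriptor in place, in which case the completion callback will not
     * fire and we must invoke the completion logic ourselves below. */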

    /* send */
    rc = mca_bml_base_send(bml_btl, des, MCA_PML_OB1_HDR_TYPE_RNDV);
    if( OPAL_LIKELY( rc >= 0 ) ) {
        if( OPAL_LIKELY( 1 == rc ) ) {
            mca_pml_ob1_rndv_completion_request( bml_btl, sendreq, req_bytes_delivered);
        }
        return OMPI_SUCCESS;
    }
    mca_bml_base_free(bml_btl, des );
    return rc;
}

/**
 * We work on a buffered request with a size smaller than the eager size
 * or the BTL is not able to send the data IN_PLACE. Request a segment
 * that is used for the initial hdr and any eager data. This is used only
 * from the _START macro.
 */
int mca_pml_ob1_send_request_start_copy( mca_pml_ob1_send_request_t* sendreq,
                                         mca_bml_base_btl_t* bml_btl,
                                         size_t size )
{
    mca_btl_base_descriptor_t* des = NULL;
    mca_btl_base_segment_t* segment;
    mca_pml_ob1_hdr_t* hdr;
    struct iovec iov;
    unsigned int iov_count;
    size_t max_data = size;
    int rc;

    if(NULL != bml_btl->btl->btl_sendi) {
        mca_pml_ob1_match_hdr_t match;
        mca_pml_ob1_match_hdr_prepare (&match, MCA_PML_OB1_HDR_TYPE_MATCH, 0,
                                       sendreq->req_send.req_base.req_comm->c_contextid,
                                       sendreq->req_send.req_base.req_comm->c_my_rank,
                                       sendreq->req_send.req_base.req_tag,
                                       (uint16_t)sendreq->req_send.req_base.req_sequence);

        ob1_hdr_hton (&match, MCA_PML_OB1_HDR_TYPE_MATCH, sendreq->req_send.req_base.req_proc);

        /* try to send immediately */
        rc = mca_bml_base_sendi( bml_btl, &sendreq->req_send.req_base.req_convertor,
                                 &match, OMPI_PML_OB1_MATCH_HDR_LEN,
                                 size, MCA_BTL_NO_ORDER,
                                 MCA_BTL_DES_FLAGS_PRIORITY | MCA_BTL_DES_FLAGS_BTL_OWNERSHIP,
                                 MCA_PML_OB1_HDR_TYPE_MATCH,
                                 &des);
        if( OPAL_LIKELY(OMPI_SUCCESS == rc) ) {
            /* signal request completion */
            send_request_pml_complete(sendreq);

            /* check for pending requests */
            MCA_PML_OB1_PROGRESS_PENDING(bml_btl);
            return OMPI_SUCCESS;
        }

        /* just in case the btl changed the convertor, reset it */
        if (size > 0 && NULL != des) {
            MCA_PML_OB1_SEND_REQUEST_RESET(sendreq);
        }
    } else {
        /* allocate descriptor */
        mca_bml_base_alloc( bml_btl, &des,
                            MCA_BTL_NO_ORDER,
                            OMPI_PML_OB1_MATCH_HDR_LEN + size,
                            MCA_BTL_DES_FLAGS_PRIORITY | MCA_BTL_DES_FLAGS_BTL_OWNERSHIP);
    }
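
    /* NOTE (added): at this point `des` either came back from a failed
     * btl_sendi() (the BTL may hand back a descriptor it allocated while
     * attempting the inline send) or from the explicit allocation above;
     * in both cases a NULL descriptor means we are out of resources. */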
    if( OPAL_UNLIKELY(NULL == des) ) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    segment = des->des_segments;

    if(size > 0) {
        /* pack the data into the supplied buffer */
        iov.iov_base = (IOVBASE_TYPE*)((unsigned char*)segment->seg_addr.pval +
                                       OMPI_PML_OB1_MATCH_HDR_LEN);
        iov.iov_len = size;
        iov_count = 1;
        /*
         * Before copying the user buffer, make the target part
         * accessible.
         */
        MEMCHECKER(
            memchecker_call(&opal_memchecker_base_mem_defined,
                            sendreq->req_send.req_base.req_addr,
                            sendreq->req_send.req_base.req_count,
                            sendreq->req_send.req_base.req_datatype);
        );
        (void)opal_convertor_pack( &sendreq->req_send.req_base.req_convertor,
                                   &iov, &iov_count, &max_data );
        /*
         * Packing finished, make the user buffer inaccessible again.
         */
        MEMCHECKER(
            memchecker_call(&opal_memchecker_base_mem_noaccess,
                            sendreq->req_send.req_base.req_addr,
                            sendreq->req_send.req_base.req_count,
                            sendreq->req_send.req_base.req_datatype);
        );
    }

    /* build match header */
    hdr = (mca_pml_ob1_hdr_t*)segment->seg_addr.pval;
    mca_pml_ob1_match_hdr_prepare (&hdr->hdr_match, MCA_PML_OB1_HDR_TYPE_MATCH, 0,
                                   sendreq->req_send.req_base.req_comm->c_contextid,
                                   sendreq->req_send.req_base.req_comm->c_my_rank,
                                   sendreq->req_send.req_base.req_tag,
                                   (uint16_t)sendreq->req_send.req_base.req_sequence);

    ob1_hdr_hton(hdr, MCA_PML_OB1_HDR_TYPE_MATCH, sendreq->req_send.req_base.req_proc);

    /* update lengths */
    segment->seg_len = OMPI_PML_OB1_MATCH_HDR_LEN + max_data;

    /* short message */
    des->des_cbdata = sendreq;
    des->des_cbfunc = mca_pml_ob1_match_completion_free;

    /* send */
    rc = mca_bml_base_send_status(bml_btl, des, MCA_PML_OB1_HDR_TYPE_MATCH);
    if( OPAL_LIKELY( rc >= OMPI_SUCCESS ) ) {
        if( OPAL_LIKELY( 1 == rc ) ) {
            mca_pml_ob1_match_completion_free_request( bml_btl, sendreq );
        }
        return OMPI_SUCCESS;
    }

    if (OMPI_ERR_RESOURCE_BUSY == rc) {
        /* No more resources. Allow the upper level to queue the send */
        rc = OMPI_ERR_OUT_OF_RESOURCE;
    }

    mca_bml_base_free (bml_btl, des);

    return rc;
}

/**
 * BTL can send directly from the user buffer, so allow the BTL
 * to prepare the segment list. Start sending a small message.
 */

int mca_pml_ob1_send_request_start_prepare( mca_pml_ob1_send_request_t* sendreq,
                                            mca_bml_base_btl_t* bml_btl,
                                            size_t size )
{
    mca_btl_base_descriptor_t* des;
    mca_btl_base_segment_t* segment;
    mca_pml_ob1_hdr_t* hdr;
    int rc;

    /* prepare descriptor */
    mca_bml_base_prepare_src( bml_btl,
                              &sendreq->req_send.req_base.req_convertor,
                              MCA_BTL_NO_ORDER,
                              OMPI_PML_OB1_MATCH_HDR_LEN,
                              &size,
                              MCA_BTL_DES_FLAGS_PRIORITY | MCA_BTL_DES_FLAGS_BTL_OWNERSHIP,
                              &des );
    if( OPAL_UNLIKELY(NULL == des) ) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    segment = des->des_segments;

    /* build match header */
    hdr = (mca_pml_ob1_hdr_t*)segment->seg_addr.pval;
    mca_pml_ob1_match_hdr_prepare (&hdr->hdr_match, MCA_PML_OB1_HDR_TYPE_MATCH, 0,
                                   sendreq->req_send.req_base.req_comm->c_contextid,
                                   sendreq->req_send.req_base.req_comm->c_my_rank,
                                   sendreq->req_send.req_base.req_tag,
                                   (uint16_t)sendreq->req_send.req_base.req_sequence);

    ob1_hdr_hton(hdr, MCA_PML_OB1_HDR_TYPE_MATCH, sendreq->req_send.req_base.req_proc);

    /* short message */
    des->des_cbfunc = mca_pml_ob1_match_completion_free;
    des->des_cbdata = sendreq;

    /* send */
    rc = mca_bml_base_send(bml_btl, des, MCA_PML_OB1_HDR_TYPE_MATCH);
    if( OPAL_LIKELY( rc >= 0 ) ) {
        if( OPAL_LIKELY( 1 == rc ) ) {
            mca_pml_ob1_match_completion_free_request( bml_btl, sendreq );
        }
        return OMPI_SUCCESS;
    }
    mca_bml_base_free(bml_btl, des );
    return rc;
}

/**
 * We have contiguous data that is registered - schedule across
 * available nics.
 */

int mca_pml_ob1_send_request_start_rdma( mca_pml_ob1_send_request_t* sendreq,
                                         mca_bml_base_btl_t* bml_btl,
                                         size_t size )
{
    /*
     * When the req_rdma array is constructed, the first element of the array
     * is assigned a different btl in round-robin fashion (if there is more
     * than one RDMA-capable BTL). This way a round-robin distribution of the
     * RDMA operations is achieved.
     */
    mca_btl_base_registration_handle_t *local_handle;
    mca_btl_base_descriptor_t *des;
    mca_pml_ob1_rdma_frag_t *frag;
    mca_pml_ob1_rget_hdr_t *hdr;
    size_t reg_size;
    void *data_ptr;
    int rc;

    bml_btl = sendreq->req_rdma[0].bml_btl;
    if (!(bml_btl->btl_flags & (MCA_BTL_FLAGS_GET | MCA_BTL_FLAGS_CUDA_GET))) {
        sendreq->rdma_frag = NULL;
        /* This BTL does not support get. Use rendezvous to start the RDMA operation using put instead. */
        return mca_pml_ob1_send_request_start_rndv (sendreq, bml_btl, 0, MCA_PML_OB1_HDR_FLAGS_CONTIG |
                                                    MCA_PML_OB1_HDR_FLAGS_PIN);
    }
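
    /* NOTE (added): in the RGET protocol the sender does not push the data.
     * It sends a single RGET header carrying the source address and the
     * BTL registration handle; the receiver then pulls the data with
     * btl_get() and acknowledges with a FIN, which ultimately triggers
     * mca_pml_ob1_rget_completion() above. */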

    /* at this time ob1 does not support non-contiguous gets. the convertor represents a
     * contiguous block of memory */
    opal_convertor_get_current_pointer (&sendreq->req_send.req_base.req_convertor, &data_ptr);

    local_handle = sendreq->req_rdma[0].btl_reg;

    /* allocate an rdma fragment to keep track of the request size for use in the fin message */
    MCA_PML_OB1_RDMA_FRAG_ALLOC(frag);
    if (OPAL_UNLIKELY(NULL == frag)) {
        return OPAL_ERR_OUT_OF_RESOURCE;
    }

    /* fill in necessary fragment data */
    frag->rdma_req = sendreq;
    frag->rdma_bml = bml_btl;
    frag->rdma_length = size;
    frag->cbfunc = mca_pml_ob1_rget_completion;
    /* do not store the local handle in the fragment. it will be released by mca_pml_ob1_free_rdma_resources */

    /* save the fragment for get->put fallback */
    sendreq->rdma_frag = frag;

    reg_size = bml_btl->btl->btl_registration_handle_size;

    /* allocate space for get hdr + segment list */
    mca_bml_base_alloc(bml_btl, &des, MCA_BTL_NO_ORDER, sizeof (*hdr) + reg_size,
                       MCA_BTL_DES_FLAGS_PRIORITY | MCA_BTL_DES_FLAGS_BTL_OWNERSHIP |
                       MCA_BTL_DES_FLAGS_SIGNAL);
    if( OPAL_UNLIKELY(NULL == des) ) {
        /* NTH: no need to reset the convertor here. it will be reset before it is retried */
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    /* build match header */
    hdr = (mca_pml_ob1_rget_hdr_t *) des->des_segments->seg_addr.pval;
    /* TODO -- Add support for multiple segments for get */
    mca_pml_ob1_rget_hdr_prepare (hdr, MCA_PML_OB1_HDR_FLAGS_CONTIG | MCA_PML_OB1_HDR_FLAGS_PIN,
                                  sendreq->req_send.req_base.req_comm->c_contextid,
                                  sendreq->req_send.req_base.req_comm->c_my_rank,
                                  sendreq->req_send.req_base.req_tag,
                                  (uint16_t)sendreq->req_send.req_base.req_sequence,
                                  sendreq->req_send.req_bytes_packed, sendreq,
                                  frag, data_ptr, local_handle, reg_size);

    ob1_hdr_hton(hdr, MCA_PML_OB1_HDR_TYPE_RGET, sendreq->req_send.req_base.req_proc);

    des->des_cbfunc = mca_pml_ob1_send_ctl_completion;
    des->des_cbdata = sendreq;

    /**
     * Well, it's a get so we will not know when the peer will get the data anyway.
     * If we generate the PERUSE event here, at least we will know when we
     * sent the GET message ...
     */
    if( sendreq->req_send.req_bytes_packed > 0 ) {
        PERUSE_TRACE_COMM_EVENT( PERUSE_COMM_REQ_XFER_BEGIN,
                                 &(sendreq->req_send.req_base), PERUSE_SEND );
    }

    /* send */
    rc = mca_bml_base_send(bml_btl, des, MCA_PML_OB1_HDR_TYPE_RGET);
    if (OPAL_UNLIKELY(rc < 0)) {
        mca_bml_base_free(bml_btl, des);
        return rc;
    }

    return OMPI_SUCCESS;
}

/**
 * Rendezvous is required. Not doing rdma so eager send up to
 * the btl's eager limit.
 */

int mca_pml_ob1_send_request_start_rndv( mca_pml_ob1_send_request_t* sendreq,
                                         mca_bml_base_btl_t* bml_btl,
                                         size_t size,
                                         int flags )
{
    mca_btl_base_descriptor_t* des;
    mca_btl_base_segment_t* segment;
    mca_pml_ob1_hdr_t* hdr;
    int rc;

    /* prepare descriptor */
    if(size == 0) {
        mca_bml_base_alloc( bml_btl,
                            &des,
                            MCA_BTL_NO_ORDER,
                            sizeof(mca_pml_ob1_rendezvous_hdr_t),
                            MCA_BTL_DES_FLAGS_PRIORITY | MCA_BTL_DES_FLAGS_BTL_OWNERSHIP );
    } else {
        MEMCHECKER(
            memchecker_call(&opal_memchecker_base_mem_defined,
                            sendreq->req_send.req_base.req_addr,
                            sendreq->req_send.req_base.req_count,
                            sendreq->req_send.req_base.req_datatype);
        );
        mca_bml_base_prepare_src( bml_btl,
                                  &sendreq->req_send.req_base.req_convertor,
                                  MCA_BTL_NO_ORDER,
                                  sizeof(mca_pml_ob1_rendezvous_hdr_t),
                                  &size,
                                  MCA_BTL_DES_FLAGS_PRIORITY | MCA_BTL_DES_FLAGS_BTL_OWNERSHIP |
                                  MCA_BTL_DES_FLAGS_SIGNAL,
                                  &des );
        MEMCHECKER(
            memchecker_call(&opal_memchecker_base_mem_noaccess,
                            sendreq->req_send.req_base.req_addr,
                            sendreq->req_send.req_base.req_count,
                            sendreq->req_send.req_base.req_datatype);
        );
    }

    if( OPAL_UNLIKELY(NULL == des) ) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    segment = des->des_segments;

    /* build hdr */
    hdr = (mca_pml_ob1_hdr_t*)segment->seg_addr.pval;
    mca_pml_ob1_rendezvous_hdr_prepare (&hdr->hdr_rndv, MCA_PML_OB1_HDR_TYPE_RNDV, flags |
                                        MCA_PML_OB1_HDR_FLAGS_SIGNAL,
                                        sendreq->req_send.req_base.req_comm->c_contextid,
                                        sendreq->req_send.req_base.req_comm->c_my_rank,
                                        sendreq->req_send.req_base.req_tag,
                                        (uint16_t)sendreq->req_send.req_base.req_sequence,
                                        sendreq->req_send.req_bytes_packed, sendreq);

    ob1_hdr_hton(hdr, MCA_PML_OB1_HDR_TYPE_RNDV, sendreq->req_send.req_base.req_proc);

    /* first fragment of a long message */
    des->des_cbdata = sendreq;
    des->des_cbfunc = mca_pml_ob1_rndv_completion;

    /* wait for ack and completion */
    sendreq->req_state = 2;

    /* send */
    rc = mca_bml_base_send(bml_btl, des, MCA_PML_OB1_HDR_TYPE_RNDV);
    if( OPAL_LIKELY( rc >= 0 ) ) {
        if( OPAL_LIKELY( 1 == rc ) ) {
            mca_pml_ob1_rndv_completion_request( bml_btl, sendreq, size );
        }
        return OMPI_SUCCESS;
    }
    mca_bml_base_free(bml_btl, des );
    return rc;
}

void mca_pml_ob1_send_request_copy_in_out( mca_pml_ob1_send_request_t *sendreq,
                                           uint64_t send_offset,
                                           uint64_t send_length )
{
    mca_pml_ob1_send_range_t *sr;
    opal_free_list_item_t *i;
    mca_bml_base_endpoint_t* bml_endpoint = sendreq->req_endpoint;
    int num_btls = mca_bml_base_btl_array_get_size(&bml_endpoint->btl_send);
    int n;
    double weight_total = 0;

    if( OPAL_UNLIKELY(0 == send_length) )
        return;

    i = opal_free_list_wait (&mca_pml_ob1.send_ranges);

    sr = (mca_pml_ob1_send_range_t*)i;

    sr->range_send_offset = send_offset;
    sr->range_send_length = send_length;
    sr->range_btl_idx = 0;
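
    /* NOTE (added): pick up to max_send_per_range BTLs for this range and
     * record their weights; the weighted-length calculation below then
     * splits the range across the chosen BTLs in proportion to btl_weight. */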
    for(n = 0; n < num_btls && n < mca_pml_ob1.max_send_per_range; n++) {
        sr->range_btls[n].bml_btl =
            mca_bml_base_btl_array_get_next(&bml_endpoint->btl_send);
        weight_total += sr->range_btls[n].bml_btl->btl_weight;
    }

    sr->range_btl_cnt = n;
    mca_pml_ob1_calc_weighted_length(sr->range_btls, n, send_length,
                                     weight_total);

    OPAL_THREAD_LOCK(&sendreq->req_send_range_lock);
    opal_list_append(&sendreq->req_send_ranges, (opal_list_item_t*)sr);
    OPAL_THREAD_UNLOCK(&sendreq->req_send_range_lock);
}
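
/*
 * NOTE (added): the three helpers below manage the per-request list of send
 * ranges.  All list manipulation happens under req_send_range_lock; the
 * _nolock variant is only called with that lock already held.
 */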
static inline mca_pml_ob1_send_range_t *
get_send_range_nolock(mca_pml_ob1_send_request_t* sendreq)
{
    opal_list_item_t *item;

    item = opal_list_get_first(&sendreq->req_send_ranges);

    if(opal_list_get_end(&sendreq->req_send_ranges) == item)
        return NULL;

    return (mca_pml_ob1_send_range_t*)item;
}

static inline mca_pml_ob1_send_range_t *
get_send_range(mca_pml_ob1_send_request_t* sendreq)
{
    mca_pml_ob1_send_range_t *range;

    OPAL_THREAD_LOCK(&sendreq->req_send_range_lock);
    range = get_send_range_nolock(sendreq);
    OPAL_THREAD_UNLOCK(&sendreq->req_send_range_lock);

    return range;
}

static inline mca_pml_ob1_send_range_t *
get_next_send_range(mca_pml_ob1_send_request_t* sendreq,
                    mca_pml_ob1_send_range_t *range)
{
    OPAL_THREAD_LOCK(&sendreq->req_send_range_lock);
    opal_list_remove_item(&sendreq->req_send_ranges, (opal_list_item_t *)range);
    opal_free_list_return (&mca_pml_ob1.send_ranges, &range->base);
    range = get_send_range_nolock(sendreq);
    OPAL_THREAD_UNLOCK(&sendreq->req_send_range_lock);

    return range;
}

/**
 * Schedule a pipeline of send descriptors for the given request,
 * up to the rdma threshold. If this is a send based protocol,
 * the rdma threshold is the end of the message. Otherwise, schedule
 * fragments up to the threshold to overlap the initial registration/setup
 * costs of the rdma. Only one thread can be inside this function.
 */

int
mca_pml_ob1_send_request_schedule_once(mca_pml_ob1_send_request_t* sendreq)
{
    size_t prev_bytes_remaining = 0;
    mca_pml_ob1_send_range_t *range;
    int num_fail = 0;

    /* check pipeline_depth here before attempting to get any locks */
    if(true == sendreq->req_throttle_sends &&
       sendreq->req_pipeline_depth >= mca_pml_ob1.send_pipeline_depth)
        return OMPI_SUCCESS;

    range = get_send_range(sendreq);

    while(range && (false == sendreq->req_throttle_sends ||
            sendreq->req_pipeline_depth < mca_pml_ob1.send_pipeline_depth)) {
        mca_pml_ob1_frag_hdr_t* hdr;
        mca_btl_base_descriptor_t* des;
        int rc, btl_idx;
        size_t size, offset, data_remaining = 0;
        mca_bml_base_btl_t* bml_btl;

        assert(range->range_send_length != 0);
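
        /* NOTE (added): num_fail counts consecutive passes in which no bytes
         * of this range could be sent.  Once every BTL assigned to the range
         * has failed in a row, we assume all of them are out of resources,
         * park the request on the pending list, and retry later from
         * mca_pml_ob1_send_request_process_pending(). */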
        if(prev_bytes_remaining == range->range_send_length)
            num_fail++;
        else
            num_fail = 0;

        prev_bytes_remaining = range->range_send_length;

        if( OPAL_UNLIKELY(num_fail == range->range_btl_cnt) ) {
            assert(sendreq->req_pending == MCA_PML_OB1_SEND_PENDING_NONE);
            add_request_to_send_pending(sendreq,
                    MCA_PML_OB1_SEND_PENDING_SCHEDULE, true);
            /* Note that the request remains locked. send_request_process_pending()
             * will call schedule_exclusive() directly without taking
             * the lock */
            return OMPI_ERR_OUT_OF_RESOURCE;
        }
|
|
|
|
|
2007-10-18 16:07:37 +04:00
|
|
|
cannot_pack:
|
2007-08-30 16:10:04 +04:00
|
|
|
do {
|
|
|
|
btl_idx = range->range_btl_idx;
|
|
|
|
if(++range->range_btl_idx == range->range_btl_cnt)
|
|
|
|
range->range_btl_idx = 0;
|
2007-10-18 16:07:37 +04:00
|
|
|
} while(!range->range_btls[btl_idx].length);
|
|
|
|
|
|
|
|
bml_btl = range->range_btls[btl_idx].bml_btl;
|
|
|
|
/* If there is a remaining data from another BTL that was too small
|
|
|
|
* for converter to pack then send it through another BTL */
|
|
|
|
range->range_btls[btl_idx].length += data_remaining;
|
|
|
|
size = range->range_btls[btl_idx].length;
|
2007-08-30 16:10:04 +04:00
|
|
|
|
|
|
|
/* makes sure that we don't exceed BTL max send size */
|
2008-10-01 01:02:37 +04:00
|
|
|
if(bml_btl->btl->btl_max_send_size != 0) {
|
2015-07-02 18:11:15 +03:00
|
|
|
#if OPAL_CUDA_SUPPORT
|
|
|
|
size_t max_send_size;
|
|
|
|
if ((sendreq->req_send.req_base.req_convertor.flags & CONVERTOR_CUDA) && (bml_btl->btl->btl_cuda_max_send_size != 0)) {
|
|
|
|
max_send_size = bml_btl->btl->btl_cuda_max_send_size - sizeof(mca_pml_ob1_frag_hdr_t);
|
|
|
|
} else {
|
|
|
|
max_send_size = bml_btl->btl->btl_max_send_size - sizeof(mca_pml_ob1_frag_hdr_t);
|
|
|
|
}
|
|
|
|
#else /* OPAL_CUDA_SUPPORT */
|
2008-10-01 01:02:37 +04:00
|
|
|
size_t max_send_size = bml_btl->btl->btl_max_send_size -
|
2007-08-30 16:10:04 +04:00
|
|
|
sizeof(mca_pml_ob1_frag_hdr_t);
|
2015-07-02 18:11:15 +03:00
|
|
|
#endif /* OPAL_CUDA_SUPPORT */
|
2007-08-30 16:10:04 +04:00
|
|
|
if (size > max_send_size) {
|
|
|
|
size = max_send_size;
|
2006-07-20 18:44:35 +04:00
|
|
|
}
|
2007-08-30 16:10:04 +04:00
|
|
|
}

        /* pack into a descriptor */
        offset = (size_t)range->range_send_offset;
        opal_convertor_set_position(&sendreq->req_send.req_base.req_convertor,
                                    &offset);
        range->range_send_offset = (uint64_t)offset;

        data_remaining = size;
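        /* data_remaining records how many bytes this BTL was asked to pack so
         * the amount can be credited back and carried over to another BTL if
         * the convertor cannot pack the chunk (see cannot_pack above). */

        /* The user buffer must be readable while the convertor packs it, so it
         * is temporarily marked defined for the memory checker and returned to
         * no-access afterwards to catch modifications of in-flight data. */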
        MEMCHECKER(
            memchecker_call(&opal_memchecker_base_mem_defined,
                            sendreq->req_send.req_base.req_addr,
                            sendreq->req_send.req_base.req_count,
                            sendreq->req_send.req_base.req_datatype);
        );
        mca_bml_base_prepare_src(bml_btl, &sendreq->req_send.req_base.req_convertor,
                                 MCA_BTL_NO_ORDER, sizeof(mca_pml_ob1_frag_hdr_t),
                                 &size, MCA_BTL_DES_FLAGS_BTL_OWNERSHIP |
                                 MCA_BTL_DES_SEND_ALWAYS_CALLBACK |
                                 MCA_BTL_DES_FLAGS_SIGNAL, &des);
        MEMCHECKER(
            memchecker_call(&opal_memchecker_base_mem_noaccess,
                            sendreq->req_send.req_base.req_addr,
                            sendreq->req_send.req_base.req_count,
                            sendreq->req_send.req_base.req_datatype);
        );

        if( OPAL_UNLIKELY(des == NULL || size == 0) ) {
            if(des) {
                /* The convertor can't pack this chunk. Append it to a chunk
                 * from another BTL */
                mca_bml_base_free(bml_btl, des);
                range->range_btls[btl_idx].length -= data_remaining;
                goto cannot_pack;
            }
            continue;
        }

        des->des_cbfunc = mca_pml_ob1_frag_completion;
        des->des_cbdata = sendreq;

        /* setup header */
        hdr = (mca_pml_ob1_frag_hdr_t*)des->des_segments->seg_addr.pval;
        mca_pml_ob1_frag_hdr_prepare (hdr, 0, range->range_send_offset, sendreq,
                                      sendreq->req_recv.lval);

        ob1_hdr_hton(hdr, MCA_PML_OB1_HDR_TYPE_FRAG,
                     sendreq->req_send.req_base.req_proc);

#if OMPI_WANT_PERUSE
        PERUSE_TRACE_COMM_OMPI_EVENT(PERUSE_COMM_REQ_XFER_CONTINUE,
                                     &(sendreq->req_send.req_base), size, PERUSE_SEND);
#endif /* OMPI_WANT_PERUSE */

#if OPAL_CUDA_SUPPORT /* CUDA_ASYNC_SEND */
        /* At this point, check whether the BTL is doing an asynchronous copy.
         * This would have been initiated in the mca_bml_base_prepare_src call
         * above. The flag is checked here because we let the hdr be set up
         * prior to checking.
         */
        if (des->des_flags & MCA_BTL_DES_FLAGS_CUDA_COPY_ASYNC) {
            OPAL_OUTPUT((-1, "Initiating async copy on FRAG frag=%p", (void *)des));
            /* Make sure the BTL does not free the frag after completion of the
             * asynchronous copy, as we still need to send the fragment. */
            des->des_flags &= ~MCA_BTL_DES_FLAGS_BTL_OWNERSHIP;
            /* Unclear whether this flag needs to be set, but set it to be sure */
            des->des_flags |= MCA_BTL_DES_SEND_ALWAYS_CALLBACK;
            des->des_cbfunc = mca_pml_ob1_copy_frag_completion;
            range->range_btls[btl_idx].length -= size;
            range->range_send_length -= size;
            range->range_send_offset += size;
            OPAL_THREAD_ADD_SIZE_T(&sendreq->req_pipeline_depth, 1);
            if(range->range_send_length == 0) {
                range = get_next_send_range(sendreq, range);
                prev_bytes_remaining = 0;
            }
            continue;
        }
#endif /* OPAL_CUDA_SUPPORT */

        /* initiate send - note that this may complete before the call returns */
        rc = mca_bml_base_send(bml_btl, des, MCA_PML_OB1_HDR_TYPE_FRAG);
        if( OPAL_LIKELY(rc >= 0) ) {
            /* update state */
            range->range_btls[btl_idx].length -= size;
            range->range_send_length -= size;
            range->range_send_offset += size;
            OPAL_THREAD_ADD_SIZE_T(&sendreq->req_pipeline_depth, 1);
            if(range->range_send_length == 0) {
                range = get_next_send_range(sendreq, range);
                prev_bytes_remaining = 0;
            }
        } else {
            mca_bml_base_free(bml_btl, des);
        }
    }

    return OMPI_SUCCESS;
}

/**
 * A put fragment could not be started. Queue the fragment to be retried later
 * or fall back on send/recv.
 */
static void mca_pml_ob1_send_request_put_frag_failed (mca_pml_ob1_rdma_frag_t *frag, int rc)
{
    mca_pml_ob1_send_request_t* sendreq = (mca_pml_ob1_send_request_t *) frag->rdma_req;
    mca_bml_base_btl_t *bml_btl = frag->rdma_bml;

    if (++frag->retries < mca_pml_ob1.rdma_retries_limit && OMPI_ERR_OUT_OF_RESOURCE == rc) {
        /* queue the frag for later if there was a resource error */
        OPAL_THREAD_LOCK(&mca_pml_ob1.lock);
        opal_list_append(&mca_pml_ob1.rdma_pending, (opal_list_item_t*)frag);
        OPAL_THREAD_UNLOCK(&mca_pml_ob1.lock);
    } else {
        /* tell the receiver to deregister the memory */
        mca_pml_ob1_send_fin (sendreq->req_send.req_base.req_proc, bml_btl,
                              frag->rdma_hdr.hdr_rdma.hdr_frag, 0, MCA_BTL_NO_ORDER,
                              OPAL_ERR_TEMP_OUT_OF_RESOURCE);

        /* send the fragment by copy in/out instead */
        mca_pml_ob1_send_request_copy_in_out(sendreq, frag->rdma_hdr.hdr_rdma.hdr_rdma_offset,
                                             frag->rdma_length);
        /* if the pointer to the receive request is not set it means that the
         * ACK was not yet received. Don't schedule sends before the ACK */
        if (NULL != sendreq->req_recv.pval)
            mca_pml_ob1_send_request_schedule (sendreq);
    }
}

/**
 * An RDMA put operation has completed:
 * (1) Update the request status and, if required, mark it complete
 * (2) Send a FIN control message to the destination
 */
static void mca_pml_ob1_put_completion (mca_btl_base_module_t* btl, struct mca_btl_base_endpoint_t* ep,
                                        void *local_address, mca_btl_base_registration_handle_t *local_handle,
                                        void *context, void *cbdata, int status)
{
    mca_pml_ob1_rdma_frag_t *frag = (mca_pml_ob1_rdma_frag_t *) cbdata;
    mca_pml_ob1_send_request_t *sendreq = (mca_pml_ob1_send_request_t *) frag->rdma_req;
    mca_bml_base_btl_t *bml_btl = (mca_bml_base_btl_t *) context;

    /* check completion status */
    if( OPAL_LIKELY(OMPI_SUCCESS == status) ) {
        /* TODO -- re-add ordering */
        mca_pml_ob1_send_fin (sendreq->req_send.req_base.req_proc, bml_btl,
                              frag->rdma_hdr.hdr_rdma.hdr_frag, frag->rdma_length,
                              0, 0);

        /* check for request completion */
        OPAL_THREAD_ADD_SIZE_T(&sendreq->req_bytes_delivered, frag->rdma_length);

        send_request_pml_complete_check(sendreq);
    } else {
        /* try to fall back on send/recv */
        mca_pml_ob1_send_request_put_frag_failed (frag, status);
    }

    MCA_PML_OB1_RDMA_FRAG_RETURN(frag);

    MCA_PML_OB1_PROGRESS_PENDING(bml_btl);
}
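
/**
 * Start the RDMA put for one fragment: reuse an already cached registration
 * of the local buffer when one is available, otherwise register the region
 * with the BTL, then hand the put off to the BML.
 */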
int mca_pml_ob1_send_request_put_frag( mca_pml_ob1_rdma_frag_t *frag )
{
    mca_pml_ob1_send_request_t *sendreq = (mca_pml_ob1_send_request_t *) frag->rdma_req;
    mca_btl_base_registration_handle_t *local_handle = NULL;
    mca_bml_base_btl_t *bml_btl = frag->rdma_bml;
    int rc;

    if (bml_btl->btl->btl_register_mem && NULL == frag->local_handle) {
        /* Check if the segment is already registered */
        for (size_t i = 0 ; i < sendreq->req_rdma_cnt ; ++i) {
            if (sendreq->req_rdma[i].bml_btl == frag->rdma_bml) {
                /* do not copy the handle to the fragment to avoid deregistering it twice */
                local_handle = sendreq->req_rdma[i].btl_reg;
                break;
            }
        }

        if (NULL == local_handle) {
            /* Not already registered. Register the region with the BTL. */
            mca_bml_base_register_mem (bml_btl, frag->local_address, frag->rdma_length, 0,
                                       &frag->local_handle);

            if (OPAL_UNLIKELY(NULL == frag->local_handle)) {
                mca_pml_ob1_send_request_put_frag_failed (frag, OMPI_ERR_OUT_OF_RESOURCE);

                return OMPI_ERR_OUT_OF_RESOURCE;
            }

            local_handle = frag->local_handle;
        }
    }
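
    /* When the BTL does not require memory registration (btl_register_mem is
     * unset) the put proceeds with a NULL local handle. */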

    PERUSE_TRACE_COMM_OMPI_EVENT( PERUSE_COMM_REQ_XFER_CONTINUE,
                                  &(((mca_pml_ob1_send_request_t *) frag->rdma_req)->req_send.req_base),
                                  frag->rdma_length, PERUSE_SEND );

    rc = mca_bml_base_put (bml_btl, frag->local_address, frag->remote_address, local_handle,
                           (mca_btl_base_registration_handle_t *) frag->remote_handle, frag->rdma_length,
                           0, MCA_BTL_NO_ORDER, mca_pml_ob1_put_completion, frag);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
        mca_pml_ob1_send_request_put_frag_failed (frag, rc);

        return rc;
    }

    return OMPI_SUCCESS;
}

/**
 * Receiver has scheduled an RDMA operation:
 * (1) Allocate an RDMA fragment to maintain the state of the operation
 * (2) Call BTL prepare_src to pin/prepare source buffers
 * (3) Queue the RDMA put
 */
void mca_pml_ob1_send_request_put( mca_pml_ob1_send_request_t* sendreq,
                                   mca_btl_base_module_t* btl,
                                   mca_pml_ob1_rdma_hdr_t* hdr )
{
    mca_bml_base_endpoint_t *bml_endpoint = sendreq->req_endpoint;
    mca_pml_ob1_rdma_frag_t* frag;

    if(hdr->hdr_common.hdr_flags & MCA_PML_OB1_HDR_TYPE_ACK) {
        OPAL_THREAD_ADD32(&sendreq->req_state, -1);
    }

    sendreq->req_recv.pval = hdr->hdr_recv_req.pval;

    if (NULL == sendreq->rdma_frag) {
        MCA_PML_OB1_RDMA_FRAG_ALLOC(frag);

        if( OPAL_UNLIKELY(NULL == frag) ) {
            /* TSW - FIX */
            OMPI_ERROR_LOG(OMPI_ERR_OUT_OF_RESOURCE);
            ompi_rte_abort(-1, NULL);
        }
    } else {
        /* rget fallback on put */
        frag = sendreq->rdma_frag;
        sendreq->rdma_frag = NULL;
        sendreq->req_state = 0;
    }

    /* copy registration data */
    memcpy (frag->remote_handle, hdr + 1, btl->btl_registration_handle_size);

    frag->rdma_bml = mca_bml_base_btl_array_find(&bml_endpoint->btl_rdma, btl);
    frag->rdma_hdr.hdr_rdma = *hdr;
    frag->rdma_req = sendreq;
    frag->rdma_length = hdr->hdr_dst_size;
    frag->rdma_state = MCA_PML_OB1_RDMA_PUT;
    frag->remote_address = hdr->hdr_dst_ptr;
    frag->retries = 0;

    /* Get the address of the current offset. Note: at this time ob1 CAN NOT handle
     * non-contiguous RDMA. If that changes this code will be wrong. */
    opal_convertor_get_offset_pointer (&sendreq->req_send.req_base.req_convertor,
                                       hdr->hdr_rdma_offset, &frag->local_address);

    mca_pml_ob1_send_request_put_frag(frag);
}