/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2013 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2008      UT-Battelle, LLC. All rights reserved.
 * Copyright (c) 2011      Sandia National Laboratories. All rights reserved.
 * Copyright (c) 2012-2013 NVIDIA Corporation.  All rights reserved.
 * Copyright (c) 2011-2012 Los Alamos National Security, LLC. All rights
 *                         reserved.
 * Copyright (c) 2012      FUJITSU LIMITED.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "ompi_config.h"

#include "ompi/mca/pml/pml.h"
#include "ompi/mca/bml/bml.h"
#include "ompi/mca/btl/btl.h"
#include "ompi/mca/mpool/mpool.h"
#include "pml_ob1_comm.h"
#include "pml_ob1_recvreq.h"
#include "pml_ob1_recvfrag.h"
#include "pml_ob1_sendreq.h"
#include "pml_ob1_rdmafrag.h"
#include "ompi/mca/bml/base/base.h"
#include "opal/util/arch.h"
#include "ompi/memchecker.h"
#if OPAL_CUDA_SUPPORT
#include "opal/datatype/opal_datatype_cuda.h"
#include "ompi/mca/common/cuda/common_cuda.h"
#endif /* OPAL_CUDA_SUPPORT */

#if OPAL_CUDA_SUPPORT
int mca_pml_ob1_cuda_need_buffers(mca_pml_ob1_recv_request_t* recvreq,
                                  mca_btl_base_module_t* btl);
#endif /* OPAL_CUDA_SUPPORT */

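/*
 * Retry receive requests that were queued on mca_pml_ob1.recv_pending
 * because scheduling previously ran out of resources; stop as soon as a
 * request still cannot be scheduled.
 */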
void mca_pml_ob1_recv_request_process_pending(void)
|
|
|
|
{
|
|
|
|
mca_pml_ob1_recv_request_t* recvreq;
|
2010-10-13 00:11:48 +04:00
|
|
|
int rc, i, s = (int)opal_list_get_size(&mca_pml_ob1.recv_pending);
|
2006-07-20 18:44:35 +04:00
|
|
|
|
|
|
|
for(i = 0; i < s; i++) {
|
|
|
|
OPAL_THREAD_LOCK(&mca_pml_ob1.lock);
|
|
|
|
recvreq = (mca_pml_ob1_recv_request_t*)
|
|
|
|
opal_list_remove_first(&mca_pml_ob1.recv_pending);
|
|
|
|
OPAL_THREAD_UNLOCK(&mca_pml_ob1.lock);
|
2007-07-11 03:45:23 +04:00
|
|
|
if( OPAL_UNLIKELY(NULL == recvreq) )
|
2006-07-20 18:44:35 +04:00
|
|
|
break;
|
|
|
|
recvreq->req_pending = false;
|
2010-10-13 00:11:48 +04:00
|
|
|
rc = mca_pml_ob1_recv_request_schedule_exclusive(recvreq, NULL);
|
2012-04-06 18:23:13 +04:00
|
|
|
if(OMPI_ERR_OUT_OF_RESOURCE == rc)
|
2006-07-20 18:44:35 +04:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2005-05-24 02:06:50 +04:00
|
|
|
|
2006-03-24 07:21:30 +03:00
|
|
|
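/*
 * Called when MPI releases the request.  The request object is returned
 * to the free list only once the PML side has also completed it.
 */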
static int mca_pml_ob1_recv_request_free(struct ompi_request_t** request)
|
2005-05-24 02:06:50 +04:00
|
|
|
{
|
2005-09-15 22:47:59 +04:00
|
|
|
mca_pml_ob1_recv_request_t* recvreq = *(mca_pml_ob1_recv_request_t**)request;
|
2006-03-16 01:53:41 +03:00
|
|
|
|
|
|
|
assert( false == recvreq->req_recv.req_base.req_free_called );
|
|
|
|
|
|
|
|
OPAL_THREAD_LOCK(&ompi_request_lock);
|
|
|
|
recvreq->req_recv.req_base.req_free_called = true;
|
2006-03-31 21:09:09 +04:00
|
|
|
|
|
|
|
PERUSE_TRACE_COMM_EVENT( PERUSE_COMM_REQ_NOTIFY,
|
|
|
|
&(recvreq->req_recv.req_base), PERUSE_RECV );
|
|
|
|
|
2006-09-29 03:54:38 +04:00
|
|
|
if( true == recvreq->req_recv.req_base.req_pml_complete ) {
|
2008-11-27 19:34:02 +03:00
|
|
|
/* make buffer defined when the request is completed,
|
|
|
|
and before releasing the objects. */
|
|
|
|
MEMCHECKER(
|
|
|
|
memchecker_call(&opal_memchecker_base_mem_defined,
|
|
|
|
recvreq->req_recv.req_base.req_addr,
|
|
|
|
recvreq->req_recv.req_base.req_count,
|
|
|
|
recvreq->req_recv.req_base.req_datatype);
|
|
|
|
);
|
|
|
|
|
2007-09-18 20:18:47 +04:00
|
|
|
MCA_PML_OB1_RECV_REQUEST_RETURN( recvreq );
|
2006-09-29 03:54:38 +04:00
|
|
|
}
|
|
|
|
|
2006-03-16 01:53:41 +03:00
|
|
|
OPAL_THREAD_UNLOCK(&ompi_request_lock);
|
2006-02-14 12:09:05 +03:00
|
|
|
*request = MPI_REQUEST_NULL;
|
2005-05-24 02:06:50 +04:00
|
|
|
return OMPI_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
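/*
 * Cancel a posted receive.  A request that has already been matched
 * cannot be cancelled; otherwise it is removed from the posted queue and
 * completed with the cancelled status set.
 */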
static int mca_pml_ob1_recv_request_cancel(struct ompi_request_t* ompi_request, int complete)
|
|
|
|
{
|
|
|
|
mca_pml_ob1_recv_request_t* request = (mca_pml_ob1_recv_request_t*)ompi_request;
|
|
|
|
mca_pml_ob1_comm_t* comm = request->req_recv.req_base.req_comm->c_pml_comm;
|
|
|
|
|
2012-07-26 18:06:24 +04:00
|
|
|
if( true == request->req_match_received ) { /* way too late to cancel this one */
|
|
|
|
assert( OMPI_ANY_TAG != ompi_request->req_status.MPI_TAG ); /* it must have been matched */
|
2008-02-12 11:46:27 +03:00
|
|
|
return OMPI_SUCCESS;
|
2005-05-24 02:06:50 +04:00
|
|
|
}
|
2008-02-12 11:46:27 +03:00
|
|
|
|
2005-05-24 02:06:50 +04:00
|
|
|
/* The rest should be protected behind the match logic lock */
|
2005-07-04 02:45:48 +04:00
|
|
|
OPAL_THREAD_LOCK(&comm->matching_lock);
|
2012-07-26 18:06:24 +04:00
|
|
|
if( request->req_recv.req_base.req_peer == OMPI_ANY_SOURCE ) {
|
|
|
|
opal_list_remove_item( &comm->wild_receives, (opal_list_item_t*)request );
|
|
|
|
} else {
|
|
|
|
mca_pml_ob1_comm_proc_t* proc = comm->procs + request->req_recv.req_base.req_peer;
|
|
|
|
opal_list_remove_item(&proc->specific_receives, (opal_list_item_t*)request);
|
2005-05-24 02:06:50 +04:00
|
|
|
}
|
2012-07-26 18:06:24 +04:00
|
|
|
PERUSE_TRACE_COMM_EVENT( PERUSE_COMM_REQ_REMOVE_FROM_POSTED_Q,
|
|
|
|
&(request->req_recv.req_base), PERUSE_RECV );
|
|
|
|
/**
|
|
|
|
* As now the PML is done with this request we have to force the pml_complete
|
|
|
|
* to true. Otherwise, the request will never be freed.
|
|
|
|
*/
|
|
|
|
request->req_recv.req_base.req_pml_complete = true;
|
2005-07-04 02:45:48 +04:00
|
|
|
OPAL_THREAD_UNLOCK(&comm->matching_lock);
|
2005-05-24 02:06:50 +04:00
|
|
|
|
2005-07-04 02:45:48 +04:00
|
|
|
OPAL_THREAD_LOCK(&ompi_request_lock);
|
2005-05-24 02:06:50 +04:00
|
|
|
ompi_request->req_status._cancelled = true;
|
2006-01-26 02:17:17 +03:00
|
|
|
/* This macro will set the req_complete to true so the MPI Test/Wait* functions
|
|
|
|
* on this request will be able to complete. As the status is marked as
|
|
|
|
* cancelled the cancel state will be detected.
|
2005-05-24 02:06:50 +04:00
|
|
|
*/
|
2006-03-31 21:09:09 +04:00
|
|
|
MCA_PML_OB1_RECV_REQUEST_MPI_COMPLETE(request);
|
2005-07-04 02:45:48 +04:00
|
|
|
OPAL_THREAD_UNLOCK(&ompi_request_lock);
|
2008-02-12 11:46:27 +03:00
|
|
|
/*
|
2012-07-26 18:06:24 +04:00
|
|
|
* Receive request cancelled, make user buffer accessible.
|
2008-02-12 11:46:27 +03:00
|
|
|
*/
|
|
|
|
MEMCHECKER(
|
2008-05-07 16:28:51 +04:00
|
|
|
memchecker_call(&opal_memchecker_base_mem_defined,
|
|
|
|
request->req_recv.req_base.req_addr,
|
|
|
|
request->req_recv.req_base.req_count,
|
|
|
|
request->req_recv.req_base.req_datatype);
|
2008-02-12 11:46:27 +03:00
|
|
|
);
|
2005-05-24 02:06:50 +04:00
|
|
|
return OMPI_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mca_pml_ob1_recv_request_construct(mca_pml_ob1_recv_request_t* request)
|
|
|
|
{
|
2014-01-21 19:16:21 +04:00
|
|
|
/* the request type is set by the superclass */
|
2005-05-24 02:06:50 +04:00
|
|
|
request->req_recv.req_base.req_ompi.req_free = mca_pml_ob1_recv_request_free;
|
|
|
|
request->req_recv.req_base.req_ompi.req_cancel = mca_pml_ob1_recv_request_cancel;
|
2006-03-16 01:53:41 +03:00
|
|
|
request->req_rdma_cnt = 0;
|
2007-07-30 16:50:38 +04:00
|
|
|
OBJ_CONSTRUCT(&request->lock, opal_mutex_t);
|
2005-05-24 02:06:50 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
OBJ_CLASS_INSTANCE(
|
|
|
|
mca_pml_ob1_recv_request_t,
|
|
|
|
mca_pml_base_recv_request_t,
|
|
|
|
mca_pml_ob1_recv_request_construct,
|
2006-09-29 03:57:49 +04:00
|
|
|
NULL);
|
2005-05-24 02:06:50 +04:00
|
|
|
|
2005-06-01 18:34:22 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Completion callback for receive-side control messages (ACK / PUT):
* progress any operations that are pending on this BTL.
|
|
|
|
*/
|
|
|
|
|
2006-12-02 01:26:14 +03:00
|
|
|
static void mca_pml_ob1_recv_ctl_completion( mca_btl_base_module_t* btl,
|
|
|
|
struct mca_btl_base_endpoint_t* ep,
|
|
|
|
struct mca_btl_base_descriptor_t* des,
|
|
|
|
int status )
|
2005-06-01 18:34:22 +04:00
|
|
|
{
|
2005-08-12 06:41:14 +04:00
|
|
|
mca_bml_base_btl_t* bml_btl = (mca_bml_base_btl_t*)des->des_context;
|
2007-12-09 16:58:17 +03:00
|
|
|
|
2006-07-20 18:44:35 +04:00
|
|
|
MCA_PML_OB1_PROGRESS_PENDING(bml_btl);
|
2005-06-01 18:34:22 +04:00
|
|
|
}
|
|
|
|
|
2005-08-17 22:23:38 +04:00
|
|
|
/*
|
|
|
|
* Put operation has completed remotely - update request status
|
|
|
|
*/
|
|
|
|
|
2006-12-02 01:26:14 +03:00
|
|
|
static void mca_pml_ob1_put_completion( mca_btl_base_module_t* btl,
|
|
|
|
struct mca_btl_base_endpoint_t* ep,
|
|
|
|
struct mca_btl_base_descriptor_t* des,
|
|
|
|
int status )
|
2005-08-17 22:23:38 +04:00
|
|
|
{
|
|
|
|
mca_bml_base_btl_t* bml_btl = (mca_bml_base_btl_t*)des->des_context;
|
|
|
|
mca_pml_ob1_recv_request_t* recvreq = (mca_pml_ob1_recv_request_t*)des->des_cbdata;
|
2006-02-08 10:20:48 +03:00
|
|
|
size_t bytes_received = 0;
|
2005-08-24 03:05:01 +04:00
|
|
|
|
2007-07-11 03:45:23 +04:00
|
|
|
if( OPAL_LIKELY(status == OMPI_SUCCESS) ) {
|
2012-06-21 21:09:12 +04:00
|
|
|
bytes_received = mca_pml_ob1_compute_segment_length (btl->btl_seg_size,
|
|
|
|
(void *) des->des_dst,
|
|
|
|
des->des_dst_cnt, 0);
|
2007-06-03 12:31:58 +04:00
|
|
|
}
|
2005-08-18 21:06:35 +04:00
|
|
|
OPAL_THREAD_ADD_SIZE_T(&recvreq->req_pipeline_depth,-1);
|
2008-02-18 20:39:30 +03:00
|
|
|
|
2005-08-18 21:06:35 +04:00
|
|
|
mca_bml_base_free(bml_btl, des);
|
2005-08-24 03:05:01 +04:00
|
|
|
|
|
|
|
/* check completion status */
|
2007-09-18 20:18:47 +04:00
|
|
|
OPAL_THREAD_ADD_SIZE_T(&recvreq->req_bytes_received, bytes_received);
|
|
|
|
if(recv_request_pml_complete_check(recvreq) == false &&
|
|
|
|
recvreq->req_rdma_offset < recvreq->req_send_offset) {
|
2006-01-22 00:02:35 +03:00
|
|
|
/* schedule additional rdma operations */
|
2007-07-01 15:35:55 +04:00
|
|
|
mca_pml_ob1_recv_request_schedule(recvreq, bml_btl);
|
2005-08-24 03:05:01 +04:00
|
|
|
}
|
2006-07-20 18:44:35 +04:00
|
|
|
MCA_PML_OB1_PROGRESS_PENDING(bml_btl);
|
2005-08-17 22:23:38 +04:00
|
|
|
}
|
2005-06-01 18:34:22 +04:00
|
|
|
|
|
|
|
/*
 * Build and send an ACK control header for a rendezvous receive on the
 * given BTL.
 */
|
|
|
|
|
2006-07-20 18:44:35 +04:00
|
|
|
int mca_pml_ob1_recv_request_ack_send_btl(
|
|
|
|
ompi_proc_t* proc, mca_bml_base_btl_t* bml_btl,
|
2008-03-27 11:56:43 +03:00
|
|
|
uint64_t hdr_src_req, void *hdr_dst_req, uint64_t hdr_send_offset,
|
|
|
|
bool nordma)
|
2006-07-20 18:44:35 +04:00
|
|
|
{
|
|
|
|
mca_btl_base_descriptor_t* des;
|
|
|
|
mca_pml_ob1_ack_hdr_t* ack;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
/* allocate descriptor */
|
2007-12-09 16:58:17 +03:00
|
|
|
mca_bml_base_alloc(bml_btl, &des, MCA_BTL_NO_ORDER,
|
2008-02-18 20:39:30 +03:00
|
|
|
sizeof(mca_pml_ob1_ack_hdr_t),
|
2009-04-07 20:56:37 +04:00
|
|
|
MCA_BTL_DES_FLAGS_PRIORITY | MCA_BTL_DES_FLAGS_BTL_OWNERSHIP | MCA_BTL_DES_SEND_ALWAYS_CALLBACK);
|
2007-07-11 03:45:23 +04:00
|
|
|
if( OPAL_UNLIKELY(NULL == des) ) {
|
2006-07-20 18:44:35 +04:00
|
|
|
return OMPI_ERR_OUT_OF_RESOURCE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* fill out header */
|
2007-01-05 01:07:37 +03:00
|
|
|
ack = (mca_pml_ob1_ack_hdr_t*)des->des_src->seg_addr.pval;
|
2006-07-20 18:44:35 +04:00
|
|
|
ack->hdr_common.hdr_type = MCA_PML_OB1_HDR_TYPE_ACK;
|
2008-03-27 11:56:43 +03:00
|
|
|
ack->hdr_common.hdr_flags = nordma ? MCA_PML_OB1_HDR_FLAGS_NORDMA : 0;
|
2007-01-05 01:07:37 +03:00
|
|
|
ack->hdr_src_req.lval = hdr_src_req;
|
|
|
|
ack->hdr_dst_req.pval = hdr_dst_req;
|
2007-06-03 12:30:07 +04:00
|
|
|
ack->hdr_send_offset = hdr_send_offset;
|
2006-07-20 18:44:35 +04:00
|
|
|
|
2007-12-16 11:45:44 +03:00
|
|
|
ob1_hdr_hton(ack, MCA_PML_OB1_HDR_TYPE_ACK, proc);
|
2006-07-20 18:44:35 +04:00
|
|
|
|
|
|
|
/* initialize descriptor */
|
|
|
|
des->des_cbfunc = mca_pml_ob1_recv_ctl_completion;
|
|
|
|
|
2008-05-30 05:29:09 +04:00
|
|
|
rc = mca_bml_base_send(bml_btl, des, MCA_PML_OB1_HDR_TYPE_ACK);
|
2008-05-30 07:58:39 +04:00
|
|
|
if( OPAL_LIKELY( rc >= 0 ) ) {
|
|
|
|
return OMPI_SUCCESS;
|
2006-07-20 18:44:35 +04:00
|
|
|
}
|
2008-05-30 07:58:39 +04:00
|
|
|
mca_bml_base_free(bml_btl, des);
|
|
|
|
return OMPI_ERR_OUT_OF_RESOURCE;
|
2006-07-20 18:44:35 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
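/*
 * Decide how much of a rendezvous message will arrive by copy in/out
 * versus RDMA, record that decision in req_send_offset, and send the
 * ACK back to the sender (unless no ACK is needed).
 */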
static int mca_pml_ob1_recv_request_ack(
|
2005-06-01 18:34:22 +04:00
|
|
|
mca_pml_ob1_recv_request_t* recvreq,
|
2005-09-22 03:23:47 +04:00
|
|
|
mca_pml_ob1_rendezvous_hdr_t* hdr,
|
|
|
|
size_t bytes_received)
|
2005-06-01 18:34:22 +04:00
|
|
|
{
|
2006-02-05 09:13:07 +03:00
|
|
|
ompi_proc_t* proc = (ompi_proc_t*)recvreq->req_recv.req_base.req_proc;
|
2008-03-27 11:56:43 +03:00
|
|
|
mca_bml_base_endpoint_t* bml_endpoint = NULL;
|
2005-06-01 18:34:22 +04:00
|
|
|
|
2013-08-30 20:54:55 +04:00
|
|
|
bml_endpoint = (mca_bml_base_endpoint_t*) proc->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_BML];
|
2006-07-20 18:44:35 +04:00
|
|
|
|
2007-06-03 12:30:07 +04:00
|
|
|
/* by default copy everything */
|
|
|
|
recvreq->req_send_offset = bytes_received;
|
2005-09-22 03:23:47 +04:00
|
|
|
if(hdr->hdr_msg_length > bytes_received) {
|
2007-07-01 15:31:26 +04:00
|
|
|
size_t rdma_num = mca_bml_base_btl_array_get_size(&bml_endpoint->btl_rdma);
|
2005-09-13 02:28:23 +04:00
|
|
|
/*
|
|
|
|
* lookup request buffer to determine if memory is already
|
|
|
|
* registered.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if(opal_convertor_need_buffers(&recvreq->req_recv.req_base.req_convertor) == 0 &&
|
2007-07-01 15:31:26 +04:00
|
|
|
hdr->hdr_match.hdr_common.hdr_flags & MCA_PML_OB1_HDR_FLAGS_CONTIG &&
|
|
|
|
rdma_num != 0) {
|
2007-03-31 02:02:45 +04:00
|
|
|
unsigned char *base;
|
|
|
|
opal_convertor_get_current_pointer( &recvreq->req_recv.req_base.req_convertor, (void**)&(base) );
|
2007-08-28 11:43:06 +04:00
|
|
|
|
|
|
|
if(hdr->hdr_match.hdr_common.hdr_flags & MCA_PML_OB1_HDR_FLAGS_PIN)
|
|
|
|
recvreq->req_rdma_cnt = mca_pml_ob1_rdma_btls(bml_endpoint,
|
|
|
|
base, recvreq->req_recv.req_bytes_packed,
|
|
|
|
recvreq->req_rdma );
|
|
|
|
else
|
|
|
|
recvreq->req_rdma_cnt = 0;
|
2005-09-13 02:28:23 +04:00
|
|
|
|
|
|
|
/* memory is already registered on both sides */
|
2007-08-28 11:43:06 +04:00
|
|
|
if (recvreq->req_rdma_cnt != 0) {
|
2007-06-03 12:30:07 +04:00
|
|
|
recvreq->req_send_offset = hdr->hdr_msg_length;
|
2006-02-09 18:49:51 +03:00
|
|
|
/* are rdma devices available for the long rdma protocol? */
|
2007-07-01 15:31:26 +04:00
|
|
|
} else if(bml_endpoint->btl_send_limit < hdr->hdr_msg_length) {
|
2005-07-16 00:58:11 +04:00
|
|
|
/* use convertor to figure out the rdma offset for this request */
|
2007-06-03 12:30:07 +04:00
|
|
|
recvreq->req_send_offset = hdr->hdr_msg_length -
|
2007-06-21 11:12:40 +04:00
|
|
|
bml_endpoint->btl_pipeline_send_length;
|
2007-07-01 15:31:26 +04:00
|
|
|
|
|
|
|
if(recvreq->req_send_offset < bytes_received)
|
2007-06-03 12:30:07 +04:00
|
|
|
recvreq->req_send_offset = bytes_received;
|
2007-07-01 15:31:26 +04:00
|
|
|
|
|
|
|
/* use convertor to figure out the rdma offset for this
|
|
|
|
* request */
|
|
|
|
opal_convertor_set_position(&recvreq->req_recv.req_base.req_convertor,
|
2007-07-01 15:31:26 +04:00
|
|
|
&recvreq->req_send_offset);
|
|
|
|
|
|
|
|
recvreq->req_rdma_cnt =
|
|
|
|
mca_pml_ob1_rdma_pipeline_btls(bml_endpoint,
|
|
|
|
recvreq->req_send_offset - bytes_received,
|
|
|
|
recvreq->req_rdma);
|
2006-02-09 18:49:51 +03:00
|
|
|
}
|
2005-06-22 00:58:24 +04:00
|
|
|
}
|
2007-06-03 12:30:07 +04:00
|
|
|
/* nothing to send by copy in/out - no need to ack */
|
|
|
|
if(recvreq->req_send_offset == hdr->hdr_msg_length)
|
2006-12-17 15:26:41 +03:00
|
|
|
return OMPI_SUCCESS;
|
2005-06-23 23:24:44 +04:00
|
|
|
}
|
|
|
|
/* let the schedule function know there is no need to set the ACK flag */
|
|
|
|
recvreq->req_ack_sent = true;
|
2007-01-05 01:07:37 +03:00
|
|
|
return mca_pml_ob1_recv_request_ack_send(proc, hdr->hdr_src_req.lval,
|
2010-04-23 19:14:55 +04:00
|
|
|
recvreq, recvreq->req_send_offset,
|
|
|
|
recvreq->req_send_offset == bytes_received);
|
2005-06-01 18:34:22 +04:00
|
|
|
}
|
|
|
|
|
2005-08-17 22:23:38 +04:00
|
|
|
/**
|
|
|
|
* Return resources used by the RDMA
|
|
|
|
*/
|
2006-03-16 01:53:41 +03:00
|
|
|
|
2007-07-11 03:45:23 +04:00
|
|
|
static void mca_pml_ob1_rget_completion( mca_btl_base_module_t* btl,
|
|
|
|
struct mca_btl_base_endpoint_t* ep,
|
|
|
|
struct mca_btl_base_descriptor_t* des,
|
|
|
|
int status )
|
2005-08-17 22:23:38 +04:00
|
|
|
{
|
|
|
|
mca_bml_base_btl_t* bml_btl = (mca_bml_base_btl_t*)des->des_context;
|
|
|
|
mca_pml_ob1_rdma_frag_t* frag = (mca_pml_ob1_rdma_frag_t*)des->des_cbdata;
|
2006-08-24 20:38:08 +04:00
|
|
|
mca_pml_ob1_recv_request_t* recvreq = (mca_pml_ob1_recv_request_t*)frag->rdma_req;
|
2005-08-17 22:23:38 +04:00
|
|
|
|
2006-07-20 18:44:35 +04:00
|
|
|
/* check completion status */
|
2007-07-11 03:45:23 +04:00
|
|
|
if( OPAL_UNLIKELY(OMPI_SUCCESS != status) ) {
|
2006-07-20 18:44:35 +04:00
|
|
|
/* TSW - FIX */
|
2013-01-28 03:25:10 +04:00
|
|
|
OMPI_ERROR_LOG(status);
|
|
|
|
ompi_rte_abort(-1, NULL);
|
2005-08-17 22:23:38 +04:00
|
|
|
}
|
2006-03-16 01:53:41 +03:00
|
|
|
|
|
|
|
/* is receive request complete */
|
2007-09-18 20:18:47 +04:00
|
|
|
OPAL_THREAD_ADD_SIZE_T(&recvreq->req_bytes_received, frag->rdma_length);
|
2013-06-21 22:35:16 +04:00
|
|
|
if (recvreq->req_recv.req_bytes_packed <= recvreq->req_bytes_received) {
|
2012-08-07 11:15:21 +04:00
|
|
|
mca_pml_ob1_send_fin(recvreq->req_recv.req_base.req_proc,
|
2013-06-21 22:35:16 +04:00
|
|
|
bml_btl,
|
|
|
|
frag->rdma_hdr.hdr_rget.hdr_des,
|
|
|
|
des->order, 0);
|
2012-08-07 11:15:21 +04:00
|
|
|
}
|
|
|
|
|
2007-09-18 20:18:47 +04:00
|
|
|
recv_request_pml_complete_check(recvreq);
|
2006-03-16 01:53:41 +03:00
|
|
|
|
2006-07-20 18:44:35 +04:00
|
|
|
MCA_PML_OB1_RDMA_FRAG_RETURN(frag);
|
|
|
|
|
|
|
|
MCA_PML_OB1_PROGRESS_PENDING(bml_btl);
|
2005-08-17 22:23:38 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-04-25 00:18:56 +04:00
|
|
|
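/*
 * The BTL cannot satisfy this transfer with an RDMA GET, so fall back to
 * the PUT protocol: send a PUT control header carrying our destination
 * segments so the sender pushes the data instead.
 */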
static int mca_pml_ob1_init_get_fallback (mca_pml_ob1_rdma_frag_t *frag,
|
|
|
|
mca_btl_base_descriptor_t *dst) {
|
|
|
|
mca_pml_ob1_recv_request_t *recvreq = (mca_pml_ob1_recv_request_t *) frag->rdma_req;
|
|
|
|
mca_bml_base_btl_t *bml_btl = frag->rdma_bml;
|
|
|
|
mca_btl_base_descriptor_t *ctl;
|
|
|
|
mca_pml_ob1_rdma_hdr_t *hdr;
|
2012-06-21 21:09:12 +04:00
|
|
|
size_t seg_size;
|
2012-04-25 00:18:56 +04:00
|
|
|
int rc;
|
|
|
|
|
2012-06-21 21:09:12 +04:00
|
|
|
seg_size = bml_btl->btl->btl_seg_size * dst->des_dst_cnt;
|
2012-04-25 00:18:56 +04:00
|
|
|
|
2012-06-21 21:09:12 +04:00
|
|
|
/* prepare a descriptor for rdma control message */
|
|
|
|
mca_bml_base_alloc (bml_btl, &ctl, MCA_BTL_NO_ORDER, sizeof (mca_pml_ob1_rdma_hdr_t) + seg_size,
|
2012-04-25 00:18:56 +04:00
|
|
|
MCA_BTL_DES_FLAGS_PRIORITY | MCA_BTL_DES_FLAGS_BTL_OWNERSHIP |
|
|
|
|
MCA_BTL_DES_SEND_ALWAYS_CALLBACK);
|
|
|
|
if (OPAL_UNLIKELY(NULL == ctl)) {
|
|
|
|
return OMPI_ERR_OUT_OF_RESOURCE;
|
|
|
|
}
|
|
|
|
ctl->des_cbfunc = mca_pml_ob1_recv_ctl_completion;
|
|
|
|
|
|
|
|
/* fill in rdma header */
|
|
|
|
hdr = (mca_pml_ob1_rdma_hdr_t *) ctl->des_src->seg_addr.pval;
|
|
|
|
hdr->hdr_common.hdr_type = MCA_PML_OB1_HDR_TYPE_PUT;
|
|
|
|
hdr->hdr_common.hdr_flags =
|
|
|
|
(!recvreq->req_ack_sent) ? MCA_PML_OB1_HDR_TYPE_ACK : 0;
|
|
|
|
|
|
|
|
hdr->hdr_req = frag->rdma_hdr.hdr_rget.hdr_rndv.hdr_src_req;
|
|
|
|
hdr->hdr_rdma_offset = recvreq->req_rdma_offset;
|
|
|
|
hdr->hdr_des.pval = dst;
|
2012-06-14 21:29:58 +04:00
|
|
|
hdr->hdr_recv_req.pval = recvreq;
|
2012-04-25 00:18:56 +04:00
|
|
|
|
|
|
|
hdr->hdr_seg_cnt = dst->des_dst_cnt;
|
|
|
|
|
2012-06-21 21:09:12 +04:00
|
|
|
/* copy segments */
|
|
|
|
memcpy (hdr + 1, dst->des_dst, seg_size);
|
2012-04-25 00:18:56 +04:00
|
|
|
|
|
|
|
dst->des_cbfunc = mca_pml_ob1_put_completion;
|
|
|
|
dst->des_cbdata = recvreq;
|
|
|
|
|
|
|
|
if (!recvreq->req_ack_sent)
|
|
|
|
recvreq->req_ack_sent = true;
|
|
|
|
|
|
|
|
/* send rdma request to peer */
|
|
|
|
rc = mca_bml_base_send (bml_btl, ctl, MCA_PML_OB1_HDR_TYPE_PUT);
|
|
|
|
if (OPAL_UNLIKELY(rc < 0)) {
|
|
|
|
mca_bml_base_free (bml_btl, ctl);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
return OMPI_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2005-08-17 22:23:38 +04:00
|
|
|
/*
 * Issue an RDMA GET for one fragment of the rendezvous message; queue the
 * fragment for retry if resources are not currently available.
 */
|
2007-07-11 02:16:38 +04:00
|
|
|
int mca_pml_ob1_recv_request_get_frag( mca_pml_ob1_rdma_frag_t* frag )
|
2005-08-17 22:23:38 +04:00
|
|
|
{
|
2006-08-24 20:38:08 +04:00
|
|
|
mca_pml_ob1_recv_request_t* recvreq = (mca_pml_ob1_recv_request_t*)frag->rdma_req;
|
2007-05-09 16:11:51 +04:00
|
|
|
mca_bml_base_btl_t* bml_btl = frag->rdma_bml;
|
2005-08-17 22:23:38 +04:00
|
|
|
mca_btl_base_descriptor_t* descriptor;
|
2006-07-20 18:44:35 +04:00
|
|
|
size_t save_size = frag->rdma_length;
|
2005-08-17 22:23:38 +04:00
|
|
|
int rc;
|
|
|
|
|
|
|
|
/* prepare descriptor */
|
2007-07-11 02:16:38 +04:00
|
|
|
mca_bml_base_prepare_dst( bml_btl,
|
|
|
|
NULL,
|
|
|
|
&recvreq->req_recv.req_base.req_convertor,
|
|
|
|
MCA_BTL_NO_ORDER,
|
|
|
|
0,
|
2007-12-09 17:08:01 +03:00
|
|
|
&frag->rdma_length,
|
2011-12-01 01:37:23 +04:00
|
|
|
MCA_BTL_DES_FLAGS_BTL_OWNERSHIP | MCA_BTL_DES_SEND_ALWAYS_CALLBACK |
|
2012-04-25 00:18:56 +04:00
|
|
|
MCA_BTL_DES_FLAGS_GET,
|
2007-07-11 02:16:38 +04:00
|
|
|
&descriptor );
|
|
|
|
if( OPAL_UNLIKELY(NULL == descriptor) ) {
|
2012-04-25 00:18:56 +04:00
|
|
|
if (frag->retries < mca_pml_ob1.rdma_retries_limit) {
|
|
|
|
frag->rdma_length = save_size;
|
|
|
|
OPAL_THREAD_LOCK(&mca_pml_ob1.lock);
|
|
|
|
opal_list_append(&mca_pml_ob1.rdma_pending, (opal_list_item_t*)frag);
|
|
|
|
OPAL_THREAD_UNLOCK(&mca_pml_ob1.lock);
|
|
|
|
return OMPI_ERR_OUT_OF_RESOURCE;
|
|
|
|
} else {
|
|
|
|
ompi_proc_t *proc = (ompi_proc_t *) recvreq->req_recv.req_base.req_proc;
|
|
|
|
|
|
|
|
/* tell peer to fall back on send */
|
|
|
|
recvreq->req_send_offset = 0;
|
|
|
|
rc = mca_pml_ob1_recv_request_ack_send(proc, frag->rdma_hdr.hdr_rget.hdr_rndv.hdr_src_req.lval,
|
|
|
|
recvreq, recvreq->req_send_offset, true);
|
|
|
|
MCA_PML_OB1_RDMA_FRAG_RETURN(frag);
|
|
|
|
return rc;
|
|
|
|
}
|
2005-08-17 22:23:38 +04:00
|
|
|
}
|
|
|
|
|
2012-06-21 21:09:12 +04:00
|
|
|
descriptor->des_src = (mca_btl_base_segment_t *) frag->rdma_segs;
|
2006-07-20 18:44:35 +04:00
|
|
|
descriptor->des_src_cnt = frag->rdma_hdr.hdr_rdma.hdr_seg_cnt;
|
2005-08-17 22:23:38 +04:00
|
|
|
descriptor->des_cbfunc = mca_pml_ob1_rget_completion;
|
2006-07-20 18:44:35 +04:00
|
|
|
descriptor->des_cbdata = frag;
|
2005-08-17 22:23:38 +04:00
|
|
|
|
2006-07-20 18:44:35 +04:00
|
|
|
PERUSE_TRACE_COMM_OMPI_EVENT(PERUSE_COMM_REQ_XFER_CONTINUE,
|
|
|
|
&(recvreq->req_recv.req_base),
|
|
|
|
frag->rdma_length, PERUSE_RECV);
|
2006-06-26 23:00:07 +04:00
|
|
|
|
2005-08-17 22:23:38 +04:00
|
|
|
/* queue up get request */
|
2007-07-11 03:45:23 +04:00
|
|
|
rc = mca_bml_base_get(bml_btl,descriptor);
|
|
|
|
if( OPAL_UNLIKELY(OMPI_SUCCESS != rc) ) {
|
2012-04-25 00:18:56 +04:00
|
|
|
if (OPAL_UNLIKELY(OMPI_ERR_NOT_AVAILABLE == rc)) {
|
|
|
|
/* get isn't supported for this transfer. tell peer to fall back on put */
|
|
|
|
rc = mca_pml_ob1_init_get_fallback (frag, descriptor);
|
|
|
|
}
|
|
|
|
|
2012-04-06 18:23:13 +04:00
|
|
|
if(OMPI_ERR_OUT_OF_RESOURCE == rc) {
|
2006-07-20 18:44:35 +04:00
|
|
|
mca_bml_base_free(bml_btl, descriptor);
|
|
|
|
OPAL_THREAD_LOCK(&mca_pml_ob1.lock);
|
|
|
|
opal_list_append(&mca_pml_ob1.rdma_pending,
|
|
|
|
(opal_list_item_t*)frag);
|
|
|
|
OPAL_THREAD_UNLOCK(&mca_pml_ob1.lock);
|
|
|
|
return OMPI_ERR_OUT_OF_RESOURCE;
|
2012-04-25 00:18:56 +04:00
|
|
|
} else if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
|
2013-01-28 03:25:10 +04:00
|
|
|
OMPI_ERROR_LOG(rc);
|
|
|
|
ompi_rte_abort(-1, NULL);
|
2006-07-20 18:44:35 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return OMPI_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2008-05-30 05:29:09 +04:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Update the recv request status to reflect the number of bytes
|
|
|
|
* received and actually delivered to the application.
|
|
|
|
*/
|
|
|
|
|
|
|
|
void mca_pml_ob1_recv_request_progress_frag( mca_pml_ob1_recv_request_t* recvreq,
|
|
|
|
mca_btl_base_module_t* btl,
|
|
|
|
mca_btl_base_segment_t* segments,
|
|
|
|
size_t num_segments )
|
|
|
|
{
|
2012-06-21 21:09:12 +04:00
|
|
|
size_t bytes_received, data_offset = 0;
|
2008-08-06 17:46:23 +04:00
|
|
|
size_t bytes_delivered __opal_attribute_unused__; /* is being set to zero in MCA_PML_OB1_RECV_REQUEST_UNPACK */
|
2008-05-30 05:29:09 +04:00
|
|
|
mca_pml_ob1_hdr_t* hdr = (mca_pml_ob1_hdr_t*)segments->seg_addr.pval;
|
|
|
|
|
2012-06-21 21:09:12 +04:00
|
|
|
bytes_received = mca_pml_ob1_compute_segment_length_base (segments, num_segments,
|
|
|
|
sizeof(mca_pml_ob1_frag_hdr_t));
|
2008-05-30 05:29:09 +04:00
|
|
|
data_offset = hdr->hdr_frag.hdr_frag_offset;
|
|
|
|
/*
|
2012-07-25 16:42:31 +04:00
|
|
|
* Make user buffer accessible(defined) before unpacking.
|
2008-05-30 05:29:09 +04:00
|
|
|
*/
|
|
|
|
MEMCHECKER(
|
|
|
|
memchecker_call(&opal_memchecker_base_mem_defined,
|
|
|
|
recvreq->req_recv.req_base.req_addr,
|
|
|
|
recvreq->req_recv.req_base.req_count,
|
|
|
|
recvreq->req_recv.req_base.req_datatype);
|
|
|
|
);
|
|
|
|
MCA_PML_OB1_RECV_REQUEST_UNPACK( recvreq,
|
|
|
|
segments,
|
|
|
|
num_segments,
|
|
|
|
sizeof(mca_pml_ob1_frag_hdr_t),
|
|
|
|
data_offset,
|
|
|
|
bytes_received,
|
|
|
|
bytes_delivered );
|
|
|
|
/*
|
|
|
|
* Unpacking finished, make the user buffer inaccessible again.
|
|
|
|
*/
|
|
|
|
MEMCHECKER(
|
|
|
|
memchecker_call(&opal_memchecker_base_mem_noaccess,
|
|
|
|
recvreq->req_recv.req_base.req_addr,
|
|
|
|
recvreq->req_recv.req_base.req_count,
|
|
|
|
recvreq->req_recv.req_base.req_datatype);
|
|
|
|
);
|
|
|
|
|
|
|
|
OPAL_THREAD_ADD_SIZE_T(&recvreq->req_bytes_received, bytes_received);
|
|
|
|
/* check completion status */
|
|
|
|
if(recv_request_pml_complete_check(recvreq) == false &&
|
|
|
|
recvreq->req_rdma_offset < recvreq->req_send_offset) {
|
|
|
|
/* schedule additional rdma operations */
|
|
|
|
mca_pml_ob1_recv_request_schedule(recvreq, NULL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-11-01 16:19:40 +04:00
|
|
|
#if OPAL_CUDA_SUPPORT /* CUDA_ASYNC_RECV */
|
2013-01-18 02:34:43 +04:00
|
|
|
/**
|
|
|
|
* This function is basically the first half of the code in the
|
|
|
|
* mca_pml_ob1_recv_request_progress_frag function. This fires off
|
|
|
|
* the asynchronous copy and returns. Unused fields in the descriptor
|
|
|
|
* are used to pass extra information for when the asynchronous copy
|
|
|
|
* completes. No memchecker support in this function as copies are
|
|
|
|
* happening asynchronously.
|
|
|
|
*/
|
|
|
|
void mca_pml_ob1_recv_request_frag_copy_start( mca_pml_ob1_recv_request_t* recvreq,
|
|
|
|
mca_btl_base_module_t* btl,
|
|
|
|
mca_btl_base_segment_t* segments,
|
|
|
|
size_t num_segments,
|
|
|
|
mca_btl_base_descriptor_t* des)
|
|
|
|
{
|
|
|
|
int result;
|
|
|
|
size_t bytes_received = 0, data_offset = 0;
|
|
|
|
size_t bytes_delivered __opal_attribute_unused__; /* is being set to zero in MCA_PML_OB1_RECV_REQUEST_UNPACK */
|
|
|
|
mca_pml_ob1_hdr_t* hdr = (mca_pml_ob1_hdr_t*)segments->seg_addr.pval;
|
|
|
|
|
|
|
|
OPAL_OUTPUT((-1, "start_frag_copy frag=%p", (void *)des));
|
|
|
|
|
|
|
|
bytes_received = mca_pml_ob1_compute_segment_length_base (segments, num_segments,
|
|
|
|
sizeof(mca_pml_ob1_frag_hdr_t));
|
|
|
|
data_offset = hdr->hdr_frag.hdr_frag_offset;
|
|
|
|
|
|
|
|
MCA_PML_OB1_RECV_REQUEST_UNPACK( recvreq,
|
|
|
|
segments,
|
|
|
|
num_segments,
|
|
|
|
sizeof(mca_pml_ob1_frag_hdr_t),
|
|
|
|
data_offset,
|
|
|
|
bytes_received,
|
|
|
|
bytes_delivered );
|
|
|
|
/* Store the receive request in unused context pointer. */
|
|
|
|
des->des_context = (void *)recvreq;
|
|
|
|
/* Store the amount of bytes in unused src count value */
|
|
|
|
des->des_src_cnt = bytes_delivered;
|
|
|
|
/* Then record an event that will get triggered by a PML progress call which
|
|
|
|
* checks the stream events. If we get an error, abort. Should get message
|
|
|
|
* from CUDA code about what went wrong. */
|
|
|
|
result = mca_common_cuda_record_htod_event("pml", des);
|
|
|
|
if (OMPI_SUCCESS != result) {
|
|
|
|
opal_output(0, "%s:%d FATAL", __FILE__, __LINE__);
|
2014-01-23 01:36:24 +04:00
|
|
|
ompi_rte_abort(-1, NULL);
|
2013-01-18 02:34:43 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* This function is basically the second half of the code in the
|
|
|
|
* mca_pml_ob1_recv_request_progress_frag function. The number of
|
|
|
|
* bytes delivered is updated. Then a call is made into the BTL so it
|
|
|
|
* can free the fragment that held that data. This is currently
|
|
|
|
* called directly by the common CUDA code. No memchecker support
|
|
|
|
* in this function as copies are happening asynchronously.
|
|
|
|
*/
|
|
|
|
void mca_pml_ob1_recv_request_frag_copy_finished( mca_btl_base_module_t* btl,
|
|
|
|
struct mca_btl_base_endpoint_t* ep,
|
|
|
|
struct mca_btl_base_descriptor_t* des,
|
|
|
|
int status )
|
|
|
|
{
|
|
|
|
mca_pml_ob1_recv_request_t* recvreq = (mca_pml_ob1_recv_request_t*)des->des_context;
|
|
|
|
size_t bytes_received = des->des_src_cnt;
|
|
|
|
|
|
|
|
OPAL_OUTPUT((-1, "frag_copy_finished (delivered=%d), frag=%p", (int)bytes_received, (void *)des));
|
|
|
|
/* Call into the BTL so it can free the descriptor. At this point, it is
|
|
|
|
* known that the data has been copied out of the descriptor. */
|
|
|
|
des->des_cbfunc(NULL, (struct mca_btl_base_endpoint_t *)des->des_cbdata, des, 0);
|
|
|
|
|
|
|
|
OPAL_THREAD_ADD_SIZE_T(&recvreq->req_bytes_received, bytes_received);
|
|
|
|
|
|
|
|
/* check completion status */
|
|
|
|
if(recv_request_pml_complete_check(recvreq) == false &&
|
|
|
|
recvreq->req_rdma_offset < recvreq->req_send_offset) {
|
|
|
|
/* schedule additional rdma operations */
|
|
|
|
mca_pml_ob1_recv_request_schedule(recvreq, NULL);
|
|
|
|
}
|
|
|
|
}
|
2013-11-01 16:19:40 +04:00
|
|
|
#endif /* OPAL_CUDA_SUPPORT */
|
2013-01-18 02:34:43 +04:00
|
|
|
|
2008-05-30 05:29:09 +04:00
|
|
|
/*
|
|
|
|
* Start the RDMA GET protocol for a matched rendezvous receive: fall back
* to copy in/out when the receive buffer is non-contiguous, otherwise
* issue one or more RDMA GET fragments.
|
|
|
|
*/
|
|
|
|
|
|
|
|
void mca_pml_ob1_recv_request_progress_rget( mca_pml_ob1_recv_request_t* recvreq,
|
|
|
|
mca_btl_base_module_t* btl,
|
|
|
|
mca_btl_base_segment_t* segments,
|
|
|
|
size_t num_segments )
|
2006-07-20 18:44:35 +04:00
|
|
|
{
|
2008-05-30 05:29:09 +04:00
|
|
|
mca_pml_ob1_rget_hdr_t* hdr = (mca_pml_ob1_rget_hdr_t*)segments->seg_addr.pval;
|
2006-07-20 18:44:35 +04:00
|
|
|
mca_bml_base_endpoint_t* bml_endpoint = NULL;
|
2012-08-07 11:15:21 +04:00
|
|
|
size_t bytes_remaining, prev_sent, offset;
|
2012-08-13 20:26:06 +04:00
|
|
|
mca_btl_base_segment_t *r_segments;
|
|
|
|
mca_pml_ob1_rdma_frag_t *frag;
|
|
|
|
mca_bml_base_btl_t *rdma_bml;
|
|
|
|
int rc;
|
2008-05-30 05:29:09 +04:00
|
|
|
|
2012-08-07 11:15:21 +04:00
|
|
|
prev_sent = offset = 0;
|
|
|
|
bytes_remaining = hdr->hdr_rndv.hdr_msg_length;
|
2008-05-30 05:29:09 +04:00
|
|
|
recvreq->req_recv.req_bytes_packed = hdr->hdr_rndv.hdr_msg_length;
|
|
|
|
|
|
|
|
MCA_PML_OB1_RECV_REQUEST_MATCHED(recvreq, &hdr->hdr_rndv.hdr_match);
|
2007-04-12 02:03:06 +04:00
|
|
|
|
|
|
|
/* if receive buffer is not contiguous we can't just RDMA read into it, so
|
|
|
|
* fall back to copy in/out protocol. It is a pity because buffer on the
|
2008-05-30 05:29:09 +04:00
|
|
|
* sender side is already registered. We need to be smarter here, perhaps
|
2007-04-12 02:03:06 +04:00
|
|
|
* do a couple of RDMA reads */
|
2012-08-07 11:15:21 +04:00
|
|
|
if (opal_convertor_need_buffers(&recvreq->req_recv.req_base.req_convertor) == true) {
|
2013-11-01 16:19:40 +04:00
|
|
|
#if OPAL_CUDA_SUPPORT
|
2012-08-13 20:26:06 +04:00
|
|
|
if (mca_pml_ob1_cuda_need_buffers(recvreq, btl))
|
2013-11-01 16:19:40 +04:00
|
|
|
#endif /* OPAL_CUDA_SUPPORT */
|
2012-08-13 20:26:06 +04:00
|
|
|
{
|
2012-02-24 06:13:33 +04:00
|
|
|
mca_pml_ob1_recv_request_ack(recvreq, &hdr->hdr_rndv, 0);
|
|
|
|
return;
|
|
|
|
}
|
2007-04-12 02:03:06 +04:00
|
|
|
}
|
|
|
|
|
2012-08-13 20:26:06 +04:00
|
|
|
/* lookup bml datastructures */
|
2013-08-30 20:54:55 +04:00
|
|
|
bml_endpoint = (mca_bml_base_endpoint_t*)recvreq->req_recv.req_base.req_proc->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_BML];
|
2012-08-13 20:26:06 +04:00
|
|
|
rdma_bml = mca_bml_base_btl_array_find(&bml_endpoint->btl_rdma, btl);
|
|
|
|
|
2013-11-01 16:19:40 +04:00
|
|
|
#if OPAL_CUDA_SUPPORT
|
2012-08-13 20:26:06 +04:00
|
|
|
if (OPAL_UNLIKELY(NULL == rdma_bml)) {
|
|
|
|
if (recvreq->req_recv.req_base.req_convertor.flags & CONVERTOR_CUDA) {
|
2013-08-22 01:00:09 +04:00
|
|
|
mca_bml_base_btl_t *bml_btl;
|
|
|
|
bml_btl = mca_bml_base_btl_array_find(&bml_endpoint->btl_send, btl);
|
2012-08-13 20:26:06 +04:00
|
|
|
/* Check to see if this is a CUDA get */
|
2013-08-22 01:00:09 +04:00
|
|
|
if (bml_btl->btl_flags & MCA_BTL_FLAGS_CUDA_GET) {
|
|
|
|
rdma_bml = bml_btl;
|
2012-08-13 20:26:06 +04:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/* Just default back to send and receive. Must be mix of GPU and HOST memory. */
|
|
|
|
mca_pml_ob1_recv_request_ack(recvreq, &hdr->hdr_rndv, 0);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2013-11-01 16:19:40 +04:00
|
|
|
#endif /* OPAL_CUDA_SUPPORT */
|
2012-08-13 20:26:06 +04:00
|
|
|
|
|
|
|
if (OPAL_UNLIKELY(NULL == rdma_bml)) {
|
|
|
|
opal_output(0, "[%s:%d] invalid bml for rdma get", __FILE__, __LINE__);
|
2013-01-28 03:25:10 +04:00
|
|
|
ompi_rte_abort(-1, NULL);
|
2012-08-13 20:26:06 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
bytes_remaining = mca_pml_ob1_compute_segment_length_remote (btl->btl_seg_size, (void *)(hdr + 1),
|
|
|
|
hdr->hdr_seg_cnt, recvreq->req_recv.req_base.req_proc);
|
2006-07-20 18:44:35 +04:00
|
|
|
|
2012-08-07 11:15:21 +04:00
|
|
|
/* The while loop adds a fragmentation mechanism. The variable bytes_remaining holds the num
|
|
|
|
* of bytes left to be sent. In each iteration we send the max possible bytes supported
|
|
|
|
* by the HCA. The field frag->rdma_length holds the actual num of bytes that were
|
|
|
|
* sent in each iteration. We subtract this number from bytes_remaining and continue to
|
|
|
|
* the next iteration with the updated size.
|
|
|
|
* Also - In each iteration we update the location in the buffer to be used for writing
|
|
|
|
* the message, and the location to read from. This is done using the offset variable that
|
|
|
|
* accumulates the number of bytes that were sent so far. */
|
|
|
|
while (bytes_remaining > 0) {
|
2012-08-13 20:26:06 +04:00
|
|
|
/* allocate/initialize a fragment */
|
2013-07-04 12:34:37 +04:00
|
|
|
MCA_PML_OB1_RDMA_FRAG_ALLOC(frag);
|
2012-08-07 11:15:21 +04:00
|
|
|
if (OPAL_UNLIKELY(NULL == frag)) {
|
|
|
|
/* GLB - FIX */
|
2013-07-04 12:34:37 +04:00
|
|
|
OMPI_ERROR_LOG(OMPI_ERR_OUT_OF_RESOURCE);
|
2013-01-28 03:25:10 +04:00
|
|
|
ompi_rte_abort(-1, NULL);
|
2012-08-07 11:15:21 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
assert (btl->btl_seg_size * hdr->hdr_seg_cnt <= sizeof (frag->rdma_segs));
|
2012-06-21 21:09:12 +04:00
|
|
|
|
2012-08-07 11:15:21 +04:00
|
|
|
memcpy (frag->rdma_segs, hdr + 1, btl->btl_seg_size * hdr->hdr_seg_cnt);
|
2012-06-21 21:09:12 +04:00
|
|
|
|
2012-08-13 20:26:06 +04:00
|
|
|
/* update the read location -- NTH: note this will only work if there is exactly one
|
|
|
|
segment. TODO -- make this work with multiple segments */
|
2012-08-07 11:15:21 +04:00
|
|
|
r_segments = (mca_btl_base_segment_t *) frag->rdma_segs;
|
|
|
|
r_segments->seg_addr.lval += offset;
|
|
|
|
|
|
|
|
/* updating the write location */
|
|
|
|
OPAL_THREAD_LOCK(&recvreq->lock);
|
|
|
|
opal_convertor_set_position( &recvreq->req_recv.req_base.req_convertor, &offset);
|
|
|
|
OPAL_THREAD_UNLOCK(&recvreq->lock);
|
|
|
|
|
2012-08-13 20:26:06 +04:00
|
|
|
frag->rdma_bml = rdma_bml;
|
2012-04-25 00:18:56 +04:00
|
|
|
|
2012-08-07 11:15:21 +04:00
|
|
|
frag->rdma_hdr.hdr_rget = *hdr;
|
2012-08-13 20:26:06 +04:00
|
|
|
frag->retries = 0;
|
|
|
|
frag->rdma_req = recvreq;
|
|
|
|
frag->rdma_ep = bml_endpoint;
|
|
|
|
frag->rdma_state = MCA_PML_OB1_RDMA_GET;
|
|
|
|
frag->reg = NULL;
|
2012-08-07 11:15:21 +04:00
|
|
|
frag->rdma_length = bytes_remaining;
|
2012-08-13 20:26:06 +04:00
|
|
|
|
|
|
|
/* NTH: TODO -- handle error conditions gracefully */
|
|
|
|
rc = mca_pml_ob1_recv_request_get_frag(frag);
|
|
|
|
if (OMPI_SUCCESS != rc) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2012-08-07 11:15:21 +04:00
|
|
|
prev_sent = frag->rdma_length;
|
|
|
|
bytes_remaining -= prev_sent;
|
|
|
|
offset += prev_sent;
|
|
|
|
}
|
2005-08-17 22:23:38 +04:00
|
|
|
}
|
|
|
|
|
2005-05-24 02:06:50 +04:00
|
|
|
/*
|
|
|
|
* Update the recv request status to reflect the number of bytes
|
|
|
|
* received and actually delivered to the application.
|
|
|
|
*/
|
|
|
|
|
2008-05-30 05:29:09 +04:00
|
|
|
void mca_pml_ob1_recv_request_progress_rndv( mca_pml_ob1_recv_request_t* recvreq,
|
|
|
|
mca_btl_base_module_t* btl,
|
|
|
|
mca_btl_base_segment_t* segments,
|
|
|
|
size_t num_segments )
|
2005-05-24 02:06:50 +04:00
|
|
|
{
|
2005-06-08 23:10:15 +04:00
|
|
|
size_t bytes_received = 0;
|
2008-08-06 17:46:23 +04:00
|
|
|
size_t bytes_delivered __opal_attribute_unused__; /* is being set to zero in MCA_PML_OB1_RECV_REQUEST_UNPACK */
|
2005-06-09 00:37:19 +04:00
|
|
|
size_t data_offset = 0;
|
2007-01-05 01:07:37 +03:00
|
|
|
mca_pml_ob1_hdr_t* hdr = (mca_pml_ob1_hdr_t*)segments->seg_addr.pval;
|
2005-05-24 02:06:50 +04:00
|
|
|
|
2012-06-21 21:09:12 +04:00
|
|
|
bytes_received = mca_pml_ob1_compute_segment_length_base (segments, num_segments,
|
|
|
|
sizeof(mca_pml_ob1_rendezvous_hdr_t));
|
|
|
|
|
2008-05-30 05:29:09 +04:00
|
|
|
recvreq->req_recv.req_bytes_packed = hdr->hdr_rndv.hdr_msg_length;
|
2008-07-17 08:50:39 +04:00
|
|
|
recvreq->remote_req_send = hdr->hdr_rndv.hdr_src_req;
|
2008-05-30 05:29:09 +04:00
|
|
|
recvreq->req_rdma_offset = bytes_received;
|
|
|
|
MCA_PML_OB1_RECV_REQUEST_MATCHED(recvreq, &hdr->hdr_match);
|
|
|
|
mca_pml_ob1_recv_request_ack(recvreq, &hdr->hdr_rndv, bytes_received);
|
|
|
|
/**
|
|
|
|
* The PUT protocol does not attach any data to the original request.
|
|
|
|
* Therefore, we might want to avoid unpacking if there is nothing to
|
|
|
|
* unpack.
|
|
|
|
*/
|
|
|
|
if( 0 < bytes_received ) {
|
|
|
|
MEMCHECKER(
|
|
|
|
memchecker_call(&opal_memchecker_base_mem_defined,
|
|
|
|
recvreq->req_recv.req_base.req_addr,
|
|
|
|
recvreq->req_recv.req_base.req_count,
|
|
|
|
recvreq->req_recv.req_base.req_datatype);
|
|
|
|
);
|
|
|
|
MCA_PML_OB1_RECV_REQUEST_UNPACK( recvreq,
|
|
|
|
segments,
|
|
|
|
num_segments,
|
|
|
|
sizeof(mca_pml_ob1_rendezvous_hdr_t),
|
|
|
|
data_offset,
|
|
|
|
bytes_received,
|
|
|
|
bytes_delivered );
|
|
|
|
MEMCHECKER(
|
|
|
|
memchecker_call(&opal_memchecker_base_mem_noaccess,
|
|
|
|
recvreq->req_recv.req_base.req_addr,
|
|
|
|
recvreq->req_recv.req_base.req_count,
|
|
|
|
recvreq->req_recv.req_base.req_datatype);
|
|
|
|
);
|
2005-05-24 02:06:50 +04:00
|
|
|
}
|
2007-09-18 20:18:47 +04:00
|
|
|
OPAL_THREAD_ADD_SIZE_T(&recvreq->req_bytes_received, bytes_received);
|
2005-06-01 18:34:22 +04:00
|
|
|
/* check completion status */
|
2007-09-18 20:18:47 +04:00
|
|
|
if(recv_request_pml_complete_check(recvreq) == false &&
|
2008-05-30 05:29:09 +04:00
|
|
|
recvreq->req_rdma_offset < recvreq->req_send_offset) {
|
2006-01-22 00:02:35 +03:00
|
|
|
/* schedule additional rdma operations */
|
2007-07-01 15:35:55 +04:00
|
|
|
mca_pml_ob1_recv_request_schedule(recvreq, NULL);
|
2005-06-10 00:16:33 +04:00
|
|
|
}
|
2013-01-18 02:34:43 +04:00
|
|
|
|
2013-11-01 16:19:40 +04:00
|
|
|
#if OPAL_CUDA_SUPPORT /* CUDA_ASYNC_RECV */
|
2013-01-18 02:34:43 +04:00
|
|
|
/* If BTL supports it and this is a CUDA buffer being received into,
|
|
|
|
* have all subsequent FRAGS copied in asynchronously. */
|
|
|
|
if ((recvreq->req_recv.req_base.req_convertor.flags & CONVERTOR_CUDA) &&
|
|
|
|
(btl->btl_flags & MCA_BTL_FLAGS_CUDA_COPY_ASYNC_RECV)) {
|
|
|
|
void *strm = mca_common_cuda_get_htod_stream();
|
|
|
|
opal_cuda_set_copy_function_async(&recvreq->req_recv.req_base.req_convertor, strm);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2005-05-24 02:06:50 +04:00
|
|
|
}
|
|
|
|
|
2008-05-30 05:29:09 +04:00
|
|
|
/*
|
|
|
|
* Update the recv request status to reflect the number of bytes
|
|
|
|
* received and actually delivered to the application.
|
|
|
|
*/
|
|
|
|
void mca_pml_ob1_recv_request_progress_match( mca_pml_ob1_recv_request_t* recvreq,
|
|
|
|
mca_btl_base_module_t* btl,
|
|
|
|
mca_btl_base_segment_t* segments,
|
|
|
|
size_t num_segments )
|
|
|
|
{
|
2012-06-21 21:09:12 +04:00
|
|
|
size_t bytes_received, data_offset = 0;
|
2008-08-06 17:46:23 +04:00
|
|
|
size_t bytes_delivered __opal_attribute_unused__; /* is being set to zero in MCA_PML_OB1_RECV_REQUEST_UNPACK */
|
2008-05-30 05:29:09 +04:00
|
|
|
mca_pml_ob1_hdr_t* hdr = (mca_pml_ob1_hdr_t*)segments->seg_addr.pval;
|
|
|
|
|
2012-06-21 21:09:12 +04:00
|
|
|
bytes_received = mca_pml_ob1_compute_segment_length_base (segments, num_segments,
|
|
|
|
OMPI_PML_OB1_MATCH_HDR_LEN);
|
|
|
|
|
2008-05-30 05:29:09 +04:00
|
|
|
recvreq->req_recv.req_bytes_packed = bytes_received;
|
|
|
|
|
|
|
|
MCA_PML_OB1_RECV_REQUEST_MATCHED(recvreq, &hdr->hdr_match);
|
|
|
|
/*
|
|
|
|
* Make user buffer accessible (defined) before unpacking.
|
|
|
|
*/
|
|
|
|
MEMCHECKER(
|
|
|
|
memchecker_call(&opal_memchecker_base_mem_defined,
|
|
|
|
recvreq->req_recv.req_base.req_addr,
|
|
|
|
recvreq->req_recv.req_base.req_count,
|
|
|
|
recvreq->req_recv.req_base.req_datatype);
|
|
|
|
);
|
|
|
|
MCA_PML_OB1_RECV_REQUEST_UNPACK( recvreq,
|
|
|
|
segments,
|
|
|
|
num_segments,
|
2008-05-30 07:58:39 +04:00
|
|
|
OMPI_PML_OB1_MATCH_HDR_LEN,
|
2008-05-30 05:29:09 +04:00
|
|
|
data_offset,
|
|
|
|
bytes_received,
|
|
|
|
bytes_delivered);
|
|
|
|
/*
|
|
|
|
* Unpacking finished, make the user buffer inaccessible again.
|
|
|
|
*/
|
|
|
|
MEMCHECKER(
|
|
|
|
memchecker_call(&opal_memchecker_base_mem_noaccess,
|
|
|
|
recvreq->req_recv.req_base.req_addr,
|
|
|
|
recvreq->req_recv.req_base.req_count,
|
|
|
|
recvreq->req_recv.req_base.req_datatype);
|
|
|
|
);
|
|
|
|
|
2008-07-17 08:50:39 +04:00
|
|
|
/*
|
|
|
|
* No need for atomic here, as we know there is only one fragment
|
|
|
|
* for this request.
|
|
|
|
*/
|
|
|
|
recvreq->req_bytes_received += bytes_received;
|
|
|
|
recv_request_pml_complete(recvreq);
|
2008-05-30 05:29:09 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-09-13 20:45:41 +04:00
|
|
|
/**
|
|
|
|
* Handle completion of a probe request
|
|
|
|
*/
|
|
|
|
|
2007-07-11 03:45:23 +04:00
|
|
|
void mca_pml_ob1_recv_request_matched_probe( mca_pml_ob1_recv_request_t* recvreq,
|
|
|
|
mca_btl_base_module_t* btl,
|
|
|
|
mca_btl_base_segment_t* segments,
|
|
|
|
size_t num_segments )
|
2005-09-13 20:45:41 +04:00
|
|
|
{
|
|
|
|
size_t bytes_packed = 0;
|
2007-01-05 01:07:37 +03:00
|
|
|
mca_pml_ob1_hdr_t* hdr = (mca_pml_ob1_hdr_t*)segments->seg_addr.pval;
|
2005-09-13 20:45:41 +04:00
|
|
|
|
|
|
|
switch(hdr->hdr_common.hdr_type) {
|
|
|
|
case MCA_PML_OB1_HDR_TYPE_MATCH:
|
2012-06-21 21:09:12 +04:00
|
|
|
bytes_packed = mca_pml_ob1_compute_segment_length_base (segments, num_segments,
|
|
|
|
OMPI_PML_OB1_MATCH_HDR_LEN);
|
2005-09-13 20:45:41 +04:00
|
|
|
break;
|
|
|
|
case MCA_PML_OB1_HDR_TYPE_RNDV:
|
|
|
|
case MCA_PML_OB1_HDR_TYPE_RGET:
|
|
|
|
|
|
|
|
bytes_packed = hdr->hdr_rndv.hdr_msg_length;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2005-11-30 20:57:59 +03:00
|
|
|
/* set completion status */
|
2005-09-13 20:45:41 +04:00
|
|
|
recvreq->req_recv.req_base.req_ompi.req_status.MPI_TAG = hdr->hdr_match.hdr_tag;
|
|
|
|
recvreq->req_recv.req_base.req_ompi.req_status.MPI_SOURCE = hdr->hdr_match.hdr_src;
|
2006-03-16 01:53:41 +03:00
|
|
|
recvreq->req_bytes_received = bytes_packed;
|
2009-04-10 20:36:20 +04:00
|
|
|
recvreq->req_bytes_expected = bytes_packed;
|
2012-02-06 21:35:21 +04:00
|
|
|
|
2007-09-18 20:18:47 +04:00
|
|
|
recv_request_pml_complete(recvreq);
|
2005-09-13 20:45:41 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-06-09 07:11:51 +04:00
|
|
|
/*
|
|
|
|
* Schedule RDMA protocol.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2008-10-01 01:02:37 +04:00
|
|
|
int mca_pml_ob1_recv_request_schedule_once( mca_pml_ob1_recv_request_t* recvreq,
|
|
|
|
mca_bml_base_btl_t *start_bml_btl )
|
2005-06-09 07:11:51 +04:00
|
|
|
{
|
2006-07-20 18:44:35 +04:00
|
|
|
mca_bml_base_btl_t* bml_btl;
|
2007-09-18 20:18:47 +04:00
|
|
|
int num_tries = recvreq->req_rdma_cnt, num_fail = 0;
|
|
|
|
size_t i, prev_bytes_remaining = 0;
|
2007-07-01 15:35:55 +04:00
|
|
|
size_t bytes_remaining = recvreq->req_send_offset -
|
2007-09-18 20:18:47 +04:00
|
|
|
recvreq->req_rdma_offset;
|
2007-07-01 15:35:55 +04:00
|
|
|
|
|
|
|
/* if starting bml_btl is provided schedule next fragment on it first */
|
|
|
|
if(start_bml_btl != NULL) {
|
|
|
|
for(i = 0; i < recvreq->req_rdma_cnt; i++) {
|
|
|
|
if(recvreq->req_rdma[i].bml_btl != start_bml_btl)
|
|
|
|
continue;
|
|
|
|
/* something left to be send? */
|
2007-07-11 03:45:23 +04:00
|
|
|
if( OPAL_LIKELY(recvreq->req_rdma[i].length) )
|
2007-07-01 15:35:55 +04:00
|
|
|
recvreq->req_rdma_idx = i;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2006-07-20 18:44:35 +04:00
|
|
|
|
2007-09-18 20:18:47 +04:00
|
|
|
while(bytes_remaining > 0 &&
|
|
|
|
recvreq->req_pipeline_depth < mca_pml_ob1.recv_pipeline_depth) {
|
2012-06-21 21:09:12 +04:00
|
|
|
size_t size, seg_size;
|
2007-09-18 20:18:47 +04:00
|
|
|
mca_pml_ob1_rdma_hdr_t* hdr;
|
|
|
|
mca_btl_base_descriptor_t* dst;
|
|
|
|
mca_btl_base_descriptor_t* ctl;
|
|
|
|
mca_mpool_base_registration_t * reg = NULL;
|
2008-10-01 01:02:37 +04:00
|
|
|
mca_btl_base_module_t* btl;
|
2007-09-18 20:18:47 +04:00
|
|
|
int rc, rdma_idx;
|
|
|
|
|
|
|
|
if(prev_bytes_remaining == bytes_remaining) {
|
|
|
|
if(++num_fail == num_tries) {
|
|
|
|
OPAL_THREAD_LOCK(&mca_pml_ob1.lock);
|
|
|
|
if(false == recvreq->req_pending) {
|
|
|
|
opal_list_append(&mca_pml_ob1.recv_pending,
|
|
|
|
(opal_list_item_t*)recvreq);
|
|
|
|
recvreq->req_pending = true;
|
2005-07-01 21:00:59 +04:00
|
|
|
}
|
2007-09-18 20:18:47 +04:00
|
|
|
OPAL_THREAD_UNLOCK(&mca_pml_ob1.lock);
|
|
|
|
return OMPI_ERR_OUT_OF_RESOURCE;
|
2006-07-20 18:44:35 +04:00
|
|
|
}
|
2007-09-18 20:18:47 +04:00
|
|
|
} else {
|
|
|
|
num_fail = 0;
|
|
|
|
prev_bytes_remaining = bytes_remaining;
|
|
|
|
}
|
2006-10-17 08:38:38 +04:00
|
|
|
|
2007-09-18 20:18:47 +04:00
|
|
|
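/* round-robin over the RDMA-capable BTLs registered with this request,
 * skipping entries that have nothing left to transfer */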
do {
|
|
|
|
rdma_idx = recvreq->req_rdma_idx;
|
|
|
|
bml_btl = recvreq->req_rdma[rdma_idx].bml_btl;
|
|
|
|
reg = recvreq->req_rdma[rdma_idx].btl_reg;
|
|
|
|
size = recvreq->req_rdma[rdma_idx].length;
|
|
|
|
if(++recvreq->req_rdma_idx >= recvreq->req_rdma_cnt)
|
|
|
|
recvreq->req_rdma_idx = 0;
|
|
|
|
} while(!size);
|
2008-10-01 01:02:37 +04:00
|
|
|
btl = bml_btl->btl;
|
2007-09-18 20:18:47 +04:00
|
|
|
|
|
|
|
/* make sure that we don't exceed the BTL max rdma size
|
|
|
|
* if memory is not pinned already */
|
2008-10-01 01:02:37 +04:00
|
|
|
if( (NULL == reg) && (btl->btl_rdma_pipeline_frag_size != 0) &&
|
|
|
|
(size > btl->btl_rdma_pipeline_frag_size)) {
|
|
|
|
size = btl->btl_rdma_pipeline_frag_size;
|
2007-09-18 20:18:47 +04:00
|
|
|
}
|
2007-08-27 15:41:42 +04:00
|
|
|
|
2007-09-18 20:18:47 +04:00
|
|
|
/* take lock to protect converter against concurrent access
|
|
|
|
* from unpack */
|
|
|
|
OPAL_THREAD_LOCK(&recvreq->lock);
|
|
|
|
        opal_convertor_set_position( &recvreq->req_recv.req_base.req_convertor,
                                     &recvreq->req_rdma_offset );
        /* prepare a descriptor for RDMA */
        mca_bml_base_prepare_dst(bml_btl, reg,
                                 &recvreq->req_recv.req_base.req_convertor,
                                 MCA_BTL_NO_ORDER, 0, &size, MCA_BTL_DES_FLAGS_BTL_OWNERSHIP |
                                 MCA_BTL_DES_FLAGS_PUT, &dst);
        OPAL_THREAD_UNLOCK(&recvreq->lock);

        if(OPAL_UNLIKELY(dst == NULL)) {
            continue;
        }

        dst->des_cbfunc = mca_pml_ob1_put_completion;
        dst->des_cbdata = recvreq;

        seg_size = btl->btl_seg_size * dst->des_dst_cnt;

        /* prepare a descriptor for rdma control message */
        mca_bml_base_alloc(bml_btl, &ctl, MCA_BTL_NO_ORDER, sizeof(mca_pml_ob1_rdma_hdr_t) + seg_size,
                           MCA_BTL_DES_FLAGS_PRIORITY | MCA_BTL_DES_FLAGS_BTL_OWNERSHIP | MCA_BTL_DES_SEND_ALWAYS_CALLBACK);

        if( OPAL_UNLIKELY(NULL == ctl) ) {
            mca_bml_base_free(bml_btl,dst);
            continue;
        }
        ctl->des_cbfunc = mca_pml_ob1_recv_ctl_completion;

        /* fill in rdma header */
        hdr = (mca_pml_ob1_rdma_hdr_t*)ctl->des_src->seg_addr.pval;
        hdr->hdr_common.hdr_type = MCA_PML_OB1_HDR_TYPE_PUT;
        hdr->hdr_common.hdr_flags =
            (!recvreq->req_ack_sent) ? MCA_PML_OB1_HDR_TYPE_ACK : 0;

        hdr->hdr_req = recvreq->remote_req_send;
        hdr->hdr_des.pval = dst;
        hdr->hdr_recv_req.pval = recvreq;
        hdr->hdr_rdma_offset = recvreq->req_rdma_offset;
        hdr->hdr_seg_cnt = dst->des_dst_cnt;

        /* copy segments */
        memmove (hdr + 1, dst->des_dst, seg_size);

        if(!recvreq->req_ack_sent)
            recvreq->req_ack_sent = true;
        ob1_hdr_hton(hdr, MCA_PML_OB1_HDR_TYPE_PUT, recvreq->req_recv.req_base.req_proc);
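        /* At this point the control message is laid out as
         *   [ mca_pml_ob1_rdma_hdr_t ][ seg_size bytes of des_dst segments ]
         * (see the memmove above); the sender will use the embedded segment
         * descriptors as the target of its RDMA put into our receive buffer. */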

        PERUSE_TRACE_COMM_OMPI_EVENT( PERUSE_COMM_REQ_XFER_CONTINUE,
                                      &(recvreq->req_recv.req_base), size,
                                      PERUSE_RECV);

        /* send rdma request to peer */
        rc = mca_bml_base_send(bml_btl, ctl, MCA_PML_OB1_HDR_TYPE_PUT);
        if( OPAL_LIKELY( rc >= 0 ) ) {
            /* update request state */
            recvreq->req_rdma_offset += size;
            OPAL_THREAD_ADD_SIZE_T(&recvreq->req_pipeline_depth, 1);
            recvreq->req_rdma[rdma_idx].length -= size;
            bytes_remaining -= size;
        } else {
            mca_bml_base_free(bml_btl,ctl);
            mca_bml_base_free(bml_btl,dst);
        }
    }

    return OMPI_SUCCESS;
}

#define IS_PROB_REQ(R) \
    ((MCA_PML_REQUEST_IPROBE == (R)->req_recv.req_base.req_type) || \
     (MCA_PML_REQUEST_PROBE == (R)->req_recv.req_base.req_type) || \
     (MCA_PML_REQUEST_IMPROBE == (R)->req_recv.req_base.req_type) || \
     (MCA_PML_REQUEST_MPROBE == (R)->req_recv.req_base.req_type))
#define IS_MPROB_REQ(R) \
    ((MCA_PML_REQUEST_IMPROBE == (R)->req_recv.req_base.req_type) || \
     (MCA_PML_REQUEST_MPROBE == (R)->req_recv.req_base.req_type))
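
/*
 * Usage note (illustrative only, not a code path in this file): IS_PROB_REQ()
 * is true for every probe flavor, while IS_MPROB_REQ() narrows that to the
 * matched-probe variants (MPI_Mprobe/MPI_Improbe), e.g.
 *
 *     if (IS_PROB_REQ(req) && !IS_MPROB_REQ(req)) {
 *         -- plain MPI_Probe/MPI_Iprobe: peek only, leave the fragment
 *         -- on the unexpected list
 *     }
 *
 * The matched-probe variants additionally dequeue the matched fragment, as
 * done further below in mca_pml_ob1_recv_req_start().
 */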

static inline void append_recv_req_to_queue(opal_list_t *queue,
                                            mca_pml_ob1_recv_request_t *req)
{
    if(OPAL_UNLIKELY(req->req_recv.req_base.req_type == MCA_PML_REQUEST_IPROBE ||
                     req->req_recv.req_base.req_type == MCA_PML_REQUEST_IMPROBE))
        return;

    opal_list_append(queue, (opal_list_item_t*)req);

    /**
     * We don't want to generate this kind of event for MPI_Probe. Hopefully,
     * the compiler will optimize out the empty if body in the case where
     * PERUSE support is not required by the user.
     */
    if(req->req_recv.req_base.req_type != MCA_PML_REQUEST_PROBE &&
       req->req_recv.req_base.req_type != MCA_PML_REQUEST_MPROBE) {
        PERUSE_TRACE_COMM_EVENT(PERUSE_COMM_REQ_INSERT_IN_POSTED_Q,
                                &(req->req_recv.req_base), PERUSE_RECV);
    }
}

/*
 * This routine tries to match a posted receive against the unexpected
 * fragments already received from a specific peer.  It returns the
 * matching fragment, or NULL if none is found; the caller is responsible
 * for removing the returned fragment from the unexpected list.  This
 * function has to be called with the communicator matching lock held.
 */
static mca_pml_ob1_recv_frag_t*
recv_req_match_specific_proc( const mca_pml_ob1_recv_request_t *req,
                              mca_pml_ob1_comm_proc_t *proc )
{
    opal_list_t* unexpected_frags = &proc->unexpected_frags;
    opal_list_item_t *i;
    mca_pml_ob1_recv_frag_t* frag;
    int tag = req->req_recv.req_base.req_tag;

    if(opal_list_get_size(unexpected_frags) == 0)
        return NULL;

    if( OMPI_ANY_TAG == tag ) {
        for (i = opal_list_get_first(unexpected_frags);
             i != opal_list_get_end(unexpected_frags);
             i = opal_list_get_next(i)) {
            frag = (mca_pml_ob1_recv_frag_t*)i;

            if( frag->hdr.hdr_match.hdr_tag >= 0 )
                return frag;
        }
    } else {
        for (i = opal_list_get_first(unexpected_frags);
             i != opal_list_get_end(unexpected_frags);
             i = opal_list_get_next(i)) {
            frag = (mca_pml_ob1_recv_frag_t*)i;

            if( frag->hdr.hdr_match.hdr_tag == tag )
                return frag;
        }
    }
    return NULL;
}
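
/*
 * Note on the OMPI_ANY_TAG branch above: only fragments with a non-negative
 * tag are eligible, because negative tag values are reserved for internal
 * traffic (e.g. collectives) that a user-level wildcard receive must never
 * match; a specific negative tag can still be matched explicitly through
 * the second branch.
 */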

/*
 * this routine is used to try and match a wild posted receive - where
 * wild is determined by the value assigned to the source process
 */
static mca_pml_ob1_recv_frag_t*
recv_req_match_wild( mca_pml_ob1_recv_request_t* req,
                     mca_pml_ob1_comm_proc_t **p)
{
    mca_pml_ob1_comm_t* comm = req->req_recv.req_base.req_comm->c_pml_comm;
    mca_pml_ob1_comm_proc_t* proc = comm->procs;
    size_t i;

    /*
     * Loop over all the outstanding messages to find one that matches.
     * There is an outer loop over lists of messages from each
     * process, then an inner loop over the messages from the
     * process.
     *
     * In order to avoid starvation do this in a round-robin fashion.
     */
    for (i = comm->last_probed + 1; i < comm->num_procs; i++) {
        mca_pml_ob1_recv_frag_t* frag;

        /* loop over messages from the current proc */
        if((frag = recv_req_match_specific_proc(req, &proc[i]))) {
            *p = &proc[i];
            comm->last_probed = i;
            req->req_recv.req_base.req_proc = proc[i].ompi_proc;
            prepare_recv_req_converter(req);
            return frag; /* match found */
        }
    }
    for (i = 0; i <= comm->last_probed; i++) {
        mca_pml_ob1_recv_frag_t* frag;

        /* loop over messages from the current proc */
        if((frag = recv_req_match_specific_proc(req, &proc[i]))) {
            *p = &proc[i];
            comm->last_probed = i;
            req->req_recv.req_base.req_proc = proc[i].ompi_proc;
            prepare_recv_req_converter(req);
            return frag; /* match found */
        }
    }

    *p = NULL;
    return NULL;
}
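
/*
 * The two loops above form a classic round-robin scan.  A minimal sketch of
 * the same pattern, with hypothetical names for illustration only:
 *
 *     for (i = last + 1; i < n; i++)        -- from last match to the end
 *         if (try_match(i)) { last = i; return hit; }
 *     for (i = 0; i <= last; i++)           -- wrap around to the start
 *         if (try_match(i)) { last = i; return hit; }
 *     return NULL;
 *
 * Resuming just past the previously matched peer keeps low-ranked peers from
 * monopolizing wildcard receives when several peers have unexpected messages
 * queued.
 */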

void mca_pml_ob1_recv_req_start(mca_pml_ob1_recv_request_t *req)
{
    mca_pml_ob1_comm_t* comm = req->req_recv.req_base.req_comm->c_pml_comm;
    mca_pml_ob1_comm_proc_t* proc;
    mca_pml_ob1_recv_frag_t* frag;
    opal_list_t *queue;
    mca_pml_ob1_hdr_t* hdr;

    /* init/re-init the request */
    req->req_lock = 0;
    req->req_pipeline_depth = 0;
    req->req_bytes_received = 0;
    req->req_bytes_expected = 0;
    /* What about req_rdma_cnt ? */
    req->req_rdma_idx = 0;
    req->req_pending = false;
    req->req_ack_sent = false;

    MCA_PML_BASE_RECV_START(&req->req_recv.req_base);

    OPAL_THREAD_LOCK(&comm->matching_lock);
    /**
     * The lapse of time between the ACTIVATE event and the SEARCH_UNEX one
     * includes the cost of the request lock.
     */
    PERUSE_TRACE_COMM_EVENT(PERUSE_COMM_SEARCH_UNEX_Q_BEGIN,
                            &(req->req_recv.req_base), PERUSE_RECV);

    /* assign sequence number */
    req->req_recv.req_base.req_sequence = comm->recv_sequence++;

    /* attempt to match posted recv */
    if(req->req_recv.req_base.req_peer == OMPI_ANY_SOURCE) {
        frag = recv_req_match_wild(req, &proc);
        queue = &comm->wild_receives;
#if !OPAL_ENABLE_HETEROGENEOUS_SUPPORT
        /* As we are in a homogeneous environment we know that all remote
         * architectures are exactly the same as the local one. Therefore,
         * we can safely construct the convertor based on the proc
         * information of rank 0.
         */
        if( NULL == frag ) {
            req->req_recv.req_base.req_proc = ompi_proc_local_proc;
            prepare_recv_req_converter(req);
        }
#endif  /* !OPAL_ENABLE_HETEROGENEOUS_SUPPORT */
    } else {
        proc = &comm->procs[req->req_recv.req_base.req_peer];
        req->req_recv.req_base.req_proc = proc->ompi_proc;
        frag = recv_req_match_specific_proc(req, proc);
        queue = &proc->specific_receives;
        /* wild card recv will be prepared on match */
        prepare_recv_req_converter(req);
    }

    if(OPAL_UNLIKELY(NULL == frag)) {
        PERUSE_TRACE_COMM_EVENT(PERUSE_COMM_SEARCH_UNEX_Q_END,
                                &(req->req_recv.req_base), PERUSE_RECV);
        /* We didn't find any matches.  Record this irecv so we can match
           it when the message comes in. */
        append_recv_req_to_queue(queue, req);
        req->req_match_received = false;
        OPAL_THREAD_UNLOCK(&comm->matching_lock);
    } else {
        if(OPAL_LIKELY(!IS_PROB_REQ(req))) {
            PERUSE_TRACE_COMM_EVENT(PERUSE_COMM_REQ_MATCH_UNEX,
                                    &(req->req_recv.req_base), PERUSE_RECV);

            hdr = (mca_pml_ob1_hdr_t*)frag->segments->seg_addr.pval;
            PERUSE_TRACE_MSG_EVENT(PERUSE_COMM_MSG_REMOVE_FROM_UNEX_Q,
                                   req->req_recv.req_base.req_comm,
                                   hdr->hdr_match.hdr_src,
                                   hdr->hdr_match.hdr_tag,
                                   PERUSE_RECV);
            PERUSE_TRACE_COMM_EVENT(PERUSE_COMM_SEARCH_UNEX_Q_END,
                                    &(req->req_recv.req_base), PERUSE_RECV);

            opal_list_remove_item(&proc->unexpected_frags,
                                  (opal_list_item_t*)frag);
            OPAL_THREAD_UNLOCK(&comm->matching_lock);

            switch(hdr->hdr_common.hdr_type) {
            case MCA_PML_OB1_HDR_TYPE_MATCH:
                mca_pml_ob1_recv_request_progress_match(req, frag->btl, frag->segments,
                                                        frag->num_segments);
                break;
            case MCA_PML_OB1_HDR_TYPE_RNDV:
                mca_pml_ob1_recv_request_progress_rndv(req, frag->btl, frag->segments,
                                                       frag->num_segments);
                break;
            case MCA_PML_OB1_HDR_TYPE_RGET:
                mca_pml_ob1_recv_request_progress_rget(req, frag->btl, frag->segments,
                                                       frag->num_segments);
                break;
            default:
                assert(0);
            }

            MCA_PML_OB1_RECV_FRAG_RETURN(frag);

        } else if (OPAL_UNLIKELY(IS_MPROB_REQ(req))) {
            /* Remove the fragment from the match list, as it's now
               matched.  Stash it somewhere in the request (which,
               yes, is a complete hack), where it will be plucked out
               during the end of mprobe.  The request will then be
               "recreated" as a receive request, and the frag will be
               restarted with this request during mrecv */
            opal_list_remove_item(&proc->unexpected_frags,
                                  (opal_list_item_t*)frag);
            OPAL_THREAD_UNLOCK(&comm->matching_lock);

            req->req_recv.req_base.req_addr = frag;
            mca_pml_ob1_recv_request_matched_probe(req, frag->btl,
                                                   frag->segments, frag->num_segments);

        } else {
            OPAL_THREAD_UNLOCK(&comm->matching_lock);
            mca_pml_ob1_recv_request_matched_probe(req, frag->btl,
                                                   frag->segments, frag->num_segments);
        }
    }
}
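
/*
 * Summary of the start path above (descriptive only): the request is
 * (re)initialized, a sequence number is assigned under the matching lock,
 * and the unexpected-fragment lists are searched.  If nothing matches, the
 * request is appended to the wild or per-peer posted queue; if a fragment
 * matches, probe-type requests only report it (matched probes also steal
 * the fragment), while real receives dispatch on the fragment header type
 * (MATCH / RNDV / RGET) to the corresponding progress routine and return
 * the fragment to its free list.
 */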