/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2007 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
|
2006-06-26 23:00:37 +04:00
|
|
|
|
2005-05-24 02:22:20 +04:00
|
|
|
#ifndef OMPI_PML_OB1_SEND_REQUEST_H
|
|
|
|
#define OMPI_PML_OB1_SEND_REQUEST_H
|
2005-05-24 02:06:50 +04:00
|
|
|
|
2006-02-12 04:33:29 +03:00
|
|
|
#include "ompi/mca/btl/btl.h"
|
|
|
|
#include "ompi/mca/pml/base/pml_base_sendreq.h"
|
|
|
|
#include "ompi/mca/mpool/base/base.h"
|
2005-05-24 02:06:50 +04:00
|
|
|
#include "pml_ob1_comm.h"
|
2005-06-01 18:34:22 +04:00
|
|
|
#include "pml_ob1_hdr.h"
|
2005-09-13 02:28:23 +04:00
|
|
|
#include "pml_ob1_rdma.h"
|
2006-07-20 18:44:35 +04:00
|
|
|
#include "pml_ob1_rdmafrag.h"
|
2006-02-12 04:33:29 +03:00
|
|
|
#include "ompi/datatype/convertor.h"
|
2006-02-26 03:45:54 +03:00
|
|
|
#include "ompi/datatype/dt_arch.h"
|
2006-02-12 04:33:29 +03:00
|
|
|
#include "ompi/mca/bml/bml.h"
|
2005-05-24 02:06:50 +04:00
|
|
|
|
2007-07-11 03:45:23 +04:00
|
|
|
BEGIN_C_DECLS
|
2005-05-24 02:06:50 +04:00
|
|
|
|
2006-07-20 18:44:35 +04:00
|
|
|
/**
 * Reason a send request is sitting on the global send_pending list.
 * Stored in the request so that get_request_from_send_pending() can
 * report which operation should be retried when resources free up.
 */
typedef enum {
    MCA_PML_OB1_SEND_PENDING_NONE,      /**< not on the pending list */
    MCA_PML_OB1_SEND_PENDING_SCHEDULE,  /**< fragment scheduling must be retried */
    MCA_PML_OB1_SEND_PENDING_START      /**< initial start must be retried */
} mca_pml_ob1_send_pending_t;
|
2005-05-24 02:06:50 +04:00
|
|
|
|
|
|
|
/**
 * OB1 send request.  Extends the base PML send request with the state
 * needed for pipelined/RDMA sends over one or more BTLs.
 */
struct mca_pml_ob1_send_request_t {
    mca_pml_base_send_request_t req_send;   /**< base send request (must be first) */
    mca_bml_base_endpoint_t* req_endpoint;  /**< cached BML endpoint of the peer */
    ompi_ptr_t req_recv;                    /**< opaque handle to the matched receive on the peer */
    int32_t req_state;                      /**< outstanding protocol events; 0 => none expected */
    int32_t req_lock;                       /**< scheduling entry counter, see lock_send_request() */
    bool req_throttle_sends;                /**< limit outstanding fragments to the pipeline depth */
    size_t req_pipeline_depth;              /**< number of fragments currently in flight */
    size_t req_bytes_delivered;             /**< bytes acknowledged as delivered so far */
    uint32_t req_rdma_cnt;                  /**< number of valid entries in req_rdma[] */
    mca_pml_ob1_send_pending_t req_pending; /**< why the request is on the pending list (if it is) */
    opal_mutex_t req_send_range_lock;       /**< protects req_send_ranges */
    opal_list_t req_send_ranges;            /**< ranges of the message still to be scheduled */
    mca_pml_ob1_com_btl_t req_rdma[1];      /**< RDMA-capable BTLs; allocated oversized (variable length) */
};
typedef struct mca_pml_ob1_send_request_t mca_pml_ob1_send_request_t;

OBJ_CLASS_DECLARATION(mca_pml_ob1_send_request_t);
|
|
|
|
|
2007-06-03 12:30:07 +04:00
|
|
|
/**
 * A contiguous range of the message that still has to be scheduled,
 * together with the set of BTLs it may be sent over.  Ranges live on
 * the request's req_send_ranges list.
 */
struct mca_pml_ob1_send_range_t {
    ompi_free_list_item_t base;             /**< free-list linkage (must be first) */
    uint64_t range_send_offset;             /**< offset of the range within the message */
    uint64_t range_send_length;             /**< bytes remaining in this range */
    int range_btl_idx;                      /**< next BTL to use (round-robin cursor) */
    int range_btl_cnt;                      /**< number of valid entries in range_btls[] */
    mca_pml_ob1_com_btl_t range_btls[1];    /**< candidate BTLs; allocated oversized (variable length) */
};
typedef struct mca_pml_ob1_send_range_t mca_pml_ob1_send_range_t;
OBJ_CLASS_DECLARATION(mca_pml_ob1_send_range_t);
|
2005-05-24 02:06:50 +04:00
|
|
|
|
2007-08-30 16:10:04 +04:00
|
|
|
/**
 * Atomically register one more entry into the request's scheduling
 * logic.  Returns true only for the caller that took the counter from
 * 0 to 1, i.e. the caller that now owns the scheduling loop.
 */
static inline bool lock_send_request(mca_pml_ob1_send_request_t *sendreq)
{
    int32_t entries = OPAL_THREAD_ADD32(&sendreq->req_lock, 1);
    return (1 == entries);
}
|
|
|
|
|
|
|
|
/**
 * Atomically drop one entry from the request's scheduling counter.
 * Returns true when the counter reaches 0, i.e. no caller is left
 * inside the scheduling logic.
 */
static inline bool unlock_send_request(mca_pml_ob1_send_request_t *sendreq)
{
    int32_t entries = OPAL_THREAD_ADD32(&sendreq->req_lock, -1);
    return (0 == entries);
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
add_request_to_send_pending(mca_pml_ob1_send_request_t* sendreq,
|
|
|
|
const mca_pml_ob1_send_pending_t type, const bool append)
|
|
|
|
{
|
|
|
|
opal_list_item_t *item = (opal_list_item_t*)sendreq;
|
|
|
|
|
|
|
|
OPAL_THREAD_LOCK(&mca_pml_ob1.lock);
|
|
|
|
sendreq->req_pending = type;
|
|
|
|
if(append)
|
|
|
|
opal_list_append(&mca_pml_ob1.send_pending, item);
|
|
|
|
else
|
|
|
|
opal_list_prepend(&mca_pml_ob1.send_pending, item);
|
|
|
|
|
|
|
|
OPAL_THREAD_UNLOCK(&mca_pml_ob1.lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline mca_pml_ob1_send_request_t*
|
|
|
|
get_request_from_send_pending(mca_pml_ob1_send_pending_t *type)
|
|
|
|
{
|
|
|
|
mca_pml_ob1_send_request_t *sendreq;
|
|
|
|
|
|
|
|
OPAL_THREAD_LOCK(&mca_pml_ob1.lock);
|
|
|
|
sendreq = (mca_pml_ob1_send_request_t*)
|
|
|
|
opal_list_remove_first(&mca_pml_ob1.send_pending);
|
|
|
|
if(sendreq) {
|
|
|
|
*type = sendreq->req_pending;
|
|
|
|
sendreq->req_pending = MCA_PML_OB1_SEND_PENDING_NONE;
|
|
|
|
}
|
|
|
|
OPAL_THREAD_UNLOCK(&mca_pml_ob1.lock);
|
|
|
|
|
|
|
|
return sendreq;
|
|
|
|
}
|
|
|
|
|
2007-07-11 02:16:38 +04:00
|
|
|
/**
 * Allocate a send request from the global free list and bind it to the
 * peer (comm, dst).  On success 'sendreq' points to the request and
 * 'rc' is OMPI_SUCCESS; if the peer proc cannot be resolved 'rc' is
 * OMPI_ERR_OUT_OF_RESOURCE and 'sendreq' is left untouched.
 * Note: OMPI_FREE_LIST_WAIT may overwrite 'rc'.
 */
#define MCA_PML_OB1_SEND_REQUEST_ALLOC( comm,                           \
                                        dst,                            \
                                        sendreq,                        \
                                        rc)                             \
{                                                                       \
    ompi_proc_t *proc = ompi_comm_peer_lookup( comm, dst );             \
    ompi_free_list_item_t* item;                                        \
                                                                        \
    rc = OMPI_ERR_OUT_OF_RESOURCE;                                      \
    if( OPAL_LIKELY(NULL != proc) ) {                                   \
        rc = OMPI_SUCCESS;                                              \
        OMPI_FREE_LIST_WAIT(&mca_pml_base_send_requests, item, rc);     \
        sendreq = (mca_pml_ob1_send_request_t*)item;                    \
        sendreq->req_send.req_base.req_proc = proc;                     \
    }                                                                   \
}
|
2005-05-24 02:06:50 +04:00
|
|
|
|
|
|
|
|
2007-07-11 02:16:38 +04:00
|
|
|
/**
 * Initialize an allocated send request with the user's send parameters.
 * Thin wrapper over MCA_PML_BASE_SEND_REQUEST_INIT; OB1-specific fields
 * are (re)set later in mca_pml_ob1_send_request_start().
 */
#define MCA_PML_OB1_SEND_REQUEST_INIT( sendreq,                         \
                                       buf,                             \
                                       count,                           \
                                       datatype,                        \
                                       dst,                             \
                                       tag,                             \
                                       comm,                            \
                                       sendmode,                        \
                                       persistent)                      \
{                                                                       \
    MCA_PML_BASE_SEND_REQUEST_INIT(&sendreq->req_send,                  \
                                   buf,                                 \
                                   count,                               \
                                   datatype,                            \
                                   dst,                                 \
                                   tag,                                 \
                                   comm,                                \
                                   sendmode,                            \
                                   persistent);                         \
}
|
2005-05-24 02:06:50 +04:00
|
|
|
|
2005-06-17 22:25:44 +04:00
|
|
|
|
2006-10-17 09:01:47 +04:00
|
|
|
/**
 * Deregister every memory registration cached in req_rdma[] and reset
 * the count, returning the mpool resources to the system.
 */
static inline void mca_pml_ob1_free_rdma_resources(mca_pml_ob1_send_request_t* sendreq)
{
    size_t i;

    /* hand each cached registration back to its mpool */
    for(i = 0; i < sendreq->req_rdma_cnt; i++) {
        mca_mpool_base_registration_t* reg = sendreq->req_rdma[i].btl_reg;
        if( (NULL != reg) && (NULL != reg->mpool) ) {
            reg->mpool->mpool_deregister(reg->mpool, reg);
        }
    }
    sendreq->req_rdma_cnt = 0;
}
|
|
|
|
|
|
|
|
|
2005-05-24 02:06:50 +04:00
|
|
|
/**
|
|
|
|
* Start a send request.
|
|
|
|
*/
|
|
|
|
|
2006-07-20 18:44:35 +04:00
|
|
|
/* Thin wrapper kept for interface symmetry with the other request
 * macros; 'rc' receives the return code of the start routine. */
#define MCA_PML_OB1_SEND_REQUEST_START(sendreq, rc) \
    do {                                            \
        rc = mca_pml_ob1_send_request_start(sendreq); \
    } while (0)
|
2005-05-24 02:06:50 +04:00
|
|
|
|
2005-06-17 22:25:44 +04:00
|
|
|
|
2005-06-10 00:16:33 +04:00
|
|
|
/*
|
2005-09-15 22:47:59 +04:00
|
|
|
* Mark a send request as completed at the MPI level.
|
|
|
|
*/
|
|
|
|
|
2006-03-16 01:53:41 +03:00
|
|
|
/**
 * Fill in the MPI status of the request (source = local rank, tag,
 * error, count) and signal MPI-level completion to any waiters.
 * Caller must hold ompi_request_lock.
 */
#define MCA_PML_OB1_SEND_REQUEST_MPI_COMPLETE(sendreq)                          \
do {                                                                            \
    (sendreq)->req_send.req_base.req_ompi.req_status.MPI_SOURCE =               \
        (sendreq)->req_send.req_base.req_comm->c_my_rank;                       \
    (sendreq)->req_send.req_base.req_ompi.req_status.MPI_TAG =                  \
        (sendreq)->req_send.req_base.req_tag;                                   \
    (sendreq)->req_send.req_base.req_ompi.req_status.MPI_ERROR = OMPI_SUCCESS;  \
    (sendreq)->req_send.req_base.req_ompi.req_status._count =                   \
        (int)(sendreq)->req_send.req_bytes_packed;                              \
    MCA_PML_BASE_REQUEST_MPI_COMPLETE( &((sendreq)->req_send.req_base.req_ompi) ); \
                                                                                \
    /* Could be moved to MCA_PML_BASE_REQUEST_MPI_COMPLETE, but before broadcast */ \
    PERUSE_TRACE_COMM_EVENT( PERUSE_COMM_REQ_COMPLETE,                          \
                             &(sendreq->req_send.req_base), PERUSE_SEND);       \
} while(0)
|
|
|
|
|
2007-08-30 16:08:33 +04:00
|
|
|
/*
|
|
|
|
* Release resources associated with a request
|
|
|
|
*/
|
|
|
|
|
|
|
|
/**
 * Finalize the request and hand it back to the global free list.
 * 'sendreq' must not be touched after this macro runs.
 */
#define MCA_PML_OB1_SEND_REQUEST_RETURN(sendreq)                        \
    do {                                                                \
        /* Let the base handle the reference counts */                  \
        MCA_PML_BASE_SEND_REQUEST_FINI((&(sendreq)->req_send));         \
        OMPI_FREE_LIST_RETURN( &mca_pml_base_send_requests,             \
                               (ompi_free_list_item_t*)sendreq);        \
    } while(0)
|
|
|
|
|
|
|
|
|
2005-09-15 22:47:59 +04:00
|
|
|
/*
|
|
|
|
* The PML has completed a send request. Note that this request
|
|
|
|
* may have been orphaned by the user or have already completed
|
2007-08-30 16:08:33 +04:00
|
|
|
* at the MPI level.
|
|
|
|
* This function will never be called directly from the upper level, as it
|
|
|
|
* should only be an internal call to the PML.
|
|
|
|
*
|
2005-06-10 00:16:33 +04:00
|
|
|
*/
|
2007-08-30 16:08:33 +04:00
|
|
|
void static inline
|
|
|
|
send_request_pml_complete(mca_pml_ob1_send_request_t *sendreq)
|
|
|
|
{
|
|
|
|
assert(false == sendreq->req_send.req_base.req_pml_complete);
|
|
|
|
|
|
|
|
if(sendreq->req_send.req_bytes_packed > 0) {
|
|
|
|
PERUSE_TRACE_COMM_EVENT( PERUSE_COMM_REQ_XFER_END,
|
|
|
|
&(sendreq->req_send.req_base), PERUSE_SEND);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* return mpool resources */
|
|
|
|
mca_pml_ob1_free_rdma_resources(sendreq);
|
|
|
|
|
|
|
|
if (sendreq->req_send.req_send_mode == MCA_PML_BASE_SEND_BUFFERED &&
|
|
|
|
sendreq->req_send.req_addr != sendreq->req_send.req_base.req_addr) {
|
|
|
|
mca_pml_base_bsend_request_fini((ompi_request_t*)sendreq);
|
|
|
|
}
|
|
|
|
|
|
|
|
OPAL_THREAD_LOCK(&ompi_request_lock);
|
|
|
|
if(false == sendreq->req_send.req_base.req_ompi.req_complete) {
|
|
|
|
/* Should only be called for long messages (maybe synchronous) */
|
|
|
|
MCA_PML_OB1_SEND_REQUEST_MPI_COMPLETE(sendreq);
|
|
|
|
}
|
|
|
|
sendreq->req_send.req_base.req_pml_complete = true;
|
|
|
|
|
|
|
|
if(sendreq->req_send.req_base.req_free_called) {
|
|
|
|
MCA_PML_OB1_SEND_REQUEST_RETURN(sendreq);
|
|
|
|
}
|
|
|
|
OPAL_THREAD_UNLOCK(&ompi_request_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* returns true if request was completed on PML level */
|
|
|
|
bool static inline
|
|
|
|
send_request_pml_complete_check(mca_pml_ob1_send_request_t *sendreq)
|
|
|
|
{
|
|
|
|
opal_atomic_rmb();
|
|
|
|
/* if no more events are expected for the request and the whole message is
|
|
|
|
* already sent and send fragment scheduling isn't running in another
|
|
|
|
* thread then complete the request on PML level. From now on, if user
|
|
|
|
* called free on this request, the request structure can be reused for
|
|
|
|
* another request or if the request is persistent it can be restarted */
|
|
|
|
if(sendreq->req_state == 0 &&
|
|
|
|
sendreq->req_bytes_delivered >= sendreq->req_send.req_bytes_packed
|
2007-08-30 16:10:04 +04:00
|
|
|
&& lock_send_request(sendreq)) {
|
2007-08-30 16:08:33 +04:00
|
|
|
send_request_pml_complete(sendreq);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
2006-03-16 01:53:41 +03:00
|
|
|
|
2006-07-20 18:44:35 +04:00
|
|
|
/**
|
|
|
|
* Schedule additional fragments
|
|
|
|
*/
|
2007-08-30 16:10:04 +04:00
|
|
|
/* One pass of the fragment-scheduling logic; the caller must hold the
 * request's scheduling "lock" (see lock_send_request).  Returns an OMPI
 * error code; OMPI_ERR_OUT_OF_RESOURCE means scheduling must be retried
 * later. */
int
mca_pml_ob1_send_request_schedule_exclusive(mca_pml_ob1_send_request_t*);
|
2006-07-20 18:44:35 +04:00
|
|
|
|
2007-08-30 16:10:04 +04:00
|
|
|
/* Drive fragment scheduling for a request; safe to call concurrently. */
static inline void
mca_pml_ob1_send_request_schedule(mca_pml_ob1_send_request_t* sendreq)
{
    /*
     * Only allow one thread in this routine for a given request.
     * However, we cannot block callers on a mutex, so simply keep track
     * of the number of times the routine has been called and run through
     * the scheduling logic once for every call.
     */

    if(!lock_send_request(sendreq))
        return;

    do {
        int rc;
        rc = mca_pml_ob1_send_request_schedule_exclusive(sendreq);
        /* NOTE: on resource exhaustion we return WITHOUT unlocking; the
         * elevated req_lock keeps completion at bay until the pending
         * retry path re-enters the scheduling loop */
        if(rc == OMPI_ERR_OUT_OF_RESOURCE)
            return;
    } while(!unlock_send_request(sendreq));
    send_request_pml_complete_check(sendreq);
}
|
2005-07-18 22:54:25 +04:00
|
|
|
|
2005-06-01 18:34:22 +04:00
|
|
|
/**
|
2005-07-19 01:22:55 +04:00
|
|
|
* Start the specified request
|
2005-06-01 18:34:22 +04:00
|
|
|
*/
|
|
|
|
|
2005-09-14 21:08:08 +04:00
|
|
|
/* Eager/rendezvous start variants; 'size' is the number of bytes to send
 * in the first fragment over 'bml_btl'.  Each returns an OMPI error code. */

/* buffered-mode start: copies the data into a bsend buffer first */
int mca_pml_ob1_send_request_start_buffered(
    mca_pml_ob1_send_request_t* sendreq,
    mca_bml_base_btl_t* bml_btl,
    size_t size);

/* start by packing the data into a BTL-allocated send buffer */
int mca_pml_ob1_send_request_start_copy(
    mca_pml_ob1_send_request_t* sendreq,
    mca_bml_base_btl_t* bml_btl,
    size_t size);

/* start via btl_prepare_src, sending from the user buffer in place */
int mca_pml_ob1_send_request_start_prepare(
    mca_pml_ob1_send_request_t* sendreq,
    mca_bml_base_btl_t* bml_btl,
    size_t size);

/* start an RDMA (get) protocol transfer of the whole message */
int mca_pml_ob1_send_request_start_rdma(
    mca_pml_ob1_send_request_t* sendreq,
    mca_bml_base_btl_t* bml_btl,
    size_t size);

/* start the rendezvous protocol; 'flags' carries OB1 header flags
 * (e.g. MCA_PML_OB1_HDR_FLAGS_CONTIG) */
int mca_pml_ob1_send_request_start_rndv(
    mca_pml_ob1_send_request_t* sendreq,
    mca_bml_base_btl_t* bml_btl,
    size_t size,
    int flags);
|
2005-06-01 18:34:22 +04:00
|
|
|
|
2007-07-11 03:45:23 +04:00
|
|
|
/* Start a send over one specific BTL, choosing the protocol (eager copy,
 * prepare/in-place, buffered, rendezvous or RDMA) from the message size,
 * the send mode and the BTL capabilities.  Returns an OMPI error code;
 * OMPI_ERR_OUT_OF_RESOURCE means the caller may try another BTL. */
static inline int
mca_pml_ob1_send_request_start_btl( mca_pml_ob1_send_request_t* sendreq,
                                    mca_bml_base_btl_t* bml_btl )
{
    size_t size = sendreq->req_send.req_bytes_packed;
    size_t eager_limit = bml_btl->btl_eager_limit;
    int rc;

    /* effective eager limit: min(BTL limit, PML limit) minus header room */
    if( eager_limit > mca_pml_ob1.eager_limit )
        eager_limit = mca_pml_ob1.eager_limit;
    eager_limit -= sizeof(mca_pml_ob1_hdr_t);

    if( OPAL_LIKELY(size <= eager_limit) ) {
        /* the whole message fits into a single eager fragment */
        switch(sendreq->req_send.req_send_mode) {
        case MCA_PML_BASE_SEND_SYNCHRONOUS:
            /* synchronous mode needs an ack: use rendezvous even for small data */
            rc = mca_pml_ob1_send_request_start_rndv(sendreq, bml_btl, size, 0);
            break;
        case MCA_PML_BASE_SEND_BUFFERED:
            rc = mca_pml_ob1_send_request_start_copy(sendreq, bml_btl, size);
            break;
        case MCA_PML_BASE_SEND_COMPLETE:
            rc = mca_pml_ob1_send_request_start_prepare(sendreq, bml_btl, size);
            break;
        default:
            /* prefer in-place send when the BTL supports it and there is data */
            if (size != 0 && bml_btl->btl_flags & MCA_BTL_FLAGS_SEND_INPLACE) {
                rc = mca_pml_ob1_send_request_start_prepare(sendreq, bml_btl, size);
            } else {
                rc = mca_pml_ob1_send_request_start_copy(sendreq, bml_btl, size);
            }
            break;
        }
    } else {
        /* long message: first fragment carries at most the eager limit */
        size = eager_limit;
        if(sendreq->req_send.req_send_mode == MCA_PML_BASE_SEND_BUFFERED) {
            rc = mca_pml_ob1_send_request_start_buffered(sendreq, bml_btl, size);
        } else if
            (ompi_convertor_need_buffers(&sendreq->req_send.req_base.req_convertor) == false) {
            /* contiguous user buffer: try the RDMA protocol first */
            unsigned char *base;
            ompi_convertor_get_current_pointer( &sendreq->req_send.req_base.req_convertor, (void**)&base );

            if( 0 != (sendreq->req_rdma_cnt = (uint32_t)mca_pml_ob1_rdma_btls(
                sendreq->req_endpoint,
                base,
                sendreq->req_send.req_bytes_packed,
                sendreq->req_rdma))) {
                rc = mca_pml_ob1_send_request_start_rdma(sendreq, bml_btl,
                    sendreq->req_send.req_bytes_packed);
                if( OPAL_UNLIKELY(OMPI_SUCCESS != rc) ) {
                    mca_pml_ob1_free_rdma_resources(sendreq);
                }
            } else {
                /* no RDMA-capable BTL: rendezvous, flagged as contiguous */
                rc = mca_pml_ob1_send_request_start_rndv(sendreq, bml_btl, size,
                    MCA_PML_OB1_HDR_FLAGS_CONTIG);
            }
        } else {
            /* non-contiguous data: plain rendezvous */
            rc = mca_pml_ob1_send_request_start_rndv(sendreq, bml_btl, size, 0);
        }
    }

    return rc;
}
|
|
|
|
|
2007-07-11 03:45:23 +04:00
|
|
|
/* Start a (re)initialized send request: reset per-start state, assign
 * the message sequence number and try each eager BTL in turn.  If every
 * BTL is out of resources the request is parked on the pending list and
 * OMPI_SUCCESS is returned (the start will be retried later). */
static inline int
mca_pml_ob1_send_request_start( mca_pml_ob1_send_request_t* sendreq )
{
    mca_pml_ob1_comm_t* comm = sendreq->req_send.req_base.req_comm->c_pml_comm;
    mca_bml_base_endpoint_t* endpoint = (mca_bml_base_endpoint_t*)
        sendreq->req_send.req_base.req_proc->proc_bml;
    size_t i;

    if( OPAL_UNLIKELY(endpoint == NULL) ) {
        /* no BML endpoint: the peer is not reachable */
        return OMPI_ERR_UNREACH;
    }

    /* reset the per-start bookkeeping (requests may be persistent) */
    sendreq->req_endpoint = endpoint;
    sendreq->req_state = 0;
    sendreq->req_lock = 0;
    sendreq->req_pipeline_depth = 0;
    sendreq->req_bytes_delivered = 0;
    sendreq->req_pending = MCA_PML_OB1_SEND_PENDING_NONE;
    /* per-peer ordered sequence number for matching on the receiver */
    sendreq->req_send.req_base.req_sequence = OPAL_THREAD_ADD32(
        &comm->procs[sendreq->req_send.req_base.req_peer].send_sequence,1);

    MCA_PML_BASE_SEND_START( &sendreq->req_send.req_base );

    for(i = 0; i < mca_bml_base_btl_array_get_size(&endpoint->btl_eager); i++) {
        mca_bml_base_btl_t* bml_btl;
        int rc;

        /* select a btl */
        bml_btl = mca_bml_base_btl_array_get_next(&endpoint->btl_eager);
        rc = mca_pml_ob1_send_request_start_btl(sendreq, bml_btl);
        /* any outcome other than resource exhaustion is final */
        if( OPAL_LIKELY(OMPI_ERR_OUT_OF_RESOURCE != rc) )
            return rc;
    }
    /* all eager BTLs were out of resources: retry the start later */
    add_request_to_send_pending(sendreq, MCA_PML_OB1_SEND_PENDING_START, true);

    return OMPI_SUCCESS;
}
|
2005-05-24 02:06:50 +04:00
|
|
|
|
2005-06-09 07:11:51 +04:00
|
|
|
/**
|
2005-07-19 01:22:55 +04:00
|
|
|
* Initiate a put scheduled by the receiver.
|
2005-06-09 07:11:51 +04:00
|
|
|
*/
|
|
|
|
|
2007-07-11 03:45:23 +04:00
|
|
|
/* Handle a PUT control header received from the peer: set up the RDMA
 * write described by 'hdr' over 'btl'. */
void mca_pml_ob1_send_request_put( mca_pml_ob1_send_request_t* sendreq,
                                   mca_btl_base_module_t* btl,
                                   mca_pml_ob1_rdma_hdr_t* hdr );

/* Issue (or re-issue) the RDMA put for one prepared fragment. */
int mca_pml_ob1_send_request_put_frag(mca_pml_ob1_rdma_frag_t* frag);
|
|
|
|
|
|
|
|
/* This function tries to continue sendreq that was stuck because of resource
|
|
|
|
* unavailability. A sendreq may be added to send_pending list if there is no
|
|
|
|
* resource to send initial packet or there is not resource to schedule data
|
|
|
|
* for sending. The reason the sendreq was added to the list is stored inside
|
|
|
|
* sendreq struct and appropriate operation is retried when resource became
|
|
|
|
* available. bml_btl passed to the function doesn't represents sendreq
|
|
|
|
* destination, it represents BTL on which resource was freed, so only this BTL
|
|
|
|
* should be considered for sending packets */
|
|
|
|
void mca_pml_ob1_send_request_process_pending(mca_bml_base_btl_t *bml_btl);

/* Pack [send_offset, send_offset + send_length) of the user buffer into
 * the request's in/out buffer (used by the buffered/copy protocols). */
void mca_pml_ob1_send_request_copy_in_out(mca_pml_ob1_send_request_t *sendreq,
        uint64_t send_offset, uint64_t send_length);
|
2007-07-11 02:16:38 +04:00
|
|
|
|
|
|
|
END_C_DECLS
|
|
|
|
|
|
|
|
#endif /* !defined(OMPI_PML_OB1_SEND_REQUEST_H) */
|
2005-05-24 02:06:50 +04:00
|
|
|
|