/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University.
 *                         All rights reserved.
 * Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
 *                         All rights reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
/**
 * @file
 */
#ifndef OMPI_PML_TEG_SEND_REQUEST_H
#define OMPI_PML_TEG_SEND_REQUEST_H

#include "mca/ptl/ptl.h"
#include "mca/ptl/base/ptl_base_sendreq.h"
#include "mca/ptl/base/ptl_base_sendfrag.h"
#include "mca/ptl/base/ptl_base_comm.h"
#include "pml_teg_proc.h"
#include "pml_teg_ptl.h"

#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif

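/**
 * The TEG send request is currently just an alias for the base PTL
 * send request type.
 */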
typedef mca_ptl_base_send_request_t mca_pml_teg_send_request_t;
OBJ_CLASS_DECLARATION(mca_pml_teg_send_request_t);


#define MCA_PML_TEG_SEND_REQUEST_ALLOC( \
    comm, \
    dst, \
    sendreq, \
    rc) \
{ \
    mca_pml_proc_t *proc = mca_pml_teg_proc_lookup_remote(comm,dst); \
    mca_ptl_proc_t* ptl_proc; \
    mca_pml_base_ptl_t* ptl_base; \
    \
    if(NULL == proc) { \
        return OMPI_ERR_OUT_OF_RESOURCE; \
    } \
    OPAL_THREAD_SCOPED_LOCK(&proc->proc_lock, \
        (ptl_proc = mca_ptl_array_get_next(&proc->proc_ptl_first))); \
    ptl_base = ptl_proc->ptl_base; \
    /* \
     * check to see if there is a cache of send requests associated with \
     * this ptl - if so try the allocation from there. \
     */ \
    if(NULL != ptl_base) { \
        OPAL_THREAD_LOCK(&ptl_base->ptl_cache_lock); \
        sendreq = (mca_pml_teg_send_request_t*) \
            opal_list_remove_first(&ptl_base->ptl_cache); \
        if(NULL != sendreq) { \
            OPAL_THREAD_UNLOCK(&ptl_base->ptl_cache_lock); \
            rc = OMPI_SUCCESS; \
        } else if (ptl_base->ptl_cache_alloc < ptl_base->ptl_cache_size) { \
            /* \
             * allocate an additional request to the cache \
             */ \
            mca_ptl_base_module_t* ptl = ptl_base->ptl; \
            opal_list_item_t* item; \
            OMPI_FREE_LIST_WAIT(&mca_pml_teg.teg_send_requests, item, rc); \
            sendreq = (mca_pml_teg_send_request_t*)item; \
            sendreq->req_ptl = ptl; \
            if(ptl->ptl_request_init(ptl, sendreq) == OMPI_SUCCESS) { \
                sendreq->req_cached = true; \
                ptl_base->ptl_cache_alloc++; \
            } \
            OPAL_THREAD_UNLOCK(&ptl_base->ptl_cache_lock); \
        } else { \
            /* \
             * take a request from the global pool \
             */ \
            opal_list_item_t* item; \
            OPAL_THREAD_UNLOCK(&ptl_base->ptl_cache_lock); \
            OMPI_FREE_LIST_WAIT(&mca_pml_teg.teg_send_requests, item, rc); \
            sendreq = (mca_pml_teg_send_request_t*)item; \
            sendreq->req_ptl = ptl_proc->ptl; \
        } \
    /* otherwise - take the allocation from the global list */ \
    } else { \
        opal_list_item_t* item; \
        OMPI_FREE_LIST_WAIT(&mca_pml_teg.teg_send_requests, item, rc); \
        sendreq = (mca_pml_teg_send_request_t*)item; \
        sendreq->req_ptl = ptl_proc->ptl; \
    } \
    /* update request to point to current peer */ \
    sendreq->req_peer = ptl_proc->ptl_peer; \
}

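/*
 * Illustrative sketch (added commentary; the surrounding variable names are
 * assumptions): a caller allocates a descriptor and checks the result before
 * initializing and starting the request, e.g.
 *
 *   mca_pml_teg_send_request_t* sendreq;
 *   int rc;
 *   MCA_PML_TEG_SEND_REQUEST_ALLOC(comm, dst, sendreq, rc);
 *   if (OMPI_SUCCESS != rc) {
 *       return rc;
 *   }
 */
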
#define MCA_PML_TEG_SEND_REQUEST_INIT( request, \
                                       addr, \
                                       count, \
                                       datatype, \
                                       peer, \
                                       tag, \
                                       comm, \
                                       mode, \
                                       persistent) \
{ \
    MCA_PML_BASE_SEND_REQUEST_INIT((&request->req_send), \
                                   addr, \
                                   count, \
                                   datatype, \
                                   peer, \
                                   tag, \
                                   comm, \
                                   mode, \
                                   persistent \
    ); \
}

#define MCA_PML_TEG_SEND_REQUEST_RETURN(sendreq) \
{ \
    mca_ptl_base_module_t* ptl = (sendreq)->req_ptl; \
    mca_pml_base_ptl_t* ptl_base = ptl->ptl_base; \
    \
    /* Let the base handle the reference counts */ \
    MCA_PML_BASE_SEND_REQUEST_FINI((&sendreq->req_send)); \
    \
    /* \
     * If there is a cache associated with the ptl - first attempt \
     * to return the send descriptor to the cache. \
     */ \
    if(NULL != ptl->ptl_base && (sendreq)->req_cached) { \
        OPAL_THREAD_LOCK(&ptl_base->ptl_cache_lock); \
        opal_list_prepend(&ptl_base->ptl_cache, \
                          (opal_list_item_t*)sendreq); \
        OPAL_THREAD_UNLOCK(&ptl_base->ptl_cache_lock); \
    } else { \
        OMPI_FREE_LIST_RETURN( \
            &mca_pml_teg.teg_send_requests, (opal_list_item_t*)sendreq); \
    } \
}

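/*
 * Note (added commentary): MCA_PML_TEG_SEND_REQUEST_RETURN is the
 * counterpart of MCA_PML_TEG_SEND_REQUEST_ALLOC. Once the PML is finished
 * with a completed request, invoking
 *
 *   MCA_PML_TEG_SEND_REQUEST_RETURN(sendreq);
 *
 * hands a cached descriptor back to its owning PTL's cache and returns any
 * other descriptor to the global free list, as implemented above.
 */
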
/**
 * Start a send request.
 */
#define MCA_PML_TEG_SEND_REQUEST_START(req, rc) \
{ \
    mca_ptl_base_module_t* ptl = req->req_ptl; \
    size_t first_fragment_size = ptl->ptl_first_frag_size; \
    int flags; \
    \
    req->req_lock = 0; \
    req->req_bytes_sent = 0; \
    req->req_peer_match.lval = 0; \
    req->req_peer_addr.lval = 0; \
    req->req_peer_size = 0; \
    req->req_offset = 0; \
    req->req_send.req_base.req_pml_complete = false; \
    req->req_send.req_base.req_ompi.req_complete = false; \
    req->req_send.req_base.req_ompi.req_state = OMPI_REQUEST_ACTIVE; \
    req->req_send.req_base.req_sequence = mca_pml_ptl_comm_send_sequence( \
        req->req_send.req_base.req_comm->c_pml_comm, \
        req->req_send.req_base.req_peer); \
    \
    /* handle buffered send */ \
    if(req->req_send.req_send_mode == MCA_PML_BASE_SEND_BUFFERED) { \
        mca_pml_base_bsend_request_start(&req->req_send.req_base.req_ompi); \
    } \
    \
    /* start the first fragment */ \
    if (first_fragment_size == 0 || \
        req->req_send.req_bytes_packed <= first_fragment_size) { \
        first_fragment_size = req->req_send.req_bytes_packed; \
        flags = (req->req_send.req_send_mode == MCA_PML_BASE_SEND_SYNCHRONOUS) ? \
            MCA_PTL_FLAGS_ACK : 0; \
    } else { \
        /* require match for first fragment of a multi-fragment */ \
        flags = MCA_PTL_FLAGS_ACK; \
    } \
    rc = ptl->ptl_send(ptl, req->req_peer, req, 0, first_fragment_size, \
                       flags); \
}

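/*
 * Illustrative sketch (added commentary, not part of the original
 * interface): a hypothetical isend-style entry point might combine the
 * macros above roughly as follows. The function name example_isend() and
 * its argument list are assumptions for illustration only.
 *
 *   static inline int example_isend(void* buf, size_t count,
 *                                   struct ompi_datatype_t* datatype,
 *                                   int dst, int tag,
 *                                   struct ompi_communicator_t* comm)
 *   {
 *       int rc;
 *       mca_pml_teg_send_request_t* sendreq;
 *       MCA_PML_TEG_SEND_REQUEST_ALLOC(comm, dst, sendreq, rc);
 *       if (OMPI_SUCCESS != rc) return rc;
 *       MCA_PML_TEG_SEND_REQUEST_INIT(sendreq, buf, count, datatype, dst,
 *                                     tag, comm, MCA_PML_BASE_SEND_STANDARD,
 *                                     false);
 *       MCA_PML_TEG_SEND_REQUEST_START(sendreq, rc);
 *       return rc;
 *   }
 */
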
/**
 * Schedule any data that was not delivered in the first fragment
 * across the available PTLs.
 */
int mca_pml_teg_send_request_schedule(mca_ptl_base_send_request_t* req);

/**
 * Update the request to reflect the number of bytes delivered. If this
 * was the first fragment - schedule the rest of the data.
 */
void mca_pml_teg_send_request_progress(
    struct mca_ptl_base_module_t* ptl,
    mca_ptl_base_send_request_t* send_request,
    size_t bytes_sent
);

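/*
 * Note (added commentary, an assumption about typical usage): a PTL reports
 * each completed fragment back to the PML through this progress function,
 * e.g.
 *
 *   mca_pml_teg_send_request_progress(ptl, sendreq, bytes_delivered);
 *
 * where bytes_delivered is the amount of data carried by the completed
 * fragment; per the comments above, remaining data is then scheduled via
 * mca_pml_teg_send_request_schedule().
 */
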
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
#endif