/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2006 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

/**
 * @file
 */

#ifndef OMPI_PML_DR_SEND_REQUEST_H
#define OMPI_PML_DR_SEND_REQUEST_H

#include "opal/util/crc.h"
#include "ompi_config.h"
#include "ompi/datatype/convertor.h"
#include "ompi/mca/btl/btl.h"
#include "ompi/mca/pml/base/pml_base_sendreq.h"
#include "ompi/mca/mpool/base/base.h"
#include "ompi/mca/bml/bml.h"

#include "pml_dr_proc.h"
#include "pml_dr_comm.h"
#include "pml_dr_hdr.h"
#include "pml_dr_vfrag.h"
#include "pml_dr_endpoint.h"
#include "opal/event/event.h"

#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
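
/**
 * Send request descriptor for the DR (data reliability) PML.  It extends the
 * base send request with the target endpoint, virtual fragment (vfrag)
 * bookkeeping, and the retransmission state used by the ack/retransmit
 * protocol.
 */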

struct mca_pml_dr_send_request_t {
    mca_pml_base_send_request_t req_send;
    mca_pml_dr_comm_proc_t* req_proc;
    mca_pml_dr_endpoint_t* req_endpoint;
#if OMPI_HAVE_THREAD_SUPPORT
    volatile int32_t req_state;
    volatile int32_t req_lock;
#else
    int32_t req_state;
    int32_t req_lock;
#endif
    size_t req_pipeline_depth;
    size_t req_bytes_delivered;
    size_t req_send_offset;

    mca_pml_dr_vfrag_t* req_vfrag;
    mca_pml_dr_vfrag_t req_vfrag0;
    opal_list_t req_retrans;
    mca_btl_base_descriptor_t* req_descriptor; /* descriptor for first frag, retransmission */
};
typedef struct mca_pml_dr_send_request_t mca_pml_dr_send_request_t;

OBJ_CLASS_DECLARATION(mca_pml_dr_send_request_t);
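
/**
 * Allocate a send request from the PML free list and cache the peer's
 * ompi_proc_t on it.  rc is set to OMPI_ERR_OUT_OF_RESOURCE if the peer
 * process cannot be resolved.
 */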

#define MCA_PML_DR_SEND_REQUEST_ALLOC(                                    \
    comm,                                                                 \
    dst,                                                                  \
    sendreq,                                                              \
    rc)                                                                   \
{                                                                         \
    ompi_proc_t *proc =                                                   \
        comm->c_pml_procs[dst]->proc_ompi;                                \
    opal_list_item_t* item;                                               \
                                                                          \
    if(NULL == proc) {                                                    \
        rc = OMPI_ERR_OUT_OF_RESOURCE;                                    \
    } else {                                                              \
        rc = OMPI_SUCCESS;                                                \
        OMPI_FREE_LIST_WAIT(&mca_pml_dr.send_requests, item, rc);         \
        sendreq = (mca_pml_dr_send_request_t*)item;                       \
        sendreq->req_send.req_base.req_proc = proc;                       \
    }                                                                     \
}
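
/**
 * Initialize a (possibly persistent) send request with the user buffer,
 * datatype, peer, tag and send mode, and prepare the datatype convertor
 * (with checksumming enabled when mca_pml_dr.enable_csum is set).
 */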

#define MCA_PML_DR_SEND_REQUEST_INIT(                                     \
    sendreq,                                                              \
    addr,                                                                 \
    count,                                                                \
    datatype,                                                             \
    peer,                                                                 \
    tag,                                                                  \
    comm,                                                                 \
    sendmode,                                                             \
    persistent)                                                           \
do {                                                                      \
    /* increment reference counts */                                      \
    OBJ_RETAIN(comm);                                                     \
    OBJ_RETAIN(datatype);                                                 \
                                                                          \
    OMPI_REQUEST_INIT(&(sendreq)->req_send.req_base.req_ompi, persistent); \
    (sendreq)->req_send.req_addr = addr;                                  \
    (sendreq)->req_send.req_count = count;                                \
    (sendreq)->req_send.req_datatype = datatype;                          \
    (sendreq)->req_send.req_send_mode = sendmode;                         \
    (sendreq)->req_send.req_base.req_addr = addr;                         \
    (sendreq)->req_send.req_base.req_count = count;                       \
    (sendreq)->req_send.req_base.req_datatype = datatype;                 \
    (sendreq)->req_send.req_base.req_peer = (int32_t)peer;                \
    (sendreq)->req_send.req_base.req_tag = (int32_t)tag;                  \
    (sendreq)->req_send.req_base.req_comm = comm;                         \
    (sendreq)->req_send.req_base.req_pml_complete = (persistent ? true : false); \
    (sendreq)->req_send.req_base.req_free_called = false;                 \
    (sendreq)->req_send.req_base.req_ompi.req_status._cancelled = 0;      \
                                                                          \
    /* initialize datatype convertor for this request */                  \
    if(count > 0) {                                                       \
        /* We will create a convertor specialized for the */              \
        /* remote architecture and prepared with the datatype. */         \
        ompi_convertor_copy_and_prepare_for_send(                         \
            (sendreq)->req_send.req_base.req_proc->proc_convertor,        \
            (sendreq)->req_send.req_base.req_datatype,                    \
            (sendreq)->req_send.req_base.req_count,                       \
            (sendreq)->req_send.req_base.req_addr,                        \
            (mca_pml_dr.enable_csum ? CONVERTOR_WITH_CHECKSUM : 0),       \
            &(sendreq)->req_send.req_convertor );                         \
        ompi_convertor_get_packed_size(&(sendreq)->req_send.req_convertor, \
                                       &((sendreq)->req_send.req_bytes_packed) ); \
    } else {                                                              \
        (sendreq)->req_send.req_bytes_packed = 0;                         \
    }                                                                     \
} while(0)


/**
 * Start a send request.
 */

#define MCA_PML_DR_SEND_REQUEST_START(sendreq, rc)                        \
do {                                                                      \
    mca_pml_dr_comm_t* comm = sendreq->req_send.req_base.req_comm->c_pml_comm; \
    mca_pml_dr_endpoint_t* endpoint =                                     \
        (mca_pml_dr_endpoint_t*)sendreq->req_send.req_base.req_proc->proc_pml; \
    mca_pml_dr_comm_proc_t* proc =                                        \
        comm->procs + sendreq->req_send.req_base.req_peer;                \
    mca_bml_base_btl_t* bml_btl;                                          \
    size_t size = sendreq->req_send.req_bytes_packed;                     \
    size_t eager_limit;                                                   \
    if(endpoint == NULL) {                                                \
        rc = OMPI_ERR_UNREACH;                                            \
        break;                                                            \
    }                                                                     \
                                                                          \
    bml_btl = mca_bml_base_btl_array_get_next(&endpoint->base.btl_eager); \
    MCA_PML_DR_VFRAG_INIT(&sendreq->req_vfrag0);                          \
    sendreq->req_vfrag0.vf_id = OPAL_THREAD_ADD32(&endpoint->vfrag_seq,1); \
    sendreq->req_vfrag0.bml_btl = bml_btl;                                \
    sendreq->req_vfrag = &sendreq->req_vfrag0;                            \
    sendreq->req_endpoint = endpoint;                                     \
    sendreq->req_proc = proc;                                             \
                                                                          \
    sendreq->req_lock = 0;                                                \
    sendreq->req_pipeline_depth = 1;                                      \
    sendreq->req_bytes_delivered = 0;                                     \
    sendreq->req_state = 0;                                               \
    sendreq->req_send_offset = 0;                                         \
    sendreq->req_send.req_base.req_pml_complete = false;                  \
    sendreq->req_send.req_base.req_ompi.req_complete = false;             \
    sendreq->req_send.req_base.req_ompi.req_state = OMPI_REQUEST_ACTIVE;  \
    sendreq->req_send.req_base.req_ompi.req_status._cancelled = 0;        \
    sendreq->req_send.req_base.req_sequence = OPAL_THREAD_ADD32(&proc->send_sequence,1); \
                                                                          \
    /* select a btl */                                                    \
    eager_limit = bml_btl->btl_eager_limit - sizeof(mca_pml_dr_hdr_t);    \
    if(size <= eager_limit) {                                             \
        switch(sendreq->req_send.req_send_mode) {                         \
        case MCA_PML_BASE_SEND_SYNCHRONOUS:                               \
            rc = mca_pml_dr_send_request_start_rndv(sendreq, bml_btl, size, 0); \
            break;                                                        \
        case MCA_PML_BASE_SEND_BUFFERED:                                  \
            rc = mca_pml_dr_send_request_start_copy(sendreq, bml_btl, size); \
            break;                                                        \
        case MCA_PML_BASE_SEND_COMPLETE:                                  \
            rc = mca_pml_dr_send_request_start_prepare(sendreq, bml_btl, size); \
            break;                                                        \
        default:                                                          \
            if (bml_btl->btl_flags & MCA_BTL_FLAGS_SEND_INPLACE) {        \
                rc = mca_pml_dr_send_request_start_prepare(sendreq, bml_btl, size); \
            } else {                                                      \
                rc = mca_pml_dr_send_request_start_copy(sendreq, bml_btl, size); \
            }                                                             \
            break;                                                        \
        }                                                                 \
    } else {                                                              \
        size = eager_limit;                                               \
        if(sendreq->req_send.req_send_mode == MCA_PML_BASE_SEND_BUFFERED) { \
            rc = mca_pml_dr_send_request_start_buffered(sendreq, bml_btl, size); \
        } else {                                                          \
            rc = mca_pml_dr_send_request_start_rndv(sendreq, bml_btl, size, 0); \
        }                                                                 \
    }                                                                     \
} while (0)
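
/*
 * Usage sketch (illustrative only; the variable names and error handling
 * are assumptions, not copied from the actual pml_dr isend path):
 *
 *     mca_pml_dr_send_request_t* sendreq;
 *     int rc;
 *
 *     MCA_PML_DR_SEND_REQUEST_ALLOC(comm, dst, sendreq, rc);
 *     if(NULL == sendreq)
 *         return rc;
 *     MCA_PML_DR_SEND_REQUEST_INIT(sendreq, buf, count, datatype,
 *                                  dst, tag, comm, sendmode, false);
 *     MCA_PML_DR_SEND_REQUEST_START(sendreq, rc);
 */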

/*
 * Mark a send request as completed at the MPI level.
 */

#define MCA_PML_DR_SEND_REQUEST_MPI_COMPLETE(sendreq)                     \
do {                                                                      \
    (sendreq)->req_send.req_base.req_ompi.req_status.MPI_SOURCE =         \
        (sendreq)->req_send.req_base.req_comm->c_my_rank;                 \
    (sendreq)->req_send.req_base.req_ompi.req_status.MPI_TAG =            \
        (sendreq)->req_send.req_base.req_tag;                             \
    (sendreq)->req_send.req_base.req_ompi.req_status.MPI_ERROR = OMPI_SUCCESS; \
    (sendreq)->req_send.req_base.req_ompi.req_status._count =             \
        (sendreq)->req_send.req_bytes_packed;                             \
    MCA_PML_BASE_REQUEST_MPI_COMPLETE( &((sendreq)->req_send.req_base.req_ompi) ); \
} while(0)

/*
 * The request fini is responsible for releasing all resources at the PML
 * level. It is never called directly from the upper level, as it should
 * only be an internal call within the PML. However, when the user has
 * already lost the MPI reference to the request (MPI_Request_free was
 * called), fini should completely free the MPI request.
 */

#define MCA_PML_DR_SEND_REQUEST_PML_COMPLETE(sendreq)                     \
do {                                                                      \
    assert( false == sendreq->req_send.req_base.req_pml_complete );       \
    if (sendreq->req_send.req_send_mode == MCA_PML_BASE_SEND_BUFFERED &&  \
        sendreq->req_send.req_addr != sendreq->req_send.req_base.req_addr) { \
        mca_pml_base_bsend_request_fini((ompi_request_t*)sendreq);        \
    }                                                                     \
                                                                          \
    OPAL_THREAD_LOCK(&ompi_request_lock);                                 \
    if( false == sendreq->req_send.req_base.req_ompi.req_complete ) {     \
        /* Should only be called for long messages (maybe synchronous) */ \
        MCA_PML_DR_SEND_REQUEST_MPI_COMPLETE(sendreq);                    \
    }                                                                     \
    sendreq->req_send.req_base.req_pml_complete = true;                   \
                                                                          \
    if( sendreq->req_send.req_base.req_free_called ) {                    \
        MCA_PML_DR_SEND_REQUEST_RETURN( sendreq );                        \
    } else {                                                              \
        if(sendreq->req_send.req_base.req_ompi.req_persistent) {          \
            /* rewind convertor */                                        \
            size_t offset = 0;                                            \
            ompi_convertor_set_position(&sendreq->req_send.req_convertor, &offset); \
        }                                                                 \
    }                                                                     \
    OPAL_THREAD_UNLOCK(&ompi_request_lock);                               \
} while (0)

/*
 * Release resources associated with a request.
 */

#define MCA_PML_DR_SEND_REQUEST_RETURN(sendreq)                           \
do {                                                                      \
    /* Let the base handle the reference counts */                        \
    MCA_PML_BASE_SEND_REQUEST_FINI((&(sendreq)->req_send));               \
    OMPI_FREE_LIST_RETURN( &mca_pml_dr.send_requests,                     \
                           (opal_list_item_t*)sendreq );                  \
} while(0)

/*
 * Lookup/allocate a vfrag for the pending send.
 */

#define MCA_PML_DR_SEND_REQUEST_VFRAG_INIT(sendreq, endpoint, size, vfrag) \
do {                                                                      \
    size_t max_send_size = endpoint->base.btl_max_send_size -             \
                           sizeof(mca_pml_dr_frag_hdr_t);                 \
    size_t div = size / max_send_size;                                    \
                                                                          \
    MCA_PML_DR_VFRAG_INIT(vfrag);                                         \
    if(div == 0) {                                                        \
        vfrag->vf_len = 1;                                                \
        vfrag->vf_size = size;                                            \
        vfrag->vf_mask = 1;                                               \
    } else if(div > 64) {                                                 \
        vfrag->vf_len = 64;                                               \
        vfrag->vf_size = (max_send_size << 6); /* max_send_size * 64 */   \
        vfrag->vf_mask = ~(uint64_t)0;                                    \
    } else if (div == 64) {                                               \
        size_t mod = size % max_send_size;                                \
        vfrag->vf_len = 64;                                               \
        vfrag->vf_size = (mod ? (size - mod) : size);                     \
        vfrag->vf_mask = ~(uint64_t)0;                                    \
    } else {                                                              \
        size_t mod = size % max_send_size;                                \
        vfrag->vf_len = div + (mod ? 1 : 0);                              \
        vfrag->vf_size = size;                                            \
        if(vfrag->vf_len == 64)                                           \
            vfrag->vf_mask = ~(uint64_t)0;                                \
        else                                                              \
            vfrag->vf_mask = (((uint64_t)1 << vfrag->vf_len) - (uint64_t)1); \
    }                                                                     \
    vfrag->vf_id = OPAL_THREAD_ADD32(&endpoint->vfrag_seq,1);             \
    vfrag->vf_offset = sendreq->req_send_offset;                          \
    vfrag->vf_max_send_size = max_send_size;                              \
    vfrag->vf_send.pval = sendreq;                                        \
    sendreq->req_vfrag = vfrag;                                           \
} while(0)
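
/*
 * Example: with max_send_size = 32K (i.e. btl_max_send_size = 32K plus the
 * fragment header) and size = 100K, div = 3 and mod = 4K, giving vf_len = 4,
 * vf_size = 100K and vf_mask = 0xf (one ack bit per fragment).  The numbers
 * are illustrative only.
 */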

/*
 * Reschedule unacked fragments.
 */

#define MCA_PML_DR_SEND_REQUEST_VFRAG_RETRANS(sendreq, vfrag)             \
do {                                                                      \
    if(((vfrag)->vf_state & MCA_PML_DR_VFRAG_RETRANS) == 0) {             \
        opal_list_append(&(sendreq)->req_retrans, (opal_list_item_t*)(vfrag)); \
        (vfrag)->vf_state |= MCA_PML_DR_VFRAG_RETRANS;                    \
    }                                                                     \
    (vfrag)->vf_state &= ~MCA_PML_DR_VFRAG_NACKED;                        \
    (vfrag)->vf_idx = 0;                                                  \
} while(0)

/*
 * Update bytes delivered on the request based on the size of the vfrag.
 */

#define MCA_PML_DR_SEND_REQUEST_SET_BYTES_DELIVERED(sendreq, vfrag, hdrlen) \
do {                                                                      \
    sendreq->req_bytes_delivered += vfrag->vf_size;                       \
} while(0)

/*
 * Attempt to process any pending requests.
 */

#define MCA_PML_DR_SEND_REQUEST_PROCESS_PENDING()                         \
do {                                                                      \
    /* advance pending requests */                                        \
    while(opal_list_get_size(&mca_pml_dr.send_pending)) {                 \
        mca_pml_dr_send_request_t* sendreq;                               \
        OPAL_THREAD_LOCK(&ompi_request_lock);                             \
        sendreq = (mca_pml_dr_send_request_t*)                            \
            opal_list_remove_first(&mca_pml_dr.send_pending);             \
        OPAL_THREAD_UNLOCK(&ompi_request_lock);                           \
        if(NULL == sendreq)                                               \
            break;                                                        \
        mca_pml_dr_send_request_schedule(sendreq);                        \
    }                                                                     \
} while (0)

/*
 * Requeue the first (eager) fragment of a message for retransmission.
 */

#define MCA_PML_DR_SEND_REQUEST_EAGER_RETRY(sendreq, vfrag)               \
do {                                                                      \
    mca_btl_base_descriptor_t *des_old, *des_new;                         \
    OPAL_OUTPUT((0, "%s:%d:%s: retransmitting eager\n", __FILE__, __LINE__, __func__)); \
    assert(sendreq->req_descriptor->des_src != NULL);                     \
                                                                          \
    OPAL_THREAD_ADD32(&sendreq->req_pipeline_depth,1);                    \
    OPAL_THREAD_ADD64(&(vfrag)->vf_pending,1);                            \
    (vfrag)->vf_state &= ~MCA_PML_DR_VFRAG_NACKED;                        \
                                                                          \
    des_old = sendreq->req_descriptor;                                    \
    mca_bml_base_alloc(vfrag->bml_btl, &des_new, des_old->des_src->seg_len); \
    sendreq->req_descriptor = des_new;                                    \
    memcpy(des_new->des_src->seg_addr.pval,                               \
           des_old->des_src->seg_addr.pval,                               \
           des_old->des_src->seg_len);                                    \
    des_new->des_flags = des_old->des_flags;                              \
    des_new->des_cbdata = des_old->des_cbdata;                            \
    des_new->des_cbfunc = des_old->des_cbfunc;                            \
    mca_bml_base_send(vfrag->bml_btl, des_new, MCA_BTL_TAG_PML);          \
} while(0)
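
/*
 * Note: each eager retry allocates a fresh descriptor, copies the payload
 * out of the descriptor cached on the request, and re-caches the new one on
 * sendreq->req_descriptor, presumably so that a later retry can copy from it
 * again.
 */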

/*
 * (Re)transmit the rendezvous header (probe) for a message.
 */

#define MCA_PML_DR_SEND_REQUEST_RNDV_PROBE(sendreq, vfrag)                \
do {                                                                      \
    mca_pml_dr_endpoint_t* endpoint = sendreq->req_endpoint;              \
    mca_bml_base_btl_t* bml_btl =                                         \
        mca_bml_base_btl_array_get_next(&endpoint->base.btl_eager);       \
    mca_btl_base_descriptor_t *des_old, *des_new;                         \
    mca_pml_dr_hdr_t *hdr;                                                \
                                                                          \
    opal_output(0, "%s:%d:%s: (re)transmitting rndv probe\n", __FILE__, __LINE__, __func__); \
    OPAL_THREAD_ADD32(&sendreq->req_pipeline_depth,1);                    \
    OPAL_THREAD_ADD64(&vfrag->vf_pending,1);                              \
    (vfrag)->vf_state &= ~MCA_PML_DR_VFRAG_NACKED;                        \
                                                                          \
    assert(sendreq->req_descriptor->des_src != NULL);                     \
    mca_bml_base_alloc(bml_btl, &des_new,                                 \
                       sizeof(mca_pml_dr_rendezvous_hdr_t));              \
    des_old = sendreq->req_descriptor;                                    \
    /* build hdr */                                                       \
    hdr = (mca_pml_dr_hdr_t*)des_new->des_src->seg_addr.pval;             \
    hdr->hdr_common.hdr_flags = 0;                                        \
    hdr->hdr_common.hdr_type = MCA_PML_DR_HDR_TYPE_RNDV;                  \
    hdr->hdr_common.hdr_dst = endpoint->dst;                              \
    hdr->hdr_common.hdr_ctx = sendreq->req_send.req_base.req_comm->c_contextid; \
    hdr->hdr_common.hdr_src = endpoint->src;                              \
    hdr->hdr_match.hdr_tag = sendreq->req_send.req_base.req_tag;          \
    hdr->hdr_match.hdr_seq = sendreq->req_send.req_base.req_sequence;     \
    hdr->hdr_match.hdr_src_ptr.pval = &sendreq->req_vfrag0;               \
    hdr->hdr_match.hdr_csum = OPAL_CSUM_ZERO;                             \
    hdr->hdr_common.hdr_vid = sendreq->req_vfrag0.vf_id;                  \
    hdr->hdr_rndv.hdr_msg_length = sendreq->req_send.req_bytes_packed;    \
    hdr->hdr_common.hdr_csum = (mca_pml_dr.enable_csum ?                  \
        opal_csum(hdr, sizeof(mca_pml_dr_rendezvous_hdr_t)) : OPAL_CSUM_ZERO); \
    des_new->des_flags = des_old->des_flags;                              \
    des_new->des_cbdata = des_old->des_cbdata;                            \
    des_new->des_cbfunc = des_old->des_cbfunc;                            \
    mca_bml_base_send(bml_btl, des_new, MCA_BTL_TAG_PML);                 \
} while(0)

/**
 * Start the specified request.
 */

int mca_pml_dr_send_request_start_buffered(
    mca_pml_dr_send_request_t* sendreq,
    mca_bml_base_btl_t* bml_btl,
    size_t size);

int mca_pml_dr_send_request_start_copy(
    mca_pml_dr_send_request_t* sendreq,
    mca_bml_base_btl_t* bml_btl,
    size_t size);

int mca_pml_dr_send_request_start_prepare(
    mca_pml_dr_send_request_t* sendreq,
    mca_bml_base_btl_t* bml_btl,
    size_t size);

int mca_pml_dr_send_request_start_rndv(
    mca_pml_dr_send_request_t* sendreq,
    mca_bml_base_btl_t* bml_btl,
    size_t size,
    int flags);

/**
 * Schedule additional fragments.
 */
int mca_pml_dr_send_request_schedule(
    mca_pml_dr_send_request_t* sendreq);

int mca_pml_dr_send_request_reschedule(
    mca_pml_dr_send_request_t* sendreq,
    mca_pml_dr_vfrag_t* vfrag);

/**
 * Acknowledgment of vfrags.
 */
void mca_pml_dr_send_request_match_ack(
    mca_btl_base_module_t* btl,
    mca_pml_dr_ack_hdr_t*);

void mca_pml_dr_send_request_rndv_ack(
    mca_btl_base_module_t* btl,
    mca_pml_dr_ack_hdr_t*);

void mca_pml_dr_send_request_frag_ack(
    mca_btl_base_module_t* btl,
    mca_pml_dr_ack_hdr_t*);

#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
#endif