
Merge pull request #602 from jithinjosepkl/pr/pml-cm-opt

Optimizations to PML-CM
This commit is contained in:
Jeff Squyres 2015-06-02 13:47:10 -05:00
parents 8bb00824b6 5ba5a9ade2
Commit a55eb5e2c6
12 changed files with 653 additions and 578 deletions
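The central optimization in this PR is a contiguous-datatype fast path: on homogeneous builds, full convertor pack/unpack setup is skipped and the user buffer is handed to the MTL directly. The self-contained C sketch below illustrates the idea only; the toy_ names are invented for this summary and are not Open MPI API.

/*
 * Hypothetical sketch of the contiguous fast path added throughout
 * this PR: when the datatype layout is contiguous (and the build is
 * homogeneous), point directly at the user buffer instead of packing
 * through a convertor.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_datatype {
    size_t size;        /* bytes per element */
    bool   contiguous;  /* layout has no gaps */
};

/* Returns the send buffer to hand to the network layer.  On the fast
 * path no copy is made, mirroring the early-return blocks added to
 * ompi_mtl_datatype_pack() and the send-request init macros. */
static const void *toy_get_send_buffer(const void *user_buf, size_t count,
                                       const struct toy_datatype *dt,
                                       size_t *len, bool *needs_free)
{
    if (dt->contiguous) {
        *len = count * dt->size;   /* local_size computed directly */
        *needs_free = false;       /* caller must not free user_buf */
        return user_buf;           /* zero-copy: no convertor, no pack */
    }
    /* Slow path (elided): prepare a convertor and pack into a scratch
     * buffer, as the pre-existing code does. */
    *len = 0;
    *needs_free = false;
    return NULL;
}

int main(void)
{
    double payload[4] = { 1, 2, 3, 4 };
    struct toy_datatype dt = { sizeof(double), true };
    size_t len;
    bool needs_free;
    const void *buf = toy_get_send_buffer(payload, 4, &dt, &len, &needs_free);
    printf("fast path: buf==payload? %d, len=%zu\n", buf == payload, len);
    return 0;
}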

View file

@@ -38,6 +38,17 @@ ompi_mtl_datatype_pack(struct opal_convertor_t *convertor,
struct iovec iov;
uint32_t iov_count = 1;
#if !(OPAL_ENABLE_HETEROGENEOUS_SUPPORT)
if (convertor->pDesc &&
opal_datatype_is_contiguous_memory_layout(convertor->pDesc,
convertor->count)) {
*freeAfter = false;
*buffer = convertor->pBaseBuf;
*buffer_len = convertor->local_size;
return OPAL_SUCCESS;
}
#endif
opal_convertor_get_packed_size(convertor, buffer_len);
*freeAfter = false;
if( 0 == *buffer_len ) {

View file

@@ -39,7 +39,7 @@
#include "ompi_config.h"
#include "mpi.h" /* needed for MPI_ANY_TAG */
#include "ompi/mca/mca.h"
#include "ompi/mca/pml/pml.h" /* for send_mode enum */
#include "ompi/mca/pml/pml_constants.h" /* for send_mode enum */
#include "ompi/request/request.h"
BEGIN_C_DECLS
@@ -425,22 +425,24 @@ typedef struct mca_mtl_base_module_t mca_mtl_base_module_t;
#define MCA_MTL_BASE_VERSION_2_0_0 \
OMPI_MCA_BASE_VERSION_2_1_0("mtl", 2, 0, 0)
OMPI_DECLSPEC extern mca_mtl_base_module_t *ompi_mtl;
/*
* macro for doing direct call / call through struct
*/
#if MCA_ompi_mtl_DIRECT_CALL
#include MCA_ompi_mtl_DIRECT_CALL_HEADER
#define OMPI_MTL_CALL_STAMP(a, b) ompi_mtl_ ## a ## _ ## b
#define OMPI_MTL_CALL_EXPANDER(a, b) OMPI_MTL_CALL_STAMP(a,b)
#define OMPI_MTL_CALL(a) OMPI_MTL_CALL_EXPANDER(MCA_ompi_mtl_DIRECT_CALL_COMPONENT, a)
#include MCA_ompi_mtl_DIRECT_CALL_HEADER
#else
#define OMPI_MTL_CALL(a) ompi_mtl->mtl_ ## a
#endif
OMPI_DECLSPEC extern mca_mtl_base_module_t *ompi_mtl;
END_C_DECLS
#endif
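The block above is the MTL's compile-time direct-call mechanism: with direct call enabled, OMPI_MTL_CALL binds the component function by token pasting; otherwise it dispatches through the module struct pointer. As a rough, self-contained illustration of the same pattern (all TOY_/toy_ names are invented here, not Open MPI API):

#include <stdio.h>

#define TOY_DIRECT_CALL 1   /* toggle to 0 for the struct-pointer path */

struct toy_module { void (*mod_ping)(void); };

static void toy_foo_ping(void)    { puts("ping (direct-bound)"); }
static void toy_struct_ping(void) { puts("ping (via struct)"); }
static struct toy_module  toy_mod = { toy_struct_ping };
static struct toy_module *toy     = &toy_mod;

#if TOY_DIRECT_CALL
/* Token pasting binds toy_foo_ping at compile time, just as
 * OMPI_MTL_CALL_STAMP does with the component name. */
#define TOY_CALL_STAMP(comp, fn)    toy_ ## comp ## _ ## fn
#define TOY_CALL_EXPANDER(comp, fn) TOY_CALL_STAMP(comp, fn)
#define TOY_CALL(fn)                TOY_CALL_EXPANDER(foo, fn)
#else
#define TOY_CALL(fn) toy->mod_ ## fn   /* indirect via module struct */
#endif

int main(void)
{
    (void) toy;        /* silence unused warning on the direct path */
    TOY_CALL(ping)();  /* expands to toy_foo_ping() or toy->mod_ping() */
    return 0;
}

The design point is that a build configured for a single MTL pays no function-pointer indirection on the hot path, which is why this PR inlines the PML-CM entry points that sit directly on top of OMPI_MTL_CALL.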

View file

@@ -57,6 +57,17 @@ static inline __opal_attribute_always_inline__ int
size_t *buffer_len = &mxm_send_req->base.data.buffer.length;
#if !(OPAL_ENABLE_HETEROGENEOUS_SUPPORT)
if (convertor->pDesc &&
opal_datatype_is_contiguous_memory_layout(convertor->pDesc,
convertor->count)) {
mxm_send_req->base.data.buffer.ptr = convertor->pBaseBuf;
mxm_send_req->base.data.buffer.length = convertor->local_size;
mxm_send_req->base.data_type = MXM_REQ_DATA_BUFFER;
return OMPI_SUCCESS;
}
#endif
opal_convertor_get_packed_size(convertor, buffer_len);
if (0 == *buffer_len) {
mxm_send_req->base.data.buffer.ptr = NULL;

View file

@@ -31,13 +31,10 @@ local_sources = \
pml_cm_cancel.c \
pml_cm_component.c \
pml_cm_component.h \
pml_cm_probe.c \
pml_cm_recv.c \
pml_cm_recvreq.h \
pml_cm_recvreq.c \
pml_cm_request.h \
pml_cm_request.c \
pml_cm_send.c \
pml_cm_sendreq.h \
pml_cm_sendreq.c \
pml_cm_start.c

View file

@@ -23,6 +23,15 @@
#include "ompi/request/request.h"
#include "ompi/mca/mtl/mtl.h"
#include "pml_cm_request.h"
#include "ompi/mca/pml/base/pml_base_recvreq.h"
#include "ompi/mca/mtl/mtl.h"
#include "pml_cm_recvreq.h"
#include "pml_cm_sendreq.h"
#include "ompi/message/message.h"
BEGIN_C_DECLS
struct mca_mtl_request_t;
@@ -54,91 +63,454 @@ OMPI_DECLSPEC extern int mca_pml_cm_progress(void);
OMPI_DECLSPEC extern int mca_pml_cm_add_comm(struct ompi_communicator_t* comm);
OMPI_DECLSPEC extern int mca_pml_cm_del_comm(struct ompi_communicator_t* comm);
OMPI_DECLSPEC extern int mca_pml_cm_irecv_init(void *buf,
size_t count,
ompi_datatype_t *datatype,
int src,
int tag,
struct ompi_communicator_t* comm,
struct ompi_request_t **request);
OMPI_DECLSPEC extern int mca_pml_cm_irecv(void *buf,
size_t count,
ompi_datatype_t *datatype,
int src,
int tag,
struct ompi_communicator_t* comm,
struct ompi_request_t **request);
OMPI_DECLSPEC extern int mca_pml_cm_recv(void *buf,
size_t count,
ompi_datatype_t *datatype,
int src,
int tag,
struct ompi_communicator_t* comm,
ompi_status_public_t* status );
OMPI_DECLSPEC extern int mca_pml_cm_isend_init(void *buf,
size_t count,
ompi_datatype_t *datatype,
int dst,
int tag,
mca_pml_base_send_mode_t mode,
struct ompi_communicator_t* comm,
struct ompi_request_t **request);
OMPI_DECLSPEC extern int mca_pml_cm_isend(void *buf,
size_t count,
ompi_datatype_t *datatype,
int dst,
int tag,
mca_pml_base_send_mode_t mode,
struct ompi_communicator_t* comm,
struct ompi_request_t **request);
OMPI_DECLSPEC extern int mca_pml_cm_send(void *buf,
size_t count,
ompi_datatype_t *datatype,
int dst,
int tag,
mca_pml_base_send_mode_t mode,
struct ompi_communicator_t* comm);
OMPI_DECLSPEC extern int mca_pml_cm_iprobe(int dst,
int tag,
struct ompi_communicator_t* comm,
int *matched,
ompi_status_public_t* status);
OMPI_DECLSPEC extern int mca_pml_cm_probe(int dst,
int tag,
struct ompi_communicator_t* comm,
ompi_status_public_t* status);
OMPI_DECLSPEC extern int mca_pml_cm_improbe(int dst,
int tag,
struct ompi_communicator_t* comm,
int *matched,
struct ompi_message_t **message,
ompi_status_public_t* status);
OMPI_DECLSPEC extern int mca_pml_cm_mprobe(int dst,
int tag,
struct ompi_communicator_t* comm,
struct ompi_message_t **message,
ompi_status_public_t* status);
OMPI_DECLSPEC extern int mca_pml_cm_imrecv(void *buf,
size_t count,
ompi_datatype_t *datatype,
struct ompi_message_t **message,
struct ompi_request_t **request);
OMPI_DECLSPEC extern int mca_pml_cm_mrecv(void *buf,
size_t count,
ompi_datatype_t *datatype,
struct ompi_message_t **message,
ompi_status_public_t* status);

__opal_attribute_always_inline__ static inline int
mca_pml_cm_irecv_init(void *addr,
size_t count,
ompi_datatype_t * datatype,
int src,
int tag,
struct ompi_communicator_t *comm,
struct ompi_request_t **request)
{
mca_pml_cm_hvy_recv_request_t *recvreq;
#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT
ompi_proc_t* ompi_proc;
#endif
MCA_PML_CM_HVY_RECV_REQUEST_ALLOC(recvreq);
if( OPAL_UNLIKELY(NULL == recvreq) ) return OMPI_ERR_OUT_OF_RESOURCE;
MCA_PML_CM_HVY_RECV_REQUEST_INIT(recvreq, ompi_proc, comm, tag, src,
datatype, addr, count, true);
*request = (ompi_request_t*) recvreq;
return OMPI_SUCCESS;
}

__opal_attribute_always_inline__ static inline int
mca_pml_cm_irecv(void *addr,
size_t count,
ompi_datatype_t * datatype,
int src,
int tag,
struct ompi_communicator_t *comm,
struct ompi_request_t **request)
{
int ret;
mca_pml_cm_thin_recv_request_t *recvreq;
#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT
ompi_proc_t* ompi_proc = NULL;
#endif
MCA_PML_CM_THIN_RECV_REQUEST_ALLOC(recvreq);
if( OPAL_UNLIKELY(NULL == recvreq) ) return OMPI_ERR_OUT_OF_RESOURCE;
MCA_PML_CM_THIN_RECV_REQUEST_INIT(recvreq,
ompi_proc,
comm,
src,
datatype,
addr,
count);
MCA_PML_CM_THIN_RECV_REQUEST_START(recvreq, comm, tag, src, ret);
if( OPAL_LIKELY(OMPI_SUCCESS == ret) ) *request = (ompi_request_t*) recvreq;
return ret;
}

__opal_attribute_always_inline__ static inline void
mca_pml_cm_recv_fast_completion(struct mca_mtl_request_t *mtl_request)
{
// Do nothing!
ompi_request_complete(mtl_request->ompi_req, true);
return;
}

__opal_attribute_always_inline__ static inline int
mca_pml_cm_recv(void *addr,
size_t count,
ompi_datatype_t * datatype,
int src,
int tag,
struct ompi_communicator_t *comm,
ompi_status_public_t * status)
{
int ret;
#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT
ompi_proc_t *ompi_proc;
#endif
opal_convertor_t convertor;
mca_pml_cm_request_t req;
mca_mtl_request_t *req_mtl =
alloca(sizeof(mca_mtl_request_t) + ompi_mtl->mtl_request_size);
req_mtl->ompi_req = &req.req_ompi;
req_mtl->completion_callback = mca_pml_cm_recv_fast_completion;
req.req_pml_type = MCA_PML_CM_REQUEST_RECV_THIN;
req.req_free_called = false;
req.req_ompi.req_complete = false;
req.req_ompi.req_complete_cb = NULL;
req.req_ompi.req_state = OMPI_REQUEST_ACTIVE;
req.req_ompi.req_status.MPI_TAG = OMPI_ANY_TAG;
req.req_ompi.req_status.MPI_ERROR = OMPI_SUCCESS;
req.req_ompi.req_status._cancelled = 0;
#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT
if( MPI_ANY_SOURCE == src ) {
ompi_proc = ompi_proc_local_proc;
} else {
ompi_proc = ompi_comm_peer_lookup( comm, src );
}
opal_convertor_copy_and_prepare_for_recv(
ompi_proc->super.proc_convertor,
&(datatype->super),
count,
addr,
0,
&convertor );
#else
opal_convertor_copy_and_prepare_for_recv(
ompi_mpi_local_convertor,
&(datatype->super),
count,
addr,
0,
&convertor );
#endif
ret = OMPI_MTL_CALL(irecv(ompi_mtl,
comm,
src,
tag,
&convertor,
req_mtl));
if( OPAL_UNLIKELY(OMPI_SUCCESS != ret) ) {
return ret;
}
ompi_request_wait_completion(&req.req_ompi);
if (NULL != status) { /* return status */
*status = req.req_ompi.req_status;
}
ret = req.req_ompi.req_status.MPI_ERROR;
return ret;
}
__opal_attribute_always_inline__ static inline int
mca_pml_cm_isend_init(void* buf,
size_t count,
ompi_datatype_t* datatype,
int dst,
int tag,
mca_pml_base_send_mode_t sendmode,
ompi_communicator_t* comm,
ompi_request_t** request)
{
mca_pml_cm_hvy_send_request_t *sendreq;
#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT
ompi_proc_t* ompi_proc;
#endif
MCA_PML_CM_HVY_SEND_REQUEST_ALLOC(sendreq, comm, dst, ompi_proc);
if (OPAL_UNLIKELY(NULL == sendreq)) return OMPI_ERR_OUT_OF_RESOURCE;
MCA_PML_CM_HVY_SEND_REQUEST_INIT(sendreq, ompi_proc, comm, tag, dst,
datatype, sendmode, true, false, buf, count);
*request = (ompi_request_t*) sendreq;
return OMPI_SUCCESS;
}
__opal_attribute_always_inline__ static inline int
mca_pml_cm_isend(void* buf,
size_t count,
ompi_datatype_t* datatype,
int dst,
int tag,
mca_pml_base_send_mode_t sendmode,
ompi_communicator_t* comm,
ompi_request_t** request)
{
int ret;
if(sendmode == MCA_PML_BASE_SEND_BUFFERED ) {
mca_pml_cm_hvy_send_request_t* sendreq;
#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT
ompi_proc_t* ompi_proc = NULL;
#endif
MCA_PML_CM_HVY_SEND_REQUEST_ALLOC(sendreq, comm, dst, ompi_proc);
if (OPAL_UNLIKELY(NULL == sendreq)) return OMPI_ERR_OUT_OF_RESOURCE;
MCA_PML_CM_HVY_SEND_REQUEST_INIT(sendreq,
ompi_proc,
comm,
tag,
dst,
datatype,
sendmode,
false,
false,
buf,
count);
MCA_PML_CM_HVY_SEND_REQUEST_START( sendreq, ret);
if (OPAL_LIKELY(OMPI_SUCCESS == ret)) *request = (ompi_request_t*) sendreq;
} else {
mca_pml_cm_thin_send_request_t* sendreq;
#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT
ompi_proc_t* ompi_proc = NULL;
#endif
MCA_PML_CM_THIN_SEND_REQUEST_ALLOC(sendreq, comm, dst, ompi_proc);
if (OPAL_UNLIKELY(NULL == sendreq)) return OMPI_ERR_OUT_OF_RESOURCE;
MCA_PML_CM_THIN_SEND_REQUEST_INIT(sendreq,
ompi_proc,
comm,
tag,
dst,
datatype,
sendmode,
buf,
count);
MCA_PML_CM_THIN_SEND_REQUEST_START(
sendreq,
comm,
tag,
dst,
sendmode,
false,
ret);
if (OPAL_LIKELY(OMPI_SUCCESS == ret)) *request = (ompi_request_t*) sendreq;
}
return ret;
}
__opal_attribute_always_inline__ static inline int
mca_pml_cm_send(void *buf,
size_t count,
ompi_datatype_t* datatype,
int dst,
int tag,
mca_pml_base_send_mode_t sendmode,
ompi_communicator_t* comm)
{
int ret = OMPI_ERROR;
ompi_proc_t * ompi_proc;
if(sendmode == MCA_PML_BASE_SEND_BUFFERED) {
mca_pml_cm_hvy_send_request_t *sendreq;
MCA_PML_CM_HVY_SEND_REQUEST_ALLOC(sendreq, comm, dst, ompi_proc);
if (OPAL_UNLIKELY(NULL == sendreq)) return OMPI_ERR_OUT_OF_RESOURCE;
MCA_PML_CM_HVY_SEND_REQUEST_INIT(sendreq,
ompi_proc,
comm,
tag,
dst,
datatype,
sendmode,
false,
false,
buf,
count);
MCA_PML_CM_HVY_SEND_REQUEST_START(sendreq, ret);
if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
MCA_PML_CM_HVY_SEND_REQUEST_RETURN(sendreq);
return ret;
}
ompi_request_free( (ompi_request_t**)&sendreq );
} else {
opal_convertor_t convertor;
#if !(OPAL_ENABLE_HETEROGENEOUS_SUPPORT)
if (opal_datatype_is_contiguous_memory_layout(&datatype->super, count)) {
convertor.remoteArch = ompi_mpi_local_convertor->remoteArch;
convertor.flags = ompi_mpi_local_convertor->flags;
convertor.master = ompi_mpi_local_convertor->master;
convertor.local_size = count * datatype->super.size;
convertor.pBaseBuf = (unsigned char*)buf + datatype->super.true_lb;
convertor.count = count;
convertor.pDesc = &datatype->super;
} else
#endif
{
ompi_proc = ompi_comm_peer_lookup(comm, dst);
opal_convertor_copy_and_prepare_for_send(
ompi_proc->super.proc_convertor,
&datatype->super, count, buf, 0,
&convertor);
}
ret = OMPI_MTL_CALL(send(ompi_mtl,
comm,
dst,
tag,
&convertor,
sendmode));
}
return ret;
}
__opal_attribute_always_inline__ static inline int
mca_pml_cm_iprobe(int src, int tag,
struct ompi_communicator_t *comm,
int *matched, ompi_status_public_t * status)
{
return OMPI_MTL_CALL(iprobe(ompi_mtl,
comm, src, tag,
matched, status));
}
__opal_attribute_always_inline__ static inline int
mca_pml_cm_probe(int src, int tag,
struct ompi_communicator_t *comm,
ompi_status_public_t * status)
{
int ret, matched = 0;
while (true) {
ret = OMPI_MTL_CALL(iprobe(ompi_mtl,
comm, src, tag,
&matched, status));
if (OMPI_SUCCESS != ret) break;
if (matched) break;
opal_progress();
}
return ret;
}
__opal_attribute_always_inline__ static inline int
mca_pml_cm_improbe(int src,
int tag,
struct ompi_communicator_t* comm,
int *matched,
struct ompi_message_t **message,
ompi_status_public_t* status)
{
return OMPI_MTL_CALL(improbe(ompi_mtl,
comm, src, tag,
matched, message,
status));
}
__opal_attribute_always_inline__ static inline int
mca_pml_cm_mprobe(int src,
int tag,
struct ompi_communicator_t* comm,
struct ompi_message_t **message,
ompi_status_public_t* status)
{
int ret, matched = 0;
while (true) {
ret = OMPI_MTL_CALL(improbe(ompi_mtl,
comm, src, tag,
&matched, message,
status));
if (OMPI_SUCCESS != ret) break;
if (matched) break;
opal_progress();
}
return ret;
}
__opal_attribute_always_inline__ static inline int
mca_pml_cm_imrecv(void *buf,
size_t count,
ompi_datatype_t *datatype,
struct ompi_message_t **message,
struct ompi_request_t **request)
{
int ret;
mca_pml_cm_thin_recv_request_t *recvreq;
#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT
ompi_proc_t* ompi_proc;
#endif
ompi_communicator_t *comm = (*message)->comm;
MCA_PML_CM_THIN_RECV_REQUEST_ALLOC(recvreq);
if( OPAL_UNLIKELY(NULL == recvreq) ) return OMPI_ERR_OUT_OF_RESOURCE;
MCA_PML_CM_THIN_RECV_REQUEST_INIT(recvreq,
ompi_proc,
comm,
(*message)->peer,
datatype,
buf,
count);
MCA_PML_CM_THIN_RECV_REQUEST_MATCHED_START(recvreq, message, ret);
if( OPAL_LIKELY(OMPI_SUCCESS == ret) ) *request = (ompi_request_t*) recvreq;
return ret;
}
__opal_attribute_always_inline__ static inline int
mca_pml_cm_mrecv(void *buf,
size_t count,
ompi_datatype_t *datatype,
struct ompi_message_t **message,
ompi_status_public_t* status)
{
int ret;
mca_pml_cm_thin_recv_request_t *recvreq;
#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT
ompi_proc_t* ompi_proc;
#endif
ompi_communicator_t *comm = (*message)->comm;
MCA_PML_CM_THIN_RECV_REQUEST_ALLOC(recvreq);
if( OPAL_UNLIKELY(NULL == recvreq) ) return OMPI_ERR_OUT_OF_RESOURCE;
MCA_PML_CM_THIN_RECV_REQUEST_INIT(recvreq,
ompi_proc,
comm,
(*message)->peer,
datatype,
buf,
count);
MCA_PML_CM_THIN_RECV_REQUEST_MATCHED_START(recvreq,
message, ret);
if( OPAL_UNLIKELY(OMPI_SUCCESS != ret) ) {
MCA_PML_CM_THIN_RECV_REQUEST_RETURN(recvreq);
return ret;
}
ompi_request_wait_completion(&recvreq->req_base.req_ompi);
if (NULL != status) { /* return status */
*status = recvreq->req_base.req_ompi.req_status;
}
ret = recvreq->req_base.req_ompi.req_status.MPI_ERROR;
ompi_request_free( (ompi_request_t**)&recvreq );
return ret;
}
OMPI_DECLSPEC extern int mca_pml_cm_start(size_t count, ompi_request_t** requests);
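Note the blocking mca_pml_cm_recv above: instead of drawing a request from the free list and returning it afterwards, it builds a throwaway request on the stack with alloca and completes it via mca_pml_cm_recv_fast_completion. A minimal, self-contained sketch of that pattern follows; every toy_ name is invented here and stands in for the MTL machinery, this is not Open MPI code.

#include <alloca.h>   /* glibc-specific; assumed available here */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct toy_request { bool complete; int status; };

/* Completion callback: the "fast" path just marks the request done,
 * mirroring mca_pml_cm_recv_fast_completion(). */
static void toy_fast_completion(struct toy_request *req)
{
    req->complete = true;
}

/* Stand-in for the transport layer: completes the request at once. */
static void toy_transport_irecv(struct toy_request *req,
                                void (*cb)(struct toy_request *))
{
    req->status = 0;
    cb(req);
}

int main(void)
{
    /* The request lives on the stack for the blocking call: no
     * free-list allocation on entry, no request free on the way out. */
    struct toy_request *req = alloca(sizeof(*req));
    memset(req, 0, sizeof(*req));

    toy_transport_irecv(req, toy_fast_completion);

    while (!req->complete) {
        /* the real code drives opal_progress() here */
    }
    printf("recv complete, status=%d\n", req->status);
    return 0;
}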

View file

@@ -1,85 +0,0 @@
/*
* Copyright (c) 2004-2006 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2012 Sandia National Laboratories. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "opal/runtime/opal_progress.h"
#include "ompi/mca/mtl/mtl.h"
#include "pml_cm.h"
int
mca_pml_cm_iprobe(int src, int tag,
struct ompi_communicator_t *comm,
int *matched, ompi_status_public_t * status)
{
return OMPI_MTL_CALL(iprobe(ompi_mtl,
comm, src, tag,
matched, status));
}
int
mca_pml_cm_probe(int src, int tag,
struct ompi_communicator_t *comm,
ompi_status_public_t * status)
{
int ret, matched = 0;
while (true) {
ret = OMPI_MTL_CALL(iprobe(ompi_mtl,
comm, src, tag,
&matched, status));
if (OMPI_SUCCESS != ret) break;
if (matched) break;
opal_progress();
}
return ret;
}
int
mca_pml_cm_improbe(int src,
int tag,
struct ompi_communicator_t* comm,
int *matched,
struct ompi_message_t **message,
ompi_status_public_t* status)
{
return OMPI_MTL_CALL(improbe(ompi_mtl,
comm, src, tag,
matched, message,
status));
}
int
mca_pml_cm_mprobe(int src,
int tag,
struct ompi_communicator_t* comm,
struct ompi_message_t **message,
ompi_status_public_t* status)
{
int ret, matched = 0;
while (true) {
ret = OMPI_MTL_CALL(improbe(ompi_mtl,
comm, src, tag,
&matched, message,
status));
if (OMPI_SUCCESS != ret) break;
if (matched) break;
opal_progress();
}
return ret;
}

View file

@@ -1,226 +0,0 @@
/*
* Copyright (c) 2004-2006 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2010-2012 Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 Sandia National Laboratories. All rights reserved.
* Copyright (c) 2013 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "opal/prefetch.h"
#include "ompi/request/request.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/communicator/communicator.h"
#include "ompi/message/message.h"
#include "pml_cm.h"
#include "pml_cm_recvreq.h"
int
mca_pml_cm_irecv_init(void *addr,
size_t count,
ompi_datatype_t * datatype,
int src,
int tag,
struct ompi_communicator_t *comm,
struct ompi_request_t **request)
{
mca_pml_cm_hvy_recv_request_t *recvreq;
ompi_proc_t* ompi_proc;
MCA_PML_CM_HVY_RECV_REQUEST_ALLOC(recvreq);
if( OPAL_UNLIKELY(NULL == recvreq) ) return OMPI_ERR_OUT_OF_RESOURCE;
MCA_PML_CM_HVY_RECV_REQUEST_INIT(recvreq, ompi_proc, comm, tag, src,
datatype, addr, count, true);
*request = (ompi_request_t*) recvreq;
return OMPI_SUCCESS;
}
int
mca_pml_cm_irecv(void *addr,
size_t count,
ompi_datatype_t * datatype,
int src,
int tag,
struct ompi_communicator_t *comm,
struct ompi_request_t **request)
{
int ret;
mca_pml_cm_thin_recv_request_t *recvreq;
ompi_proc_t* ompi_proc;
MCA_PML_CM_THIN_RECV_REQUEST_ALLOC(recvreq);
if( OPAL_UNLIKELY(NULL == recvreq) ) return OMPI_ERR_OUT_OF_RESOURCE;
MCA_PML_CM_THIN_RECV_REQUEST_INIT(recvreq,
ompi_proc,
comm,
src,
datatype,
addr,
count);
MCA_PML_CM_THIN_RECV_REQUEST_START(recvreq, comm, tag, src, ret);
if( OPAL_LIKELY(OMPI_SUCCESS == ret) ) *request = (ompi_request_t*) recvreq;
return ret;
}
void mca_pml_cm_recv_fast_completion(struct mca_mtl_request_t *mtl_request)
{
// Do nothing!
ompi_request_complete(mtl_request->ompi_req, true);
return;
}
int
mca_pml_cm_recv(void *addr,
size_t count,
ompi_datatype_t * datatype,
int src,
int tag,
struct ompi_communicator_t *comm,
ompi_status_public_t * status)
{
int ret;
ompi_proc_t *ompi_proc;
opal_convertor_t convertor;
mca_pml_cm_request_t req;
mca_mtl_request_t *req_mtl =
alloca(sizeof(mca_mtl_request_t) + ompi_mtl->mtl_request_size);
req_mtl->ompi_req = &req.req_ompi;
req_mtl->completion_callback = mca_pml_cm_recv_fast_completion;
req.req_pml_type = MCA_PML_CM_REQUEST_RECV_THIN;
req.req_free_called = false;
req.req_ompi.req_complete = false;
req.req_ompi.req_complete_cb = NULL;
req.req_ompi.req_state = OMPI_REQUEST_ACTIVE;
req.req_ompi.req_status.MPI_TAG = OMPI_ANY_TAG;
req.req_ompi.req_status.MPI_ERROR = OMPI_SUCCESS;
req.req_ompi.req_status._cancelled = 0;
if( MPI_ANY_SOURCE == src ) {
ompi_proc = ompi_proc_local_proc;
} else {
ompi_proc = ompi_comm_peer_lookup( comm, src );
}
opal_convertor_copy_and_prepare_for_recv(
ompi_proc->super.proc_convertor,
&(datatype->super),
count,
addr,
0,
&convertor );
ret = OMPI_MTL_CALL(irecv(ompi_mtl,
comm,
src,
tag,
&convertor,
req_mtl));
if( OPAL_UNLIKELY(OMPI_SUCCESS != ret) ) {
return ret;
}
ompi_request_wait_completion(&req.req_ompi);
if (NULL != status) { /* return status */
*status = req.req_ompi.req_status;
}
ret = req.req_ompi.req_status.MPI_ERROR;
return ret;
}
int
mca_pml_cm_imrecv(void *buf,
size_t count,
ompi_datatype_t *datatype,
struct ompi_message_t **message,
struct ompi_request_t **request)
{
int ret;
mca_pml_cm_thin_recv_request_t *recvreq;
ompi_proc_t* ompi_proc;
ompi_communicator_t *comm = (*message)->comm;
int peer = (*message)->peer;
MCA_PML_CM_THIN_RECV_REQUEST_ALLOC(recvreq);
if( OPAL_UNLIKELY(NULL == recvreq) ) return OMPI_ERR_OUT_OF_RESOURCE;
MCA_PML_CM_THIN_RECV_REQUEST_INIT(recvreq,
ompi_proc,
comm,
peer,
datatype,
buf,
count);
MCA_PML_CM_THIN_RECV_REQUEST_MATCHED_START(recvreq, message, ret);
if( OPAL_LIKELY(OMPI_SUCCESS == ret) ) *request = (ompi_request_t*) recvreq;
return ret;
}
int
mca_pml_cm_mrecv(void *buf,
size_t count,
ompi_datatype_t *datatype,
struct ompi_message_t **message,
ompi_status_public_t* status)
{
int ret;
mca_pml_cm_thin_recv_request_t *recvreq;
ompi_proc_t* ompi_proc;
ompi_communicator_t *comm = (*message)->comm;
int peer = (*message)->peer;
MCA_PML_CM_THIN_RECV_REQUEST_ALLOC(recvreq);
if( OPAL_UNLIKELY(NULL == recvreq) ) return OMPI_ERR_OUT_OF_RESOURCE;
MCA_PML_CM_THIN_RECV_REQUEST_INIT(recvreq,
ompi_proc,
comm,
peer,
datatype,
buf,
count);
MCA_PML_CM_THIN_RECV_REQUEST_MATCHED_START(recvreq,
message, ret);
if( OPAL_UNLIKELY(OMPI_SUCCESS != ret) ) {
MCA_PML_CM_THIN_RECV_REQUEST_RETURN(recvreq);
return ret;
}
ompi_request_wait_completion(&recvreq->req_base.req_ompi);
if (NULL != status) { /* return status */
*status = recvreq->req_base.req_ompi.req_status;
}
ret = recvreq->req_base.req_ompi.req_status.MPI_ERROR;
ompi_request_free( (ompi_request_t**)&recvreq );
return ret;
}

View file

@@ -85,6 +85,7 @@ do { \
* @param comm (IN) Communicator.
* @param persistent (IN) Is this a persistent request.
*/
#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT
#define MCA_PML_CM_THIN_RECV_REQUEST_INIT( request, \
ompi_proc, \
comm, \
@@ -115,7 +116,35 @@ do { \
0, \
&(request)->req_base.req_convertor ); \
} while(0)
#else
#define MCA_PML_CM_THIN_RECV_REQUEST_INIT( request, \
ompi_proc, \
comm, \
src, \
datatype, \
addr, \
count ) \
do { \
OMPI_REQUEST_INIT(&(request)->req_base.req_ompi, false); \
(request)->req_base.req_ompi.req_mpi_object.comm = comm; \
(request)->req_base.req_pml_complete = false; \
(request)->req_base.req_free_called = false; \
request->req_base.req_comm = comm; \
request->req_base.req_datatype = datatype; \
OBJ_RETAIN(comm); \
OBJ_RETAIN(datatype); \
\
opal_convertor_copy_and_prepare_for_recv( \
ompi_mpi_local_convertor, \
&(datatype->super), \
count, \
addr, \
0, \
&(request)->req_base.req_convertor ); \
} while(0)
#endif
#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT
#define MCA_PML_CM_HVY_RECV_REQUEST_INIT( request, \
ompi_proc, \
comm, \
@@ -152,7 +181,39 @@ do { \
0, \
&(request)->req_base.req_convertor ); \
} while(0)
#else
#define MCA_PML_CM_HVY_RECV_REQUEST_INIT( request, \
ompi_proc, \
comm, \
tag, \
src, \
datatype, \
addr, \
count, \
persistent) \
do { \
OMPI_REQUEST_INIT(&(request)->req_base.req_ompi, persistent); \
(request)->req_base.req_ompi.req_mpi_object.comm = comm; \
(request)->req_base.req_pml_complete = OPAL_INT_TO_BOOL(persistent); \
(request)->req_base.req_free_called = false; \
request->req_base.req_comm = comm; \
request->req_base.req_datatype = datatype; \
request->req_tag = tag; \
request->req_peer = src; \
request->req_addr = addr; \
request->req_count = count; \
OBJ_RETAIN(comm); \
OBJ_RETAIN(datatype); \
\
opal_convertor_copy_and_prepare_for_recv( \
ompi_mpi_local_convertor, \
&(datatype->super), \
count, \
addr, \
0, \
&(request)->req_base.req_convertor ); \
} while(0)
#endif
/**
* Start an initialized request.
@@ -315,7 +376,6 @@ do { \
}
extern void mca_pml_cm_recv_request_completion(struct mca_mtl_request_t *mtl_request);
extern void mca_pml_cm_recv_fast_completion(struct mca_mtl_request_t *mtl_request);
#endif
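The paired #if/#else macro definitions above are the recurring pattern of this PR: each request-init macro gains a homogeneous-build variant that hardwires ompi_mpi_local_convertor, selected entirely by the preprocessor so the hot path carries no runtime branch. A toy reduction of the pattern, with all names invented for illustration:

#include <stdio.h>

#define TOY_HETEROGENEOUS 0   /* stand-in for OPAL_ENABLE_HETEROGENEOUS_SUPPORT */

struct toy_request { int arch; int ready; };

#if TOY_HETEROGENEOUS
/* Heterogeneous build: record the peer's architecture per request. */
#define TOY_REQUEST_INIT(req, peer_arch) \
    do { (req)->arch = (peer_arch); (req)->ready = 1; } while (0)
#else
/* Homogeneous build: the peer architecture is known to match the
 * local one, so the lookup is compiled away entirely. */
#define TOY_REQUEST_INIT(req, peer_arch) \
    do { (req)->arch = 0; (req)->ready = 1; } while (0)
#endif

int main(void)
{
    struct toy_request r;
    TOY_REQUEST_INIT(&r, 42);
    printf("arch=%d ready=%d\n", r.arch, r.ready);
    return 0;
}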

View file

@@ -1,170 +0,0 @@
/*
* Copyright (c) 2004-2006 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2013 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "opal/prefetch.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/communicator/communicator.h"
#include "pml_cm.h"
#include "pml_cm_sendreq.h"
int
mca_pml_cm_isend_init(void* buf,
size_t count,
ompi_datatype_t* datatype,
int dst,
int tag,
mca_pml_base_send_mode_t sendmode,
ompi_communicator_t* comm,
ompi_request_t** request)
{
mca_pml_cm_hvy_send_request_t *sendreq;
ompi_proc_t* ompi_proc;
MCA_PML_CM_HVY_SEND_REQUEST_ALLOC(sendreq, comm, dst, ompi_proc);
if (OPAL_UNLIKELY(NULL == sendreq)) return OMPI_ERR_OUT_OF_RESOURCE;
MCA_PML_CM_HVY_SEND_REQUEST_INIT(sendreq, ompi_proc, comm, tag, dst,
datatype, sendmode, true, false, buf, count);
*request = (ompi_request_t*) sendreq;
return OMPI_SUCCESS;
}
int
mca_pml_cm_isend(void* buf,
size_t count,
ompi_datatype_t* datatype,
int dst,
int tag,
mca_pml_base_send_mode_t sendmode,
ompi_communicator_t* comm,
ompi_request_t** request)
{
int ret;
if(sendmode == MCA_PML_BASE_SEND_BUFFERED ) {
mca_pml_cm_hvy_send_request_t* sendreq;
ompi_proc_t* ompi_proc;
MCA_PML_CM_HVY_SEND_REQUEST_ALLOC(sendreq, comm, dst, ompi_proc);
if (OPAL_UNLIKELY(NULL == sendreq)) return OMPI_ERR_OUT_OF_RESOURCE;
MCA_PML_CM_HVY_SEND_REQUEST_INIT(sendreq,
ompi_proc,
comm,
tag,
dst,
datatype,
sendmode,
false,
false,
buf,
count);
MCA_PML_CM_HVY_SEND_REQUEST_START( sendreq, ret);
if (OPAL_LIKELY(OMPI_SUCCESS == ret)) *request = (ompi_request_t*) sendreq;
} else {
mca_pml_cm_thin_send_request_t* sendreq;
ompi_proc_t* ompi_proc;
MCA_PML_CM_THIN_SEND_REQUEST_ALLOC(sendreq, comm, dst, ompi_proc);
if (OPAL_UNLIKELY(NULL == sendreq)) return OMPI_ERR_OUT_OF_RESOURCE;
MCA_PML_CM_THIN_SEND_REQUEST_INIT(sendreq,
ompi_proc,
comm,
tag,
dst,
datatype,
sendmode,
buf,
count);
MCA_PML_CM_THIN_SEND_REQUEST_START(
sendreq,
comm,
tag,
dst,
sendmode,
false,
ret);
if (OPAL_LIKELY(OMPI_SUCCESS == ret)) *request = (ompi_request_t*) sendreq;
}
return ret;
}
int
mca_pml_cm_send(void *buf,
size_t count,
ompi_datatype_t* datatype,
int dst,
int tag,
mca_pml_base_send_mode_t sendmode,
ompi_communicator_t* comm)
{
int ret = OMPI_ERROR;
if(sendmode == MCA_PML_BASE_SEND_BUFFERED) {
mca_pml_cm_hvy_send_request_t *sendreq;
ompi_proc_t * ompi_proc;
MCA_PML_CM_HVY_SEND_REQUEST_ALLOC(sendreq, comm, dst, ompi_proc);
if (OPAL_UNLIKELY(NULL == sendreq)) return OMPI_ERR_OUT_OF_RESOURCE;
MCA_PML_CM_HVY_SEND_REQUEST_INIT(sendreq,
ompi_proc,
comm,
tag,
dst,
datatype,
sendmode,
false,
false,
buf,
count);
MCA_PML_CM_HVY_SEND_REQUEST_START(sendreq, ret);
if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
MCA_PML_CM_HVY_SEND_REQUEST_RETURN(sendreq);
return ret;
}
ompi_request_free( (ompi_request_t**)&sendreq );
} else {
opal_convertor_t convertor;
ompi_proc_t *ompi_proc = ompi_comm_peer_lookup(comm, dst);
opal_convertor_copy_and_prepare_for_send(
ompi_proc->super.proc_convertor,
&datatype->super, count, buf, 0,
&convertor);
ret = OMPI_MTL_CALL(send(ompi_mtl,
comm,
dst,
tag,
&convertor,
sendmode));
}
return ret;
}

View file

@@ -59,6 +59,7 @@ typedef struct mca_pml_cm_hvy_send_request_t mca_pml_cm_hvy_send_request_t;
OBJ_CLASS_DECLARATION(mca_pml_cm_hvy_send_request_t);
#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT
#define MCA_PML_CM_THIN_SEND_REQUEST_ALLOC(sendreq, comm, dst, \
ompi_proc) \
do { \
@@ -74,8 +75,20 @@ do { \
sendreq->req_mtl.completion_callback = mca_pml_cm_send_request_completion; \
} \
} while(0)
#else
#define MCA_PML_CM_THIN_SEND_REQUEST_ALLOC(sendreq, comm, dst, \
ompi_proc) \
do { \
sendreq = (mca_pml_cm_thin_send_request_t*) \
opal_free_list_wait (&mca_pml_base_send_requests); \
sendreq->req_send.req_base.req_pml_type = MCA_PML_CM_REQUEST_SEND_THIN; \
sendreq->req_mtl.ompi_req = (ompi_request_t*) sendreq; \
sendreq->req_mtl.completion_callback = mca_pml_cm_send_request_completion; \
} while(0)
#endif
#if (OPAL_ENABLE_HETEROGENEOUS_SUPPORT)
#define MCA_PML_CM_HVY_SEND_REQUEST_ALLOC(sendreq, comm, dst, \
ompi_proc) \
{ \
@@ -90,8 +103,19 @@ do { \
sendreq->req_mtl.completion_callback = mca_pml_cm_send_request_completion; \
} \
}
#else
#define MCA_PML_CM_HVY_SEND_REQUEST_ALLOC(sendreq, comm, dst, \
ompi_proc) \
{ \
sendreq = (mca_pml_cm_hvy_send_request_t*) \
opal_free_list_wait (&mca_pml_base_send_requests); \
sendreq->req_send.req_base.req_pml_type = MCA_PML_CM_REQUEST_SEND_HEAVY; \
sendreq->req_mtl.ompi_req = (ompi_request_t*) sendreq; \
sendreq->req_mtl.completion_callback = mca_pml_cm_send_request_completion; \
}
#endif
#if (OPAL_ENABLE_HETEROGENEOUS_SUPPORT)
#define MCA_PML_CM_SEND_REQUEST_INIT_COMMON(req_send, \
ompi_proc, \
comm, \
@@ -121,6 +145,52 @@ do { \
(req_send)->req_base.req_free_called = false; \
}
#else
#define MCA_PML_CM_SEND_REQUEST_INIT_COMMON(req_send, \
ompi_proc, \
comm, \
tag, \
datatype, \
sendmode, \
buf, \
count) \
{ \
OBJ_RETAIN(comm); \
OBJ_RETAIN(datatype); \
(req_send)->req_base.req_comm = comm; \
(req_send)->req_base.req_datatype = datatype; \
if (opal_datatype_is_contiguous_memory_layout(&datatype->super, count)) { \
(req_send)->req_base.req_convertor.remoteArch = \
ompi_mpi_local_convertor->remoteArch; \
(req_send)->req_base.req_convertor.flags = \
ompi_mpi_local_convertor->flags; \
(req_send)->req_base.req_convertor.master = \
ompi_mpi_local_convertor->master; \
(req_send)->req_base.req_convertor.local_size = \
count * datatype->super.size; \
(req_send)->req_base.req_convertor.pBaseBuf = \
(unsigned char*)buf + datatype->super.true_lb; \
(req_send)->req_base.req_convertor.count = count; \
(req_send)->req_base.req_convertor.pDesc = &datatype->super; \
} else { \
opal_convertor_copy_and_prepare_for_send( \
ompi_mpi_local_convertor, \
&(datatype->super), \
count, \
buf, \
0, \
&(req_send)->req_base.req_convertor ); \
} \
(req_send)->req_base.req_ompi.req_mpi_object.comm = comm; \
(req_send)->req_base.req_ompi.req_status.MPI_SOURCE = \
comm->c_my_rank; \
(req_send)->req_base.req_ompi.req_status.MPI_TAG = tag; \
(req_send)->req_base.req_ompi.req_status._ucount = count; \
(req_send)->req_send_mode = sendmode; \
(req_send)->req_base.req_free_called = false; \
}
#endif
#define MCA_PML_CM_HVY_SEND_REQUEST_INIT( sendreq, \
ompi_proc, \
comm, \

View file

@@ -66,6 +66,7 @@
#include "ompi_config.h"
#include "ompi/mca/mca.h"
#include "mpi.h" /* needed for MPI_ANY_TAG */
#include "ompi/mca/pml/pml_constants.h"
BEGIN_C_DECLS
@@ -76,20 +77,6 @@ BEGIN_C_DECLS
typedef uint64_t mca_pml_sequence_t;
struct ompi_proc_t;
typedef enum {
MCA_PML_BASE_SEND_SYNCHRONOUS,
MCA_PML_BASE_SEND_COMPLETE,
MCA_PML_BASE_SEND_BUFFERED,
MCA_PML_BASE_SEND_READY,
MCA_PML_BASE_SEND_STANDARD,
MCA_PML_BASE_SEND_SIZE
} mca_pml_base_send_mode_t;
#define OMPI_ANY_TAG MPI_ANY_TAG
#define OMPI_ANY_SOURCE MPI_ANY_SOURCE
#define OMPI_PROC_NULL MPI_PROC_NULL
/**
* MCA->PML Called by MCA framework to initialize the component.
*

ompi/mca/pml/pml_constants.h (new file, 46 lines)
View file

@@ -0,0 +1,46 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2006 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2015 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2011 Sandia National Laboratories. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef MCA_PML_CONSTANTS_H
#define MCA_PML_CONSTANTS_H
#include "ompi_config.h"
#include "ompi/mca/mca.h"
#include "mpi.h" /* needed for MPI_ANY_TAG */
typedef enum {
MCA_PML_BASE_SEND_SYNCHRONOUS,
MCA_PML_BASE_SEND_COMPLETE,
MCA_PML_BASE_SEND_BUFFERED,
MCA_PML_BASE_SEND_READY,
MCA_PML_BASE_SEND_STANDARD,
MCA_PML_BASE_SEND_SIZE
} mca_pml_base_send_mode_t;
#define OMPI_ANY_TAG MPI_ANY_TAG
#define OMPI_ANY_SOURCE MPI_ANY_SOURCE
#define OMPI_PROC_NULL MPI_PROC_NULL
struct ompi_proc_t;
#endif /* MCA_PML_CONSTANTS_H */