
- cleanup of some of the C bindings

- for the threaded case - cleanup of the event library's progress thread
- cleanup of request handling for persistent sends
- added support for buffered sends (see the usage sketch below)
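The user-visible side of the new buffered-send support is the standard MPI buffer API that later hunks in this commit wire up (MPI_Buffer_attach, MPI_Bsend, MPI_Buffer_detach). A minimal usage sketch (standard MPI calls, not part of this diff; run with at least two processes):

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, size, payload = 42, packsize, bufsize;
    void *buffer;
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (rank == 0 && size > 1) {
        /* size the attached buffer: packed size of the message plus the
         * per-message bookkeeping overhead */
        MPI_Pack_size(1, MPI_INT, MPI_COMM_WORLD, &packsize);
        bufsize = packsize + MPI_BSEND_OVERHEAD;
        buffer = malloc(bufsize);

        MPI_Buffer_attach(buffer, bufsize);
        MPI_Bsend(&payload, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
        /* detach blocks until all buffered sends have been delivered */
        MPI_Buffer_detach(&buffer, &bufsize);
        free(buffer);
    } else if (rank == 1) {
        MPI_Recv(&payload, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
        printf("received %d\n", payload);
    }

    MPI_Finalize();
    return 0;
}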

This commit was SVN r1461.
This commit is contained in:
Tim Woodall 2004-06-24 16:47:00 +00:00
parent 9870766352
commit 2ce7ca725b
104 changed files with 783 additions and 242 deletions

View file

@ -100,7 +100,7 @@ extern ompi_communicator_t *ompi_mpi_comm_parent;
*/
static inline int ompi_comm_invalid(ompi_communicator_t* comm)
{
if ( comm->c_flags & OMPI_COMM_ISFREED )
if ((NULL == comm) || (MPI_COMM_NULL == comm) || (comm->c_flags & OMPI_COMM_ISFREED ))
return true;
else
return false;

View file

@ -225,7 +225,7 @@ extern "C" {
* may not return (e.g., for MPI_ERRORS_ARE_FATAL).
*/
int ompi_errhandler_invoke(ompi_errhandler_t *errhandler, void *mpi_object,
int err_code, char *message);
int err_code, const char *message);
/**

View file

@ -11,7 +11,7 @@
int ompi_errhandler_invoke(ompi_errhandler_t *errhandler, void *mpi_object,
int err_code, char *message)
int err_code, const char *message)
{
int fortran_handle;
ompi_communicator_t *comm;

View file

@ -131,11 +131,12 @@ struct ompi_event_list ompi_signalqueue;
struct ompi_event_list ompi_eventqueue;
static struct timeval ompi_event_tv;
ompi_mutex_t ompi_event_lock;
static int ompi_event_inited;
#if OMPI_HAVE_THREADS
ompi_thread_t ompi_event_thread;
ompi_event_t ompi_event_pipe_event;
int ompi_event_pipe[2];
int ompi_event_pipe_signalled;
static ompi_thread_t ompi_event_thread;
static ompi_event_t ompi_event_pipe_event;
static int ompi_event_pipe[2];
static int ompi_event_pipe_signalled;
#endif
static int
@ -198,13 +199,12 @@ static void ompi_event_pipe_handler(int sd, short flags, void* user)
int
ompi_event_init(void)
{
static int inited = false;
int i;
#if OMPI_HAVE_THREADS
int rc;
#endif
if(inited)
if(ompi_event_inited++ != 0)
return OMPI_SUCCESS;
ompi_event_sigcb = NULL;
@ -253,10 +253,26 @@ ompi_event_init(void)
log_to(stderr);
log_debug_cmd(LOG_MISC, 80);
#endif
inited = true;
return OMPI_SUCCESS;
}
int ompi_event_fini(void)
{
#if OMPI_HAVE_THREADS
if(ompi_event_inited && --ompi_event_inited == 0) {
if(ompi_event_pipe_signalled == 0) {
unsigned char byte = 0;
if(write(ompi_event_pipe[1], &byte, 1) != 1)
ompi_output(0, "ompi_event_add: write() to ompi_event_pipe[1] failed with errno=%d\n", errno);
ompi_event_pipe_signalled++;
}
ompi_thread_join(&ompi_event_thread, NULL);
}
#endif
return OMPI_SUCCESS;
}
int
ompi_event_haveevents(void)
{
@ -315,7 +331,7 @@ ompi_event_loop(int flags)
}
done = 0;
while (!done) {
while (!done && ompi_event_inited) {
while (ompi_event_gotsig) {
ompi_event_gotsig = 0;
if (ompi_event_sigcb) {
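The new ompi_event_fini() above shuts down the progress thread by writing a byte to the event pipe (so the thread wakes from its blocking wait) and then joining it. A minimal, generic sketch of that wake-and-join pattern, using plain POSIX pipes and pthreads rather than the Open MPI event API (all names here are illustrative):

/* progress thread blocks on a pipe; shutdown writes a byte and joins.
 * Compile with -lpthread. */
#include <pthread.h>
#include <unistd.h>
#include <stdio.h>

static int wake_pipe[2];

static void *progress_thread(void *arg)
{
    unsigned char byte;
    /* a real progress thread would also dispatch timer and I/O events here */
    while (read(wake_pipe[0], &byte, 1) == 1) {
        if (byte == 0)            /* 0 is used as the shutdown signal */
            break;
    }
    return NULL;
}

int main(void)
{
    pthread_t thread;
    unsigned char byte = 0;

    if (pipe(wake_pipe) != 0)
        return 1;
    pthread_create(&thread, NULL, progress_thread, NULL);

    /* ... the library runs; other writes to the pipe just wake the loop ... */

    /* shutdown: signal the pipe so the blocked thread wakes, then join it */
    if (write(wake_pipe[1], &byte, 1) != 1)
        perror("write");
    pthread_join(thread, NULL);

    close(wake_pipe[0]);
    close(wake_pipe[1]);
    return 0;
}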

View file

@ -143,6 +143,7 @@ struct ompi_eventop {
int ompi_event_init(void);
int ompi_event_fini(void);
int ompi_event_dispatch(void);
int ompi_event_loop(int);

View file

@ -23,7 +23,9 @@ enum {
OMPI_ERR_WOULD_BLOCK = -13,
OMPI_ERR_IN_ERRNO = -14,
OMPI_ERR_UNREACH = -15,
OMPI_ERR_NOT_FOUND = -16
OMPI_ERR_NOT_FOUND = -16,
OMPI_ERR_BUFFER = -17, /* equivalent to MPI_ERR_BUFFER */
OMPI_ERR_REQUEST = -18 /* equivalent to MPI_ERR_REQUEST */
};
#endif /* OMPI_CONSTANTS_H */

View file

@ -18,6 +18,13 @@ static int mca_allocator_num_buckets;
int mca_allocator_bucket_finalize(struct mca_allocator_t* allocator)
{
mca_allocator_bucket_cleanup(allocator);
free(allocator);
return(OMPI_SUCCESS);
}
struct mca_allocator_t* mca_allocator_bucket_module_init(
bool *allow_multi_user_threads,
mca_allocator_segment_alloc_fn_t segment_alloc,
@ -38,7 +45,8 @@ struct mca_allocator_t* mca_allocator_bucket_module_init(
allocator->super.alc_alloc = mca_allocator_bucket_alloc_wrapper;
allocator->super.alc_realloc = mca_allocator_bucket_realloc;
allocator->super.alc_free = mca_allocator_bucket_free;
allocator->super.alc_finalize = mca_allocator_bucket_cleanup;
allocator->super.alc_return = mca_allocator_bucket_cleanup;
allocator->super.alc_finalize = mca_allocator_bucket_finalize;
return((mca_allocator_t *) allocator);
}

View file

@ -317,7 +317,8 @@ int mca_allocator_bucket_cleanup(mca_allocator_t * mem)
segment = *segment_header;
*segment_header = segment->next_segment;
/* free the memory */
mem_options->free_mem_fn(segment);
if(mem_options->free_mem_fn)
mem_options->free_mem_fn(segment);
} else {
/* go to next segment */
segment_header = &((*segment_header)->next_segment);

View file

@ -189,6 +189,17 @@ extern "C" {
*/
int mca_allocator_bucket_cleanup(mca_allocator_t * mem);
/**
* Cleanup all resources held by this allocator.
*
* @param mem_options A pointer to the appropriate struct for the area of
* memory.
*
* @retval None
*
*/
int mca_allocator_bucket_finalize(mca_allocator_t * mem);
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif

View file

@ -21,6 +21,11 @@ typedef void* (*mca_mpool_base_address_fn_t)(void);
*/
typedef void* (*mca_mpool_base_alloc_fn_t)(size_t size, size_t align);
/**
* allocate function typedef
*/
typedef void* (*mca_mpool_base_alloc_and_register_fn_t)(size_t size, size_t align, void* user);
/**
* realloc function typedef
*/
@ -34,12 +39,12 @@ typedef void (*mca_mpool_base_free_fn_t)(void *);
/**
* register memory
*/
typedef void (*mca_mpool_base_register_fn_t)(void * addr, size_t size, void* user);
typedef int (*mca_mpool_base_register_fn_t)(void * addr, size_t size, void* user);
/**
* deregister memory
*/
typedef void (*mca_mpool_base_deregister_fn_t)(void * addr, size_t size);
typedef int (*mca_mpool_base_deregister_fn_t)(void * addr, size_t size);
/**
* finalize

View file

@ -14,10 +14,12 @@ AM_CPPFLAGS = -I$(top_builddir)/src
headers = \
base.h \
pml_base_bsend.h \
pml_base_request.h
libmca_pml_base_la_SOURCES = \
$(headers) \
pml_base_bsend.c \
pml_base_close.c \
pml_base_request.c \
pml_base_open.c \

248
src/mca/pml/base/pml_base_bsend.c Normal file
View file

@ -0,0 +1,248 @@
#include "mca/allocator/base/base.h"
#include "mca/allocator/allocator.h"
#include "mca/base/mca_base_param.h"
#include "mca/pml/pml.h"
#include "mca/pml/base/pml_base_request.h"
#include "mca/ptl/base/ptl_base_sendreq.h"
#include "pml_base_bsend.h"
static ompi_mutex_t mca_pml_bsend_mutex; /* lock for thread safety */
static ompi_condition_t mca_pml_bsend_condition; /* condition variable to block on detach */
static mca_allocator_base_module_t* mca_pml_bsend_allocator_component;
static mca_allocator_t* mca_pml_bsend_allocator; /* sub-allocator to manage users buffer */
static unsigned char *mca_pml_bsend_base; /* base address of users buffer */
static unsigned char *mca_pml_bsend_addr; /* current offset into users buffer */
static size_t mca_pml_bsend_size; /* size of users buffer */
static size_t mca_pml_bsend_count; /* number of outstanding requests */
static size_t mca_pml_bsend_pagesz; /* mmap page size */
static int mca_pml_bsend_pagebits; /* number of bits in pagesz */
/*
* Routine to return pages to sub-allocator as needed
*/
static void* mca_pml_bsend_alloc_segment(size_t* size_inout)
{
void *addr;
size_t size = *size_inout;
size_t pages = 1;
/* determine number of pages to allocate */
while(size > mca_pml_bsend_pagesz) {
size >>= mca_pml_bsend_pagebits;
pages++;
}
if(mca_pml_bsend_addr + size > mca_pml_bsend_base + mca_pml_bsend_size) {
if( mca_pml_bsend_addr + *size_inout <= mca_pml_bsend_base + mca_pml_bsend_size ) {
size = *size_inout;
} else {
THREAD_UNLOCK(&mca_pml_bsend_mutex);
return NULL;
}
}
addr = mca_pml_bsend_addr;
mca_pml_bsend_addr += size;
*size_inout = size;
return addr;
}
/*
* One time initialization at startup
*/
int mca_pml_base_bsend_init(bool* thread_safe)
{
int id = mca_base_param_register_string("pml", "base", "bsend_allocator", NULL, "bucket");
mca_allocator_t *allocator;
char *name;
size_t tmp;
/* initialize static objects */
OBJ_CONSTRUCT(&mca_pml_bsend_mutex, ompi_mutex_t);
OBJ_CONSTRUCT(&mca_pml_bsend_condition, ompi_condition_t);
/* lookup name of the allocator to use for buffered sends */
mca_base_param_lookup_string(id, &name);
if(NULL == (mca_pml_bsend_allocator_component = mca_allocator_component_lookup(name))) {
free(name);
return OMPI_ERR_BUFFER;
}
free(name);
/* try to create an instance of the allocator - to determine thread safety level */
allocator = mca_pml_bsend_allocator_component->allocator_init(thread_safe, mca_pml_bsend_alloc_segment, NULL);
if(NULL == allocator) {
return OMPI_ERR_BUFFER;
}
allocator->alc_finalize(allocator);
/* determine page size */
tmp = mca_pml_bsend_pagesz = sysconf(_SC_PAGESIZE);
mca_pml_bsend_pagebits = 0;
while( tmp != 0 ) {
tmp >>= 1;
mca_pml_bsend_pagebits++;
}
return OMPI_SUCCESS;
}
/*
* One-time cleanup at shutdown - release any resources.
*/
int mca_pml_base_bsend_fini()
{
if(NULL != mca_pml_bsend_allocator)
mca_pml_bsend_allocator->alc_finalize(mca_pml_bsend_allocator);
mca_pml_bsend_allocator = NULL;
OBJ_DESTRUCT(&mca_pml_bsend_condition);
OBJ_DESTRUCT(&mca_pml_bsend_mutex);
return OMPI_SUCCESS;
}
/*
* User-level call to attach buffer.
*/
int mca_pml_base_bsend_attach(void* addr, int size)
{
bool thread_safe;
if(NULL == addr || size <= 0) {
return OMPI_ERR_BUFFER;
}
/* check for buffer already attached */
THREAD_LOCK(&mca_pml_bsend_mutex);
if(NULL != mca_pml_bsend_allocator) {
THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_ERR_BUFFER;
}
/* try to create an instance of the allocator - to determine thread safety level */
mca_pml_bsend_allocator = mca_pml_bsend_allocator_component->allocator_init(&thread_safe, mca_pml_bsend_alloc_segment, NULL);
if(NULL == mca_pml_bsend_allocator) {
THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_ERR_BUFFER;
}
/* setup local variables */
mca_pml_bsend_base = addr;
mca_pml_bsend_addr = addr;
mca_pml_bsend_size = size;
mca_pml_bsend_count = 0;
THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_SUCCESS;
}
/*
* User-level call to detach buffer
*/
int mca_pml_base_bsend_detach(void* addr, int* size)
{
THREAD_LOCK(&mca_pml_bsend_mutex);
/* is buffer attached */
if(NULL == mca_pml_bsend_allocator) {
THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_ERR_BUFFER;
}
/* wait on any pending requests */
while(mca_pml_bsend_count != 0)
ompi_condition_wait(&mca_pml_bsend_condition, &mca_pml_bsend_mutex);
/* free resources associated with the allocator */
mca_pml_bsend_allocator->alc_finalize(mca_pml_bsend_allocator);
mca_pml_bsend_allocator = NULL;
/* return current settings */
if(NULL != addr)
*((void**)addr) = mca_pml_bsend_base;
if(NULL != size)
*size = mca_pml_bsend_size;
/* reset local variables */
mca_pml_bsend_base = NULL;
mca_pml_bsend_addr = NULL;
mca_pml_bsend_size = 0;
mca_pml_bsend_count = 0;
THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_SUCCESS;
}
/*
* Initialize a request for use w/ buffered send
*/
int mca_pml_base_bsend_request_init(ompi_request_t* request, bool persistent)
{
mca_ptl_base_send_request_t* sendreq = (mca_ptl_base_send_request_t*)request;
struct iovec iov;
void* buf;
int rc;
THREAD_LOCK(&mca_pml_bsend_mutex);
if(NULL == mca_pml_bsend_addr) {
THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_ERR_BUFFER;
}
/* allocate a buffer to hold packed message */
buf = mca_pml_bsend_allocator->alc_alloc(mca_pml_bsend_allocator, sendreq->req_bytes_packed, 0);
if(NULL == buf) {
THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_ERR_BUFFER;
}
/* pack users message into buffer */
iov.iov_base = buf;
iov.iov_len = sendreq->req_bytes_packed;
if((rc = ompi_convertor_pack(&sendreq->req_convertor, &iov, 1)) < 0) {
mca_pml_bsend_allocator->alc_free(mca_pml_bsend_allocator, buf);
THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_ERROR;
}
/* setup convertor to reflect contiguous buffer */
if((rc = ompi_convertor_init_for_send(&sendreq->req_convertor, 0, MPI_BYTE, iov.iov_len, iov.iov_base, 0)) != OMPI_SUCCESS) {
mca_pml_bsend_allocator->alc_free(mca_pml_bsend_allocator, buf);
THREAD_UNLOCK(&mca_pml_bsend_mutex);
return rc;
}
/* increment count of pending requests */
mca_pml_bsend_count++;
/* set flag indicating mpi layer is done */
sendreq->super.req_persistent = persistent;
sendreq->super.req_mpi_done = true;
THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_SUCCESS;
}
/*
* Request completed - free buffer and decrement pending count
*/
int mca_pml_base_bsend_request_fini(ompi_request_t* request)
{
mca_ptl_base_send_request_t* sendreq = (mca_ptl_base_send_request_t*)request;
/* remove from list of pending requests */
THREAD_LOCK(&mca_pml_bsend_mutex);
/* free buffer */
mca_pml_bsend_allocator->alc_free(mca_pml_bsend_allocator, sendreq->req_convertor.pBaseBuf);
/* decrement count of buffered requests */
if(--mca_pml_bsend_count == 0)
ompi_condition_signal(&mca_pml_bsend_condition);
THREAD_UNLOCK(&mca_pml_bsend_mutex);
return OMPI_SUCCESS;
}
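The new file above manages the user's attached buffer with a sub-allocator, packs each buffered send into it, counts outstanding requests, and makes detach block until that count drains to zero. A stripped-down sketch of that bookkeeping, using plain memcpy and pthread primitives instead of the MCA allocator and datatype convertor (all names below are illustrative, not Open MPI APIs):

#include <pthread.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  drained = PTHREAD_COND_INITIALIZER;
static unsigned char  *base, *next_free;   /* attached region and current offset */
static size_t          attached_size;
static size_t          pending;            /* outstanding buffered sends */

static int toy_attach(void *buf, size_t size)
{
    pthread_mutex_lock(&lock);
    base = next_free = buf;
    attached_size = size;
    pending = 0;
    pthread_mutex_unlock(&lock);
    return 0;
}

/* start of a buffered send: copy the payload so the caller's buffer is
 * immediately reusable, and record that one more send is in flight */
static void *toy_bsend_pack(const void *payload, size_t len)
{
    void *dst = NULL;
    pthread_mutex_lock(&lock);
    if (next_free + len <= base + attached_size) {
        dst = memcpy(next_free, payload, len);
        next_free += len;
        pending++;
    }
    pthread_mutex_unlock(&lock);
    return dst;                 /* NULL: the attached buffer is exhausted */
}

/* completion callback: one buffered send finished */
static void toy_bsend_done(void)
{
    pthread_mutex_lock(&lock);
    if (--pending == 0)
        pthread_cond_signal(&drained);
    pthread_mutex_unlock(&lock);
}

/* detach blocks until every buffered send has completed */
static int toy_detach(void **buf, size_t *size)
{
    pthread_mutex_lock(&lock);
    while (pending != 0)
        pthread_cond_wait(&drained, &lock);
    *buf = base;
    *size = attached_size;
    base = next_free = NULL;
    attached_size = 0;
    pthread_mutex_unlock(&lock);
    return 0;
}

int main(void)
{
    unsigned char area[64];
    int msg = 42;
    void *buf;
    size_t size;

    toy_attach(area, sizeof(area));
    toy_bsend_pack(&msg, sizeof(msg));  /* payload copied; msg may be reused */
    toy_bsend_done();                   /* pretend the transfer completed */
    toy_detach(&buf, &size);            /* returns at once: nothing pending */
    return 0;
}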

20
src/mca/pml/base/pml_base_bsend.h Normal file
View file

@ -0,0 +1,20 @@
#ifndef _MCA_PML_BASE_BSEND_H_
#define _MCA_PML_BASE_BSEND_H_
#include "mca/pml/pml.h"
#include "request/request.h"
struct mca_ptl_base_send_request_t;
int mca_pml_base_bsend_init(bool*);
int mca_pml_base_bsend_fini(void);
int mca_pml_base_bsend_attach(void* addr, int size);
int mca_pml_base_bsend_detach(void* addr, int* size);
int mca_pml_base_bsend_request_init(ompi_request_t*, bool persistent);
int mca_pml_base_bsend_request_fini(ompi_request_t*);
#endif

View file

@ -15,6 +15,7 @@
#include "request/request.h"
#include "mca/pml/pml.h"
#include "mca/pml/base/pml_base_request.h"
#include "mca/pml/base/pml_base_bsend.h"
#include "mca/ptl/base/ptl_base_sendreq.h"
#include "mca/ptl/ptl.h"
@ -249,17 +250,37 @@ extern int mca_pml_teg_free(
ompi_request_t** request
);
#define MCA_PML_TEG_FINI(request) \
{ \
mca_pml_base_request_t* pml_request = *(mca_pml_base_request_t**)(request); \
if(pml_request->req_persistent) { \
if(pml_request->req_free_called) { \
MCA_PML_TEG_FREE(request); \
} else { \
pml_request->super.req_state = OMPI_REQUEST_INACTIVE; \
} \
} else { \
MCA_PML_TEG_FREE(request); \
} \
}
#define MCA_PML_TEG_FREE(request) \
{ \
mca_pml_base_request_t* pml_request = *(mca_pml_base_request_t**)(request); \
pml_request->req_free_called = true; \
if(pml_request->req_pml_done == true) \
{ \
OMPI_REQUEST_FINI(*request); \
switch(pml_request->req_type) { \
case MCA_PML_REQUEST_SEND: \
{ \
mca_ptl_base_send_request_t* sendreq = (mca_ptl_base_send_request_t*)pml_request; \
mca_ptl_t* ptl = sendreq->req_owner; \
if(sendreq->req_send_mode == MCA_PML_BASE_SEND_BUFFERED) { \
mca_pml_base_bsend_request_fini((ompi_request_t*)sendreq); \
} \
ptl->ptl_request_return(ptl, sendreq); \
break; \
} \
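The branching in MCA_PML_TEG_FINI / MCA_PML_TEG_FREE above is easier to follow restated as a plain function. The sketch below mirrors the macros' logic only (a readability aid, not code from this commit): persistent requests are parked as inactive unless the user has already freed them; everything else is released once the PML layer is also done with it.

/* toy restatement of the FINI/FREE decision logic above */
typedef struct {
    int persistent;   /* created by an *_init call; survives completion      */
    int free_called;  /* the user has called MPI_Request_free                */
    int pml_done;     /* the PML has finished processing the request         */
    int inactive;     /* persistent request parked until the next MPI_Start  */
} toy_request;

static void toy_fini(toy_request *req)
{
    if (req->persistent && !req->free_called) {
        /* keep the persistent request around for the next MPI_Start */
        req->inactive = 1;
        return;
    }
    /* non-persistent, or already freed by the user */
    req->free_called = 1;
    if (req->pml_done) {
        /* safe to release now: return buffered-send resources (if any)
         * and hand the request descriptor back to its PTL */
    }
    /* otherwise it is released later, when the PML completes the request */
}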

View file

@ -7,6 +7,7 @@
#include "mca/pml/pml.h"
#include "mca/ptl/ptl.h"
#include "mca/base/mca_base_param.h"
#include "mca/pml/base/pml_base_bsend.h"
#include "mca/ptl/base/ptl_base_sendreq.h"
#include "mca/ptl/base/ptl_base_recvreq.h"
#include "pml_teg.h"
@ -135,7 +136,6 @@ mca_pml_t* mca_pml_teg_module_init(int* priority,
bool *have_hidden_threads)
{
*priority = 0;
*allow_multi_user_threads = true;
*have_hidden_threads = false;
OBJ_CONSTRUCT(&mca_pml_teg.teg_lock, ompi_mutex_t);
@ -158,6 +158,13 @@ mca_pml_t* mca_pml_teg_module_init(int* priority,
OBJ_CONSTRUCT(&mca_pml_teg.teg_request_lock, ompi_mutex_t);
OBJ_CONSTRUCT(&mca_pml_teg.teg_request_cond, ompi_condition_t);
mca_pml_teg.teg_request_waiting = 0;
/* buffered send */
if(mca_pml_base_bsend_init(allow_multi_user_threads) != OMPI_SUCCESS) {
ompi_output(0, "mca_pml_teg_module_init: mca_pml_bsend_init failed\n");
return NULL;
}
*allow_multi_user_threads &= true;
return &mca_pml_teg.super;
}

View file

@ -42,6 +42,7 @@
*/
static inline int mca_pml_teg_recv_request_start(mca_ptl_base_recv_request_t* request)
{
request->super.super.req_state = OMPI_REQUEST_ACTIVE;
if(request->super.req_peer == OMPI_ANY_SOURCE) {
mca_ptl_base_recv_request_match_wild(request);
} else {

View file

@ -10,7 +10,6 @@
#include "pml_teg_sendreq.h"
/**
* Schedule message delivery across potentially multiple PTLs.
*
@ -100,8 +99,9 @@ void mca_pml_teg_send_request_progress(
if(mca_pml_teg.teg_request_waiting) {
ompi_condition_broadcast(&mca_pml_teg.teg_request_cond);
}
} else if (req->super.req_free_called)
} else if (req->super.req_free_called) {
MCA_PML_TEG_FREE((ompi_request_t**)&req);
}
THREAD_UNLOCK(&mca_pml_teg.teg_request_lock);
return;
}

View file

@ -13,9 +13,6 @@
#include "mca/ptl/base/ptl_base_sendfrag.h"
void mca_pml_teg_send_request_schedule(mca_ptl_base_send_request_t* req);
#define MCA_PML_TEG_SEND_REQUEST_ALLOC( \
comm, \
dst, \
@ -37,6 +34,7 @@ void mca_pml_teg_send_request_schedule(mca_ptl_base_send_request_t* req);
#define MCA_PML_TEG_SEND_REQUEST_RETURN(request) \
request->req_owner->ptl_request_return(request->req_owner, request);
static inline int mca_pml_teg_send_request_start(
mca_ptl_base_send_request_t* req)
{
@ -45,22 +43,31 @@ static inline int mca_pml_teg_send_request_start(
size_t offset = req->req_offset;
int flags, rc;
/* initialize request state and message sequence number */
req->super.super.req_state = OMPI_REQUEST_ACTIVE;
req->super.req_sequence = mca_pml_ptl_comm_send_sequence(
req->super.req_comm->c_pml_comm,
req->super.req_peer);
/* start the first fragment */
if(first_fragment_size == 0 || req->req_bytes_packed <= first_fragment_size) {
first_fragment_size = req->req_bytes_packed;
flags = (req->req_send_mode == MCA_PML_BASE_SEND_SYNCHRONOUS) ?
MCA_PTL_FLAGS_ACK_MATCHED : 0;
flags = (req->req_send_mode == MCA_PML_BASE_SEND_SYNCHRONOUS) ?
MCA_PTL_FLAGS_ACK_MATCHED : 0;
} else {
/* require match for first fragment of a multi-fragment */
flags = MCA_PTL_FLAGS_ACK_MATCHED;
}
rc = ptl->ptl_put(ptl, req->req_peer, req, offset, first_fragment_size, flags);
if(rc != OMPI_SUCCESS)
return rc;
return OMPI_SUCCESS;
}
void mca_pml_teg_send_request_schedule(mca_ptl_base_send_request_t* req);
void mca_pml_teg_send_request_progress(
struct mca_ptl_t* ptl,
mca_ptl_base_send_request_t* send_request,

View file

@ -9,9 +9,79 @@ int mca_pml_teg_start(size_t count, ompi_request_t** requests)
size_t i;
for(i=0; i<count; i++) {
mca_pml_base_request_t *pml_request = (mca_pml_base_request_t*)requests[i];
int state;
if(NULL == pml_request)
continue;
if(pml_request->req_persistent == false)
return OMPI_ERR_REQUEST;
/* If the persistent request is currently active - obtain the request lock
* and verify the status is incomplete. if the pml layer has not completed
* the request - mark the request as free called - so that it will be freed
* when the request completes - and create a new request.
*/
switch(pml_request->super.req_state) {
case OMPI_REQUEST_INVALID:
return OMPI_ERR_REQUEST;
case OMPI_REQUEST_INACTIVE:
break;
case OMPI_REQUEST_ACTIVE: {
ompi_request_t *request;
THREAD_LOCK(&mca_pml_teg.teg_request_lock);
if (pml_request->req_pml_done == false) {
/* free request after it completes */
pml_request->req_free_called = true;
} else {
/* can reuse the existing request */
THREAD_UNLOCK(&mca_pml_teg.teg_request_lock);
break;
}
THREAD_UNLOCK(&mca_pml_teg.teg_request_lock);
/* allocate a new request */
switch(pml_request->req_type) {
case MCA_PML_REQUEST_SEND: {
mca_pml_base_send_mode_t sendmode =
((mca_ptl_base_send_request_t*)pml_request)->req_send_mode;
rc = mca_pml_teg_isend_init(
pml_request->req_addr,
pml_request->req_count,
pml_request->req_datatype,
pml_request->req_peer,
pml_request->req_tag,
sendmode,
pml_request->req_comm,
&request);
if (sendmode == MCA_PML_BASE_SEND_BUFFERED) {
mca_pml_base_bsend_request_init(request, true);
}
break;
}
case MCA_PML_REQUEST_RECV:
rc = mca_pml_teg_irecv_init(
pml_request->req_addr,
pml_request->req_count,
pml_request->req_datatype,
pml_request->req_peer,
pml_request->req_tag,
pml_request->req_comm,
&request);
break;
default:
rc = OMPI_ERR_REQUEST;
break;
}
if(OMPI_SUCCESS != rc)
return rc;
pml_request = (mca_pml_base_request_t*)request;
requests[i] = request;
break;
}
}
/* start the request */
switch(pml_request->req_type) {
case MCA_PML_REQUEST_SEND:
if((rc = mca_pml_teg_send_request_start((mca_ptl_base_send_request_t*)pml_request))
@ -24,7 +94,7 @@ int mca_pml_teg_start(size_t count, ompi_request_t** requests)
return rc;
break;
default:
return OMPI_ERROR;
return OMPI_ERR_REQUEST;
}
}
return OMPI_SUCCESS;
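The restart handling above services the standard persistent-request calls. A minimal user-level sketch of that path (standard MPI calls, assuming a working installation and at least two ranks; not part of this diff):

#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, size, value = 0, i;
    MPI_Request req;
    MPI_Status  status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (rank == 0 && size > 1) {
        /* create the persistent send request once ... */
        MPI_Send_init(&value, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, &req);
        for (i = 0; i < 3; i++) {
            value = i;
            MPI_Start(&req);          /* ... and restart it each iteration */
            MPI_Wait(&req, &status);
        }
        MPI_Request_free(&req);
    } else if (rank == 1) {
        MPI_Recv_init(&value, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &req);
        for (i = 0; i < 3; i++) {
            MPI_Start(&req);
            MPI_Wait(&req, &status);
            printf("got %d\n", value);
        }
        MPI_Request_free(&req);
    }

    MPI_Finalize();
    return 0;
}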

View file

@ -9,6 +9,8 @@ int mca_pml_teg_test(
ompi_status_public_t* status)
{
size_t i;
ompi_atomic_mb();
for(i=0; i<count; i++) {
mca_pml_base_request_t* pml_request = (mca_pml_base_request_t*)requests[i];
if(pml_request == NULL)
@ -18,8 +20,7 @@ int mca_pml_teg_test(
*completed = true;
if (NULL != status)
*status = pml_request->req_status;
if(false == pml_request->req_persistent)
MCA_PML_TEG_FREE(requests+i);
MCA_PML_TEG_FINI(requests+i);
return OMPI_SUCCESS;
}
}
@ -40,6 +41,8 @@ int mca_pml_teg_test_all(
{
size_t i;
size_t num_completed;
ompi_atomic_mb();
for(i=0; i<count; i++) {
mca_pml_base_request_t* pml_request = (mca_pml_base_request_t*)requests[i];
if(pml_request == NULL || pml_request->req_mpi_done)
@ -60,16 +63,15 @@ int mca_pml_teg_test_all(
statuses[i] = mca_pml_teg.teg_request_null.req_status;
} else {
statuses[i] = pml_request->req_status;
if(false == pml_request->req_persistent)
MCA_PML_TEG_FREE(requests+i);
MCA_PML_TEG_FINI(requests+i);
}
}
} else {
/* free request if required */
for(i=0; i<count; i++) {
mca_pml_base_request_t* pml_request = (mca_pml_base_request_t*)requests[i];
if(NULL != pml_request && false == pml_request->req_persistent)
MCA_PML_TEG_FREE(requests+i);
if(NULL != pml_request)
MCA_PML_TEG_FINI(requests+i);
}
}
return OMPI_SUCCESS;

View file

@ -22,7 +22,9 @@ int mca_pml_teg_wait(
#endif
#if OMPI_HAVE_THREADS
/* poll for completion */
ompi_atomic_mb();
for(c=0; completed < 0 && c < mca_pml_teg.teg_poll_iterations; c++) {
for(i=0; i<count; i++) {
pml_request = (mca_pml_base_request_t*)request[i];
@ -59,13 +61,13 @@ int mca_pml_teg_wait(
THREAD_UNLOCK(&mca_pml_teg.teg_request_lock);
}
/* return request to pool */
if(false == pml_request->req_persistent) {
MCA_PML_TEG_FREE(request);
}
/* return status */
if (NULL != status) {
*status = pml_request->req_status;
}
/* return request to pool */
MCA_PML_TEG_FINI(request);
*index = completed;
return OMPI_SUCCESS;
}
@ -116,17 +118,15 @@ int mca_pml_teg_wait_all(
statuses[i] = mca_pml_teg.teg_request_null.req_status;
} else {
statuses[i] = pml_request->req_status;
if (false == pml_request->req_persistent) {
MCA_PML_TEG_FREE(&requests[i]);
}
MCA_PML_TEG_FINI(requests+i);
}
}
} else {
/* free request if required */
for(i=0; i<count; i++) {
mca_pml_base_request_t* pml_request = (mca_pml_base_request_t*)requests[i];
if (NULL != pml_request && false == pml_request->req_persistent) {
MCA_PML_TEG_FREE(&requests[i]);
if (NULL != pml_request) {
MCA_PML_TEG_FINI(requests+i);
}
}
}

View file

@ -47,6 +47,7 @@ typedef struct mca_ptl_base_recv_request_t mca_ptl_base_recv_request_t;
comm, \
persistent) \
{ \
OMPI_REQUEST_INIT(&(request)->super.super); \
(request)->req_bytes_packed = 0; \
(request)->req_bytes_received = 0; \
(request)->req_bytes_delivered = 0; \

View file

@ -62,13 +62,13 @@ typedef struct mca_ptl_base_send_request_t mca_ptl_base_send_request_t;
mode,\
persistent) \
{ \
OMPI_REQUEST_INIT(&(request)->super.super); \
request->req_offset = 0; \
request->req_bytes_sent = 0; \
request->req_send_mode = mode; \
request->req_peer_match.lval = 0; \
request->req_peer_addr.lval = 0; \
request->req_peer_size = 0; \
request->super.req_sequence = mca_pml_ptl_comm_send_sequence(comm->c_pml_comm, peer); \
request->super.req_addr = addr; \
request->super.req_count = count; \
request->super.req_datatype = datatype; \

View file

@ -147,8 +147,13 @@ void mca_ptl_self_matched( mca_ptl_t* ptl,
/* Did you have the same datatype or not ? If yes we can use an optimized version
* for the copy function, if not we have to use a temporary buffer to pack/unpack
*
* Note that if this is a buffered send - the data has already been packed into
* a contigous buffer and the convertor on the send request initialized to point
* into this buffer.
*/
if( sendreq->super.super.req_datatype == recvreq->super.req_datatype ) {
if( sendreq->super.super.req_datatype == recvreq->super.req_datatype &&
sendreq->super.req_send_mode != MCA_PML_BASE_SEND_BUFFERED) {
ompi_ddt_copy_content_same_ddt( recvreq->super.req_datatype, recvreq->super.req_count,
recvreq->super.req_addr, sendreq->super.super.req_addr );
} else {

View file

@ -174,7 +174,7 @@ int mca_ptl_tcp_module_close(void)
OBJ_DESTRUCT(&mca_ptl_tcp_module.tcp_send_frags);
OBJ_DESTRUCT(&mca_ptl_tcp_module.tcp_recv_frags);
OBJ_DESTRUCT(&mca_ptl_tcp_module.tcp_lock);
return OMPI_SUCCESS;
return ompi_event_fini();
}

View file

@ -19,7 +19,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Address";
static const char FUNC_NAME[] = "MPI_Address";
int MPI_Address(void *location, MPI_Aint *address)
{

View file

@ -18,7 +18,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Allgather";
static const char FUNC_NAME[] = "MPI_Allgather";
int MPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,

View file

@ -18,7 +18,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Alltoall";
static const char FUNC_NAME[] = "MPI_Alltoall";
int MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,

View file

@ -17,7 +17,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Attr_delete";
static const char FUNC_NAME[] = "MPI_Attr_delete";
int MPI_Attr_delete(MPI_Comm comm, int keyval)
{

View file

@ -17,7 +17,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Attr_get";
static const char FUNC_NAME[] = "MPI_Attr_get";
int MPI_Attr_get(MPI_Comm comm, int keyval, void *attribute_val, int *flag)
{

View file

@ -17,7 +17,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Attr_put";
static const char FUNC_NAME[] = "MPI_Attr_put";
int MPI_Attr_put(MPI_Comm comm, int keyval, void *attribute_val)
{

View file

@ -18,7 +18,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Barrier";
static const char FUNC_NAME[] = "MPI_Barrier";
int MPI_Barrier(MPI_Comm comm)

View file

@ -17,7 +17,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Bcast";
static const char FUNC_NAME[] = "MPI_Bcast";
int MPI_Bcast(void *buffer, int count, MPI_Datatype datatype,

View file

@ -8,6 +8,7 @@
#include "runtime/runtime.h"
#include "mpi/c/bindings.h"
#include "mca/pml/pml.h"
#include "mca/pml/base/pml_base_bsend.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
@ -18,33 +19,54 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Bsend";
int MPI_Bsend(void *buf, int count, MPI_Datatype type, int dest, int tag, MPI_Comm comm)
{
int rc;
int rc, index;
ompi_request_t* request;
if (dest == MPI_PROC_NULL) {
return MPI_SUCCESS;
}
if ( MPI_PARAM_CHECK ) {
rc = MPI_SUCCESS;
if ( OMPI_MPI_INVALID_STATE ) {
rc = MPI_ERR_INTERN;
int rc = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
} else if (count < 0) {
rc = MPI_ERR_COUNT;
} else if (type == MPI_DATATYPE_NULL) {
rc = MPI_ERR_TYPE;
} else if (tag < 0 || tag > MPI_TAG_UB_VALUE) {
rc = MPI_ERR_TAG;
} else if (ompi_comm_invalid(comm)) {
rc = MPI_ERR_COMM;
} else if (ompi_comm_peer_invalid(comm, dest)) {
rc = MPI_ERR_RANK;
}
OMPI_ERRHANDLER_CHECK(rc, comm, rc, "MPI_Bsend");
OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
}
rc = mca_pml.pml_send(buf, count, type, dest, tag, MCA_PML_BASE_SEND_BUFFERED, comm);
OMPI_ERRHANDLER_RETURN(rc, comm, rc, "MPI_Bsend");
rc = mca_pml.pml_isend_init(buf, count, type, dest, tag, MCA_PML_BASE_SEND_BUFFERED, comm, &request);
if(OMPI_SUCCESS != rc)
goto error_return;
rc = mca_pml_base_bsend_request_init(request, false);
if(OMPI_SUCCESS != rc)
goto error_return;
rc = mca_pml.pml_start(1, &request);
if(OMPI_SUCCESS != rc)
goto error_return;
rc = mca_pml.pml_wait(1, &request, &index, NULL);
if(OMPI_SUCCESS != rc) {
mca_pml.pml_free(&request);
return rc;
}
error_return:
OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
}

View file

@ -8,6 +8,7 @@
#include "runtime/runtime.h"
#include "mpi/c/bindings.h"
#include "mca/pml/pml.h"
#include "mca/pml/base/pml_base_bsend.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Bsend_init = PMPI_Bsend_init
@ -17,6 +18,9 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Bsend_init";
int MPI_Bsend_init(void *buf, int count, MPI_Datatype type,
int dest, int tag, MPI_Comm comm, MPI_Request *request)
{
@ -27,25 +31,32 @@ int MPI_Bsend_init(void *buf, int count, MPI_Datatype type,
if ( MPI_PARAM_CHECK ) {
rc = MPI_SUCCESS;
if ( OMPI_MPI_INVALID_STATE ) {
rc = MPI_ERR_INTERN;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
} else if (count < 0) {
rc = MPI_ERR_COUNT;
} else if (type == MPI_DATATYPE_NULL) {
rc = MPI_ERR_TYPE;
} else if (tag < 0 || tag > MPI_TAG_UB_VALUE) {
rc = MPI_ERR_TAG;
} else if (ompi_comm_invalid(comm)) {
rc = MPI_ERR_COMM;
} else if (ompi_comm_peer_invalid(comm, dest)) {
rc = MPI_ERR_RANK;
} else if (request == NULL) {
rc = MPI_ERR_REQUEST;
}
OMPI_ERRHANDLER_CHECK(rc, comm, rc, "MPI_Bsend_init");
OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
}
rc = mca_pml.pml_isend_init(buf,count,type,dest,tag,MCA_PML_BASE_SEND_BUFFERED,comm,request);
OMPI_ERRHANDLER_RETURN(rc, comm, rc, "MPI_Bsend_init");
if(OMPI_SUCCESS != rc)
goto error_return;
rc = mca_pml_base_bsend_request_init(*request, true);
if(OMPI_SUCCESS != rc)
goto error_return;
error_return:
OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
}

View file

@ -6,6 +6,10 @@
#include "mpi.h"
#include "mpi/c/bindings.h"
#include "runtime/runtime.h"
#include "mca/pml/pml.h"
#include "mca/pml/base/pml_base_bsend.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Buffer_attach = PMPI_Buffer_attach
@ -16,6 +20,8 @@
#endif
int MPI_Buffer_attach(void *buffer, int size) {
return MPI_SUCCESS;
int MPI_Buffer_attach(void *buffer, int size)
{
return mca_pml_base_bsend_attach(buffer, size);
}

View file

@ -6,6 +6,9 @@
#include "mpi.h"
#include "mpi/c/bindings.h"
#include "runtime/runtime.h"
#include "mca/pml/pml.h"
#include "mca/pml/base/pml_base_bsend.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Buffer_detach = PMPI_Buffer_detach
@ -15,6 +18,7 @@
#include "mpi/c/profile/defines.h"
#endif
int MPI_Buffer_detach(void *buffer, int *size) {
return MPI_SUCCESS;
int MPI_Buffer_detach(void *buffer, int *size)
{
return mca_pml_base_bsend_detach(buffer, size);
}

View file

@ -16,7 +16,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Comm_create_keyval";
static const char FUNC_NAME[] = "MPI_Comm_create_keyval";
int MPI_Comm_create_keyval(MPI_Comm_copy_attr_function *comm_copy_attr_fn,
MPI_Comm_delete_attr_function *comm_delete_attr_fn,

View file

@ -16,7 +16,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Comm_delete_attr";
static const char FUNC_NAME[] = "MPI_Comm_delete_attr";
int MPI_Comm_delete_attr(MPI_Comm comm, int comm_keyval)
{

View file

@ -16,7 +16,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Comm_free_keyval";
static const char FUNC_NAME[] = "MPI_Comm_free_keyval";
int MPI_Comm_free_keyval(int *comm_keyval)
{

View file

@ -16,7 +16,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Comm_get_attr";
static const char FUNC_NAME[] = "MPI_Comm_get_attr";
int MPI_Comm_get_attr(MPI_Comm comm, int comm_keyval,
void *attribute_val, int *flag)

View file

@ -16,7 +16,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Comm_set_attr";
static const char FUNC_NAME[] = "MPI_Comm_set_attr";
int MPI_Comm_set_attr(MPI_Comm comm, int comm_keyval, void *attribute_val)
{

View file

@ -17,7 +17,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Gather";
static const char FUNC_NAME[] = "MPI_Gather";
int MPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,

View file

@ -17,7 +17,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Get_address";
static const char FUNC_NAME[] = "MPI_Get_address";
int MPI_Get_address(void *location, MPI_Aint *address)
{

View file

@ -8,6 +8,7 @@
#include "runtime/runtime.h"
#include "mpi/c/bindings.h"
#include "mca/pml/pml.h"
#include "mca/pml/base/pml_base_bsend.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
@ -18,6 +19,9 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Ibsend";
int MPI_Ibsend(void *buf, int count, MPI_Datatype type, int dest,
int tag, MPI_Comm comm, MPI_Request *request)
{
@ -28,25 +32,36 @@ int MPI_Ibsend(void *buf, int count, MPI_Datatype type, int dest,
if ( MPI_PARAM_CHECK ) {
rc = MPI_SUCCESS;
if ( OMPI_MPI_INVALID_STATE ) {
rc = MPI_ERR_INTERN;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
} else if (count < 0) {
rc = MPI_ERR_COUNT;
} else if (type == MPI_DATATYPE_NULL) {
rc = MPI_ERR_TYPE;
} else if (tag < 0 || tag > MPI_TAG_UB_VALUE) {
rc = MPI_ERR_TAG;
} else if (ompi_comm_invalid(comm)) {
rc = MPI_ERR_COMM;
} else if (ompi_comm_peer_invalid(comm, dest)) {
rc = MPI_ERR_RANK;
} else if (request == NULL) {
rc = MPI_ERR_REQUEST;
}
OMPI_ERRHANDLER_CHECK(rc, comm, rc, "MPI_Ibsend");
OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
}
rc = mca_pml.pml_isend(buf,count,type,dest,tag,MCA_PML_BASE_SEND_BUFFERED,comm,request);
OMPI_ERRHANDLER_RETURN(rc, comm, rc, "MPI_Ibsend");
rc = mca_pml.pml_isend_init(buf, count, type, dest, tag, MCA_PML_BASE_SEND_BUFFERED, comm, request);
if(OMPI_SUCCESS != rc)
goto error_return;
rc = mca_pml_base_bsend_request_init(*request, false);
if(OMPI_SUCCESS != rc)
goto error_return;
rc = mca_pml.pml_start(1, request);
if(OMPI_SUCCESS != rc)
goto error_return;
error_return:
OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
}

View file

@ -18,6 +18,9 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Irecv";
int MPI_Irecv(void *buf, int count, MPI_Datatype type, int source,
int tag, MPI_Comm comm, MPI_Request *request)
{
@ -28,25 +31,24 @@ int MPI_Irecv(void *buf, int count, MPI_Datatype type, int source,
if ( MPI_PARAM_CHECK ) {
rc = MPI_SUCCESS;
if ( OMPI_MPI_INVALID_STATE ) {
rc = MPI_ERR_INTERN;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
} else if (count < 0) {
rc = MPI_ERR_COUNT;
} else if (type == MPI_DATATYPE_NULL) {
rc = MPI_ERR_TYPE;
} else if (((tag < 0) && (tag != MPI_ANY_TAG)) || (tag > MPI_TAG_UB_VALUE)) {
rc = MPI_ERR_TAG;
} else if (ompi_comm_invalid(comm)) {
rc = MPI_ERR_COMM;
} else if (source != MPI_ANY_SOURCE &&
source != MPI_PROC_NULL &&
ompi_comm_peer_invalid(comm, source)) {
rc = MPI_ERR_RANK;
}
OMPI_ERRHANDLER_CHECK(rc, comm, rc, "MPI_Irecv");
OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
}
rc = mca_pml.pml_irecv(buf,count,type,source,tag,comm,request);
OMPI_ERRHANDLER_RETURN(rc, comm, rc, "MPI_Irecv");
OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
}

View file

@ -18,6 +18,9 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Irsend";
int MPI_Irsend(void *buf, int count, MPI_Datatype type, int dest,
int tag, MPI_Comm comm, MPI_Request *request)
{
@ -28,25 +31,24 @@ int MPI_Irsend(void *buf, int count, MPI_Datatype type, int dest,
if ( MPI_PARAM_CHECK ) {
rc = MPI_SUCCESS;
if ( OMPI_MPI_INVALID_STATE ) {
rc = MPI_ERR_INTERN;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
} else if (count < 0) {
rc = MPI_ERR_COUNT;
} else if (type == MPI_DATATYPE_NULL) {
rc = MPI_ERR_TYPE;
} else if (tag < 0 || tag > MPI_TAG_UB_VALUE) {
rc = MPI_ERR_TAG;
} else if (ompi_comm_invalid(comm)) {
rc = MPI_ERR_COMM;
} else if (ompi_comm_peer_invalid(comm, dest)) {
rc = MPI_ERR_RANK;
} else if (request == NULL) {
rc = MPI_ERR_REQUEST;
}
OMPI_ERRHANDLER_CHECK(rc, comm, rc, "MPI_Irsend");
OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
}
rc = mca_pml.pml_isend(buf,count,type,dest,tag,MCA_PML_BASE_SEND_READY,comm,request);
OMPI_ERRHANDLER_RETURN(rc, comm, rc, "MPI_Irsend");
OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
}

View file

@ -18,6 +18,9 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Isend";
int MPI_Isend(void *buf, int count, MPI_Datatype type, int dest,
int tag, MPI_Comm comm, MPI_Request *request)
{
@ -28,25 +31,24 @@ int MPI_Isend(void *buf, int count, MPI_Datatype type, int dest,
if ( MPI_PARAM_CHECK ) {
rc = MPI_SUCCESS;
if ( OMPI_MPI_INVALID_STATE ) {
rc = MPI_ERR_INTERN;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
} else if (count < 0) {
rc = MPI_ERR_COUNT;
} else if (type == MPI_DATATYPE_NULL) {
rc = MPI_ERR_TYPE;
} else if (tag < 0 || tag > MPI_TAG_UB_VALUE) {
rc = MPI_ERR_TAG;
} else if (ompi_comm_invalid(comm)) {
rc = MPI_ERR_COMM;
} else if (ompi_comm_peer_invalid(comm, dest)) {
rc = MPI_ERR_RANK;
} else if (request == NULL) {
rc = MPI_ERR_REQUEST;
}
OMPI_ERRHANDLER_CHECK(rc, comm, rc, "MPI_Isend");
OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
}
rc = mca_pml.pml_isend(buf,count,type,dest,tag,MCA_PML_BASE_SEND_STANDARD,comm,request);
OMPI_ERRHANDLER_RETURN(rc, comm, rc, "MPI_Isend");
OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
}

View file

@ -19,6 +19,9 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Issend";
int MPI_Issend(void *buf, int count, MPI_Datatype type, int dest,
int tag, MPI_Comm comm, MPI_Request *request)
{
@ -29,25 +32,24 @@ int MPI_Issend(void *buf, int count, MPI_Datatype type, int dest,
if ( MPI_PARAM_CHECK ) {
rc = MPI_SUCCESS;
if ( OMPI_MPI_INVALID_STATE ) {
rc = MPI_ERR_INTERN;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
} else if (count < 0) {
rc = MPI_ERR_COUNT;
} else if (type == MPI_DATATYPE_NULL) {
rc = MPI_ERR_TYPE;
} else if (tag < 0 || tag > MPI_TAG_UB_VALUE) {
rc = MPI_ERR_TAG;
} else if (ompi_comm_invalid(comm)) {
rc = MPI_ERR_COMM;
} else if (ompi_comm_peer_invalid(comm, dest)) {
rc = MPI_ERR_RANK;
} else if (request == NULL) {
rc = MPI_ERR_REQUEST;
}
OMPI_ERRHANDLER_CHECK(rc, comm, rc, "MPI_Issend");
OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
}
rc = mca_pml.pml_isend(buf,count,type,dest,tag,MCA_PML_BASE_SEND_SYNCHRONOUS,comm,request);
OMPI_ERRHANDLER_RETURN(rc, comm, rc, "MPI_Issend");
OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
}

View file

@ -16,7 +16,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Keyval_create";
static const char FUNC_NAME[] = "MPI_Keyval_create";
int MPI_Keyval_create(MPI_Copy_function *copy_attr_fn,
MPI_Delete_function *delete_attr_fn,

View file

@ -16,7 +16,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Keyval_free";
static const char FUNC_NAME[] = "MPI_Keyval_free";
int MPI_Keyval_free(int *keyval)
{

View file

@ -22,7 +22,7 @@
/* VPS: Just for now, to be removed later */
extern ompi_convertor_t *ompi_convertor;
static char FUNC_NAME[] = "MPI_Pack";
static const char FUNC_NAME[] = "MPI_Pack";
int MPI_Pack(void *inbuf, int incount, MPI_Datatype datatype,

View file

@ -19,7 +19,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Pack_size";
static const char FUNC_NAME[] = "MPI_Pack_size";
int MPI_Pack_size(int incount, MPI_Datatype datatype, MPI_Comm comm,
int *size)

View file

@ -18,6 +18,9 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Recv";
int MPI_Recv(void *buf, int count, MPI_Datatype type, int source,
int tag, MPI_Comm comm, MPI_Status *status)
{
@ -34,23 +37,22 @@ int MPI_Recv(void *buf, int count, MPI_Datatype type, int source,
if ( MPI_PARAM_CHECK ) {
rc = MPI_SUCCESS;
if ( OMPI_MPI_INVALID_STATE ) {
rc = MPI_ERR_INTERN;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
} else if (count < 0) {
rc = MPI_ERR_COUNT;
} else if (type == MPI_DATATYPE_NULL) {
rc = MPI_ERR_TYPE;
} else if (((tag < 0) && (tag != MPI_ANY_TAG)) || (tag > MPI_TAG_UB_VALUE)) {
rc = MPI_ERR_TAG;
} else if (ompi_comm_invalid(comm)) {
rc = MPI_ERR_COMM;
} else if (source != MPI_ANY_SOURCE && ompi_comm_peer_invalid(comm, source)) {
rc = MPI_ERR_RANK;
}
OMPI_ERRHANDLER_CHECK(rc, comm, rc, "MPI_Recv");
OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
}
rc = mca_pml.pml_recv(buf, count, type, source, tag, comm, status);
OMPI_ERRHANDLER_RETURN(rc, comm, rc, "MPI_Recv");
OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
}

View file

@ -18,6 +18,9 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Recv_init";
int MPI_Recv_init(void *buf, int count, MPI_Datatype type, int source,
int tag, MPI_Comm comm, MPI_Request *request)
{
@ -28,25 +31,24 @@ int MPI_Recv_init(void *buf, int count, MPI_Datatype type, int source,
if ( MPI_PARAM_CHECK ) {
rc = MPI_SUCCESS;
if ( OMPI_MPI_INVALID_STATE ) {
rc = MPI_ERR_INTERN;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
} else if (count < 0) {
rc = MPI_ERR_COUNT;
} else if (type == MPI_DATATYPE_NULL) {
rc = MPI_ERR_TYPE;
} else if (((tag < 0) && (tag != MPI_ANY_TAG)) || (tag > MPI_TAG_UB_VALUE)) {
rc = MPI_ERR_TAG;
} else if (ompi_comm_invalid(comm)) {
rc = MPI_ERR_COMM;
} else if (source != MPI_ANY_SOURCE &&
source != MPI_PROC_NULL &&
ompi_comm_peer_invalid(comm, source)) {
rc = MPI_ERR_RANK;
}
OMPI_ERRHANDLER_CHECK(rc, comm, rc, "MPI_Recv_init");
OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
}
rc = mca_pml.pml_irecv_init(buf,count,type,source,tag,comm,request);
OMPI_ERRHANDLER_RETURN(rc, comm, rc, "MPI_Recv_init");
OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
}

View file

@ -6,6 +6,8 @@
#include "mpi.h"
#include "mpi/c/bindings.h"
#include "errhandler/errhandler.h"
#include "mca/pml/pml.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Request_free = PMPI_Request_free
@ -15,6 +17,22 @@
#include "mpi/c/profile/defines.h"
#endif
int MPI_Request_free(MPI_Request *request) {
return MPI_SUCCESS;
static const char FUNC_NAME[] = "MPI_Request_free";
int MPI_Request_free(MPI_Request *request)
{
int rc;
if( request == NULL ) {
rc = OMPI_ERR_BAD_PARAM;
goto error_return;
}
if( *request == NULL ) {
return MPI_SUCCESS;
}
rc = mca_pml.pml_free(request);
error_return:
OMPI_ERRHANDLER_RETURN(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
}

View file

@ -18,6 +18,9 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Rsend";
int MPI_Rsend(void *buf, int count, MPI_Datatype type, int dest, int tag, MPI_Comm comm)
{
int rc;
@ -27,23 +30,22 @@ int MPI_Rsend(void *buf, int count, MPI_Datatype type, int dest, int tag, MPI_Co
if ( MPI_PARAM_CHECK ) {
rc = MPI_SUCCESS;
if ( OMPI_MPI_INVALID_STATE ) {
rc = MPI_ERR_INTERN;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
} else if (count < 0) {
rc = MPI_ERR_COUNT;
} else if (type == MPI_DATATYPE_NULL) {
rc = MPI_ERR_TYPE;
} else if (tag < 0 || tag > MPI_TAG_UB_VALUE) {
rc = MPI_ERR_TAG;
} else if (ompi_comm_invalid(comm)) {
rc = MPI_ERR_COMM;
} else if (ompi_comm_peer_invalid(comm, dest)) {
rc = MPI_ERR_RANK;
}
OMPI_ERRHANDLER_CHECK(rc, comm, rc, "MPI_Rsend");
OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
}
rc = mca_pml.pml_send(buf, count, type, dest, tag, MCA_PML_BASE_SEND_READY, comm);
OMPI_ERRHANDLER_RETURN(rc, comm, rc, "MPI_Rsend");
OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
}

View file

@ -18,6 +18,9 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Rsend_init";
int MPI_Rsend_init(void *buf, int count, MPI_Datatype type,
int dest, int tag, MPI_Comm comm,
MPI_Request *request)
@ -29,25 +32,24 @@ int MPI_Rsend_init(void *buf, int count, MPI_Datatype type,
if ( MPI_PARAM_CHECK ) {
rc = MPI_SUCCESS;
if ( OMPI_MPI_INVALID_STATE ) {
rc = MPI_ERR_INTERN;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
} else if (count < 0) {
rc = MPI_ERR_COUNT;
} else if (type == MPI_DATATYPE_NULL) {
rc = MPI_ERR_TYPE;
} else if (tag < 0 || tag > MPI_TAG_UB_VALUE) {
rc = MPI_ERR_TAG;
} else if (ompi_comm_invalid(comm)) {
rc = MPI_ERR_COMM;
} else if (ompi_comm_peer_invalid(comm, dest)) {
rc = MPI_ERR_RANK;
} else if (request == NULL) {
rc = MPI_ERR_REQUEST;
}
OMPI_ERRHANDLER_CHECK(rc, comm, rc, "MPI_Rsend_init");
OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
}
rc = mca_pml.pml_isend_init(buf,count,type,dest,tag,MCA_PML_BASE_SEND_READY,comm,request);
OMPI_ERRHANDLER_RETURN(rc, comm, rc, "MPI_Rsend_init");
OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
}

View file

@ -18,7 +18,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Send";
static const char FUNC_NAME[] = "MPI_Send";
int MPI_Send(void *buf, int count, MPI_Datatype type, int dest,
@ -33,8 +33,7 @@ int MPI_Send(void *buf, int count, MPI_Datatype type, int dest,
int rc = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
} else if (count < 0) {
rc = MPI_ERR_COUNT;
} else if (type == MPI_DATATYPE_NULL) {

View file

@ -18,6 +18,9 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Send_init";
int MPI_Send_init(void *buf, int count, MPI_Datatype type,
int dest, int tag, MPI_Comm comm,
MPI_Request *request)
@ -29,25 +32,24 @@ int MPI_Send_init(void *buf, int count, MPI_Datatype type,
if ( MPI_PARAM_CHECK ) {
rc = MPI_SUCCESS;
if ( OMPI_MPI_INVALID_STATE ) {
rc = MPI_ERR_INTERN;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
} else if (count < 0) {
rc = MPI_ERR_COUNT;
} else if (type == MPI_DATATYPE_NULL) {
rc = MPI_ERR_TYPE;
} else if (tag < 0 || tag > MPI_TAG_UB_VALUE) {
rc = MPI_ERR_TAG;
} else if (ompi_comm_invalid(comm)) {
rc = MPI_ERR_COMM;
} else if (ompi_comm_peer_invalid(comm, dest)) {
rc = MPI_ERR_RANK;
} else if (request == NULL) {
rc = MPI_ERR_REQUEST;
}
OMPI_ERRHANDLER_CHECK(rc, comm, rc, "MPI_Send_init");
OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
}
rc = mca_pml.pml_isend_init(buf,count,type,dest,tag,MCA_PML_BASE_SEND_STANDARD,comm,request);
OMPI_ERRHANDLER_RETURN(rc, comm, rc, "MPI_Send_init");
OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
}

View file

@ -18,6 +18,9 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Sendrecv";
int MPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype recvtype,
int dest, int sendtag, void *recvbuf, int recvcount,
MPI_Datatype sendtype, int source, int recvtag,
@ -28,10 +31,9 @@ int MPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype recvtype,
if ( MPI_PARAM_CHECK ) {
rc = MPI_SUCCESS;
if ( OMPI_MPI_INVALID_STATE ) {
rc = MPI_ERR_INTERN;
} else if (ompi_comm_invalid(comm)) {
rc = MPI_ERR_COMM;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
} else if (sendcount < 0) {
rc = MPI_ERR_COUNT;
} else if (sendtype == MPI_DATATYPE_NULL) {
@ -49,19 +51,19 @@ int MPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype recvtype,
} else if (((recvtag < 0) && (recvtag != MPI_ANY_TAG)) || (recvtag > MPI_TAG_UB_VALUE)) {
rc = MPI_ERR_TAG;
}
OMPI_ERRHANDLER_CHECK(rc, comm, rc, "MPI_Sendrecv");
OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
}
if (source != MPI_PROC_NULL) { /* post recv */
rc = mca_pml.pml_irecv(recvbuf, recvcount, recvtype,
source, recvtag, comm, &req);
OMPI_ERRHANDLER_CHECK(rc, comm, rc, "MPI_Sendrecv");
OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
}
if (dest != MPI_PROC_NULL) { /* send */
rc = mca_pml.pml_send(sendbuf, sendcount, sendtype, dest,
sendtag, MCA_PML_BASE_SEND_STANDARD, comm);
OMPI_ERRHANDLER_CHECK(rc, comm, rc, "MPI_Sendrecv");
OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
}
if (source != MPI_PROC_NULL) { /* wait for recv */
@ -76,5 +78,5 @@ int MPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype recvtype,
status->_count = 0;
rc = MPI_SUCCESS;
}
OMPI_ERRHANDLER_RETURN(rc, comm, rc, "MPI_Sendrecv");
OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
}

View file

@ -18,6 +18,9 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Ssend";
int MPI_Ssend(void *buf, int count, MPI_Datatype type, int dest, int tag, MPI_Comm comm)
{
int rc;
@ -27,23 +30,22 @@ int MPI_Ssend(void *buf, int count, MPI_Datatype type, int dest, int tag, MPI_Co
if ( MPI_PARAM_CHECK ) {
rc = MPI_SUCCESS;
if ( OMPI_MPI_INVALID_STATE ) {
rc = MPI_ERR_INTERN;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
} else if (count < 0) {
rc = MPI_ERR_COUNT;
} else if (type == MPI_DATATYPE_NULL) {
rc = MPI_ERR_TYPE;
} else if (tag < 0 || tag > MPI_TAG_UB_VALUE) {
rc = MPI_ERR_TAG;
} else if (ompi_comm_invalid(comm)) {
rc = MPI_ERR_COMM;
} else if (ompi_comm_peer_invalid(comm, dest)) {
rc = MPI_ERR_RANK;
}
OMPI_ERRHANDLER_CHECK(rc, comm, rc, "MPI_Ssend");
OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
}
rc = mca_pml.pml_send(buf, count, type, dest, tag, MCA_PML_BASE_SEND_SYNCHRONOUS, comm);
OMPI_ERRHANDLER_RETURN(rc, comm, rc, "MPI_Ssend");
OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
}

View file

@ -18,6 +18,9 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Ssend_init";
int MPI_Ssend_init(void *buf, int count, MPI_Datatype type,
int dest, int tag, MPI_Comm comm,
MPI_Request *request)
@ -29,25 +32,24 @@ int MPI_Ssend_init(void *buf, int count, MPI_Datatype type,
if ( MPI_PARAM_CHECK ) {
rc = MPI_SUCCESS;
if ( OMPI_MPI_INVALID_STATE ) {
rc = MPI_ERR_INTERN;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
} else if (count < 0) {
rc = MPI_ERR_COUNT;
} else if (type == MPI_DATATYPE_NULL) {
rc = MPI_ERR_TYPE;
} else if (tag < 0 || tag > MPI_TAG_UB_VALUE) {
rc = MPI_ERR_TAG;
} else if (ompi_comm_invalid(comm)) {
rc = MPI_ERR_COMM;
} else if (ompi_comm_peer_invalid(comm, dest)) {
rc = MPI_ERR_RANK;
} else if (request == NULL) {
rc = MPI_ERR_REQUEST;
}
OMPI_ERRHANDLER_CHECK(rc, comm, rc, "MPI_Ssend_init");
OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
}
rc = mca_pml.pml_isend_init(buf,count,type,dest,tag,MCA_PML_BASE_SEND_SYNCHRONOUS,comm,request);
OMPI_ERRHANDLER_RETURN(rc, comm, rc, "MPI_Ssend_init");
OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
}

View file

@ -18,22 +18,21 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Test";
int MPI_Test(MPI_Request *request, int *completed, MPI_Status *status)
{
int rc, index;
if ( MPI_PARAM_CHECK ) {
rc = MPI_SUCCESS;
if ( OMPI_MPI_INVALID_STATE ) {
rc = MPI_ERR_INTERN;
} else if (request == NULL) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (request == NULL) {
rc = MPI_ERR_REQUEST;
} else if (completed == NULL) {
rc = MPI_ERR_ARG;
}
/* JMS: Tim will fix to invoke on the communicator/window/file
on the request (i.e., not COMM_WORLD), if the request is
available/valid */
OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, "MPI_Test");
OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
}
if(*request == NULL) {
@ -50,6 +49,6 @@ int MPI_Test(MPI_Request *request, int *completed, MPI_Status *status)
}
/* JMS: Tim will fix to invoke on the communicator/window/file on
the request (i.e., not COMM_WORLD) */
OMPI_ERRHANDLER_RETURN(rc, MPI_COMM_WORLD, rc, "MPI_Test");
OMPI_ERRHANDLER_RETURN(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
}

View file

@ -16,5 +16,6 @@
#endif
int MPI_Test_cancelled(MPI_Status *status, int *flag) {
*flag = 0;
return MPI_SUCCESS;
}

View file

@ -17,22 +17,24 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Testany";
int MPI_Testany(int count, MPI_Request requests[], int *index, int *completed, MPI_Status *status)
{
int rc;
if ( MPI_PARAM_CHECK ) {
int rc = MPI_SUCCESS;
if ( OMPI_MPI_INVALID_STATE ) {
rc = MPI_ERR_INTERN;
} else if (NULL == requests) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (NULL == requests) {
rc = MPI_ERR_REQUEST;
} else if (NULL == index) {
rc = MPI_ERR_ARG;
}
OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, "MPI_Testany");
OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
}
rc = mca_pml.pml_test(count, requests, index, completed, status);
OMPI_ERRHANDLER_RETURN(rc, MPI_COMM_WORLD, rc, "MPI_Testany");
OMPI_ERRHANDLER_RETURN(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
}

View file

@ -17,6 +17,9 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Testsome";
int MPI_Testsome(int incount, MPI_Request requests[],
int *outcount, int indices[],
MPI_Status statuses[])
@ -24,19 +27,18 @@ int MPI_Testsome(int incount, MPI_Request requests[],
int rc, index, completed;
if ( MPI_PARAM_CHECK ) {
int rc = MPI_SUCCESS;
if ( OMPI_MPI_INVALID_STATE ) {
rc = MPI_ERR_INTERN;
} else if (NULL == requests) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (NULL == requests) {
rc = MPI_ERR_REQUEST;
} else if (NULL == indices) {
rc = MPI_ERR_ARG;
}
OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, "MPI_Testsome");
OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
}
/* optimize this in the future */
rc = mca_pml.pml_test(incount, requests, &index, &completed, statuses);
OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, "MPI_Testsome");
OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
if(completed) {
*outcount = 1;
indices[0] = index;

View file

@ -19,7 +19,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Type_commit";
static const char FUNC_NAME[] = "MPI_Type_commit";
int
MPI_Type_commit(MPI_Datatype *type)

View file

@ -19,7 +19,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Type_contiguous";
static const char FUNC_NAME[] = "MPI_Type_contiguous";
int
MPI_Type_contiguous(int count,

View file

@ -19,7 +19,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Type_create_hindexed";
static const char FUNC_NAME[] = "MPI_Type_create_hindexed";
int
MPI_Type_create_hindexed(int count,

View file

@ -19,7 +19,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Type_create_hvector";
static const char FUNC_NAME[] = "MPI_Type_create_hvector";
int
MPI_Type_create_hvector(int count,

View file

@ -19,7 +19,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Type_create_indexed_block";
static const char FUNC_NAME[] = "MPI_Type_create_indexed_block";
int
MPI_Type_create_indexed_block(int count,

View file

@ -16,7 +16,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Type_create_keyval";
static const char FUNC_NAME[] = "MPI_Type_create_keyval";
int
MPI_Type_create_keyval(MPI_Type_copy_attr_function *type_copy_attr_fn,

View file

@ -19,7 +19,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Type_create_resized";
static const char FUNC_NAME[] = "MPI_Type_create_resized";
int
MPI_Type_create_resized(MPI_Datatype oldtype,

View file

@ -19,7 +19,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Type_create_struct";
static const char FUNC_NAME[] = "MPI_Type_create_struct";
int
MPI_Type_create_struct(int count,

View file

@ -16,7 +16,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Type_delete_attr";
static const char FUNC_NAME[] = "MPI_Type_delete_attr";
int
MPI_Type_delete_attr (MPI_Datatype type, int type_keyval)

View file

@ -19,7 +19,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Type_dup";
static const char FUNC_NAME[] = "MPI_Type_dup";
int
MPI_Type_dup (MPI_Datatype type,

View file

@ -19,7 +19,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Type_free";
static const char FUNC_NAME[] = "MPI_Type_free";
int
MPI_Type_free(MPI_Datatype *type)

View file

@ -16,7 +16,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Type_free_keyval";
static const char FUNC_NAME[] = "MPI_Type_free_keyval";
int
MPI_Type_free_keyval(int *type_keyval)

View file

@ -16,7 +16,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Type_get_attr";
static const char FUNC_NAME[] = "MPI_Type_get_attr";
int
MPI_Type_get_attr (MPI_Datatype type,

View file

@ -19,7 +19,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Type_get_contents";
static const char FUNC_NAME[] = "MPI_Type_get_contents";
int
MPI_Type_get_contents(MPI_Datatype mtype,

View file

@ -19,7 +19,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Type_get_envelope";
static const char FUNC_NAME[] = "MPI_Type_get_envelope";
int
MPI_Type_get_envelope(MPI_Datatype type,

View file

@ -19,7 +19,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Type_get_extent";
static const char FUNC_NAME[] = "MPI_Type_get_extent";
int
MPI_Type_get_extent(MPI_Datatype type, MPI_Aint *lb, MPI_Aint *extent)

View file

@ -19,7 +19,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Type_get_true_extent";
static const char FUNC_NAME[] = "MPI_Type_get_true_extent";
int
MPI_Type_get_true_extent(MPI_Datatype datatype,

View file

@ -19,7 +19,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Type_indexed";
static const char FUNC_NAME[] = "MPI_Type_indexed";
int
MPI_Type_indexed(int count,

View file

@ -19,7 +19,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Type_match_size";
static const char FUNC_NAME[] = "MPI_Type_match_size";
int
MPI_Type_match_size(int typeclass, int size, MPI_Datatype *type)

View file

@ -16,7 +16,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Type_set_attr";
static const char FUNC_NAME[] = "MPI_Type_set_attr";
int
MPI_Type_set_attr (MPI_Datatype type,

View file

@ -18,7 +18,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Type_size";
static const char FUNC_NAME[] = "MPI_Type_size";
int
MPI_Type_size(MPI_Datatype type, int *size)

View file

@ -19,7 +19,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Type_vector";
static const char FUNC_NAME[] = "MPI_Type_vector";
int
MPI_Type_vector(int count,

View file

@ -19,7 +19,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Unpack";
static const char FUNC_NAME[] = "MPI_Unpack";
int MPI_Unpack(void *inbuf, int insize, int *position,

View file

@ -17,20 +17,19 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Wait";
int MPI_Wait(MPI_Request *request, MPI_Status *status)
{
int index, rc;
if ( MPI_PARAM_CHECK ) {
rc = MPI_SUCCESS;
if ( OMPI_MPI_INVALID_STATE ) {
rc = MPI_ERR_INTERN;
} else if (request == NULL) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (request == NULL) {
rc = MPI_ERR_REQUEST;
}
/* JMS: Tim will fix to invoke on the communicator/window/file
on the request (i.e., not COMM_WORLD), if the request is
available/valid */
OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, "MPI_Wait");
OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
}
if (NULL == *request) {
@ -42,9 +41,8 @@ int MPI_Wait(MPI_Request *request, MPI_Status *status)
}
return MPI_SUCCESS;
}
rc = mca_pml.pml_wait(1, request, &index, status);
/* JMS: Tim will fix to invoke on the communicator/window/file on
the request (i.e., not COMM_WORLD) */
OMPI_ERRHANDLER_RETURN(rc, MPI_COMM_WORLD, rc, "MPI_Wait");
OMPI_ERRHANDLER_RETURN(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
}
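The early return above for a NULL *request keeps the user-visible contract that the MPI standard requires: waiting on MPI_REQUEST_NULL is legal and completes immediately with an empty status, without touching the PML or an error handler. A small caller-side example of that contract (standard MPI usage, assuming any working MPI installation):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Request req;
    MPI_Status  status;

    MPI_Init(&argc, &argv);
    req = MPI_REQUEST_NULL;
    MPI_Wait(&req, &status);   /* legal: returns at once with an empty status */
    printf("MPI_Wait on MPI_REQUEST_NULL returned\n");
    MPI_Finalize();
    return 0;
}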

View file

@ -17,20 +17,21 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Waitall";
int MPI_Waitall(int count, MPI_Request *requests, MPI_Status *statuses)
{
int rc;
if ( MPI_PARAM_CHECK ) {
rc = MPI_SUCCESS;
if ( OMPI_MPI_INVALID_STATE ) {
rc = MPI_ERR_INTERN;
} else if (requests == NULL) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (requests == NULL) {
rc = MPI_ERR_REQUEST;
}
OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, "MPI_Waitall");
OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
}
rc = mca_pml.pml_wait_all(count, requests, statuses);
OMPI_ERRHANDLER_RETURN(rc, MPI_COMM_WORLD, rc, "MPI_Waitall");
OMPI_ERRHANDLER_RETURN(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
}

View file

@ -17,19 +17,21 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Waitany";
int MPI_Waitany(int count, MPI_Request *requests, int *index, MPI_Status *status)
{
int rc;
if ( MPI_PARAM_CHECK ) {
rc = MPI_SUCCESS;
if ( OMPI_MPI_INVALID_STATE ) {
rc = MPI_ERR_INTERN;
} else if (requests == NULL) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (requests == NULL) {
rc = MPI_ERR_REQUEST;
}
OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, "MPI_Waitany");
OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
}
rc = mca_pml.pml_wait(count, requests, index, status);
OMPI_ERRHANDLER_RETURN(rc, MPI_COMM_WORLD, rc, "MPI_Waitany");
OMPI_ERRHANDLER_RETURN(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
}

View file

@ -18,6 +18,8 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Waitsome";
int MPI_Waitsome(int incount, MPI_Request *requests,
int *outcount, int *indices,
@ -28,17 +30,16 @@ int MPI_Waitsome(int incount, MPI_Request *requests,
if ( MPI_PARAM_CHECK ) {
int rc = MPI_SUCCESS;
if (ompi_mpi_finalized) {
rc = MPI_ERR_INTERN;
} else if (requests == NULL) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (requests == NULL) {
rc = MPI_ERR_REQUEST;
}
OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, "MPI_Waitsome");
OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
}
/* optimize this in the future */
rc = mca_pml.pml_wait(incount, requests, &index, statuses);
OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, "MPI_Waitsome");
OMPI_ERRHANDLER_CHECK(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
*outcount = 1;
indices[0] = index;
return MPI_SUCCESS;
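As the "optimize this in the future" comment indicates, this interim MPI_Waitsome forwards to pml_wait and reports a single completed request per call, setting *outcount to 1. Code written against the standard interface still works; it simply drains its requests one at a time. A typical caller-side loop, shown here in standard MPI with error handling and main() omitted (not Open MPI internals):

#include <mpi.h>
#include <stdlib.h>

/* Complete all n outstanding requests, accepting however many
 * completions MPI_Waitsome reports on each call. */
static void drain_all(int n, MPI_Request *reqs)
{
    int *indices = malloc((size_t)n * sizeof *indices);
    MPI_Status *statuses = malloc((size_t)n * sizeof *statuses);
    int remaining = n;

    while (remaining > 0) {
        int outcount = 0;
        MPI_Waitsome(n, reqs, &outcount, indices, statuses);
        if (MPI_UNDEFINED == outcount)   /* no active requests left */
            break;
        remaining -= outcount;
    }
    free(indices);
    free(statuses);
}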

View file

@ -16,7 +16,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Win_create_keyval";
static const char FUNC_NAME[] = "MPI_Win_create_keyval";
int MPI_Win_create_keyval(MPI_Win_copy_attr_function *win_copy_attr_fn,
MPI_Win_delete_attr_function *win_delete_attr_fn,

View file

@ -16,7 +16,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Win_delete_attr";
static const char FUNC_NAME[] = "MPI_Win_delete_attr";
int MPI_Win_delete_attr(MPI_Win win, int win_keyval)
{

View file

@ -16,7 +16,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Win_free_keyval";
static const char FUNC_NAME[] = "MPI_Win_free_keyval";
int MPI_Win_free_keyval(int *win_keyval) {

View file

@ -16,7 +16,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Win_get_attr";
static const char FUNC_NAME[] = "MPI_Win_get_attr";
int MPI_Win_get_attr(MPI_Win win, int win_keyval,
void *attribute_val, int *flag) {

View file

@ -16,7 +16,7 @@
#include "mpi/c/profile/defines.h"
#endif
static char FUNC_NAME[] = "MPI_Win_set_attr";
static const char FUNC_NAME[] = "MPI_Win_set_attr";
int MPI_Win_set_attr(MPI_Win win, int win_keyval, void *attribute_val) {

View file

@ -15,6 +15,9 @@
#include "mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Win_wait";
int MPI_Win_wait(MPI_Win win) {
return MPI_SUCCESS;
}

Some files were not shown because too many files changed in this diff.