
Add an internal ompi error code for RMA sync errors.

Dave Goodell correctly pointed out that it is unusual to return MPI
error classes from internal ompi functions. Correct this in the RMA
case by adding an internal error code to match MPI_ERR_RMA_SYNC.

This does change OMPI_ERR_MAX. I don't think this will cause any
problems with ABI.

cmr=v1.7.5:reviewer=jsquyres

This commit was SVN r31012.
Этот коммит содержится в:
Nathan Hjelm 2014-03-11 23:45:23 +00:00
родитель dc28015bcb
Коммит d5d2d5c4d8
9 изменённых файлов: 38 добавлений и 27 удалений

Просмотреть файл

@@ -45,6 +45,7 @@ static ompi_errcode_intern_t ompi_err_unreach;
static ompi_errcode_intern_t ompi_err_not_found;
static ompi_errcode_intern_t ompi_err_request;
static ompi_errcode_intern_t ompi_err_buffer;
static ompi_errcode_intern_t ompi_err_rma_sync;
static void ompi_errcode_intern_construct(ompi_errcode_intern_t* errcode);
static void ompi_errcode_intern_destruct(ompi_errcode_intern_t* errcode);
@@ -192,6 +193,14 @@ int ompi_errcode_intern_init (void)
opal_pointer_array_set_item(&ompi_errcodes_intern, ompi_err_request.index,
&ompi_err_request);
OBJ_CONSTRUCT(&ompi_err_rma_sync, ompi_errcode_intern_t);
ompi_err_request.code = OMPI_ERR_RMA_SYNC;
ompi_err_request.mpi_code = MPI_ERR_RMA_SYNC;
ompi_err_request.index = pos++;
strncpy(ompi_err_request.errstring, "OMPI_ERR_RMA_SYNC", OMPI_MAX_ERROR_STRING);
opal_pointer_array_set_item(&ompi_errcodes_intern, ompi_err_request.index,
&ompi_err_rma_sync);
ompi_errcode_intern_lastused=pos;
return OMPI_SUCCESS;
}
@@ -215,6 +224,7 @@ int ompi_errcode_intern_finalize(void)
OBJ_DESTRUCT(&ompi_err_not_found);
OBJ_DESTRUCT(&ompi_err_buffer);
OBJ_DESTRUCT(&ompi_err_request);
OBJ_DESTRUCT(&ompi_err_rma_sync);
OBJ_DESTRUCT(&ompi_errcodes_intern);
return OMPI_SUCCESS;

Просмотреть файл

@@ -64,10 +64,11 @@ enum {
OMPI_ERR_BUFFER = OPAL_ERR_BUFFER,
OMPI_ERR_SILENT = OPAL_ERR_SILENT,
OMPI_ERR_REQUEST = OMPI_ERR_BASE - 1
OMPI_ERR_REQUEST = OMPI_ERR_BASE - 1,
OMPI_ERR_RMA_SYNC = OMPI_ERR_BASE - 2
};
#define OMPI_ERR_MAX (OMPI_ERR_BASE - 2)
#define OMPI_ERR_MAX (OMPI_ERR_BASE - 100)
#endif /* OMPI_CONSTANTS_H */

Просмотреть файл

@@ -672,7 +672,7 @@ ompi_osc_portals4_free(struct ompi_win_t *win)
if (NULL != module->free_after) free(module->free_after);
if (!opal_list_is_empty(&module->outstanding_locks)) {
ret = MPI_ERR_RMA_SYNC;
ret = OMPI_ERR_RMA_SYNC;
}
OBJ_DESTRUCT(&module->outstanding_locks);

Просмотреть файл

@@ -153,7 +153,7 @@ start_exclusive(ompi_osc_portals4_module_t *module,
while (true) {
ret = lk_cas64(module, target, LOCK_EXCLUSIVE, 0, &result);
if (OMPI_SUCCESS != ret) return ret;
if (LOCK_ILLEGAL == (LOCK_ILLEGAL & result)) return MPI_ERR_RMA_SYNC;
if (LOCK_ILLEGAL == (LOCK_ILLEGAL & result)) return OMPI_ERR_RMA_SYNC;
if (0 == result) break;
}
@@ -184,7 +184,7 @@ start_shared(ompi_osc_portals4_module_t *module,
if (OMPI_SUCCESS != ret) return ret;
if (result > (int64_t)LOCK_EXCLUSIVE) {
if (LOCK_ILLEGAL == (LOCK_ILLEGAL & result)) return MPI_ERR_RMA_SYNC;
if (LOCK_ILLEGAL == (LOCK_ILLEGAL & result)) return OMPI_ERR_RMA_SYNC;
ret = lk_add64(module, target, -1, &result);
if (OMPI_SUCCESS != ret) return ret;
} else {
@@ -264,7 +264,7 @@ ompi_osc_portals4_unlock(int target,
if (NULL != item) {
opal_list_remove_item(&module->outstanding_locks, &lock->super);
} else {
return MPI_ERR_RMA_SYNC;
return OMPI_ERR_RMA_SYNC;
}
ret = ompi_osc_portals4_complete_all(module);
@@ -338,7 +338,7 @@ ompi_osc_portals4_unlock_all(struct ompi_win_t *win)
if (NULL != item) {
opal_list_remove_item(&module->outstanding_locks, &lock->super);
} else {
return MPI_ERR_RMA_SYNC;
return OMPI_ERR_RMA_SYNC;
}
ret = ompi_osc_portals4_complete_all(module);

Просмотреть файл

@@ -147,7 +147,7 @@ ompi_osc_rdma_start(ompi_group_t *group,
"ompi_osc_rdma_start entering..."));
if (module->sc_group) {
return MPI_ERR_RMA_SYNC;
return OMPI_ERR_RMA_SYNC;
}
/* save the group */
@@ -158,7 +158,7 @@ ompi_osc_rdma_start(ompi_group_t *group,
/* ensure we're not already in a start */
if (NULL != module->sc_group) {
ret = MPI_ERR_RMA_SYNC;
ret = OMPI_ERR_RMA_SYNC;
goto cleanup;
}
module->sc_group = group;
@@ -211,7 +211,7 @@ ompi_osc_rdma_complete(ompi_win_t *win)
"ompi_osc_rdma_complete entering..."));
if (NULL == module->sc_group) {
return MPI_ERR_RMA_SYNC;
return OMPI_ERR_RMA_SYNC;
}
ranks = get_comm_ranks(module, module->sc_group);
@@ -301,7 +301,7 @@ ompi_osc_rdma_post(ompi_group_t *group,
"ompi_osc_rdma_post entering..."));
if (module->pw_group) {
return MPI_ERR_RMA_SYNC;
return OMPI_ERR_RMA_SYNC;
}
/* save the group */
@@ -313,7 +313,7 @@ ompi_osc_rdma_post(ompi_group_t *group,
/* ensure we're not already in a post */
if (NULL != module->pw_group) {
OPAL_THREAD_UNLOCK(&(module->lock));
return MPI_ERR_RMA_SYNC;
return OMPI_ERR_RMA_SYNC;
}
module->pw_group = group;
@@ -363,7 +363,7 @@ ompi_osc_rdma_wait(ompi_win_t *win)
"ompi_osc_rdma_wait entering..."));
if (NULL == module->pw_group) {
return MPI_ERR_RMA_SYNC;
return OMPI_ERR_RMA_SYNC;
}
OPAL_THREAD_LOCK(&module->lock);
@@ -399,7 +399,7 @@ ompi_osc_rdma_test(ompi_win_t *win,
#endif
if (NULL == module->pw_group) {
return MPI_ERR_RMA_SYNC;
return OMPI_ERR_RMA_SYNC;
}
OPAL_THREAD_LOCK(&(module->lock));

Просмотреть файл

@@ -73,7 +73,7 @@ static inline int ompi_osc_rdma_put_self (void *source, int source_count, ompi_d
int ret;
if (!(module->passive_target_access_epoch || module->active_eager_send_active)) {
return MPI_ERR_RMA_SYNC;
return OMPI_ERR_RMA_SYNC;
}
ret = ompi_datatype_sndrcv (source, source_count, source_datatype,
@@ -98,7 +98,7 @@ static inline int ompi_osc_rdma_get_self (void *target, int target_count, ompi_d
int ret;
if (!(module->passive_target_access_epoch || module->active_eager_send_active)) {
return MPI_ERR_RMA_SYNC;
return OMPI_ERR_RMA_SYNC;
}
ret = ompi_datatype_sndrcv (source, source_count, source_datatype,
@@ -121,7 +121,7 @@ static inline int ompi_osc_rdma_cas_self (void *source, void *compare, void *res
((unsigned long) target_disp * module->disp_unit);
if (!(module->passive_target_access_epoch || module->active_eager_send_active)) {
return MPI_ERR_RMA_SYNC;
return OMPI_ERR_RMA_SYNC;
}
ompi_osc_rdma_accumulate_lock (module);
@@ -146,7 +146,7 @@ static inline int ompi_osc_rdma_acc_self (void *source, int source_count, ompi_d
int ret;
if (!(module->passive_target_access_epoch || module->active_eager_send_active)) {
return MPI_ERR_RMA_SYNC;
return OMPI_ERR_RMA_SYNC;
}
ompi_osc_rdma_accumulate_lock (module);
@@ -182,7 +182,7 @@ static inline int ompi_osc_rdma_gacc_self (void *source, int source_count, ompi_
int ret;
if (!(module->passive_target_access_epoch || module->active_eager_send_active)) {
return MPI_ERR_RMA_SYNC;
return OMPI_ERR_RMA_SYNC;
}
ompi_osc_rdma_accumulate_lock (module);

Просмотреть файл

@@ -120,7 +120,7 @@ ompi_osc_rdma_frag_flush_target(ompi_osc_rdma_module_t *module, int target)
if (0 != frag->pending) {
/* communication going on while synchronizing; this is a bug */
return MPI_ERR_RMA_SYNC;
return OMPI_ERR_RMA_SYNC;
}
module->peers[target].active_frag = NULL;
@@ -166,7 +166,7 @@ ompi_osc_rdma_frag_flush_all(ompi_osc_rdma_module_t *module)
if (0 != frag->pending) {
/* communication going on while synchronizing; this is a bug */
return MPI_ERR_RMA_SYNC;
return OMPI_ERR_RMA_SYNC;
}
module->peers[i].active_frag = NULL;

Просмотреть файл

@@ -253,7 +253,7 @@ int ompi_osc_rdma_unlock(int target, ompi_win_t *win)
"ompi_osc_rdma_unlock: target %d is not locked in window %s",
target, win->w_name));
OPAL_THREAD_LOCK(&module->lock);
return MPI_ERR_RMA_SYNC;
return OMPI_ERR_RMA_SYNC;
}
if (ompi_comm_rank (module->comm) != target) {
@@ -380,7 +380,7 @@ int ompi_osc_rdma_unlock_all (struct ompi_win_t *win)
"ompi_osc_rdma_unlock_all: not locked in window %s",
win->w_name));
OPAL_THREAD_LOCK(&module->lock);
return MPI_ERR_RMA_SYNC;
return OMPI_ERR_RMA_SYNC;
}
/* wait for lock acks */
@@ -543,7 +543,7 @@ int ompi_osc_rdma_flush (int target, struct ompi_win_t *win)
"ompi_osc_rdma_flush: target %d is not locked in window %s",
target, win->w_name));
OPAL_THREAD_LOCK(&module->lock);
return MPI_ERR_RMA_SYNC;
return OMPI_ERR_RMA_SYNC;
}
ret = ompi_osc_rdma_flush_lock (module, lock, target);
@@ -564,7 +564,7 @@ int ompi_osc_rdma_flush_all (struct ompi_win_t *win)
OPAL_OUTPUT_VERBOSE((25, ompi_osc_base_framework.framework_output,
"ompi_osc_rdma_flush_all: no targets are locked in window %s",
win->w_name));
return MPI_ERR_RMA_SYNC;
return OMPI_ERR_RMA_SYNC;
}
OPAL_THREAD_LOCK(&module->lock);

Просмотреть файл

@@ -119,7 +119,7 @@ ompi_osc_sm_lock(int lock_type,
int ret;
if (lock_none != module->outstanding_locks[target]) {
return MPI_ERR_RMA_SYNC;
return OMPI_ERR_RMA_SYNC;
}
if (0 == (assert & MPI_MODE_NOCHECK)) {
@@ -159,7 +159,7 @@ ompi_osc_sm_unlock(int target,
ret = end_shared(module, target);
module->outstanding_locks[target] = lock_none;
} else {
ret = MPI_ERR_RMA_SYNC;
ret = OMPI_ERR_RMA_SYNC;
}
return ret;