1
1

Each level (OPAL/ORTE/OMPI) should only return its own constants,

instead of the current mismatch.

This commit was SVN r25230.
Этот коммит содержится в:
George Bosilca 2011-10-04 14:50:31 +00:00
родитель c6d6c9aece
Коммит 80c02647c8
30 изменённых файлов: 226 добавлений и 231 удалений

Просмотреть файл

@ -2,7 +2,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2008 The University of Tennessee and The University
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -1041,7 +1041,7 @@ ompi_proc_t **ompi_comm_get_rprocs ( ompi_communicator_t *local_comm,
ompi_communicator_t *bridge_comm,
int local_leader,
int remote_leader,
orte_rml_tag_t tag,
int tag,
int rsize)
{
@ -1063,7 +1063,7 @@ ompi_proc_t **ompi_comm_get_rprocs ( ompi_communicator_t *local_comm,
if (local_rank == local_leader) {
sbuf = OBJ_NEW(opal_buffer_t);
if (NULL == sbuf) {
rc = ORTE_ERROR;
rc = OMPI_ERROR;
goto err_exit;
}
if(OMPI_GROUP_IS_DENSE(local_comm->c_local_group)) {
@ -1081,7 +1081,7 @@ ompi_proc_t **ompi_comm_get_rprocs ( ompi_communicator_t *local_comm,
if ( OMPI_SUCCESS != rc ) {
goto err_exit;
}
if (ORTE_SUCCESS != (rc = opal_dss.unload(sbuf, &sendbuf, &size_len))) {
if (OPAL_SUCCESS != (rc = opal_dss.unload(sbuf, &sendbuf, &size_len))) {
goto err_exit;
}
@ -1149,7 +1149,7 @@ ompi_proc_t **ompi_comm_get_rprocs ( ompi_communicator_t *local_comm,
rbuf = OBJ_NEW(opal_buffer_t);
if (NULL == rbuf) {
rc = ORTE_ERROR;
rc = OMPI_ERROR;
goto err_exit;
}
@ -1166,7 +1166,7 @@ ompi_proc_t **ompi_comm_get_rprocs ( ompi_communicator_t *local_comm,
err_exit:
/* rprocs isn't freed unless we have an error,
since it is used in the communicator */
if ( OMPI_SUCCESS !=rc ) {
if ( OMPI_SUCCESS != rc ) {
opal_output(0, "%d: Error in ompi_get_rprocs\n", local_rank);
if ( NULL != rprocs ) {
free ( rprocs );

Просмотреть файл

@ -3,7 +3,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2008 The University of Tennessee and The University
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
@ -802,7 +802,7 @@ static int ompi_comm_allreduce_intra_oob (int *inbuf, int *outbuf,
sbuf = OBJ_NEW(opal_buffer_t);
rbuf = OBJ_NEW(opal_buffer_t);
if (ORTE_SUCCESS != (rc = opal_dss.pack(sbuf, tmpbuf, (orte_std_cntr_t)count, OPAL_INT))) {
if (OPAL_SUCCESS != (rc = opal_dss.pack(sbuf, tmpbuf, (orte_std_cntr_t)count, OPAL_INT))) {
goto exit;
}
@ -823,7 +823,7 @@ static int ompi_comm_allreduce_intra_oob (int *inbuf, int *outbuf,
}
}
if (ORTE_SUCCESS != (rc = opal_dss.unpack(rbuf, outbuf, &size_count, OPAL_INT))) {
if (OPAL_SUCCESS != (rc = opal_dss.unpack(rbuf, outbuf, &size_count, OPAL_INT))) {
goto exit;
}
OBJ_RELEASE(sbuf);

Просмотреть файл

@ -3,7 +3,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2008 The University of Tennessee and The University
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -31,7 +31,6 @@
#include "mpi.h"
#include "ompi/group/group.h"
#include "ompi/mca/coll/coll.h"
#include "orte/mca/rml/rml_types.h"
#include "ompi/proc/proc.h"
BEGIN_C_DECLS
@ -470,7 +469,7 @@ struct ompi_proc_t **ompi_comm_get_rprocs ( ompi_communicator_t *local_comm,
ompi_communicator_t *bridge_comm,
int local_leader,
int remote_leader,
orte_rml_tag_t tag,
int tag,
int rsize);
/**

Просмотреть файл

@ -2,7 +2,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -27,50 +27,50 @@
enum {
/* Error codes inherited from ORTE/OPAL. Still enum values so
that we might get nice debugger help */
OMPI_SUCCESS = ORTE_SUCCESS,
OMPI_SUCCESS = OPAL_SUCCESS,
OMPI_ERROR = ORTE_ERROR,
OMPI_ERR_OUT_OF_RESOURCE = ORTE_ERR_OUT_OF_RESOURCE,
OMPI_ERR_TEMP_OUT_OF_RESOURCE = ORTE_ERR_TEMP_OUT_OF_RESOURCE,
OMPI_ERR_RESOURCE_BUSY = ORTE_ERR_RESOURCE_BUSY,
OMPI_ERR_BAD_PARAM = ORTE_ERR_BAD_PARAM,
OMPI_ERR_FATAL = ORTE_ERR_FATAL,
OMPI_ERR_NOT_IMPLEMENTED = ORTE_ERR_NOT_IMPLEMENTED,
OMPI_ERR_NOT_SUPPORTED = ORTE_ERR_NOT_SUPPORTED,
OMPI_ERR_INTERUPTED = ORTE_ERR_INTERUPTED,
OMPI_ERR_WOULD_BLOCK = ORTE_ERR_WOULD_BLOCK,
OMPI_ERR_IN_ERRNO = ORTE_ERR_IN_ERRNO,
OMPI_ERR_UNREACH = ORTE_ERR_UNREACH,
OMPI_ERR_NOT_FOUND = ORTE_ERR_NOT_FOUND,
OMPI_EXISTS = ORTE_EXISTS, /* indicates that the specified object already exists */
OMPI_ERR_TIMEOUT = ORTE_ERR_TIMEOUT,
OMPI_ERR_NOT_AVAILABLE = ORTE_ERR_NOT_AVAILABLE,
OMPI_ERR_PERM = ORTE_ERR_PERM,
OMPI_ERR_VALUE_OUT_OF_BOUNDS = ORTE_ERR_VALUE_OUT_OF_BOUNDS,
OMPI_ERR_FILE_READ_FAILURE = ORTE_ERR_FILE_READ_FAILURE,
OMPI_ERR_FILE_WRITE_FAILURE = ORTE_ERR_FILE_WRITE_FAILURE,
OMPI_ERR_FILE_OPEN_FAILURE = ORTE_ERR_FILE_OPEN_FAILURE,
OMPI_ERROR = OPAL_ERROR,
OMPI_ERR_OUT_OF_RESOURCE = OPAL_ERR_OUT_OF_RESOURCE,
OMPI_ERR_TEMP_OUT_OF_RESOURCE = OPAL_ERR_TEMP_OUT_OF_RESOURCE,
OMPI_ERR_RESOURCE_BUSY = OPAL_ERR_RESOURCE_BUSY,
OMPI_ERR_BAD_PARAM = OPAL_ERR_BAD_PARAM,
OMPI_ERR_FATAL = OPAL_ERR_FATAL,
OMPI_ERR_NOT_IMPLEMENTED = OPAL_ERR_NOT_IMPLEMENTED,
OMPI_ERR_NOT_SUPPORTED = OPAL_ERR_NOT_SUPPORTED,
OMPI_ERR_INTERUPTED = OPAL_ERR_INTERUPTED,
OMPI_ERR_WOULD_BLOCK = OPAL_ERR_WOULD_BLOCK,
OMPI_ERR_IN_ERRNO = OPAL_ERR_IN_ERRNO,
OMPI_ERR_UNREACH = OPAL_ERR_UNREACH,
OMPI_ERR_NOT_FOUND = OPAL_ERR_NOT_FOUND,
OMPI_EXISTS = OPAL_EXISTS, /* indicates that the specified object already exists */
OMPI_ERR_TIMEOUT = OPAL_ERR_TIMEOUT,
OMPI_ERR_NOT_AVAILABLE = OPAL_ERR_NOT_AVAILABLE,
OMPI_ERR_PERM = OPAL_ERR_PERM,
OMPI_ERR_VALUE_OUT_OF_BOUNDS = OPAL_ERR_VALUE_OUT_OF_BOUNDS,
OMPI_ERR_FILE_READ_FAILURE = OPAL_ERR_FILE_READ_FAILURE,
OMPI_ERR_FILE_WRITE_FAILURE = OPAL_ERR_FILE_WRITE_FAILURE,
OMPI_ERR_FILE_OPEN_FAILURE = OPAL_ERR_FILE_OPEN_FAILURE,
OMPI_ERR_PACK_MISMATCH = OPAL_ERR_PACK_MISMATCH,
OMPI_ERR_PACK_FAILURE = OPAL_ERR_PACK_FAILURE,
OMPI_ERR_UNPACK_FAILURE = OPAL_ERR_UNPACK_FAILURE,
OMPI_ERR_UNPACK_INADEQUATE_SPACE = ORTE_ERR_UNPACK_INADEQUATE_SPACE,
OMPI_ERR_UNPACK_READ_PAST_END_OF_BUFFER = ORTE_ERR_UNPACK_READ_PAST_END_OF_BUFFER,
OMPI_ERR_TYPE_MISMATCH = OPAL_ERR_TYPE_MISMATCH,
OMPI_ERR_UNKNOWN_DATA_TYPE = OPAL_ERR_UNKNOWN_DATA_TYPE,
OMPI_ERR_DATA_TYPE_REDEF = OPAL_ERR_DATA_TYPE_REDEF,
OMPI_ERR_DATA_OVERWRITE_ATTEMPT = OPAL_ERR_DATA_OVERWRITE_ATTEMPT,
OMPI_ERR_RECV_LESS_THAN_POSTED = ORTE_ERR_RECV_LESS_THAN_POSTED,
OMPI_ERR_RECV_MORE_THAN_POSTED = ORTE_ERR_RECV_MORE_THAN_POSTED,
OMPI_ERR_NO_MATCH_YET = ORTE_ERR_NO_MATCH_YET,
OMPI_ERR_BUFFER = ORTE_ERR_BUFFER,
OMPI_ERR_REQUEST = ORTE_ERR_REQUEST,
OMPI_ERR_NO_CONNECTION_ALLOWED = ORTE_ERR_NO_CONNECTION_ALLOWED,
OMPI_ERR_CONNECTION_REFUSED = ORTE_ERR_CONNECTION_REFUSED ,
OMPI_ERR_CONNECTION_FAILED = ORTE_ERR_CONNECTION_FAILED,
OMPI_PACK_MISMATCH = ORTE_ERR_PACK_MISMATCH,
OMPI_ERR_PACK_FAILURE = ORTE_ERR_PACK_FAILURE,
OMPI_ERR_UNPACK_FAILURE = ORTE_ERR_UNPACK_FAILURE,
OMPI_ERR_COMM_FAILURE = ORTE_ERR_COMM_FAILURE,
OMPI_UNPACK_INADEQUATE_SPACE = ORTE_ERR_UNPACK_INADEQUATE_SPACE,
OMPI_UNPACK_READ_PAST_END_OF_BUFFER = ORTE_ERR_UNPACK_READ_PAST_END_OF_BUFFER,
OMPI_ERR_TYPE_MISMATCH = ORTE_ERR_TYPE_MISMATCH,
OMPI_ERR_COMPARE_FAILURE = ORTE_ERR_COMPARE_FAILURE,
OMPI_ERR_COPY_FAILURE = ORTE_ERR_COPY_FAILURE,
OMPI_ERR_UNKNOWN_DATA_TYPE = ORTE_ERR_UNKNOWN_DATA_TYPE,
OMPI_ERR_DATA_TYPE_REDEF = ORTE_ERR_DATA_TYPE_REDEF,
OMPI_ERR_DATA_OVERWRITE_ATTEMPT = ORTE_ERR_DATA_OVERWRITE_ATTEMPT
OMPI_ERR_RECV_LESS_THAN_POSTED = ORTE_ERR_RECV_LESS_THAN_POSTED,
OMPI_ERR_RECV_MORE_THAN_POSTED = ORTE_ERR_RECV_MORE_THAN_POSTED,
OMPI_ERR_NO_MATCH_YET = ORTE_ERR_NO_MATCH_YET,
OMPI_ERR_BUFFER = OPAL_ERR_BUFFER,
OMPI_ERR_REQUEST = ORTE_ERR_REQUEST,
OMPI_ERR_NO_CONNECTION_ALLOWED = ORTE_ERR_NO_CONNECTION_ALLOWED,
OMPI_ERR_CONNECTION_REFUSED = ORTE_ERR_CONNECTION_REFUSED ,
OMPI_ERR_CONNECTION_FAILED = ORTE_ERR_CONNECTION_FAILED,
OMPI_ERR_COMM_FAILURE = ORTE_ERR_COMM_FAILURE,
OMPI_ERR_COMPARE_FAILURE = ORTE_ERR_COMPARE_FAILURE,
OMPI_ERR_COPY_FAILURE = ORTE_ERR_COPY_FAILURE
};
#define OMPI_ERR_MAX (OMPI_ERR_BASE - 1)

Просмотреть файл

@ -3,7 +3,7 @@
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2009 The University of Tennessee and The University
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -727,7 +727,7 @@ CLEANUP:
static int mca_bml_r2_add_btl(mca_btl_base_module_t* btl)
{
return ORTE_ERR_NOT_IMPLEMENTED;
return OMPI_ERR_NOT_IMPLEMENTED;
}

Просмотреть файл

@ -2,7 +2,7 @@
* Copyright (c) 2004-2010 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2007 The University of Tennessee and The University
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -152,7 +152,7 @@ int mca_bml_r2_ft_event(int state)
* Barrier to make all processes have been successfully restarted before
* we try to remove some restart only files.
*/
if (OMPI_SUCCESS != (ret = orte_grpcomm.barrier())) {
if (ORTE_SUCCESS != (ret = orte_grpcomm.barrier())) {
opal_output(0, "bml:r2: ft_event(Restart): Failed in orte_grpcomm.barrier (%d)", ret);
return ret;
}
@ -225,7 +225,7 @@ int mca_bml_r2_ft_event(int state)
* Barrier to make all processes have been successfully restarted before
* we try to remove some restart only files.
*/
if (OMPI_SUCCESS != (ret = orte_grpcomm.barrier())) {
if (ORTE_SUCCESS != (ret = orte_grpcomm.barrier())) {
opal_output(0, "bml:r2: ft_event(Restart): Failed in orte_grpcomm.barrier (%d)", ret);
return ret;
}

Просмотреть файл

@ -2,7 +2,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -35,52 +35,52 @@ OMPI_DECLSPEC extern int mca_btl_base_verbose;
OMPI_DECLSPEC extern int mca_btl_base_err(const char*, ...) __opal_attribute_format__(__printf__, 1, 2);
OMPI_DECLSPEC extern int mca_btl_base_out(const char*, ...) __opal_attribute_format__(__printf__, 1, 2);
#define BTL_OUTPUT(args) \
do { \
mca_btl_base_out("[%s]%s[%s:%d:%s] ", \
orte_process_info.nodename, \
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
__FILE__, __LINE__, __func__); \
mca_btl_base_out args; \
mca_btl_base_out("\n"); \
#define BTL_OUTPUT(args) \
do { \
mca_btl_base_out("[%s]%s[%s:%d:%s] ", \
orte_process_info.nodename, \
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
__FILE__, __LINE__, __func__); \
mca_btl_base_out args; \
mca_btl_base_out("\n"); \
} while(0);
#define BTL_ERROR(args) \
do { \
mca_btl_base_err("[%s]%s[%s:%d:%s] ", \
orte_process_info.nodename, \
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
__FILE__, __LINE__, __func__); \
mca_btl_base_err args; \
mca_btl_base_err("\n"); \
#define BTL_ERROR(args) \
do { \
mca_btl_base_err("[%s]%s[%s:%d:%s] ", \
orte_process_info.nodename, \
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
__FILE__, __LINE__, __func__); \
mca_btl_base_err args; \
mca_btl_base_err("\n"); \
} while(0);
#define BTL_PEER_ERROR(proc, args) \
do { \
mca_btl_base_err("%s[%s:%d:%s] from %s ", \
#define BTL_PEER_ERROR(proc, args) \
do { \
mca_btl_base_err("%s[%s:%d:%s] from %s ", \
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
__FILE__, __LINE__, __func__, \
orte_process_info.nodename); \
if(proc && proc->proc_hostname) { \
mca_btl_base_err("to: %s ", proc->proc_hostname); \
} \
mca_btl_base_err args; \
mca_btl_base_err("\n"); \
__FILE__, __LINE__, __func__, \
orte_process_info.nodename); \
if(proc && proc->proc_hostname) { \
mca_btl_base_err("to: %s ", proc->proc_hostname); \
} \
mca_btl_base_err args; \
mca_btl_base_err("\n"); \
} while(0);
#if OPAL_ENABLE_DEBUG
#define BTL_VERBOSE(args) \
do { \
if(mca_btl_base_verbose > 0) { \
mca_btl_base_err("[%s]%s[%s:%d:%s] ", \
orte_process_info.nodename, \
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
__FILE__, __LINE__, __func__); \
mca_btl_base_err args; \
mca_btl_base_err("\n"); \
} \
#define BTL_VERBOSE(args) \
do { \
if(mca_btl_base_verbose > 0) { \
mca_btl_base_err("[%s]%s[%s:%d:%s] ", \
orte_process_info.nodename, \
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), \
__FILE__, __LINE__, __func__); \
mca_btl_base_err args; \
mca_btl_base_err("\n"); \
} \
} while(0);
#else
#define BTL_VERBOSE(args)

Просмотреть файл

@ -286,8 +286,7 @@ static int set_remote_info(mca_btl_base_endpoint_t* endpoint,
mca_btl_openib_component.num_qps);
BTL_VERBOSE(("Setting QP info, LID = %d", endpoint->rem_info.rem_lid));
return ORTE_SUCCESS;
return OMPI_SUCCESS;
}
@ -547,20 +546,20 @@ static int send_connect_data(mca_btl_base_endpoint_t* endpoint,
if (NULL == buffer) {
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
return ORTE_ERR_OUT_OF_RESOURCE;
return OMPI_ERR_OUT_OF_RESOURCE;
}
/* pack the info in the send buffer */
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT8));
rc = opal_dss.pack(buffer, &message_type, 1, OPAL_UINT8);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT64));
rc = opal_dss.pack(buffer, &endpoint->subnet_id, 1, OPAL_UINT64);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
@ -571,13 +570,13 @@ static int send_connect_data(mca_btl_base_endpoint_t* endpoint,
rc = opal_dss.pack(buffer,
&endpoint->rem_info.rem_qps[0].rem_qp_num, 1,
OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT16));
rc = opal_dss.pack(buffer, &endpoint->rem_info.rem_lid, 1, OPAL_UINT16);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
@ -590,14 +589,14 @@ static int send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &endpoint->qps[qp].qp->lcl_qp->qp_num,
1, OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &endpoint->qps[qp].qp->lcl_psn, 1,
OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
@ -605,20 +604,20 @@ static int send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT16));
rc = opal_dss.pack(buffer, &endpoint->endpoint_btl->lid, 1, OPAL_UINT16);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &endpoint->endpoint_btl->device->mtu, 1,
OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &endpoint->index, 1, OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
@ -773,8 +772,8 @@ static void rml_recv_cb(int status, orte_process_name_t* process_name,
ib_proc = (mca_btl_openib_proc_t*)opal_list_get_next(ib_proc)) {
bool found = false;
if (orte_util_compare_name_fields(ORTE_NS_CMP_ALL,
&ib_proc->proc_ompi->proc_name, process_name) != OPAL_EQUAL) {
if (OPAL_EQUAL != orte_util_compare_name_fields(ORTE_NS_CMP_ALL,
&ib_proc->proc_ompi->proc_name, process_name)) {
continue;
}

Просмотреть файл

@ -109,7 +109,7 @@ static int xoob_receive_connect_data(mca_btl_openib_rem_info_t *info, uint16_t *
/* Recv standart header */
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT8));
rc = opal_dss.unpack(buffer, message_type, &cnt, OPAL_UINT8);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return OMPI_ERROR;
}
@ -117,7 +117,7 @@ static int xoob_receive_connect_data(mca_btl_openib_rem_info_t *info, uint16_t *
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT64));
rc = opal_dss.unpack(buffer, &info->rem_subnet_id, &cnt, OPAL_UINT64);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return OMPI_ERROR;
}
@ -125,7 +125,7 @@ static int xoob_receive_connect_data(mca_btl_openib_rem_info_t *info, uint16_t *
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT16));
rc = opal_dss.unpack(buffer, &info->rem_lid, &cnt, OPAL_UINT16);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return OMPI_ERROR;
}
@ -139,7 +139,7 @@ static int xoob_receive_connect_data(mca_btl_openib_rem_info_t *info, uint16_t *
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32));
rc = opal_dss.unpack(buffer, &info->rem_qps->rem_qp_num, &cnt,
OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return OMPI_ERROR;
}
@ -148,7 +148,7 @@ static int xoob_receive_connect_data(mca_btl_openib_rem_info_t *info, uint16_t *
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32));
rc = opal_dss.unpack(buffer, &info->rem_qps->rem_psn, &cnt,
OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return OMPI_ERROR;
}
@ -156,7 +156,7 @@ static int xoob_receive_connect_data(mca_btl_openib_rem_info_t *info, uint16_t *
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32));
rc = opal_dss.unpack(buffer, &info->rem_mtu, &cnt, OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return OMPI_ERROR;
}
@ -168,7 +168,7 @@ static int xoob_receive_connect_data(mca_btl_openib_rem_info_t *info, uint16_t *
/* unpack requested lid info */
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT16));
rc = opal_dss.unpack(buffer, lid, &cnt, OPAL_UINT16);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return OMPI_ERROR;
}
@ -181,7 +181,7 @@ static int xoob_receive_connect_data(mca_btl_openib_rem_info_t *info, uint16_t *
/* In XRC request case we will use rem_qp_num as container for requested qp number */
rc = opal_dss.unpack(buffer, &info->rem_qps->rem_qp_num, &cnt,
OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
@ -192,7 +192,7 @@ static int xoob_receive_connect_data(mca_btl_openib_rem_info_t *info, uint16_t *
ENDPOINT_XOOB_CONNECT_XRC_RESPONSE == *message_type) {
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32));
rc = opal_dss.unpack(buffer, &info->rem_index, &cnt, OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return OMPI_ERROR;
}
@ -201,7 +201,7 @@ static int xoob_receive_connect_data(mca_btl_openib_rem_info_t *info, uint16_t *
for (srq = 0; srq < mca_btl_openib_component.num_xrc_qps; srq++) {
BTL_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32));
rc = opal_dss.unpack(buffer, &info->rem_srqs[srq].rem_srq_num, &cnt, OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return OMPI_ERROR;
}
@ -222,7 +222,7 @@ static int xoob_send_connect_data(mca_btl_base_endpoint_t* endpoint,
if (NULL == buffer) {
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
return ORTE_ERR_OUT_OF_RESOURCE;
return OMPI_ERR_OUT_OF_RESOURCE;
}
/* Bulding standart header that we use in all messages:
@ -234,7 +234,7 @@ static int xoob_send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("Send pack Message type = %d", message_type));
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT8));
rc = opal_dss.pack(buffer, &message_type, 1, OPAL_UINT8);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
@ -242,7 +242,7 @@ static int xoob_send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("Send pack sid = %" PRIx64 "\n", endpoint->subnet_id));
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT64));
rc = opal_dss.pack(buffer, &endpoint->subnet_id, 1, OPAL_UINT64);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
@ -250,7 +250,7 @@ static int xoob_send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("Send pack lid = %d", endpoint->endpoint_btl->lid));
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT16));
rc = opal_dss.pack(buffer, &endpoint->endpoint_btl->lid, 1, OPAL_UINT16);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
@ -277,14 +277,14 @@ static int xoob_send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("Send pack qp num = %x", qp_num));
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &qp_num, 1, OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("Send pack lpsn = %d", psn));
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &psn, 1, OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
@ -293,7 +293,7 @@ static int xoob_send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &endpoint->endpoint_btl->device->mtu, 1,
OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
@ -310,7 +310,7 @@ static int xoob_send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("Send pack remote lid = %d", endpoint->ib_addr->lid));
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT16));
rc = opal_dss.pack(buffer, &endpoint->ib_addr->lid, 1, OPAL_UINT16);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
@ -323,7 +323,7 @@ static int xoob_send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &endpoint->ib_addr->remote_xrc_rcv_qp_num,
1, OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
@ -339,7 +339,7 @@ static int xoob_send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("Send pack index = %d", endpoint->index));
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &endpoint->index, 1, OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
@ -349,7 +349,7 @@ static int xoob_send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &endpoint->endpoint_btl->qps[srq].u.srq_qp.srq->xrc_srq_num,
1, OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
@ -712,8 +712,8 @@ static mca_btl_openib_endpoint_t* xoob_find_endpoint(orte_process_name_t* proces
ib_proc != (mca_btl_openib_proc_t*)
opal_list_get_end(&mca_btl_openib_component.ib_procs);
ib_proc = (mca_btl_openib_proc_t*)opal_list_get_next(ib_proc)) {
if (orte_util_compare_name_fields(ORTE_NS_CMP_ALL,
&ib_proc->proc_ompi->proc_name, process_name) == OPAL_EQUAL) {
if (OPAL_EQUAL == orte_util_compare_name_fields(ORTE_NS_CMP_ALL,
&ib_proc->proc_ompi->proc_name, process_name)) {
found = true;
break;
}

Просмотреть файл

@ -524,20 +524,20 @@ static int mca_btl_udapl_start_connect(mca_btl_base_endpoint_t* endpoint)
if(NULL == buf) {
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
return ORTE_ERR_OUT_OF_RESOURCE;
return OMPI_ERR_OUT_OF_RESOURCE;
}
OPAL_THREAD_ADD32(&(endpoint->endpoint_btl->udapl_connect_inprogress), 1);
/* Pack our address information */
rc = opal_dss.pack(buf, &addr->port, 1, OPAL_UINT64);
if(ORTE_SUCCESS != rc) {
if(OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
rc = opal_dss.pack(buf, &addr->addr, sizeof(DAT_SOCK_ADDR), OPAL_UINT8);
if(ORTE_SUCCESS != rc) {
if(OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
@ -567,14 +567,14 @@ void mca_btl_udapl_endpoint_recv(int status, orte_process_name_t* endpoint,
/* Unpack data */
rc = opal_dss.unpack(buffer, &addr.port, &cnt, OPAL_UINT64);
if(ORTE_SUCCESS != rc) {
if(OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return;
}
cnt = sizeof(mca_btl_udapl_addr_t);
rc = opal_dss.unpack(buffer, &addr.addr, &cnt, OPAL_UINT8);
if(ORTE_SUCCESS != rc) {
if(OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return;
}

Просмотреть файл

@ -2,7 +2,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -372,8 +372,7 @@ static int set_remote_info(mca_btl_base_endpoint_t* endpoint,
mca_btl_wv_component.num_qps);
BTL_VERBOSE(("Setting QP info, LID = %d", endpoint->rem_info.rem_lid));
return ORTE_SUCCESS;
return OMPI_SUCCESS;
}
@ -616,20 +615,20 @@ static int send_connect_data(mca_btl_base_endpoint_t* endpoint,
if (NULL == buffer) {
ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
return ORTE_ERR_OUT_OF_RESOURCE;
return OMPI_ERR_OUT_OF_RESOURCE;
}
/* pack the info in the send buffer */
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT8));
rc = opal_dss.pack(buffer, &message_type, 1, OPAL_UINT8);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT64));
rc = opal_dss.pack(buffer, &endpoint->subnet_id, 1, OPAL_UINT64);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
@ -640,13 +639,13 @@ static int send_connect_data(mca_btl_base_endpoint_t* endpoint,
rc = opal_dss.pack(buffer,
&endpoint->rem_info.rem_qps[0].rem_qp_num, 1,
OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT16));
rc = opal_dss.pack(buffer, &endpoint->rem_info.rem_lid, 1, OPAL_UINT16);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
@ -659,14 +658,14 @@ static int send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &endpoint->qps[qp].qp->lcl_qp->qp_num,
1, OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &endpoint->qps[qp].qp->lcl_psn, 1,
OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
@ -674,20 +673,20 @@ static int send_connect_data(mca_btl_base_endpoint_t* endpoint,
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT16));
rc = opal_dss.pack(buffer, &endpoint->endpoint_btl->lid, 1, OPAL_UINT16);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &endpoint->endpoint_btl->device->mtu, 1,
OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}
BTL_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
rc = opal_dss.pack(buffer, &endpoint->index, 1, OPAL_UINT32);
if (ORTE_SUCCESS != rc) {
if (OPAL_SUCCESS != rc) {
ORTE_ERROR_LOG(rc);
return rc;
}

Просмотреть файл

@ -1216,11 +1216,11 @@ mca_coll_sm2_comm_query(struct ompi_communicator_t *comm, int *priority)
if (NULL == sbuffer || NULL == rbuffer) {
fprintf(stderr," Can't allocte memory for sbuffer or rbuffer \n");
fflush(stderr);
return ORTE_ERR_OUT_OF_RESOURCE;
return OMPI_ERR_OUT_OF_RESOURCE;
}
/* Pack my rank , I need it because allgather doesnot work as expected */
ret = opal_dss.pack(sbuffer, &my_rank, 1, OPAL_UINT32);
if (ORTE_SUCCESS != ret) {
if (OPAL_SUCCESS != ret) {
fprintf(stderr," pack returned error %d for my_rank \n",ret);
fflush(stderr);
return ret;
@ -1228,7 +1228,7 @@ mca_coll_sm2_comm_query(struct ompi_communicator_t *comm, int *priority)
/* Pack socket index */
ret = opal_dss.pack(sbuffer, my_socket_index, 1, OPAL_UINT32);
if (ORTE_SUCCESS != ret) {
if (OPAL_SUCCESS != ret) {
fprintf(stderr," pack returned error %d for my_socket_index \n",ret);
fflush(stderr);
return ret;
@ -1244,8 +1244,7 @@ mca_coll_sm2_comm_query(struct ompi_communicator_t *comm, int *priority)
** note !!!! - not sure why this is here, but will leave if for now
*/
ret = opal_dss.unpack(rbuffer, &dummy, &cnt, ORTE_STD_CNTR);
OPAL_OUTPUT_VERBOSE((10, mca_coll_base_output,"Get dummy value %d \n", dummy));
if (ORTE_SUCCESS != ret) {
if (OPAL_SUCCESS != ret) {
fprintf(stderr," unpack returned error %d for dummy \n",ret);
fflush(stderr);
return OMPI_ERROR;
@ -1264,7 +1263,7 @@ mca_coll_sm2_comm_query(struct ompi_communicator_t *comm, int *priority)
/* note !!!! need to store the data for manipulation */
/* unpack rank*/
ret = opal_dss.unpack(rbuffer, &rem_rank, &cnt, OPAL_UINT32);
if (ORTE_SUCCESS != ret) {
if (OPAL_SUCCESS != ret) {
fprintf(stderr," unpack returned error %d for rem_rank \n",ret);
fflush(stderr);
return OMPI_ERROR;
@ -1272,7 +1271,7 @@ mca_coll_sm2_comm_query(struct ompi_communicator_t *comm, int *priority)
/* unpack socket index */
ret = opal_dss.unpack(rbuffer, &rem_socket_index, &cnt, OPAL_UINT32);
if (ORTE_SUCCESS != ret) {
if (OPAL_SUCCESS != ret) {
fprintf(stderr," unpack returned error %d for rem_socket_index \n",ret);
fflush(stderr);
return OMPI_ERROR;

Просмотреть файл

@ -1,7 +1,7 @@
/*
* Copyright (c) 2004-2010 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* Copyright (c) 2004-2011 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
@ -112,7 +112,7 @@ int ompi_crcp_bkmrk_component_query(mca_base_module_t **module, int *priority)
*priority = mca_crcp_bkmrk_component.super.priority;
*module = (mca_base_module_t *)&loc_module;
return ORTE_SUCCESS;
return OMPI_SUCCESS;
}
int ompi_crcp_bkmrk_module_init(void)

Просмотреть файл

@ -1080,7 +1080,7 @@ do { \
#define UNPACK_BUFFER(buffer, var, count, type, error_msg) \
{ \
orte_std_cntr_t n = count; \
if (ORTE_SUCCESS != (ret = opal_dss.unpack(buffer, &(var), &n, type)) ) { \
if (OPAL_SUCCESS != (ret = opal_dss.unpack(buffer, &(var), &n, type)) ) { \
opal_output(mca_crcp_bkmrk_component.super.output_handle, \
"%s (Return %d)", error_msg, ret); \
exit_status = ret; \
@ -2372,7 +2372,7 @@ static int ompi_crcp_bkmrk_request_complete_irecv_init(struct ompi_request_t *re
false, /* Mark as inactive */
&content_ref);
if( NULL == content_ref ) {
exit_status = ORTE_ERROR;
exit_status = OMPI_ERROR;
goto DONE;
}
@ -3197,7 +3197,7 @@ static int traffic_message_append(ompi_crcp_bkmrk_pml_peer_ref_t *peer_ref,
struct ompi_communicator_t* comm,
ompi_crcp_bkmrk_pml_traffic_message_ref_t **msg_ref)
{
int ret, exit_status = ORTE_SUCCESS;
int ret, exit_status = OMPI_SUCCESS;
size_t ddt_size = 0;
if( NULL != datatype ) {
@ -3312,7 +3312,7 @@ static int traffic_message_move(ompi_crcp_bkmrk_pml_traffic_message_ref_t *old_m
bool keep_active,
bool remove)
{
int ret, exit_status = ORTE_SUCCESS;
int ret, exit_status = OMPI_SUCCESS;
ompi_crcp_bkmrk_pml_message_content_ref_t *new_content = NULL, *prev_content = NULL;
ompi_request_t *request = NULL;
bool loc_already_drained = false;
@ -3783,7 +3783,7 @@ static int drain_message_append(ompi_crcp_bkmrk_pml_peer_ref_t *peer_ref,
struct ompi_communicator_t* comm,
ompi_crcp_bkmrk_pml_drain_message_ref_t **msg_ref)
{
int ret, exit_status = ORTE_SUCCESS;
int ret, exit_status = OMPI_SUCCESS;
ompi_crcp_bkmrk_pml_message_content_ref_t *content_ref = NULL;
/*
@ -4751,7 +4751,7 @@ static int ft_event_post_drain_acks(void)
drain_msg_ack = (ompi_crcp_bkmrk_pml_drain_message_ack_ref_t*)item;
/* Post the receive */
if( OMPI_SUCCESS != (ret = orte_rml.recv_buffer_nb( &drain_msg_ack->peer,
if( ORTE_SUCCESS != (ret = orte_rml.recv_buffer_nb( &drain_msg_ack->peer,
OMPI_CRCP_COORD_BOOKMARK_TAG,
0,
drain_message_ack_cbfunc,
@ -5624,7 +5624,7 @@ static int do_send_msg_detail(ompi_crcp_bkmrk_pml_peer_ref_t *peer_ref,
* Check return value from peer to see if we found a match.
*/
if (NULL == (buffer = OBJ_NEW(opal_buffer_t))) {
exit_status = ORTE_ERROR;
exit_status = OMPI_ERROR;
goto cleanup;
}
@ -5799,7 +5799,6 @@ static int do_recv_msg_detail(ompi_crcp_bkmrk_pml_peer_ref_t *peer_ref,
int ret;
if (NULL == (buffer = OBJ_NEW(opal_buffer_t))) {
exit_status = ORTE_ERROR;
goto cleanup;
}

Просмотреть файл

@ -2,7 +2,7 @@
* Copyright (c) 2004-2008 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -30,7 +30,7 @@
#include "opal/mca/mca.h"
#include "opal/mca/base/base.h"
#include "orte/mca/rml/rml_types.h"
#include "ompi/info/info.h"
#include "ompi/communicator/communicator.h"

Просмотреть файл

@ -186,7 +186,7 @@ static int connect_accept ( ompi_communicator_t *comm, int root,
return OMPI_ERROR;
}
if (ORTE_SUCCESS != (rc = opal_dss.pack(nbuf, &size, 1, OPAL_INT))) {
if (OPAL_SUCCESS != (rc = opal_dss.pack(nbuf, &size, 1, OPAL_INT))) {
ORTE_ERROR_LOG(rc);
goto exit;
}
@ -268,7 +268,7 @@ static int connect_accept ( ompi_communicator_t *comm, int root,
rc = orte_rml.send_buffer(&carport, nbuf, tag, 0);
}
if (ORTE_SUCCESS != (rc = opal_dss.unload(cabuf, &rnamebuf, &rnamebuflen))) {
if (OPAL_SUCCESS != (rc = opal_dss.unload(cabuf, &rnamebuf, &rnamebuflen))) {
ORTE_ERROR_LOG(rc);
goto exit;
}
@ -319,13 +319,13 @@ static int connect_accept ( ompi_communicator_t *comm, int root,
if (NULL == nrbuf) {
goto exit;
}
if ( ORTE_SUCCESS != ( rc = opal_dss.load(nrbuf, rnamebuf, rnamebuflen))) {
if ( OPAL_SUCCESS != ( rc = opal_dss.load(nrbuf, rnamebuf, rnamebuflen))) {
ORTE_ERROR_LOG(rc);
goto exit;
}
num_vals = 1;
if (ORTE_SUCCESS != (rc = opal_dss.unpack(nrbuf, &rsize, &num_vals, OPAL_INT))) {
if (OPAL_SUCCESS != (rc = opal_dss.unpack(nrbuf, &rsize, &num_vals, OPAL_INT))) {
ORTE_ERROR_LOG(rc);
goto exit;
}
@ -402,7 +402,7 @@ static int connect_accept ( ompi_communicator_t *comm, int root,
"%s dpm:orte:connect_accept executing modex",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
if (OMPI_SUCCESS != (rc = orte_grpcomm.modex(&all_procs))) {
if (ORTE_SUCCESS != (rc = orte_grpcomm.modex(&all_procs))) {
ORTE_ERROR_LOG(rc);
goto exit;
}
@ -929,13 +929,13 @@ static int open_port(char *port_name, orte_rml_tag_t given_tag)
OPAL_THREAD_LOCK(&ompi_dpm_port_mutex);
if (NULL == orte_process_info.my_hnp_uri) {
rc = ORTE_ERR_NOT_AVAILABLE;
rc = OMPI_ERR_NOT_AVAILABLE;
ORTE_ERROR_LOG(rc);
goto cleanup;
}
if (NULL == (rml_uri = orte_rml.get_contact_info())) {
rc = ORTE_ERROR;
rc = OMPI_ERROR;
ORTE_ERROR_LOG(rc);
goto cleanup;
}
@ -1011,7 +1011,7 @@ static int parse_port_name(char *port_name,
/* find the ':' demarking the RML tag we added to the end */
if (NULL == (ptr = strrchr(tmpstring, ':'))) {
rc = ORTE_ERR_NOT_FOUND;
rc = OMPI_ERR_NOT_FOUND;
goto cleanup;
}
@ -1024,7 +1024,7 @@ static int parse_port_name(char *port_name,
/* now split out the second field - the uri of the remote proc */
if (NULL == (ptr = strchr(tmpstring, '+'))) {
rc = ORTE_ERR_NOT_FOUND;
rc = OMPI_ERR_NOT_FOUND;
goto cleanup;
}
*ptr = '\0';
@ -1036,7 +1036,7 @@ static int parse_port_name(char *port_name,
if(NULL != rml_uri) *rml_uri = strdup(ptr);
if(NULL != ptag) *ptag = tag;
return ORTE_SUCCESS;
return OMPI_SUCCESS;
cleanup:
/* release the tmp storage */

Просмотреть файл

@ -2,7 +2,7 @@
* Copyright (c) 2004-2008 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2007 The University of Tennessee and The University
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -61,5 +61,5 @@ static int dpm_orte_component_query(mca_base_module_t **module, int *priority)
{
*priority = 50;
*module = (mca_base_module_t *) &ompi_dpm_orte_module;
return ORTE_SUCCESS;
return OMPI_SUCCESS;
}

Просмотреть файл

@ -2,7 +2,7 @@
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2007 The University of Tennessee and The University
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -194,8 +194,8 @@ static int example_component_init_query(bool enable_progress_threads,
background regardless of the value of this parameter as lone
as the HAVE_THREADS macro is true and the component uses its
own locking schemes (i.e., does not rely on external
OPAL/ORTE/OMPI data structures to be thread safe). This flag
simply indicates whether OPAL/ORTE/OMPI data structures are
OPAL/OMPI data structures to be thread safe). This flag
simply indicates whether OPAL/OMPI data structures are
multi-threaded safe and whether multi-threading sync/IPC
mechanisms in the OMPI code base are active.

Просмотреть файл

@ -1,7 +1,7 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2006 The Trustees of the University of Tennessee.
* Copyright (c) 2004-2011 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
@ -69,8 +69,8 @@ struct ompi_op_t;
* @param[in] enable_mpi_threads True if the component needs to
* support MPI_THREAD_MULTIPLE
*
* @retval ORTE_SUCCESS Component successfully initialized
* @retval ORTE_ERROR An unspecified error occurred
* @retval OMPI_SUCCESS Component successfully initialized
* @retval OMPI_ERROR An unspecified error occurred
*/
typedef int (*ompi_osc_base_component_init_fn_t)(bool enable_progress_threads,
bool enable_mpi_threads);
@ -84,8 +84,8 @@ typedef int (*ompi_osc_base_component_init_fn_t)(bool enable_progress_threads,
* should also clean up any data created during the lifetime of the
* component, including any modules that are outstanding.
*
* @retval ORTE_SUCCESS Component successfully finalized
* @retval ORTE_ERROR An unspecified error occurred
* @retval OMPI_SUCCESS Component successfully finalized
* @retval OMPI_ERROR An unspecified error occurred
*/
typedef int (*ompi_osc_base_component_finalize_fn_t)(void);
@ -136,8 +136,8 @@ typedef int (*ompi_osc_base_component_query_fn_t)(struct ompi_win_t *win,
* @param[in] comm The communicator specified by the user for the
* basis of the group membership for the Window.
*
* @retval ORTE_SUCCESS Component successfully selected
* @retval ORTE_ERROR An unspecified error occurred
* @retval OMPI_SUCCESS Component successfully selected
* @retval OMPI_ERROR An unspecified error occurred
*/
typedef int (*ompi_osc_base_component_select_fn_t)(struct ompi_win_t *win,
struct ompi_info_t *info,
@ -184,8 +184,8 @@ typedef ompi_osc_base_component_2_0_0_t ompi_osc_base_component_t;
*
* @param[in] win Window to free
*
* @retval ORTE_SUCCESS Component successfully selected
* @retval ORTE_ERROR An unspecified error occurred
* @retval OMPI_SUCCESS Component successfully selected
* @retval OMPI_ERROR An unspecified error occurred
*/
typedef int (*ompi_osc_base_module_free_fn_t)(struct ompi_win_t *win);

Просмотреть файл

@ -3,7 +3,7 @@
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2007 The University of Tennessee and The University
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -269,9 +269,9 @@ int mca_pml_dr_add_procs(ompi_proc_t** procs, size_t nprocs)
/* this won't work for comm spawn and other dynamic
processes, but will work for initial job start */
idx = opal_pointer_array_add(&mca_pml_dr.endpoints, (void*) endpoint);
if(orte_util_compare_name_fields(ORTE_NS_CMP_ALL,
if(OPAL_EQUAL == orte_util_compare_name_fields(ORTE_NS_CMP_ALL,
ORTE_PROC_MY_NAME,
&(endpoint->proc_ompi->proc_name)) == OPAL_EQUAL) {
&(endpoint->proc_ompi->proc_name))) {
mca_pml_dr.my_rank = idx;
}
endpoint->local = endpoint->dst = idx;

Просмотреть файл

@ -1,5 +1,5 @@
/*
* Copyright (c) 2004-2007 The Trustees of the University of Tennessee.
* Copyright (c) 2004-2011 The Trustees of the University of Tennessee.
* All rights reserved.
* $COPYRIGHT$
*
@ -34,12 +34,12 @@ int vprotocol_pessimist_event_logger_connect(int el_rank, ompi_communicator_t **
port = ompi_pubsub.lookup(name, MPI_INFO_NULL);
if(NULL == port)
{
return ORTE_ERR_NOT_FOUND;
return OMPI_ERR_NOT_FOUND;
}
V_OUTPUT_VERBOSE(45, "Found port < %s >", port);
/* separate the string into the HNP and RML URI and tag */
if (ORTE_SUCCESS != (rc = ompi_dpm.parse_port(port, &hnp_uri, &rml_uri, &el_tag))) {
if (OMPI_SUCCESS != (rc = ompi_dpm.parse_port(port, &hnp_uri, &rml_uri, &el_tag))) {
ORTE_ERROR_LOG(rc);
return rc;
}
@ -50,7 +50,7 @@ int vprotocol_pessimist_event_logger_connect(int el_rank, ompi_communicator_t **
return rc;
}
/* make sure we can route rml messages to the destination */
if (ORTE_SUCCESS != (rc = ompi_dpm.route_to_port(hnp_uri, &el_proc))) {
if (OMPI_SUCCESS != (rc = ompi_dpm.route_to_port(hnp_uri, &el_proc))) {
ORTE_ERROR_LOG(rc);
free(rml_uri); free(hnp_uri);
return rc;
@ -61,7 +61,7 @@ int vprotocol_pessimist_event_logger_connect(int el_rank, ompi_communicator_t **
* connect/accept */
OBJ_CONSTRUCT(&buffer, opal_buffer_t);
rc = orte_rml.send_buffer(&el_proc, &buffer, el_tag+1, 0);
if(OMPI_SUCCESS > rc) {
if(ORTE_SUCCESS > rc) {
ORTE_ERROR_LOG(rc);
OBJ_DESTRUCT(&buffer);
return rc;

Просмотреть файл

@ -420,19 +420,19 @@ ompi_proc_pack(ompi_proc_t **proclist, int proclistsize, opal_buffer_t* buf)
*/
for (i=0; i<proclistsize; i++) {
rc = opal_dss.pack(buf, &(proclist[i]->proc_name), 1, ORTE_NAME);
if(rc != ORTE_SUCCESS) {
if(rc != OPAL_SUCCESS) {
ORTE_ERROR_LOG(rc);
OPAL_THREAD_UNLOCK(&ompi_proc_lock);
return rc;
}
rc = opal_dss.pack(buf, &(proclist[i]->proc_arch), 1, OPAL_UINT32);
if(rc != ORTE_SUCCESS) {
if(rc != OPAL_SUCCESS) {
ORTE_ERROR_LOG(rc);
OPAL_THREAD_UNLOCK(&ompi_proc_lock);
return rc;
}
rc = opal_dss.pack(buf, &(proclist[i]->proc_hostname), 1, OPAL_STRING);
if(rc != ORTE_SUCCESS) {
if(rc != OPAL_SUCCESS) {
ORTE_ERROR_LOG(rc);
OPAL_THREAD_UNLOCK(&ompi_proc_lock);
return rc;
@ -515,21 +515,21 @@ ompi_proc_unpack(opal_buffer_t* buf,
int rc;
rc = opal_dss.unpack(buf, &new_name, &count, ORTE_NAME);
if (rc != ORTE_SUCCESS) {
if (rc != OPAL_SUCCESS) {
ORTE_ERROR_LOG(rc);
free(plist);
free(newprocs);
return rc;
}
rc = opal_dss.unpack(buf, &new_arch, &count, OPAL_UINT32);
if (rc != ORTE_SUCCESS) {
if (rc != OPAL_SUCCESS) {
ORTE_ERROR_LOG(rc);
free(plist);
free(newprocs);
return rc;
}
rc = opal_dss.unpack(buf, &new_hostname, &count, OPAL_STRING);
if (rc != ORTE_SUCCESS) {
if (rc != OPAL_SUCCESS) {
ORTE_ERROR_LOG(rc);
free(plist);
free(newprocs);

Просмотреть файл

@ -2,7 +2,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -57,7 +57,7 @@ struct ompi_proc_t {
/** this process' name */
orte_process_name_t proc_name;
/** PML specific proc data */
struct mca_pml_endpoint_t* proc_pml;
struct mca_pml_endpoint_t* proc_pml;
/** BML specific proc data */
struct mca_bml_base_endpoint_t* proc_bml;
/** architecture of this process */

Просмотреть файл

@ -3,7 +3,7 @@
* Copyright (c) 2004-2010 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2007 The University of Tennessee and The University
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -360,7 +360,7 @@ static int ompi_cr_coord_pre_ckpt(void) {
* Notify PML
* - Will notify BML and BTL's
*/
if( ORTE_SUCCESS != (ret = mca_pml.pml_ft_event(OPAL_CRS_CHECKPOINT))) {
if( OMPI_SUCCESS != (ret = mca_pml.pml_ft_event(OPAL_CRS_CHECKPOINT))) {
exit_status = ret;
goto cleanup;
}
@ -383,7 +383,7 @@ static int ompi_cr_coord_pre_restart(void) {
* and handles. On the second pass (once ORTE is restarted) we can
* reconnect processes.
*/
if( ORTE_SUCCESS != (ret = mca_pml.pml_ft_event(OPAL_CRS_RESTART_PRE))) {
if( OMPI_SUCCESS != (ret = mca_pml.pml_ft_event(OPAL_CRS_RESTART_PRE))) {
exit_status = ret;
goto cleanup;
}
@ -405,7 +405,7 @@ static int ompi_cr_coord_pre_continue(void) {
if( orte_cr_continue_like_restart ) {
/* Mimic ompi_cr_coord_pre_restart(); */
if( ORTE_SUCCESS != (ret = mca_pml.pml_ft_event(OPAL_CRS_CONTINUE))) {
if( OMPI_SUCCESS != (ret = mca_pml.pml_ft_event(OPAL_CRS_CONTINUE))) {
exit_status = ret;
goto cleanup;
}
@ -452,7 +452,7 @@ static int ompi_cr_coord_post_restart(void) {
* Notify PML
* - Will notify BML and BTL's
*/
if( ORTE_SUCCESS != (ret = mca_pml.pml_ft_event(OPAL_CRS_RESTART))) {
if( OMPI_SUCCESS != (ret = mca_pml.pml_ft_event(OPAL_CRS_RESTART))) {
exit_status = ret;
goto cleanup;
}
@ -481,7 +481,7 @@ static int ompi_cr_coord_post_continue(void) {
* Notify PML
* - Will notify BML and BTL's
*/
if( ORTE_SUCCESS != (ret = mca_pml.pml_ft_event(OPAL_CRS_CONTINUE))) {
if( OMPI_SUCCESS != (ret = mca_pml.pml_ft_event(OPAL_CRS_CONTINUE))) {
exit_status = ret;
goto cleanup;
}

Просмотреть файл

@ -2,7 +2,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -122,7 +122,7 @@ ompi_modex_recv_key_value(const char* key,
bo.bytes = NULL;
bo.size = 0;
if (OMPI_SUCCESS != (rc = orte_grpcomm.get_proc_attr(source_proc->proc_name, key,
if (ORTE_SUCCESS != (rc = orte_grpcomm.get_proc_attr(source_proc->proc_name, key,
(void**)&bo.bytes, &bsize))) {
return rc;
}

Просмотреть файл

@ -2,7 +2,7 @@
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -212,7 +212,7 @@ ompi_mpi_abort(struct ompi_communicator_t* comm,
/*
* Abort peers in this communicator group. Does not include self.
*/
if( OMPI_SUCCESS != (ret = orte_errmgr.abort_peers(abort_procs, nabort_procs)) ) {
if( ORTE_SUCCESS != (ret = orte_errmgr.abort_peers(abort_procs, nabort_procs)) ) {
orte_errmgr.abort(ret, "Open MPI failed to abort all of the procs requested (%d).", ret);
}
}

Просмотреть файл

@ -3,7 +3,7 @@
* Copyright (c) 2004-2010 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2009 The University of Tennessee and The University
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -227,7 +227,7 @@ int ompi_mpi_finalize(void)
MPI barrier doesn't ensure that all messages have been transmitted
before exiting, so the possibility of a stranded message exists.
*/
if (OMPI_SUCCESS != (ret = orte_grpcomm.barrier())) {
if (ORTE_SUCCESS != (ret = orte_grpcomm.barrier())) {
ORTE_ERROR_LOG(ret);
return ret;
}
@ -427,11 +427,11 @@ int ompi_mpi_finalize(void)
/* Leave the RTE */
if (OMPI_SUCCESS != (ret = orte_finalize())) {
if (ORTE_SUCCESS != (ret = orte_finalize())) {
return ret;
}
if (OMPI_SUCCESS != (ret = opal_finalize_util())) {
if (OPAL_SUCCESS != (ret = opal_finalize_util())) {
return ret;
}

Просмотреть файл

@ -306,7 +306,7 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
/* Setup enough to check get/set MCA params */
if (ORTE_SUCCESS != (ret = opal_init_util(&argc, &argv))) {
if (OPAL_SUCCESS != (ret = opal_init_util(&argc, &argv))) {
error = "ompi_mpi_init: opal_init_util failed";
goto error;
}
@ -717,7 +717,7 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
/* exchange connection info - this function also acts as a barrier
* as it will not return until the exchange is complete
*/
if (OMPI_SUCCESS != (ret = orte_grpcomm.modex(NULL))) {
if (ORTE_SUCCESS != (ret = orte_grpcomm.modex(NULL))) {
error = "orte_grpcomm_modex failed";
goto error;
}
@ -800,7 +800,7 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
}
/* wait for everyone to reach this point */
if (OMPI_SUCCESS != (ret = orte_grpcomm.barrier())) {
if (ORTE_SUCCESS != (ret = orte_grpcomm.barrier())) {
error = "orte_grpcomm_barrier failed";
goto error;
}

Просмотреть файл

@ -2,7 +2,7 @@
* Copyright (c) 2004-2010 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* Copyright (c) 2004-2011 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
@ -232,7 +232,7 @@ void ompi_info_open_components(void)
/* Open the DSS */
if (ORTE_SUCCESS != opal_dss_open()) {
if (OPAL_SUCCESS != opal_dss_open()) {
str = "Unable to initialize the DSS";
goto error;
}

Просмотреть файл

@ -66,7 +66,7 @@ ORTE_DECLSPEC char* orte_util_print_epoch(const orte_epoch_t epoch);
#define ORTE_EPOCH_PRINT(n) \
orte_util_print_epoch(n)
#else
#define ORTE_EPOCH_PRINT(n)
#define ORTE_EPOCH_PRINT(n) ""
#endif
ORTE_DECLSPEC char* orte_util_print_job_family(const orte_jobid_t job);