
Finally commit fixes from code review in Ohio with Anju:

- better error checking in top-level MPI API coll functions
- remove boolean flags in coll module struct
- minor fixes in selection logic

This commit was SVN r1825.
This commit is contained in:
Jeff Squyres 2004-07-30 19:14:55 +00:00
parent 1e162da7c7
commit 90de82789e
20 changed files with 236 additions and 288 deletions
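The "better error checking" item refers to a pattern visible throughout the per-file diffs below: each MPI collective wrapper now accumulates a single err value through one if/else-if chain, funnels datatype/count validation through OMPI_CHECK_DATATYPE_FOR_SEND, and reports any failure once via OMPI_ERRHANDLER_CHECK, instead of invoking the error handler and returning at every individual test. The following is a small self-contained sketch of that accumulate-then-report shape only; the macros and names here are toy stand-ins, not the Open MPI definitions. (In the actual diffs the invalid-communicator case still raises the handler on MPI_COMM_WORLD directly, presumably because comm itself cannot be trusted at that point.)

#include <stdio.h>

/* Toy stand-ins that only mimic the shape of OMPI_CHECK_DATATYPE_FOR_SEND /
   OMPI_ERRHANDLER_CHECK; they are NOT the Open MPI macros. */
#define ERR_SUCCESS 0
#define ERR_TYPE    1
#define ERR_COUNT   2

#define CHECK_DATATYPE_FOR_SEND(err, type_ok, count)    \
    do {                                                \
        if (!(type_ok))       (err) = ERR_TYPE;         \
        else if ((count) < 0) (err) = ERR_COUNT;        \
    } while (0)

#define ERRHANDLER_CHECK(err, where)                              \
    do {                                                          \
        if (ERR_SUCCESS != (err)) {                               \
            fprintf(stderr, "%s: error %d\n", (where), (err));    \
            return (err);                                         \
        }                                                         \
    } while (0)

/* One accumulated error code, one reporting point -- the same shape as the
   reworked parameter checks in the collective wrappers below. */
static int toy_allgather(int sendcount, int send_type_ok,
                         int recvcount, int recv_type_ok)
{
    int err = ERR_SUCCESS;

    if (!recv_type_ok) {
        err = ERR_TYPE;
    } else if (recvcount < 0) {
        err = ERR_COUNT;
    } else {
        CHECK_DATATYPE_FOR_SEND(err, send_type_ok, sendcount);
    }
    ERRHANDLER_CHECK(err, "toy_allgather");

    /* ... perform the operation ... */
    return ERR_SUCCESS;
}

int main(void)
{
    printf("ok case:   %d\n", toy_allgather(4, 1, 4, 1));
    printf("bad count: %d\n", toy_allgather(-1, 1, 4, 1));
    return 0;
}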

View file

@@ -31,8 +31,8 @@ static mca_coll_1_0_0_t null_actions = {
   /* Collective function pointers */
   NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-  false, NULL, NULL, NULL, NULL,
-  false, NULL, NULL, NULL, NULL, NULL
+  NULL, NULL, NULL, NULL,
+  NULL, NULL, NULL, NULL, NULL
 };
@@ -74,7 +74,7 @@ static int module_init(const mca_coll_1_0_0_t *module,
                        ompi_communicator_t *comm);
 static int query_basic(ompi_communicator_t *comm);
-static void replace_null_with_basic(ompi_communicator_t *comm);
+static int replace_null_with_basic(ompi_communicator_t *comm);
 
 /*
@@ -295,11 +295,11 @@ static ompi_list_t *check_components(ompi_list_t *components,
 {
   int i, priority;
   const mca_base_module_t *component;
-  ompi_list_item_t *item, *next;
+  ompi_list_item_t *item, *next, *item2;
   const mca_coll_1_0_0_t *actions;
   bool want_to_check;
   ompi_list_t *selectable;
-  avail_coll_t *avail;
+  avail_coll_t *avail, *avail2;
 
   /* Make a list of the components that query successfully */
@@ -342,18 +342,33 @@ static ompi_list_t *check_components(ompi_list_t *components,
         avail = OBJ_NEW(avail_coll_t);
         avail->ac_priority = 0;
         avail->ac_component = (mca_coll_base_module_1_0_0_t *) component;
-        ompi_list_append(selectable, item);
-      } else {
-        ompi_list_remove_item(components, item);
+
+        /* Put this item on the list in priority order (highest
+           priority first). Should it go first? */
+        item2 = ompi_list_get_first(selectable);
+        avail2 = (avail_coll_t *) item2;
+        if (avail->ac_priority > avail2->ac_priority) {
+          ompi_list_prepend(selectable, item);
+        } else {
+          for (i = 1; item2 != ompi_list_get_end(selectable);
+               item2 = ompi_list_get_next(selectable), ++i) {
+            avail2 = (avail_coll_t *) item2;
+            if (avail->ac_priority > avail2->ac_priority) {
+              ompi_list_insert(selectable, item, i);
+              break;
+            }
+          }
+
+          /* If we didn't find a place to put it in the list, then
+             append it (because it has the lowest priority found so
+             far) */
+          if (ompi_list_get_end(selectable) == item2) {
+            ompi_list_append(selectable, item);
+          }
+        }
       }
     }
+
+    /* If we didn't want to check, then eliminate this entry from the
+       list */
+    else {
+      ompi_list_remove_item(components, item);
+    }
   }
 
   /* If we didn't find any available components, return an error */
@@ -531,11 +546,15 @@ static int query_basic(ompi_communicator_t *comm)
 /*
  * Replace the NULL pointers by corresponsing ompi_basic pointers
  */
-static void replace_null_with_basic(ompi_communicator_t *comm)
+static int replace_null_with_basic(ompi_communicator_t *comm)
 {
+  int err;
+
 #define CHECK(name) \
   if (NULL == comm->c_coll.coll_##name) { \
-    query_basic(comm); \
+    if (OMPI_SUCCESS != (err = query_basic(comm))) { \
+      return err; \
+    } \
     comm->c_coll.coll_##name = comm->c_coll_basic_module->coll_##name; \
   }
@@ -555,4 +574,8 @@ static void replace_null_with_basic(ompi_communicator_t *comm)
   CHECK(scan);
   CHECK(scatter);
   CHECK(scatterv);
+
+  /* Happiness; all done */
+  return OMPI_SUCCESS;
 }
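The selection-logic change above keeps the selectable list sorted by component priority, highest first: it scans for the first element with a lower priority and appends when none is found. Below is a minimal standalone sketch of the same insert-in-priority-order idea, using a plain singly-linked list rather than the ompi_list_* API; the component names and priorities are arbitrary illustration values.

#include <stdio.h>

/* Standalone sketch only -- not the ompi_list implementation. */
struct avail {
    int priority;
    const char *name;
    struct avail *next;
};

static struct avail *insert_by_priority(struct avail *head, struct avail *item)
{
    /* Highest priority goes to the front of the list. */
    if (NULL == head || item->priority > head->priority) {
        item->next = head;
        return item;
    }
    struct avail *cur = head;
    /* Walk until the next element has a strictly lower priority (or we hit
       the end), then splice the new item in; equal priorities keep their
       insertion order, matching the "append if no lower priority found"
       behaviour above. */
    while (NULL != cur->next && cur->next->priority >= item->priority) {
        cur = cur->next;
    }
    item->next = cur->next;
    cur->next = item;
    return head;
}

int main(void)
{
    struct avail a = { 10, "alpha", NULL };
    struct avail b = { 50, "beta",  NULL };
    struct avail c = { 30, "gamma", NULL };
    struct avail *head = NULL;

    head = insert_by_priority(head, &a);
    head = insert_by_priority(head, &b);
    head = insert_by_priority(head, &c);

    for (struct avail *p = head; NULL != p; p = p->next) {
        printf("%s (priority %d)\n", p->name, p->priority);
    }
    return 0;
}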

View file

@@ -40,12 +40,10 @@ static const mca_coll_1_0_0_t intra_linear = {
   mca_coll_basic_alltoallv_intra,
   mca_coll_basic_alltoallw_intra,
   mca_coll_basic_barrier_intra_lin,
-  true,
   mca_coll_basic_bcast_lin_intra,
   mca_coll_basic_exscan_intra,
   mca_coll_basic_gather_intra,
   mca_coll_basic_gatherv_intra,
-  true,
   mca_coll_basic_reduce_lin_intra,
   mca_coll_basic_reduce_scatter_intra,
   mca_coll_basic_scan_intra,
@@ -82,12 +80,10 @@ static const mca_coll_1_0_0_t intra_log = {
   mca_coll_basic_alltoallv_intra,
   mca_coll_basic_alltoallw_intra,
   mca_coll_basic_barrier_intra_log,
-  true,
   mca_coll_basic_bcast_log_intra,
   mca_coll_basic_exscan_intra,
   mca_coll_basic_gather_intra,
   mca_coll_basic_gatherv_intra,
-  true,
   mca_coll_basic_reduce_log_intra,
   mca_coll_basic_reduce_scatter_intra,
   mca_coll_basic_scan_intra,
@@ -122,12 +118,10 @@ static const mca_coll_1_0_0_t inter_linear = {
   mca_coll_basic_alltoallv_inter,
   mca_coll_basic_alltoallw_inter,
   mca_coll_basic_barrier_inter_lin,
-  true,
   mca_coll_basic_bcast_lin_inter,
   mca_coll_basic_exscan_inter,
   mca_coll_basic_gather_inter,
   mca_coll_basic_gatherv_inter,
-  true,
   mca_coll_basic_reduce_lin_inter,
   mca_coll_basic_reduce_scatter_inter,
   NULL,
@@ -164,12 +158,10 @@ static const mca_coll_1_0_0_t inter_log = {
   mca_coll_basic_alltoallv_inter,
   mca_coll_basic_alltoallw_inter,
   mca_coll_basic_barrier_inter_log,
-  true,
   mca_coll_basic_bcast_log_inter,
   mca_coll_basic_exscan_inter,
   mca_coll_basic_gather_inter,
   mca_coll_basic_gatherv_inter,
-  true,
   mca_coll_basic_reduce_log_inter,
   mca_coll_basic_reduce_scatter_inter,
   NULL,

View file

@@ -281,8 +281,11 @@ int mca_coll_basic_reduce_log_intra(void *sbuf, void *rbuf, int count,
         }
         pml_buffer = free_buffer - lb;
-        /* read the comment about commutative operations (few lines down the page) */
-        if( ompi_op_is_commute(op) ) rcv_buffer = pml_buffer;
+        /* read the comment about commutative operations (few lines down
+           the page) */
+        if( ompi_op_is_commute(op) ) {
+            rcv_buffer = pml_buffer;
+        }
         else rcv_buffer = rbuf;
     }
@@ -325,13 +328,16 @@ int mca_coll_basic_reduce_log_intra(void *sbuf, void *rbuf, int count,
             }
             fl_recv = 1;
-            /* Most of the time (all except the first one for commutative operations) we
-             * receive in the user provided buffer (rbuf). But the exception is here to allow
-             * us to dont have to copy from the sbuf to a temporary location. If the operation
-             * is commutative we dont care in which order we apply the operation, so for the
-             * first time we can receive the data in the pml_buffer and then apply to
-             * operation between this buffer and the user provided data.
-             */
+
+            /* Most of the time (all except the first one for commutative
+               operations) we receive in the user provided buffer
+               (rbuf). But the exception is here to allow us to dont have
+               to copy from the sbuf to a temporary location. If the
+               operation is commutative we dont care in which order we
+               apply the operation, so for the first time we can receive
+               the data in the pml_buffer and then apply to operation
+               between this buffer and the user provided data. */
             err = mca_pml.pml_recv( rcv_buffer, count, dtype, peer,
                                     MCA_COLL_BASE_TAG_REDUCE, comm,
                                     MPI_STATUS_IGNORE );
@@ -349,19 +355,22 @@ int mca_coll_basic_reduce_log_intra(void *sbuf, void *rbuf, int count,
                 ompi_op_reduce(op, rcv_buffer, pml_buffer, count, dtype);
             } else {
                 /* If we're commutative, we don't care about the order of
-                 * operations and we can just reduce the operations now.
-                 * If we are not commutative, we have to copy the send
-                 * buffer into a temp buffer (pml_buffer) and then reduce
-                 * what we just received against it.
-                 */
+                   operations and we can just reduce the operations now.
+                   If we are not commutative, we have to copy the send
+                   buffer into a temp buffer (pml_buffer) and then reduce
+                   what we just received against it. */
                 if( !ompi_op_is_commute(op) ) {
                     ompi_ddt_sndrcv( sbuf, count, dtype, pml_buffer, count, dtype,
                                      MCA_COLL_BASE_TAG_REDUCE, comm);
                     ompi_op_reduce( op, rbuf, pml_buffer, count, dtype );
-                } else
+                } else {
                     ompi_op_reduce(op, sbuf, pml_buffer, count, dtype);
-                snd_buffer = pml_buffer; /* now we have to send the buffer containing the computed data */
-                rcv_buffer = rbuf; /* starting from now we always receive in the user provided buffer */
+                }
+                /* now we have to send the buffer containing the computed data */
+                snd_buffer = pml_buffer;
+                /* starting from now we always receive in the user
+                   provided buffer */
+                rcv_buffer = rbuf;
             }
         }
     }
@@ -378,10 +387,12 @@ int mca_coll_basic_reduce_log_intra(void *sbuf, void *rbuf, int count,
                               MCA_PML_BASE_SEND_STANDARD, comm);
         }
     } else if (rank == root) {
-        err = mca_pml.pml_recv( rcv_buffer, count, dtype, 0, MCA_COLL_BASE_TAG_REDUCE,
+        err = mca_pml.pml_recv( rcv_buffer, count, dtype, 0,
+                                MCA_COLL_BASE_TAG_REDUCE,
                                 comm, MPI_STATUS_IGNORE);
-        if( rcv_buffer != rbuf )
+        if( rcv_buffer != rbuf ) {
             ompi_op_reduce(op, rcv_buffer, rbuf, count, dtype);
+        }
     }
 
     if (NULL != free_buffer) {
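The rewritten comments in this hunk turn on whether the reduction operation is commutative: if it is, partial results can be folded in whatever order messages arrive (so the first receive can land directly in pml_buffer), otherwise contributions must be combined in rank order, which is why sbuf is first staged into pml_buffer. A small standalone illustration, unrelated to the actual PML code, of why operand order only matters for non-commutative operations:

#include <stdio.h>

/* A commutative op (+) and a non-commutative one (a*10 + b, "append digit"),
   each applied to the same values in two different orders with a fixed
   left-to-right fold. */
static int add(int a, int b)      { return a + b; }
static int append10(int a, int b) { return a * 10 + b; }

static int fold(int (*op)(int, int), const int *v, const int *order, int n)
{
    int acc = v[order[0]];
    for (int i = 1; i < n; ++i) {
        acc = op(acc, v[order[i]]);
    }
    return acc;
}

int main(void)
{
    int v[] = { 1, 2, 3 };
    int rank_order[]    = { 0, 1, 2 };   /* deterministic, by rank */
    int arrival_order[] = { 2, 0, 1 };   /* whatever order messages arrive */

    printf("add:      %d vs %d\n",
           fold(add, v, rank_order, 3), fold(add, v, arrival_order, 3));
    printf("append10: %d vs %d\n",
           fold(append10, v, rank_order, 3), fold(append10, v, arrival_order, 3));
    return 0;
}

With the commutative op both orders give 6; with the non-commutative one the rank-order fold gives 123 while the arrival-order fold gives 312.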

View file

@@ -170,12 +170,10 @@ struct mca_coll_1_0_0_t {
   mca_coll_base_alltoallv_fn_t coll_alltoallv;
   mca_coll_base_alltoallw_fn_t coll_alltoallw;
   mca_coll_base_barrier_fn_t coll_barrier;
-  bool coll_bcast_optimization;
   mca_coll_base_bcast_fn_t coll_bcast;
   mca_coll_base_exscan_fn_t coll_exscan;
   mca_coll_base_gather_fn_t coll_gather;
   mca_coll_base_gatherv_fn_t coll_gatherv;
-  bool coll_reduce_optimization;
   mca_coll_base_reduce_fn_t coll_reduce;
   mca_coll_base_reduce_scatter_fn_t coll_reduce_scatter;
   mca_coll_base_scan_fn_t coll_scan;

View file

@@ -32,20 +32,18 @@ int MPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
 
         /* Unrooted operation -- same checks for all ranks on both
            intracommunicators and intercommunicators */
 
+        err = MPI_SUCCESS;
         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
         if (ompi_comm_invalid(comm)) {
-            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
-                                          FUNC_NAME);
-        }
-
-        if ((MPI_DATATYPE_NULL == sendtype) ||
-            (MPI_DATATYPE_NULL == recvtype)) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
-        }
-
-        if ((sendcount < 0) || (recvcount < 0)) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
+            OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
+        } else if (MPI_DATATYPE_NULL == recvtype) {
+            err = MPI_ERR_TYPE;
+        } else if (recvcount < 0) {
+            err = MPI_ERR_COUNT;
+        } else {
+            OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
         }
+        OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
     }
 
     /* Invoke the coll component to perform the back-end operation */

View file

@@ -32,25 +32,22 @@ int MPI_Allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
 
         /* Unrooted operation -- same checks for all ranks on both
            intracommunicators and intercommunicators */
 
+        err = MPI_SUCCESS;
         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
         if (ompi_comm_invalid(comm)) {
             return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
                                           FUNC_NAME);
         }
 
-        if ((MPI_DATATYPE_NULL == sendtype) ||
-            (MPI_DATATYPE_NULL == recvtype)) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
-        }
-
-        if (sendcount < 0) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
-        }
+        OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
+        OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
 
         size = ompi_comm_size(comm);
         for (i = 0; i < size; ++i) {
             if (recvcounts[i] < 0) {
                 return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
+            } else if (MPI_DATATYPE_NULL == recvtype) {
+                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
             }
         }

View file

@@ -32,28 +32,21 @@ int MPI_Allreduce(void *sendbuf, void *recvbuf, int count,
 
         /* Unrooted operation -- same checks for all ranks on both
            intracommunicators and intercommunicators */
 
+        err = MPI_SUCCESS;
         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
         if (ompi_comm_invalid(comm)) {
             return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
                                           FUNC_NAME);
-        }
-
-        if (MPI_DATATYPE_NULL == datatype) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
-        }
-
-        if (MPI_OP_NULL == op) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, FUNC_NAME);
-        }
-
-        if (ompi_op_is_intrinsic(op) && datatype->id < DT_MAX_PREDEFINED &&
-            -1 == ompi_op_ddt_map[datatype->id]) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, FUNC_NAME);
-        }
-
-        if (count < 0) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
+        } else if (MPI_OP_NULL == op) {
+            err = MPI_ERR_OP;
+        } else if (ompi_op_is_intrinsic(op) &&
+                   datatype->id < DT_MAX_PREDEFINED &&
+                   -1 == ompi_op_ddt_map[datatype->id]) {
+            err = MPI_ERR_OP;
+        } else {
+            OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, count);
         }
+        OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
     }
 
     /* Invoke the coll component to perform the back-end operation */

View file

@@ -32,20 +32,19 @@ int MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,
 
         /* Unrooted operation -- same checks for all ranks on both
            intracommunicators and intercommunicators */
 
+        err = MPI_SUCCESS;
         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
         if (ompi_comm_invalid(comm)) {
             return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
                                           FUNC_NAME);
-        }
-
-        if ((MPI_DATATYPE_NULL == sendtype) ||
-            (MPI_DATATYPE_NULL == recvtype)) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
-        }
-
-        if ((sendcount < 0) || (recvcount < 0)) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
+        } else if (MPI_DATATYPE_NULL == recvtype) {
+            err = MPI_ERR_TYPE;
+        } else if (recvcount < 0) {
+            err = MPI_ERR_COUNT;
+        } else {
+            OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
         }
+        OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
     }
 
     /* Invoke the coll component to perform the back-end operation */

View file

@@ -32,6 +32,7 @@ int MPI_Alltoallv(void *sendbuf, int *sendcounts, int *sdispls,
 
         /* Unrooted operation -- same checks for all ranks */
 
+        err = MPI_SUCCESS;
         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
         if (ompi_comm_invalid(comm)) {
             return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
@@ -43,24 +44,21 @@ int MPI_Alltoallv(void *sendbuf, int *sendcounts, int *sdispls,
             return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
         }
 
-        if ((MPI_DATATYPE_NULL == sendtype) ||
-            (MPI_DATATYPE_NULL == recvtype)) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
-        }
-
-        /* Use a different size for intracommunicators and
+        /* We always define the remote group to be the same as the local
+           group in the case of an intracommunicator, so it's safe to
+           get the size of the remote group here for both intra- and
            intercommunicators */
 
-        if (OMPI_COMM_IS_INTRA(comm)) {
-            size = ompi_comm_size(comm);
-        } else {
-            size = ompi_comm_remote_size(comm);
-        }
-
+        size = ompi_comm_remote_size(comm);
         for (i = 0; i < size; ++i) {
-            if ((sendcounts[i] < 0) || (recvcounts[i] < 0)) {
-                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
+            if (recvcounts[i] < 0) {
+                err = MPI_ERR_COUNT;
+            } else if (MPI_DATATYPE_NULL == recvtype) {
+                err = MPI_ERR_TYPE;
+            } else {
+                OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
             }
+            OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
         }
     }

View file

@@ -32,35 +32,33 @@ int MPI_Alltoallw(void *sendbuf, int *sendcounts, int *sdispls,
 
         /* Unrooted operation -- same checks for all ranks */
 
+        err = MPI_SUCCESS;
         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
         if (ompi_comm_invalid(comm)) {
             return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
                                           FUNC_NAME);
         }
 
-        if ((NULL == sendcounts) || (NULL == sdispls) ||
-            (NULL == recvcounts) || (NULL == rdispls)) {
+        if ((NULL == sendcounts) || (NULL == sdispls) || (NULL == sendtypes) ||
+            (NULL == recvcounts) || (NULL == rdispls) || (NULL == recvtypes)) {
             return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
         }
 
-        /* Use a different size for intracommunicators and
+        /* We always define the remote group to be the same as the local
+           group in the case of an intracommunicator, so it's safe to
+           get the size of the remote group here for both intra- and
            intercommunicators */
 
-        if (OMPI_COMM_IS_INTRA(comm)) {
-            size = ompi_comm_size(comm);
-        } else {
-            size = ompi_comm_remote_size(comm);
-        }
-
+        size = ompi_comm_remote_size(comm);
         for (i = 0; i < size; ++i) {
-            if ((MPI_DATATYPE_NULL == sendtypes[i]) ||
-                (MPI_DATATYPE_NULL == recvtypes[i])) {
-                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
-            }
-
-            if ((sendcounts[i] < 0) || (recvcounts[i] < 0)) {
-                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
+            if (recvcounts[i] < 0) {
+                err = MPI_ERR_COUNT;
+            } else if (MPI_DATATYPE_NULL == recvtypes[i]) {
+                err = MPI_ERR_TYPE;
+            } else {
+                OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtypes[i], sendcounts[i]);
             }
+            OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
         }
     }

View file

@@ -26,6 +26,7 @@ int MPI_Bcast(void *buffer, int count, MPI_Datatype datatype,
     int err;
 
     if (MPI_PARAM_CHECK) {
+        err = MPI_SUCCESS;
         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
         if (ompi_comm_invalid(comm)) {
             return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
@@ -34,13 +35,8 @@ int MPI_Bcast(void *buffer, int count, MPI_Datatype datatype,
 
         /* Errors for all ranks */
 
-        if (MPI_DATATYPE_NULL == datatype) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
-        }
-
-        if (count < 0) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
-        }
+        OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, count);
+        OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
 
         /* Errors for intracommunicators */
@@ -54,7 +50,7 @@ int MPI_Bcast(void *buffer, int count, MPI_Datatype datatype,
         else {
             if (! ((root >= 0 && root < ompi_comm_remote_size(comm)) ||
-                   root == MPI_ROOT || root == MPI_PROC_NULL)) {
+                   MPI_ROOT == root || MPI_PROC_NULL == root)) {
                 return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
             }
         }
@@ -68,7 +64,7 @@ int MPI_Bcast(void *buffer, int count, MPI_Datatype datatype,
 
     /* Can we optimize? */
 
-    if (count == 0 && comm->c_coll.coll_bcast_optimization) {
+    if (count == 0) {
         return MPI_SUCCESS;
     }

View file

@@ -28,6 +28,7 @@ int MPI_Exscan(void *sendbuf, void *recvbuf, int count,
     int err;
 
     if (MPI_PARAM_CHECK) {
+        err = MPI_SUCCESS;
         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
         if (ompi_comm_invalid(comm)) {
             return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
@@ -37,22 +38,16 @@ int MPI_Exscan(void *sendbuf, void *recvbuf, int count,
 
         /* Unrooted operation -- same checks for intracommunicators
            and intercommunicators */
 
-        if (MPI_DATATYPE_NULL == datatype) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
-        }
-
-        if (MPI_OP_NULL == op) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, FUNC_NAME);
-        }
-
-        if (ompi_op_is_intrinsic(op) && datatype->id < DT_MAX_PREDEFINED &&
-            -1 == ompi_op_ddt_map[datatype->id]) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, FUNC_NAME);
-        }
-
-        if (count < 0) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
-        }
+        else if (MPI_OP_NULL == op) {
+            err = MPI_ERR_OP;
+        } else if (ompi_op_is_intrinsic(op) &&
+                   datatype->id < DT_MAX_PREDEFINED &&
+                   -1 == ompi_op_ddt_map[datatype->id]) {
+            err = MPI_ERR_OP;
+        } else {
+            OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, count);
+        }
+        OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
     }
 
     /* Invoke the coll component to perform the back-end operation */

View file

@@ -27,6 +27,7 @@ int MPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
     int err;
 
     if (MPI_PARAM_CHECK) {
+        err = MPI_SUCCESS;
         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
         if (ompi_comm_invalid(comm)) {
             return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
@@ -43,13 +44,8 @@ int MPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                 return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
             }
 
-            if (sendcount < 0) {
-                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
-            }
-
-            if (sendtype == MPI_DATATYPE_NULL) {
-                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
-            }
+            OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
+            OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
 
             /* Errors for the root. Some of these could have been
                combined into compound if statements above, but since
@@ -71,21 +67,16 @@ int MPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
 
         /* Errors for intercommunicators */
 
         else {
             if (! ((root >= 0 && root < ompi_comm_remote_size(comm)) ||
-                   root == MPI_ROOT || root == MPI_PROC_NULL)) {
+                   MPI_ROOT == root || MPI_PROC_NULL == root)) {
                 return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
             }
 
             /* Errors for the senders */
 
-            if (root != MPI_ROOT && root != MPI_PROC_NULL) {
-                if (sendcount < 0) {
-                    return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
-                }
-
-                if (sendtype == MPI_DATATYPE_NULL) {
-                    return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
-                }
+            if (MPI_ROOT != root && MPI_PROC_NULL != root) {
+                OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
+                OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
             }
 
             /* Errors for the root. Ditto on the comment above -- these
@@ -97,7 +88,7 @@ int MPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                     return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
                 }
 
-                if (recvtype == MPI_DATATYPE_NULL) {
+                if (MPI_DATATYPE_NULL == recvtype) {
                     return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
                 }
             }

View file

@@ -28,6 +28,7 @@ int MPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
     int i, size, err;
 
     if (MPI_PARAM_CHECK) {
+        err = MPI_SUCCESS;
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if (ompi_comm_invalid(comm)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
@@ -41,16 +42,11 @@ int MPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
 
             /* Errors for all ranks */
 
             if ((root >= ompi_comm_size(comm)) || (root < 0)) {
-                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
-            }
-
-            if (sendcount < 0) {
-                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
-            }
-
-            if (sendtype == MPI_DATATYPE_NULL) {
-                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
+                err = MPI_ERR_ROOT;
+            } else {
+                OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
             }
+            OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
 
             /* Errors for the root. Some of these could have been
                combined into compound if statements above, but since
@@ -59,10 +55,6 @@ int MPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                them out into individual tests. */
 
             if (ompi_comm_rank(comm) == root) {
-                if (recvtype == MPI_DATATYPE_NULL) {
-                    return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
-                }
-
                 if (NULL == displs) {
                     return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
                 }
@@ -75,6 +67,8 @@ int MPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                 for (i = 0; i < size; ++i) {
                     if (recvcounts[i] < 0) {
                         return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
+                    } else if (MPI_DATATYPE_NULL == recvtype) {
+                        return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
                     }
                 }
             }
@@ -84,20 +78,15 @@ int MPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
 
         else {
             if (! ((root >= 0 && root < ompi_comm_remote_size(comm)) ||
-                   root == MPI_ROOT || root == MPI_PROC_NULL)) {
+                   MPI_ROOT == root || MPI_PROC_NULL == root)) {
                 return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
             }
 
             /* Errors for the senders */
 
-            if (root != MPI_ROOT && root != MPI_PROC_NULL) {
-                if (sendcount < 0) {
-                    return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
-                }
-
-                if (sendtype == MPI_DATATYPE_NULL) {
-                    return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
-                }
+            if (MPI_ROOT != root && MPI_PROC_NULL != root) {
+                OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
+                OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
             }
 
             /* Errors for the root. Ditto on the comment above -- these
@@ -117,6 +106,8 @@ int MPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                 for (i = 0; i < size; ++i) {
                     if (recvcounts[i] < 0) {
                         return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
+                    } else if (MPI_DATATYPE_NULL == recvtype) {
+                        return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
                     }
                 }
             }

View file

@@ -28,6 +28,7 @@ int MPI_Reduce(void *sendbuf, void *recvbuf, int count,
     int err;
 
     if (MPI_PARAM_CHECK) {
+        err = MPI_SUCCESS;
         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
         if (ompi_comm_invalid(comm)) {
             return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
@@ -36,28 +37,22 @@ int MPI_Reduce(void *sendbuf, void *recvbuf, int count,
 
         /* Checks for all ranks */
 
-        if (MPI_DATATYPE_NULL == datatype) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
-        }
-
-        if (MPI_OP_NULL == op) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, FUNC_NAME);
-        }
-
-        if (ompi_op_is_intrinsic(op) && datatype->id < DT_MAX_PREDEFINED &&
-            -1 == ompi_op_ddt_map[datatype->id]) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, FUNC_NAME);
-        }
-
-        if (count < 0) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
-        }
+        else if (MPI_OP_NULL == op) {
+            err = MPI_ERR_OP;
+        } else if (ompi_op_is_intrinsic(op) &&
+                   datatype->id < DT_MAX_PREDEFINED &&
+                   -1 == ompi_op_ddt_map[datatype->id]) {
+            err = MPI_ERR_OP;
+        } else {
+            OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, count);
+        }
+        OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
 
         /* Intercommunicator errors */
 
         if (!OMPI_COMM_IS_INTRA(comm)) {
             if (! ((root >= 0 && root < ompi_comm_remote_size(comm)) ||
-                   root == MPI_ROOT || root == MPI_PROC_NULL)) {
+                   MPI_ROOT == root || MPI_PROC_NULL == root)) {
                 return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
             }
         }

View file

@@ -28,6 +28,7 @@ int MPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
     int i, err, size;
 
     if (MPI_PARAM_CHECK) {
+        err = MPI_SUCCESS;
         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
         if (ompi_comm_invalid(comm)) {
             return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
@@ -37,32 +38,26 @@ int MPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
 
         /* Unrooted operation; same checks for all ranks on both
            intracommunicators and intercommunicators */
 
-        if (MPI_DATATYPE_NULL == datatype) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
-        }
-
-        if (MPI_OP_NULL == op) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, FUNC_NAME);
-        }
-
-        if (ompi_op_is_intrinsic(op) && datatype->id < DT_MAX_PREDEFINED &&
-            -1 == ompi_op_ddt_map[datatype->id]) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, FUNC_NAME);
-        }
-
-        if (NULL == recvcounts) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
-        }
-
-        if (OMPI_COMM_IS_INTRA(comm)) {
-            size = ompi_comm_size(comm);
-        } else {
-            size = ompi_comm_remote_size(comm);
-        }
-
+        else if (MPI_OP_NULL == op) {
+            err = MPI_ERR_OP;
+        } else if (ompi_op_is_intrinsic(op) &&
+                   datatype->id < DT_MAX_PREDEFINED &&
+                   -1 == ompi_op_ddt_map[datatype->id]) {
+            err = MPI_ERR_OP;
+        } else if (NULL == recvcounts) {
+            err = MPI_ERR_COUNT;
+        }
+        OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
+
+        /* We always define the remote group to be the same as the local
+           group in the case of an intracommunicator, so it's safe to
+           get the size of the remote group here for both intra- and
+           intercommunicators */
+
+        size = ompi_comm_remote_size(comm);
         for (i = 0; i < size; ++i) {
-            if (recvcounts[i] < 0) {
-                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
-            }
+            OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, recvcounts[i]);
+            OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
         }
     }

View file

@@ -28,6 +28,7 @@ int MPI_Scan(void *sendbuf, void *recvbuf, int count,
     int err;
 
     if (MPI_PARAM_CHECK) {
+        err = MPI_SUCCESS;
         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
         if (ompi_comm_invalid(comm)) {
             return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
@@ -37,28 +38,22 @@ int MPI_Scan(void *sendbuf, void *recvbuf, int count,
 
         /* No intercommunicators allowed! (MPI does not define
            MPI_SCAN on intercommunicators) */
 
-        if (OMPI_COMM_IS_INTER(comm)) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COMM, FUNC_NAME);
+        else if (OMPI_COMM_IS_INTER(comm)) {
+            err = MPI_ERR_COMM;
         }
 
         /* Unrooted operation; checks for all ranks */
 
-        if (MPI_DATATYPE_NULL == datatype) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
-        }
-
-        if (MPI_OP_NULL == op) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, FUNC_NAME);
-        }
-
-        if (ompi_op_is_intrinsic(op) && datatype->id < DT_MAX_PREDEFINED &&
-            -1 == ompi_op_ddt_map[datatype->id]) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, FUNC_NAME);
-        }
-
-        if (count < 0) {
-            return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
-        }
+        else if (MPI_OP_NULL == op) {
+            err = MPI_ERR_OP;
+        } else if (ompi_op_is_intrinsic(op) &&
+                   datatype->id < DT_MAX_PREDEFINED &&
+                   -1 == ompi_op_ddt_map[datatype->id]) {
+            err = MPI_ERR_OP;
+        } else {
+            OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, count);
+        }
+        OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
     }
 
     /* Call the coll component to actually perform the allgather */

View file

@@ -28,6 +28,7 @@ int MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
     int err;
 
     if (MPI_PARAM_CHECK) {
+        err = MPI_SUCCESS;
         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
         if (ompi_comm_invalid(comm)) {
             return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
@@ -41,15 +42,11 @@ int MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
 
             /* Errors for all ranks */
 
             if ((root >= ompi_comm_size(comm)) || (root < 0)) {
-                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
-            }
-
-            if (recvcount < 0) {
-                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
-            }
-
-            if (recvtype == MPI_DATATYPE_NULL) {
-                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
+                err = MPI_ERR_ROOT;
+            } else if (recvcount < 0) {
+                err = MPI_ERR_COUNT;
+            } else if (MPI_DATATYPE_NULL == recvtype) {
+                err = MPI_ERR_TYPE;
             }
 
             /* Errors for the root. Some of these could have been
@@ -58,34 +55,27 @@ int MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                run time) for efficiency, it's more clear to separate
                them out into individual tests. */
 
-            if (ompi_comm_rank(comm) == root) {
-                if (sendtype == MPI_DATATYPE_NULL) {
-                    return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
-                }
-
-                if (sendcount < 0) {
-                    return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
-                }
+            else if (ompi_comm_rank(comm) == root) {
+                OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
             }
+            OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
         }
 
         /* Errors for intercommunicators */
 
         else {
             if (! ((root >= 0 && root < ompi_comm_remote_size(comm)) ||
-                   root == MPI_ROOT || root == MPI_PROC_NULL)) {
-                return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
+                   MPI_ROOT == root || MPI_PROC_NULL == root)) {
+                err = MPI_ERR_ROOT;
             }
 
             /* Errors for the receivers */
 
-            if (root != MPI_ROOT && root != MPI_PROC_NULL) {
+            else if (MPI_ROOT != root && MPI_PROC_NULL != root) {
                 if (recvcount < 0) {
-                    return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
-                }
-
-                if (recvtype == MPI_DATATYPE_NULL) {
-                    return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
+                    err = MPI_ERR_COUNT;
+                } else if (MPI_DATATYPE_NULL == recvtype) {
+                    err = MPI_ERR_TYPE;
                 }
             }
@@ -94,14 +84,9 @@ int MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                make the code easier to read. */
 
             else if (MPI_ROOT == root) {
-                if (sendcount < 0) {
-                    return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
-                }
-
-                if (sendtype == MPI_DATATYPE_NULL) {
-                    return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
-                }
+                OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
             }
+            OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
         }
     }

View file

@@ -28,6 +28,7 @@ int MPI_Scatterv(void *sendbuf, int *sendcounts, int *displs,
     int i, size, err;
 
     if (MPI_PARAM_CHECK) {
+        err = MPI_SUCCESS;
         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
         if (ompi_comm_invalid(comm)) {
             return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
@@ -48,7 +49,7 @@ int MPI_Scatterv(void *sendbuf, int *sendcounts, int *displs,
                 return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
             }
 
-            if (recvtype == MPI_DATATYPE_NULL) {
+            if (MPI_DATATYPE_NULL == recvtype) {
                 return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
             }
@@ -59,10 +60,6 @@ int MPI_Scatterv(void *sendbuf, int *sendcounts, int *displs,
                them out into individual tests. */
 
             if (ompi_comm_rank(comm) == root) {
-                if (sendtype == MPI_DATATYPE_NULL) {
-                    return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
-                }
-
                 if (NULL == displs) {
                     return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
                 }
@@ -73,9 +70,8 @@ int MPI_Scatterv(void *sendbuf, int *sendcounts, int *displs,
                 size = ompi_comm_size(comm);
                 for (i = 0; i < size; ++i) {
-                    if (sendcounts[i] < 0) {
-                        return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
-                    }
+                    OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
+                    OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
                 }
             }
         }
@@ -84,18 +80,18 @@ int MPI_Scatterv(void *sendbuf, int *sendcounts, int *displs,
 
         else {
             if (! ((root >= 0 && root < ompi_comm_remote_size(comm)) ||
-                   root == MPI_ROOT || root == MPI_PROC_NULL)) {
+                   MPI_ROOT == root || MPI_PROC_NULL == root)) {
                 return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
             }
 
             /* Errors for the receivers */
 
-            if (root != MPI_ROOT && root != MPI_PROC_NULL) {
+            if (MPI_ROOT != root && MPI_PROC_NULL != root) {
                 if (recvcount < 0) {
                     return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
                 }
 
-                if (recvtype == MPI_DATATYPE_NULL) {
+                if (MPI_DATATYPE_NULL == recvtype) {
                     return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
                 }
             }
@@ -115,9 +111,8 @@ int MPI_Scatterv(void *sendbuf, int *sendcounts, int *displs,
                 size = ompi_comm_size(comm);
                 for (i = 0; i < size; ++i) {
-                    if (sendcounts[i] < 0) {
-                        return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
-                    }
+                    OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
+                    OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
                 }
             }
         }

View file

@@ -34,12 +34,15 @@ int MPI_Send(void *buf, int count, MPI_Datatype type, int dest,
         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
         if (ompi_comm_invalid(comm)) {
             return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
+        } else if (count < 0) {
+            rc = MPI_ERR_COUNT;
         } else if (tag < 0 || tag > MPI_TAG_UB_VALUE) {
             rc = MPI_ERR_TAG;
         } else if (ompi_comm_peer_invalid(comm, dest)) {
             rc = MPI_ERR_RANK;
-        } else
-            OMPI_CHECK_DATATYPE_FOR_SEND( rc, type, count );
+        } else {
+            OMPI_CHECK_DATATYPE_FOR_SEND(rc, type, count);
+        }
 
         OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
     }