
More fixes so that picky compilers shut up about a local variable named "index" shadowing a global function named "index".

This commit was SVN r25752.
This commit is contained in:
Jeff Squyres 2012-01-20 02:10:57 +00:00
parent c77325fe42
commit 3661fe7a4e
5 changed files with 65 additions and 61 deletions
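
For context, here is a minimal standalone sketch (not Open MPI code; the sum_degrees helpers are hypothetical) of the warning this rename silences: the legacy POSIX string function index() is declared in <strings.h>, so picky compilers built with warnings such as -Wshadow complain when a local variable or parameter is also named "index". Renaming the locals to idx/indx, as the diffs below do, avoids the warning without changing behavior.

#include <strings.h>   /* declares the legacy BSD/POSIX function: char *index(const char *, int); */
#include <stdio.h>

/* On picky compilers: "declaration of 'index' shadows a global declaration" */
static int sum_degrees(const int *index, int n)
{
    int total = 0;
    for (int i = 0; i < n; ++i) {
        total += index[i];
    }
    return total;
}

/* Same code with the parameter renamed, as in this commit: no shadowing warning */
static int sum_degrees_fixed(const int *idx, int n)
{
    int total = 0;
    for (int i = 0; i < n; ++i) {
        total += idx[i];
    }
    return total;
}

int main(void)
{
    int degrees[] = { 2, 3, 4 };
    printf("%d %d\n", sum_degrees(degrees, 3), sum_degrees_fixed(degrees, 3));
    return 0;
}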

View file

@@ -9,6 +9,7 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@@ -31,8 +32,8 @@ OMPI_GENERATE_F77_BINDINGS (PMPI_GRAPH_CREATE,
pmpi_graph_create_,
pmpi_graph_create__,
pmpi_graph_create_f,
(MPI_Fint *comm_old, MPI_Fint *nnodes, MPI_Fint *index, MPI_Fint *edges, ompi_fortran_logical_t *reorder, MPI_Fint *comm_graph, MPI_Fint *ierr),
(comm_old, nnodes, index, edges, reorder, comm_graph, ierr) )
(MPI_Fint *comm_old, MPI_Fint *nnodes, MPI_Fint *idx, MPI_Fint *edges, ompi_fortran_logical_t *reorder, MPI_Fint *comm_graph, MPI_Fint *ierr),
(comm_old, nnodes, idx, edges, reorder, comm_graph, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
@@ -48,8 +49,8 @@ OMPI_GENERATE_F77_BINDINGS (MPI_GRAPH_CREATE,
mpi_graph_create_,
mpi_graph_create__,
mpi_graph_create_f,
(MPI_Fint *comm_old, MPI_Fint *nnodes, MPI_Fint *index, MPI_Fint *edges, ompi_fortran_logical_t *reorder, MPI_Fint *comm_graph, MPI_Fint *ierr),
(comm_old, nnodes, index, edges, reorder, comm_graph, ierr) )
(MPI_Fint *comm_old, MPI_Fint *nnodes, MPI_Fint *idx, MPI_Fint *edges, ompi_fortran_logical_t *reorder, MPI_Fint *comm_graph, MPI_Fint *ierr),
(comm_old, nnodes, idx, edges, reorder, comm_graph, ierr) )
#endif
@@ -58,24 +59,24 @@ OMPI_GENERATE_F77_BINDINGS (MPI_GRAPH_CREATE,
#endif
void mpi_graph_create_f(MPI_Fint *comm_old, MPI_Fint *nnodes,
MPI_Fint *index, MPI_Fint *edges,
MPI_Fint *idx, MPI_Fint *edges,
ompi_fortran_logical_t *reorder, MPI_Fint *comm_graph,
MPI_Fint *ierr)
{
MPI_Comm c_comm_old, c_comm_graph;
OMPI_ARRAY_NAME_DECL(index);
OMPI_ARRAY_NAME_DECL(idx);
OMPI_ARRAY_NAME_DECL(edges);
c_comm_old = MPI_Comm_f2c(*comm_old);
OMPI_ARRAY_FINT_2_INT(index, *nnodes);
OMPI_ARRAY_FINT_2_INT(idx, *nnodes);
/* Number of edges is equal to the last entry in the index array */
OMPI_ARRAY_FINT_2_INT(edges, index[*nnodes - 1]);
/* Number of edges is equal to the last entry in the idx array */
OMPI_ARRAY_FINT_2_INT(edges, idx[*nnodes - 1]);
*ierr = OMPI_INT_2_FINT(MPI_Graph_create(c_comm_old,
OMPI_FINT_2_INT(*nnodes),
OMPI_ARRAY_NAME_CONVERT(index),
OMPI_ARRAY_NAME_CONVERT(idx),
OMPI_ARRAY_NAME_CONVERT(edges),
OMPI_LOGICAL_2_INT(*reorder),
&c_comm_graph));
@@ -83,6 +84,6 @@ void mpi_graph_create_f(MPI_Fint *comm_old, MPI_Fint *nnodes,
*comm_graph = MPI_Comm_c2f(c_comm_graph);
}
OMPI_ARRAY_FINT_2_INT_CLEANUP(index);
OMPI_ARRAY_FINT_2_INT_CLEANUP(idx);
OMPI_ARRAY_FINT_2_INT_CLEANUP(edges);
}

View file

@@ -9,7 +9,7 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@@ -32,8 +32,8 @@ OMPI_GENERATE_F77_BINDINGS (PMPI_GRAPH_GET,
pmpi_graph_get_,
pmpi_graph_get__,
pmpi_graph_get_f,
(MPI_Fint *comm, MPI_Fint *maxindex, MPI_Fint *maxedges, MPI_Fint *index, MPI_Fint *edges, MPI_Fint *ierr),
(comm, maxindex, maxedges, index, edges, ierr) )
(MPI_Fint *comm, MPI_Fint *maxidx, MPI_Fint *maxedges, MPI_Fint *idx, MPI_Fint *edges, MPI_Fint *ierr),
(comm, maxidx, maxedges, idx, edges, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
@@ -49,8 +49,8 @@ OMPI_GENERATE_F77_BINDINGS (MPI_GRAPH_GET,
mpi_graph_get_,
mpi_graph_get__,
mpi_graph_get_f,
(MPI_Fint *comm, MPI_Fint *maxindex, MPI_Fint *maxedges, MPI_Fint *index, MPI_Fint *edges, MPI_Fint *ierr),
(comm, maxindex, maxedges, index, edges, ierr) )
(MPI_Fint *comm, MPI_Fint *maxidx, MPI_Fint *maxedges, MPI_Fint *idx, MPI_Fint *edges, MPI_Fint *ierr),
(comm, maxidx, maxedges, idx, edges, ierr) )
#endif
@@ -58,28 +58,28 @@ OMPI_GENERATE_F77_BINDINGS (MPI_GRAPH_GET,
#include "ompi/mpi/f77/profile/defines.h"
#endif
void mpi_graph_get_f(MPI_Fint *comm, MPI_Fint *maxindex,
MPI_Fint *maxedges, MPI_Fint *index,
void mpi_graph_get_f(MPI_Fint *comm, MPI_Fint *maxidx,
MPI_Fint *maxedges, MPI_Fint *idx,
MPI_Fint *edges, MPI_Fint *ierr)
{
MPI_Comm c_comm;
OMPI_ARRAY_NAME_DECL(index);
OMPI_ARRAY_NAME_DECL(idx);
OMPI_ARRAY_NAME_DECL(edges);
c_comm = MPI_Comm_f2c(*comm);
OMPI_ARRAY_FINT_2_INT_ALLOC(index, *maxindex);
OMPI_ARRAY_FINT_2_INT_ALLOC(idx, *maxidx);
OMPI_ARRAY_FINT_2_INT_ALLOC(edges, *maxedges);
*ierr = OMPI_INT_2_FINT(MPI_Graph_get(c_comm,
OMPI_FINT_2_INT(*maxindex),
OMPI_FINT_2_INT(*maxidx),
OMPI_FINT_2_INT(*maxedges),
OMPI_ARRAY_NAME_CONVERT(index),
OMPI_ARRAY_NAME_CONVERT(idx),
OMPI_ARRAY_NAME_CONVERT(edges)));
if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr)) {
OMPI_ARRAY_INT_2_FINT(index, *maxindex);
OMPI_ARRAY_INT_2_FINT(idx, *maxidx);
OMPI_ARRAY_INT_2_FINT(edges, *maxedges);
} else {
OMPI_ARRAY_FINT_2_INT_CLEANUP(index);
OMPI_ARRAY_FINT_2_INT_CLEANUP(idx);
OMPI_ARRAY_FINT_2_INT_CLEANUP(edges);
}
}

View file

@@ -9,6 +9,7 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@@ -31,8 +32,8 @@ OMPI_GENERATE_F77_BINDINGS (PMPI_GRAPH_MAP,
pmpi_graph_map_,
pmpi_graph_map__,
pmpi_graph_map_f,
(MPI_Fint *comm, MPI_Fint *nnodes, MPI_Fint *index, MPI_Fint *edges, MPI_Fint *newrank, MPI_Fint *ierr),
(comm, nnodes, index, edges, newrank, ierr) )
(MPI_Fint *comm, MPI_Fint *nnodes, MPI_Fint *indx, MPI_Fint *edges, MPI_Fint *newrank, MPI_Fint *ierr),
(comm, nnodes, indx, edges, newrank, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
@@ -48,8 +49,8 @@ OMPI_GENERATE_F77_BINDINGS (MPI_GRAPH_MAP,
mpi_graph_map_,
mpi_graph_map__,
mpi_graph_map_f,
(MPI_Fint *comm, MPI_Fint *nnodes, MPI_Fint *index, MPI_Fint *edges, MPI_Fint *newrank, MPI_Fint *ierr),
(comm, nnodes, index, edges, newrank, ierr) )
(MPI_Fint *comm, MPI_Fint *nnodes, MPI_Fint *indx, MPI_Fint *edges, MPI_Fint *newrank, MPI_Fint *ierr),
(comm, nnodes, indx, edges, newrank, ierr) )
#endif
@@ -57,27 +58,27 @@ OMPI_GENERATE_F77_BINDINGS (MPI_GRAPH_MAP,
#include "ompi/mpi/f77/profile/defines.h"
#endif
void mpi_graph_map_f(MPI_Fint *comm, MPI_Fint *nnodes, MPI_Fint *index,
void mpi_graph_map_f(MPI_Fint *comm, MPI_Fint *nnodes, MPI_Fint *indx,
MPI_Fint *edges, MPI_Fint *nrank, MPI_Fint *ierr)
{
MPI_Comm c_comm;
OMPI_ARRAY_NAME_DECL(index);
OMPI_ARRAY_NAME_DECL(indx);
OMPI_ARRAY_NAME_DECL(edges);
OMPI_SINGLE_NAME_DECL(nrank);
c_comm = MPI_Comm_f2c(*comm);
/* Number of edges is equal to the last entry in the index array */
OMPI_ARRAY_FINT_2_INT(edges, index[*nnodes - 1]);
OMPI_ARRAY_FINT_2_INT(index, *nnodes);
/* Number of edges is equal to the last entry in the indx array */
OMPI_ARRAY_FINT_2_INT(edges, indx[*nnodes - 1]);
OMPI_ARRAY_FINT_2_INT(indx, *nnodes);
*ierr = OMPI_INT_2_FINT(MPI_Graph_map(c_comm, OMPI_FINT_2_INT(*nnodes),
OMPI_ARRAY_NAME_CONVERT(index),
OMPI_ARRAY_NAME_CONVERT(indx),
OMPI_ARRAY_NAME_CONVERT(edges),
OMPI_SINGLE_NAME_CONVERT(nrank)));
if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr)) {
OMPI_SINGLE_INT_2_FINT(nrank);
}
OMPI_ARRAY_FINT_2_INT_CLEANUP(edges);
OMPI_ARRAY_FINT_2_INT_CLEANUP(index);
OMPI_ARRAY_FINT_2_INT_CLEANUP(indx);
}

View file

@@ -9,6 +9,7 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@@ -34,8 +35,8 @@ OMPI_GENERATE_F77_BINDINGS (PMPI_TESTANY,
pmpi_testany_,
pmpi_testany__,
pmpi_testany_f,
(MPI_Fint *count, MPI_Fint *array_of_requests, MPI_Fint *index, ompi_fortran_logical_t *flag, MPI_Fint *status, MPI_Fint *ierr),
(count, array_of_requests, index, flag, status, ierr) )
(MPI_Fint *count, MPI_Fint *array_of_requests, MPI_Fint *indx, ompi_fortran_logical_t *flag, MPI_Fint *status, MPI_Fint *ierr),
(count, array_of_requests, indx, flag, status, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
@@ -51,8 +52,8 @@ OMPI_GENERATE_F77_BINDINGS (MPI_TESTANY,
mpi_testany_,
mpi_testany__,
mpi_testany_f,
(MPI_Fint *count, MPI_Fint *array_of_requests, MPI_Fint *index, ompi_fortran_logical_t *flag, MPI_Fint *status, MPI_Fint *ierr),
(count, array_of_requests, index, flag, status, ierr) )
(MPI_Fint *count, MPI_Fint *array_of_requests, MPI_Fint *indx, ompi_fortran_logical_t *flag, MPI_Fint *status, MPI_Fint *ierr),
(count, array_of_requests, indx, flag, status, ierr) )
#endif
@@ -63,13 +64,13 @@ OMPI_GENERATE_F77_BINDINGS (MPI_TESTANY,
static const char FUNC_NAME[] = "MPI_TESTANY";
void mpi_testany_f(MPI_Fint *count, MPI_Fint *array_of_requests, MPI_Fint *index, ompi_fortran_logical_t *flag, MPI_Fint *status, MPI_Fint *ierr)
void mpi_testany_f(MPI_Fint *count, MPI_Fint *array_of_requests, MPI_Fint *indx, ompi_fortran_logical_t *flag, MPI_Fint *status, MPI_Fint *ierr)
{
MPI_Request *c_req;
MPI_Status c_status;
int i;
OMPI_LOGICAL_NAME_DECL(flag);
OMPI_SINGLE_NAME_DECL(index);
OMPI_SINGLE_NAME_DECL(indx);
c_req = (MPI_Request *) malloc(OMPI_FINT_2_INT(*count) * sizeof(MPI_Request));
if (c_req == NULL) {
@@ -84,21 +85,21 @@ void mpi_testany_f(MPI_Fint *count, MPI_Fint *array_of_requests, MPI_Fint *index
}
*ierr = OMPI_INT_2_FINT(MPI_Testany(OMPI_FINT_2_INT(*count), c_req,
OMPI_SINGLE_NAME_CONVERT(index),
OMPI_SINGLE_NAME_CONVERT(indx),
OMPI_LOGICAL_SINGLE_NAME_CONVERT(flag),
&c_status));
OMPI_SINGLE_INT_2_LOGICAL(flag);
if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr)) {
/* Increment index by one for fortran conventions */
/* Increment indx by one for fortran conventions */
OMPI_SINGLE_INT_2_FINT(index);
OMPI_SINGLE_INT_2_FINT(indx);
if (*flag &&
MPI_UNDEFINED != *(OMPI_SINGLE_NAME_CONVERT(index))) {
array_of_requests[OMPI_INT_2_FINT(*index)] =
c_req[OMPI_INT_2_FINT(*index)]->req_f_to_c_index;
++(*index);
MPI_UNDEFINED != *(OMPI_SINGLE_NAME_CONVERT(indx))) {
array_of_requests[OMPI_INT_2_FINT(*indx)] =
c_req[OMPI_INT_2_FINT(*indx)]->req_f_to_c_index;
++(*indx);
}
if (!OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
MPI_Status_c2f(&c_status, status);

View file

@@ -9,6 +9,7 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@@ -34,8 +35,8 @@ OMPI_GENERATE_F77_BINDINGS (PMPI_WAITANY,
pmpi_waitany_,
pmpi_waitany__,
pmpi_waitany_f,
(MPI_Fint *count, MPI_Fint *array_of_requests, MPI_Fint *index, MPI_Fint *status, MPI_Fint *ierr),
(count, array_of_requests, index, status, ierr) )
(MPI_Fint *count, MPI_Fint *array_of_requests, MPI_Fint *indx, MPI_Fint *status, MPI_Fint *ierr),
(count, array_of_requests, indx, status, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
@@ -51,8 +52,8 @@ OMPI_GENERATE_F77_BINDINGS (MPI_WAITANY,
mpi_waitany_,
mpi_waitany__,
mpi_waitany_f,
(MPI_Fint *count, MPI_Fint *array_of_requests, MPI_Fint *index, MPI_Fint *status, MPI_Fint *ierr),
(count, array_of_requests, index, status, ierr) )
(MPI_Fint *count, MPI_Fint *array_of_requests, MPI_Fint *indx, MPI_Fint *status, MPI_Fint *ierr),
(count, array_of_requests, indx, status, ierr) )
#endif
@@ -64,12 +65,12 @@ static const char FUNC_NAME[] = "MPI_WAITANY";
void mpi_waitany_f(MPI_Fint *count, MPI_Fint *array_of_requests,
MPI_Fint *index, MPI_Fint *status, MPI_Fint *ierr)
MPI_Fint *indx, MPI_Fint *status, MPI_Fint *ierr)
{
MPI_Request *c_req;
MPI_Status c_status;
int i, c_err;
OMPI_SINGLE_NAME_DECL(index);
OMPI_SINGLE_NAME_DECL(indx);
c_req = (MPI_Request *) malloc(OMPI_FINT_2_INT(*count) * sizeof(MPI_Request));
if (NULL == c_req) {
@@ -84,18 +85,18 @@ void mpi_waitany_f(MPI_Fint *count, MPI_Fint *array_of_requests,
}
*ierr = OMPI_INT_2_FINT(MPI_Waitany(OMPI_FINT_2_INT(*count), c_req,
OMPI_SINGLE_NAME_CONVERT(index),
OMPI_SINGLE_NAME_CONVERT(indx),
&c_status));
if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr)) {
/* Increment index by one for fortran conventions */
/* Increment indx by one for fortran conventions */
OMPI_SINGLE_INT_2_FINT(index);
if (MPI_UNDEFINED != *(OMPI_SINGLE_NAME_CONVERT(index))) {
array_of_requests[OMPI_INT_2_FINT(*index)] =
c_req[OMPI_INT_2_FINT(*index)]->req_f_to_c_index;
++(*index);
OMPI_SINGLE_INT_2_FINT(indx);
if (MPI_UNDEFINED != *(OMPI_SINGLE_NAME_CONVERT(indx))) {
array_of_requests[OMPI_INT_2_FINT(*indx)] =
c_req[OMPI_INT_2_FINT(*indx)]->req_f_to_c_index;
++(*indx);
}
if (!OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
MPI_Status_c2f(&c_status, status);