
* Initial merge of the non-blocking collectives interface. No implementation of the back-end yet, coming real soon now, need to solve some tag issues first.

This commit was SVN r26641.
Brian Barrett 2012-06-22 20:54:12 +00:00
parent 148ae6d6e3
commit b9e8e4aeb9
108 changed files with 9568 additions and 28 deletions
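Since this commit only adds the interface (prototypes, coll module hooks, and the MPI C bindings) with no back-end yet, the intended usage is the standard request-based pattern. A minimal application-level sketch (ordinary MPI-3 usage, not code from this commit):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, value, sum;
    MPI_Request req;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Start the collective; it returns immediately with a request,
       just like MPI_Isend/MPI_Irecv. */
    value = rank;
    MPI_Iallreduce(&value, &sum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &req);

    /* ... unrelated computation can overlap with the collective ... */

    /* Complete the operation before using the result. */
    MPI_Wait(&req, MPI_STATUS_IGNORE);
    printf("rank %d: sum = %d\n", rank, sum);

    MPI_Finalize();
    return 0;
}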


@ -11,7 +11,7 @@
* All rights reserved.
* Copyright (c) 2007-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2008-2009 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2009 Oak Ridge National Labs. All rights reserved.
* Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2011 Sandia National Laboratories. All rights reserved.
* $COPYRIGHT$
*
@ -307,6 +307,7 @@
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
/*
* Typedefs
*/
@ -887,6 +888,7 @@ OMPI_DECLSPEC extern struct ompi_predefined_info_t ompi_mpi_info_null;
OMPI_DECLSPEC extern MPI_Fint *MPI_F_STATUS_IGNORE;
OMPI_DECLSPEC extern MPI_Fint *MPI_F_STATUSES_IGNORE;
/*
* MPI predefined handles
*/
@ -1049,22 +1051,39 @@ OMPI_DECLSPEC int MPI_Address(void *location, MPI_Aint *address)
OMPI_DECLSPEC int MPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount,
MPI_Datatype recvtype, MPI_Comm comm);
OMPI_DECLSPEC int MPI_Iallgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount,
MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int MPI_Allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int *recvcounts,
int *displs, MPI_Datatype recvtype, MPI_Comm comm);
OMPI_DECLSPEC int MPI_Iallgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int *recvcounts,
int *displs, MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int MPI_Alloc_mem(MPI_Aint size, MPI_Info info,
void *baseptr);
OMPI_DECLSPEC int MPI_Allreduce(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
OMPI_DECLSPEC int MPI_Iallreduce(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount,
MPI_Datatype recvtype, MPI_Comm comm);
OMPI_DECLSPEC int MPI_Ialltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount,
MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int MPI_Alltoallv(void *sendbuf, int *sendcounts, int *sdispls,
MPI_Datatype sendtype, void *recvbuf, int *recvcounts,
int *rdispls, MPI_Datatype recvtype, MPI_Comm comm);
OMPI_DECLSPEC int MPI_Ialltoallv(void *sendbuf, int *sendcounts, int *sdispls,
MPI_Datatype sendtype, void *recvbuf, int *recvcounts,
int *rdispls, MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int MPI_Alltoallw(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendtypes,
void *recvbuf, int *recvcounts, int *rdispls, MPI_Datatype *recvtypes,
MPI_Comm comm);
OMPI_DECLSPEC int MPI_Ialltoallw(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendtypes,
void *recvbuf, int *recvcounts, int *rdispls, MPI_Datatype *recvtypes,
MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int MPI_Attr_delete(MPI_Comm comm, int keyval)
__mpi_interface_deprecated__("MPI_Attr_delete is superseded by MPI_Comm_delete_attr in MPI-2.0");
OMPI_DECLSPEC int MPI_Attr_get(MPI_Comm comm, int keyval, void *attribute_val, int *flag)
@ -1072,10 +1091,14 @@ OMPI_DECLSPEC int MPI_Attr_get(MPI_Comm comm, int keyval, void *attribute_val,
OMPI_DECLSPEC int MPI_Attr_put(MPI_Comm comm, int keyval, void *attribute_val)
__mpi_interface_deprecated__("MPI_Attr_put is superseded by MPI_Comm_set_attr in MPI-2.0");
OMPI_DECLSPEC int MPI_Barrier(MPI_Comm comm);
OMPI_DECLSPEC int MPI_Ibarrier(MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int MPI_Bcast(void *buffer, int count, MPI_Datatype datatype,
int root, MPI_Comm comm);
OMPI_DECLSPEC int MPI_Bsend(void *buf, int count, MPI_Datatype datatype,
int dest, int tag, MPI_Comm comm);
OMPI_DECLSPEC int MPI_Ibcast(void *buffer, int count, MPI_Datatype datatype,
int root, MPI_Comm comm,
MPI_Request *request);
OMPI_DECLSPEC int MPI_Bsend_init(void *buf, int count, MPI_Datatype datatype,
int dest, int tag, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int MPI_Buffer_attach(void *buffer, int size);
@ -1152,6 +1175,8 @@ OMPI_DECLSPEC int MPI_Error_class(int errorcode, int *errorclass);
OMPI_DECLSPEC int MPI_Error_string(int errorcode, char *string, int *resultlen);
OMPI_DECLSPEC int MPI_Exscan(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
OMPI_DECLSPEC int MPI_Iexscan(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request);
#if OMPI_PROVIDE_MPI_FILE_INTERFACE
OMPI_DECLSPEC MPI_Fint MPI_File_c2f(MPI_File file);
OMPI_DECLSPEC MPI_File MPI_File_f2c(MPI_Fint file);
@ -1248,9 +1273,15 @@ OMPI_DECLSPEC int MPI_Free_mem(void *base);
OMPI_DECLSPEC int MPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
int root, MPI_Comm comm);
OMPI_DECLSPEC int MPI_Igather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
int root, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int MPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int *recvcounts, int *displs,
MPI_Datatype recvtype, int root, MPI_Comm comm);
OMPI_DECLSPEC int MPI_Igatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int *recvcounts, int *displs,
MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int MPI_Get_address(void *location, MPI_Aint *address);
OMPI_DECLSPEC int MPI_Get_count(MPI_Status *status, MPI_Datatype datatype, int *count);
OMPI_DECLSPEC int MPI_Get_elements(MPI_Status *status, MPI_Datatype datatype, int *count);
@ -1381,10 +1412,18 @@ OMPI_DECLSPEC int MPI_Recv(void *buf, int count, MPI_Datatype datatype, int sou
int tag, MPI_Comm comm, MPI_Status *status);
OMPI_DECLSPEC int MPI_Reduce(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm);
OMPI_DECLSPEC int MPI_Ireduce(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int MPI_Reduce_local(void *inbuf, void *inoutbuf, int count,
MPI_Datatype datatype, MPI_Op op);
OMPI_DECLSPEC int MPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
OMPI_DECLSPEC int MPI_Ireduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int MPI_Reduce_scatter_block(void *sendbuf, void *recvbuf, int recvcount,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
OMPI_DECLSPEC int MPI_Ireduce_scatter_block(void *sendbuf, void *recvbuf, int recvcount,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int MPI_Register_datarep(char *datarep,
MPI_Datarep_conversion_function *read_conversion_fn,
MPI_Datarep_conversion_function *write_conversion_fn,
@ -1402,12 +1441,20 @@ OMPI_DECLSPEC int MPI_Rsend_init(void *buf, int count, MPI_Datatype datatype,
MPI_Request *request);
OMPI_DECLSPEC int MPI_Scan(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
OMPI_DECLSPEC int MPI_Iscan(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
int root, MPI_Comm comm);
OMPI_DECLSPEC int MPI_Iscatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
int root, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int MPI_Scatterv(void *sendbuf, int *sendcounts, int *displs,
MPI_Datatype sendtype, void *recvbuf, int recvcount,
MPI_Datatype recvtype, int root, MPI_Comm comm);
OMPI_DECLSPEC int MPI_Iscatterv(void *sendbuf, int *sendcounts, int *displs,
MPI_Datatype sendtype, void *recvbuf, int recvcount,
MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int MPI_Send_init(void *buf, int count, MPI_Datatype datatype,
int dest, int tag, MPI_Comm comm,
MPI_Request *request);
@ -1589,22 +1636,39 @@ OMPI_DECLSPEC int PMPI_Address(void *location, MPI_Aint *address)
OMPI_DECLSPEC int PMPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount,
MPI_Datatype recvtype, MPI_Comm comm);
OMPI_DECLSPEC int PMPI_Iallgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount,
MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int PMPI_Allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int *recvcounts,
int *displs, MPI_Datatype recvtype, MPI_Comm comm);
OMPI_DECLSPEC int PMPI_Iallgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int *recvcounts,
int *displs, MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int PMPI_Alloc_mem(MPI_Aint size, MPI_Info info,
void *baseptr);
OMPI_DECLSPEC int PMPI_Allreduce(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
OMPI_DECLSPEC int PMPI_Iallreduce(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int PMPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount,
MPI_Datatype recvtype, MPI_Comm comm);
OMPI_DECLSPEC int PMPI_Ialltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount,
MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int PMPI_Alltoallv(void *sendbuf, int *sendcounts, int *sdispls,
MPI_Datatype sendtype, void *recvbuf, int *recvcounts,
int *rdispls, MPI_Datatype recvtype, MPI_Comm comm);
OMPI_DECLSPEC int PMPI_Ialltoallv(void *sendbuf, int *sendcounts, int *sdispls,
MPI_Datatype sendtype, void *recvbuf, int *recvcounts,
int *rdispls, MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int PMPI_Alltoallw(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendtypes,
void *recvbuf, int *recvcounts, int *rdispls, MPI_Datatype *recvtypes,
MPI_Comm comm);
OMPI_DECLSPEC int PMPI_Ialltoallw(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendtypes,
void *recvbuf, int *recvcounts, int *rdispls, MPI_Datatype *recvtypes,
MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int PMPI_Attr_delete(MPI_Comm comm, int keyval)
__mpi_interface_deprecated__("MPI_Attr_delete is superseded by MPI_Comm_delete_attr in MPI-2.0");
OMPI_DECLSPEC int PMPI_Attr_get(MPI_Comm comm, int keyval, void *attribute_val, int *flag)
@ -1612,8 +1676,12 @@ OMPI_DECLSPEC int PMPI_Attr_get(MPI_Comm comm, int keyval, void *attribute_val,
OMPI_DECLSPEC int PMPI_Attr_put(MPI_Comm comm, int keyval, void *attribute_val)
__mpi_interface_deprecated__("MPI_Attr_put is superseded by MPI_Comm_set_attr in MPI-2.0");
OMPI_DECLSPEC int PMPI_Barrier(MPI_Comm comm);
OMPI_DECLSPEC int PMPI_Ibarrier(MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int PMPI_Bcast(void *buffer, int count, MPI_Datatype datatype,
int root, MPI_Comm comm);
OMPI_DECLSPEC int PMPI_Ibcast(void *buffer, int count, MPI_Datatype datatype,
int root, MPI_Comm comm,
MPI_Request *request);
OMPI_DECLSPEC int PMPI_Bsend(void *buf, int count, MPI_Datatype datatype,
int dest, int tag, MPI_Comm comm);
OMPI_DECLSPEC int PMPI_Bsend_init(void *buf, int count, MPI_Datatype datatype,
@ -1692,6 +1760,8 @@ OMPI_DECLSPEC int PMPI_Error_class(int errorcode, int *errorclass);
OMPI_DECLSPEC int PMPI_Error_string(int errorcode, char *string, int *resultlen);
OMPI_DECLSPEC int PMPI_Exscan(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
OMPI_DECLSPEC int PMPI_Iexscan(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request);
#if OMPI_PROVIDE_MPI_FILE_INTERFACE
OMPI_DECLSPEC MPI_Fint PMPI_File_c2f(MPI_File file);
OMPI_DECLSPEC MPI_File PMPI_File_f2c(MPI_Fint file);
@ -1788,9 +1858,15 @@ OMPI_DECLSPEC int PMPI_Free_mem(void *base);
OMPI_DECLSPEC int PMPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
int root, MPI_Comm comm);
OMPI_DECLSPEC int PMPI_Igather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
int root, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int PMPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int *recvcounts, int *displs,
MPI_Datatype recvtype, int root, MPI_Comm comm);
OMPI_DECLSPEC int PMPI_Igatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int *recvcounts, int *displs,
MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int PMPI_Get_address(void *location, MPI_Aint *address);
OMPI_DECLSPEC int PMPI_Get_count(MPI_Status *status, MPI_Datatype datatype, int *count);
OMPI_DECLSPEC int PMPI_Get_elements(MPI_Status *status, MPI_Datatype datatype,
@ -1922,10 +1998,14 @@ OMPI_DECLSPEC int PMPI_Recv(void *buf, int count, MPI_Datatype datatype, int so
int tag, MPI_Comm comm, MPI_Status *status);
OMPI_DECLSPEC int PMPI_Reduce(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm);
OMPI_DECLSPEC int PMPI_Ireduce(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int PMPI_Reduce_local(void *inbuf, void *inoutbuf, int count,
MPI_Datatype datatype, MPI_Op op);
OMPI_DECLSPEC int PMPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
OMPI_DECLSPEC int PMPI_Ireduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int PMPI_Register_datarep(char *datarep,
MPI_Datarep_conversion_function *read_conversion_fn,
MPI_Datarep_conversion_function *write_conversion_fn,
@ -1943,12 +2023,20 @@ OMPI_DECLSPEC int PMPI_Rsend_init(void *buf, int count, MPI_Datatype datatype,
MPI_Request *request);
OMPI_DECLSPEC int PMPI_Scan(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
OMPI_DECLSPEC int PMPI_Iscan(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int PMPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
int root, MPI_Comm comm);
OMPI_DECLSPEC int PMPI_Iscatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
int root, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int PMPI_Scatterv(void *sendbuf, int *sendcounts, int *displs,
MPI_Datatype sendtype, void *recvbuf, int recvcount,
MPI_Datatype recvtype, int root, MPI_Comm comm);
OMPI_DECLSPEC int PMPI_Iscatterv(void *sendbuf, int *sendcounts, int *displs,
MPI_Datatype sendtype, void *recvbuf, int recvcount,
MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int PMPI_Send_init(void *buf, int count, MPI_Datatype datatype,
int dest, int tag, MPI_Comm comm,
MPI_Request *request);


@ -13,6 +13,7 @@
* rights reserved.
* Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2008 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -156,6 +157,22 @@ int mca_coll_base_comm_select(ompi_communicator_t * comm)
COPY(avail->ac_module, comm, scatter);
COPY(avail->ac_module, comm, scatterv);
COPY(avail->ac_module, comm, iallgather);
COPY(avail->ac_module, comm, iallgatherv);
COPY(avail->ac_module, comm, iallreduce);
COPY(avail->ac_module, comm, ialltoall);
COPY(avail->ac_module, comm, ialltoallv);
COPY(avail->ac_module, comm, ialltoallw);
COPY(avail->ac_module, comm, ibarrier);
COPY(avail->ac_module, comm, ibcast);
COPY(avail->ac_module, comm, iexscan);
COPY(avail->ac_module, comm, igather);
COPY(avail->ac_module, comm, igatherv);
COPY(avail->ac_module, comm, ireduce);
COPY(avail->ac_module, comm, ireduce_scatter);
COPY(avail->ac_module, comm, iscan);
COPY(avail->ac_module, comm, iscatter);
COPY(avail->ac_module, comm, iscatterv);
/* release the original module reference and the list item */
OBJ_RELEASE(avail->ac_module);
OBJ_RELEASE(avail);
@ -173,14 +190,37 @@ int mca_coll_base_comm_select(ompi_communicator_t * comm)
(NULL == comm->c_coll.coll_alltoallw) ||
(NULL == comm->c_coll.coll_barrier) ||
(NULL == comm->c_coll.coll_bcast) ||
((OMPI_COMM_IS_INTRA(comm)) && (NULL == comm->c_coll.coll_exscan))
|| (NULL == comm->c_coll.coll_gather)
|| (NULL == comm->c_coll.coll_gatherv)
|| (NULL == comm->c_coll.coll_reduce)
|| (NULL == comm->c_coll.coll_reduce_scatter)
|| ((OMPI_COMM_IS_INTRA(comm)) && (NULL == comm->c_coll.coll_scan))
|| (NULL == comm->c_coll.coll_scatter)
|| (NULL == comm->c_coll.coll_scatterv)) {
((OMPI_COMM_IS_INTRA(comm)) && (NULL == comm->c_coll.coll_exscan)) ||
(NULL == comm->c_coll.coll_gather) ||
(NULL == comm->c_coll.coll_gatherv) ||
(NULL == comm->c_coll.coll_reduce) ||
(NULL == comm->c_coll.coll_reduce_scatter) ||
((OMPI_COMM_IS_INTRA(comm)) && (NULL == comm->c_coll.coll_scan)) ||
(NULL == comm->c_coll.coll_scatter) ||
(NULL == comm->c_coll.coll_scatterv)
#if 0
/* JMS These need to be activated before the nb coll branch is
done */
||
(NULL == comm->c_coll.coll_iallgather) ||
(NULL == comm->c_coll.coll_iallgatherv) ||
(NULL == comm->c_coll.coll_iallreduce) ||
(NULL == comm->c_coll.coll_ialltoall) ||
(NULL == comm->c_coll.coll_ialltoallv) ||
(NULL == comm->c_coll.coll_ialltoallw) ||
(NULL == comm->c_coll.coll_ibarrier) ||
(NULL == comm->c_coll.coll_ibcast) ||
((OMPI_COMM_IS_INTRA(comm)) && (NULL == comm->c_coll.coll_iexscan)) ||
(NULL == comm->c_coll.coll_igather) ||
(NULL == comm->c_coll.coll_igatherv) ||
(NULL == comm->c_coll.coll_ireduce) ||
(NULL == comm->c_coll.coll_ireduce_scatter) ||
((OMPI_COMM_IS_INTRA(comm)) && (NULL == comm->c_coll.coll_iscan)) ||
(NULL == comm->c_coll.coll_iscatter) ||
(NULL == comm->c_coll.coll_iscatterv)
#endif
) {
mca_coll_base_comm_unselect(comm);
return OMPI_ERR_NOT_FOUND;
}
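For context, the COPY() calls above populate the communicator's per-collective table from the highest-priority module that provides each function. A rough sketch of what such a macro does (an illustration only, not the exact definition from coll_base_comm_select.c):

/* Sketch only: if the module provides coll_<func>, cache both the
 * function pointer and the owning module on the communicator, and
 * take a reference on the module so it outlives the selection list. */
#define COPY_SKETCH(m, comm, func)                                        \
    do {                                                                  \
        if (NULL != (m)->coll_ ## func) {                                 \
            (comm)->c_coll.coll_ ## func            = (m)->coll_ ## func; \
            (comm)->c_coll.coll_ ## func ## _module = (m);                \
            OBJ_RETAIN(m);                                                \
        }                                                                 \
    } while (0)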


@ -9,6 +9,8 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2012 Oak Ridge National Laboratory.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -56,6 +58,24 @@ int mca_coll_base_comm_unselect(ompi_communicator_t * comm)
CLOSE(comm, scatter);
CLOSE(comm, scatterv);
CLOSE(comm, iallgather);
CLOSE(comm, iallgatherv);
CLOSE(comm, iallreduce);
CLOSE(comm, ialltoall);
CLOSE(comm, ialltoallv);
CLOSE(comm, ialltoallw);
CLOSE(comm, ibarrier);
CLOSE(comm, ibcast);
CLOSE(comm, iexscan);
CLOSE(comm, igather);
CLOSE(comm, igatherv);
CLOSE(comm, ireduce);
CLOSE(comm, ireduce_scatter);
CLOSE(comm, iscan);
CLOSE(comm, iscatter);
CLOSE(comm, iscatterv);
/* All done */
return OMPI_SUCCESS;
}


@ -50,7 +50,56 @@ int mca_coll_base_associative = 1;
bool mca_coll_base_components_opened_valid = false;
opal_list_t mca_coll_base_components_opened;
OBJ_CLASS_INSTANCE(mca_coll_base_module_t, opal_object_t, NULL, NULL);
/*
* Ensure all function pointers are NULL'ed out to start with
*/
static void coll_base_module_construct(mca_coll_base_module_t *m)
{
m->coll_module_enable = NULL;
/* Collective function pointers */
/* blocking functions */
m->coll_allgather = NULL;
m->coll_allgatherv = NULL;
m->coll_allreduce = NULL;
m->coll_alltoall = NULL;
m->coll_alltoallv = NULL;
m->coll_alltoallw = NULL;
m->coll_barrier = NULL;
m->coll_bcast = NULL;
m->coll_exscan = NULL;
m->coll_gather = NULL;
m->coll_gatherv = NULL;
m->coll_reduce = NULL;
m->coll_reduce_scatter = NULL;
m->coll_scan = NULL;
m->coll_scatter = NULL;
m->coll_scatterv = NULL;
/* nonblocking functions */
m->coll_iallgather = NULL;
m->coll_iallgatherv = NULL;
m->coll_iallreduce = NULL;
m->coll_ialltoall = NULL;
m->coll_ialltoallv = NULL;
m->coll_ialltoallw = NULL;
m->coll_ibarrier = NULL;
m->coll_ibcast = NULL;
m->coll_iexscan = NULL;
m->coll_igather = NULL;
m->coll_igatherv = NULL;
m->coll_ireduce = NULL;
m->coll_ireduce_scatter = NULL;
m->coll_iscan = NULL;
m->coll_iscatter = NULL;
m->coll_iscatterv = NULL;
/* FT event */
m->ft_event = NULL;
}
OBJ_CLASS_INSTANCE(mca_coll_base_module_t, opal_object_t,
coll_base_module_construct, NULL);
/*
* Function for finding and opening either all MCA components, or the one


@ -11,6 +11,7 @@
* All rights reserved.
* Copyright (c) 2007-2008 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007-2008 UT-Battelle, LLC
* Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -63,6 +64,7 @@
#include "opal/mca/crs/crs.h"
#include "opal/mca/crs/base/base.h"
#include "ompi/request/request.h"
BEGIN_C_DECLS
@ -218,6 +220,9 @@ typedef int (*mca_coll_base_module_reduce_fn_t)
typedef int (*mca_coll_base_module_reduce_scatter_fn_t)
(void *sbuf, void *rbuf, int *rcounts, struct ompi_datatype_t *dtype,
struct ompi_op_t *op, struct ompi_communicator_t *comm, struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_reduce_scatter_block_fn_t)
(void *sbuf, void *rbuf, int rcount, struct ompi_datatype_t *dtype,
struct ompi_op_t *op, struct ompi_communicator_t *comm, struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_scan_fn_t)
(void *sbuf, void *rbuf, int count, struct ompi_datatype_t *dtype,
struct ompi_op_t *op, struct ompi_communicator_t *comm, struct mca_coll_base_module_2_0_0_t *module);
@ -230,6 +235,83 @@ typedef int (*mca_coll_base_module_scatterv_fn_t)
void* rbuf, int rcount, struct ompi_datatype_t *rdtype,
int root, struct ompi_communicator_t *comm, struct mca_coll_base_module_2_0_0_t *module);
/* nonblocking collectives */
typedef int (*mca_coll_base_module_iallgather_fn_t)
(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void *rbuf, int rcount, struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_iallgatherv_fn_t)
(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void * rbuf, int *rcounts, int *disps, struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_iallreduce_fn_t)
(void *sbuf, void *rbuf, int count, struct ompi_datatype_t *dtype,
struct ompi_op_t *op, struct ompi_communicator_t *comm,
ompi_request_t ** request, struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_ialltoall_fn_t)
(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void* rbuf, int rcount, struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_ialltoallv_fn_t)
(void *sbuf, int *scounts, int *sdisps, struct ompi_datatype_t *sdtype,
void *rbuf, int *rcounts, int *rdisps, struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_ialltoallw_fn_t)
(void *sbuf, int *scounts, int *sdisps, struct ompi_datatype_t **sdtypes,
void *rbuf, int *rcounts, int *rdisps, struct ompi_datatype_t **rdtypes,
struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_ibarrier_fn_t)
(struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_ibcast_fn_t)
(void *buff, int count, struct ompi_datatype_t *datatype, int root,
struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_iexscan_fn_t)
(void *sbuf, void *rbuf, int count, struct ompi_datatype_t *dtype,
struct ompi_op_t *op, struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_igather_fn_t)
(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void *rbuf, int rcount, struct ompi_datatype_t *rdtype,
int root, struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_igatherv_fn_t)
(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void *rbuf, int *rcounts, int *disps, struct ompi_datatype_t *rdtype,
int root, struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_ireduce_fn_t)
(void *sbuf, void* rbuf, int count, struct ompi_datatype_t *dtype,
struct ompi_op_t *op, int root, struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_ireduce_scatter_fn_t)
(void *sbuf, void *rbuf, int *rcounts, struct ompi_datatype_t *dtype,
struct ompi_op_t *op, struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_ireduce_scatter_block_fn_t)
(void *sbuf, void *rbuf, int rcount, struct ompi_datatype_t *dtype,
struct ompi_op_t *op, struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_iscan_fn_t)
(void *sbuf, void *rbuf, int count, struct ompi_datatype_t *dtype,
struct ompi_op_t *op, struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_iscatter_fn_t)
(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void *rbuf, int rcount, struct ompi_datatype_t *rdtype,
int root, struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_iscatterv_fn_t)
(void *sbuf, int *scounts, int *disps, struct ompi_datatype_t *sdtype,
void* rbuf, int rcount, struct ompi_datatype_t *rdtype,
int root, struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
/**
* Fault Tolerance Awareness function.
@ -298,6 +380,7 @@ struct mca_coll_base_module_2_0_0_t {
mca_coll_base_module_enable_1_1_0_fn_t coll_module_enable;
/* Collective function pointers */
/* blocking functions */
mca_coll_base_module_allgather_fn_t coll_allgather;
mca_coll_base_module_allgatherv_fn_t coll_allgatherv;
mca_coll_base_module_allreduce_fn_t coll_allreduce;
@ -311,9 +394,28 @@ struct mca_coll_base_module_2_0_0_t {
mca_coll_base_module_gatherv_fn_t coll_gatherv;
mca_coll_base_module_reduce_fn_t coll_reduce;
mca_coll_base_module_reduce_scatter_fn_t coll_reduce_scatter;
mca_coll_base_module_reduce_scatter_block_fn_t coll_reduce_scatter_block;
mca_coll_base_module_scan_fn_t coll_scan;
mca_coll_base_module_scatter_fn_t coll_scatter;
mca_coll_base_module_scatterv_fn_t coll_scatterv;
/* nonblocking functions */
mca_coll_base_module_iallgather_fn_t coll_iallgather;
mca_coll_base_module_iallgatherv_fn_t coll_iallgatherv;
mca_coll_base_module_iallreduce_fn_t coll_iallreduce;
mca_coll_base_module_ialltoall_fn_t coll_ialltoall;
mca_coll_base_module_ialltoallv_fn_t coll_ialltoallv;
mca_coll_base_module_ialltoallw_fn_t coll_ialltoallw;
mca_coll_base_module_ibarrier_fn_t coll_ibarrier;
mca_coll_base_module_ibcast_fn_t coll_ibcast;
mca_coll_base_module_iexscan_fn_t coll_iexscan;
mca_coll_base_module_igather_fn_t coll_igather;
mca_coll_base_module_igatherv_fn_t coll_igatherv;
mca_coll_base_module_ireduce_fn_t coll_ireduce;
mca_coll_base_module_ireduce_scatter_fn_t coll_ireduce_scatter;
mca_coll_base_module_ireduce_scatter_block_fn_t coll_ireduce_scatter_block;
mca_coll_base_module_iscan_fn_t coll_iscan;
mca_coll_base_module_iscatter_fn_t coll_iscatter;
mca_coll_base_module_iscatterv_fn_t coll_iscatterv;
/** Fault tolerance event trigger function */
mca_coll_base_module_ft_event_fn_t ft_event;
@ -362,12 +464,49 @@ struct mca_coll_base_comm_coll_t {
mca_coll_base_module_2_0_0_t *coll_reduce_module;
mca_coll_base_module_reduce_scatter_fn_t coll_reduce_scatter;
mca_coll_base_module_2_0_0_t *coll_reduce_scatter_module;
mca_coll_base_module_reduce_scatter_block_fn_t coll_reduce_scatter_block;
mca_coll_base_module_2_0_0_t *coll_reduce_scatter_block_module;
mca_coll_base_module_scan_fn_t coll_scan;
mca_coll_base_module_2_0_0_t *coll_scan_module;
mca_coll_base_module_scatter_fn_t coll_scatter;
mca_coll_base_module_2_0_0_t *coll_scatter_module;
mca_coll_base_module_scatterv_fn_t coll_scatterv;
mca_coll_base_module_2_0_0_t *coll_scatterv_module;
/* nonblocking collectives */
mca_coll_base_module_iallgather_fn_t coll_iallgather;
mca_coll_base_module_2_0_0_t *coll_iallgather_module;
mca_coll_base_module_iallgatherv_fn_t coll_iallgatherv;
mca_coll_base_module_2_0_0_t *coll_iallgatherv_module;
mca_coll_base_module_iallreduce_fn_t coll_iallreduce;
mca_coll_base_module_2_0_0_t *coll_iallreduce_module;
mca_coll_base_module_ialltoall_fn_t coll_ialltoall;
mca_coll_base_module_2_0_0_t *coll_ialltoall_module;
mca_coll_base_module_ialltoallv_fn_t coll_ialltoallv;
mca_coll_base_module_2_0_0_t *coll_ialltoallv_module;
mca_coll_base_module_ialltoallw_fn_t coll_ialltoallw;
mca_coll_base_module_2_0_0_t *coll_ialltoallw_module;
mca_coll_base_module_ibarrier_fn_t coll_ibarrier;
mca_coll_base_module_2_0_0_t *coll_ibarrier_module;
mca_coll_base_module_ibcast_fn_t coll_ibcast;
mca_coll_base_module_2_0_0_t *coll_ibcast_module;
mca_coll_base_module_iexscan_fn_t coll_iexscan;
mca_coll_base_module_2_0_0_t *coll_iexscan_module;
mca_coll_base_module_igather_fn_t coll_igather;
mca_coll_base_module_2_0_0_t *coll_igather_module;
mca_coll_base_module_igatherv_fn_t coll_igatherv;
mca_coll_base_module_2_0_0_t *coll_igatherv_module;
mca_coll_base_module_ireduce_fn_t coll_ireduce;
mca_coll_base_module_2_0_0_t *coll_ireduce_module;
mca_coll_base_module_ireduce_scatter_fn_t coll_ireduce_scatter;
mca_coll_base_module_2_0_0_t *coll_ireduce_scatter_module;
mca_coll_base_module_ireduce_scatter_block_fn_t coll_ireduce_scatter_block;
mca_coll_base_module_2_0_0_t *coll_ireduce_scatter_block_module;
mca_coll_base_module_iscan_fn_t coll_iscan;
mca_coll_base_module_2_0_0_t *coll_iscan_module;
mca_coll_base_module_iscatter_fn_t coll_iscatter;
mca_coll_base_module_2_0_0_t *coll_iscatter_module;
mca_coll_base_module_iscatterv_fn_t coll_iscatterv;
mca_coll_base_module_2_0_0_t *coll_iscatterv_module;
};
typedef struct mca_coll_base_comm_coll_t mca_coll_base_comm_coll_t;
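Given the typedefs and the new coll_i* fields above, a coll component that implements the nonblocking collectives only needs to provide functions with the matching signatures and publish them through its module. A hypothetical sketch (the mca_coll_example_* names and the error constant are illustrative, not part of this commit):

/* Matches mca_coll_base_module_ibarrier_fn_t from coll.h. */
static int mca_coll_example_ibarrier(struct ompi_communicator_t *comm,
                                     ompi_request_t **request,
                                     struct mca_coll_base_module_2_0_0_t *module)
{
    /* A real back-end would start the barrier here and set *request to
       something that completes later; none exists yet in this commit. */
    return OMPI_ERR_NOT_SUPPORTED;  /* assumed placeholder error code */
}

/* ... in the component's comm_query(), next to the blocking slots: */
/*     module->coll_barrier  = mca_coll_example_barrier;   */
/*     module->coll_ibarrier = mca_coll_example_ibarrier;  */

Entries a component leaves NULL stay NULL thanks to coll_base_module_construct(), and mca_coll_base_comm_select() simply skips them when filling the communicator's table.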


@ -11,6 +11,7 @@
# All rights reserved.
# Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2011 Sandia National Laboratories. All rights reserved.
# Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
@ -65,17 +66,25 @@ libmpi_c_mpi_la_SOURCES = \
add_error_string.c \
address.c \
allgather.c \
iallgather.c \
allgatherv.c \
iallgatherv.c \
alloc_mem.c \
allreduce.c \
iallreduce.c \
alltoall.c \
ialltoall.c \
alltoallv.c \
ialltoallv.c \
alltoallw.c \
ialltoallw.c \
attr_delete.c \
attr_get.c \
attr_put.c \
barrier.c \
ibarrier.c \
bcast.c \
ibcast.c \
bsend.c \
bsend_init.c \
buffer_attach.c \
@ -132,11 +141,14 @@ libmpi_c_mpi_la_SOURCES = \
error_class.c \
error_string.c \
exscan.c \
iexscan.c \
finalize.c \
finalized.c \
free_mem.c \
gather.c \
igather.c \
gatherv.c \
igatherv.c \
get_address.c \
get_count.c \
get_elements.c \
@ -166,8 +178,8 @@ libmpi_c_mpi_la_SOURCES = \
group_translate_ranks.c \
group_union.c \
ibsend.c \
improbe.c \
imrecv.c \
improbe.c \
imrecv.c \
info_c2f.c \
info_create.c \
info_delete.c \
@ -193,10 +205,10 @@ libmpi_c_mpi_la_SOURCES = \
keyval_create.c \
keyval_free.c \
lookup_name.c \
message_f2c.c \
message_c2f.c \
mprobe.c \
mrecv.c \
message_f2c.c \
message_c2f.c \
mprobe.c \
mrecv.c \
op_c2f.c \
op_commutative.c \
op_create.c \
@ -214,8 +226,12 @@ libmpi_c_mpi_la_SOURCES = \
recv_init.c \
recv.c \
reduce.c \
ireduce.c \
reduce_local.c \
reduce_scatter.c \
ireduce_scatter.c \
reduce_scatter_block.c \
ireduce_scatter_block.c \
request_c2f.c \
request_f2c.c \
request_free.c \
@ -223,8 +239,11 @@ libmpi_c_mpi_la_SOURCES = \
rsend_init.c \
rsend.c \
scan.c \
iscan.c \
scatter.c \
iscatter.c \
scatterv.c \
iscatterv.c \
send.c \
send_init.c \
sendrecv.c \
@ -287,8 +306,8 @@ libmpi_c_mpi_la_SOURCES = \
waitall.c \
waitany.c \
waitsome.c \
wtime.c \
wtick.c \
wtime.c \
wtick.c \
accumulate.c \
get.c \
put.c \

ompi/mpi/c/iallgather.c (new file, 123 lines)

@ -0,0 +1,123 @@
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Iallgather = PMPI_Iallgather
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Iallgather";
int MPI_Iallgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm, MPI_Request *request)
{
int err;
MEMCHECKER(
int rank;
ptrdiff_t ext;
rank = ompi_comm_rank(comm);
ompi_datatype_type_extent(recvtype, &ext);
memchecker_datatype(recvtype);
memchecker_comm(comm);
/* check whether the actual send buffer is defined. */
if (MPI_IN_PLACE == sendbuf) {
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(recvbuf)+rank*ext,
recvcount, recvtype);
} else {
memchecker_datatype(sendtype);
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
}
/* check whether the receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks on both
intracommunicators and intercommunicators */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
err = MPI_ERR_TYPE;
} else if (recvcount < 0) {
err = MPI_ERR_COUNT;
} else if (MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
} else if (MPI_IN_PLACE != sendbuf) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
/* Do we need to do anything? Everyone had to give the same send
signature, which means that everyone must have given a
sendcount > 0 if there's anything to send for the intra-communicator
case. If we're doing IN_PLACE, however, check recvcount,
not sendcount. */
if ( OMPI_COMM_IS_INTRA(comm) ) {
if ((MPI_IN_PLACE != sendbuf && 0 == sendcount) ||
(0 == recvcount)) {
return MPI_SUCCESS;
}
}
else if ( OMPI_COMM_IS_INTER(comm) ){
/* for inter-communicators, the communication pattern
need not be symmetric. Specifically, one group is
allowed to have sendcount=0, while the other has
a valid sendcount. Thus, the only way not to do
anything is if both sendcount and recvcount are zero. */
if ( 0 == sendcount && 0 == recvcount ) {
return MPI_SUCCESS;
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_iallgather(sendbuf, sendcount, sendtype,
recvbuf, recvcount, recvtype, comm,
request,
comm->c_coll.coll_iallgather_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpi/c/iallgatherv.c (new file, 146 lines)

@ -0,0 +1,146 @@
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2010 University of Houston. All rights reserved.
* Copyright (c) 2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Iallgatherv = PMPI_Iallgatherv
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Iallgatherv";
int MPI_Iallgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int *recvcounts,
int *displs, MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request)
{
int i, size, err;
MEMCHECKER(
int rank;
ptrdiff_t ext;
rank = ompi_comm_rank(comm);
size = ompi_comm_size(comm);
ompi_datatype_type_extent(recvtype, &ext);
memchecker_datatype(recvtype);
memchecker_comm (comm);
/* check whether the receive buffer is addressable. */
for (i = 0; i < size; i++) {
memchecker_call(&opal_memchecker_base_isaddressable,
(char *)(recvbuf)+displs[i]*ext,
recvcounts[i], recvtype);
}
/* check whether the actual send buffer is defined. */
if (MPI_IN_PLACE == sendbuf) {
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(recvbuf)+displs[rank]*ext,
recvcounts[rank], recvtype);
} else {
memchecker_datatype(sendtype);
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
}
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks on both
intracommunicators and intercommunicators */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if (MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
}
if (MPI_IN_PLACE != sendbuf) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
/* We always define the remote group to be the same as the local
group in the case of an intracommunicator, so it's safe to
get the size of the remote group here for both intra- and
intercommunicators */
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
}
}
if (NULL == displs) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_BUFFER, FUNC_NAME);
}
}
/* Do we need to do anything? Everyone had to give the same
signature, which means that everyone must have given a
sum(recvcounts) > 0 if there's anything to do. */
if ( OMPI_COMM_IS_INTRA( comm) ) {
for (i = 0; i < ompi_comm_size(comm); ++i) {
if (0 != recvcounts[i]) {
break;
}
}
if (i >= ompi_comm_size(comm)) {
return MPI_SUCCESS;
}
}
/* There is no rule that can be applied for inter-communicators, since
recvcount(s)=0 only indicates that the processes in the other group
do not send anything, sendcount=0 only indicates that I do not send
anything. However, other processes in my group might very well send
something */
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_iallgatherv(sendbuf, sendcount, sendtype,
recvbuf, recvcounts,
displs, recvtype, comm,
request,
comm->c_coll.coll_iallgatherv_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpi/c/iallreduce.c (new file, 112 lines)

@ -0,0 +1,112 @@
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/op/op.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Iallreduce = PMPI_Iallreduce
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Iallreduce";
int MPI_Iallreduce(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request)
{
int err;
MEMCHECKER(
memchecker_datatype(datatype);
memchecker_comm(comm);
/* check whether the receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, count, datatype);
/* check whether the actual send buffer is defined. */
if (MPI_IN_PLACE == sendbuf) {
memchecker_call(&opal_memchecker_base_isdefined, recvbuf, count, datatype);
} else {
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, count, datatype);
}
);
if (MPI_PARAM_CHECK) {
char *msg;
/* Unrooted operation -- same checks for all ranks on both
intracommunicators and intercommunicators */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if (MPI_OP_NULL == op) {
err = MPI_ERR_OP;
} else if (!ompi_op_is_valid(op, datatype, &msg, FUNC_NAME)) {
int ret = OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, msg);
free(msg);
return ret;
} else if( MPI_IN_PLACE == recvbuf ) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_BUFFER,
FUNC_NAME);
} else if( (sendbuf == recvbuf) &&
(MPI_BOTTOM != sendbuf) &&
(count > 1) ) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_BUFFER,
FUNC_NAME);
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, count);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
/* MPI-1, p114, says that each process must supply at least
one element. But at least the Pallas benchmarks call
MPI_REDUCE with a count of 0. So be sure to handle it. */
if (0 == count) {
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
OBJ_RETAIN(op);
err = comm->c_coll.coll_iallreduce(sendbuf, recvbuf, count,
datatype, op, comm,
request,
comm->c_coll.coll_iallreduce_module);
OBJ_RELEASE(op);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpi/c/ialltoall.c (new file, 92 lines)

@ -0,0 +1,92 @@
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Ialltoall = PMPI_Ialltoall
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Ialltoall";
int MPI_Ialltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm, MPI_Request *request)
{
int err;
MEMCHECKER(
memchecker_datatype(sendtype);
memchecker_datatype(recvtype);
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
memchecker_comm(comm);
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks on both
intracommunicators and intercommunicators */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
err = MPI_ERR_TYPE;
} else if (recvcount < 0) {
err = MPI_ERR_COUNT;
} else if (MPI_IN_PLACE == sendbuf || MPI_IN_PLACE == recvbuf) {
err = MPI_ERR_ARG;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
/* Do we need to do anything? */
if (0 == sendcount && 0 == recvcount) {
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_ialltoall(sendbuf, sendcount, sendtype,
recvbuf, recvcount, recvtype, comm,
request, comm->c_coll.coll_ialltoall_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpi/c/ialltoallv.c (new file, 117 lines)

@ -0,0 +1,117 @@
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2012 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Ialltoallv = PMPI_Ialltoallv
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Ialltoallv";
int MPI_Ialltoallv(void *sendbuf, int *sendcounts, int *sdispls,
MPI_Datatype sendtype,
void *recvbuf, int *recvcounts, int *rdispls,
MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request)
{
int i, size, err;
MEMCHECKER(
ptrdiff_t recv_ext;
ptrdiff_t send_ext;
size = ompi_comm_remote_size(comm);
ompi_datatype_type_extent(recvtype, &recv_ext);
ompi_datatype_type_extent(sendtype, &send_ext);
memchecker_datatype(sendtype);
memchecker_datatype(recvtype);
memchecker_comm(comm);
for ( i = 0; i < size; i++ ) {
/* check if send chunks are defined. */
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(sendbuf)+sdispls[i]*send_ext,
sendcounts[i], sendtype);
/* check if receive chunks are addressable. */
memchecker_call(&opal_memchecker_base_isaddressable,
(char *)(recvbuf)+rdispls[i]*recv_ext,
recvcounts[i], recvtype);
}
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
if ((NULL == sendcounts) || (NULL == sdispls) ||
(NULL == recvcounts) || (NULL == rdispls) ||
MPI_IN_PLACE == sendbuf || MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
/* We always define the remote group to be the same as the local
group in the case of an intracommunicator, so it's safe to
get the size of the remote group here for both intra- and
intercommunicators */
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) {
err = MPI_ERR_COUNT;
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
err = MPI_ERR_TYPE;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_ialltoallv(sendbuf, sendcounts, sdispls, sendtype,
recvbuf, recvcounts, rdispls, recvtype,
comm, request,
comm->c_coll.coll_ialltoallv_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpi/c/ialltoallw.c (new file, 116 lines)

@ -0,0 +1,116 @@
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2012 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Ialltoallw = PMPI_Ialltoallw
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Ialltoallw";
int MPI_Ialltoallw(void *sendbuf, int *sendcounts, int *sdispls,
MPI_Datatype *sendtypes,
void *recvbuf, int *recvcounts, int *rdispls,
MPI_Datatype *recvtypes, MPI_Comm comm, MPI_Request *request)
{
int i, size, err;
MEMCHECKER(
ptrdiff_t recv_ext;
ptrdiff_t send_ext;
size = ompi_comm_remote_size(comm);
memchecker_comm(comm);
for ( i = 0; i < size; i++ ) {
memchecker_datatype(sendtypes[i]);
memchecker_datatype(recvtypes[i]);
ompi_datatype_type_extent(sendtypes[i], &send_ext);
ompi_datatype_type_extent(recvtypes[i], &recv_ext);
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(sendbuf)+sdispls[i]*send_ext,
sendcounts[i], sendtypes[i]);
memchecker_call(&opal_memchecker_base_isaddressable,
(char *)(recvbuf)+rdispls[i]*recv_ext,
recvcounts[i], recvtypes[i]);
}
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
if ((NULL == sendcounts) || (NULL == sdispls) || (NULL == sendtypes) ||
(NULL == recvcounts) || (NULL == rdispls) || (NULL == recvtypes) ||
MPI_IN_PLACE == sendbuf || MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
/* We always define the remote group to be the same as the local
group in the case of an intracommunicator, so it's safe to
get the size of the remote group here for both intra- and
intercommunicators */
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) {
err = MPI_ERR_COUNT;
} else if (MPI_DATATYPE_NULL == recvtypes[i] || NULL == recvtypes[i]) {
err = MPI_ERR_TYPE;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtypes[i], sendcounts[i]);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_ialltoallw(sendbuf, sendcounts, sdispls, sendtypes,
recvbuf, recvcounts, rdispls, recvtypes,
comm, request,
comm->c_coll.coll_ialltoallw_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpi/c/ibarrier.c (new file, 78 lines)

@ -0,0 +1,78 @@
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Ibarrier = PMPI_Ibarrier
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Ibarrier";
int MPI_Ibarrier(MPI_Comm comm, MPI_Request *request)
{
int err = MPI_SUCCESS;
MEMCHECKER(
memchecker_comm(comm);
);
/* Error checking */
if (MPI_PARAM_CHECK) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
/* Intracommunicators: Only invoke the back-end coll module barrier
function if there's more than one process in the communicator */
if (OMPI_COMM_IS_INTRA(comm)) {
if (ompi_comm_size(comm) > 1) {
err = comm->c_coll.coll_ibarrier(comm, request, comm->c_coll.coll_ibarrier_module);
}
}
/* Intercommunicators -- always invoke, because, by definition,
there are always at least two processes in an intercommunicator. */
else {
err = comm->c_coll.coll_ibarrier(comm, request, comm->c_coll.coll_ibarrier_module);
}
/* All done */
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpi/c/ibcast.c (new file, 90 lines)

@ -0,0 +1,90 @@
/*
* Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Ibcast = PMPI_Ibcast
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Ibcast";
int MPI_Ibcast(void *buffer, int count, MPI_Datatype datatype,
int root, MPI_Comm comm, MPI_Request *request)
{
int err;
MEMCHECKER(
memchecker_datatype(datatype);
memchecker_call(&opal_memchecker_base_isdefined, buffer, count, datatype);
memchecker_comm(comm);
);
if (MPI_PARAM_CHECK) {
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
/* Errors for all ranks */
OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, count);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
if (MPI_IN_PLACE == buffer) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
/* Errors for intracommunicators */
if (OMPI_COMM_IS_INTRA(comm)) {
if ((root >= ompi_comm_size(comm)) || (root < 0)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
}
}
/* Errors for intercommunicators */
else {
if (! ((root >= 0 && root < ompi_comm_remote_size(comm)) ||
MPI_ROOT == root || MPI_PROC_NULL == root)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
}
}
}
/* If there's only one node, or if the count is 0, we're done */
if ((OMPI_COMM_IS_INTRA(comm) && ompi_comm_size(comm) <= 1) ||
0 == count) {
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_ibcast(buffer, count, datatype, root, comm,
request,
comm->c_coll.coll_ibcast_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
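
Purely as an illustrative sketch (the buffer, count, and helper name are assumptions, not part of this commit): a nonblocking broadcast lets every rank overlap work that does not touch the buffer with the data movement:

#include <mpi.h>

/* Hypothetical sketch: broadcast n ints from root while overlapping
   independent computation on every rank. */
static void ibcast_overlap(int *buf, int n, int root, MPI_Comm comm)
{
    MPI_Request req;

    MPI_Ibcast(buf, n, MPI_INT, root, comm, &req);
    /* ... computation that does not read or write buf ... */
    MPI_Wait(&req, MPI_STATUS_IGNORE);
    /* buf now holds root's data on every rank */
}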

ompi/mpi/c/iexscan.c (new file)

@ -0,0 +1,93 @@
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/op/op.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Iexscan = PMPI_Iexscan
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Iexscan";
int MPI_Iexscan(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request)
{
int err;
MEMCHECKER(
memchecker_datatype(datatype);
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, count, datatype);
memchecker_comm(comm);
);
if (MPI_PARAM_CHECK) {
char *msg;
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
/* Unrooted operation -- same checks for intracommunicators
and intercommunicators */
else if (MPI_OP_NULL == op) {
err = MPI_ERR_OP;
} else if (!ompi_op_is_valid(op, datatype, &msg, FUNC_NAME)) {
int ret = OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, msg);
free(msg);
return ret;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, count);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
/* Do we need to do anything? (MPI says that reductions have to
have a count of at least 1, but at least IMB calls reduce with
a count of 0 -- blah!) */
if (0 == count) {
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
OBJ_RETAIN(op);
err = comm->c_coll.coll_iexscan(sendbuf, recvbuf, count,
datatype, op, comm,
request,
comm->c_coll.coll_iexscan_module);
OBJ_RELEASE(op);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpi/c/igather.c (new file)

@ -0,0 +1,183 @@
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2012 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2008 University of Houston. All rights reserved.
* Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Igather = PMPI_Igather
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Igather";
int MPI_Igather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
int root, MPI_Comm comm, MPI_Request *request)
{
int err;
MEMCHECKER(
int rank;
ptrdiff_t ext;
rank = ompi_comm_rank(comm);
ompi_datatype_type_extent(recvtype, &ext);
memchecker_comm(comm);
if(OMPI_COMM_IS_INTRA(comm)) {
if(ompi_comm_rank(comm) == root) {
/* check whether root's send buffer is defined. */
if (MPI_IN_PLACE == sendbuf) {
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(recvbuf)+rank*ext,
recvcount, recvtype);
} else {
memchecker_datatype(sendtype);
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
}
memchecker_datatype(recvtype);
/* check whether root's receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
} else {
memchecker_datatype(sendtype);
/* check whether send buffer is defined on other processes. */
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
}
} else {
if (MPI_ROOT == root) {
memchecker_datatype(recvtype);
/* check whether root's receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
} else if (MPI_PROC_NULL != root) {
memchecker_datatype(sendtype);
/* check whether send buffer is defined. */
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
}
}
);
if (MPI_PARAM_CHECK) {
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if ((ompi_comm_rank(comm) != root && MPI_IN_PLACE == sendbuf) ||
(ompi_comm_rank(comm) == root && MPI_IN_PLACE == recvbuf)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
/* Errors for intracommunicators */
if (OMPI_COMM_IS_INTRA(comm)) {
/* Errors for all ranks */
if ((root >= ompi_comm_size(comm)) || (root < 0)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
}
if (MPI_IN_PLACE != sendbuf) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
/* Errors for the root. Some of these could have been
combined into compound if statements above, but since
this whole section can be compiled out (or turned off at
run time) for efficiency, it's more clear to separate
them out into individual tests. */
if (ompi_comm_rank(comm) == root) {
if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
}
if (recvcount < 0) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
}
}
}
/* Errors for intercommunicators */
else {
if (! ((root >= 0 && root < ompi_comm_remote_size(comm)) ||
MPI_ROOT == root || MPI_PROC_NULL == root)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
}
/* Errors for the senders */
if (MPI_ROOT != root && MPI_PROC_NULL != root) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
/* Errors for the root. Ditto on the comment above -- these
error checks could have been combined above, but let's
make the code easier to read. */
else if (MPI_ROOT == root) {
if (recvcount < 0) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
}
if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
}
}
}
}
/* Do we need to do anything? */
if ((0 == sendcount && MPI_ROOT != root &&
(ompi_comm_rank(comm) != root ||
(ompi_comm_rank(comm) == root && MPI_IN_PLACE != sendbuf))) ||
(ompi_comm_rank(comm) == root && MPI_IN_PLACE == sendbuf &&
0 == recvcount) ||
(0 == recvcount && (MPI_ROOT == root || MPI_PROC_NULL == root))) {
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_igather(sendbuf, sendcount, sendtype, recvbuf,
recvcount, recvtype, root, comm,
request,
comm->c_coll.coll_igather_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
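
A hypothetical caller-side sketch (names and sizes are assumptions, not from this commit): only the root needs a valid receive buffer, mirroring the parameter checks above:

#include <mpi.h>
#include <stdlib.h>

/* Hypothetical sketch: gather one int from every rank at the root. */
static void igather_one_int(int value, int root, MPI_Comm comm)
{
    int rank, size, *all = NULL;
    MPI_Request req;

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);
    if (rank == root) {
        all = malloc(size * sizeof(int));   /* recvbuf is significant only at the root */
    }
    MPI_Igather(&value, 1, MPI_INT, all, 1, MPI_INT, root, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);
    free(all);                              /* free(NULL) is a no-op on non-root ranks */
}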

ompi/mpi/c/igatherv.c (new file)

@ -0,0 +1,198 @@
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2012 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Igatherv = PMPI_Igatherv
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Igatherv";
int MPI_Igatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int *recvcounts, int *displs,
MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Request *request)
{
int i, size, err;
MEMCHECKER(
int rank;
ptrdiff_t ext;
size = ompi_comm_remote_size(comm);
rank = ompi_comm_rank(comm);
ompi_datatype_type_extent(recvtype, &ext);
memchecker_comm(comm);
if(OMPI_COMM_IS_INTRA(comm)) {
if(ompi_comm_rank(comm) == root) {
/* check whether root's send buffer is defined. */
if (MPI_IN_PLACE == sendbuf) {
for (i = 0; i < size; i++) {
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(recvbuf)+displs[i]*ext,
recvcounts[i], recvtype);
}
} else {
memchecker_datatype(sendtype);
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
}
memchecker_datatype(recvtype);
/* check whether root's receive buffer is addressable. */
for (i = 0; i < size; i++) {
memchecker_call(&opal_memchecker_base_isaddressable,
(char *)(recvbuf)+displs[i]*ext,
recvcounts[i], recvtype);
}
} else {
memchecker_datatype(sendtype);
/* check whether send buffer is defined on other processes. */
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
}
} else {
if (MPI_ROOT == root) {
memchecker_datatype(recvtype);
/* check whether root's receive buffer is addressable. */
for (i = 0; i < size; i++) {
memchecker_call(&opal_memchecker_base_isaddressable,
(char *)(recvbuf)+displs[i]*ext,
recvcounts[i], recvtype);
}
} else if (MPI_PROC_NULL != root) {
memchecker_datatype(sendtype);
/* check whether send buffer is defined. */
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
}
}
);
if (MPI_PARAM_CHECK) {
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if ((ompi_comm_rank(comm) != root && MPI_IN_PLACE == sendbuf) ||
(ompi_comm_rank(comm) == root && MPI_IN_PLACE == recvbuf)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
/* Errors for intracommunicators */
if (OMPI_COMM_IS_INTRA(comm)) {
/* Errors for all ranks */
if ((root >= ompi_comm_size(comm)) || (root < 0)) {
err = MPI_ERR_ROOT;
} else if (MPI_IN_PLACE != sendbuf) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
/* Errors for the root. Some of these could have been
combined into compound if statements above, but since
this whole section can be compiled out (or turned off at
run time) for efficiency, it's more clear to separate
them out into individual tests. */
if (ompi_comm_rank(comm) == root) {
if (NULL == displs) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
if (NULL == recvcounts) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
}
size = ompi_comm_size(comm);
for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
}
}
}
}
/* Errors for intercommunicators */
else {
if (! ((root >= 0 && root < ompi_comm_remote_size(comm)) ||
MPI_ROOT == root || MPI_PROC_NULL == root)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
}
/* Errors for the senders */
if (MPI_ROOT != root && MPI_PROC_NULL != root) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
/* Errors for the root. Ditto on the comment above -- these
error checks could have been combined above, but let's
make the code easier to read. */
else if (MPI_ROOT == root) {
if (NULL == displs) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
if (NULL == recvcounts) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
}
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
}
}
}
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_igatherv(sendbuf, sendcount, sendtype, recvbuf,
recvcounts, displs,
recvtype, root, comm,
request,
comm->c_coll.coll_igatherv_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpi/c/ireduce.c (new file)

@ -0,0 +1,138 @@
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/op/op.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Ireduce = PMPI_Ireduce
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Ireduce";
int MPI_Ireduce(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm, MPI_Request *request)
{
int err;
MEMCHECKER(
memchecker_datatype(datatype);
memchecker_comm(comm);
if(OMPI_COMM_IS_INTRA(comm)) {
if(ompi_comm_rank(comm) == root) {
/* check whether root's send buffer is defined. */
if (MPI_IN_PLACE == sendbuf) {
memchecker_call(&opal_memchecker_base_isdefined, recvbuf, count, datatype);
} else {
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, count, datatype);
}
/* check whether root's receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, count, datatype);
} else {
/* check whether send buffer is defined on other processes. */
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, count, datatype);
}
} else {
if (MPI_ROOT == root) {
/* check whether root's receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, count, datatype);
} else if (MPI_PROC_NULL != root) {
/* check whether send buffer is defined. */
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, count, datatype);
}
}
);
if (MPI_PARAM_CHECK) {
char *msg;
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
/* Checks for all ranks */
else if (MPI_OP_NULL == op || NULL == op) {
err = MPI_ERR_OP;
} else if (!ompi_op_is_valid(op, datatype, &msg, FUNC_NAME)) {
int ret = OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, msg);
free(msg);
return ret;
} else if ((ompi_comm_rank(comm) != root && MPI_IN_PLACE == sendbuf) ||
(ompi_comm_rank(comm) == root && ((MPI_IN_PLACE == recvbuf) || (sendbuf == recvbuf)))) {
err = MPI_ERR_ARG;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, count);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
/* Intercommunicator errors */
if (!OMPI_COMM_IS_INTRA(comm)) {
if (! ((root >= 0 && root < ompi_comm_remote_size(comm)) ||
MPI_ROOT == root || MPI_PROC_NULL == root)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
}
}
/* Intracommunicator errors */
else {
if (root < 0 || root >= ompi_comm_size(comm)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
}
}
}
/* Do we need to do anything? (MPI says that reductions have to
have a count of at least 1, but at least IMB calls reduce with
a count of 0 -- blah!) */
if (0 == count) {
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
OBJ_RETAIN(op);
err = comm->c_coll.coll_ireduce(sendbuf, recvbuf, count,
datatype, op, root, comm,
request,
comm->c_coll.coll_ireduce_module);
OBJ_RELEASE(op);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
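
For completeness, a hypothetical sketch of driving the new call with MPI_Test instead of MPI_Wait, which is the usual way to overlap a reduction with other work (the helper name and buffers are assumptions):

#include <mpi.h>

/* Hypothetical sketch: sum n doubles onto the root while polling for
   completion between slices of other work. */
static void ireduce_poll(double *local, double *global, int n,
                         int root, MPI_Comm comm)
{
    MPI_Request req;
    int done = 0;

    MPI_Ireduce(local, global, n, MPI_DOUBLE, MPI_SUM, root, comm, &req);
    while (!done) {
        MPI_Test(&req, &done, MPI_STATUS_IGNORE);
        /* ... a slice of unrelated work per iteration ... */
    }
}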

ompi/mpi/c/ireduce_scatter.c (new file)

@ -0,0 +1,133 @@
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2012 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/op/op.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Ireduce_scatter = PMPI_Ireduce_scatter
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Ireduce_scatter";
int MPI_Ireduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request)
{
int i, err, size, count;
MEMCHECKER(
int rank;
size = ompi_comm_size(comm);
rank = ompi_comm_rank(comm);
/* compute the total number of elements so the whole send buffer
   can be checked below (the original condition made this a no-op) */
for (count = i = 0; i < size; ++i) {
count += recvcounts[i];
}
memchecker_comm(comm);
memchecker_datatype(datatype);
/* check whether the receive buffer of the current process is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf,
recvcounts[rank], datatype);
/* check whether the actual send buffer is defined. */
if(MPI_IN_PLACE == sendbuf) {
memchecker_call(&opal_memchecker_base_isdefined, recvbuf, count, datatype);
} else {
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, count, datatype);
}
);
if (MPI_PARAM_CHECK) {
char *msg;
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
/* Unrooted operation; same checks for all ranks on both
intracommunicators and intercommunicators */
else if (MPI_OP_NULL == op || NULL == op) {
err = MPI_ERR_OP;
} else if (!ompi_op_is_valid(op, datatype, &msg, FUNC_NAME)) {
int ret = OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, msg);
free(msg);
return ret;
} else if (NULL == recvcounts) {
err = MPI_ERR_COUNT;
} else if (MPI_IN_PLACE == recvbuf) {
err = MPI_ERR_ARG;
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
/* Based on the standard each group has to provide the same total
number of elements, so the size of the recvcounts array depends
on the number of participants in the local group. */
size = ompi_comm_size(comm);
for (i = 0; i < size; ++i) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, recvcounts[i]);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
}
/* MPI-1, p114, says that each process must supply at least one
element. But at least the Pallas benchmarks call MPI_REDUCE
with a count of 0. So be sure to handle it. Grrr... */
size = ompi_comm_size(comm);
for (count = i = 0; i < size; ++i) {
if (0 == recvcounts[i]) {
++count;
}
}
if (size == count) {
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
OBJ_RETAIN(op);
err = comm->c_coll.coll_ireduce_scatter(sendbuf, recvbuf, recvcounts,
datatype, op, comm,
request,
comm->c_coll.coll_ireduce_scatter_module);
OBJ_RELEASE(op);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpi/c/ireduce_scatter_block.c (new file)

@ -0,0 +1,107 @@
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2012 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012 Oak Ridge National Labs. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/op/op.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Ireduce_scatter_block = PMPI_Ireduce_scatter_block
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Reduce_scatter_block";
int MPI_Ireduce_scatter_block(void *sendbuf, void *recvbuf, int recvcount,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request)
{
int err;
MEMCHECKER(
int rank, size;
size = ompi_comm_size(comm);
rank = ompi_comm_rank(comm);
memchecker_comm(comm);
memchecker_datatype(datatype);
/* check whether the receive buffer of the current process is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf,
recvcount, datatype);
/* check whether the actual send buffer is defined. */
if(MPI_IN_PLACE == sendbuf) {
memchecker_call(&opal_memchecker_base_isdefined, recvbuf, recvcount, datatype);
} else {
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, recvcount, datatype);
}
);
if (MPI_PARAM_CHECK) {
char *msg;
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
/* Unrooted operation; same checks for all ranks on both
intracommunicators and intercommunicators */
else if (MPI_OP_NULL == op || NULL == op) {
err = MPI_ERR_OP;
} else if (!ompi_op_is_valid(op, datatype, &msg, FUNC_NAME)) {
int ret = OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, msg);
free(msg);
return ret;
} else if (MPI_IN_PLACE == recvbuf) {
err = MPI_ERR_ARG;
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, recvcount);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
OBJ_RETAIN(op);
err = comm->c_coll.coll_ireduce_scatter_block(sendbuf, recvbuf, recvcount,
datatype, op, comm,
request,
comm->c_coll.coll_ireduce_scatter_block_module);
OBJ_RELEASE(op);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpi/c/iscan.c (new file)

@ -0,0 +1,107 @@
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/op/op.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Iscan = PMPI_Iscan
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Iscan";
int MPI_Iscan(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request)
{
int err;
MEMCHECKER(
memchecker_datatype(datatype);
memchecker_comm(comm);
if (MPI_IN_PLACE != sendbuf) {
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, count, datatype);
} else {
memchecker_call(&opal_memchecker_base_isdefined, recvbuf, count, datatype);
}
);
if (MPI_PARAM_CHECK) {
char *msg;
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
/* No intercommunicators allowed! (MPI does not define
MPI_SCAN on intercommunicators) */
else if (OMPI_COMM_IS_INTER(comm)) {
err = MPI_ERR_COMM;
}
/* Unrooted operation; checks for all ranks */
else if (MPI_OP_NULL == op || NULL == op) {
err = MPI_ERR_OP;
} else if (MPI_IN_PLACE == recvbuf) {
err = MPI_ERR_ARG;
} else if (!ompi_op_is_valid(op, datatype, &msg, FUNC_NAME)) {
int ret = OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, msg);
free(msg);
return ret;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, count);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
/* Do we need to do anything? (MPI says that reductions have to
have a count of at least 1, but at least IMB calls reduce with
a count of 0 -- blah!) */
if (0 == count) {
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
/* Call the coll component to actually perform the scan */
OBJ_RETAIN(op);
err = comm->c_coll.coll_iscan(sendbuf, recvbuf, count,
datatype, op, comm,
request,
comm->c_coll.coll_iscan_module);
OBJ_RELEASE(op);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpi/c/iscatter.c (new file)

@ -0,0 +1,166 @@
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2008 University of Houston. All rights reserved.
* Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Iscatter = PMPI_Iscatter
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Iscatter";
int MPI_Iscatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
int root, MPI_Comm comm, MPI_Request *request)
{
int err;
MEMCHECKER(
memchecker_comm(comm);
if(OMPI_COMM_IS_INTRA(comm)) {
if(ompi_comm_rank(comm) == root) {
memchecker_datatype(sendtype);
/* check whether root's send buffer is defined. */
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
if(MPI_IN_PLACE != recvbuf) {
memchecker_datatype(recvtype);
/* check whether receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
}
} else {
memchecker_datatype(recvtype);
/* check whether receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
}
} else {
if(MPI_ROOT == root) {
memchecker_datatype(sendtype);
/* check whether root's send buffer is defined. */
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
} else if (MPI_PROC_NULL != root) {
memchecker_datatype(recvtype);
/* check whether receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
}
}
);
if (MPI_PARAM_CHECK) {
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if ((ompi_comm_rank(comm) != root && MPI_IN_PLACE == recvbuf) ||
(ompi_comm_rank(comm) == root && MPI_IN_PLACE == sendbuf)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
/* Errors for intracommunicators */
if (OMPI_COMM_IS_INTRA(comm)) {
/* Errors for all ranks */
if ((root >= ompi_comm_size(comm)) || (root < 0)) {
err = MPI_ERR_ROOT;
} else if (MPI_IN_PLACE != recvbuf) {
if (recvcount < 0) {
err = MPI_ERR_COUNT;
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
err = MPI_ERR_TYPE;
}
}
/* Errors for the root. Some of these could have been
combined into compound if statements above, but since
this whole section can be compiled out (or turned off at
run time) for efficiency, it's more clear to separate
them out into individual tests. */
else if (ompi_comm_rank(comm) == root) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
/* Errors for intercommunicators */
else {
if (! ((root >= 0 && root < ompi_comm_remote_size(comm)) ||
MPI_ROOT == root || MPI_PROC_NULL == root)) {
err = MPI_ERR_ROOT;
}
/* Errors for the receivers */
else if (MPI_ROOT != root && MPI_PROC_NULL != root) {
if (recvcount < 0) {
err = MPI_ERR_COUNT;
} else if (MPI_DATATYPE_NULL == recvtype) {
err = MPI_ERR_TYPE;
}
}
/* Errors for the root. Ditto on the comment above -- these
error checks could have been combined above, but let's
make the code easier to read. */
else if (MPI_ROOT == root) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
}
/* Do we need to do anything? */
if ((0 == recvcount && MPI_ROOT != root &&
(ompi_comm_rank(comm) != root ||
(ompi_comm_rank(comm) == root && MPI_IN_PLACE != recvbuf))) ||
(ompi_comm_rank(comm) == root && MPI_IN_PLACE == recvbuf &&
0 == sendcount) ||
(0 == sendcount && (MPI_ROOT == root || MPI_PROC_NULL == root))) {
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_iscatter(sendbuf, sendcount, sendtype, recvbuf,
recvcount, recvtype, root, comm,
request,
comm->c_coll.coll_iscatter_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpi/c/iscatterv.c (new file)

@ -0,0 +1,195 @@
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2012 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Iscatterv = PMPI_Iscatterv
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Iscatterv";
int MPI_Iscatterv(void *sendbuf, int *sendcounts, int *displs,
MPI_Datatype sendtype, void *recvbuf, int recvcount,
MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Request *request)
{
int i, size, err;
MEMCHECKER(
ptrdiff_t ext;
size = ompi_comm_remote_size(comm);
ompi_datatype_type_extent(recvtype, &ext);
memchecker_comm(comm);
if(OMPI_COMM_IS_INTRA(comm)) {
if(ompi_comm_rank(comm) == root) {
memchecker_datatype(sendtype);
/* check whether root's send buffer is defined. */
for (i = 0; i < size; i++) {
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(sendbuf)+displs[i]*ext,
sendcounts[i], sendtype);
}
if(MPI_IN_PLACE != recvbuf) {
memchecker_datatype(recvtype);
/* check whether receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
}
} else {
memchecker_datatype(recvtype);
/* check whether receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
}
} else {
if(MPI_ROOT == root) {
memchecker_datatype(sendtype);
/* check whether root's send buffer is defined. */
for (i = 0; i < size; i++) {
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(sendbuf)+displs[i]*ext,
sendcounts[i], sendtype);
}
} else if (MPI_PROC_NULL != root) {
/* check whether receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
}
}
);
if (MPI_PARAM_CHECK) {
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if ((ompi_comm_rank(comm) != root && MPI_IN_PLACE == recvbuf) ||
(ompi_comm_rank(comm) == root && MPI_IN_PLACE == sendbuf)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
/* Errors for intracommunicators */
if (OMPI_COMM_IS_INTRA(comm)) {
/* Errors for all ranks */
if ((root >= ompi_comm_size(comm)) || (root < 0)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
}
if (MPI_IN_PLACE != recvbuf) {
if (recvcount < 0) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT,
FUNC_NAME);
}
if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE,
FUNC_NAME);
}
}
/* Errors for the root. Some of these could have been
combined into compound if statements above, but since
this whole section can be compiled out (or turned off at
run time) for efficiency, it's more clear to separate
them out into individual tests. */
if (ompi_comm_rank(comm) == root) {
if (NULL == displs) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
if (NULL == sendcounts) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
}
size = ompi_comm_size(comm);
for (i = 0; i < size; ++i) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
}
}
/* Errors for intercommunicators */
else {
if (! ((root >= 0 && root < ompi_comm_remote_size(comm)) ||
MPI_ROOT == root || MPI_PROC_NULL == root)) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ROOT, FUNC_NAME);
}
/* Errors for the receivers */
if (MPI_ROOT != root && MPI_PROC_NULL != root) {
if (recvcount < 0) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
}
if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
}
}
/* Errors for the root. Ditto on the comment above -- these
error checks could have been combined above, but let's
make the code easier to read. */
else if (MPI_ROOT == root) {
if (NULL == displs) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
if (NULL == sendcounts) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
}
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
}
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_iscatterv(sendbuf, sendcounts, displs, sendtype,
recvbuf, recvcount, recvtype, root, comm,
request,
comm->c_coll.coll_iscatterv_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
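
Finally, a hypothetical sketch of the vector variant; it assumes the root's data array is large enough for the summed counts, and shows that sendcounts/displs are significant only at the root while every rank posts its own matching receive count:

#include <mpi.h>
#include <stdlib.h>

/* Hypothetical sketch: scatter uneven chunks (1..4 ints per rank). */
static void iscatterv_uneven(int *data, int root, MPI_Comm comm)
{
    int rank, size, i, mine[4];
    int *counts = NULL, *displs = NULL;
    MPI_Request req;

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);
    if (rank == root) {                      /* only the root builds counts/displs */
        counts = malloc(size * sizeof(int));
        displs = malloc(size * sizeof(int));
        for (i = 0; i < size; ++i) {
            counts[i] = (i % 4) + 1;
            displs[i] = (0 == i) ? 0 : displs[i - 1] + counts[i - 1];
        }
    }
    MPI_Iscatterv(data, counts, displs, MPI_INT,
                  mine, (rank % 4) + 1, MPI_INT, root, comm, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);
    free(counts);
    free(displs);
}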


@ -12,6 +12,7 @@
# All rights reserved.
# Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2011 Sandia National Laboratories. All rights reserved.
# Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
@ -47,17 +48,25 @@ nodist_libmpi_c_pmpi_la_SOURCES = \
padd_error_string.c \
paddress.c \
pallgather.c \
piallgather.c \
pallgatherv.c \
piallgatherv.c \
palloc_mem.c \
pallreduce.c \
piallreduce.c \
palltoall.c \
pialltoall.c \
palltoallv.c \
pialltoallv.c \
palltoallw.c \
pialltoallw.c \
pattr_delete.c \
pattr_get.c \
pattr_put.c \
pbarrier.c \
pibarrier.c \
pbcast.c \
pibcast.c \
pbsend.c \
pbsend_init.c \
pbuffer_attach.c \
@ -114,11 +123,14 @@ nodist_libmpi_c_pmpi_la_SOURCES = \
perror_class.c \
perror_string.c \
pexscan.c \
piexscan.c \
pfinalize.c \
pfinalized.c \
pfree_mem.c \
pgather.c \
pigather.c \
pgatherv.c \
pigatherv.c \
pget_address.c \
pget_count.c \
pget_elements.c \
@ -196,8 +208,10 @@ nodist_libmpi_c_pmpi_la_SOURCES = \
precv_init.c \
precv.c \
preduce.c \
pireduce.c \
preduce_local.c \
preduce_scatter.c \
pireduce_scatter.c \
prequest_c2f.c \
prequest_f2c.c \
prequest_free.c \
@ -205,8 +219,11 @@ nodist_libmpi_c_pmpi_la_SOURCES = \
prsend_init.c \
prsend.c \
pscan.c \
piscan.c \
pscatter.c \
piscatter.c \
pscatterv.c \
piscatterv.c \
psend.c \
psend_init.c \
psendrecv.c \


@ -11,6 +11,7 @@
* All rights reserved.
* Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2011 Sandia National Laboratories. All rights reserved.
* Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -33,17 +34,25 @@
#define MPI_Add_error_string PMPI_Add_error_string
#define MPI_Address PMPI_Address
#define MPI_Allgather PMPI_Allgather
#define MPI_Iallgather PMPI_Iallgather
#define MPI_Allgatherv PMPI_Allgatherv
#define MPI_Iallgatherv PMPI_Iallgatherv
#define MPI_Alloc_mem PMPI_Alloc_mem
#define MPI_Allreduce PMPI_Allreduce
#define MPI_Iallreduce PMPI_Iallreduce
#define MPI_Alltoall PMPI_Alltoall
#define MPI_Ialltoall PMPI_Ialltoall
#define MPI_Alltoallv PMPI_Alltoallv
#define MPI_Ialltoallv PMPI_Ialltoallv
#define MPI_Alltoallw PMPI_Alltoallw
#define MPI_Ialltoallw PMPI_Ialltoallw
#define MPI_Attr_delete PMPI_Attr_delete
#define MPI_Attr_get PMPI_Attr_get
#define MPI_Attr_put PMPI_Attr_put
#define MPI_Barrier PMPI_Barrier
#define MPI_Ibarrier PMPI_Ibarrier
#define MPI_Bcast PMPI_Bcast
#define MPI_Ibcast PMPI_Ibcast
#define MPI_Bsend_init PMPI_Bsend_init
#define MPI_Bsend PMPI_Bsend
#define MPI_Buffer_attach PMPI_Buffer_attach
@ -100,6 +109,7 @@
#define MPI_Error_class PMPI_Error_class
#define MPI_Error_string PMPI_Error_string
#define MPI_Exscan PMPI_Exscan
#define MPI_Iexscan PMPI_Iexscan
#define MPI_File_c2f PMPI_File_c2f
#define MPI_File_call_errhandler PMPI_File_call_errhandler
#define MPI_File_close PMPI_File_close
@ -161,7 +171,9 @@
#define MPI_Finalized PMPI_Finalized
#define MPI_Gather PMPI_Gather
#define MPI_Igather PMPI_Igather
#define MPI_Gatherv PMPI_Gatherv
#define MPI_Igatherv PMPI_Igatherv
#define MPI_Get_address PMPI_Get_address
#define MPI_Get_count PMPI_Get_count
#define MPI_Get_elements PMPI_Get_elements
@ -243,8 +255,12 @@
#define MPI_Recv_init PMPI_Recv_init
#define MPI_Recv PMPI_Recv
#define MPI_Reduce PMPI_Reduce
#define MPI_Ireduce PMPI_Ireduce
#define MPI_Reduce_local PMPI_Reduce_local
#define MPI_Reduce_scatter PMPI_Reduce_scatter
#define MPI_Ireduce_scatter PMPI_Ireduce_scatter
#define MPI_Reduce_scatter_block PMPI_Reduce_scatter_block
#define MPI_Ireduce_scatter_block PMPI_Ireduce_scatter_block
#define MPI_Register_datarep PMPI_Register_datarep
#define MPI_Request_c2f PMPI_Request_c2f
#define MPI_Request_f2c PMPI_Request_f2c
@ -253,8 +269,11 @@
#define MPI_Rsend_init PMPI_Rsend_init
#define MPI_Rsend PMPI_Rsend
#define MPI_Scan PMPI_Scan
#define MPI_Iscan PMPI_Iscan
#define MPI_Scatter PMPI_Scatter
#define MPI_Iscatter PMPI_Iscatter
#define MPI_Scatterv PMPI_Scatterv
#define MPI_Iscatterv PMPI_Iscatterv
#define MPI_Send_init PMPI_Send_init
#define MPI_Send PMPI_Send
#define MPI_Sendrecv PMPI_Sendrecv

ompi/mpi/c/reduce_scatter_block.c (new file)

@ -0,0 +1,106 @@
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2012 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012 Oak Ridge National Labs. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/op/op.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Reduce_scatter_block = PMPI_Reduce_scatter_block
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Reduce_scatter_block";
int MPI_Reduce_scatter_block(void *sendbuf, void *recvbuf, int recvcount,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
int err;
MEMCHECKER(
int rank, size;
size = ompi_comm_size(comm);
rank = ompi_comm_rank(comm);
memchecker_comm(comm);
memchecker_datatype(datatype);
/* check whether the receive buffer of the current process is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf,
recvcount, datatype);
/* check whether the actual send buffer is defined. */
if(MPI_IN_PLACE == sendbuf) {
memchecker_call(&opal_memchecker_base_isdefined, recvbuf, recvcount, datatype);
} else {
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, recvcount, datatype);
}
);
if (MPI_PARAM_CHECK) {
char *msg;
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
/* Unrooted operation; same checks for all ranks on both
intracommunicators and intercommunicators */
else if (MPI_OP_NULL == op || NULL == op) {
err = MPI_ERR_OP;
} else if (!ompi_op_is_valid(op, datatype, &msg, FUNC_NAME)) {
int ret = OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, msg);
free(msg);
return ret;
} else if (MPI_IN_PLACE == recvbuf) {
err = MPI_ERR_ARG;
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, recvcount);
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
OBJ_RETAIN(op);
err = comm->c_coll.coll_reduce_scatter_block(sendbuf, recvbuf, recvcount,
datatype, op, comm,
comm->c_coll.coll_reduce_scatter_block_module);
OBJ_RELEASE(op);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}


@ -933,6 +933,26 @@ subroutine MPI_Allgather_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvty
end subroutine MPI_Allgather_f08
end interface MPI_Allgather
interface MPI_Iallgather
subroutine MPI_Iallgather_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype, &
comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Iallgather_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Iallgather_f08
end interface MPI_Iallgather
interface MPI_Allgatherv
subroutine MPI_Allgatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,displs, &
recvtype,comm,ierror &
@ -953,6 +973,27 @@ subroutine MPI_Allgatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,disp
end subroutine MPI_Allgatherv_f08
end interface MPI_Allgatherv
interface MPI_Iallgatherv
subroutine MPI_Iallgatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,displs, &
recvtype,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Iallgatherv_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount
INTEGER, INTENT(IN) :: recvcounts(*), displs(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Iallgatherv_f08
end interface MPI_Iallgatherv
interface MPI_Allreduce
subroutine MPI_Allreduce_f08(sendbuf,recvbuf,count,datatype,op,comm,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Allreduce_f08")
@ -972,6 +1013,26 @@ subroutine MPI_Allreduce_f08(sendbuf,recvbuf,count,datatype,op,comm,ierror &
end subroutine MPI_Allreduce_f08
end interface MPI_Allreduce
interface MPI_Iallreduce
subroutine MPI_Iallreduce_f08(sendbuf,recvbuf,count,datatype,op,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Iallreduce_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: count
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Iallreduce_f08
end interface MPI_Iallreduce
interface MPI_Alltoall
subroutine MPI_Alltoall_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype, &
comm,ierror &
@ -991,6 +1052,26 @@ subroutine MPI_Alltoall_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtyp
end subroutine MPI_Alltoall_f08
end interface MPI_Alltoall
interface MPI_Ialltoall
subroutine MPI_Ialltoall_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype, &
comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Ialltoall_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Ialltoall_f08
end interface MPI_Ialltoall
interface MPI_Alltoallv
subroutine MPI_Alltoallv_f08(sendbuf,sendcounts,sdispls,sendtype,recvbuf,recvcounts, &
rdispls,recvtype,comm,ierror &
@ -1010,6 +1091,26 @@ subroutine MPI_Alltoallv_f08(sendbuf,sendcounts,sdispls,sendtype,recvbuf,recvcou
end subroutine MPI_Alltoallv_f08
end interface MPI_Alltoallv
interface MPI_Ialltoallv
subroutine MPI_Ialltoallv_f08(sendbuf,sendcounts,sdispls,sendtype,recvbuf,recvcounts, &
rdispls,recvtype,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Ialltoallv_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Ialltoallv_f08
end interface MPI_Ialltoallv
interface MPI_Alltoallw
subroutine MPI_Alltoallw_f08(sendbuf,sendcounts,sdispls,sendtypes,recvbuf,recvcounts, &
rdispls,recvtypes,comm,ierror &
@ -1029,6 +1130,26 @@ subroutine MPI_Alltoallw_f08(sendbuf,sendcounts,sdispls,sendtypes,recvbuf,recvco
end subroutine MPI_Alltoallw_f08
end interface MPI_Alltoallw
interface MPI_Ialltoallw
subroutine MPI_Ialltoallw_f08(sendbuf,sendcounts,sdispls,sendtypes,recvbuf,recvcounts, &
rdispls,recvtypes,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Ialltoallw_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtypes(*), recvtypes(*)
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Ialltoallw_f08
end interface MPI_Ialltoallw
interface MPI_Barrier
subroutine MPI_Barrier_f08(comm,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Barrier_f08")
@ -1039,6 +1160,17 @@ subroutine MPI_Barrier_f08(comm,ierror &
end subroutine MPI_Barrier_f08
end interface MPI_Barrier
interface MPI_Ibarrier
subroutine MPI_Ibarrier_f08(comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Ibarrier_f08")
use :: mpi_f08_types, only : MPI_Comm, MPI_Request
implicit none
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Ibarrier_f08
end interface MPI_Ibarrier
interface MPI_Bcast
subroutine MPI_Bcast_f08(buffer,count,datatype,root,comm,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Bcast_f08")
@ -1056,6 +1188,24 @@ subroutine MPI_Bcast_f08(buffer,count,datatype,root,comm,ierror &
end subroutine MPI_Bcast_f08
end interface MPI_Bcast
interface MPI_Ibcast
subroutine MPI_Ibcast_f08(buffer,count,datatype,root,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Ibcast_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: buffer
!$PRAGMA IGNORE_TKR buffer
!DIR$ IGNORE_TKR buffer
!IBM* IGNORE_TKR buffer
OMPI_FORTRAN_IGNORE_TKR_TYPE :: buffer
INTEGER, INTENT(IN) :: count, root
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Ibcast_f08
end interface MPI_Ibcast
interface MPI_Exscan
subroutine MPI_Exscan_f08(sendbuf,recvbuf,count,datatype,op,comm,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Exscan_f08")
@ -1075,6 +1225,26 @@ subroutine MPI_Exscan_f08(sendbuf,recvbuf,count,datatype,op,comm,ierror &
end subroutine MPI_Exscan_f08
end interface MPI_Exscan
interface MPI_Iexscan
subroutine MPI_Iexscan_f08(sendbuf,recvbuf,count,datatype,op,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Iexscan_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: count
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Iexscan_f08
end interface MPI_Iexscan
interface MPI_Gather
subroutine MPI_Gather_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype, &
root,comm,ierror &
@ -1094,6 +1264,26 @@ subroutine MPI_Gather_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype,
end subroutine MPI_Gather_f08
end interface MPI_Gather
interface MPI_Igather
subroutine MPI_Igather_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype, &
root,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Igather_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount, root
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Igather_f08
end interface MPI_Igather
interface MPI_Gatherv
subroutine MPI_Gatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,displs, &
recvtype,root,comm,ierror &
@ -1114,6 +1304,27 @@ subroutine MPI_Gatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,displs,
end subroutine MPI_Gatherv_f08
end interface MPI_Gatherv
interface MPI_Igatherv
subroutine MPI_Igatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,displs, &
recvtype,root,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Igatherv_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount, root
INTEGER, INTENT(IN) :: recvcounts(*), displs(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Igatherv_f08
end interface MPI_Igatherv
interface MPI_Op_commutative
subroutine MPI_Op_commutative_f08(op,commute,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Op_commutative_f08")
@ -1167,6 +1378,26 @@ subroutine MPI_Reduce_f08(sendbuf,recvbuf,count,datatype,op,root,comm,ierror &
end subroutine MPI_Reduce_f08
end interface MPI_Reduce
interface MPI_Ireduce
subroutine MPI_Ireduce_f08(sendbuf,recvbuf,count,datatype,op,root,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Ireduce_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: count, root
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Ireduce_f08
end interface MPI_Ireduce
interface MPI_Reduce_local
subroutine MPI_Reduce_local_f08(inbuf,inoutbuf,count,datatype,op,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Reduce_local_f08")
@ -1205,6 +1436,27 @@ subroutine MPI_Reduce_scatter_f08(sendbuf,recvbuf,recvcounts,datatype,op,comm, &
end subroutine MPI_Reduce_scatter_f08
end interface MPI_Reduce_scatter
interface MPI_Ireduce_scatter
subroutine MPI_Ireduce_scatter_f08(sendbuf,recvbuf,recvcounts,datatype,op,comm, &
request,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Ireduce_scatter_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: recvcounts(*)
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Ireduce_scatter_f08
end interface MPI_Ireduce_scatter
interface MPI_Reduce_scatter_block
subroutine MPI_Reduce_scatter_block_f08(sendbuf,recvbuf,recvcount,datatype,op,comm, &
ierror &
@ -1225,6 +1477,27 @@ subroutine MPI_Reduce_scatter_block_f08(sendbuf,recvbuf,recvcount,datatype,op,co
end subroutine MPI_Reduce_scatter_block_f08
end interface MPI_Reduce_scatter_block
interface MPI_Ireduce_scatter_block
subroutine MPI_Ireduce_scatter_block_f08(sendbuf,recvbuf,recvcount,datatype,op,comm, &
request,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Ireduce_scatter_block_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: recvcount
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Ireduce_scatter_block_f08
end interface MPI_Ireduce_scatter_block
interface MPI_Scan
subroutine MPI_Scan_f08(sendbuf,recvbuf,count,datatype,op,comm,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Scan_f08")
@ -1244,6 +1517,26 @@ subroutine MPI_Scan_f08(sendbuf,recvbuf,count,datatype,op,comm,ierror &
end subroutine MPI_Scan_f08
end interface MPI_Scan
interface MPI_Iscan
subroutine MPI_Iscan_f08(sendbuf,recvbuf,count,datatype,op,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Iscan_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: count
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Iscan_f08
end interface MPI_Iscan
interface MPI_Scatter
subroutine MPI_Scatter_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype, &
root,comm,ierror &
@ -1263,6 +1556,26 @@ subroutine MPI_Scatter_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype
end subroutine MPI_Scatter_f08
end interface MPI_Scatter
interface MPI_Iscatter
subroutine MPI_Iscatter_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype, &
root,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Iscatter_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount, root
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Iscatter_f08
end interface MPI_Iscatter
interface MPI_Scatterv
subroutine MPI_Scatterv_f08(sendbuf,sendcounts,displs,sendtype,recvbuf,recvcount, &
recvtype,root,comm,ierror &
@ -1283,6 +1596,27 @@ subroutine MPI_Scatterv_f08(sendbuf,sendcounts,displs,sendtype,recvbuf,recvcount
end subroutine MPI_Scatterv_f08
end interface MPI_Scatterv
interface MPI_Iscatterv
subroutine MPI_Iscatterv_f08(sendbuf,sendcounts,displs,sendtype,recvbuf,recvcount, &
recvtype,root,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Iscatterv_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: recvcount, root
INTEGER, INTENT(IN) :: sendcounts(*), displs(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Iscatterv_f08
end interface MPI_Iscatterv
interface MPI_Comm_compare
subroutine MPI_Comm_compare_f08(comm1,comm2,result,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Comm_compare_f08")


@ -935,6 +935,26 @@ subroutine PMPI_Allgather_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvt
end subroutine PMPI_Allgather_f08
end interface PMPI_Allgather
interface PMPI_Iallgather
subroutine PMPI_Iallgather_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype, &
comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Iallgather_f08")
use :: mpi_f08_types
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Iallgather_f08
end interface PMPI_Iallgather
interface PMPI_Allgatherv
subroutine PMPI_Allgatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,displs, &
recvtype,comm,ierror &
@ -955,6 +975,27 @@ subroutine PMPI_Allgatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,dis
end subroutine PMPI_Allgatherv_f08
end interface PMPI_Allgatherv
interface PMPI_Iallgatherv
subroutine PMPI_Iallgatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,displs, &
recvtype,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Iallgatherv_f08")
use :: mpi_f08_types
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount
INTEGER, INTENT(IN) :: recvcounts(*), displs(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Iallgatherv_f08
end interface PMPI_Iallgatherv
interface PMPI_Allreduce
subroutine PMPI_Allreduce_f08(sendbuf,recvbuf,count,datatype,op,comm,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Allreduce_f08")
@ -974,6 +1015,26 @@ subroutine PMPI_Allreduce_f08(sendbuf,recvbuf,count,datatype,op,comm,ierror &
end subroutine PMPI_Allreduce_f08
end interface PMPI_Allreduce
interface PMPI_Iallreduce
subroutine PMPI_Iallreduce_f08(sendbuf,recvbuf,count,datatype,op,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Iallreduce_f08")
use :: mpi_f08_types
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: count
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Iallreduce_f08
end interface PMPI_Iallreduce
interface PMPI_Alltoall
subroutine PMPI_Alltoall_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype, &
comm,ierror &
@ -993,6 +1054,26 @@ subroutine PMPI_Alltoall_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvty
end subroutine PMPI_Alltoall_f08
end interface PMPI_Alltoall
interface PMPI_Ialltoall
subroutine PMPI_Ialltoall_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype, &
comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Ialltoall_f08")
use :: mpi_f08_types
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Ialltoall_f08
end interface PMPI_Ialltoall
interface PMPI_Alltoallv
subroutine PMPI_Alltoallv_f08(sendbuf,sendcounts,sdispls,sendtype,recvbuf,recvcounts, &
rdispls,recvtype,comm,ierror &
@ -1012,6 +1093,26 @@ subroutine PMPI_Alltoallv_f08(sendbuf,sendcounts,sdispls,sendtype,recvbuf,recvco
end subroutine PMPI_Alltoallv_f08
end interface PMPI_Alltoallv
interface PMPI_Ialltoallv
subroutine PMPI_Ialltoallv_f08(sendbuf,sendcounts,sdispls,sendtype,recvbuf,recvcounts, &
rdispls,recvtype,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Ialltoallv_f08")
use :: mpi_f08_types
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Ialltoallv_f08
end interface PMPI_Ialltoallv
interface PMPI_Alltoallw
subroutine PMPI_Alltoallw_f08(sendbuf,sendcounts,sdispls,sendtypes,recvbuf,recvcounts, &
rdispls,recvtypes,comm,ierror &
@ -1031,6 +1132,26 @@ subroutine PMPI_Alltoallw_f08(sendbuf,sendcounts,sdispls,sendtypes,recvbuf,recvc
end subroutine PMPI_Alltoallw_f08
end interface PMPI_Alltoallw
interface PMPI_Ialltoallw
subroutine PMPI_Ialltoallw_f08(sendbuf,sendcounts,sdispls,sendtypes,recvbuf,recvcounts, &
rdispls,recvtypes,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Ialltoallw_f08")
use :: mpi_f08_types
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtypes(*), recvtypes(*)
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Ialltoallw_f08
end interface PMPI_Ialltoallw
interface PMPI_Barrier
subroutine PMPI_Barrier_f08(comm,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Barrier_f08")
@ -1041,6 +1162,17 @@ subroutine PMPI_Barrier_f08(comm,ierror &
end subroutine PMPI_Barrier_f08
end interface PMPI_Barrier
interface PMPI_Ibarrier
subroutine PMPI_Ibarrier_f08(comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Ibarrier_f08")
use :: mpi_f08_types
implicit none
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Ibarrier_f08
end interface PMPI_Ibarrier
interface PMPI_Bcast
subroutine PMPI_Bcast_f08(buffer,count,datatype,root,comm,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Bcast_f08")
@ -1058,6 +1190,24 @@ subroutine PMPI_Bcast_f08(buffer,count,datatype,root,comm,ierror &
end subroutine PMPI_Bcast_f08
end interface PMPI_Bcast
interface PMPI_Ibcast
subroutine PMPI_Ibcast_f08(buffer,count,datatype,root,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Ibcast_f08")
use :: mpi_f08_types
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: buffer
!$PRAGMA IGNORE_TKR buffer
!DIR$ IGNORE_TKR buffer
!IBM* IGNORE_TKR buffer
OMPI_FORTRAN_IGNORE_TKR_TYPE :: buffer
INTEGER, INTENT(IN) :: count, root
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Ibcast_f08
end interface PMPI_Ibcast
interface PMPI_Exscan
subroutine PMPI_Exscan_f08(sendbuf,recvbuf,count,datatype,op,comm,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Exscan_f08")
@ -1077,6 +1227,26 @@ subroutine PMPI_Exscan_f08(sendbuf,recvbuf,count,datatype,op,comm,ierror &
end subroutine PMPI_Exscan_f08
end interface PMPI_Exscan
interface PMPI_Iexscan
subroutine PMPI_Iexscan_f08(sendbuf,recvbuf,count,datatype,op,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Iexscan_f08")
use :: mpi_f08_types
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: count
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Iexscan_f08
end interface PMPI_Iexscan
interface PMPI_Gather
subroutine PMPI_Gather_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype, &
root,comm,ierror &
@ -1096,6 +1266,26 @@ subroutine PMPI_Gather_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype
end subroutine PMPI_Gather_f08
end interface PMPI_Gather
interface PMPI_Igather
subroutine PMPI_Igather_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype, &
root,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Igather_f08")
use :: mpi_f08_types
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount, root
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Igather_f08
end interface PMPI_Igather
interface PMPI_Gatherv
subroutine PMPI_Gatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,displs, &
recvtype,root,comm,ierror &
@ -1116,6 +1306,27 @@ subroutine PMPI_Gatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,displs
end subroutine PMPI_Gatherv_f08
end interface PMPI_Gatherv
interface PMPI_Igatherv
subroutine PMPI_Igatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,displs, &
recvtype,root,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Igatherv_f08")
use :: mpi_f08_types
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount, root
INTEGER, INTENT(IN) :: recvcounts(*), displs(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Igatherv_f08
end interface PMPI_Igatherv
interface PMPI_Op_commutative
subroutine PMPI_Op_commutative_f08(op,commute,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Op_commutative_f08")
@ -1169,6 +1380,26 @@ subroutine PMPI_Reduce_f08(sendbuf,recvbuf,count,datatype,op,root,comm,ierror &
end subroutine PMPI_Reduce_f08
end interface PMPI_Reduce
interface PMPI_Ireduce
subroutine PMPI_Ireduce_f08(sendbuf,recvbuf,count,datatype,op,root,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Ireduce_f08")
use :: mpi_f08_types
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: count, root
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Ireduce_f08
end interface PMPI_Ireduce
interface PMPI_Reduce_local
subroutine PMPI_Reduce_local_f08(inbuf,inoutbuf,count,datatype,op,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Reduce_local_f08")
@ -1207,6 +1438,27 @@ subroutine PMPI_Reduce_scatter_f08(sendbuf,recvbuf,recvcounts,datatype,op,comm,
end subroutine PMPI_Reduce_scatter_f08
end interface PMPI_Reduce_scatter
interface PMPI_Ireduce_scatter
subroutine PMPI_Ireduce_scatter_f08(sendbuf,recvbuf,recvcounts,datatype,op,comm, &
request,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Ireduce_scatter_f08")
use :: mpi_f08_types
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: recvcounts(*)
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Ireduce_scatter_f08
end interface PMPI_Ireduce_scatter
interface PMPI_Reduce_scatter_block
subroutine PMPI_Reduce_scatter_block_f08(sendbuf,recvbuf,recvcount,datatype,op,comm, &
ierror &
@ -1227,6 +1479,27 @@ subroutine PMPI_Reduce_scatter_block_f08(sendbuf,recvbuf,recvcount,datatype,op,c
end subroutine PMPI_Reduce_scatter_block_f08
end interface PMPI_Reduce_scatter_block
interface PMPI_Ireduce_scatter_block
subroutine PMPI_Ireduce_scatter_block_f08(sendbuf,recvbuf,recvcount,datatype,op,comm, &
request,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Ireduce_scatter_block_f08")
use :: mpi_f08_types
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: recvcount
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Ireduce_scatter_block_f08
end interface PMPI_Ireduce_scatter_block
interface PMPI_Scan
subroutine PMPI_Scan_f08(sendbuf,recvbuf,count,datatype,op,comm,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Scan_f08")
@ -1246,6 +1519,26 @@ subroutine PMPI_Scan_f08(sendbuf,recvbuf,count,datatype,op,comm,ierror &
end subroutine PMPI_Scan_f08
end interface PMPI_Scan
interface PMPI_Iscan
subroutine PMPI_Iscan_f08(sendbuf,recvbuf,count,datatype,op,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Iscan_f08")
use :: mpi_f08_types
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: count
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Iscan_f08
end interface PMPI_Iscan
interface PMPI_Scatter
subroutine PMPI_Scatter_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype, &
root,comm,ierror &
@ -1265,6 +1558,26 @@ subroutine PMPI_Scatter_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtyp
end subroutine PMPI_Scatter_f08
end interface PMPI_Scatter
interface PMPI_Iscatter
subroutine PMPI_Iscatter_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype, &
root,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Iscatter_f08")
use :: mpi_f08_types
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount, root
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Iscatter_f08
end interface PMPI_Iscatter
interface PMPI_Scatterv
subroutine PMPI_Scatterv_f08(sendbuf,sendcounts,displs,sendtype,recvbuf,recvcount, &
recvtype,root,comm,ierror &
@ -1285,6 +1598,27 @@ subroutine PMPI_Scatterv_f08(sendbuf,sendcounts,displs,sendtype,recvbuf,recvcoun
end subroutine PMPI_Scatterv_f08
end interface PMPI_Scatterv
interface PMPI_Iscatterv
subroutine PMPI_Iscatterv_f08(sendbuf,sendcounts,displs,sendtype,recvbuf,recvcount, &
recvtype,root,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Iscatterv_f08")
use :: mpi_f08_types
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: recvcount, root
INTEGER, INTENT(IN) :: sendcounts(*), displs(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Iscatterv_f08
end interface PMPI_Iscatterv
interface PMPI_Comm_compare
subroutine PMPI_Comm_compare_f08(comm1,comm2,result,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Comm_compare_f08")


@ -174,7 +174,18 @@ libmpi_mpifh_la_SOURCES += \
group_size_f.c \
group_translate_ranks_f.c \
group_union_f.c \
iallgather_f.c \
iallgatherv_f.c \
iallreduce_f.c \
ialltoall_f.c \
ialltoallv_f.c \
ialltoallw_f.c \
ibarrier_f.c \
ibcast_f.c \
ibsend_f.c \
iexscan_f.c \
igather_f.c \
igatherv_f.c \
improbe_f.c \
imrecv_f.c \
info_create_f.c \
@ -193,8 +204,14 @@ libmpi_mpifh_la_SOURCES += \
intercomm_merge_f.c \
iprobe_f.c \
irecv_f.c \
ireduce_f.c \
ireduce_scatter_f.c \
ireduce_scatter_block_f.c \
irsend_f.c \
isend_f.c \
iscan_f.c \
iscatter_f.c \
iscatterv_f.c \
issend_f.c \
is_thread_main_f.c \
keyval_create_f.c \
@ -219,6 +236,7 @@ libmpi_mpifh_la_SOURCES += \
reduce_f.c \
reduce_local_f.c \
reduce_scatter_f.c \
reduce_scatter_block_f.c \
request_free_f.c \
request_get_status_f.c \
rsend_f.c \

ompi/mpi/fortran/mpif-h/iallgather_f.c (new file, 95 lines)

@ -0,0 +1,95 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_IALLGATHER = ompi_iallgather_f
#pragma weak pmpi_iallgather = ompi_iallgather_f
#pragma weak pmpi_iallgather_ = ompi_iallgather_f
#pragma weak pmpi_iallgather__ = ompi_iallgather_f
#pragma weak PMPI_Iallgather_f = ompi_iallgather_f
#pragma weak PMPI_Iallgather_f08 = ompi_iallgather_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_IALLGATHER,
pmpi_iallgather,
pmpi_iallgather_,
pmpi_iallgather__,
pompi_iallgather_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm, request, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_IALLGATHER = ompi_iallgather_f
#pragma weak mpi_iallgather = ompi_iallgather_f
#pragma weak mpi_iallgather_ = ompi_iallgather_f
#pragma weak mpi_iallgather__ = ompi_iallgather_f
#pragma weak MPI_Iallgather_f = ompi_iallgather_f
#pragma weak MPI_Iallgather_f08 = ompi_iallgather_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_IALLGATHER,
mpi_iallgather,
mpi_iallgather_,
mpi_iallgather__,
ompi_iallgather_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm, request, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_iallgather_f(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype,
char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype,
MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr)
{
int ierr_c;
MPI_Comm c_comm;
MPI_Request c_req;
MPI_Datatype c_sendtype, c_recvtype;
c_comm = MPI_Comm_f2c(*comm);
c_sendtype = MPI_Type_f2c(*sendtype);
c_recvtype = MPI_Type_f2c(*recvtype);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
ierr_c = MPI_Iallgather(sendbuf,
OMPI_FINT_2_INT(*sendcount),
c_sendtype,
recvbuf,
OMPI_FINT_2_INT(*recvcount),
c_recvtype, c_comm, &c_req);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(ierr_c);
if (MPI_SUCCESS == ierr_c) *request = MPI_Request_c2f(c_req);
}
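The wrapper above follows the pattern used throughout these new files: convert the Fortran handles with MPI_Comm_f2c/MPI_Type_f2c, map the MPI_IN_PLACE and MPI_BOTTOM sentinels, call the C routine, and translate the resulting request back with MPI_Request_c2f only on success. A caller-side sketch of the underlying C routine (illustrative only, not from this commit):

#include <mpi.h>

/* Each rank contributes its rank number; 'gathered' must hold one int per
 * rank in 'comm'.  The send buffer stays valid until MPI_Wait completes. */
int allgather_ranks(MPI_Comm comm, int *gathered)
{
    int rank;
    MPI_Request req;
    MPI_Comm_rank(comm, &rank);
    int rc = MPI_Iallgather(&rank, 1, MPI_INT,
                            gathered, 1, MPI_INT, comm, &req);
    if (MPI_SUCCESS != rc) {
        return rc;
    }
    return MPI_Wait(&req, MPI_STATUS_IGNORE);
}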

ompi/mpi/fortran/mpif-h/iallgatherv_f.c (new file, 105 lines)

@ -0,0 +1,105 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_IALLGATHERV = ompi_iallgatherv_f
#pragma weak pmpi_iallgatherv = ompi_iallgatherv_f
#pragma weak pmpi_iallgatherv_ = ompi_iallgatherv_f
#pragma weak pmpi_iallgatherv__ = ompi_iallgatherv_f
#pragma weak PMPI_Iallgatherv_f = ompi_iallgatherv_f
#pragma weak PMPI_Iallgatherv_f08 = ompi_iallgatherv_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_IALLGATHERV,
pmpi_iallgatherv,
pmpi_iallgatherv_,
pmpi_iallgatherv__,
pompi_iallgatherv_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *displs, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm, request, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_IALLGATHERV = ompi_iallgatherv_f
#pragma weak mpi_iallgatherv = ompi_iallgatherv_f
#pragma weak mpi_iallgatherv_ = ompi_iallgatherv_f
#pragma weak mpi_iallgatherv__ = ompi_iallgatherv_f
#pragma weak MPI_Iallgatherv_f = ompi_iallgatherv_f
#pragma weak MPI_Iallgatherv_f08 = ompi_iallgatherv_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_IALLGATHERV,
mpi_iallgatherv,
mpi_iallgatherv_,
mpi_iallgatherv__,
ompi_iallgatherv_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *displs, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm, request, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_iallgatherv_f(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype,
char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *displs,
MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request,
MPI_Fint *ierr)
{
MPI_Comm c_comm;
MPI_Datatype c_sendtype, c_recvtype;
MPI_Request c_request;
int size, ierr_c;
OMPI_ARRAY_NAME_DECL(recvcounts);
OMPI_ARRAY_NAME_DECL(displs);
c_comm = MPI_Comm_f2c(*comm);
c_sendtype = MPI_Type_f2c(*sendtype);
c_recvtype = MPI_Type_f2c(*recvtype);
MPI_Comm_size(c_comm, &size);
OMPI_ARRAY_FINT_2_INT(recvcounts, size);
OMPI_ARRAY_FINT_2_INT(displs, size);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
ierr_c = MPI_Iallgatherv(sendbuf,
OMPI_FINT_2_INT(*sendcount),
c_sendtype,
recvbuf,
OMPI_ARRAY_NAME_CONVERT(recvcounts),
OMPI_ARRAY_NAME_CONVERT(displs),
c_recvtype, c_comm, &c_request);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(ierr_c);
if (MPI_SUCCESS == ierr_c) *request = MPI_Request_c2f(c_request);
OMPI_ARRAY_FINT_2_INT_CLEANUP(recvcounts);
OMPI_ARRAY_FINT_2_INT_CLEANUP(displs);
}

ompi/mpi/fortran/mpif-h/iallreduce_f.c (new file, 91 lines)

@ -0,0 +1,91 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_IALLREDUCE = ompi_iallreduce_f
#pragma weak pmpi_iallreduce = ompi_iallreduce_f
#pragma weak pmpi_iallreduce_ = ompi_iallreduce_f
#pragma weak pmpi_iallreduce__ = ompi_iallreduce_f
#pragma weak PMPI_Iallreduce_f = ompi_iallreduce_f
#pragma weak PMPI_Iallreduce_f08 = ompi_iallreduce_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_IALLREDUCE,
pmpi_iallreduce,
pmpi_iallreduce_,
pmpi_iallreduce__,
pompi_iallreduce_f,
(char *sendbuf, char *recvbuf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, recvbuf, count, datatype, op, comm, request, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_IALLREDUCE = ompi_iallreduce_f
#pragma weak mpi_iallreduce = ompi_iallreduce_f
#pragma weak mpi_iallreduce_ = ompi_iallreduce_f
#pragma weak mpi_iallreduce__ = ompi_iallreduce_f
#pragma weak MPI_Iallreduce_f = ompi_iallreduce_f
#pragma weak MPI_Iallreduce_f08 = ompi_iallreduce_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_IALLREDUCE,
mpi_iallreduce,
mpi_iallreduce_,
mpi_iallreduce__,
ompi_iallreduce_f,
(char *sendbuf, char *recvbuf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, recvbuf, count, datatype, op, comm, request, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_iallreduce_f(char *sendbuf, char *recvbuf, MPI_Fint *count,
MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm,
MPI_Fint *request, MPI_Fint *ierr)
{
int ierr_c;
MPI_Comm c_comm;
MPI_Datatype c_type;
MPI_Request c_request;
MPI_Op c_op;
c_comm = MPI_Comm_f2c(*comm);
c_type = MPI_Type_f2c(*datatype);
c_op = MPI_Op_f2c(*op);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
ierr_c = MPI_Iallreduce(sendbuf, recvbuf,
OMPI_FINT_2_INT(*count),
c_type, c_op, c_comm, &c_request);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(ierr_c);
if (MPI_SUCCESS == ierr_c) *request = MPI_Request_c2f(c_request);
}

ompi/mpi/fortran/mpif-h/ialltoall_f.c (new file, 94 lines)

@ -0,0 +1,94 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_IALLTOALL = ompi_ialltoall_f
#pragma weak pmpi_ialltoall = ompi_ialltoall_f
#pragma weak pmpi_ialltoall_ = ompi_ialltoall_f
#pragma weak pmpi_ialltoall__ = ompi_ialltoall_f
#pragma weak PMPI_Ialltoall_f = ompi_ialltoall_f
#pragma weak PMPI_Ialltoall_f08 = ompi_ialltoall_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_IALLTOALL,
pmpi_ialltoall,
pmpi_ialltoall_,
pmpi_ialltoall__,
pompi_ialltoall_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm, request, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_IALLTOALL = ompi_ialltoall_f
#pragma weak mpi_ialltoall = ompi_ialltoall_f
#pragma weak mpi_ialltoall_ = ompi_ialltoall_f
#pragma weak mpi_ialltoall__ = ompi_ialltoall_f
#pragma weak MPI_Ialltoall_f = ompi_ialltoall_f
#pragma weak MPI_Ialltoall_f08 = ompi_ialltoall_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_IALLTOALL,
mpi_ialltoall,
mpi_ialltoall_,
mpi_ialltoall__,
ompi_ialltoall_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm, request, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_ialltoall_f(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype,
char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype,
MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr)
{
int c_ierr;
MPI_Comm c_comm;
MPI_Request c_req;
MPI_Datatype c_sendtype, c_recvtype;
c_comm = MPI_Comm_f2c(*comm);
c_sendtype = MPI_Type_f2c(*sendtype);
c_recvtype = MPI_Type_f2c(*recvtype);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
c_ierr = MPI_Ialltoall(sendbuf,
OMPI_FINT_2_INT(*sendcount),
c_sendtype,
recvbuf,
OMPI_FINT_2_INT(*recvcount),
c_recvtype, c_comm, &c_req);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
if (MPI_SUCCESS == c_ierr) *request = MPI_Request_c2f(c_req);
}

ompi/mpi/fortran/mpif-h/ialltoallv_f.c (new file, 111 lines)

@ -0,0 +1,111 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_IALLTOALLV = ompi_ialltoallv_f
#pragma weak pmpi_ialltoallv = ompi_ialltoallv_f
#pragma weak pmpi_ialltoallv_ = ompi_ialltoallv_f
#pragma weak pmpi_ialltoallv__ = ompi_ialltoallv_f
#pragma weak PMPI_Ialltoallv_f = ompi_ialltoallv_f
#pragma weak PMPI_Ialltoallv_f08 = ompi_ialltoallv_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_IALLTOALLV,
pmpi_ialltoallv,
pmpi_ialltoallv_,
pmpi_ialltoallv__,
pompi_ialltoallv_f,
(char *sendbuf, MPI_Fint *sendcounts, MPI_Fint *sdispls, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *rdispls, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcounts, sdispls, sendtype, recvbuf, recvcounts, rdispls, recvtype, comm, request, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_IALLTOALLV = ompi_ialltoallv_f
#pragma weak mpi_ialltoallv = ompi_ialltoallv_f
#pragma weak mpi_ialltoallv_ = ompi_ialltoallv_f
#pragma weak mpi_ialltoallv__ = ompi_ialltoallv_f
#pragma weak MPI_Ialltoallv_f = ompi_ialltoallv_f
#pragma weak MPI_Ialltoallv_f08 = ompi_ialltoallv_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_IALLTOALLV,
mpi_ialltoallv,
mpi_ialltoallv_,
mpi_ialltoallv__,
ompi_ialltoallv_f,
(char *sendbuf, MPI_Fint *sendcounts, MPI_Fint *sdispls, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *rdispls, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcounts, sdispls, sendtype, recvbuf, recvcounts, rdispls, recvtype, comm, request, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_ialltoallv_f(char *sendbuf, MPI_Fint *sendcounts, MPI_Fint *sdispls,
MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts,
MPI_Fint *rdispls, MPI_Fint *recvtype,
MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr)
{
MPI_Comm c_comm;
MPI_Datatype c_sendtype, c_recvtype;
MPI_Request c_request;
int size, c_ierr;
OMPI_ARRAY_NAME_DECL(sendcounts);
OMPI_ARRAY_NAME_DECL(sdispls);
OMPI_ARRAY_NAME_DECL(recvcounts);
OMPI_ARRAY_NAME_DECL(rdispls);
c_comm = MPI_Comm_f2c(*comm);
c_sendtype = MPI_Type_f2c(*sendtype);
c_recvtype = MPI_Type_f2c(*recvtype);
MPI_Comm_size(c_comm, &size);
OMPI_ARRAY_FINT_2_INT(sendcounts, size);
OMPI_ARRAY_FINT_2_INT(sdispls, size);
OMPI_ARRAY_FINT_2_INT(recvcounts, size);
OMPI_ARRAY_FINT_2_INT(rdispls, size);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
c_ierr = MPI_Ialltoallv(sendbuf,
OMPI_ARRAY_NAME_CONVERT(sendcounts),
OMPI_ARRAY_NAME_CONVERT(sdispls),
c_sendtype,
recvbuf,
OMPI_ARRAY_NAME_CONVERT(recvcounts),
OMPI_ARRAY_NAME_CONVERT(rdispls),
c_recvtype, c_comm, &c_request);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
if (MPI_SUCCESS == c_ierr) *request = MPI_Request_c2f(c_request);
OMPI_ARRAY_FINT_2_INT_CLEANUP(sendcounts);
OMPI_ARRAY_FINT_2_INT_CLEANUP(sdispls);
OMPI_ARRAY_FINT_2_INT_CLEANUP(recvcounts);
OMPI_ARRAY_FINT_2_INT_CLEANUP(rdispls);
}

ompi/mpi/fortran/mpif-h/ialltoallw_f.c (new file, 121 lines)

@ -0,0 +1,121 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_IALLTOALLW = ompi_ialltoallw_f
#pragma weak pmpi_ialltoallw = ompi_ialltoallw_f
#pragma weak pmpi_ialltoallw_ = ompi_ialltoallw_f
#pragma weak pmpi_ialltoallw__ = ompi_ialltoallw_f
#pragma weak PMPI_Ialltoallw_f = ompi_ialltoallw_f
#pragma weak PMPI_Ialltoallw_f08 = ompi_ialltoallw_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_IALLTOALLW,
pmpi_ialltoallw,
pmpi_ialltoallw_,
pmpi_ialltoallw__,
pompi_ialltoallw_f,
(char *sendbuf, MPI_Fint *sendcounts, MPI_Fint *sdispls, MPI_Fint *sendtypes, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *rdispls, MPI_Fint *recvtypes, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, recvcounts, rdispls, recvtypes, comm, request, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_IALLTOALLW = ompi_ialltoallw_f
#pragma weak mpi_ialltoallw = ompi_ialltoallw_f
#pragma weak mpi_ialltoallw_ = ompi_ialltoallw_f
#pragma weak mpi_ialltoallw__ = ompi_ialltoallw_f
#pragma weak MPI_Ialltoallw_f = ompi_ialltoallw_f
#pragma weak MPI_Ialltoallw_f08 = ompi_ialltoallw_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_IALLTOALLW,
mpi_ialltoallw,
mpi_ialltoallw_,
mpi_ialltoallw__,
ompi_ialltoallw_f,
(char *sendbuf, MPI_Fint *sendcounts, MPI_Fint *sdispls, MPI_Fint *sendtypes, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *rdispls, MPI_Fint *recvtypes, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, recvcounts, rdispls, recvtypes, comm, request, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_ialltoallw_f(char *sendbuf, MPI_Fint *sendcounts,
MPI_Fint *sdispls, MPI_Fint *sendtypes,
char *recvbuf, MPI_Fint *recvcounts,
MPI_Fint *rdispls, MPI_Fint *recvtypes,
MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr)
{
MPI_Comm c_comm;
MPI_Datatype *c_sendtypes, *c_recvtypes;
MPI_Request c_request;
int size, c_ierr;
OMPI_ARRAY_NAME_DECL(sendcounts);
OMPI_ARRAY_NAME_DECL(sdispls);
OMPI_ARRAY_NAME_DECL(recvcounts);
OMPI_ARRAY_NAME_DECL(rdispls);
c_comm = MPI_Comm_f2c(*comm);
MPI_Comm_size(c_comm, &size);
c_sendtypes = (MPI_Datatype *) malloc(size * sizeof(MPI_Datatype));
c_recvtypes = (MPI_Datatype *) malloc(size * sizeof(MPI_Datatype));
OMPI_ARRAY_FINT_2_INT(sendcounts, size);
OMPI_ARRAY_FINT_2_INT(sdispls, size);
OMPI_ARRAY_FINT_2_INT(recvcounts, size);
OMPI_ARRAY_FINT_2_INT(rdispls, size);
while (size > 0) {
c_sendtypes[size - 1] = MPI_Type_f2c(sendtypes[size - 1]);
c_recvtypes[size - 1] = MPI_Type_f2c(recvtypes[size - 1]);
--size;
}
/* Ialltoallw does not support MPI_IN_PLACE */
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
c_ierr = MPI_Ialltoallw(sendbuf,
OMPI_ARRAY_NAME_CONVERT(sendcounts),
OMPI_ARRAY_NAME_CONVERT(sdispls),
c_sendtypes,
recvbuf,
OMPI_ARRAY_NAME_CONVERT(recvcounts),
OMPI_ARRAY_NAME_CONVERT(rdispls),
c_recvtypes, c_comm, &c_request);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
if (MPI_SUCCESS == c_ierr) *request = MPI_Request_c2f(c_request);
OMPI_ARRAY_FINT_2_INT_CLEANUP(sendcounts);
OMPI_ARRAY_FINT_2_INT_CLEANUP(sdispls);
OMPI_ARRAY_FINT_2_INT_CLEANUP(recvcounts);
OMPI_ARRAY_FINT_2_INT_CLEANUP(rdispls);
free(c_sendtypes);
free(c_recvtypes);
}

ompi/mpi/fortran/mpif-h/ibarrier_f.c (new file, 90 lines)

@ -0,0 +1,90 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_IBARRIER = ompi_ibarrier_f
#pragma weak pmpi_ibarrier = ompi_ibarrier_f
#pragma weak pmpi_ibarrier_ = ompi_ibarrier_f
#pragma weak pmpi_ibarrier__ = ompi_ibarrier_f
#pragma weak PMPI_Ibarrier_f = ompi_ibarrier_f
#pragma weak PMPI_Ibarrier_f08 = ompi_ibarrier_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_IBARRIER,
pmpi_ibarrier,
pmpi_ibarrier_,
pmpi_ibarrier__,
pompi_ibarrier_f,
(MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(comm, request, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_IBARRIER = ompi_ibarrier_f
#pragma weak mpi_ibarrier = ompi_ibarrier_f
#pragma weak mpi_ibarrier_ = ompi_ibarrier_f
#pragma weak mpi_ibarrier__ = ompi_ibarrier_f
#pragma weak MPI_Ibarrier_f = ompi_ibarrier_f
#pragma weak MPI_Ibarrier_f08 = ompi_ibarrier_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_IBARRIER,
mpi_ibarrier,
mpi_ibarrier_,
mpi_ibarrier__,
ompi_ibarrier_f,
(MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(comm, request, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_ibarrier_f(MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr)
{
int ierr_c;
MPI_Comm c_comm;
MPI_Request c_req;
c_comm = MPI_Comm_f2c(*comm);
ierr_c = MPI_Ibarrier(c_comm, &c_req);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(ierr_c);
if (MPI_SUCCESS == ierr_c) *request = MPI_Request_c2f(c_req);
}
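As an illustration of why a nonblocking barrier is useful (example only, not part of this commit): the request returned by MPI_Ibarrier can be polled with MPI_Test so that local work overlaps the synchronization.

#include <mpi.h>

/* Enter the barrier, then keep doing local work until every rank has
 * reached it; MPI_Test sets 'done' once the barrier completes. */
void barrier_with_overlap(MPI_Comm comm)
{
    MPI_Request req;
    int done = 0;
    MPI_Ibarrier(comm, &req);
    while (!done) {
        /* do_useful_local_work();  -- placeholder, caller-provided */
        MPI_Test(&req, &done, MPI_STATUS_IGNORE);
    }
}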

ompi/mpi/fortran/mpif-h/ibcast_f.c (new file, 88 lines)

@ -0,0 +1,88 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_IBCAST = ompi_ibcast_f
#pragma weak pmpi_ibcast = ompi_ibcast_f
#pragma weak pmpi_ibcast_ = ompi_ibcast_f
#pragma weak pmpi_ibcast__ = ompi_ibcast_f
#pragma weak PMPI_Ibcast_f = ompi_ibcast_f
#pragma weak PMPI_Ibcast_f08 = ompi_ibcast_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_IBCAST,
pmpi_ibcast,
pmpi_ibcast_,
pmpi_ibcast__,
pompi_ibcast_f,
(char *buffer, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(buffer, count, datatype, root, comm, request, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_IBCAST = ompi_ibcast_f
#pragma weak mpi_ibcast = ompi_ibcast_f
#pragma weak mpi_ibcast_ = ompi_ibcast_f
#pragma weak mpi_ibcast__ = ompi_ibcast_f
#pragma weak MPI_Ibcast_f = ompi_ibcast_f
#pragma weak MPI_Ibcast_f08 = ompi_ibcast_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_IBCAST,
mpi_ibcast,
mpi_ibcast_,
mpi_ibcast__,
ompi_ibcast_f,
(char *buffer, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(buffer, count, datatype, root, comm, request, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_ibcast_f(char *buffer, MPI_Fint *count, MPI_Fint *datatype,
MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request,
MPI_Fint *ierr)
{
int c_ierr;
MPI_Comm c_comm;
MPI_Request c_req;
MPI_Datatype c_type;
c_comm = MPI_Comm_f2c(*comm);
c_type = MPI_Type_f2c(*datatype);
c_ierr = MPI_Ibcast(OMPI_F2C_BOTTOM(buffer),
OMPI_FINT_2_INT(*count),
c_type,
OMPI_FINT_2_INT(*root),
c_comm,
&c_req);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
if (MPI_SUCCESS == c_ierr) *request = MPI_Request_c2f(c_req);
}

ompi/mpi/fortran/mpif-h/iexscan_f.c (new file, 91 lines)

@ -0,0 +1,91 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_IEXSCAN = ompi_iexscan_f
#pragma weak pmpi_iexscan = ompi_iexscan_f
#pragma weak pmpi_iexscan_ = ompi_iexscan_f
#pragma weak pmpi_iexscan__ = ompi_iexscan_f
#pragma weak PMPI_Iexscan_f = ompi_iexscan_f
#pragma weak PMPI_Iexscan_f08 = ompi_iexscan_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_IEXSCAN,
pmpi_iexscan,
pmpi_iexscan_,
pmpi_iexscan__,
pompi_iexscan_f,
(char *sendbuf, char *recvbuf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, recvbuf, count, datatype, op, comm, request, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_IEXSCAN = ompi_iexscan_f
#pragma weak mpi_iexscan = ompi_iexscan_f
#pragma weak mpi_iexscan_ = ompi_iexscan_f
#pragma weak mpi_iexscan__ = ompi_iexscan_f
#pragma weak MPI_Iexscan_f = ompi_iexscan_f
#pragma weak MPI_Iexscan_f08 = ompi_iexscan_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_IEXSCAN,
mpi_iexscan,
mpi_iexscan_,
mpi_iexscan__,
ompi_iexscan_f,
(char *sendbuf, char *recvbuf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, recvbuf, count, datatype, op, comm, request, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_iexscan_f(char *sendbuf, char *recvbuf, MPI_Fint *count,
MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm,
MPI_Fint *request, MPI_Fint *ierr)
{
int c_ierr;
MPI_Comm c_comm;
MPI_Datatype c_type;
MPI_Request c_request;
MPI_Op c_op;
c_comm = MPI_Comm_f2c(*comm);
c_type = MPI_Type_f2c(*datatype);
c_op = MPI_Op_f2c(*op);
/* MPI_IN_PLACE is not supported */
sendbuf = (char *) OMPI_F2C_BOTTOM (sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM (recvbuf);
c_ierr = MPI_Iexscan(sendbuf, recvbuf,
OMPI_FINT_2_INT(*count),
c_type, c_op, c_comm, &c_request);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
if (MPI_SUCCESS == c_ierr) *request = MPI_Request_c2f(c_request);
}

ompi/mpi/fortran/mpif-h/igather_f.c (new file, 94 lines)

@ -0,0 +1,94 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_IGATHER = ompi_igather_f
#pragma weak pmpi_igather = ompi_igather_f
#pragma weak pmpi_igather_ = ompi_igather_f
#pragma weak pmpi_igather__ = ompi_igather_f
#pragma weak PMPI_Igather_f = ompi_igather_f
#pragma weak PMPI_Igather_f08 = ompi_igather_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_IGATHER,
pmpi_igather,
pmpi_igather_,
pmpi_igather__,
pompi_igather_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm, request, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_IGATHER = ompi_igather_f
#pragma weak mpi_igather = ompi_igather_f
#pragma weak mpi_igather_ = ompi_igather_f
#pragma weak mpi_igather__ = ompi_igather_f
#pragma weak MPI_Igather_f = ompi_igather_f
#pragma weak MPI_Igather_f08 = ompi_igather_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_IGATHER,
mpi_igather,
mpi_igather_,
mpi_igather__,
ompi_igather_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm, request, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_igather_f(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype,
char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype,
MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request,
MPI_Fint *ierr)
{
int c_ierr;
MPI_Comm c_comm;
MPI_Datatype c_sendtype, c_recvtype;
MPI_Request c_request;
c_comm = MPI_Comm_f2c(*comm);
c_sendtype = MPI_Type_f2c(*sendtype);
c_recvtype = MPI_Type_f2c(*recvtype);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
c_ierr = MPI_Igather(sendbuf, OMPI_FINT_2_INT(*sendcount),
c_sendtype, recvbuf,
OMPI_FINT_2_INT(*recvcount),
c_recvtype,
OMPI_FINT_2_INT(*root),
c_comm, &c_request);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
if (MPI_SUCCESS == c_ierr) *request = MPI_Request_c2f(c_request);
}

ompi/mpi/fortran/mpif-h/igatherv_f.c (new file)

@ -0,0 +1,101 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_IGATHERV = ompi_igatherv_f
#pragma weak pmpi_igatherv = ompi_igatherv_f
#pragma weak pmpi_igatherv_ = ompi_igatherv_f
#pragma weak pmpi_igatherv__ = ompi_igatherv_f
#pragma weak PMPI_Igatherv_f = ompi_igatherv_f
#pragma weak PMPI_Igatherv_f08 = ompi_igatherv_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_IGATHERV,
pmpi_igatherv,
pmpi_igatherv_,
pmpi_igatherv__,
pompi_igatherv_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *displs, MPI_Fint *recvtype, MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, root, comm, request, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_IGATHERV = ompi_igatherv_f
#pragma weak mpi_igatherv = ompi_igatherv_f
#pragma weak mpi_igatherv_ = ompi_igatherv_f
#pragma weak mpi_igatherv__ = ompi_igatherv_f
#pragma weak MPI_Igatherv_f = ompi_igatherv_f
#pragma weak MPI_Igatherv_f08 = ompi_igatherv_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_IGATHERV,
mpi_igatherv,
mpi_igatherv_,
mpi_igatherv__,
ompi_igatherv_f,
                            (char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *displs, MPI_Fint *recvtype, MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, root, comm, request, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_igatherv_f(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype,
char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *displs,
MPI_Fint *recvtype, MPI_Fint *root, MPI_Fint *comm,
MPI_Fint *request, MPI_Fint *ierr)
{
MPI_Comm c_comm;
MPI_Datatype c_sendtype, c_recvtype;
MPI_Request c_request;
int size, c_ierr;
OMPI_ARRAY_NAME_DECL(recvcounts);
OMPI_ARRAY_NAME_DECL(displs);
c_comm = MPI_Comm_f2c(*comm);
c_sendtype = MPI_Type_f2c(*sendtype);
c_recvtype = MPI_Type_f2c(*recvtype);
MPI_Comm_size(c_comm, &size);
OMPI_ARRAY_FINT_2_INT(recvcounts, size);
OMPI_ARRAY_FINT_2_INT(displs, size);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
c_ierr = MPI_Igatherv(sendbuf, OMPI_FINT_2_INT(*sendcount),
c_sendtype, recvbuf,
OMPI_ARRAY_NAME_CONVERT(recvcounts),
OMPI_ARRAY_NAME_CONVERT(displs),
c_recvtype,
OMPI_FINT_2_INT(*root),
c_comm, &c_request);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
if (MPI_SUCCESS == c_ierr) *request = MPI_Request_c2f(c_request);
}
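As a usage sketch (not part of the commit; names are illustrative): with the binding above, rank r can contribute r+1 integers and the root gathers the blocks back to back, e.g.:

program igatherv_demo
  implicit none
  include 'mpif.h'
  integer :: i, ierr, rank, nprocs, request
  integer, allocatable :: sendbuf(:), recvbuf(:), recvcounts(:), displs(:)

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierr)

  ! Rank r sends r+1 copies of its rank; the root places block i at displs(i).
  allocate(sendbuf(rank+1), recvcounts(nprocs), displs(nprocs))
  allocate(recvbuf(nprocs*(nprocs+1)/2))
  sendbuf = rank
  do i = 1, nprocs
     recvcounts(i) = i
     displs(i) = (i-1)*i/2
  end do

  ! recvcounts/displs are only significant at the root, but should not be
  ! modified until the request completes.
  call MPI_Igatherv(sendbuf, rank+1, MPI_INTEGER, recvbuf, recvcounts, &
                    displs, MPI_INTEGER, 0, MPI_COMM_WORLD, request, ierr)
  call MPI_Wait(request, MPI_STATUS_IGNORE, ierr)

  call MPI_Finalize(ierr)
end program igatherv_demo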

ompi/mpi/fortran/mpif-h/ireduce_f.c (new file)

@ -0,0 +1,94 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_IREDUCE = ompi_ireduce_f
#pragma weak pmpi_ireduce = ompi_ireduce_f
#pragma weak pmpi_ireduce_ = ompi_ireduce_f
#pragma weak pmpi_ireduce__ = ompi_ireduce_f
#pragma weak PMPI_Ireduce_f = ompi_ireduce_f
#pragma weak PMPI_Ireduce_f08 = ompi_ireduce_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_IREDUCE,
pmpi_ireduce,
pmpi_ireduce_,
pmpi_ireduce__,
pompi_ireduce_f,
(char *sendbuf, char *recvbuf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, recvbuf, count, datatype, op, root, comm, request, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_IREDUCE = ompi_ireduce_f
#pragma weak mpi_ireduce = ompi_ireduce_f
#pragma weak mpi_ireduce_ = ompi_ireduce_f
#pragma weak mpi_ireduce__ = ompi_ireduce_f
#pragma weak MPI_Ireduce_f = ompi_ireduce_f
#pragma weak MPI_Ireduce_f08 = ompi_ireduce_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_IREDUCE,
mpi_ireduce,
mpi_ireduce_,
mpi_ireduce__,
ompi_ireduce_f,
(char *sendbuf, char *recvbuf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, recvbuf, count, datatype, op, root, comm, request, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_ireduce_f(char *sendbuf, char *recvbuf, MPI_Fint *count,
MPI_Fint *datatype, MPI_Fint *op,
MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request,
MPI_Fint *ierr)
{
int c_ierr;
MPI_Datatype c_type;
MPI_Request c_request;
MPI_Op c_op;
MPI_Comm c_comm;
c_type = MPI_Type_f2c(*datatype);
c_op = MPI_Op_f2c(*op);
c_comm = MPI_Comm_f2c(*comm);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
c_ierr = MPI_Ireduce(sendbuf, recvbuf,
OMPI_FINT_2_INT(*count),
c_type, c_op,
OMPI_FINT_2_INT(*root),
c_comm, &c_request);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
if (MPI_SUCCESS == c_ierr) *request = MPI_Request_c2f(c_request);
}


@ -0,0 +1,95 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_IREDUCE_SCATTER_BLOCK = ompi_ireduce_scatter_block_f
#pragma weak pmpi_ireduce_scatter_block = ompi_ireduce_scatter_block_f
#pragma weak pmpi_ireduce_scatter_block_ = ompi_ireduce_scatter_block_f
#pragma weak pmpi_ireduce_scatter_block__ = ompi_ireduce_scatter_block_f
#pragma weak PMPI_Ireduce_scatter_block_f = ompi_ireduce_scatter_block_f
#pragma weak PMPI_Ireduce_scatter_block_f08 = ompi_ireduce_scatter_block_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_IREDUCE_SCATTER_BLOCK,
pmpi_ireduce_scatter_block,
pmpi_ireduce_scatter_block_,
pmpi_ireduce_scatter_block__,
pompi_ireduce_scatter_block_f,
(char *sendbuf, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, recvbuf, recvcounts, datatype, op, comm, request, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_IREDUCE_SCATTER_BLOCK = ompi_ireduce_scatter_block_f
#pragma weak mpi_ireduce_scatter_block = ompi_ireduce_scatter_block_f
#pragma weak mpi_ireduce_scatter_block_ = ompi_ireduce_scatter_block_f
#pragma weak mpi_ireduce_scatter_block__ = ompi_ireduce_scatter_block_f
#pragma weak MPI_Ireduce_scatter_block_f = ompi_ireduce_scatter_block_f
#pragma weak MPI_Ireduce_scatter_block_f08 = ompi_ireduce_scatter_block_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_IREDUCE_SCATTER_BLOCK,
mpi_ireduce_scatter_block,
mpi_ireduce_scatter_block_,
mpi_ireduce_scatter_block__,
ompi_ireduce_scatter_block_f,
(char *sendbuf, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, recvbuf, recvcounts, datatype, op, comm, request, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_ireduce_scatter_block_f(char *sendbuf, char *recvbuf,
MPI_Fint *recvcount, MPI_Fint *datatype,
MPI_Fint *op, MPI_Fint *comm,
MPI_Fint *request, MPI_Fint *ierr)
{
int c_ierr;
MPI_Comm c_comm;
MPI_Datatype c_type;
MPI_Request c_request;
MPI_Op c_op;
int size;
c_comm = MPI_Comm_f2c(*comm);
c_type = MPI_Type_f2c(*datatype);
c_op = MPI_Op_f2c(*op);
MPI_Comm_size(c_comm, &size);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
c_ierr = MPI_Ireduce_scatter_block(sendbuf, recvbuf,
OMPI_FINT_2_INT(*recvcount),
c_type, c_op, c_comm, &c_request);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
if (MPI_SUCCESS == c_ierr) *request = MPI_Request_c2f(c_request);
}

ompi/mpi/fortran/mpif-h/ireduce_scatter_f.c (new file)

@ -0,0 +1,97 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_IREDUCE_SCATTER = ompi_ireduce_scatter_f
#pragma weak pmpi_ireduce_scatter = ompi_ireduce_scatter_f
#pragma weak pmpi_ireduce_scatter_ = ompi_ireduce_scatter_f
#pragma weak pmpi_ireduce_scatter__ = ompi_ireduce_scatter_f
#pragma weak PMPI_Ireduce_scatter_f = ompi_ireduce_scatter_f
#pragma weak PMPI_Ireduce_scatter_f08 = ompi_ireduce_scatter_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_IREDUCE_SCATTER,
pmpi_ireduce_scatter,
pmpi_ireduce_scatter_,
pmpi_ireduce_scatter__,
pompi_ireduce_scatter_f,
(char *sendbuf, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, recvbuf, recvcounts, datatype, op, comm, request, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_IREDUCE_SCATTER = ompi_ireduce_scatter_f
#pragma weak mpi_ireduce_scatter = ompi_ireduce_scatter_f
#pragma weak mpi_ireduce_scatter_ = ompi_ireduce_scatter_f
#pragma weak mpi_ireduce_scatter__ = ompi_ireduce_scatter_f
#pragma weak MPI_Ireduce_scatter_f = ompi_ireduce_scatter_f
#pragma weak MPI_Ireduce_scatter_f08 = ompi_ireduce_scatter_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_IREDUCE_SCATTER,
mpi_ireduce_scatter,
mpi_ireduce_scatter_,
mpi_ireduce_scatter__,
ompi_ireduce_scatter_f,
(char *sendbuf, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, recvbuf, recvcounts, datatype, op, comm, request, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_ireduce_scatter_f(char *sendbuf, char *recvbuf,
MPI_Fint *recvcounts, MPI_Fint *datatype,
MPI_Fint *op, MPI_Fint *comm, MPI_Fint *request,
MPI_Fint *ierr)
{
int c_ierr;
MPI_Comm c_comm;
MPI_Datatype c_type;
MPI_Request c_request;
MPI_Op c_op;
int size;
OMPI_ARRAY_NAME_DECL(recvcounts);
c_comm = MPI_Comm_f2c(*comm);
c_type = MPI_Type_f2c(*datatype);
c_op = MPI_Op_f2c(*op);
MPI_Comm_size(c_comm, &size);
OMPI_ARRAY_FINT_2_INT(recvcounts, size);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
c_ierr = MPI_Ireduce_scatter(sendbuf, recvbuf,
OMPI_ARRAY_NAME_CONVERT(recvcounts),
c_type, c_op, c_comm, &c_request);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
if (MPI_SUCCESS == c_ierr) *request = MPI_Request_c2f(c_request);
}

ompi/mpi/fortran/mpif-h/iscan_f.c (new file)

@ -0,0 +1,92 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_ISCAN = ompi_iscan_f
#pragma weak pmpi_iscan = ompi_iscan_f
#pragma weak pmpi_iscan_ = ompi_iscan_f
#pragma weak pmpi_iscan__ = ompi_iscan_f
#pragma weak PMPI_Iscan_f = ompi_iscan_f
#pragma weak PMPI_Iscan_f08 = ompi_iscan_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_ISCAN,
pmpi_iscan,
pmpi_iscan_,
pmpi_iscan__,
pompi_iscan_f,
(char *sendbuf, char *recvbuf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, recvbuf, count, datatype, op, comm, request, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_ISCAN = ompi_iscan_f
#pragma weak mpi_iscan = ompi_iscan_f
#pragma weak mpi_iscan_ = ompi_iscan_f
#pragma weak mpi_iscan__ = ompi_iscan_f
#pragma weak MPI_Iscan_f = ompi_iscan_f
#pragma weak MPI_Iscan_f08 = ompi_iscan_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_ISCAN,
mpi_iscan,
mpi_iscan_,
mpi_iscan__,
ompi_iscan_f,
(char *sendbuf, char *recvbuf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, recvbuf, count, datatype, op, comm, request, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_iscan_f(char *sendbuf, char *recvbuf, MPI_Fint *count,
MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm,
MPI_Fint *request, MPI_Fint *ierr)
{
int c_ierr;
MPI_Comm c_comm;
MPI_Datatype c_type;
MPI_Request c_request;
MPI_Op c_op;
c_type = MPI_Type_f2c(*datatype);
c_op = MPI_Op_f2c(*op);
c_comm = MPI_Comm_f2c(*comm);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
c_ierr = MPI_Iscan(sendbuf, recvbuf,
OMPI_FINT_2_INT(*count),
c_type, c_op,
c_comm, &c_request);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
if (MPI_SUCCESS == c_ierr) *request = MPI_Request_c2f(c_request);
}

ompi/mpi/fortran/mpif-h/iscatter_f.c (new file)

@ -0,0 +1,93 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_ISCATTER = ompi_iscatter_f
#pragma weak pmpi_iscatter = ompi_iscatter_f
#pragma weak pmpi_iscatter_ = ompi_iscatter_f
#pragma weak pmpi_iscatter__ = ompi_iscatter_f
#pragma weak PMPI_Iscatter_f = ompi_iscatter_f
#pragma weak PMPI_Iscatter_f08 = ompi_iscatter_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_ISCATTER,
pmpi_iscatter,
pmpi_iscatter_,
pmpi_iscatter__,
pompi_iscatter_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm, request, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_ISCATTER = ompi_iscatter_f
#pragma weak mpi_iscatter = ompi_iscatter_f
#pragma weak mpi_iscatter_ = ompi_iscatter_f
#pragma weak mpi_iscatter__ = ompi_iscatter_f
#pragma weak MPI_Iscatter_f = ompi_iscatter_f
#pragma weak MPI_Iscatter_f08 = ompi_iscatter_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_ISCATTER,
mpi_iscatter,
mpi_iscatter_,
mpi_iscatter__,
ompi_iscatter_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm, request, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_iscatter_f(char *sendbuf, MPI_Fint *sendcount,
MPI_Fint *sendtype, char *recvbuf,
MPI_Fint *recvcount, MPI_Fint *recvtype,
MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request,
MPI_Fint *ierr)
{
int c_ierr;
MPI_Datatype c_sendtype, c_recvtype;
MPI_Request c_request;
MPI_Comm c_comm = MPI_Comm_f2c(*comm);
c_sendtype = MPI_Type_f2c(*sendtype);
c_recvtype = MPI_Type_f2c(*recvtype);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
    c_ierr = MPI_Iscatter(sendbuf, OMPI_FINT_2_INT(*sendcount),
c_sendtype, recvbuf,
OMPI_FINT_2_INT(*recvcount),
c_recvtype,
OMPI_FINT_2_INT(*root), c_comm, &c_request);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
if (MPI_SUCCESS == c_ierr) *request = MPI_Request_c2f(c_request);
}

ompi/mpi/fortran/mpif-h/iscatterv_f.c (new file)

@ -0,0 +1,105 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_ISCATTERV = ompi_iscatterv_f
#pragma weak pmpi_iscatterv = ompi_iscatterv_f
#pragma weak pmpi_iscatterv_ = ompi_iscatterv_f
#pragma weak pmpi_iscatterv__ = ompi_iscatterv_f
#pragma weak PMPI_Iscatterv_f = ompi_iscatterv_f
#pragma weak PMPI_Iscatterv_f08 = ompi_iscatterv_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_ISCATTERV,
pmpi_iscatterv,
pmpi_iscatterv_,
pmpi_iscatterv__,
pompi_iscatterv_f,
(char *sendbuf, MPI_Fint *sendcounts, MPI_Fint *displs, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm, request, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_ISCATTERV = ompi_iscatterv_f
#pragma weak mpi_iscatterv = ompi_iscatterv_f
#pragma weak mpi_iscatterv_ = ompi_iscatterv_f
#pragma weak mpi_iscatterv__ = ompi_iscatterv_f
#pragma weak MPI_Iscatterv_f = ompi_iscatterv_f
#pragma weak MPI_Iscatterv_f08 = ompi_iscatterv_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_ISCATTERV,
mpi_iscatterv,
mpi_iscatterv_,
mpi_iscatterv__,
ompi_iscatterv_f,
(char *sendbuf, MPI_Fint *sendcounts, MPI_Fint *displs, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm, request, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_iscatterv_f(char *sendbuf, MPI_Fint *sendcounts,
MPI_Fint *displs, MPI_Fint *sendtype,
char *recvbuf, MPI_Fint *recvcount,
MPI_Fint *recvtype, MPI_Fint *root,
MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr)
{
MPI_Comm c_comm;
MPI_Datatype c_sendtype, c_recvtype;
MPI_Request c_request;
int size, c_ierr;
OMPI_ARRAY_NAME_DECL(sendcounts);
OMPI_ARRAY_NAME_DECL(displs);
c_comm = MPI_Comm_f2c(*comm);
c_sendtype = MPI_Type_f2c(*sendtype);
c_recvtype = MPI_Type_f2c(*recvtype);
MPI_Comm_size(c_comm, &size);
OMPI_ARRAY_FINT_2_INT(sendcounts, size);
OMPI_ARRAY_FINT_2_INT(displs, size);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
c_ierr = MPI_Iscatterv(sendbuf,
OMPI_ARRAY_NAME_CONVERT(sendcounts),
OMPI_ARRAY_NAME_CONVERT(displs),
c_sendtype, recvbuf,
OMPI_FINT_2_INT(*recvcount),
c_recvtype,
OMPI_FINT_2_INT(*root), c_comm, &c_request);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
if (MPI_SUCCESS == c_ierr) *request = MPI_Request_c2f(c_request);
OMPI_ARRAY_FINT_2_INT_CLEANUP(sendcounts);
OMPI_ARRAY_FINT_2_INT_CLEANUP(displs);
}


@ -143,7 +143,18 @@ nodist_libmpi_mpifh_pmpi_la_SOURCES = \
pgroup_size_f.c \
pgroup_translate_ranks_f.c \
pgroup_union_f.c \
piallgather_f.c \
piallgatherv_f.c \
piallreduce_f.c \
pialltoall_f.c \
pialltoallv_f.c \
pialltoallw_f.c \
pibarrier_f.c \
pibcast_f.c \
pibsend_f.c \
piexscan_f.c \
pigather_f.c \
pigatherv_f.c \
pimprobe_f.c \
pimrecv_f.c \
pinfo_create_f.c \
@ -162,7 +173,12 @@ nodist_libmpi_mpifh_pmpi_la_SOURCES = \
pintercomm_merge_f.c \
piprobe_f.c \
pirecv_f.c \
pireduce_f.c \
pireduce_scatter_f.c \
pireduce_scatter_block_f.c \
pirsend_f.c \
piscatter_f.c \
piscatterv_f.c \
pisend_f.c \
pissend_f.c \
pis_thread_main_f.c \
@ -188,6 +204,7 @@ nodist_libmpi_mpifh_pmpi_la_SOURCES = \
preduce_f.c \
preduce_local_f.c \
preduce_scatter_f.c \
preduce_scatter_block_f.c \
prequest_free_f.c \
prequest_get_status_f.c \
prsend_f.c \


@ -176,9 +176,27 @@
#define ompi_group_size_f pompi_group_size_f
#define ompi_group_translate_ranks_f pompi_group_translate_ranks_f
#define ompi_group_union_f pompi_group_union_f
#define ompi_iallgather_f pompi_iallgather_f
#define ompi_iallgatherv_f pompi_iallgatherv_f
#define ompi_iallreduce_f pompi_iallreduce_f
#define ompi_ialltoall_f pompi_ialltoall_f
#define ompi_ialltoallv_f pompi_ialltoallv_f
#define ompi_ialltoallw_f pompi_ialltoallw_f
#define ompi_ibarrier_f pompi_ibarrier_f
#define ompi_ibcast_f pompi_ibcast_f
#define ompi_ibsend_f pompi_ibsend_f
#define ompi_iexscan_f pompi_iexscan_f
#define ompi_igather_f pompi_igather_f
#define ompi_igatherv_f pompi_igatherv_f
#define ompi_improbe_f pompi_improbe_f
#define ompi_imrecv_f pompi_imrecv_f
#define ompi_ireduce_f pompi_ireduce_f
#define ompi_ireduce_scatter_f pompi_ireduce_scatter_f
#define ompi_ireduce_scatter_block_f pompi_ireduce_scatter_block_f
#define ompi_iscan_f pompi_iscan_f
#define ompi_iscatter_f pompi_iscatter_f
#define ompi_iscatterv_f pompi_iscatterv_f
#define ompi_info_create_f pompi_info_create_f
#define ompi_info_delete_f pompi_info_delete_f
#define ompi_info_dup_f pompi_info_dup_f
@ -222,6 +240,7 @@
#define ompi_reduce_f pompi_reduce_f
#define ompi_reduce_local_f pompi_reduce_local_f
#define ompi_reduce_scatter_f pompi_reduce_scatter_f
#define ompi_reduce_scatter_block_f pompi_reduce_scatter_block_f
#define ompi_register_datarep_f pompi_register_datarep_f
#define ompi_request_free_f pompi_request_free_f
#define ompi_request_get_status_f pompi_request_get_status_f


@ -232,9 +232,26 @@ PN2(void, MPI_Group_rank, mpi_group_rank, MPI_GROUP_RANK, (MPI_Fint *group, MPI_
PN2(void, MPI_Group_size, mpi_group_size, MPI_GROUP_SIZE, (MPI_Fint *group, MPI_Fint *size, MPI_Fint *ierr));
PN2(void, MPI_Group_translate_ranks, mpi_group_translate_ranks, MPI_GROUP_TRANSLATE_RANKS, (MPI_Fint *group1, MPI_Fint *n, MPI_Fint *ranks1, MPI_Fint *group2, MPI_Fint *ranks2, MPI_Fint *ierr));
PN2(void, MPI_Group_union, mpi_group_union, MPI_GROUP_UNION, (MPI_Fint *group1, MPI_Fint *group2, MPI_Fint *newgroup, MPI_Fint *ierr));
PN2(void, MPI_Iallgather, mpi_iallgather, MPI_IALLGATHER, (char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Iallgatherv, mpi_iallgatherv, MPI_IALLGATHERV, (char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *displs, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Iallreduce, mpi_iallreduce, MPI_IALLREDUCE, (char *sendbuf, char *recvbuf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Ialltoall, mpi_ialltoall, MPI_IALLTOALL, (char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Ialltoallv, mpi_ialltoallv, MPI_IALLTOALLV, (char *sendbuf, MPI_Fint *sendcounts, MPI_Fint *sdispls, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *rdispls, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Ialltoallw, mpi_ialltoallw, MPI_IALLTOALLW, (char *sendbuf, MPI_Fint *sendcounts, MPI_Fint *sdispls, MPI_Fint *sendtypes, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *rdispls, MPI_Fint *recvtypes, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Ibarrier, mpi_ibarrier, MPI_IBARRIER, (MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Ibcast, mpi_ibcast, MPI_IBCAST, (char *buffer, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Ibsend, mpi_ibsend, MPI_IBSEND, (char *buf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *dest, MPI_Fint *tag, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Iexscan, mpi_iexscan, MPI_IEXSCAN, (char *sendbuf, char *recvbuf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Igather, mpi_igather, MPI_IGATHER, (char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Igatherv, mpi_igatherv, MPI_IGATHERV, (char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *displs, MPI_Fint *recvtype, MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Improbe, mpi_improbe, MPI_IMPROBE, (MPI_Fint *source, MPI_Fint *tag, MPI_Fint *comm, ompi_fortran_logical_t *flag, MPI_Fint *message, MPI_Fint *status, MPI_Fint *ierr));
PN2(void, MPI_Imrecv,mpi_imrecv, MPI_IMRECV, (char *buf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *message, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Ireduce, mpi_ireduce, MPI_IREDUCE, (char *sendbuf, char *recvbuf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Ireduce_scatter, mpi_ireduce_scatter, MPI_IREDUCE_SCATTER, (char *sendbuf, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Ireduce_scatter_block, mpi_ireduce_scatter_block, MPI_IREDUCE_SCATTER_BLOCK, (char *sendbuf, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Iscan, mpi_iscan, MPI_ISCAN, (char *sendbuf, char *recvbuf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Iscatter, mpi_iscatter, MPI_ISCATTER, (char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Iscatterv, mpi_iscatterv, MPI_ISCATTERV, (char *sendbuf, MPI_Fint *sendcounts, MPI_Fint *displs, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Info_create, mpi_info_create, MPI_INFO_CREATE, (MPI_Fint *info, MPI_Fint *ierr));
PN2(void, MPI_Info_delete, mpi_info_delete, MPI_INFO_DELETE, (MPI_Fint *info, char *key, MPI_Fint *ierr, int key_len));
PN2(void, MPI_Info_dup, mpi_info_dup, MPI_INFO_DUP, (MPI_Fint *info, MPI_Fint *newinfo, MPI_Fint *ierr));
@ -278,6 +295,7 @@ PN2(void, MPI_Recv, mpi_recv, MPI_RECV, (char *buf, MPI_Fint *count, MPI_Fint *d
PN2(void, MPI_Reduce, mpi_reduce, MPI_REDUCE, (char *sendbuf, char *recvbuf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *root, MPI_Fint *comm, MPI_Fint *ierr));
PN2(void, MPI_Reduce_local, mpi_reduce_local, MPI_REDUCE_LOCAL, (char *inbuf, char *inoutbuf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *ierr));
PN2(void, MPI_Reduce_scatter, mpi_reduce_scatter, MPI_REDUCE_SCATTER, (char *sendbuf, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm, MPI_Fint *ierr));
PN2(void, MPI_Reduce_scatter_block, mpi_reduce_scatter_block, MPI_REDUCE_SCATTER_BLOCK, (char *sendbuf, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm, MPI_Fint *ierr));
PN2(void, MPI_Register_datarep, mpi_register_datarep, MPI_REGISTER_DATAREP, (char *datarep, ompi_mpi2_fortran_datarep_conversion_fn_t *read_conversion_fn, ompi_mpi2_fortran_datarep_conversion_fn_t *write_conversion_fn, ompi_mpi2_fortran_datarep_extent_fn_t *dtype_file_extent_fn, MPI_Aint *extra_state, MPI_Fint *ierr, int datarep_len));
PN2(void, MPI_Request_free, mpi_request_free, MPI_REQUEST_FREE, (MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Request_get_status, mpi_request_get_status, MPI_REQUEST_GET_STATUS, (MPI_Fint *request, ompi_fortran_logical_t *flag, MPI_Fint *status, MPI_Fint *ierr));


@ -0,0 +1,92 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_REDUCE_SCATTER_BLOCK = ompi_reduce_scatter_block_f
#pragma weak pmpi_reduce_scatter_block = ompi_reduce_scatter_block_f
#pragma weak pmpi_reduce_scatter_block_ = ompi_reduce_scatter_block_f
#pragma weak pmpi_reduce_scatter_block__ = ompi_reduce_scatter_block_f
#pragma weak PMPI_Reduce_scatter_block_f = ompi_reduce_scatter_block_f
#pragma weak PMPI_Reduce_scatter_block_f08 = ompi_reduce_scatter_block_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_REDUCE_SCATTER_BLOCK,
pmpi_reduce_scatter_block,
pmpi_reduce_scatter_block_,
pmpi_reduce_scatter_block__,
pompi_reduce_scatter_block_f,
(char *sendbuf, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm, MPI_Fint *ierr),
(sendbuf, recvbuf, recvcounts, datatype, op, comm, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_REDUCE_SCATTER_BLOCK = ompi_reduce_scatter_block_f
#pragma weak mpi_reduce_scatter_block = ompi_reduce_scatter_block_f
#pragma weak mpi_reduce_scatter_block_ = ompi_reduce_scatter_block_f
#pragma weak mpi_reduce_scatter_block__ = ompi_reduce_scatter_block_f
#pragma weak MPI_Reduce_scatter_block_f = ompi_reduce_scatter_block_f
#pragma weak MPI_Reduce_scatter_block_f08 = ompi_reduce_scatter_block_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_REDUCE_SCATTER_BLOCK,
mpi_reduce_scatter_block,
mpi_reduce_scatter_block_,
mpi_reduce_scatter_block__,
ompi_reduce_scatter_block_f,
(char *sendbuf, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm, MPI_Fint *ierr),
(sendbuf, recvbuf, recvcounts, datatype, op, comm, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_reduce_scatter_block_f(char *sendbuf, char *recvbuf,
MPI_Fint *recvcount, MPI_Fint *datatype,
MPI_Fint *op, MPI_Fint *comm, MPI_Fint *ierr)
{
int c_ierr;
MPI_Comm c_comm;
MPI_Datatype c_type;
MPI_Op c_op;
int size;
c_comm = MPI_Comm_f2c(*comm);
c_type = MPI_Type_f2c(*datatype);
c_op = MPI_Op_f2c(*op);
MPI_Comm_size(c_comm, &size);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
c_ierr = MPI_Reduce_scatter_block(sendbuf, recvbuf,
OMPI_FINT_2_INT(*recvcount),
c_type, c_op, c_comm);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
}
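A hedged usage sketch for the new block variant (not part of the commit; names are illustrative): every rank contributes one fixed-size block per destination and receives the element-wise reduction of the block addressed to it:

program rsb_demo
  implicit none
  include 'mpif.h'
  integer, parameter :: blk = 2
  integer :: ierr, rank, nprocs
  integer, allocatable :: sendbuf(:)
  integer :: recvbuf(blk)

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierr)

  ! blk elements destined for each of the nprocs ranks.
  allocate(sendbuf(blk*nprocs))
  sendbuf = rank + 1

  ! Each rank receives the MPI_SUM of the blk-element block addressed to it.
  call MPI_Reduce_scatter_block(sendbuf, recvbuf, blk, MPI_INTEGER, MPI_SUM, &
                                MPI_COMM_WORLD, ierr)

  call MPI_Finalize(ierr)
end program rsb_demo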


@ -123,7 +123,18 @@ mpi_api_files = \
group_size_f08.F90 \
group_translate_ranks_f08.F90 \
group_union_f08.F90 \
iallgather_f08.F90 \
iallgatherv_f08.F90 \
iallreduce_f08.F90 \
ialltoall_f08.F90 \
ialltoallv_f08.F90 \
ialltoallw_f08.F90 \
ibarrier_f08.F90 \
ibcast_f08.F90 \
ibsend_f08.F90 \
iexscan_f08.F90 \
igather_f08.F90 \
igatherv_f08.F90 \
improbe_f08.F90 \
imrecv_f08.F90 \
info_create_f08.F90 \
@ -142,7 +153,13 @@ mpi_api_files = \
intercomm_merge_f08.F90 \
iprobe_f08.F90 \
irecv_f08.F90 \
ireduce_f08.F90 \
ireduce_scatter_f08.F90 \
ireduce_scatter_block_f08.F90 \
irsend_f08.F90 \
iscan_f08.F90 \
iscatter_f08.F90 \
iscatterv_f08.F90 \
isend_f08.F90 \
issend_f08.F90 \
is_thread_main_f08.F90 \
@ -167,6 +184,7 @@ mpi_api_files = \
reduce_f08.F90 \
reduce_local_f08.F90 \
reduce_scatter_f08.F90 \
reduce_scatter_block_f08.F90 \
request_free_f08.F90 \
request_get_status_f08.F90 \
rsend_f08.F90 \
@ -329,11 +347,9 @@ pmpi_api_files = \
profile/padd_error_class_f08.F90 \
profile/padd_error_code_f08.F90 \
profile/padd_error_string_f08.F90 \
profile/pallgather_f08.F90 \
profile/pallgatherv_f08.F90 \
profile/palloc_mem_f08.F90 \
profile/pallreduce_f08.F90 \
profile/palltoall_f08.F90 \
profile/palltoallv_f08.F90 \
profile/palltoallw_f08.F90 \
profile/pbarrier_f08.F90 \
@ -417,7 +433,18 @@ pmpi_api_files = \
profile/pgroup_size_f08.F90 \
profile/pgroup_translate_ranks_f08.F90 \
profile/pgroup_union_f08.F90 \
profile/piallgather_f08.F90 \
profile/piallgatherv_f08.F90 \
profile/piallreduce_f08.F90 \
profile/pialltoall_f08.F90 \
profile/pialltoallv_f08.F90 \
profile/pialltoallw_f08.F90 \
profile/pibarrier_f08.F90 \
profile/pibcast_f08.F90 \
profile/pibsend_f08.F90 \
profile/pigather_f08.F90 \
profile/pigatherv_f08.F90 \
profile/piexscan_f08.F90 \
profile/pimprobe_f08.F90 \
profile/pimrecv_f08.F90 \
profile/pinfo_create_f08.F90 \
@ -436,7 +463,13 @@ pmpi_api_files = \
profile/pintercomm_merge_f08.F90 \
profile/piprobe_f08.F90 \
profile/pirecv_f08.F90 \
profile/pireduce_f08.F90 \
profile/pireduce_scatter_f08.F90 \
profile/pireduce_scatter_block_f08.F90 \
profile/pirsend_f08.F90 \
profile/piscan_f08.F90 \
profile/piscatter_f08.F90 \
profile/piscatterv_f08.F90 \
profile/pisend_f08.F90 \
profile/pissend_f08.F90 \
profile/pis_thread_main_f08.F90 \


@ -0,0 +1,27 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Iallgather_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_iallgather_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_iallgather_f(sendbuf,sendcount,sendtype%MPI_VAL,&
recvbuf,recvcount,recvtype%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Iallgather_f08


@ -0,0 +1,29 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Iallgatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,&
displs,recvtype,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_iallgatherv_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount
INTEGER, INTENT(IN) :: recvcounts(*), displs(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_iallgatherv_f(sendbuf,sendcount,sendtype%MPI_VAL,recvbuf,recvcounts,&
displs,recvtype%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Iallgatherv_f08


@ -0,0 +1,27 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Iallreduce_f08(sendbuf,recvbuf,count,datatype,op,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_iallreduce_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: count
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_iallreduce_f(sendbuf,recvbuf,count,datatype%MPI_VAL,&
op%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Iallreduce_f08


@ -0,0 +1,28 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Ialltoall_f08(sendbuf,sendcount,sendtype,recvbuf,&
recvcount,recvtype,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ialltoall_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ialltoall_f(sendbuf,sendcount,sendtype%MPI_VAL,recvbuf,&
recvcount,recvtype%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Ialltoall_f08


@ -0,0 +1,28 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Ialltoallv_f08(sendbuf,sendcounts,sdispls,sendtype,recvbuf,&
recvcounts,rdispls,recvtype,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ialltoallv_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ialltoallv_f(sendbuf,sendcounts,sdispls,sendtype%MPI_VAL,&
recvbuf,recvcounts,rdispls,recvtype%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Ialltoallv_f08


@ -0,0 +1,28 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Ialltoallw_f08(sendbuf,sendcounts,sdispls,sendtypes,&
recvbuf,recvcounts,rdispls,recvtypes,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ialltoallw_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtypes
TYPE(MPI_Datatype), INTENT(IN) :: recvtypes
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ialltoallw_f(sendbuf,sendcounts,sdispls,sendtypes%MPI_VAL,&
recvbuf,recvcounts,rdispls,recvtypes%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Ialltoallw_f08


@ -0,0 +1,20 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
subroutine MPI_Ibarrier_f08(comm,request,ierror)
use :: mpi_f08_types, only : MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ibarrier_f
implicit none
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ibarrier_f(comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Ibarrier_f08
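From the caller's side the mpi_f08 binding above uses derived-type handles; a minimal sketch (illustrative, not part of the commit):

program ibarrier_f08_demo
  use mpi_f08
  implicit none
  type(MPI_Request) :: request
  type(MPI_Status)  :: status
  integer :: ierror

  call MPI_Init(ierror)
  ! Start the barrier, overlap independent local work, then complete it.
  call MPI_Ibarrier(MPI_COMM_WORLD, request, ierror)
  ! ... independent local work could go here ...
  call MPI_Wait(request, status, ierror)
  call MPI_Finalize(ierror)
end program ibarrier_f08_demo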

ompi/mpi/fortran/use-mpi-f08/ibcast_f08.F90 (new file)

@ -0,0 +1,25 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Ibcast_f08(buffer,count,datatype,root,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ibcast_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: buffer
INTEGER, INTENT(IN) :: count, root
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ibcast_f(buffer,count,datatype%MPI_VAL,root,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Ibcast_f08


@ -0,0 +1,27 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Iexscan_f08(sendbuf,recvbuf,count,datatype,op,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_iexscan_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: count
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_iexscan_f(sendbuf,recvbuf,count,datatype%MPI_VAL,&
op%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Iexscan_f08


@ -0,0 +1,28 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Igather_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,&
recvtype,root,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_igather_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount, root
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_igather_f(sendbuf,sendcount,sendtype%MPI_VAL,recvbuf,recvcount,&
recvtype%MPI_VAL,root,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Igather_f08


@ -0,0 +1,29 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Igatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,&
displs,recvtype,root,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_igatherv_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, root
INTEGER, INTENT(IN) :: recvcounts(*), displs(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_igatherv_f(sendbuf,sendcount,sendtype%MPI_VAL,recvbuf,recvcounts,&
displs,recvtype%MPI_VAL,root,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Igatherv_f08


@ -0,0 +1,27 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Ireduce_f08(sendbuf,recvbuf,count,datatype,op,root,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ireduce_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: count, root
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ireduce_f(sendbuf,recvbuf,count,datatype%MPI_VAL,&
op%MPI_VAL,root,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Ireduce_f08


@ -0,0 +1,27 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Ireduce_scatter_block_f08(sendbuf,recvbuf,recvcount,datatype,op,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ireduce_scatter_block_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: recvcount
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ireduce_scatter_block_f(sendbuf,recvbuf,recvcount,&
datatype%MPI_VAL,op%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Ireduce_scatter_block_f08


@ -0,0 +1,27 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Ireduce_scatter_f08(sendbuf,recvbuf,recvcounts,datatype,op,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ireduce_scatter_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: recvcounts(*)
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ireduce_scatter_f(sendbuf,recvbuf,recvcounts,datatype%MPI_VAL,&
op%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Ireduce_scatter_f08

ompi/mpi/fortran/use-mpi-f08/iscan_f08.F90 (new file)

@ -0,0 +1,27 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Iscan_f08(sendbuf,recvbuf,count,datatype,op,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_iscan_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: count
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_iscan_f(sendbuf,recvbuf,count,datatype%MPI_VAL,&
op%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Iscan_f08


@ -0,0 +1,28 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Iscatter_f08(sendbuf,sendcount,sendtype,recvbuf,&
recvcount,recvtype,root,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_iscatter_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount, root
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_iscatter_f(sendbuf,sendcount,sendtype%MPI_VAL,recvbuf,recvcount,&
recvtype%MPI_VAL,root,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Iscatter_f08


@ -0,0 +1,29 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Iscatterv_f08(sendbuf,sendcounts,displs,sendtype,recvbuf,&
recvcount,recvtype,root,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_iscatterv_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: recvcount, root
INTEGER, INTENT(IN) :: sendcounts(*), displs(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_iscatterv_f(sendbuf,sendcounts,displs,sendtype%MPI_VAL,recvbuf,&
recvcount,recvtype%MPI_VAL,root,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Iscatterv_f08


@ -679,6 +679,19 @@ subroutine ompi_allgather_f(sendbuf,sendcount,sendtype,recvbuf, &
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_allgather_f
subroutine ompi_iallgather_f(sendbuf,sendcount,sendtype,recvbuf, &
recvcount,recvtype,comm,request,ierror) &
BIND(C, name="ompi_iallgather_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
INTEGER, INTENT(IN) :: sendtype
INTEGER, INTENT(IN) :: recvtype
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_iallgather_f
subroutine ompi_allgatherv_f(sendbuf,sendcount,sendtype,recvbuf, &
recvcounts,displs,recvtype,comm,ierror) &
BIND(C, name="ompi_allgatherv_f")
@ -692,6 +705,20 @@ subroutine ompi_allgatherv_f(sendbuf,sendcount,sendtype,recvbuf, &
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_allgatherv_f
subroutine ompi_iallgatherv_f(sendbuf,sendcount,sendtype,recvbuf, &
recvcounts,displs,recvtype,comm,request,ierror) &
BIND(C, name="ompi_iallgatherv_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount
INTEGER, INTENT(IN) :: recvcounts(*), displs(*)
INTEGER, INTENT(IN) :: sendtype
INTEGER, INTENT(IN) :: recvtype
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_iallgatherv_f
subroutine ompi_allreduce_f(sendbuf,recvbuf,count,datatype,op,comm,ierror) &
BIND(C, name="ompi_allreduce_f")
implicit none
@ -703,6 +730,18 @@ subroutine ompi_allreduce_f(sendbuf,recvbuf,count,datatype,op,comm,ierror) &
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_allreduce_f
subroutine ompi_iallreduce_f(sendbuf,recvbuf,count,datatype,op,comm,request,ierror) &
BIND(C, name="ompi_iallreduce_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: count
INTEGER, INTENT(IN) :: datatype
INTEGER, INTENT(IN) :: op
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_iallreduce_f
subroutine ompi_alltoall_f(sendbuf,sendcount,sendtype,recvbuf, &
recvcount,recvtype,comm,ierror) &
BIND(C, name="ompi_alltoall_f")
@ -715,6 +754,19 @@ subroutine ompi_alltoall_f(sendbuf,sendcount,sendtype,recvbuf, &
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_alltoall_f
subroutine ompi_ialltoall_f(sendbuf,sendcount,sendtype,recvbuf, &
recvcount,recvtype,comm,request,ierror) &
BIND(C, name="ompi_ialltoall_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
INTEGER, INTENT(IN) :: sendtype
INTEGER, INTENT(IN) :: recvtype
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_ialltoall_f
subroutine ompi_alltoallv_f(sendbuf,sendcounts,sdispls,sendtype, &
recvbuf,recvcounts,rdispls,recvtype,comm,ierror) &
BIND(C, name="ompi_alltoallv_f")
@ -727,6 +779,19 @@ subroutine ompi_alltoallv_f(sendbuf,sendcounts,sdispls,sendtype, &
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_alltoallv_f
subroutine ompi_ialltoallv_f(sendbuf,sendcounts,sdispls,sendtype, &
recvbuf,recvcounts,rdispls,recvtype,comm,request,ierror) &
BIND(C, name="ompi_ialltoallv_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), rdispls(*)
INTEGER, INTENT(IN) :: sendtype
INTEGER, INTENT(IN) :: recvtype
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_ialltoallv_f
subroutine ompi_alltoallw_f(sendbuf,sendcounts,sdispls,sendtypes, &
recvbuf,recvcounts,rdispls,recvtypes,comm,ierror) &
BIND(C, name="ompi_alltoallw_f")
@ -739,14 +804,35 @@ subroutine ompi_alltoallw_f(sendbuf,sendcounts,sdispls,sendtypes, &
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_alltoallw_f
subroutine ompi_Barrier_f(comm,ierror) &
subroutine ompi_ialltoallw_f(sendbuf,sendcounts,sdispls,sendtypes, &
recvbuf,recvcounts,rdispls,recvtypes,comm,request,ierror) &
BIND(C, name="ompi_ialltoallw_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), rdispls(*)
INTEGER, INTENT(IN) :: sendtypes
INTEGER, INTENT(IN) :: recvtypes
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_ialltoallw_f
subroutine ompi_barrier_f(comm,ierror) &
BIND(C, name="ompi_barrier_f")
implicit none
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_Barrier_f
end subroutine ompi_barrier_f
subroutine ompi_Bcast_f(buffer,count,datatype,root,comm,ierror) &
subroutine ompi_ibarrier_f(comm,request,ierror) &
BIND(C, name="ompi_ibarrier_f")
implicit none
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_ibarrier_f
subroutine ompi_bcast_f(buffer,count,datatype,root,comm,ierror) &
BIND(C, name="ompi_bcast_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: buffer
@ -754,7 +840,18 @@ subroutine ompi_Bcast_f(buffer,count,datatype,root,comm,ierror) &
INTEGER, INTENT(IN) :: datatype
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_Bcast_f
end subroutine ompi_bcast_f
subroutine ompi_ibcast_f(buffer,count,datatype,root,comm,request,ierror) &
BIND(C, name="ompi_ibcast_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: buffer
INTEGER, INTENT(IN) :: count, root
INTEGER, INTENT(IN) :: datatype
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_ibcast_f
subroutine ompi_exscan_f(sendbuf,recvbuf,count,datatype,op,comm,ierror) &
BIND(C, name="ompi_exscan_f")
@ -767,6 +864,18 @@ subroutine ompi_exscan_f(sendbuf,recvbuf,count,datatype,op,comm,ierror) &
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_exscan_f
subroutine ompi_iexscan_f(sendbuf,recvbuf,count,datatype,op,comm,request,ierror) &
BIND(C, name="ompi_iexscan_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: count
INTEGER, INTENT(IN) :: datatype
INTEGER, INTENT(IN) :: op
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_iexscan_f
subroutine ompi_gather_f(sendbuf,sendcount,sendtype,recvbuf, &
recvcount,recvtype,root,comm,ierror) &
BIND(C, name="ompi_gather_f")
@ -779,6 +888,19 @@ subroutine ompi_gather_f(sendbuf,sendcount,sendtype,recvbuf, &
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_gather_f
subroutine ompi_igather_f(sendbuf,sendcount,sendtype,recvbuf, &
recvcount,recvtype,root,comm,request,ierror) &
BIND(C, name="ompi_igather_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount, root
INTEGER, INTENT(IN) :: sendtype
INTEGER, INTENT(IN) :: recvtype
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_igather_f
subroutine ompi_gatherv_f(sendbuf,sendcount,sendtype,recvbuf, &
recvcounts,displs,recvtype,root,comm,ierror) &
BIND(C, name="ompi_gatherv_f")
@ -792,6 +914,20 @@ subroutine ompi_gatherv_f(sendbuf,sendcount,sendtype,recvbuf, &
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_gatherv_f
subroutine ompi_igatherv_f(sendbuf,sendcount,sendtype,recvbuf, &
recvcounts,displs,recvtype,root,comm,request,ierror) &
BIND(C, name="ompi_igatherv_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, root
INTEGER, INTENT(IN) :: recvcounts(*), displs(*)
INTEGER, INTENT(IN) :: sendtype
INTEGER, INTENT(IN) :: recvtype
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_igatherv_f
subroutine ompi_op_commutative_f(op,commute,ierror) &
BIND(C, name="ompi_op_commutative_f")
implicit none
@ -827,6 +963,18 @@ subroutine ompi_reduce_f(sendbuf,recvbuf,count,datatype,op,root,comm,ierror) &
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_reduce_f
subroutine ompi_ireduce_f(sendbuf,recvbuf,count,datatype,op,root,comm,request,ierror) &
BIND(C, name="ompi_ireduce_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: count, root
INTEGER, INTENT(IN) :: datatype
INTEGER, INTENT(IN) :: op
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_ireduce_f
subroutine ompi_reduce_local_f(inbuf,inoutbuf,count,datatype,op,ierror) &
BIND(C, name="ompi_reduce_local_f")
implicit none
@ -849,6 +997,19 @@ subroutine ompi_reduce_scatter_f(sendbuf,recvbuf,recvcounts, &
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_reduce_scatter_f
subroutine ompi_ireduce_scatter_f(sendbuf,recvbuf,recvcounts, &
datatype,op,comm,request,ierror) &
BIND(C, name="ompi_ireduce_scatter_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: recvcounts(*)
INTEGER, INTENT(IN) :: datatype
INTEGER, INTENT(IN) :: op
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_ireduce_scatter_f
subroutine ompi_reduce_scatter_block_f(sendbuf,recvbuf,recvcount, &
datatype,op,comm,ierror) &
BIND(C, name="ompi_reduce_scatter_block_f")
@ -861,6 +1022,19 @@ subroutine ompi_reduce_scatter_block_f(sendbuf,recvbuf,recvcount, &
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_reduce_scatter_block_f
subroutine ompi_ireduce_scatter_block_f(sendbuf,recvbuf,recvcount, &
datatype,op,comm,request,ierror) &
BIND(C, name="ompi_ireduce_scatter_block_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: recvcount
INTEGER, INTENT(IN) :: datatype
INTEGER, INTENT(IN) :: op
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_ireduce_scatter_block_f
subroutine ompi_scan_f(sendbuf,recvbuf,count,datatype,op,comm,ierror) &
BIND(C, name="ompi_scan_f")
implicit none
@ -872,6 +1046,18 @@ subroutine ompi_scan_f(sendbuf,recvbuf,count,datatype,op,comm,ierror) &
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_scan_f
subroutine ompi_iscan_f(sendbuf,recvbuf,count,datatype,op,comm,request,ierror) &
BIND(C, name="ompi_iscan_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: count
INTEGER, INTENT(IN) :: datatype
INTEGER, INTENT(IN) :: op
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_iscan_f
subroutine ompi_scatter_f(sendbuf,sendcount,sendtype,recvbuf, &
recvcount,recvtype,root,comm,ierror) &
BIND(C, name="ompi_scatter_f")
@ -884,6 +1070,19 @@ subroutine ompi_scatter_f(sendbuf,sendcount,sendtype,recvbuf, &
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_scatter_f
subroutine ompi_iscatter_f(sendbuf,sendcount,sendtype,recvbuf, &
recvcount,recvtype,root,comm,request,ierror) &
BIND(C, name="ompi_iscatter_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount, root
INTEGER, INTENT(IN) :: sendtype
INTEGER, INTENT(IN) :: recvtype
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_iscatter_f
subroutine ompi_scatterv_f(sendbuf,sendcounts,displs,sendtype, &
recvbuf,recvcount,recvtype,root,comm,ierror) &
BIND(C, name="ompi_scatterv_f")
@ -897,6 +1096,20 @@ subroutine ompi_scatterv_f(sendbuf,sendcounts,displs,sendtype, &
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_scatterv_f
subroutine ompi_iscatterv_f(sendbuf,sendcounts,displs,sendtype, &
recvbuf,recvcount,recvtype,root,comm,request,ierror) &
BIND(C, name="ompi_iscatterv_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: recvcount, root
INTEGER, INTENT(IN) :: sendcounts(*), displs(*)
INTEGER, INTENT(IN) :: sendtype
INTEGER, INTENT(IN) :: recvtype
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine ompi_iscatterv_f
subroutine ompi_comm_compare_f(comm1,comm2,result,ierror) &
BIND(C, name="ompi_comm_compare_f")
implicit none

View file

@ -679,6 +679,19 @@ subroutine pompi_allgather_f(sendbuf,sendcount,sendtype,recvbuf, &
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_allgather_f
subroutine pompi_iallgather_f(sendbuf,sendcount,sendtype,recvbuf, &
recvcount,recvtype,comm,request,ierror) &
BIND(C, name="pompi_iallgather_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
INTEGER, INTENT(IN) :: sendtype
INTEGER, INTENT(IN) :: recvtype
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_iallgather_f
subroutine pompi_allgatherv_f(sendbuf,sendcount,sendtype,recvbuf, &
recvcounts,displs,recvtype,comm,ierror) &
BIND(C, name="pompi_allgatherv_f")
@ -692,6 +705,20 @@ subroutine pompi_allgatherv_f(sendbuf,sendcount,sendtype,recvbuf, &
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_allgatherv_f
subroutine pompi_iallgatherv_f(sendbuf,sendcount,sendtype,recvbuf, &
recvcounts,displs,recvtype,comm,request,ierror) &
BIND(C, name="pompi_iallgatherv_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount
INTEGER, INTENT(IN) :: recvcounts(*), displs(*)
INTEGER, INTENT(IN) :: sendtype
INTEGER, INTENT(IN) :: recvtype
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_iallgatherv_f
subroutine pompi_allreduce_f(sendbuf,recvbuf,count,datatype,op,comm,ierror) &
BIND(C, name="pompi_allreduce_f")
implicit none
@ -703,6 +730,18 @@ subroutine pompi_allreduce_f(sendbuf,recvbuf,count,datatype,op,comm,ierror) &
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_allreduce_f
subroutine pompi_iallreduce_f(sendbuf,recvbuf,count,datatype,op,comm,request,ierror) &
BIND(C, name="pompi_iallreduce_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: count
INTEGER, INTENT(IN) :: datatype
INTEGER, INTENT(IN) :: op
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_iallreduce_f
subroutine pompi_alltoall_f(sendbuf,sendcount,sendtype,recvbuf, &
recvcount,recvtype,comm,ierror) &
BIND(C, name="pompi_alltoall_f")
@ -715,6 +754,19 @@ subroutine pompi_alltoall_f(sendbuf,sendcount,sendtype,recvbuf, &
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_alltoall_f
subroutine pompi_ialltoall_f(sendbuf,sendcount,sendtype,recvbuf, &
recvcount,recvtype,comm,request,ierror) &
BIND(C, name="pompi_ialltoall_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
INTEGER, INTENT(IN) :: sendtype
INTEGER, INTENT(IN) :: recvtype
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_ialltoall_f
subroutine pompi_alltoallv_f(sendbuf,sendcounts,sdispls,sendtype, &
recvbuf,recvcounts,rdispls,recvtype,comm,ierror) &
BIND(C, name="pompi_alltoallv_f")
@ -727,6 +779,19 @@ subroutine pompi_alltoallv_f(sendbuf,sendcounts,sdispls,sendtype, &
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_alltoallv_f
subroutine pompi_ialltoallv_f(sendbuf,sendcounts,sdispls,sendtype, &
recvbuf,recvcounts,rdispls,recvtype,comm,request,ierror) &
BIND(C, name="pompi_ialltoallv_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), rdispls(*)
INTEGER, INTENT(IN) :: sendtype
INTEGER, INTENT(IN) :: recvtype
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_ialltoallv_f
subroutine pompi_alltoallw_f(sendbuf,sendcounts,sdispls,sendtypes, &
recvbuf,recvcounts,rdispls,recvtypes,comm,ierror) &
BIND(C, name="pompi_alltoallw_f")
@ -739,14 +804,35 @@ subroutine pompi_alltoallw_f(sendbuf,sendcounts,sdispls,sendtypes, &
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_alltoallw_f
subroutine pompi_Barrier_f(comm,ierror) &
subroutine pompi_ialltoallw_f(sendbuf,sendcounts,sdispls,sendtypes, &
recvbuf,recvcounts,rdispls,recvtypes,comm,request,ierror) &
BIND(C, name="pompi_ialltoallw_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), rdispls(*)
INTEGER, INTENT(IN) :: sendtypes
INTEGER, INTENT(IN) :: recvtypes
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_ialltoallw_f
subroutine pompi_barrier_f(comm,ierror) &
BIND(C, name="pompi_barrier_f")
implicit none
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_Barrier_f
end subroutine pompi_barrier_f
subroutine pompi_Bcast_f(buffer,count,datatype,root,comm,ierror) &
subroutine pompi_ibarrier_f(comm,request,ierror) &
BIND(C, name="pompi_ibarrier_f")
implicit none
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_ibarrier_f
subroutine pompi_bcast_f(buffer,count,datatype,root,comm,ierror) &
BIND(C, name="pompi_bcast_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: buffer
@ -754,7 +840,18 @@ subroutine pompi_Bcast_f(buffer,count,datatype,root,comm,ierror) &
INTEGER, INTENT(IN) :: datatype
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_Bcast_f
end subroutine pompi_bcast_f
subroutine pompi_ibcast_f(buffer,count,datatype,root,comm,request,ierror) &
BIND(C, name="pompi_ibcast_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: buffer
INTEGER, INTENT(IN) :: count, root
INTEGER, INTENT(IN) :: datatype
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_ibcast_f
subroutine pompi_exscan_f(sendbuf,recvbuf,count,datatype,op,comm,ierror) &
BIND(C, name="pompi_exscan_f")
@ -767,6 +864,18 @@ subroutine pompi_exscan_f(sendbuf,recvbuf,count,datatype,op,comm,ierror) &
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_exscan_f
subroutine pompi_iexscan_f(sendbuf,recvbuf,count,datatype,op,comm,request,ierror) &
BIND(C, name="pompi_iexscan_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: count
INTEGER, INTENT(IN) :: datatype
INTEGER, INTENT(IN) :: op
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_iexscan_f
subroutine pompi_gather_f(sendbuf,sendcount,sendtype,recvbuf, &
recvcount,recvtype,root,comm,ierror) &
BIND(C, name="pompi_gather_f")
@ -779,6 +888,19 @@ subroutine pompi_gather_f(sendbuf,sendcount,sendtype,recvbuf, &
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_gather_f
subroutine pompi_igather_f(sendbuf,sendcount,sendtype,recvbuf, &
recvcount,recvtype,root,comm,request,ierror) &
BIND(C, name="pompi_igather_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount, root
INTEGER, INTENT(IN) :: sendtype
INTEGER, INTENT(IN) :: recvtype
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_igather_f
subroutine pompi_gatherv_f(sendbuf,sendcount,sendtype,recvbuf, &
recvcounts,displs,recvtype,root,comm,ierror) &
BIND(C, name="pompi_gatherv_f")
@ -792,6 +914,20 @@ subroutine pompi_gatherv_f(sendbuf,sendcount,sendtype,recvbuf, &
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_gatherv_f
subroutine pompi_igatherv_f(sendbuf,sendcount,sendtype,recvbuf, &
recvcounts,displs,recvtype,root,comm,request,ierror) &
BIND(C, name="pompi_igatherv_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, root
INTEGER, INTENT(IN) :: recvcounts(*), displs(*)
INTEGER, INTENT(IN) :: sendtype
INTEGER, INTENT(IN) :: recvtype
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_igatherv_f
subroutine pompi_op_commutative_f(op,commute,ierror) &
BIND(C, name="pompi_op_commutative_f")
implicit none
@ -827,6 +963,18 @@ subroutine pompi_reduce_f(sendbuf,recvbuf,count,datatype,op,root,comm,ierror) &
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_reduce_f
subroutine pompi_ireduce_f(sendbuf,recvbuf,count,datatype,op,root,comm,request,ierror) &
BIND(C, name="pompi_ireduce_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: count, root
INTEGER, INTENT(IN) :: datatype
INTEGER, INTENT(IN) :: op
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_ireduce_f
subroutine pompi_reduce_local_f(inbuf,inoutbuf,count,datatype,op,ierror) &
BIND(C, name="pompi_reduce_local_f")
implicit none
@ -849,6 +997,19 @@ subroutine pompi_reduce_scatter_f(sendbuf,recvbuf,recvcounts, &
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_reduce_scatter_f
subroutine pompi_ireduce_scatter_f(sendbuf,recvbuf,recvcounts, &
datatype,op,comm,request,ierror) &
BIND(C, name="pompi_ireduce_scatter_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: recvcounts(*)
INTEGER, INTENT(IN) :: datatype
INTEGER, INTENT(IN) :: op
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_ireduce_scatter_f
subroutine pompi_reduce_scatter_block_f(sendbuf,recvbuf,recvcount, &
datatype,op,comm,ierror) &
BIND(C, name="pompi_reduce_scatter_block_f")
@ -861,6 +1022,19 @@ subroutine pompi_reduce_scatter_block_f(sendbuf,recvbuf,recvcount, &
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_reduce_scatter_block_f
subroutine pompi_ireduce_scatter_block_f(sendbuf,recvbuf,recvcount, &
datatype,op,comm,request,ierror) &
BIND(C, name="pompi_ireduce_scatter_block_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: recvcount
INTEGER, INTENT(IN) :: datatype
INTEGER, INTENT(IN) :: op
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_ireduce_scatter_block_f
subroutine pompi_scan_f(sendbuf,recvbuf,count,datatype,op,comm,ierror) &
BIND(C, name="pompi_scan_f")
implicit none
@ -872,6 +1046,18 @@ subroutine pompi_scan_f(sendbuf,recvbuf,count,datatype,op,comm,ierror) &
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_scan_f
subroutine pompi_iscan_f(sendbuf,recvbuf,count,datatype,op,comm,request,ierror) &
BIND(C, name="pompi_iscan_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: count
INTEGER, INTENT(IN) :: datatype
INTEGER, INTENT(IN) :: op
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_iscan_f
subroutine pompi_scatter_f(sendbuf,sendcount,sendtype,recvbuf, &
recvcount,recvtype,root,comm,ierror) &
BIND(C, name="pompi_scatter_f")
@ -884,6 +1070,19 @@ subroutine pompi_scatter_f(sendbuf,sendcount,sendtype,recvbuf, &
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_scatter_f
subroutine pompi_iscatter_f(sendbuf,sendcount,sendtype,recvbuf, &
recvcount,recvtype,root,comm,request,ierror) &
BIND(C, name="pompi_iscatter_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount, root
INTEGER, INTENT(IN) :: sendtype
INTEGER, INTENT(IN) :: recvtype
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_iscatter_f
subroutine pompi_scatterv_f(sendbuf,sendcounts,displs,sendtype, &
recvbuf,recvcount,recvtype,root,comm,ierror) &
BIND(C, name="pompi_scatterv_f")
@ -897,6 +1096,20 @@ subroutine pompi_scatterv_f(sendbuf,sendcounts,displs,sendtype, &
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_scatterv_f
subroutine pompi_iscatterv_f(sendbuf,sendcounts,displs,sendtype, &
recvbuf,recvcount,recvtype,root,comm,request,ierror) &
BIND(C, name="pompi_iscatterv_f")
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: recvcount, root
INTEGER, INTENT(IN) :: sendcounts(*), displs(*)
INTEGER, INTENT(IN) :: sendtype
INTEGER, INTENT(IN) :: recvtype
INTEGER, INTENT(IN) :: comm
INTEGER, INTENT(OUT) :: request
INTEGER, INTENT(OUT) :: ierror
end subroutine pompi_iscatterv_f
subroutine pompi_comm_compare_f(comm1,comm2,result,ierror) &
BIND(C, name="pompi_comm_compare_f")
implicit none

View file

@ -0,0 +1,27 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Iallgather_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_iallgather_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_iallgather_f(sendbuf,sendcount,sendtype%MPI_VAL,&
recvbuf,recvcount,recvtype%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Iallgather_f08

View file

@ -0,0 +1,29 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Iallgatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,&
displs,recvtype,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_iallgatherv_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount
INTEGER, INTENT(IN) :: recvcounts(*), displs(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_iallgatherv_f(sendbuf,sendcount,sendtype%MPI_VAL,recvbuf,recvcounts,&
displs,recvtype%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Iallgatherv_f08

View file

@ -0,0 +1,27 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Iallreduce_f08(sendbuf,recvbuf,count,datatype,op,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_iallreduce_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: count
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_iallreduce_f(sendbuf,recvbuf,count,datatype%MPI_VAL,&
op%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Iallreduce_f08

View file

@ -0,0 +1,28 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Ialltoall_f08(sendbuf,sendcount,sendtype,recvbuf,&
recvcount,recvtype,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ialltoall_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ialltoall_f(sendbuf,sendcount,sendtype%MPI_VAL,recvbuf,&
recvcount,recvtype%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Ialltoall_f08

View file

@ -0,0 +1,28 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Ialltoallv_f08(sendbuf,sendcounts,sdispls,sendtype,recvbuf,&
recvcounts,rdispls,recvtype,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ialltoallv_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ialltoallv_f(sendbuf,sendcounts,sdispls,sendtype%MPI_VAL,&
recvbuf,recvcounts,rdispls,recvtype%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Ialltoallv_f08

View file

@ -0,0 +1,28 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Ialltoallw_f08(sendbuf,sendcounts,sdispls,sendtypes,&
recvbuf,recvcounts,rdispls,recvtypes,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ialltoallw_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtypes
TYPE(MPI_Datatype), INTENT(IN) :: recvtypes
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ialltoallw_f(sendbuf,sendcounts,sdispls,sendtypes%MPI_VAL,&
recvbuf,recvcounts,rdispls,recvtypes%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Ialltoallw_f08

View file

@ -0,0 +1,20 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
subroutine PMPI_Ibarrier_f08(comm,request,ierror)
use :: mpi_f08_types, only : MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ibarrier_f
implicit none
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ibarrier_f(comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Ibarrier_f08

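The PMPI_* entry points added here are what let profiling layers interpose on the new nonblocking collectives. As a reminder of the pattern (an illustrative sketch, not part of this commit), a tool supplies its own MPI_Ibarrier for the integer-handle bindings and forwards to the PMPI name:

subroutine MPI_Ibarrier(comm, request, ierr)
    implicit none
    integer, intent(in) :: comm
    integer, intent(out) :: request
    integer, intent(out) :: ierr
    ! Tool-side bookkeeping (timers, counters, ...) would go here.
    call PMPI_Ibarrier(comm, request, ierr)
end subroutine MPI_Ibarrier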
View file

@ -0,0 +1,25 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Ibcast_f08(buffer,count,datatype,root,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ibcast_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: buffer
INTEGER, INTENT(IN) :: count, root
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ibcast_f(buffer,count,datatype%MPI_VAL,root,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Ibcast_f08

View file

@ -0,0 +1,27 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Iexscan_f08(sendbuf,recvbuf,count,datatype,op,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_iexscan_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: count
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_iexscan_f(sendbuf,recvbuf,count,datatype%MPI_VAL,&
op%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Iexscan_f08

View file

@ -0,0 +1,28 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Igather_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,&
recvtype,root,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_igather_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount, root
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_igather_f(sendbuf,sendcount,sendtype%MPI_VAL,recvbuf,recvcount,&
recvtype%MPI_VAL,root,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Igather_f08

View file

@ -0,0 +1,29 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Igatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,&
displs,recvtype,root,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_igatherv_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, root
INTEGER, INTENT(IN) :: recvcounts(*), displs(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_igatherv_f(sendbuf,sendcount,sendtype%MPI_VAL,recvbuf,recvcounts,&
displs,recvtype%MPI_VAL,root,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Igatherv_f08

View file

@ -0,0 +1,27 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Ireduce_f08(sendbuf,recvbuf,count,datatype,op,root,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ireduce_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: count, root
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ireduce_f(sendbuf,recvbuf,count,datatype%MPI_VAL,&
op%MPI_VAL,root,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Ireduce_f08

View file

@ -0,0 +1,27 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Ireduce_scatter_block_f08(sendbuf,recvbuf,recvcount,datatype,op,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ireduce_scatter_block_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: recvcount
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ireduce_scatter_block_f(sendbuf,recvbuf,recvcount,&
datatype%MPI_VAL,op%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Ireduce_scatter_block_f08

View file

@ -0,0 +1,27 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Ireduce_scatter_f08(sendbuf,recvbuf,recvcounts,datatype,op,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ireduce_scatter_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: recvcounts(*)
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ireduce_scatter_f(sendbuf,recvbuf,recvcounts,datatype%MPI_VAL,&
op%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Ireduce_scatter_f08

View file

@ -0,0 +1,27 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Iscan_f08(sendbuf,recvbuf,count,datatype,op,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Op, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_iscan_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: count
TYPE(MPI_Datatype), INTENT(IN) :: datatype
TYPE(MPI_Op), INTENT(IN) :: op
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_iscan_f(sendbuf,recvbuf,count,datatype%MPI_VAL,&
op%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Iscan_f08

View file

@ -0,0 +1,28 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Iscatter_f08(sendbuf,sendcount,sendtype,recvbuf,&
recvcount,recvtype,root,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_iscatter_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount, root
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_iscatter_f(sendbuf,sendcount,sendtype%MPI_VAL,recvbuf,recvcount,&
recvtype%MPI_VAL,root,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Iscatter_f08

View file

@ -0,0 +1,29 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2012 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Iscatterv_f08(sendbuf,sendcounts,displs,sendtype,recvbuf,&
recvcount,recvtype,root,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_iscatterv_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: recvcount, root
INTEGER, INTENT(IN) :: sendcounts(*), displs(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_iscatterv_f(sendbuf,sendcounts,displs,sendtype%MPI_VAL,recvbuf,&
recvcount,recvtype%MPI_VAL,root,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Iscatterv_f08

View file

@ -1349,6 +1349,177 @@ end subroutine MPI_Group_union
end interface
interface MPI_Iallgather
subroutine MPI_Iallgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
recvtype, comm, request, ierr)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
integer, intent(in) :: sendcount
integer, intent(in) :: sendtype
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, intent(in) :: recvcount
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
end subroutine MPI_Iallgather
end interface
interface MPI_Iallgatherv
subroutine MPI_Iallgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, &
displs, recvtype, comm, request, ierr)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
integer, intent(in) :: sendcount
integer, intent(in) :: sendtype
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer, dimension(*), intent(in) :: displs
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
end subroutine MPI_Iallgatherv
end interface
interface MPI_Iallreduce
subroutine MPI_Iallreduce(sendbuf, recvbuf, count, datatype, op, &
comm, request, ierr)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, intent(in) :: count
integer, intent(in) :: datatype
integer, intent(in) :: op
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
end subroutine MPI_Iallreduce
end interface
interface MPI_Ialltoall
subroutine MPI_Ialltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
recvtype, comm, request, ierr)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
integer, intent(in) :: sendcount
integer, intent(in) :: sendtype
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, intent(in) :: recvcount
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
end subroutine MPI_Ialltoall
end interface
interface MPI_Ialltoallv
subroutine MPI_Ialltoallv(sendbuf, sendcounts, sdispls, sendtype, recvbuf, &
recvcounts, rdispls, recvtype, comm, request, ierr)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
integer, dimension(*), intent(in) :: sendcounts
integer, dimension(*), intent(in) :: sdispls
integer, intent(in) :: sendtype
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer, dimension(*), intent(in) :: rdispls
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
end subroutine MPI_Ialltoallv
end interface
interface MPI_Ialltoallw
subroutine MPI_Ialltoallw(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, &
recvcounts, rdispls, recvtypes, comm, request, ierr)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
integer, dimension(*), intent(in) :: sendcounts
integer, dimension(*), intent(in) :: sdispls
integer, dimension(*), intent(in) :: sendtypes
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer, dimension(*), intent(in) :: rdispls
integer, dimension(*), intent(in) :: recvtypes
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
end subroutine MPI_Ialltoallw
end interface
interface MPI_Ibarrier
subroutine MPI_Ibarrier(comm, request, ierr)
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
end subroutine MPI_Ibarrier
end interface
interface MPI_Ibcast
subroutine MPI_Ibcast(buffer, count, datatype, root, comm, &
        request, ierr)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ buffer
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: buffer
integer, intent(in) :: count
integer, intent(in) :: datatype
integer, intent(in) :: root
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
end subroutine MPI_Ibcast
end interface
interface MPI_Iexscan
subroutine MPI_Iexscan(sendbuf, recvbuf, count, datatype, op, &
comm, request, ierr)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, intent(in) :: count
integer, intent(in) :: datatype
integer, intent(in) :: op
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
end subroutine MPI_Iexscan
end interface
interface MPI_Ibsend
subroutine MPI_Ibsend(buf, count, datatype, dest, tag, &
@ -1367,6 +1538,49 @@ end subroutine MPI_Ibsend
end interface
interface MPI_Igather
subroutine MPI_Igather(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
recvtype, root, comm, request, ierr)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
integer, intent(in) :: sendcount
integer, intent(in) :: sendtype
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, intent(in) :: recvcount
integer, intent(in) :: recvtype
integer, intent(in) :: root
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
end subroutine MPI_Igather
end interface
interface MPI_Igatherv
subroutine MPI_Igatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, &
displs, recvtype, root, comm, request, ierr)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
integer, intent(in) :: sendcount
integer, intent(in) :: sendtype
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer, dimension(*), intent(in) :: displs
integer, intent(in) :: recvtype
integer, intent(in) :: root
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
end subroutine MPI_Igatherv
end interface
interface MPI_Improbe
subroutine MPI_Improbe(source, tag, comm, flag, message, status, ierr)
@ -1595,6 +1809,64 @@ end subroutine MPI_Irecv
end interface
interface MPI_Ireduce
subroutine MPI_Ireduce(sendbuf, recvbuf, count, datatype, op, &
root, comm, request, ierr)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, intent(in) :: count
integer, intent(in) :: datatype
integer, intent(in) :: op
integer, intent(in) :: root
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
end subroutine MPI_Ireduce
end interface
interface MPI_Ireduce_scatter
subroutine MPI_Ireduce_scatter(sendbuf, recvbuf, recvcounts, datatype, op, &
comm, request, ierr)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer, intent(in) :: datatype
integer, intent(in) :: op
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
end subroutine MPI_Ireduce_scatter
end interface
interface MPI_Ireduce_scatter_block
subroutine MPI_Ireduce_scatter_block(sendbuf, recvbuf, recvcount, datatype, op, &
comm, request, ierr)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, intent(in) :: recvcount
integer, intent(in) :: datatype
integer, intent(in) :: op
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
end subroutine MPI_Ireduce_scatter_block
end interface
interface MPI_Irsend
subroutine MPI_Irsend(buf, count, datatype, dest, tag, &
@ -1623,6 +1895,68 @@ end subroutine MPI_Is_thread_main
end interface
interface MPI_Iscan
subroutine MPI_Iscan(sendbuf, recvbuf, count, datatype, op, &
comm, request, ierr)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, intent(in) :: count
integer, intent(in) :: datatype
integer, intent(in) :: op
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
end subroutine MPI_Iscan
end interface
interface MPI_Iscatter
subroutine MPI_Iscatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
recvtype, root, comm, request, ierr)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
integer, intent(in) :: sendcount
integer, intent(in) :: sendtype
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, intent(in) :: recvcount
integer, intent(in) :: recvtype
integer, intent(in) :: root
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
end subroutine MPI_Iscatter
end interface
interface MPI_Iscatterv
subroutine MPI_Iscatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, &
recvcount, recvtype, root, comm, request, ierr)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
integer, dimension(*), intent(in) :: sendcounts
integer, dimension(*), intent(in) :: displs
integer, intent(in) :: sendtype
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, intent(in) :: recvcount
integer, intent(in) :: recvtype
integer, intent(in) :: root
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
end subroutine MPI_Iscatterv
end interface
interface MPI_Isend
subroutine MPI_Isend(buf, count, datatype, dest, tag, &
@ -1992,6 +2326,24 @@ end subroutine MPI_Reduce_scatter
end interface
interface MPI_Reduce_scatter_block
subroutine MPI_Reduce_scatter_block(sendbuf, recvbuf, recvcount, datatype, op, &
comm, ierr)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, intent(in) :: recvcount
integer, intent(in) :: datatype
integer, intent(in) :: op
integer, intent(in) :: comm
integer, intent(out) :: ierr
end subroutine MPI_Reduce_scatter_block
end interface
interface MPI_Register_datarep
subroutine MPI_Register_datarep(datarep, read_conversion_fn, write_conversion_fn, dtype_file_extent_fn, extra_state&

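With these explicit interfaces in the "use mpi" module, compilers can check arguments for the new nonblocking calls. A minimal sketch (illustrative routine and variable names, not part of this commit):

subroutine do_ibcast(values, n)
    use mpi
    implicit none
    integer, intent(in) :: n
    integer, intent(inout) :: values(n)
    integer :: req, ierr
    ! Start the nonblocking broadcast from rank 0, then complete it.
    call MPI_Ibcast(values, n, MPI_INTEGER, 0, MPI_COMM_WORLD, req, ierr)
    call MPI_Wait(req, MPI_STATUS_IGNORE, ierr)
end subroutine do_ibcast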
View file

@ -177,6 +177,7 @@ nodist_libmpi_usempi_la_SOURCES = \
mpi_buffer_attach_f90.f90 \
mpi_buffer_detach_f90.f90 \
mpi_get_f90.f90 \
mpi_ibcast_f90.f90 \
mpi_ibsend_f90.f90 \
mpi_imrecv_f90.f90 \
mpi_irecv_f90.f90 \

View file

@ -70,10 +70,26 @@ noinst_SCRIPTS = \
mpi_gather_f90.f90.sh \
mpi_gatherv_f90.f90.sh \
mpi_get_f90.f90.sh \
mpi_iallgather_f90.f90.sh \
mpi_iallgatherv_f90.f90.sh \
mpi_iallreduce_f90.f90.sh \
mpi_ialltoall_f90.f90.sh \
mpi_ialltoallv_f90.f90.sh \
mpi_ialltoallw_f90.f90.sh \
mpi_ibcast_f90.f90.sh \
mpi_ibsend_f90.f90.sh \
mpi_iexscan_f90.f90.sh \
mpi_imrecv_f90.f90.sh \
mpi_igather_f90.f90.sh \
mpi_igatherv_f90.f90.sh \
mpi_irecv_f90.f90.sh \
mpi_ireduce_f90.f90.sh \
mpi_ireduce_scatter_f90.f90.sh \
mpi_ireduce_scatter_block_f90.f90.sh \
mpi_irsend_f90.f90.sh \
mpi_iscan_f90.f90.sh \
mpi_iscatter_f90.f90.sh \
mpi_iscatterv_f90.f90.sh \
mpi_isend_f90.f90.sh \
mpi_issend_f90.f90.sh \
mpi_mrecv_f90.f90.sh \
@ -84,6 +100,7 @@ noinst_SCRIPTS = \
mpi_recv_init_f90.f90.sh \
mpi_reduce_f90.f90.sh \
mpi_reduce_scatter_f90.f90.sh \
mpi_reduce_scatter_block_f90.f90.sh \
mpi_rsend_f90.f90.sh \
mpi_rsend_init_f90.f90.sh \
mpi_scan_f90.f90.sh \

Diff not shown because of its large size. Load diff

View file

@ -0,0 +1,93 @@
#! /bin/sh
#
# Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
#
# This file generates Fortran code to bridge between an explicit F90
# generic interface and the F77 implementation (a sample of the generated
# output follows this script).
#
# This file is automatically generated by either of the scripts
# ../xml/create_mpi_f90_medium.f90.sh or
# ../xml/create_mpi_f90_large.f90.sh
#
. "$1/fortran_kinds.sh"
# This entire file is only generated in medium/large modules. So if
# we're not at least medium, bail now.
check_size large
if test "$output" = "0"; then
exit 0
fi
# Ok, we should continue.
allranks="0 $ranks"
output() {
procedure=$1
rank=$2
type=$4
proc="$1$2D$3"
cat <<EOF
subroutine ${proc}(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
recvtype, comm, request, ierr)
include "mpif-config.h"
${type}, intent(in) :: sendbuf
integer, intent(in) :: sendcount
integer, intent(in) :: sendtype
${type} :: recvbuf
integer, intent(in) :: recvcount
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
call ${procedure}(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
recvtype, comm, request, ierr)
end subroutine ${proc}
EOF
}
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output MPI_Iallgather ${rank} CH "character${dim}"
output MPI_Iallgather ${rank} L "logical${dim}"
for kind in $ikinds
do
output MPI_Iallgather ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output MPI_Iallgather ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output MPI_Iallgather ${rank} C${kind} "complex*${kind}${dim}"
done
done

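To make the generator concrete: for rank 1 buffers of integer*4, the heredoc above expands to roughly the following bridge routine (the actual set of ranks and kinds comes from fortran_kinds.sh, so the name is only an example):

subroutine MPI_Iallgather1DI4(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
        recvtype, comm, request, ierr)
  include "mpif-config.h"
  integer*4, dimension(*), intent(in) :: sendbuf
  integer, intent(in) :: sendcount
  integer, intent(in) :: sendtype
  integer*4, dimension(*) :: recvbuf
  integer, intent(in) :: recvcount
  integer, intent(in) :: recvtype
  integer, intent(in) :: comm
  integer, intent(out) :: request
  integer, intent(out) :: ierr
  ! Forward to the F77-style binding; the typed dummy arguments give
  ! the F90 generic interface something concrete to resolve against.
  call MPI_Iallgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
        recvtype, comm, request, ierr)
end subroutine MPI_Iallgather1DI4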
View file

@ -0,0 +1,94 @@
#! /bin/sh
#
# Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
#
# This file generates Fortran code to bridge between an explicit F90
# generic interface and the F77 implementation.
#
# This file is automatically generated by either of the scripts
# ../xml/create_mpi_f90_medium.f90.sh or
# ../xml/create_mpi_f90_large.f90.sh
#
. "$1/fortran_kinds.sh"
# This entire file is only generated in medium/large modules. So if
# we're not at least medium, bail now.
check_size large
if test "$output" = "0"; then
exit 0
fi
# Ok, we should continue.
allranks="0 $ranks"
output() {
procedure=$1
rank=$2
type=$4
proc="$1$2D$3"
cat <<EOF
subroutine ${proc}(sendbuf, sendcount, sendtype, recvbuf, recvcounts, &
displs, recvtype, comm, request, ierr)
include "mpif-config.h"
${type}, intent(in) :: sendbuf
integer, intent(in) :: sendcount
integer, intent(in) :: sendtype
${type} :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer, dimension(*), intent(in) :: displs
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
call ${procedure}(sendbuf, sendcount, sendtype, recvbuf, recvcounts, &
displs, recvtype, comm, request, ierr)
end subroutine ${proc}
EOF
}
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output MPI_Iallgatherv ${rank} CH "character${dim}"
output MPI_Iallgatherv ${rank} L "logical${dim}"
for kind in $ikinds
do
output MPI_Iallgatherv ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output MPI_Iallgatherv ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output MPI_Iallgatherv ${rank} C${kind} "complex*${kind}${dim}"
done
done

View file

@@ -0,0 +1,92 @@
#! /bin/sh
#
# Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
#
# This file generates Fortran code to bridge between an explicit F90
# generic interface and the F77 implementation.
#
# This file is automatically generated by either of the scripts
# ../xml/create_mpi_f90_medium.f90.sh or
# ../xml/create_mpi_f90_large.f90.sh
#
. "$1/fortran_kinds.sh"
# This entire file is only generated in the large module. So if
# we're not at least large, bail now.
check_size large
if test "$output" = "0"; then
exit 0
fi
# Ok, we should continue.
allranks="0 $ranks"
output() {
procedure=$1
rank=$2
type=$4
proc="$1$2D$3"
cat <<EOF
subroutine ${proc}(sendbuf, recvbuf, count, datatype, op, &
comm, request, ierr)
include "mpif-config.h"
${type}, intent(in) :: sendbuf
${type} :: recvbuf
integer, intent(in) :: count
integer, intent(in) :: datatype
integer, intent(in) :: op
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
call ${procedure}(sendbuf, recvbuf, count, datatype, op, &
comm, request, ierr)
end subroutine ${proc}
EOF
}
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output MPI_Iallreduce ${rank} CH "character${dim}"
output MPI_Iallreduce ${rank} L "logical${dim}"
for kind in $ikinds
do
output MPI_Iallreduce ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output MPI_Iallreduce ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output MPI_Iallreduce ${rank} C${kind} "complex*${kind}${dim}"
done
done
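As a user-level counterpart (not part of this commit), here is a minimal sketch of how the generated MPI_Iallreduce binding might be called from Fortran through the classic mpif.h interface; the request returned by the non-blocking call is later completed with MPI_Wait:

! hypothetical caller of the new non-blocking allreduce (illustrative only)
program iallreduce_demo
  implicit none
  include 'mpif.h'
  integer :: ierr, rank, request
  integer :: local_val, global_sum
  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
  local_val = rank + 1
  ! start the reduction without blocking; request is the new output argument
  call MPI_Iallreduce(local_val, global_sum, 1, MPI_INTEGER, MPI_SUM, &
       MPI_COMM_WORLD, request, ierr)
  ! ... independent computation could be overlapped with the reduction here ...
  call MPI_Wait(request, MPI_STATUS_IGNORE, ierr)
  if (rank == 0) print *, 'global sum =', global_sum
  call MPI_Finalize(ierr)
end program iallreduce_demo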

View file

@@ -0,0 +1,93 @@
#! /bin/sh
#
# Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
#
# This file generates Fortran code to bridge between an explicit F90
# generic interface and the F77 implementation.
#
# This file is automatically generated by either of the scripts
# ../xml/create_mpi_f90_medium.f90.sh or
# ../xml/create_mpi_f90_large.f90.sh
#
. "$1/fortran_kinds.sh"
# This entire file is only generated in medium/large modules. So if
# we're not at least medium, bail now.
check_size large
if test "$output" = "0"; then
exit 0
fi
# Ok, we should continue.
allranks="0 $ranks"
output() {
procedure=$1
rank=$2
type=$4
proc="$1$2D$3"
cat <<EOF
subroutine ${proc}(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
recvtype, comm, request, ierr)
include "mpif-config.h"
${type}, intent(in) :: sendbuf
integer, intent(in) :: sendcount
integer, intent(in) :: sendtype
${type} :: recvbuf
integer, intent(in) :: recvcount
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
call ${procedure}(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
recvtype, comm, request, ierr)
end subroutine ${proc}
EOF
}
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output MPI_Ialltoall ${rank} CH "character${dim}"
output MPI_Ialltoall ${rank} L "logical${dim}"
for kind in $ikinds
do
output MPI_Ialltoall ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output MPI_Ialltoall ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output MPI_Ialltoall ${rank} C${kind} "complex*${kind}${dim}"
done
done

View file

@@ -0,0 +1,95 @@
#! /bin/sh
#
# Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
#
# This file generates Fortran code to bridge between an explicit F90
# generic interface and the F77 implementation.
#
# This file is automatically generated by either of the scripts
# ../xml/create_mpi_f90_medium.f90.sh or
# ../xml/create_mpi_f90_large.f90.sh
#
. "$1/fortran_kinds.sh"
# This entire file is only generated in medium/large modules. So if
# we're not at least medium, bail now.
check_size large
if test "$output" = "0"; then
exit 0
fi
# Ok, we should continue.
allranks="0 $ranks"
output() {
procedure=$1
rank=$2
type=$4
proc="$1$2D$3"
cat <<EOF
subroutine ${proc}(sendbuf, sendcounts, sdispls, sendtype, recvbuf, &
recvcounts, rdispls, recvtype, comm, request, ierr)
include "mpif-config.h"
${type}, intent(in) :: sendbuf
integer, dimension(*), intent(in) :: sendcounts
integer, dimension(*), intent(in) :: sdispls
integer, intent(in) :: sendtype
${type} :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer, dimension(*), intent(in) :: rdispls
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
call ${procedure}(sendbuf, sendcounts, sdispls, sendtype, recvbuf, &
recvcounts, rdispls, recvtype, comm, request, ierr)
end subroutine ${proc}
EOF
}
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output MPI_Ialltoallv ${rank} CH "character${dim}"
output MPI_Ialltoallv ${rank} L "logical${dim}"
for kind in $ikinds
do
output MPI_Ialltoallv ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output MPI_Ialltoallv ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output MPI_Ialltoallv ${rank} C${kind} "complex*${kind}${dim}"
done
done

View file

@@ -0,0 +1,95 @@
#! /bin/sh
#
# Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
#
# This file generates Fortran code to bridge between an explicit F90
# generic interface and the F77 implementation.
#
# This file is automatically generated by either of the scripts
# ../xml/create_mpi_f90_medium.f90.sh or
# ../xml/create_mpi_f90_large.f90.sh
#
. "$1/fortran_kinds.sh"
# This entire file is only generated in the large module. So if
# we're not at least large, bail now.
check_size large
if test "$output" = "0"; then
exit 0
fi
# Ok, we should continue.
allranks="0 $ranks"
output() {
procedure=$1
rank=$2
type=$4
proc="$1$2D$3"
cat <<EOF
subroutine ${proc}(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, &
recvcounts, rdispls, recvtypes, comm, request, ierr)
include "mpif-config.h"
${type}, intent(in) :: sendbuf
integer, dimension(*), intent(in) :: sendcounts
integer, dimension(*), intent(in) :: sdispls
integer, dimension(*), intent(in) :: sendtypes
${type} :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer, dimension(*), intent(in) :: rdispls
integer, dimension(*), intent(in) :: recvtypes
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
call ${procedure}(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, &
recvcounts, rdispls, recvtypes, comm, request, ierr)
end subroutine ${proc}
EOF
}
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output MPI_Ialltoallw ${rank} CH "character${dim}"
output MPI_Ialltoallw ${rank} L "logical${dim}"
for kind in $ikinds
do
output MPI_Ialltoallw ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output MPI_Ialltoallw ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output MPI_Ialltoallw ${rank} C${kind} "complex*${kind}${dim}"
done
done
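Unlike the earlier templates, MPI_Ialltoallw takes per-peer arrays for the counts, byte displacements, and datatypes on both the send and receive sides. For rank 1 and an 8-byte real kind (assuming 8 appears in $rkinds), the expansion would look roughly like:

! illustrative expansion for rank 1 / real*8; note the per-peer datatype arrays
subroutine MPI_Ialltoallw1DR8(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, &
        recvcounts, rdispls, recvtypes, comm, request, ierr)
  include "mpif-config.h"
  real*8, dimension(*), intent(in) :: sendbuf
  integer, dimension(*), intent(in) :: sendcounts
  integer, dimension(*), intent(in) :: sdispls
  integer, dimension(*), intent(in) :: sendtypes
  real*8, dimension(*) :: recvbuf
  integer, dimension(*), intent(in) :: recvcounts
  integer, dimension(*), intent(in) :: rdispls
  integer, dimension(*), intent(in) :: recvtypes
  integer, intent(in) :: comm
  integer, intent(out) :: request
  integer, intent(out) :: ierr
  call MPI_Ialltoallw(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, &
        recvcounts, rdispls, recvtypes, comm, request, ierr)
end subroutine MPI_Ialltoallw1DR8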

View file

@@ -0,0 +1,91 @@
#! /bin/sh
#
# Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2012 FUJITSU LIMITED. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
#
# This file generates Fortran code to bridge between an explicit F90
# generic interface and the F77 implementation.
#
# This file is automatically generated by either of the scripts
# ../xml/create_mpi_f90_medium.f90.sh or
# ../xml/create_mpi_f90_large.f90.sh
#
. "$1/fortran_kinds.sh"
# This entire file is only generated in medium/large modules. So if
# we're not at least medium, bail now.
check_size medium
if test "$output" = "0"; then
exit 0
fi
# Ok, we should continue.
allranks="0 $ranks"
output() {
procedure=$1
rank=$2
type=$4
proc="$1$2D$3"
cat <<EOF
subroutine ${proc}(buffer, count, datatype, root, comm&
, request, ierr)
include "mpif-config.h"
${type} :: buffer
integer, intent(in) :: count
integer, intent(in) :: datatype
integer, intent(in) :: root
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
call ${procedure}(buffer, count, datatype, root, comm&
, request, ierr)
end subroutine ${proc}
EOF
}
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output MPI_Ibcast ${rank} CH "character${dim}"
output MPI_Ibcast ${rank} L "logical${dim}"
for kind in $ikinds
do
output MPI_Ibcast ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output MPI_Ibcast ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output MPI_Ibcast ${rank} C${kind} "complex*${kind}${dim}"
done
done
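MPI_Ibcast is the only script in this group that calls check_size medium rather than large, presumably because it has a single choice buffer while the collectives above have two; that rationale is an inference, not something stated in the commit. For rank 1 and a 4-byte integer kind, its heredoc would expand to roughly:

! illustrative expansion for rank 1 / integer*4; one in/out buffer plus a root
subroutine MPI_Ibcast1DI4(buffer, count, datatype, root, comm, &
        request, ierr)
  include "mpif-config.h"
  integer*4, dimension(*) :: buffer
  integer, intent(in) :: count
  integer, intent(in) :: datatype
  integer, intent(in) :: root
  integer, intent(in) :: comm
  integer, intent(out) :: request
  integer, intent(out) :: ierr
  call MPI_Ibcast(buffer, count, datatype, root, comm, &
       request, ierr)
end subroutine MPI_Ibcast1DI4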

View file

@@ -0,0 +1,92 @@
#! /bin/sh
#
# Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
#
# This file generates Fortran code to bridge between an explicit F90
# generic interface and the F77 implementation.
#
# This file is automatically generated by either of the scripts
# ../xml/create_mpi_f90_medium.f90.sh or
# ../xml/create_mpi_f90_large.f90.sh
#
. "$1/fortran_kinds.sh"
# This entire file is only generated in the large module. So if
# we're not at least large, bail now.
check_size large
if test "$output" = "0"; then
exit 0
fi
# Ok, we should continue.
allranks="0 $ranks"
output() {
procedure=$1
rank=$2
type=$4
proc="$1$2D$3"
cat <<EOF
subroutine ${proc}(sendbuf, recvbuf, count, datatype, op, &
comm, request, ierr)
include "mpif-config.h"
${type}, intent(in) :: sendbuf
${type} :: recvbuf
integer, intent(in) :: count
integer, intent(in) :: datatype
integer, intent(in) :: op
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
call ${procedure}(sendbuf, recvbuf, count, datatype, op, &
comm, request, ierr)
end subroutine ${proc}
EOF
}
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output MPI_Iexscan ${rank} CH "character${dim}"
output MPI_Iexscan ${rank} L "logical${dim}"
for kind in $ikinds
do
output MPI_Iexscan ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output MPI_Iexscan ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output MPI_Iexscan ${rank} C${kind} "complex*${kind}${dim}"
done
done

View file

@@ -0,0 +1,94 @@
#! /bin/sh
#
# Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
#
# This file generates Fortran code to bridge between an explicit F90
# generic interface and the F77 implementation.
#
# This file is automatically generated by either of the scripts
# ../xml/create_mpi_f90_medium.f90.sh or
# ../xml/create_mpi_f90_large.f90.sh
#
. "$1/fortran_kinds.sh"
# This entire file is only generated in the large module. So if
# we're not at least large, bail now.
check_size large
if test "$output" = "0"; then
exit 0
fi
# Ok, we should continue.
allranks="0 $ranks"
output() {
procedure=$1
rank=$2
type=$4
proc="$1$2D$3"
cat <<EOF
subroutine ${proc}(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
recvtype, root, comm, request, ierr)
include "mpif-config.h"
${type}, intent(in) :: sendbuf
integer, intent(in) :: sendcount
integer, intent(in) :: sendtype
${type} :: recvbuf
integer, intent(in) :: recvcount
integer, intent(in) :: recvtype
integer, intent(in) :: root
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierr
call ${procedure}(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
recvtype, root, comm, request, ierr)
end subroutine ${proc}
EOF
}
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output MPI_Igather ${rank} CH "character${dim}"
output MPI_Igather ${rank} L "logical${dim}"
for kind in $ikinds
do
output MPI_Igather ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output MPI_Igather ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output MPI_Igather ${rank} C${kind} "complex*${kind}${dim}"
done
done

Some files were not shown because too many files changed in this diff.