
MPI-3: Add support for neighborhood collectives

Blocking versions are simple linear algorithms implemented in coll/basic. Non-
blocking versions are from libnbc 1.1.1. All algorithms have been tested with
simple test cases.
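
For illustration, here is a minimal usage sketch (not part of this commit) of the new blocking and non-blocking routines on a Cartesian communicator; the function name, the 2-D periodic grid, and the buffer sizes are assumptions made for the example only:

#include <mpi.h>

/* Sketch only: gather one int from each Cartesian neighbor.  Assumes MPI has
 * already been initialized; the grid shape and names are illustrative. */
int neighbor_exchange_example(void)
{
    MPI_Comm cart;
    MPI_Request req;
    int dims[2] = {0, 0}, periods[2] = {1, 1};
    int size, rank, recvbuf[4];               /* 2 neighbors per dimension */

    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Dims_create(size, 2, dims);
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 0, &cart);
    MPI_Comm_rank(cart, &rank);

    /* blocking version (linear algorithm in coll/basic) */
    MPI_Neighbor_allgather(&rank, 1, MPI_INT, recvbuf, 1, MPI_INT, cart);

    /* non-blocking version (derived from libnbc 1.1.1) */
    MPI_Ineighbor_allgather(&rank, 1, MPI_INT, recvbuf, 1, MPI_INT, cart, &req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);

    MPI_Comm_free(&cart);
    return MPI_SUCCESS;
}

With a periodic 2-D grid every rank has exactly four neighbors, received in the order -1 then +1 in each dimension, which is why recvbuf holds 2 * ndims entries.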

cmr=v1.7.4:reviewer=jsquyres

This commit was SVN r29265.
This commit is contained in:
Nathan Hjelm 2013-09-26 21:55:08 +00:00
parent a42fa78da7
commit c5596548b2
96 changed files with 7856 additions and 128 deletions

View File

@ -469,7 +469,7 @@ typedef int (MPI_Grequest_cancel_function)(void *, int);
#define MPI_SEEK_END 604
/* Max data representation length */
#define MPI_MAX_DATAREP_STRING OPAL_MAX_DATAREP_STRING
#define MPI_MAX_DATAREP_STRING OPAL_MAX_DATAREP_STRING
#endif /* #if OMPI_PROVIDE_MPI_FILE_INTERFACE */
@ -645,7 +645,7 @@ enum {
MPI_COMBINER_HINDEXED_BLOCK
};
/*
/*
* Communicator split type constants.
* Do not change the order of these without also modifying mpif.h.in
* (see also mpif-common.h.fin).
@ -1250,25 +1250,25 @@ OMPI_DECLSPEC int MPI_Comm_free(MPI_Comm *comm);
OMPI_DECLSPEC int MPI_Comm_get_attr(MPI_Comm comm, int comm_keyval,
void *attribute_val, int *flag);
OMPI_DECLSPEC int MPI_Dist_graph_create(MPI_Comm comm_old, int n, int nodes[],
int degrees[], int targets[],
int weights[], MPI_Info info,
int degrees[], int targets[],
int weights[], MPI_Info info,
int reorder, MPI_Comm * newcomm);
OMPI_DECLSPEC int MPI_Dist_graph_create_adjacent(MPI_Comm comm_old,
int indegree, int sources[],
int sourceweights[],
int sourceweights[],
int outdegree,
int destinations[],
int destinations[],
int destweights[],
MPI_Info info, int reorder,
MPI_Comm *comm_dist_graph);
OMPI_DECLSPEC int MPI_Dist_graph_neighbors(MPI_Comm comm, int maxindegree,
int sources[], int sourceweights[],
int maxoutdegree,
int maxoutdegree,
int destinations[],
int destweights[]);
OMPI_DECLSPEC int MPI_Dist_graph_neighbors_count(MPI_Comm comm,
OMPI_DECLSPEC int MPI_Dist_graph_neighbors_count(MPI_Comm comm,
int *inneighbors,
int *outneighbors,
int *outneighbors,
int *weighted);
OMPI_DECLSPEC int MPI_Comm_get_errhandler(MPI_Comm comm, MPI_Errhandler *erhandler);
OMPI_DECLSPEC int MPI_Comm_get_name(MPI_Comm comm, char *comm_name, int *resultlen);
@ -1516,7 +1516,37 @@ OMPI_DECLSPEC int MPI_Mprobe(int source, int tag, MPI_Comm comm,
MPI_Status *status);
OMPI_DECLSPEC int MPI_Mrecv(void *buf, int count, MPI_Datatype type,
MPI_Message *message, MPI_Status *status);
OMPI_DECLSPEC MPI_Fint MPI_Op_c2f(MPI_Op op);
OMPI_DECLSPEC int MPI_Neighbor_allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm);
OMPI_DECLSPEC int MPI_Ineighbor_allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int MPI_Neighbor_allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcounts[], int displs[],
MPI_Datatype recvtype, MPI_Comm comm);
OMPI_DECLSPEC int MPI_Ineighbor_allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcounts[], int displs[],
MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int MPI_Neighbor_alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm);
OMPI_DECLSPEC int MPI_Ineighbor_alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int MPI_Neighbor_alltoallv(void *sendbuf, int sendcounts[], int sdispls[], MPI_Datatype sendtype,
void *recvbuf, int recvcounts[], int rdispls[], MPI_Datatype recvtype,
MPI_Comm comm);
OMPI_DECLSPEC int MPI_Ineighbor_alltoallv(void *sendbuf, int sendcounts[], int sdispls[], MPI_Datatype sendtype,
void *recvbuf, int recvcounts[], int rdispls[], MPI_Datatype recvtype,
MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int MPI_Neighbor_alltoallw(void *sendbuf, int sendcounts[], MPI_Aint sdispls[], MPI_Datatype sendtypes[],
void *recvbuf, int recvcounts[], MPI_Aint rdispls[], MPI_Datatype recvtypes[],
MPI_Comm comm);
OMPI_DECLSPEC int MPI_Ineighbor_alltoallw(void *sendbuf, int sendcounts[], MPI_Aint sdispls[], MPI_Datatype sendtypes[],
void *recvbuf, int recvcounts[], MPI_Aint rdispls[], MPI_Datatype recvtypes[],
MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC MPI_Fint MPI_Op_c2f(MPI_Op op);
OMPI_DECLSPEC int MPI_Op_commutative(MPI_Op op, int *commute);
OMPI_DECLSPEC int MPI_Op_create(MPI_User_function *function, int commute, MPI_Op *op);
OMPI_DECLSPEC int MPI_Open_port(MPI_Info info, char *port_name);
@ -1818,25 +1848,25 @@ OMPI_DECLSPEC int PMPI_Attr_delete(MPI_Comm comm, int keyval)
OMPI_DECLSPEC int PMPI_Attr_get(MPI_Comm comm, int keyval, void *attribute_val, int *flag)
__mpi_interface_deprecated__("MPI_Attr_get is superseded by MPI_Comm_get_attr in MPI-2.0");
OMPI_DECLSPEC int PMPI_Dist_graph_create(MPI_Comm comm_old, int n, int nodes[],
int degrees[], int targets[],
int weights[], MPI_Info info,
int degrees[], int targets[],
int weights[], MPI_Info info,
int reorder, MPI_Comm * newcomm);
OMPI_DECLSPEC int PMPI_Dist_graph_create_adjacent(MPI_Comm comm_old,
int indegree, int sources[],
int sourceweights[],
int sourceweights[],
int outdegree,
int destinations[],
int destinations[],
int destweights[],
MPI_Info info, int reorder,
MPI_Comm *comm_dist_graph);
OMPI_DECLSPEC int PMPI_Dist_graph_neighbors(MPI_Comm comm, int maxindegree,
int sources[], int sourceweights[],
int maxoutdegree,
int maxoutdegree,
int destinations[],
int destweights[]);
OMPI_DECLSPEC int PMPI_Dist_graph_neighbors_count(MPI_Comm comm,
OMPI_DECLSPEC int PMPI_Dist_graph_neighbors_count(MPI_Comm comm,
int *inneighbors,
int *outneighbors,
int *outneighbors,
int *weighted);
OMPI_DECLSPEC int PMPI_Attr_put(MPI_Comm comm, int keyval, void *attribute_val)
__mpi_interface_deprecated__("MPI_Attr_put is superseded by MPI_Comm_set_attr in MPI-2.0");
@ -2136,6 +2166,36 @@ OMPI_DECLSPEC int PMPI_Mprobe(int source, int tag, MPI_Comm comm,
MPI_Status *status);
OMPI_DECLSPEC int PMPI_Mrecv(void *buf, int count, MPI_Datatype type,
MPI_Message *message, MPI_Status *status);
OMPI_DECLSPEC int PMPI_Neighbor_allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm);
OMPI_DECLSPEC int PMPI_Ineighbor_allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int PMPI_Neighbor_allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcounts[], int displs[],
MPI_Datatype recvtype, MPI_Comm comm);
OMPI_DECLSPEC int PMPI_Ineighbor_allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcounts[], int displs[],
MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int PMPI_Neighbor_alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm);
OMPI_DECLSPEC int PMPI_Ineighbor_alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int PMPI_Neighbor_alltoallv(void *sendbuf, int sendcounts[], int sdispls[], MPI_Datatype sendtype,
void *recvbuf, int recvcounts[], int rdispls[], MPI_Datatype recvtype,
MPI_Comm comm);
OMPI_DECLSPEC int PMPI_Ineighbor_alltoallv(void *sendbuf, int sendcounts[], int sdispls[], MPI_Datatype sendtype,
void *recvbuf, int recvcounts[], int rdispls[], MPI_Datatype recvtype,
MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC int PMPI_Neighbor_alltoallw(void *sendbuf, int sendcounts[], MPI_Aint sdispls[], MPI_Datatype sendtypes[],
void *recvbuf, int recvcounts[], MPI_Aint rdispls[], MPI_Datatype recvtypes[],
MPI_Comm comm);
OMPI_DECLSPEC int PMPI_Ineighbor_alltoallw(void *sendbuf, int sendcounts[], MPI_Aint sdispls[], MPI_Datatype sendtypes[],
void *recvbuf, int recvcounts[], MPI_Aint rdispls[], MPI_Datatype recvtypes[],
MPI_Comm comm, MPI_Request *request);
OMPI_DECLSPEC MPI_Fint PMPI_Op_c2f(MPI_Op op);
OMPI_DECLSPEC int PMPI_Op_commutative(MPI_Op op, int *commute);
OMPI_DECLSPEC int PMPI_Op_create(MPI_User_function *function, int commute, MPI_Op *op);

View File

@ -1,3 +1,4 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
@ -13,7 +14,9 @@
* rights reserved.
* Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2008 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012 Oak Rigde National Laboratory. All rights reserved.
* Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -175,6 +178,19 @@ int mca_coll_base_comm_select(ompi_communicator_t * comm)
COPY(avail->ac_module, comm, iscan);
COPY(avail->ac_module, comm, iscatter);
COPY(avail->ac_module, comm, iscatterv);
COPY(avail->ac_module, comm, neighbor_allgather);
COPY(avail->ac_module, comm, neighbor_allgatherv);
COPY(avail->ac_module, comm, neighbor_alltoall);
COPY(avail->ac_module, comm, neighbor_alltoallv);
COPY(avail->ac_module, comm, neighbor_alltoallw);
COPY(avail->ac_module, comm, ineighbor_allgather);
COPY(avail->ac_module, comm, ineighbor_allgatherv);
COPY(avail->ac_module, comm, ineighbor_alltoall);
COPY(avail->ac_module, comm, ineighbor_alltoallv);
COPY(avail->ac_module, comm, ineighbor_alltoallw);
/* release the original module reference and the list item */
OBJ_RELEASE(avail->ac_module);
OBJ_RELEASE(avail);
@ -217,7 +233,17 @@ int mca_coll_base_comm_select(ompi_communicator_t * comm)
(NULL == comm->c_coll.coll_ireduce_scatter) ||
((OMPI_COMM_IS_INTRA(comm)) && (NULL == comm->c_coll.coll_iscan)) ||
(NULL == comm->c_coll.coll_iscatter) ||
(NULL == comm->c_coll.coll_iscatterv)
(NULL == comm->c_coll.coll_iscatterv) ||
(NULL == comm->c_coll.coll_neighbor_allgather) ||
(NULL == comm->c_coll.coll_neighbor_allgatherv) ||
(NULL == comm->c_coll.coll_neighbor_alltoall) ||
(NULL == comm->c_coll.coll_neighbor_alltoallv) ||
(NULL == comm->c_coll.coll_neighbor_alltoallw) ||
(NULL == comm->c_coll.coll_ineighbor_allgather) ||
(NULL == comm->c_coll.coll_ineighbor_allgatherv) ||
(NULL == comm->c_coll.coll_ineighbor_alltoall) ||
(NULL == comm->c_coll.coll_ineighbor_alltoallv) ||
(NULL == comm->c_coll.coll_ineighbor_alltoallw)
) {
mca_coll_base_comm_unselect(comm);
return OMPI_ERR_NOT_FOUND;

View File

@ -1,3 +1,4 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
@ -9,6 +10,8 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -41,49 +44,8 @@
*/
static void coll_base_module_construct(mca_coll_base_module_t *m)
{
m->coll_module_enable = NULL;
/* Collective function pointers */
/* blocking functions */
m->coll_allgather = NULL;
m->coll_allgatherv = NULL;
m->coll_allreduce = NULL;
m->coll_alltoall = NULL;
m->coll_alltoallv = NULL;
m->coll_alltoallw = NULL;
m->coll_barrier = NULL;
m->coll_bcast = NULL;
m->coll_exscan = NULL;
m->coll_gather = NULL;
m->coll_gatherv = NULL;
m->coll_reduce = NULL;
m->coll_reduce_scatter_block = NULL;
m->coll_reduce_scatter = NULL;
m->coll_scan = NULL;
m->coll_scatter = NULL;
m->coll_scatterv = NULL;
/* nonblocking functions */
m->coll_iallgather = NULL;
m->coll_iallgatherv = NULL;
m->coll_iallreduce = NULL;
m->coll_ialltoall = NULL;
m->coll_ialltoallv = NULL;
m->coll_ialltoallw = NULL;
m->coll_ibarrier = NULL;
m->coll_ibcast = NULL;
m->coll_iexscan = NULL;
m->coll_igather = NULL;
m->coll_igatherv = NULL;
m->coll_ireduce = NULL;
m->coll_ireduce_scatter_block = NULL;
m->coll_ireduce_scatter = NULL;
m->coll_iscan = NULL;
m->coll_iscatter = NULL;
m->coll_iscatterv = NULL;
/* FT event */
m->ft_event = NULL;
/* zero out all functions */
memset ((char *) m + sizeof (m->super), 0, sizeof (*m) - sizeof (m->super));
}
OBJ_CLASS_INSTANCE(mca_coll_base_module_t, opal_object_t,

View File

@ -11,6 +11,8 @@
# All rights reserved.
# Copyright (c) 2010 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2012 Sandia National Laboratories. All rights reserved.
# Copyright (c) 2013 Los Alamos National Security, LLC. All rights
# reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
@ -32,6 +34,11 @@ sources = \
coll_basic_gather.c \
coll_basic_gatherv.c \
coll_basic_module.c \
coll_basic_neighbor_allgather.c \
coll_basic_neighbor_allgatherv.c \
coll_basic_neighbor_alltoall.c \
coll_basic_neighbor_alltoallv.c \
coll_basic_neighbor_alltoallw.c \
coll_basic_reduce.c \
coll_basic_reduce_scatter.c \
coll_basic_reduce_scatter_block.c \

View File

@ -1,3 +1,4 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
@ -11,6 +12,8 @@
* All rights reserved.
* Copyright (c) 2008 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012 Sandia National Laboratories. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -306,6 +309,30 @@ BEGIN_C_DECLS
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module);
int mca_coll_basic_neighbor_allgather(void *sbuf, int scount,
struct ompi_datatype_t *sdtype, void *rbuf,
int rcount, struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module);
int mca_coll_basic_neighbor_allgatherv(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void *rbuf, int rcounts[], int disps[], struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, mca_coll_base_module_t *module);
int mca_coll_basic_neighbor_alltoall(void *sbuf, int scount, struct ompi_datatype_t *sdtype, void *rbuf,
int rcount, struct ompi_datatype_t *rdtype, struct ompi_communicator_t *comm,
mca_coll_base_module_t *module);
int mca_coll_basic_neighbor_alltoallv(void *sbuf, int scounts[], int sdisps[],
struct ompi_datatype_t *sdtype, void *rbuf, int rcounts[],
int rdisps[], struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, mca_coll_base_module_t *module);
int mca_coll_basic_neighbor_alltoallw(void *sbuf, int scounts[], MPI_Aint sdisps[],
struct ompi_datatype_t *sdtypes[], void *rbuf, int rcounts[],
MPI_Aint rdisps[], struct ompi_datatype_t *rdtypes[],
struct ompi_communicator_t *comm, mca_coll_base_module_t *module);
int mca_coll_basic_ft_event(int status);

View File

@ -1,3 +1,4 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
@ -10,6 +11,8 @@
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2012 Sandia National Laboratories. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -132,6 +135,13 @@ mca_coll_basic_comm_query(struct ompi_communicator_t *comm,
basic_module->super.coll_scatterv = mca_coll_basic_scatterv_intra;
}
/* These functions will return an error code if comm does not have a virtual topology */
basic_module->super.coll_neighbor_allgather = mca_coll_basic_neighbor_allgather;
basic_module->super.coll_neighbor_allgatherv = mca_coll_basic_neighbor_allgatherv;
basic_module->super.coll_neighbor_alltoall = mca_coll_basic_neighbor_alltoall;
basic_module->super.coll_neighbor_alltoallv = mca_coll_basic_neighbor_alltoallv;
basic_module->super.coll_neighbor_alltoallw = mca_coll_basic_neighbor_alltoallw;
return &(basic_module->super);
}

View File

@ -0,0 +1,232 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2006 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "coll_basic.h"
#include <stdlib.h>
#include "mpi.h"
#include "ompi/constants.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/mca/coll/coll.h"
#include "ompi/mca/pml/pml.h"
#include "ompi/mca/coll/base/coll_tags.h"
#include "coll_basic.h"
#include "ompi/mca/topo/base/base.h"
static int
mca_coll_basic_neighbor_allgather_cart(const void *sbuf, int scount,
struct ompi_datatype_t *sdtype, void *rbuf,
int rcount, struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_cart_2_1_0_t *cart = comm->c_topo->mtc.cart;
const int rank = ompi_comm_rank (comm);
ompi_request_t **reqs;
ptrdiff_t lb, extent;
int rc = MPI_SUCCESS, dim, nreqs;
ompi_datatype_get_extent(rdtype, &lb, &extent);
/* The ordering is defined as -1 then +1 in each dimension in
* order of dimension. */
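/* With a shift of +1, srank below is the neighbor at displacement -1 and
 * drank the neighbor at displacement +1 in the current dimension.  The
 * receive buffer advances by one rcount block per neighbor slot even when
 * that neighbor is MPI_PROC_NULL, so the result always holds 2 * ndims
 * blocks. */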
for (dim = 0, reqs = basic_module->mccb_reqs, nreqs = 0 ; dim < cart->ndims ; ++dim) {
int srank = MPI_PROC_NULL, drank = MPI_PROC_NULL;
if (cart->dims[dim] > 1) {
mca_topo_base_cart_shift (comm, dim, 1, &srank, &drank);
} else if (1 == cart->dims[dim] && cart->periods[dim]) {
srank = drank = rank;
}
if (MPI_PROC_NULL != srank) {
rc = MCA_PML_CALL(irecv(rbuf, rcount, rdtype, srank,
MCA_COLL_BASE_TAG_ALLGATHER,
comm, reqs++));
if (OMPI_SUCCESS != rc) break;
/* remove cast from const when the pml layer is updated to take
* a const for the send buffer. */
rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, srank,
MCA_COLL_BASE_TAG_ALLGATHER,
MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs += 2;
}
rbuf = (char *) rbuf + extent * rcount;
if (MPI_PROC_NULL != drank) {
rc = MCA_PML_CALL(irecv(rbuf, rcount, rdtype, drank,
MCA_COLL_BASE_TAG_ALLGATHER,
comm, reqs++));
if (OMPI_SUCCESS != rc) break;
rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, drank,
MCA_COLL_BASE_TAG_ALLGATHER,
MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs += 2;
}
rbuf = (char *) rbuf + extent * rcount;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
return ompi_request_wait_all (nreqs, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
}
static int
mca_coll_basic_neighbor_allgather_graph(const void *sbuf, int scount,
struct ompi_datatype_t *sdtype, void *rbuf,
int rcount, struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_graph_2_1_0_t *graph = comm->c_topo->mtc.graph;
const int rank = ompi_comm_rank (comm);
const int *edges;
int degree;
ompi_request_t **reqs;
ptrdiff_t lb, extent;
int rc = MPI_SUCCESS, neighbor;
mca_topo_base_graph_neighbors_count (comm, rank, &degree);
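/* graph->index holds cumulative neighbor counts, so this rank's edge list
 * starts at graph->index[rank - 1] (offset 0 for rank 0) and has degree
 * entries. */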
edges = graph->edges;
if (rank > 0) {
edges += graph->index[rank - 1];
}
ompi_datatype_get_extent(rdtype, &lb, &extent);
for (neighbor = 0, reqs = basic_module->mccb_reqs ; neighbor < degree ; ++neighbor) {
rc = MCA_PML_CALL(irecv(rbuf, rcount, rdtype, edges[neighbor], MCA_COLL_BASE_TAG_ALLGATHER,
comm, reqs++));
if (OMPI_SUCCESS != rc) break;
rbuf = (char *) rbuf + extent * rcount;
/* remove cast from const when the pml layer is updated to take
* a const for the send buffer. */
rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, edges[neighbor],
MCA_COLL_BASE_TAG_ALLGATHER, MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
return ompi_request_wait_all (degree * 2, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
}
static int
mca_coll_basic_neighbor_allgather_dist_graph(const void *sbuf, int scount,
struct ompi_datatype_t *sdtype, void *rbuf,
int rcount, struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_dist_graph_2_1_0_t *dist_graph = comm->c_topo->mtc.dist_graph;
const int *inedges, *outedges;
int indegree, outdegree;
ompi_request_t **reqs;
ptrdiff_t lb, extent;
int rc = MPI_SUCCESS, neighbor;
indegree = dist_graph->indegree;
outdegree = dist_graph->outdegree;
inedges = dist_graph->in;
outedges = dist_graph->out;
ompi_datatype_get_extent(rdtype, &lb, &extent);
for (neighbor = 0, reqs = basic_module->mccb_reqs ; neighbor < indegree ; ++neighbor) {
rc = MCA_PML_CALL(irecv(rbuf, rcount, rdtype, inedges[neighbor],
MCA_COLL_BASE_TAG_ALLGATHER,
comm, reqs++));
if (OMPI_SUCCESS != rc) break;
rbuf = (char *) rbuf + extent * rcount;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
for (neighbor = 0 ; neighbor < outdegree ; ++neighbor) {
/* remove cast from const when the pml layer is updated to take
* a const for the send buffer. */
rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, outedges[neighbor],
MCA_COLL_BASE_TAG_ALLGATHER,
MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
return ompi_request_wait_all (indegree + outdegree, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
}
int mca_coll_basic_neighbor_allgather(void *sbuf, int scount,
struct ompi_datatype_t *sdtype, void *rbuf,
int rcount, struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
if (OMPI_COMM_IS_INTER(comm)) {
return OMPI_ERR_NOT_SUPPORTED;
}
if (OMPI_COMM_IS_CART(comm)) {
return mca_coll_basic_neighbor_allgather_cart (sbuf, scount, sdtype, rbuf,
rcount, rdtype, comm, module);
} else if (OMPI_COMM_IS_GRAPH(comm)) {
return mca_coll_basic_neighbor_allgather_graph (sbuf, scount, sdtype, rbuf,
rcount, rdtype, comm, module);
} else if (OMPI_COMM_IS_DIST_GRAPH(comm)) {
return mca_coll_basic_neighbor_allgather_dist_graph (sbuf, scount, sdtype, rbuf,
rcount, rdtype, comm, module);
}
return OMPI_ERR_NOT_SUPPORTED;
}

View File

@ -0,0 +1,212 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2006 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "coll_basic.h"
#include <stdlib.h>
#include "mpi.h"
#include "ompi/constants.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/mca/coll/coll.h"
#include "ompi/mca/pml/pml.h"
#include "ompi/mca/coll/base/coll_tags.h"
#include "coll_basic.h"
#include "ompi/mca/topo/base/base.h"
static int
mca_coll_basic_neighbor_allgatherv_cart(const void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void *rbuf, const int rcounts[], const int disps[],
struct ompi_datatype_t *rdtype, struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_cart_2_1_0_t *cart = comm->c_topo->mtc.cart;
const int rank = ompi_comm_rank (comm);
ompi_request_t **reqs;
ptrdiff_t lb, extent;
int rc = MPI_SUCCESS, dim, i, nreqs;
ompi_datatype_get_extent(rdtype, &lb, &extent);
reqs = basic_module->mccb_reqs;
/* The ordering is defined as -1 then +1 in each dimension in
* order of dimension. */
for (dim = 0, i = 0, nreqs = 0 ; dim < cart->ndims ; ++dim, i += 2) {
int srank = MPI_PROC_NULL, drank = MPI_PROC_NULL;
if (cart->dims[dim] > 1) {
mca_topo_base_cart_shift (comm, dim, 1, &srank, &drank);
} else if (1 == cart->dims[dim] && cart->periods[dim]) {
srank = drank = rank;
}
if (MPI_PROC_NULL != srank) {
rc = MCA_PML_CALL(irecv((char *) rbuf + disps[i] * extent, rcounts[i], rdtype, srank,
MCA_COLL_BASE_TAG_ALLGATHER, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
/* remove cast from const when the pml layer is updated to take
* a const for the send buffer. */
rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, srank, MCA_COLL_BASE_TAG_ALLGATHER,
MCA_PML_BASE_SEND_STANDARD, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs += 2;
}
if (MPI_PROC_NULL != drank) {
rc = MCA_PML_CALL(irecv((char *) rbuf + disps[i+1] * extent, rcounts[i+1], rdtype, drank,
MCA_COLL_BASE_TAG_ALLGATHER, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, drank, MCA_COLL_BASE_TAG_ALLGATHER,
MCA_PML_BASE_SEND_STANDARD, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs += 2;
}
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
return ompi_request_wait_all (nreqs, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
}
static int
mca_coll_basic_neighbor_allgatherv_graph(const void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void *rbuf, const int rcounts[], const int disps[],
struct ompi_datatype_t *rdtype, struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_graph_2_1_0_t *graph = comm->c_topo->mtc.graph;
const int rank = ompi_comm_rank (comm);
const int *edges;
int degree;
ompi_request_t **reqs;
ptrdiff_t lb, extent;
int rc = MPI_SUCCESS, neighbor;
mca_topo_base_graph_neighbors_count (comm, rank, &degree);
edges = graph->edges;
if (rank > 0) {
edges += graph->index[rank - 1];
}
ompi_datatype_get_extent(rdtype, &lb, &extent);
for (neighbor = 0, reqs = basic_module->mccb_reqs ; neighbor < degree ; ++neighbor) {
rc = MCA_PML_CALL(irecv((char *) rbuf + disps[neighbor] * extent, rcounts[neighbor],
rdtype, edges[neighbor], MCA_COLL_BASE_TAG_ALLGATHER, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
/* remove cast from const when the pml layer is updated to take
* a const for the send buffer. */
rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, edges[neighbor],
MCA_COLL_BASE_TAG_ALLGATHER, MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
return ompi_request_wait_all (degree * 2, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
}
static int
mca_coll_basic_neighbor_allgatherv_dist_graph(const void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void *rbuf, const int rcounts[], const int disps[],
struct ompi_datatype_t *rdtype, struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_dist_graph_2_1_0_t *dist_graph = comm->c_topo->mtc.dist_graph;
const int *inedges, *outedges;
int indegree, outdegree;
ompi_request_t **reqs;
ptrdiff_t lb, extent;
int rc = MPI_SUCCESS, neighbor;
indegree = dist_graph->indegree;
outdegree = dist_graph->outdegree;
inedges = dist_graph->in;
outedges = dist_graph->out;
ompi_datatype_get_extent(rdtype, &lb, &extent);
for (neighbor = 0, reqs = basic_module->mccb_reqs ; neighbor < indegree ; ++neighbor) {
rc = MCA_PML_CALL(irecv((char *) rbuf + disps[neighbor] * extent, rcounts[neighbor], rdtype,
inedges[neighbor], MCA_COLL_BASE_TAG_ALLGATHER, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
for (neighbor = 0 ; neighbor < outdegree ; ++neighbor) {
/* remove cast from const when the pml layer is updated to take
* a const for the send buffer. */
rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, outedges[neighbor],
MCA_COLL_BASE_TAG_ALLGATHER, MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
return ompi_request_wait_all (indegree + outdegree, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
}
int mca_coll_basic_neighbor_allgatherv(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void *rbuf, int rcounts[], int disps[], struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, mca_coll_base_module_t *module)
{
if (OMPI_COMM_IS_INTER(comm)) {
return OMPI_ERR_NOT_SUPPORTED;
}
if (OMPI_COMM_IS_CART(comm)) {
return mca_coll_basic_neighbor_allgatherv_cart (sbuf, scount, sdtype, rbuf, rcounts,
disps, rdtype, comm, module);
} else if (OMPI_COMM_IS_GRAPH(comm)) {
return mca_coll_basic_neighbor_allgatherv_graph (sbuf, scount, sdtype, rbuf, rcounts,
disps, rdtype, comm, module);
} else if (OMPI_COMM_IS_DIST_GRAPH(comm)) {
return mca_coll_basic_neighbor_allgatherv_dist_graph (sbuf, scount, sdtype, rbuf, rcounts,
disps, rdtype, comm, module);
}
return OMPI_ERR_NOT_SUPPORTED;
}

View File

@ -0,0 +1,251 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2006 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "coll_basic.h"
#include <stdlib.h>
#include "mpi.h"
#include "ompi/constants.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/mca/coll/coll.h"
#include "ompi/mca/pml/pml.h"
#include "ompi/mca/coll/base/coll_tags.h"
#include "coll_basic.h"
#include "ompi/mca/topo/base/base.h"
static int
mca_coll_basic_neighbor_alltoall_cart(const void *sbuf, int scount, struct ompi_datatype_t *sdtype, void *rbuf,
int rcount, struct ompi_datatype_t *rdtype, struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_cart_2_1_0_t *cart = comm->c_topo->mtc.cart;
const int rank = ompi_comm_rank (comm);
ompi_request_t **reqs;
ptrdiff_t lb, rdextent, sdextent;
int rc = MPI_SUCCESS, dim, nreqs;
ompi_datatype_get_extent(rdtype, &lb, &rdextent);
ompi_datatype_get_extent(sdtype, &lb, &sdextent);
/* post receives first */
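/* Posting every receive before any send lets matching sends be delivered
 * straight into the user buffers instead of being queued as unexpected
 * messages. */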
for (dim = 0, nreqs = 0, reqs = basic_module->mccb_reqs ; dim < cart->ndims ; ++dim) {
int srank = MPI_PROC_NULL, drank = MPI_PROC_NULL;
if (cart->dims[dim] > 1) {
mca_topo_base_cart_shift (comm, dim, 1, &srank, &drank);
} else if (1 == cart->dims[dim] && cart->periods[dim]) {
srank = drank = rank;
}
if (MPI_PROC_NULL != srank) {
rc = MCA_PML_CALL(irecv(rbuf, rcount, rdtype, srank,
MCA_COLL_BASE_TAG_ALLTOALL,
comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
}
rbuf = (char *) rbuf + rdextent * rcount;
if (MPI_PROC_NULL != drank) {
rc = MCA_PML_CALL(irecv(rbuf, rcount, rdtype, drank,
MCA_COLL_BASE_TAG_ALLTOALL,
comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
}
rbuf = (char *) rbuf + rdextent * rcount;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
for (dim = 0 ; dim < cart->ndims ; ++dim) {
int srank = MPI_PROC_NULL, drank = MPI_PROC_NULL;
if (cart->dims[dim] > 1) {
mca_topo_base_cart_shift (comm, dim, 1, &srank, &drank);
} else if (1 == cart->dims[dim] && cart->periods[dim]) {
srank = drank = rank;
}
if (MPI_PROC_NULL != srank) {
/* remove cast from const when the pml layer is updated to take
* a const for the send buffer. */
rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, srank,
MCA_COLL_BASE_TAG_ALLTOALL,
MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
}
sbuf = (const char *) sbuf + sdextent * scount;
if (MPI_PROC_NULL != drank) {
rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, drank,
MCA_COLL_BASE_TAG_ALLTOALL,
MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
}
sbuf = (const char *) sbuf + sdextent * scount;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
return ompi_request_wait_all (nreqs, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
}
static int
mca_coll_basic_neighbor_alltoall_graph(const void *sbuf, int scount, struct ompi_datatype_t *sdtype, void *rbuf,
int rcount, struct ompi_datatype_t *rdtype, struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_graph_2_1_0_t *graph = comm->c_topo->mtc.graph;
const int rank = ompi_comm_rank (comm);
int rc = MPI_SUCCESS, neighbor, degree;
ptrdiff_t lb, rdextent, sdextent;
ompi_request_t **reqs;
const int *edges;
mca_topo_base_graph_neighbors_count (comm, rank, &degree);
edges = graph->edges;
if (rank > 0) {
edges += graph->index[rank - 1];
}
ompi_datatype_get_extent(rdtype, &lb, &rdextent);
ompi_datatype_get_extent(sdtype, &lb, &sdextent);
/* post receives first */
for (neighbor = 0, reqs = basic_module->mccb_reqs ; neighbor < degree ; ++neighbor) {
rc = MCA_PML_CALL(irecv(rbuf, rcount, rdtype, edges[neighbor], MCA_COLL_BASE_TAG_ALLTOALL,
comm, reqs++));
if (OMPI_SUCCESS != rc) break;
rbuf = (char *) rbuf + rdextent * rcount;
}
for (neighbor = 0 ; neighbor < degree ; ++neighbor) {
/* remove cast from const when the pml layer is updated to take
* a const for the send buffer. */
rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, edges[neighbor],
MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
if (OMPI_SUCCESS != rc) break;
sbuf = (const char *) sbuf + sdextent * scount;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
return ompi_request_wait_all (degree * 2, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
}
static int
mca_coll_basic_neighbor_alltoall_dist_graph(const void *sbuf, int scount, struct ompi_datatype_t *sdtype, void *rbuf,
int rcount, struct ompi_datatype_t *rdtype, struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_dist_graph_2_1_0_t *dist_graph = comm->c_topo->mtc.dist_graph;
ptrdiff_t lb, rdextent, sdextent;
int rc = MPI_SUCCESS, neighbor;
const int *inedges, *outedges;
int indegree, outdegree;
ompi_request_t **reqs;
indegree = dist_graph->indegree;
outdegree = dist_graph->outdegree;
inedges = dist_graph->in;
outedges = dist_graph->out;
ompi_datatype_get_extent(rdtype, &lb, &rdextent);
ompi_datatype_get_extent(sdtype, &lb, &sdextent);
/* post receives first */
for (neighbor = 0, reqs = basic_module->mccb_reqs ; neighbor < indegree ; ++neighbor, ++reqs) {
rc = MCA_PML_CALL(irecv(rbuf, rcount, rdtype, inedges[neighbor],
MCA_COLL_BASE_TAG_ALLTOALL,
comm, reqs));
if (OMPI_SUCCESS != rc) break;
rbuf = (char *) rbuf + rdextent * rcount;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
for (neighbor = 0 ; neighbor < outdegree ; ++neighbor, ++reqs) {
/* remove cast from const when the pml layer is updated to take a const for the send buffer */
rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, outedges[neighbor],
MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD,
comm, reqs));
if (OMPI_SUCCESS != rc) break;
sbuf = (const char *) sbuf + sdextent * scount;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
return ompi_request_wait_all (indegree + outdegree, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
}
int mca_coll_basic_neighbor_alltoall(void *sbuf, int scount, struct ompi_datatype_t *sdtype, void *rbuf,
int rcount, struct ompi_datatype_t *rdtype, struct ompi_communicator_t *comm,
mca_coll_base_module_t *module)
{
if (OMPI_COMM_IS_INTER(comm)) {
return OMPI_ERR_NOT_SUPPORTED;
}
if (OMPI_COMM_IS_CART(comm)) {
return mca_coll_basic_neighbor_alltoall_cart (sbuf, scount, sdtype, rbuf,
rcount, rdtype, comm, module);
} else if (OMPI_COMM_IS_GRAPH(comm)) {
return mca_coll_basic_neighbor_alltoall_graph (sbuf, scount, sdtype, rbuf,
rcount, rdtype, comm, module);
} else if (OMPI_COMM_IS_DIST_GRAPH(comm)) {
return mca_coll_basic_neighbor_alltoall_dist_graph (sbuf, scount, sdtype, rbuf,
rcount, rdtype, comm, module);
}
return OMPI_ERR_NOT_SUPPORTED;
}

View File

@ -0,0 +1,239 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2006 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "coll_basic.h"
#include <stdlib.h>
#include "mpi.h"
#include "ompi/constants.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/mca/coll/coll.h"
#include "ompi/mca/pml/pml.h"
#include "ompi/mca/coll/base/coll_tags.h"
#include "coll_basic.h"
#include "ompi/mca/topo/base/base.h"
static int
mca_coll_basic_neighbor_alltoallv_cart(const void *sbuf, const int scounts[], const int sdisps[],
struct ompi_datatype_t *sdtype, void *rbuf, const int rcounts[],
const int rdisps[], struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_cart_2_1_0_t *cart = comm->c_topo->mtc.cart;
const int rank = ompi_comm_rank (comm);
int rc = MPI_SUCCESS, dim, i, nreqs;
ptrdiff_t lb, rdextent, sdextent;
ompi_request_t **reqs;
ompi_datatype_get_extent(rdtype, &lb, &rdextent);
ompi_datatype_get_extent(sdtype, &lb, &sdextent);
/* post receives first */
for (dim = 0, nreqs = 0, i = 0, reqs = basic_module->mccb_reqs ; dim < cart->ndims ; ++dim, i += 2) {
int srank = MPI_PROC_NULL, drank = MPI_PROC_NULL;
if (cart->dims[dim] > 1) {
mca_topo_base_cart_shift (comm, dim, 1, &srank, &drank);
} else if (1 == cart->dims[dim] && cart->periods[dim]) {
srank = drank = rank;
}
if (MPI_PROC_NULL != srank) {
rc = MCA_PML_CALL(irecv((char *) rbuf + rdisps[i] * rdextent, rcounts[i], rdtype, srank,
MCA_COLL_BASE_TAG_ALLTOALL, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
}
if (MPI_PROC_NULL != drank) {
rc = MCA_PML_CALL(irecv((char *) rbuf + rdisps[i+1] * rdextent, rcounts[i+1], rdtype, drank,
MCA_COLL_BASE_TAG_ALLTOALL, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
}
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
for (dim = 0, i = 0 ; dim < cart->ndims ; ++dim, i += 2) {
int srank = MPI_PROC_NULL, drank = MPI_PROC_NULL;
if (cart->dims[dim] > 1) {
mca_topo_base_cart_shift (comm, dim, 1, &srank, &drank);
} else if (1 == cart->dims[dim] && cart->periods[dim]) {
srank = drank = rank;
}
if (MPI_PROC_NULL != srank) {
/* remove cast from const when the pml layer is updated to take a const for the send buffer */
rc = MCA_PML_CALL(isend((char *) sbuf + sdisps[i] * sdextent, scounts[i], sdtype, srank,
MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
}
if (MPI_PROC_NULL != drank) {
rc = MCA_PML_CALL(isend((char *) sbuf + sdisps[i+1] * sdextent, scounts[i+1], sdtype, drank,
MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
}
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
return ompi_request_wait_all (nreqs, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
}
static int
mca_coll_basic_neighbor_alltoallv_graph(const void *sbuf, const int scounts[], const int sdisps[],
struct ompi_datatype_t *sdtype, void *rbuf, const int rcounts[],
const int rdisps[], struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_graph_2_1_0_t *graph = comm->c_topo->mtc.graph;
int rc = MPI_SUCCESS, neighbor, degree;
const int rank = ompi_comm_rank (comm);
ptrdiff_t lb, rdextent, sdextent;
ompi_request_t **reqs;
const int *edges;
mca_topo_base_graph_neighbors_count (comm, rank, &degree);
edges = graph->edges;
if (rank > 0) {
edges += graph->index[rank - 1];
}
ompi_datatype_get_extent(rdtype, &lb, &rdextent);
ompi_datatype_get_extent(sdtype, &lb, &sdextent);
/* post all receives first */
for (neighbor = 0, reqs = basic_module->mccb_reqs ; neighbor < degree ; ++neighbor) {
rc = MCA_PML_CALL(irecv((char *) rbuf + rdisps[neighbor] * rdextent, rcounts[neighbor], rdtype,
edges[neighbor], MCA_COLL_BASE_TAG_ALLTOALL, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
for (neighbor = 0 ; neighbor < degree ; ++neighbor) {
/* remove cast from const when the pml layer is updated to take a const for the send buffer */
rc = MCA_PML_CALL(isend((char *) sbuf + sdisps[neighbor] * sdextent, scounts[neighbor], sdtype,
edges[neighbor], MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
return ompi_request_wait_all (degree * 2, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
}
static int
mca_coll_basic_neighbor_alltoallv_dist_graph(const void *sbuf, const int scounts[], const int sdisps[],
struct ompi_datatype_t *sdtype, void *rbuf, const int rcounts[],
const int rdisps[], struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_dist_graph_2_1_0_t *dist_graph = comm->c_topo->mtc.dist_graph;
ptrdiff_t lb, rdextent, sdextent;
int rc = MPI_SUCCESS, neighbor;
const int *inedges, *outedges;
int indegree, outdegree;
ompi_request_t **reqs;
indegree = dist_graph->indegree;
outdegree = dist_graph->outdegree;
inedges = dist_graph->in;
outedges = dist_graph->out;
ompi_datatype_get_extent(rdtype, &lb, &rdextent);
ompi_datatype_get_extent(sdtype, &lb, &sdextent);
/* post all receives first */
for (neighbor = 0, reqs = basic_module->mccb_reqs ; neighbor < indegree ; ++neighbor) {
rc = MCA_PML_CALL(irecv((char *) rbuf + rdisps[neighbor] * rdextent, rcounts[neighbor], rdtype,
inedges[neighbor], MCA_COLL_BASE_TAG_ALLTOALL, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
for (neighbor = 0 ; neighbor < outdegree ; ++neighbor) {
/* remove cast from const when the pml layer is updated to take a const for the send buffer */
rc = MCA_PML_CALL(isend((char *) sbuf + sdisps[neighbor] * sdextent, scounts[neighbor], sdtype,
outedges[neighbor], MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
return ompi_request_wait_all (indegree + outdegree, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
}
int mca_coll_basic_neighbor_alltoallv(void *sbuf, int scounts[], int sdisps[],
struct ompi_datatype_t *sdtype, void *rbuf, int rcounts[],
int rdisps[], struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, mca_coll_base_module_t *module)
{
if (OMPI_COMM_IS_INTER(comm)) {
return OMPI_ERR_NOT_SUPPORTED;
}
if (OMPI_COMM_IS_CART(comm)) {
return mca_coll_basic_neighbor_alltoallv_cart (sbuf, scounts, sdisps, sdtype, rbuf,
rcounts, rdisps, rdtype, comm, module);
} else if (OMPI_COMM_IS_GRAPH(comm)) {
return mca_coll_basic_neighbor_alltoallv_graph (sbuf, scounts, sdisps, sdtype, rbuf,
rcounts, rdisps, rdtype, comm, module);
} else if (OMPI_COMM_IS_DIST_GRAPH(comm)) {
return mca_coll_basic_neighbor_alltoallv_dist_graph (sbuf, scounts, sdisps, sdtype, rbuf,
rcounts, rdisps, rdtype, comm, module);
}
return OMPI_ERR_NOT_SUPPORTED;
}

View File

@ -0,0 +1,228 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2006 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "coll_basic.h"
#include <stdlib.h>
#include "mpi.h"
#include "ompi/constants.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/mca/coll/coll.h"
#include "ompi/mca/pml/pml.h"
#include "ompi/mca/coll/base/coll_tags.h"
#include "coll_basic.h"
#include "ompi/mca/topo/base/base.h"
static int
mca_coll_basic_neighbor_alltoallw_cart(const void *sbuf, const int scounts[], const MPI_Aint sdisps[],
struct ompi_datatype_t * const sdtypes[], void *rbuf, const int rcounts[],
const MPI_Aint rdisps[], struct ompi_datatype_t * const rdtypes[],
struct ompi_communicator_t *comm, mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_cart_2_1_0_t *cart = comm->c_topo->mtc.cart;
const int rank = ompi_comm_rank (comm);
int rc = MPI_SUCCESS, dim, i, nreqs;
ompi_request_t **reqs;
/* post receives first */
for (dim = 0, i = 0, nreqs = 0, reqs = basic_module->mccb_reqs ; dim < cart->ndims ; ++dim, i += 2) {
int srank = MPI_PROC_NULL, drank = MPI_PROC_NULL;
if (cart->dims[dim] > 1) {
mca_topo_base_cart_shift (comm, dim, 1, &srank, &drank);
} else if (1 == cart->dims[dim] && cart->periods[dim]) {
srank = drank = rank;
}
if (MPI_PROC_NULL != srank) {
rc = MCA_PML_CALL(irecv((char *) rbuf + rdisps[i], rcounts[i], rdtypes[i], srank,
MCA_COLL_BASE_TAG_ALLTOALL, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
}
if (MPI_PROC_NULL != drank) {
rc = MCA_PML_CALL(irecv((char *) rbuf + rdisps[i+1], rcounts[i+1], rdtypes[i+1], drank,
MCA_COLL_BASE_TAG_ALLTOALL, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
}
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
for (dim = 0, i = 0 ; dim < cart->ndims ; ++dim, i += 2) {
int srank = MPI_PROC_NULL, drank = MPI_PROC_NULL;
if (cart->dims[dim] > 1) {
mca_topo_base_cart_shift (comm, dim, 1, &srank, &drank);
} else if (1 == cart->dims[dim] && cart->periods[dim]) {
srank = drank = rank;
}
if (MPI_PROC_NULL != srank) {
/* remove cast from const when the pml layer is updated to take a const for the send buffer */
rc = MCA_PML_CALL(isend((char *) sbuf + sdisps[i], scounts[i], sdtypes[i], srank,
MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
}
if (MPI_PROC_NULL != drank) {
rc = MCA_PML_CALL(isend((char *) sbuf + sdisps[i+1], scounts[i+1], sdtypes[i+1], drank,
MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
nreqs++;
}
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
return ompi_request_wait_all (nreqs, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
}
static int
mca_coll_basic_neighbor_alltoallw_graph(const void *sbuf, const int scounts[], const MPI_Aint sdisps[],
struct ompi_datatype_t * const sdtypes[], void *rbuf, const int rcounts[],
const MPI_Aint rdisps[], struct ompi_datatype_t * const rdtypes[],
struct ompi_communicator_t *comm, mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_graph_2_1_0_t *graph = comm->c_topo->mtc.graph;
int rc = MPI_SUCCESS, neighbor, degree;
const int rank = ompi_comm_rank (comm);
ompi_request_t **reqs;
const int *edges;
mca_topo_base_graph_neighbors_count (comm, rank, &degree);
edges = graph->edges;
if (rank > 0) {
edges += graph->index[rank - 1];
}
/* post all receives first */
for (neighbor = 0, reqs = basic_module->mccb_reqs ; neighbor < degree ; ++neighbor) {
rc = MCA_PML_CALL(irecv((char *) rbuf + rdisps[neighbor], rcounts[neighbor], rdtypes[neighbor],
edges[neighbor], MCA_COLL_BASE_TAG_ALLTOALL, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
for (neighbor = 0 ; neighbor < degree ; ++neighbor) {
/* remove cast from const when the pml layer is updated to take a const for the send buffer */
rc = MCA_PML_CALL(isend((char *) sbuf + sdisps[neighbor], scounts[neighbor], sdtypes[neighbor],
edges[neighbor], MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
return ompi_request_wait_all (degree * 2, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
}
static int
mca_coll_basic_neighbor_alltoallw_dist_graph(const void *sbuf, const int scounts[], const MPI_Aint sdisps[],
struct ompi_datatype_t * const sdtypes[], void *rbuf, const int rcounts[],
const MPI_Aint rdisps[], struct ompi_datatype_t * const rdtypes[],
struct ompi_communicator_t *comm, mca_coll_base_module_t *module)
{
mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
const mca_topo_base_comm_dist_graph_2_1_0_t *dist_graph = comm->c_topo->mtc.dist_graph;
int rc = MPI_SUCCESS, neighbor;
const int *inedges, *outedges;
int indegree, outdegree;
ompi_request_t **reqs;
indegree = dist_graph->indegree;
outdegree = dist_graph->outdegree;
inedges = dist_graph->in;
outedges = dist_graph->out;
/* post all receives first */
for (neighbor = 0, reqs = basic_module->mccb_reqs ; neighbor < indegree ; ++neighbor) {
rc = MCA_PML_CALL(irecv((char *) rbuf + rdisps[neighbor], rcounts[neighbor], rdtypes[neighbor],
inedges[neighbor], MCA_COLL_BASE_TAG_ALLTOALL, comm, reqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
for (neighbor = 0 ; neighbor < outdegree ; ++neighbor) {
/* remove cast from const when the pml layer is updated to take a const for the send buffer */
rc = MCA_PML_CALL(isend((char *) sbuf + sdisps[neighbor], scounts[neighbor], sdtypes[neighbor],
outedges[neighbor], MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD,
comm, reqs++));
if (OMPI_SUCCESS != rc) break;
}
if (OMPI_SUCCESS != rc) {
/* should probably try to clean up here */
return rc;
}
return ompi_request_wait_all (indegree + outdegree, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
}
int mca_coll_basic_neighbor_alltoallw(void *sbuf, int scounts[], MPI_Aint sdisps[],
struct ompi_datatype_t *sdtypes[], void *rbuf, int rcounts[],
MPI_Aint rdisps[], struct ompi_datatype_t *rdtypes[],
struct ompi_communicator_t *comm, mca_coll_base_module_t *module)
{
if (OMPI_COMM_IS_INTER(comm)) {
return OMPI_ERR_NOT_SUPPORTED;
}
if (OMPI_COMM_IS_CART(comm)) {
return mca_coll_basic_neighbor_alltoallw_cart (sbuf, scounts, sdisps, sdtypes, rbuf,
rcounts, rdisps, rdtypes, comm, module);
} else if (OMPI_COMM_IS_GRAPH(comm)) {
return mca_coll_basic_neighbor_alltoallw_graph (sbuf, scounts, sdisps, sdtypes, rbuf,
rcounts, rdisps, rdtypes, comm, module);
} else if (OMPI_COMM_IS_DIST_GRAPH(comm)) {
return mca_coll_basic_neighbor_alltoallw_dist_graph (sbuf, scounts, sdisps, sdtypes, rbuf,
rcounts, rdisps, rdtypes, comm, module);
}
return OMPI_ERR_NOT_SUPPORTED;
}

View File

@ -1,3 +1,4 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
@ -5,17 +6,19 @@
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007-2008 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007-2008 UT-Battelle, LLC
* Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
*
* Additional copyrights may follow
*
*
* $HEADER$
*/
@ -35,7 +38,7 @@
* collm_comm_query function, instantiating a module for each
component that is usable, and sets the module collective function pointers.
* mca_coll_base_comm_select() then loops through the list of available
* components (via the instantiated module), and uses the
* components (via the instantiated module), and uses the
* module's coll_module_enable() function to enable the modules, and
* if successful, sets the communicator collective functions to the
* those supplied by the given module, keeping track of which module it
@ -44,7 +47,7 @@
* The module destructors are called for each module used by the
communicator, at communicator destruction time.
*
* This can result in up to N different components being used for a
* This can result in up to N different components being used for a
* single communicator, one per needed collective function.
*
* The interface is the same for inter- or intra-communicators, and
@ -124,7 +127,7 @@ typedef int (*mca_coll_base_component_init_query_fn_t)
* phase of initialization.
*
* @param[in] comm The communicator being created
* @param[out] priority Priority setting for component on
* @param[out] priority Priority setting for component on
* this communicator
*
* @returns An initialized module structure if the component can
@ -176,27 +179,27 @@ typedef int
typedef int (*mca_coll_base_module_allgather_fn_t)
(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void *rbuf, int rcount, struct ompi_datatype_t *rdtype,
(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void *rbuf, int rcount, struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_allgatherv_fn_t)
(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void * rbuf, int *rcounts, int *disps, struct ompi_datatype_t *rdtype,
(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void * rbuf, int *rcounts, int *disps, struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_allreduce_fn_t)
(void *sbuf, void *rbuf, int count, struct ompi_datatype_t *dtype,
(void *sbuf, void *rbuf, int count, struct ompi_datatype_t *dtype,
struct ompi_op_t *op, struct ompi_communicator_t *comm, struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_alltoall_fn_t)
(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void* rbuf, int rcount, struct ompi_datatype_t *rdtype,
(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void* rbuf, int rcount, struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_alltoallv_fn_t)
(void *sbuf, int *scounts, int *sdisps, struct ompi_datatype_t *sdtype,
void *rbuf, int *rcounts, int *rdisps, struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_alltoallw_fn_t)
(void *sbuf, int *scounts, int *sdisps, struct ompi_datatype_t **sdtypes,
void *rbuf, int *rcounts, int *rdisps, struct ompi_datatype_t **rdtypes,
struct ompi_communicator_t *comm, struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_barrier_fn_t)
(struct ompi_communicator_t *comm, struct mca_coll_base_module_2_0_0_t *module);
@ -204,18 +207,18 @@ typedef int (*mca_coll_base_module_bcast_fn_t)
(void *buff, int count, struct ompi_datatype_t *datatype, int root,
struct ompi_communicator_t *comm, struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_exscan_fn_t)
(void *sbuf, void *rbuf, int count, struct ompi_datatype_t *dtype,
struct ompi_op_t *op, struct ompi_communicator_t *comm, struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_gather_fn_t)
(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void *rbuf, int rcount, struct ompi_datatype_t *rdtype,
int root, struct ompi_communicator_t *comm, struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_gatherv_fn_t)
(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void *rbuf, int *rcounts, int *disps, struct ompi_datatype_t *rdtype,
int root, struct ompi_communicator_t *comm, struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_reduce_fn_t)
(void *sbuf, void* rbuf, int count, struct ompi_datatype_t *dtype,
struct ompi_op_t *op, int root, struct ompi_communicator_t *comm, struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_reduce_scatter_fn_t)
(void *sbuf, void *rbuf, int *rcounts, struct ompi_datatype_t *dtype,
@ -224,45 +227,45 @@ typedef int (*mca_coll_base_module_reduce_scatter_block_fn_t)
(void *sbuf, void *rbuf, int rcount, struct ompi_datatype_t *dtype,
struct ompi_op_t *op, struct ompi_communicator_t *comm, struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_scan_fn_t)
(void *sbuf, void *rbuf, int count, struct ompi_datatype_t *dtype,
struct ompi_op_t *op, struct ompi_communicator_t *comm, struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_scatter_fn_t)
(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void *rbuf, int rcount, struct ompi_datatype_t *rdtype,
int root, struct ompi_communicator_t *comm, struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_scatterv_fn_t)
(void *sbuf, int *scounts, int *disps, struct ompi_datatype_t *sdtype,
void* rbuf, int rcount, struct ompi_datatype_t *rdtype,
int root, struct ompi_communicator_t *comm, struct mca_coll_base_module_2_0_0_t *module);
/* nonblocking collectives */
typedef int (*mca_coll_base_module_iallgather_fn_t)
(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void *rbuf, int rcount, struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_iallgatherv_fn_t)
(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void * rbuf, int *rcounts, int *disps, struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_iallreduce_fn_t)
(void *sbuf, void *rbuf, int count, struct ompi_datatype_t *dtype,
struct ompi_op_t *op, struct ompi_communicator_t *comm,
ompi_request_t ** request, struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_ialltoall_fn_t)
(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void* rbuf, int rcount, struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_ialltoallv_fn_t)
(void *sbuf, int *scounts, int *sdisps, struct ompi_datatype_t *sdtype,
void *rbuf, int *rcounts, int *rdisps, struct ompi_datatype_t *rdtype,
struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_ialltoallw_fn_t)
(void *sbuf, int *scounts, int *sdisps, struct ompi_datatype_t **sdtypes,
void *rbuf, int *rcounts, int *rdisps, struct ompi_datatype_t **rdtypes,
struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_ibarrier_fn_t)
@ -273,21 +276,21 @@ typedef int (*mca_coll_base_module_ibcast_fn_t)
struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_iexscan_fn_t)
(void *sbuf, void *rbuf, int count, struct ompi_datatype_t *dtype,
struct ompi_op_t *op, struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_igather_fn_t)
(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void *rbuf, int rcount, struct ompi_datatype_t *rdtype,
int root, struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_igatherv_fn_t)
(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void *rbuf, int *rcounts, int *disps, struct ompi_datatype_t *rdtype,
int root, struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_ireduce_fn_t)
(void *sbuf, void* rbuf, int count, struct ompi_datatype_t *dtype,
struct ompi_op_t *op, int root, struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_ireduce_scatter_fn_t)
@ -299,20 +302,33 @@ typedef int (*mca_coll_base_module_ireduce_scatter_block_fn_t)
struct ompi_op_t *op, struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_iscan_fn_t)
(void *sbuf, void *rbuf, int count, struct ompi_datatype_t *dtype,
struct ompi_op_t *op, struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_iscatter_fn_t)
(void *sbuf, int scount, struct ompi_datatype_t *sdtype,
void *rbuf, int rcount, struct ompi_datatype_t *rdtype,
int root, struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_iscatterv_fn_t)
(void *sbuf, int *scounts, int *disps, struct ompi_datatype_t *sdtype,
void* rbuf, int rcount, struct ompi_datatype_t *rdtype,
int root, struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
/*
* The signature of the neighborhood alltoallw differs from alltoallw
*/
typedef int (*mca_coll_base_module_neighbor_alltoallw_fn_t)
(void *sbuf, int *scounts, MPI_Aint *sdisps, struct ompi_datatype_t **sdtypes,
void *rbuf, int *rcounts, MPI_Aint *rdisps, struct ompi_datatype_t **rdtypes,
struct ompi_communicator_t *comm, struct mca_coll_base_module_2_0_0_t *module);
typedef int (*mca_coll_base_module_ineighbor_alltoallw_fn_t)
(void *sbuf, int *scounts, MPI_Aint *sdisps, struct ompi_datatype_t **sdtypes,
void *rbuf, int *rcounts, MPI_Aint *rdisps, struct ompi_datatype_t **rdtypes,
struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
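Editor's illustration (not part of this commit): a minimal caller-side sketch of why the neighborhood alltoallw needs its own typedef. The (i)alltoallv variants take element displacements as int arrays against a single datatype, while the (i)alltoallw variants take byte displacements as MPI_Aint arrays with one datatype per neighbor. The helper name and the two-neighbor topology are assumptions, not code from the commit.
#include <mpi.h>
/* Assumes 'nbrcomm' is a topology communicator on which this rank has exactly
 * two in-neighbors and two out-neighbors (e.g. a periodic 1-D cartesian ring). */
static int contrast_v_and_w(MPI_Comm nbrcomm, double sendvals[2], double recvvals[2])
{
    int          counts[2]  = { 1, 1 };
    int          vdispls[2] = { 0, 1 };                         /* alltoallv: element offsets */
    MPI_Aint     wdispls[2] = { 0, (MPI_Aint) sizeof(double) }; /* alltoallw: byte offsets    */
    MPI_Datatype types[2]   = { MPI_DOUBLE, MPI_DOUBLE };       /* alltoallw: one per neighbor */
    int err;
    err = MPI_Neighbor_alltoallv(sendvals, counts, vdispls, MPI_DOUBLE,
                                 recvvals, counts, vdispls, MPI_DOUBLE, nbrcomm);
    if (MPI_SUCCESS != err) return err;
    return MPI_Neighbor_alltoallw(sendvals, counts, wdispls, types,
                                  recvvals, counts, wdispls, types, nbrcomm);
}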
/**
* Fault Tolerance Awareness function.
*
@ -364,7 +380,7 @@ typedef struct mca_coll_base_component_2_0_0_t mca_coll_base_component_t;
* reference counted based on the number of functions from the module
* used on the communicator. There is at most one module per component
* on a given communicator, and there can be many component modules on
* a given communicator.
*
* @note The collective framework and the
* communicator functionality only stores a pointer to the module
@ -417,6 +433,19 @@ struct mca_coll_base_module_2_0_0_t {
mca_coll_base_module_iscatter_fn_t coll_iscatter;
mca_coll_base_module_iscatterv_fn_t coll_iscatterv;
/* neighborhood functions */
mca_coll_base_module_allgather_fn_t coll_neighbor_allgather;
mca_coll_base_module_allgatherv_fn_t coll_neighbor_allgatherv;
mca_coll_base_module_alltoall_fn_t coll_neighbor_alltoall;
mca_coll_base_module_alltoallv_fn_t coll_neighbor_alltoallv;
mca_coll_base_module_neighbor_alltoallw_fn_t coll_neighbor_alltoallw;
mca_coll_base_module_iallgather_fn_t coll_ineighbor_allgather;
mca_coll_base_module_iallgatherv_fn_t coll_ineighbor_allgatherv;
mca_coll_base_module_ialltoall_fn_t coll_ineighbor_alltoall;
mca_coll_base_module_ialltoallv_fn_t coll_ineighbor_alltoallv;
mca_coll_base_module_ineighbor_alltoallw_fn_t coll_ineighbor_alltoallw;
/** Fault tolerance event trigger function */
mca_coll_base_module_ft_event_fn_t ft_event;
};
@ -507,6 +536,29 @@ struct mca_coll_base_comm_coll_t {
mca_coll_base_module_2_0_0_t *coll_iscatter_module;
mca_coll_base_module_iscatterv_fn_t coll_iscatterv;
mca_coll_base_module_2_0_0_t *coll_iscatterv_module;
/* neighborhood collectives */
mca_coll_base_module_allgather_fn_t coll_neighbor_allgather;
mca_coll_base_module_2_0_0_t *coll_neighbor_allgather_module;
mca_coll_base_module_allgatherv_fn_t coll_neighbor_allgatherv;
mca_coll_base_module_2_0_0_t *coll_neighbor_allgatherv_module;
mca_coll_base_module_alltoall_fn_t coll_neighbor_alltoall;
mca_coll_base_module_2_0_0_t *coll_neighbor_alltoall_module;
mca_coll_base_module_alltoallv_fn_t coll_neighbor_alltoallv;
mca_coll_base_module_2_0_0_t *coll_neighbor_alltoallv_module;
mca_coll_base_module_neighbor_alltoallw_fn_t coll_neighbor_alltoallw;
mca_coll_base_module_2_0_0_t *coll_neighbor_alltoallw_module;
mca_coll_base_module_iallgather_fn_t coll_ineighbor_allgather;
mca_coll_base_module_2_0_0_t *coll_ineighbor_allgather_module;
mca_coll_base_module_iallgatherv_fn_t coll_ineighbor_allgatherv;
mca_coll_base_module_2_0_0_t *coll_ineighbor_allgatherv_module;
mca_coll_base_module_ialltoall_fn_t coll_ineighbor_alltoall;
mca_coll_base_module_2_0_0_t *coll_ineighbor_alltoall_module;
mca_coll_base_module_ialltoallv_fn_t coll_ineighbor_alltoallv;
mca_coll_base_module_2_0_0_t *coll_ineighbor_alltoallv_module;
mca_coll_base_module_ineighbor_alltoallw_fn_t coll_ineighbor_alltoallw;
mca_coll_base_module_2_0_0_t *coll_ineighbor_alltoallw_module;
};
typedef struct mca_coll_base_comm_coll_t mca_coll_base_comm_coll_t;
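Editor's note (not in the diff): the MPI layer consumes these cached pairs by calling the stored function pointer and passing back the module that supplied it, mirroring the pattern used in the new ompi/mpi/c bindings later in this commit. A hedged sketch of that dispatch; the wrapper name is invented and the usual OMPI internal headers are assumed to be in scope.
/* Sketch only: assumes comm->c_coll is the mca_coll_base_comm_coll_t shown above. */
static int invoke_neighbor_allgather(struct ompi_communicator_t *comm,
                                     void *sbuf, int scount, struct ompi_datatype_t *sdtype,
                                     void *rbuf, int rcount, struct ompi_datatype_t *rdtype)
{
    /* each function pointer travels with the module that provided it */
    return comm->c_coll.coll_neighbor_allgather(sbuf, scount, sdtype,
                                                rbuf, rcount, rdtype, comm,
                                                comm->c_coll.coll_neighbor_allgather_module);
}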


@ -10,6 +10,8 @@
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2010 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2013 Los Alamos National Security, LLC. All rights
# reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
@ -39,11 +41,17 @@ sources = \
nbc_ibcast_inter.c \
nbc_igather.c \
nbc_igatherv.c \
nbc_ineighbor_allgather.c \
nbc_ineighbor_allgatherv.c \
nbc_ineighbor_alltoall.c \
nbc_ineighbor_alltoallv.c \
nbc_ineighbor_alltoallw.c \
nbc_ireduce.c \
nbc_ireduce_scatter.c \
nbc_iscan.c \
nbc_iscatter.c \
nbc_iscatterv.c \
nbc_neighbor_helpers.c
# Make the output library in this directory, and name it either
# mca_<type>_<name>.la (for DSO builds) or libmca_<type>_<name>.la


@ -1,3 +1,4 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
@ -10,6 +11,8 @@
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -249,6 +252,27 @@ int ompi_coll_libnbc_iscatterv_inter(void* sendbuf, int *sendcounts, int *displs
struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
int ompi_coll_libnbc_ineighbor_allgather(void *sbuf, int scount, MPI_Datatype stype, void *rbuf,
int rcount, MPI_Datatype rtype, struct ompi_communicator_t *comm,
ompi_request_t ** request, struct mca_coll_base_module_2_0_0_t *module);
int ompi_coll_libnbc_ineighbor_allgatherv(void *sbuf, int scount, MPI_Datatype stype, void *rbuf,
int *rcounts, int *displs, MPI_Datatype rtype,
struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
int ompi_coll_libnbc_ineighbor_alltoall(void *sbuf, int scount, MPI_Datatype stype, void *rbuf,
int rcount, MPI_Datatype rtype, struct ompi_communicator_t *comm,
ompi_request_t ** request, struct mca_coll_base_module_2_0_0_t *module);
int ompi_coll_libnbc_ineighbor_alltoallv(void *sbuf, int *scounts, int *sdispls, MPI_Datatype stype,
void *rbuf, int *rcounts, int *rdispls, MPI_Datatype rtype,
struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
int ompi_coll_libnbc_ineighbor_alltoallw(void *sbuf, int *scounts, MPI_Aint *sdisps, MPI_Datatype *stypes,
void *rbuf, int *rcounts, MPI_Aint *rdisps, MPI_Datatype *rtypes,
struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module);
END_C_DECLS
#endif /* MCA_COLL_LIBNBC_EXPORT_H */


@ -1,3 +1,4 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
@ -10,6 +11,8 @@
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -149,6 +152,12 @@ libnbc_init_query(bool enable_progress_threads,
}
static int libnbc_not_implemented (void *arg0, ...)
{
(void)arg0;
return OMPI_ERR_NOT_IMPLEMENTED;
}
/*
* Invoked when there's a new communicator that has been created.
* Look at the communicator and decide which set of functions and
@ -184,6 +193,11 @@ libnbc_comm_query(struct ompi_communicator_t *comm,
module->super.coll_iscan = NULL;
module->super.coll_iscatter = ompi_coll_libnbc_iscatter_inter;
module->super.coll_iscatterv = ompi_coll_libnbc_iscatterv_inter;
module->super.coll_ineighbor_allgather = (mca_coll_base_module_iallgather_fn_t) libnbc_not_implemented;
module->super.coll_ineighbor_allgatherv = (mca_coll_base_module_iallgatherv_fn_t) libnbc_not_implemented;
module->super.coll_ineighbor_alltoall = (mca_coll_base_module_ialltoall_fn_t) libnbc_not_implemented;
module->super.coll_ineighbor_alltoallv = (mca_coll_base_module_ialltoallv_fn_t) libnbc_not_implemented;
module->super.coll_ineighbor_alltoallw = (mca_coll_base_module_ineighbor_alltoallw_fn_t) libnbc_not_implemented;
} else {
module->super.coll_iallgather = ompi_coll_libnbc_iallgather;
module->super.coll_iallgatherv = ompi_coll_libnbc_iallgatherv;
@ -202,7 +216,13 @@ libnbc_comm_query(struct ompi_communicator_t *comm,
module->super.coll_iscan = ompi_coll_libnbc_iscan;
module->super.coll_iscatter = ompi_coll_libnbc_iscatter;
module->super.coll_iscatterv = ompi_coll_libnbc_iscatterv;
module->super.coll_ineighbor_allgather = ompi_coll_libnbc_ineighbor_allgather;
module->super.coll_ineighbor_allgatherv = ompi_coll_libnbc_ineighbor_allgatherv;
module->super.coll_ineighbor_alltoall = ompi_coll_libnbc_ineighbor_alltoall;
module->super.coll_ineighbor_alltoallv = ompi_coll_libnbc_ineighbor_alltoallv;
module->super.coll_ineighbor_alltoallw = ompi_coll_libnbc_ineighbor_alltoallw;
}
module->super.ft_event = NULL;
if (OMPI_SUCCESS != NBC_Init_comm(comm, module)) {


@ -0,0 +1,163 @@
/*
* Copyright (c) 2006 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2006 The Technical University of Chemnitz. All
* rights reserved.
*
* Author(s): Torsten Hoefler <htor@cs.indiana.edu>
*
*/
#include "nbc_internal.h"
/* cannot cache schedules because one cannot check locally if the pattern is the same!! */
#undef NBC_CACHE_SCHEDULE
#ifdef NBC_CACHE_SCHEDULE
/* tree comparison function for schedule cache */
int NBC_Ineighbor_allgather_args_compare(NBC_Ineighbor_allgather_args *a, NBC_Ineighbor_allgather_args *b, void *param) {
if( (a->sbuf == b->sbuf) &&
(a->scount == b->scount) &&
(a->stype == b->stype) &&
(a->rbuf == b->rbuf) &&
(a->rcount == b->rcount) &&
(a->rtype == b->rtype) ) {
return 0;
}
if( a->sbuf < b->sbuf ) {
return -1;
}
return +1;
}
#endif
int ompi_coll_libnbc_ineighbor_allgather(void *sbuf, int scount, MPI_Datatype stype, void *rbuf,
int rcount, MPI_Datatype rtype, struct ompi_communicator_t *comm,
ompi_request_t ** request, struct mca_coll_base_module_2_0_0_t *module) {
int rank, size, res, worldsize;
MPI_Aint sndext, rcvext;
NBC_Handle *handle;
ompi_coll_libnbc_request_t **coll_req = (ompi_coll_libnbc_request_t**) request;
ompi_coll_libnbc_module_t *libnbc_module = (ompi_coll_libnbc_module_t*) module;
res = NBC_Init_handle(comm, coll_req, libnbc_module);
handle = *coll_req;
if(res != NBC_OK) { printf("Error in NBC_Init_handle(%i)\n", res); return res; }
res = MPI_Comm_size(comm, &size);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_size() (%i)\n", res); return res; }
res = MPI_Comm_size(MPI_COMM_WORLD, &worldsize);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_size() (%i)\n", res); return res; }
res = MPI_Comm_rank(comm, &rank);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_rank() (%i)\n", res); return res; }
res = MPI_Type_extent(stype, &sndext);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; }
res = MPI_Type_extent(rtype, &rcvext);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; }
char inplace;
NBC_Schedule *schedule;
#ifdef NBC_CACHE_SCHEDULE
NBC_Ineighbor_allgather_args *args, *found, search;
#endif
NBC_IN_PLACE(sbuf, rbuf, inplace);
handle->tmpbuf=NULL;
#ifdef NBC_CACHE_SCHEDULE
/* search schedule in communicator specific tree */
search.sbuf=sbuf;
search.scount=scount;
search.stype=stype;
search.rbuf=rbuf;
search.rcount=rcount;
search.rtype=rtype;
found = (NBC_Ineighbor_allgather_args*)hb_tree_search((hb_tree*)handle->comminfo->NBC_Dict[NBC_NEIGHBOR_ALLGATHER], &search);
if(found == NULL) {
#endif
schedule = (NBC_Schedule*)malloc(sizeof(NBC_Schedule));
res = NBC_Sched_create(schedule);
if(res != NBC_OK) { printf("Error in NBC_Sched_create, res = %i\n", res); return res; }
{
int indegree, outdegree, weighted, *srcs, *dsts, i;
res = NBC_Comm_neighbors_count(comm, &indegree, &outdegree, &weighted);
if(res != NBC_OK) return res;
srcs = (int*)malloc(sizeof(int)*indegree);
dsts = (int*)malloc(sizeof(int)*outdegree);
res = NBC_Comm_neighbors(comm, indegree, srcs, MPI_UNWEIGHTED, outdegree, dsts, MPI_UNWEIGHTED);
if(res != NBC_OK) return res;
if(inplace) { /* we need an extra buffer to be deadlock-free */
handle->tmpbuf = malloc(indegree*rcvext*rcount);
for(i = 0; i < indegree; i++) {
if (MPI_PROC_NULL != srcs[i]) {
res = NBC_Sched_recv((char*)0+i*rcount*rcvext, true, rcount, rtype, srcs[i], schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_recv() (%i)\n", res); return res; }
}
}
for(i = 0; i < outdegree; i++) {
if (MPI_PROC_NULL != dsts[i]) {
res = NBC_Sched_send((char*)sbuf, false, scount, stype, dsts[i], schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_send() (%i)\n", res); return res; }
}
}
/* unpack from buffer */
for(i = 0; i < indegree; i++) {
res = NBC_Sched_barrier(schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_barrier() (%i)\n", res); return res; }
res = NBC_Sched_copy((char*)0+i*rcount*rcvext, true, rcount, rtype, (char*)rbuf+i*rcount*rcvext, false, rcount, rtype, schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_copy() (%i)\n", res); return res; }
}
} else { /* non INPLACE case */
/* simply loop over neighbors and post send/recv operations */
for(i = 0; i < indegree; i++) {
if (MPI_PROC_NULL != srcs[i]) {
res = NBC_Sched_recv((char*)rbuf+i*rcount*rcvext, false, rcount, rtype, srcs[i], schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_recv() (%i)\n", res); return res; }
}
}
for(i = 0; i < outdegree; i++) {
if (MPI_PROC_NULL != dsts[i]) {
res = NBC_Sched_send((char*)sbuf, false, scount, stype, dsts[i], schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_send() (%i)\n", res); return res; }
}
}
}
}
res = NBC_Sched_commit(schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_commit() (%i)\n", res); return res; }
#ifdef NBC_CACHE_SCHEDULE
/* save schedule to tree */
args = (NBC_Ineighbor_allgather_args*)malloc(sizeof(NBC_Ineighbor_allgather_args));
args->sbuf=sbuf;
args->scount=scount;
args->stype=stype;
args->rbuf=rbuf;
args->rcount=rcount;
args->rtype=rtype;
args->schedule=schedule;
res = hb_tree_insert ((hb_tree*)handle->comminfo->NBC_Dict[NBC_NEIGHBOR_ALLGATHER], args, args, 0);
if(res != 0) printf("error in dict_insert() (%i)\n", res);
/* increase number of elements for A2A */
if(++handle->comminfo->NBC_Dict_size[NBC_NEIGHBOR_ALLGATHER] > NBC_SCHED_DICT_UPPER) {
NBC_SchedCache_dictwipe((hb_tree*)handle->comminfo->NBC_Dict[NBC_NEIGHBOR_ALLGATHER], &handle->comminfo->NBC_Dict_size[NBC_NEIGHBOR_ALLGATHER]);
}
} else {
/* found schedule */
schedule=found->schedule;
}
#endif
res = NBC_Start(handle, schedule);
if (NBC_OK != res) { printf("Error in NBC_Start() (%i)\n", res); return res; }
return NBC_OK;
}
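For orientation only (not part of the file): the non-in-place schedule built above is a linear exchange, equivalent in shape to posting one nonblocking receive per in-neighbor and one send of the same buffer per out-neighbor. A hedged sketch of that pattern with plain point-to-point calls; the helper name and tag choice are assumptions, and the real collective matches messages inside the collective context rather than with tags.
#include <mpi.h>
/* 'reqs' must have room for indegree + outdegree requests.
 * MPI_PROC_NULL neighbors are legal here and complete immediately. */
static int neighbor_allgather_by_hand(void *sbuf, int scount, MPI_Datatype stype,
                                      void *rbuf, int rcount, MPI_Datatype rtype,
                                      int indegree, const int *srcs,
                                      int outdegree, const int *dsts,
                                      MPI_Comm comm, MPI_Request *reqs)
{
    MPI_Aint lb, rext;
    int i, n = 0;
    MPI_Type_get_extent(rtype, &lb, &rext);
    for (i = 0; i < indegree; i++)      /* one contiguous receive block per in-neighbor */
        MPI_Irecv((char *) rbuf + (MPI_Aint) i * rcount * rext, rcount, rtype,
                  srcs[i], 0, comm, &reqs[n++]);
    for (i = 0; i < outdegree; i++)     /* the same send buffer goes to every out-neighbor */
        MPI_Isend(sbuf, scount, stype, dsts[i], 0, comm, &reqs[n++]);
    return MPI_Waitall(n, reqs, MPI_STATUSES_IGNORE);
}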


@ -0,0 +1,173 @@
/*
* Copyright (c) 2006 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2006 The Technical University of Chemnitz. All
* rights reserved.
*
* Author(s): Torsten Hoefler <htor@cs.indiana.edu>
*
*/
#include "nbc_internal.h"
/* cannot cache schedules because one cannot check locally if the pattern is the same!! */
#undef NBC_CACHE_SCHEDULE
#ifdef NBC_CACHE_SCHEDULE
/* tree comparison function for schedule cache */
int NBC_Ineighbor_allgatherv_args_compare(NBC_Ineighbor_allgatherv_args *a, NBC_Ineighbor_allgatherv_args *b, void *param) {
if( (a->sbuf == b->sbuf) &&
(a->scount == b->scount) &&
(a->stype == b->stype) &&
(a->rbuf == b->rbuf) &&
(a->rcount == b->rcount) &&
(a->rtype == b->rtype) ) {
return 0;
}
if( a->sbuf < b->sbuf ) {
return -1;
}
return +1;
}
#endif
int ompi_coll_libnbc_ineighbor_allgatherv(void *sbuf, int scount, MPI_Datatype stype, void *rbuf,
int *rcounts, int *displs, MPI_Datatype rtype,
struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module) {
int rank, size, res, worldsize;
MPI_Aint sndext, rcvext;
NBC_Handle *handle;
ompi_coll_libnbc_request_t **coll_req = (ompi_coll_libnbc_request_t**) request;
ompi_coll_libnbc_module_t *libnbc_module = (ompi_coll_libnbc_module_t*) module;
res = NBC_Init_handle(comm, coll_req, libnbc_module);
handle = *coll_req;
if(res != NBC_OK) { printf("Error in NBC_Init_handle(%i)\n", res); return res; }
res = MPI_Comm_size(comm, &size);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_size() (%i)\n", res); return res; }
res = MPI_Comm_size(MPI_COMM_WORLD, &worldsize);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_size() (%i)\n", res); return res; }
res = MPI_Comm_rank(comm, &rank);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_rank() (%i)\n", res); return res; }
res = MPI_Type_extent(stype, &sndext);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; }
res = MPI_Type_extent(rtype, &rcvext);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; }
char inplace;
NBC_Schedule *schedule;
#ifdef NBC_CACHE_SCHEDULE
NBC_Ineighbor_allgatherv_args *args, *found, search;
#endif
NBC_IN_PLACE(sbuf, rbuf, inplace);
handle->tmpbuf=NULL;
#ifdef NBC_CACHE_SCHEDULE
/* search schedule in communicator specific tree */
search.sbuf=sbuf;
search.scount=scount;
search.stype=stype;
search.rbuf=rbuf;
search.rcount=rcount;
search.rtype=rtype;
found = (NBC_Ineighbor_allgatherv_args*)hb_tree_search((hb_tree*)handle->comminfo->NBC_Dict[NBC_NEIGHBOR_ALLGATHERV], &search);
if(found == NULL) {
#endif
schedule = (NBC_Schedule*)malloc(sizeof(NBC_Schedule));
res = NBC_Sched_create(schedule);
if(res != NBC_OK) { printf("Error in NBC_Sched_create, res = %i\n", res); return res; }
{
int indegree, outdegree, weighted, *srcs, *dsts, i;
res = NBC_Comm_neighbors_count(comm, &indegree, &outdegree, &weighted);
if(res != NBC_OK) return res;
srcs = (int*)malloc(sizeof(int)*indegree);
dsts = (int*)malloc(sizeof(int)*outdegree);
res = NBC_Comm_neighbors(comm, indegree, srcs, MPI_UNWEIGHTED, outdegree, dsts, MPI_UNWEIGHTED);
if(res != NBC_OK) return res;
if(inplace) { /* we need an extra buffer to be deadlock-free */
int sumrcounts=0;
int offset=0;
for(i=0; i<indegree; ++i) sumrcounts += rcounts[i];
handle->tmpbuf = malloc(rcvext*sumrcounts);
for(i = 0; i < indegree; i++) {
if(srcs[i] != MPI_PROC_NULL) {
res = NBC_Sched_recv((char*)0+offset, true, rcounts[i], rtype, srcs[i], schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_recv() (%i)\n", res); return res; }
}
offset += rcounts[i]*rcvext;
}
for(i = 0; i < outdegree; i++) {
if(dsts[i] != MPI_PROC_NULL) {
res = NBC_Sched_send((char*)sbuf, false, scount, stype, dsts[i], schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_send() (%i)\n", res); return res; }
}
}
/* unpack from buffer */
offset=0;
for(i = 0; i < indegree; i++) {
if(srcs[i] != MPI_PROC_NULL) {
res = NBC_Sched_barrier(schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_barrier() (%i)\n", res); return res; }
res = NBC_Sched_copy((char*)0+offset, true, rcounts[i], rtype, (char*)rbuf+displs[i]*rcvext, false, rcounts[i], rtype, schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_copy() (%i)\n", res); return res; }
}
offset += rcounts[i]*rcvext;
}
} else { /* non INPLACE case */
/* simply loop over neighbors and post send/recv operations */
for(i = 0; i < indegree; i++) {
if(srcs[i] != MPI_PROC_NULL) {
res = NBC_Sched_recv((char*)rbuf+displs[i]*rcvext, false, rcounts[i], rtype, srcs[i], schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_recv() (%i)\n", res); return res; }
}
}
for(i = 0; i < outdegree; i++) {
if(dsts[i] != MPI_PROC_NULL) {
res = NBC_Sched_send((char*)sbuf, false, scount, stype, dsts[i], schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_send() (%i)\n", res); return res; }
}
}
}
}
res = NBC_Sched_commit(schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_commit() (%i)\n", res); return res; }
#ifdef NBC_CACHE_SCHEDULE
/* save schedule to tree */
args = (NBC_Ineighbor_allgatherv_args*)malloc(sizeof(NBC_Ineighbor_allgatherv_args));
args->sbuf=sbuf;
args->scount=scount;
args->stype=stype;
args->rbuf=rbuf;
args->rcount=rcount;
args->rtype=rtype;
args->schedule=schedule;
res = hb_tree_insert ((hb_tree*)handle->comminfo->NBC_Dict[NBC_NEIGHBOR_ALLGATHERV], args, args, 0);
if(res != 0) printf("error in dict_insert() (%i)\n", res);
/* increase number of elements for A2A */
if(++handle->comminfo->NBC_Dict_size[NBC_NEIGHBOR_ALLGATHERV] > NBC_SCHED_DICT_UPPER) {
NBC_SchedCache_dictwipe((hb_tree*)handle->comminfo->NBC_Dict[NBC_NEIGHBOR_ALLGATHERV], &handle->comminfo->NBC_Dict_size[NBC_NEIGHBOR_ALLGATHERV]);
}
} else {
/* found schedule */
schedule=found->schedule;
}
#endif
res = NBC_Start(handle, schedule);
if (NBC_OK != res) { printf("Error in NBC_Start() (%i)\n", res); return res; }
return NBC_OK;
}

ompi/mca/coll/libnbc/nbc_ineighbor_alltoall.c (new file, 162 lines)

@ -0,0 +1,162 @@
/*
* Copyright (c) 2006 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2006 The Technical University of Chemnitz. All
* rights reserved.
*
* Author(s): Torsten Hoefler <htor@cs.indiana.edu>
*
*/
#include "nbc_internal.h"
/* cannot cache schedules because one cannot check locally if the pattern is the same!! */
#undef NBC_CACHE_SCHEDULE
#ifdef NBC_CACHE_SCHEDULE
/* tree comparison function for schedule cache */
int NBC_Ineighbor_alltoall_args_compare(NBC_Ineighbor_alltoall_args *a, NBC_Ineighbor_alltoall_args *b, void *param) {
if( (a->sbuf == b->sbuf) &&
(a->scount == b->scount) &&
(a->stype == b->stype) &&
(a->rbuf == b->rbuf) &&
(a->rcount == b->rcount) &&
(a->rtype == b->rtype) ) {
return 0;
}
if( a->sbuf < b->sbuf ) {
return -1;
}
return +1;
}
#endif
int ompi_coll_libnbc_ineighbor_alltoall(void *sbuf, int scount, MPI_Datatype stype, void *rbuf,
int rcount, MPI_Datatype rtype, struct ompi_communicator_t *comm,
ompi_request_t ** request, struct mca_coll_base_module_2_0_0_t *module) {
int rank, size, res, worldsize;
MPI_Aint sndext, rcvext;
NBC_Handle *handle;
ompi_coll_libnbc_request_t **coll_req = (ompi_coll_libnbc_request_t**) request;
ompi_coll_libnbc_module_t *libnbc_module = (ompi_coll_libnbc_module_t*) module;
res = NBC_Init_handle(comm, coll_req, libnbc_module);
handle = *coll_req;
if(res != NBC_OK) { printf("Error in NBC_Init_handle(%i)\n", res); return res; }
res = MPI_Comm_size(comm, &size);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_size() (%i)\n", res); return res; }
res = MPI_Comm_size(MPI_COMM_WORLD, &worldsize);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_size() (%i)\n", res); return res; }
res = MPI_Comm_rank(comm, &rank);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_rank() (%i)\n", res); return res; }
res = MPI_Type_extent(stype, &sndext);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; }
res = MPI_Type_extent(rtype, &rcvext);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; }
char inplace;
NBC_Schedule *schedule;
#ifdef NBC_CACHE_SCHEDULE
NBC_Ineighbor_alltoall_args *args, *found, search;
#endif
NBC_IN_PLACE(sbuf, rbuf, inplace);
handle->tmpbuf=NULL;
#ifdef NBC_CACHE_SCHEDULE
/* search schedule in communicator specific tree */
search.sbuf=sbuf;
search.scount=scount;
search.stype=stype;
search.rbuf=rbuf;
search.rcount=rcount;
search.rtype=rtype;
found = (NBC_Ineighbor_alltoall_args*)hb_tree_search((hb_tree*)handle->comminfo->NBC_Dict[NBC_NEIGHBOR_ALLTOALL], &search);
if(found == NULL) {
#endif
schedule = (NBC_Schedule*)malloc(sizeof(NBC_Schedule));
res = NBC_Sched_create(schedule);
if(res != NBC_OK) { printf("Error in NBC_Sched_create, res = %i\n", res); return res; }
{
int indegree, outdegree, weighted, *srcs, *dsts, i;
res = NBC_Comm_neighbors_count(comm, &indegree, &outdegree, &weighted);
if(res != NBC_OK) return res;
srcs = indegree ? (int*)malloc(sizeof(int)*indegree) : NULL;
dsts = outdegree ? (int*)malloc(sizeof(int)*outdegree) : NULL;
res = NBC_Comm_neighbors(comm, indegree, srcs, MPI_UNWEIGHTED, outdegree, dsts, MPI_UNWEIGHTED);
if(res != NBC_OK) return res;
if(inplace) { /* we need an extra buffer to be deadlock-free */
handle->tmpbuf = malloc(indegree*rcvext*rcount);
for(i = 0; i < indegree; i++) {
if (MPI_PROC_NULL != srcs[i]) {
res = NBC_Sched_recv((char*)0+i*rcount*rcvext, true, rcount, rtype, srcs[i], schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_recv() (%i)\n", res); return res; }
}
}
for(i = 0; i < outdegree; i++) {
if (MPI_PROC_NULL != dsts[i]) {
res = NBC_Sched_send((char*)sbuf+i*scount*sndext, false, scount, stype, dsts[i], schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_send() (%i)\n", res); return res; }
}
}
/* unpack from buffer */
for(i = 0; i < indegree; i++) {
res = NBC_Sched_barrier(schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_barrier() (%i)\n", res); return res; }
res = NBC_Sched_copy((char*)0+i*rcount*rcvext, true, rcount, rtype, (char*)rbuf+i*rcount*rcvext, false, rcount, rtype, schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_copy() (%i)\n", res); return res; }
}
} else { /* non INPLACE case */
/* simply loop over neighbors and post send/recv operations */
for(i = 0; i < indegree; i++) {
if (MPI_PROC_NULL != srcs[i]) {
res = NBC_Sched_recv((char*)rbuf+i*rcount*rcvext, false, rcount, rtype, srcs[i], schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_recv() (%i)\n", res); return res; }
}
}
for(i = 0; i < outdegree; i++) {
if (MPI_PROC_NULL != dsts[i]) {
res = NBC_Sched_send((char*)sbuf+i*scount*sndext, false, scount, stype, dsts[i], schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_send() (%i)\n", res); return res; }
}
}
}
}
res = NBC_Sched_commit(schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_commit() (%i)\n", res); return res; }
#ifdef NBC_CACHE_SCHEDULE
/* save schedule to tree */
args = (NBC_Ineighbor_alltoall_args*)malloc(sizeof(NBC_Ineighbor_alltoall_args));
args->sbuf=sbuf;
args->scount=scount;
args->stype=stype;
args->rbuf=rbuf;
args->rcount=rcount;
args->rtype=rtype;
args->schedule=schedule;
res = hb_tree_insert ((hb_tree*)handle->comminfo->NBC_Dict[NBC_NEIGHBOR_ALLTOALL], args, args, 0);
if(res != 0) printf("error in dict_insert() (%i)\n", res);
/* increase number of elements for A2A */
if(++handle->comminfo->NBC_Dict_size[NBC_NEIGHBOR_ALLTOALL] > NBC_SCHED_DICT_UPPER) {
NBC_SchedCache_dictwipe((hb_tree*)handle->comminfo->NBC_Dict[NBC_NEIGHBOR_ALLTOALL], &handle->comminfo->NBC_Dict_size[NBC_NEIGHBOR_ALLTOALL]);
}
} else {
/* found schedule */
schedule=found->schedule;
}
#endif
res = NBC_Start(handle, schedule);
if (NBC_OK != res) { printf("Error in NBC_Start() (%i)\n", res); return res; }
return NBC_OK;
}


@ -0,0 +1,173 @@
/*
* Copyright (c) 2006 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2006 The Technical University of Chemnitz. All
* rights reserved.
*
* Author(s): Torsten Hoefler <htor@cs.indiana.edu>
*
*/
#include "nbc_internal.h"
/* cannot cache schedules because one cannot check locally if the pattern is the same!! */
#undef NBC_CACHE_SCHEDULE
#ifdef NBC_CACHE_SCHEDULE
/* tree comparison function for schedule cache */
int NBC_Ineighbor_alltoallv_args_compare(NBC_Ineighbor_alltoallv_args *a, NBC_Ineighbor_alltoallv_args *b, void *param) {
if( (a->sbuf == b->sbuf) &&
(a->scount == b->scount) &&
(a->stype == b->stype) &&
(a->rbuf == b->rbuf) &&
(a->rcount == b->rcount) &&
(a->rtype == b->rtype) ) {
return 0;
}
if( a->sbuf < b->sbuf ) {
return -1;
}
return +1;
}
#endif
int ompi_coll_libnbc_ineighbor_alltoallv(void *sbuf, int *scounts, int *sdispls, MPI_Datatype stype,
void *rbuf, int *rcounts, int *rdispls, MPI_Datatype rtype,
struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module) {
int rank, size, res, worldsize;
MPI_Aint sndext, rcvext;
NBC_Handle *handle;
ompi_coll_libnbc_request_t **coll_req = (ompi_coll_libnbc_request_t**) request;
ompi_coll_libnbc_module_t *libnbc_module = (ompi_coll_libnbc_module_t*) module;
res = NBC_Init_handle(comm, coll_req, libnbc_module);
handle = *coll_req;
if(res != NBC_OK) { printf("Error in NBC_Init_handle(%i)\n", res); return res; }
res = MPI_Comm_size(comm, &size);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_size() (%i)\n", res); return res; }
res = MPI_Comm_size(MPI_COMM_WORLD, &worldsize);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_size() (%i)\n", res); return res; }
res = MPI_Comm_rank(comm, &rank);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_rank() (%i)\n", res); return res; }
res = MPI_Type_extent(stype, &sndext);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; }
res = MPI_Type_extent(rtype, &rcvext);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; }
char inplace;
NBC_Schedule *schedule;
#ifdef NBC_CACHE_SCHEDULE
NBC_Ineighbor_alltoallv_args *args, *found, search;
#endif
NBC_IN_PLACE(sbuf, rbuf, inplace);
handle->tmpbuf=NULL;
#ifdef NBC_CACHE_SCHEDULE
/* search schedule in communicator specific tree */
search.sbuf=sbuf;
search.scount=scount;
search.stype=stype;
search.rbuf=rbuf;
search.rcount=rcount;
search.rtype=rtype;
found = (NBC_Ineighbor_alltoallv_args*)hb_tree_search((hb_tree*)handle->comminfo->NBC_Dict[NBC_NEIGHBOR_ALLTOALLV], &search);
if(found == NULL) {
#endif
schedule = (NBC_Schedule*)malloc(sizeof(NBC_Schedule));
res = NBC_Sched_create(schedule);
if(res != NBC_OK) { printf("Error in NBC_Sched_create, res = %i\n", res); return res; }
{
int indegree, outdegree, weighted, *srcs, *dsts, i;
res = NBC_Comm_neighbors_count(comm, &indegree, &outdegree, &weighted);
if(res != NBC_OK) return res;
srcs = (int*)malloc(sizeof(int)*indegree);
dsts = (int*)malloc(sizeof(int)*outdegree);
res = NBC_Comm_neighbors(comm, indegree, srcs, MPI_UNWEIGHTED, outdegree, dsts, MPI_UNWEIGHTED);
if(res != NBC_OK) return res;
if(inplace) { /* we need an extra buffer to be deadlock-free */
int sumrcounts=0;
int offset=0;
for(i=0; i<indegree; ++i) sumrcounts += rcounts[i];
handle->tmpbuf = malloc(rcvext*sumrcounts);
for(i = 0; i < indegree; i++) {
if(srcs[i] != MPI_PROC_NULL) {
res = NBC_Sched_recv((char*)0+offset, true, rcounts[i], rtype, srcs[i], schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_recv() (%i)\n", res); return res; }
}
offset += rcounts[i]*rcvext;
}
for(i = 0; i < outdegree; i++) {
if(dsts[i] != MPI_PROC_NULL) {
res = NBC_Sched_send((char*)sbuf+sdispls[i]*sndext, false, scounts[i], stype, dsts[i], schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_send() (%i)\n", res); return res; }
}
}
/* unpack from buffer */
offset=0;
for(i = 0; i < indegree; i++) {
if(srcs[i] != MPI_PROC_NULL) {
res = NBC_Sched_barrier(schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_barrier() (%i)\n", res); return res; }
res = NBC_Sched_copy((char*)0+offset, true, rcounts[i], rtype, (char*)rbuf+rdispls[i]*rcvext, false, rcounts[i], rtype, schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_copy() (%i)\n", res); return res; }
}
offset += rcounts[i]*rcvext;
}
} else { /* non INPLACE case */
/* simply loop over neighbors and post send/recv operations */
for(i = 0; i < indegree; i++) {
if(srcs[i] != MPI_PROC_NULL) {
res = NBC_Sched_recv((char*)rbuf+rdispls[i]*rcvext, false, rcounts[i], rtype, srcs[i], schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_recv() (%i)\n", res); return res; }
}
}
for(i = 0; i < outdegree; i++) {
if(dsts[i] != MPI_PROC_NULL) {
res = NBC_Sched_send((char*)sbuf+sdispls[i]*sndext, false, scounts[i], stype, dsts[i], schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_send() (%i)\n", res); return res; }
}
}
}
}
res = NBC_Sched_commit(schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_commit() (%i)\n", res); return res; }
#ifdef NBC_CACHE_SCHEDULE
/* save schedule to tree */
args = (NBC_Ineighbor_alltoallv_args*)malloc(sizeof(NBC_Ineighbor_alltoallv_args));
args->sbuf=sbuf;
args->scount=scount;
args->stype=stype;
args->rbuf=rbuf;
args->rcount=rcount;
args->rtype=rtype;
args->schedule=schedule;
res = hb_tree_insert ((hb_tree*)handle->comminfo->NBC_Dict[NBC_NEIGHBOR_ALLTOALLV], args, args, 0);
if(res != 0) printf("error in dict_insert() (%i)\n", res);
/* increase number of elements for A2A */
if(++handle->comminfo->NBC_Dict_size[NBC_NEIGHBOR_ALLTOALLV] > NBC_SCHED_DICT_UPPER) {
NBC_SchedCache_dictwipe((hb_tree*)handle->comminfo->NBC_Dict[NBC_NEIGHBOR_ALLTOALLV], &handle->comminfo->NBC_Dict_size[NBC_NEIGHBOR_ALLTOALLV]);
}
} else {
/* found schedule */
schedule=found->schedule;
}
#endif
res = NBC_Start(handle, schedule);
if (NBC_OK != res) { printf("Error in NBC_Start() (%i)\n", res); return res; }
return NBC_OK;
}


@ -0,0 +1,171 @@
/*
* Copyright (c) 2006 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2006 The Technical University of Chemnitz. All
* rights reserved.
*
* Author(s): Torsten Hoefler <htor@cs.indiana.edu>
*
*/
#include "nbc_internal.h"
/* cannot cache schedules because one cannot check locally if the pattern is the same!! */
#undef NBC_CACHE_SCHEDULE
#ifdef NBC_CACHE_SCHEDULE
/* tree comparison function for schedule cache */
int NBC_Ineighbor_alltoallw_args_compare(NBC_Ineighbor_alltoallw_args *a, NBC_Ineighbor_alltoallw_args *b, void *param) {
if( (a->sbuf == b->sbuf) &&
(a->scount == b->scount) &&
(a->stype == b->stype) &&
(a->rbuf == b->rbuf) &&
(a->rcount == b->rcount) &&
(a->rtype == b->rtype) ) {
return 0;
}
if( a->sbuf < b->sbuf ) {
return -1;
}
return +1;
}
#endif
int ompi_coll_libnbc_ineighbor_alltoallw(void *sbuf, int *scounts, MPI_Aint *sdisps, MPI_Datatype *stypes,
void *rbuf, int *rcounts, MPI_Aint *rdisps, MPI_Datatype *rtypes,
struct ompi_communicator_t *comm, ompi_request_t ** request,
struct mca_coll_base_module_2_0_0_t *module) {
int rank, size, res, worldsize;
MPI_Aint *sndexts, *rcvexts;
NBC_Handle *handle;
ompi_coll_libnbc_request_t **coll_req = (ompi_coll_libnbc_request_t**) request;
ompi_coll_libnbc_module_t *libnbc_module = (ompi_coll_libnbc_module_t*) module;
res = NBC_Init_handle(comm, coll_req, libnbc_module);
handle = *coll_req;
if(res != NBC_OK) { printf("Error in NBC_Init_handle(%i)\n", res); return res; }
res = MPI_Comm_size(comm, &size);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_size() (%i)\n", res); return res; }
res = MPI_Comm_size(MPI_COMM_WORLD, &worldsize);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_size() (%i)\n", res); return res; }
res = MPI_Comm_rank(comm, &rank);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_rank() (%i)\n", res); return res; }
char inplace;
NBC_Schedule *schedule;
#ifdef NBC_CACHE_SCHEDULE
NBC_Ineighbor_alltoallw_args *args, *found, search;
#endif
NBC_IN_PLACE(sbuf, rbuf, inplace);
handle->tmpbuf=NULL;
#ifdef NBC_CACHE_SCHEDULE
/* search schedule in communicator specific tree */
search.sbuf=sbuf;
search.scount=scount;
search.stype=stype;
search.rbuf=rbuf;
search.rcount=rcount;
search.rtype=rtype;
found = (NBC_Ineighbor_alltoallw_args*)hb_tree_search((hb_tree*)handle->comminfo->NBC_Dict[NBC_NEIGHBOR_ALLTOALLW], &search);
if(found == NULL) {
#endif
schedule = (NBC_Schedule*)malloc(sizeof(NBC_Schedule));
res = NBC_Sched_create(schedule);
if(res != NBC_OK) { printf("Error in NBC_Sched_create, res = %i\n", res); return res; }
{
int indegree, outdegree, weighted, *srcs, *dsts, i;
res = NBC_Comm_neighbors_count(comm, &indegree, &outdegree, &weighted);
if(res != NBC_OK) return res;
srcs = (int*)malloc(sizeof(int)*indegree);
dsts = (int*)malloc(sizeof(int)*outdegree);
sndexts = (MPI_Aint*)malloc(sizeof(MPI_Aint)*outdegree);
for(i=0; i<outdegree; ++i) {
res = MPI_Type_extent(stypes[i], &sndexts[i]);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; }
}
rcvexts = (MPI_Aint*)malloc(sizeof(MPI_Aint)*indegree);
for(i=0; i<indegree; ++i) {
res = MPI_Type_extent(rtypes[i], &rcvexts[i]);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; }
}
res = NBC_Comm_neighbors(comm, indegree, srcs, MPI_UNWEIGHTED, outdegree, dsts, MPI_UNWEIGHTED);
if(res != NBC_OK) return res;
if(inplace) { /* we need an extra buffer to be deadlock-free */
int sumrbytes=0;
for(i=0; i<indegree; ++i) sumrbytes += rcounts[i]*rcvexts[i];
handle->tmpbuf = malloc(sumrbytes);
for(i = 0; i < indegree; i++) {
if(srcs[i] != MPI_PROC_NULL) {
res = NBC_Sched_recv((char*)0+rdisps[i], true, rcounts[i], rtypes[i], srcs[i], schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_recv() (%i)\n", res); return res; }
}
}
for(i = 0; i < outdegree; i++) {
if(dsts[i] != MPI_PROC_NULL) {
res = NBC_Sched_send((char*)sbuf+sdisps[i], false, scounts[i], stypes[i], dsts[i], schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_send() (%i)\n", res); return res; }
}
}
/* unpack from buffer */
for(i = 0; i < indegree; i++) {
res = NBC_Sched_barrier(schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_barrier() (%i)\n", res); return res; }
res = NBC_Sched_copy((char*)0+rdisps[i], true, rcounts[i], rtypes[i], (char*)rbuf+rdisps[i], false, rcounts[i], rtypes[i], schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_copy() (%i)\n", res); return res; }
}
} else { /* non INPLACE case */
/* simply loop over neighbors and post send/recv operations */
for(i = 0; i < indegree; i++) {
if(srcs[i] != MPI_PROC_NULL) {
res = NBC_Sched_recv((char*)rbuf+rdisps[i], false, rcounts[i], rtypes[i], srcs[i], schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_recv() (%i)\n", res); return res; }
}
}
for(i = 0; i < outdegree; i++) {
if(dsts[i] != MPI_PROC_NULL) {
res = NBC_Sched_send((char*)sbuf+sdisps[i], false, scounts[i], stypes[i], dsts[i], schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_send() (%i)\n", res); return res; }
}
}
}
}
res = NBC_Sched_commit(schedule);
if (NBC_OK != res) { printf("Error in NBC_Sched_commit() (%i)\n", res); return res; }
#ifdef NBC_CACHE_SCHEDULE
/* save schedule to tree */
args = (NBC_Ineighbor_alltoallw_args*)malloc(sizeof(NBC_Ineighbor_alltoallw_args));
args->sbuf=sbuf;
args->scount=scount;
args->stype=stype;
args->rbuf=rbuf;
args->rcount=rcount;
args->rtype=rtype;
args->schedule=schedule;
res = hb_tree_insert ((hb_tree*)handle->comminfo->NBC_Dict[NBC_NEIGHBOR_ALLTOALLW], args, args, 0);
if(res != 0) printf("error in dict_insert() (%i)\n", res);
/* increase number of elements for A2A */
if(++handle->comminfo->NBC_Dict_size[NBC_NEIGHBOR_ALLTOALLW] > NBC_SCHED_DICT_UPPER) {
NBC_SchedCache_dictwipe((hb_tree*)handle->comminfo->NBC_Dict[NBC_NEIGHBOR_ALLTOALLW], &handle->comminfo->NBC_Dict_size[NBC_NEIGHBOR_ALLTOALLW]);
}
} else {
/* found schedule */
schedule=found->schedule;
}
#endif
res = NBC_Start(handle, schedule);
if (NBC_OK != res) { printf("Error in NBC_Start() (%i)\n", res); return res; }
return NBC_OK;
}


@ -557,6 +557,9 @@ static inline void NBC_SchedCache_dictwipe(hb_tree *dict, int *size) {
} \
}
int NBC_Comm_neighbors_count(MPI_Comm comm, int *indegree, int *outdegree, int *weighted);
int NBC_Comm_neighbors(MPI_Comm comm, int maxindegree, int sources[], int sourceweights[], int maxoutdegree, int destinations[], int destweights[]);
#ifdef __cplusplus
}
#endif

ompi/mca/coll/libnbc/nbc_neighbor_helpers.c (new file, 106 lines)

@ -0,0 +1,106 @@
/*
* Copyright (c) 2006 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2006 The Technical University of Chemnitz. All
* rights reserved.
*
* Author(s): Torsten Hoefler <htor@cs.indiana.edu>
*
*/
#include "nbc_internal.h"
int NBC_Comm_neighbors_count(MPI_Comm comm, int *indegree, int *outdegree, int *weighted) {
int topo, res;
res = MPI_Topo_test(comm, &topo);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Topo_test() (%i)\n", res); return res; }
switch(topo) {
case MPI_CART: /* cartesian */
{
int ndims;
res = MPI_Cartdim_get(comm, &ndims) ;
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Cartdim_get() (%i)\n", res); return res; }
/* indegree and outdegree are always 2*ndims; neighbors that do not exist are
 * reported as MPI_PROC_NULL and the schedules simply skip those slots */
*outdegree = *indegree = 2*ndims;
*weighted = 0;
}
break;
case MPI_GRAPH: /* graph */
{
int rank, nneighbors;
MPI_Comm_rank(comm, &rank);
res = MPI_Graph_neighbors_count(comm, rank, &nneighbors);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Graph_neighbors_count() (%i)\n", res); return res; }
*outdegree = *indegree = nneighbors;
*weighted = 0;
}
break;
case MPI_DIST_GRAPH: /* graph */
{
res = MPI_Dist_graph_neighbors_count(comm, indegree, outdegree, weighted);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Dist_graph_neighbors_count() (%i)\n", res); return res; }
}
break;
case MPI_UNDEFINED:
return NBC_INVALID_TOPOLOGY_COMM;
break;
default:
return NBC_INVALID_PARAM;
break;
}
return NBC_OK;
}
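Editor's illustration (not from the commit): the counting convention above means a cartesian communicator always reports 2*ndims neighbors in each direction, even at a non-periodic boundary where some of them are MPI_PROC_NULL. A hedged sketch, assuming it is compiled inside libnbc where nbc_internal.h is available; the function name and grid construction are invented for the example.
#include <stdio.h>
#include "nbc_internal.h"
static void show_cart_degrees(void)
{
    MPI_Comm cart;
    int dims[2] = { 0, 0 }, periods[2] = { 0, 0 };
    int nprocs, rank, indeg, outdeg, wgtd;
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Dims_create(nprocs, 2, dims);                 /* factor nprocs into a 2-D grid */
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 0, &cart);
    MPI_Comm_rank(cart, &rank);
    if (NBC_OK == NBC_Comm_neighbors_count(cart, &indeg, &outdeg, &wgtd)) {
        /* every rank reports 2*ndims == 4, boundary ranks included; the missing
         * neighbors come back as MPI_PROC_NULL from NBC_Comm_neighbors() */
        printf("rank %d: indegree=%d outdegree=%d weighted=%d\n", rank, indeg, outdeg, wgtd);
    }
    MPI_Comm_free(&cart);
}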
int NBC_Comm_neighbors(MPI_Comm comm, int maxindegree, int sources[], int sourceweights[], int maxoutdegree, int destinations[], int destweights[]) {
int topo, res;
int index = 0;
int indeg, outdeg, wgtd;
res = NBC_Comm_neighbors_count(comm, &indeg, &outdeg, &wgtd);
if (NBC_OK != res) return res;
if (indeg > maxindegree || outdeg > maxoutdegree) return NBC_INVALID_PARAM; /* we want to return *all* neighbors */
res = MPI_Topo_test(comm, &topo);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Topo_test() (%i)\n", res); return res; }
switch(topo) {
case MPI_CART: /* cartesian */
{
int ndims, i, rpeer, speer;
res = MPI_Cartdim_get(comm, &ndims);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Cartdim_get() (%i)\n", res); return res; }
for(i = 0; i<ndims; i++) {
res = MPI_Cart_shift(comm, i, 1, &rpeer, &speer);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Cart_shift() (%i)\n", res); return res; }
sources[index] = destinations[index] = rpeer; index++;
sources[index] = destinations[index] = speer; index++;
}
}
break;
case MPI_GRAPH: /* graph */
{
int rank;
MPI_Comm_rank(comm, &rank);
res = MPI_Graph_neighbors(comm, rank, maxindegree, sources);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Graph_neighbors_count() (%i)\n", res); return res; }
for(int i=0; i<maxindegree; i++) destinations[i] = sources[i];
}
break;
case MPI_DIST_GRAPH: /* dist graph */
{
res = MPI_Dist_graph_neighbors(comm, maxindegree, sources, sourceweights, maxoutdegree, destinations, destweights);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Graph_neighbors_count() (%i)\n", res); return res; }
}
break;
case MPI_UNDEFINED:
return NBC_INVALID_TOPOLOGY_COMM;
break;
default:
return NBC_INVALID_PARAM;
break;
}
return NBC_OK;
}


@ -214,6 +214,16 @@ libmpi_c_mpi_la_SOURCES = \
message_c2f.c \
mprobe.c \
mrecv.c \
neighbor_allgather.c \
ineighbor_allgather.c \
neighbor_allgatherv.c \
ineighbor_allgatherv.c \
neighbor_alltoall.c \
ineighbor_alltoall.c \
neighbor_alltoallv.c \
ineighbor_alltoallv.c \
neighbor_alltoallw.c \
ineighbor_alltoallw.c \
op_c2f.c \
op_commutative.c \
op_create.c \

ompi/mpi/c/ineighbor_allgather.c (new file, 104 lines)

@ -0,0 +1,104 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012      Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Ineighbor_allgather = PMPI_Ineighbor_allgather
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Ineighbor_allgather";
int MPI_Ineighbor_allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm, MPI_Request *request)
{
int err;
MEMCHECKER(
int rank;
ptrdiff_t ext;
rank = ompi_comm_rank(comm);
ompi_datatype_type_extent(recvtype, &ext);
memchecker_datatype(recvtype);
memchecker_comm(comm);
/* check whether the actual send buffer is defined. */
if (MPI_IN_PLACE == sendbuf) {
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(recvbuf)+rank*ext,
recvcount, recvtype);
} else {
memchecker_datatype(sendtype);
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
}
/* check whether the receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks on both
intracommunicators and intercommunicators */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm) || !(OMPI_COMM_IS_CART(comm) || OMPI_COMM_IS_GRAPH(comm) ||
OMPI_COMM_IS_DIST_GRAPH(comm))) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
err = MPI_ERR_TYPE;
} else if (recvcount < 0) {
err = MPI_ERR_COUNT;
} else if (MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
} else if (MPI_IN_PLACE != sendbuf) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_ineighbor_allgather(sendbuf, sendcount, sendtype, recvbuf,
recvcount, recvtype, comm, request,
comm->c_coll.coll_ineighbor_allgather_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
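A hedged application-level sketch (not part of this binding) of how the new entry point is meant to be used: build a topology communicator, start the nonblocking neighborhood allgather, overlap independent work, then complete it. The ring construction and variable names are illustrative assumptions.
#include <mpi.h>
/* Each rank contributes one int and receives one int from each of its two
 * ring neighbors (1-D periodic cartesian topology => indegree = outdegree = 2). */
static int ring_neighbor_exchange(int myval, int recvvals[2])
{
    MPI_Comm ring;
    MPI_Request req;
    int dims[1] = { 0 }, periods[1] = { 1 }, nprocs, err;
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    dims[0] = nprocs;
    MPI_Cart_create(MPI_COMM_WORLD, 1, dims, periods, 0, &ring);
    err = MPI_Ineighbor_allgather(&myval, 1, MPI_INT, recvvals, 1, MPI_INT,
                                  ring, &req);
    /* ...overlap independent computation here... */
    MPI_Wait(&req, MPI_STATUS_IGNORE);
    MPI_Comm_free(&ring);
    return err;
}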

ompi/mpi/c/ineighbor_allgatherv.c (new file, 128 lines)

@ -0,0 +1,128 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2010 University of Houston. All rights reserved.
* Copyright (c) 2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Ineighbor_allgatherv = PMPI_Ineighbor_allgatherv
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Ineighbor_allgatherv";
int MPI_Ineighbor_allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcounts[], int displs[],
MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request)
{
int i, size, err;
MEMCHECKER(
int rank;
ptrdiff_t ext;
rank = ompi_comm_rank(comm);
size = ompi_comm_size(comm);
ompi_datatype_type_extent(recvtype, &ext);
memchecker_datatype(recvtype);
memchecker_comm (comm);
/* check whether the receive buffer is addressable. */
for (i = 0; i < size; i++) {
memchecker_call(&opal_memchecker_base_isaddressable,
(char *)(recvbuf)+displs[i]*ext,
recvcounts[i], recvtype);
}
/* check whether the actual send buffer is defined. */
if (MPI_IN_PLACE == sendbuf) {
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(recvbuf)+displs[rank]*ext,
recvcounts[rank], recvtype);
} else {
memchecker_datatype(sendtype);
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
}
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks on both
intracommunicators and intercommunicators */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm) || !(OMPI_COMM_IS_CART(comm) || OMPI_COMM_IS_GRAPH(comm) ||
OMPI_COMM_IS_DIST_GRAPH(comm))) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if (MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
}
if (MPI_IN_PLACE != sendbuf) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
/* We always define the remote group to be the same as the local
group in the case of an intracommunicator, so it's safe to
get the size of the remote group here for both intra- and
intercommunicators */
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
}
}
if (NULL == displs) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_BUFFER, FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_ineighbor_allgatherv(sendbuf, sendcount, sendtype,
recvbuf, recvcounts, displs,
recvtype, comm, request,
comm->c_coll.coll_ineighbor_allgatherv_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpi/c/ineighbor_alltoall.c  (new file, 90 lines added)
@@ -0,0 +1,90 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Ineighbor_alltoall = PMPI_Ineighbor_alltoall
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Ineighbor_alltoall";
int MPI_Ineighbor_alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm, MPI_Request *request)
{
int err;
MEMCHECKER(
memchecker_datatype(sendtype);
memchecker_datatype(recvtype);
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
memchecker_comm(comm);
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks on both
intracommunicators and intercommunicators */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm) || !(OMPI_COMM_IS_CART(comm) || OMPI_COMM_IS_GRAPH(comm) ||
OMPI_COMM_IS_DIST_GRAPH(comm))) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
err = MPI_ERR_TYPE;
} else if (recvcount < 0) {
err = MPI_ERR_COUNT;
} else if (MPI_IN_PLACE == recvbuf) {
err = MPI_ERR_ARG;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_ineighbor_alltoall(sendbuf, sendcount, sendtype,
recvbuf, recvcount, recvtype, comm,
request, comm->c_coll.coll_ineighbor_alltoall_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpi/c/ineighbor_alltoallv.c  (new file, 120 lines added)
@@ -0,0 +1,120 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2012 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Ineighbor_alltoallv = PMPI_Ineighbor_alltoallv
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Ineighbor_alltoallv";
int MPI_Ineighbor_alltoallv(void *sendbuf, int sendcounts[], int sdispls[],
MPI_Datatype sendtype, void *recvbuf,
int recvcounts[], int rdispls[], MPI_Datatype recvtype,
MPI_Comm comm, MPI_Request *request)
{
int i, size, err;
MEMCHECKER(
ptrdiff_t recv_ext;
ptrdiff_t send_ext;
size = ompi_comm_remote_size(comm);
ompi_datatype_type_extent(recvtype, &recv_ext);
ompi_datatype_type_extent(sendtype, &send_ext);
memchecker_datatype(sendtype);
memchecker_datatype(recvtype);
memchecker_comm(comm);
for ( i = 0; i < size; i++ ) {
/* check if send chunks are defined. */
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(sendbuf)+sdispls[i]*send_ext,
sendcounts[i], sendtype);
/* check if receive chunks are addressable. */
memchecker_call(&opal_memchecker_base_isaddressable,
(char *)(recvbuf)+rdispls[i]*recv_ext,
recvcounts[i], recvtype);
}
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm) || !(OMPI_COMM_IS_CART(comm) || OMPI_COMM_IS_GRAPH(comm) ||
OMPI_COMM_IS_DIST_GRAPH(comm))) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
if ((NULL == sendcounts) || (NULL == sdispls) ||
(NULL == recvcounts) || (NULL == rdispls) ||
MPI_IN_PLACE == sendbuf || MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
/* We always define the remote group to be the same as the local
group in the case of an intracommunicator, so it's safe to
get the size of the remote group here for both intra- and
intercommunicators */
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) {
err = MPI_ERR_COUNT;
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
err = MPI_ERR_TYPE;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_ineighbor_alltoallv(sendbuf, sendcounts, sdispls, sendtype, recvbuf,
recvcounts, rdispls, recvtype, comm, request,
comm->c_coll.coll_ineighbor_alltoallv_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpi/c/ineighbor_alltoallw.c  (new file, 119 lines added)
@@ -0,0 +1,119 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2012 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Ineighbor_alltoallw = PMPI_Ineighbor_alltoallw
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Ineighbor_alltoallw";
int MPI_Ineighbor_alltoallw(void *sendbuf, int sendcounts[], MPI_Aint sdispls[],
MPI_Datatype *sendtypes, void *recvbuf, int recvcounts[],
MPI_Aint rdispls[], MPI_Datatype *recvtypes, MPI_Comm comm,
MPI_Request *request)
{
int i, size, err;
MEMCHECKER(
ptrdiff_t recv_ext;
ptrdiff_t send_ext;
size = ompi_comm_remote_size(comm);
memchecker_comm(comm);
for ( i = 0; i < size; i++ ) {
memchecker_datatype(sendtypes[i]);
memchecker_datatype(recvtypes[i]);
ompi_datatype_type_extent(sendtypes[i], &send_ext);
ompi_datatype_type_extent(recvtypes[i], &recv_ext);
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(sendbuf)+sdispls[i]*send_ext,
sendcounts[i], sendtypes[i]);
memchecker_call(&opal_memchecker_base_isaddressable,
(char *)(recvbuf)+rdispls[i]*recv_ext,
recvcounts[i], recvtypes[i]);
}
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm) || !(OMPI_COMM_IS_CART(comm) || OMPI_COMM_IS_GRAPH(comm) ||
OMPI_COMM_IS_DIST_GRAPH(comm))) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
if ((NULL == sendcounts) || (NULL == sdispls) || (NULL == sendtypes) ||
(NULL == recvcounts) || (NULL == rdispls) || (NULL == recvtypes) ||
MPI_IN_PLACE == sendbuf || MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
/* We always define the remote group to be the same as the local
group in the case of an intracommunicator, so it's safe to
get the size of the remote group here for both intra- and
intercommunicators */
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) {
err = MPI_ERR_COUNT;
} else if (MPI_DATATYPE_NULL == recvtypes[i] || NULL == recvtypes[i]) {
err = MPI_ERR_TYPE;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtypes[i], sendcounts[i]);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_ineighbor_alltoallw(sendbuf, sendcounts, sdispls, sendtypes, recvbuf,
recvcounts, rdispls, recvtypes, comm, request,
comm->c_coll.coll_ineighbor_alltoallw_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpi/c/neighbor_allgather.c  (new file, 125 lines added)
@@ -0,0 +1,125 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2010 University of Houston. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Neighbor_allgather = PMPI_Neighbor_allgather
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Neighbor_allgather";
int MPI_Neighbor_allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm)
{
int err;
MEMCHECKER(
int rank;
ptrdiff_t ext;
rank = ompi_comm_rank(comm);
ompi_datatype_type_extent(recvtype, &ext);
memchecker_datatype(recvtype);
memchecker_comm(comm);
/* check whether the actual send buffer is defined. */
if (MPI_IN_PLACE == sendbuf) {
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(recvbuf)+rank*ext,
recvcount, recvtype);
} else {
memchecker_datatype(sendtype);
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
}
/* check whether the receive buffer is addressable. */
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks on both
intracommunicators and intercommunicators */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
err = MPI_ERR_TYPE;
} else if (recvcount < 0) {
err = MPI_ERR_COUNT;
} else if (MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
} else if (MPI_IN_PLACE != sendbuf) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
/* Do we need to do anything? Everyone had to give the same send
signature, which means that everyone must have given a
sendcount > 0 if there's anything to send for the intra-communicator
case. If we're doing IN_PLACE, however, check recvcount,
not sendcount. */
if ( OMPI_COMM_IS_INTRA(comm) ) {
if ((MPI_IN_PLACE != sendbuf && 0 == sendcount) ||
(0 == recvcount)) {
return MPI_SUCCESS;
}
}
else if ( OMPI_COMM_IS_INTER(comm) ){
/* for inter-communicators, the communication pattern
need not be symmetric. Specifically, one group is
allowed to have sendcount=0 while the other has
a valid sendcount. Thus, the only way not to do
anything is if both sendcount and recvcount are zero. */
if ( 0 == sendcount && 0 == recvcount ) {
return MPI_SUCCESS;
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_neighbor_allgather(sendbuf, sendcount, sendtype,
recvbuf, recvcount, recvtype, comm,
comm->c_coll.coll_neighbor_allgather_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
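A usage sketch of the blocking variant above on a periodic 2-D Cartesian communicator (an assumed topology, not taken from this changeset): every rank has exactly 2*ndims neighbors, so the receive buffer needs four recvcount-sized slots, ordered -1/+1 per dimension.

/* Illustrative usage sketch (not from the Open MPI sources above). */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int dims[2] = { 0, 0 }, periods[2] = { 1, 1 };
    MPI_Dims_create(size, 2, dims);

    MPI_Comm cart;
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 0, &cart);

    int sendval = rank;
    int recvvals[4] = { -1, -1, -1, -1 };   /* 2 * ndims neighbors */
    MPI_Neighbor_allgather(&sendval, 1, MPI_INT,
                           recvvals, 1, MPI_INT, cart);

    printf("rank %d heard from its neighbors: %d %d %d %d\n",
           rank, recvvals[0], recvvals[1], recvvals[2], recvvals[3]);

    MPI_Comm_free(&cart);
    MPI_Finalize();
    return 0;
}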

ompi/mpi/c/neighbor_allgatherv.c  (new file, 149 lines added)
@@ -0,0 +1,149 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2010 University of Houston. All rights reserved.
* Copyright (c) 2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Neighbor_allgatherv = PMPI_Neighbor_allgatherv
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Neighbor_allgatherv";
int MPI_Neighbor_allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcounts[],
int displs[], MPI_Datatype recvtype, MPI_Comm comm)
{
int i, size, err;
MEMCHECKER(
int rank;
ptrdiff_t ext;
rank = ompi_comm_rank(comm);
size = ompi_comm_size(comm);
ompi_datatype_type_extent(recvtype, &ext);
memchecker_datatype(recvtype);
memchecker_comm (comm);
/* check whether the receive buffer is addressable. */
for (i = 0; i < size; i++) {
memchecker_call(&opal_memchecker_base_isaddressable,
(char *)(recvbuf)+displs[i]*ext,
recvcounts[i], recvtype);
}
/* check whether the actual send buffer is defined. */
if (MPI_IN_PLACE == sendbuf) {
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(recvbuf)+displs[rank]*ext,
recvcounts[rank], recvtype);
} else {
memchecker_datatype(sendtype);
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
}
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks on both
intracommunicators and intercommunicators */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm) || !(OMPI_COMM_IS_CART(comm) || OMPI_COMM_IS_GRAPH(comm) ||
OMPI_COMM_IS_DIST_GRAPH(comm))) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if (MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_TYPE, FUNC_NAME);
}
if (MPI_IN_PLACE != sendbuf) {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
/* We always define the remote group to be the same as the local
group in the case of an intracommunicator, so it's safe to
get the size of the remote group here for both intra- and
intercommunicators */
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_COUNT, FUNC_NAME);
}
}
if (NULL == displs) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_BUFFER, FUNC_NAME);
}
}
/* Do we need to do anything? Everyone had to give the same
signature, which means that everyone must have given a
sum(recvcounts) > 0 if there's anything to do. */
if ( OMPI_COMM_IS_INTRA( comm) ) {
for (i = 0; i < ompi_comm_size(comm); ++i) {
if (0 != recvcounts[i]) {
break;
}
}
if (i >= ompi_comm_size(comm)) {
return MPI_SUCCESS;
}
}
/* There is no rule that can be applied for inter-communicators, since
recvcount(s)=0 only indicates that the processes in the other group
do not send anything, while sendcount=0 only indicates that I do not
send anything. However, other processes in my group might very well
send something. */
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_neighbor_allgatherv(sendbuf, sendcount, sendtype,
recvbuf, recvcounts,
displs, recvtype, comm,
comm->c_coll.coll_neighbor_allgatherv_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}

ompi/mpi/c/neighbor_alltoall.c  (new file, 101 lines added)
@@ -0,0 +1,101 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2012 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Neighbor_alltoall = PMPI_Neighbor_alltoall
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Neighbor_alltoall";
int MPI_Neighbor_alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm)
{
int err;
MEMCHECKER(
memchecker_datatype(sendtype);
memchecker_datatype(recvtype);
memchecker_call(&opal_memchecker_base_isdefined, sendbuf, sendcount, sendtype);
memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
memchecker_comm(comm);
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks on both
intracommunicators and intercommunicators */
if (MPI_IN_PLACE == sendbuf) {
sendcount = recvcount;
sendtype = recvtype;
}
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm) || !(OMPI_COMM_IS_CART(comm) || OMPI_COMM_IS_GRAPH(comm) ||
OMPI_COMM_IS_DIST_GRAPH(comm))) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
err = MPI_ERR_TYPE;
} else if (recvcount < 0) {
err = MPI_ERR_COUNT;
} else if (MPI_IN_PLACE == recvbuf) {
err = MPI_ERR_ARG;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcount);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
/* Do we need to do anything? */
if (0 == sendcount && 0 == recvcount) {
return MPI_SUCCESS;
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_neighbor_alltoall(sendbuf, sendcount, sendtype, recvbuf,
recvcount, recvtype, comm,
comm->c_coll.coll_neighbor_alltoall_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
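A sketch of a personalized neighbor exchange with the routine above (topology and values are illustrative assumptions, not part of this changeset): unlike the allgather, the send buffer holds one distinct element per out-neighbor; here slot 0 goes to the left ring neighbor and slot 1 to the right one.

/* Illustrative usage sketch (not from the Open MPI sources above);
 * clearest with three or more ranks so the two neighbors are distinct. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int left  = (rank + size - 1) % size;
    int right = (rank + 1) % size;
    int neighbors[2] = { left, right };     /* both in- and out-neighbors */

    MPI_Comm ring;
    MPI_Dist_graph_create_adjacent(MPI_COMM_WORLD,
                                   2, neighbors, MPI_UNWEIGHTED,
                                   2, neighbors, MPI_UNWEIGHTED,
                                   MPI_INFO_NULL, 0, &ring);

    int sendbuf[2] = { 10 * rank, 10 * rank + 1 };  /* slot 0 -> left, slot 1 -> right */
    int recvbuf[2] = { -1, -1 };                    /* slot 0 <- left, slot 1 <- right */

    MPI_Neighbor_alltoall(sendbuf, 1, MPI_INT, recvbuf, 1, MPI_INT, ring);

    printf("rank %d: got %d from %d and %d from %d\n",
           rank, recvbuf[0], left, recvbuf[1], right);

    MPI_Comm_free(&ring);
    MPI_Finalize();
    return 0;
}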

ompi/mpi/c/neighbor_alltoallv.c  (new file, 126 lines added)
@@ -0,0 +1,126 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2012 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Neighbor_alltoallv = PMPI_Neighbor_alltoallv
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Neighbor_alltoallv";
int MPI_Neighbor_alltoallv(void *sendbuf, int sendcounts[], int sdispls[],
MPI_Datatype sendtype, void *recvbuf,
int recvcounts[], int rdispls[],
MPI_Datatype recvtype, MPI_Comm comm)
{
int i, size, err;
MEMCHECKER(
ptrdiff_t recv_ext;
ptrdiff_t send_ext;
size = ompi_comm_remote_size(comm);
ompi_datatype_type_extent(recvtype, &recv_ext);
ompi_datatype_type_extent(sendtype, &send_ext);
memchecker_datatype(sendtype);
memchecker_datatype(recvtype);
memchecker_comm(comm);
for ( i = 0; i < size; i++ ) {
/* check if send chunks are defined. */
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(sendbuf)+sdispls[i]*send_ext,
sendcounts[i], sendtype);
/* check if receive chunks are addressable. */
memchecker_call(&opal_memchecker_base_isaddressable,
(char *)(recvbuf)+rdispls[i]*recv_ext,
recvcounts[i], recvtype);
}
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm) || !(OMPI_COMM_IS_CART(comm) || OMPI_COMM_IS_GRAPH(comm) ||
OMPI_COMM_IS_DIST_GRAPH(comm))) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
if (MPI_IN_PLACE == sendbuf) {
sendcounts = recvcounts;
sdispls = rdispls;
sendtype = recvtype;
}
if ((NULL == sendcounts) || (NULL == sdispls) ||
(NULL == recvcounts) || (NULL == rdispls) ||
MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
/* We always define the remote group to be the same as the local
group in the case of an intracommunicator, so it's safe to
get the size of the remote group here for both intra- and
intercommunicators */
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) {
err = MPI_ERR_COUNT;
} else if (MPI_DATATYPE_NULL == recvtype || NULL == recvtype) {
err = MPI_ERR_TYPE;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtype, sendcounts[i]);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_neighbor_alltoallv(sendbuf, sendcounts, sdispls, sendtype,
recvbuf, recvcounts, rdispls, recvtype,
comm, comm->c_coll.coll_neighbor_alltoallv_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
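A sketch of a variable-sized exchange through the routine above (the ring topology and counts are illustrative assumptions, not part of this changeset, and it needs at least three ranks so the two neighbors are distinct): each rank sends one element to its left neighbor and two to its right one, so by symmetry it receives two from the left and one from the right.

/* Illustrative usage sketch (not from the Open MPI sources above);
 * run with at least three ranks. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int left  = (rank + size - 1) % size;
    int right = (rank + 1) % size;
    int neighbors[2] = { left, right };

    MPI_Comm ring;
    MPI_Dist_graph_create_adjacent(MPI_COMM_WORLD,
                                   2, neighbors, MPI_UNWEIGHTED,
                                   2, neighbors, MPI_UNWEIGHTED,
                                   MPI_INFO_NULL, 0, &ring);

    int sendbuf[3]    = { rank, rank, rank };
    int sendcounts[2] = { 1, 2 };           /* 1 element to left, 2 to right */
    int sdispls[2]    = { 0, 1 };
    int recvbuf[3]    = { -1, -1, -1 };
    int recvcounts[2] = { 2, 1 };           /* 2 elements from left, 1 from right */
    int rdispls[2]    = { 0, 2 };

    MPI_Neighbor_alltoallv(sendbuf, sendcounts, sdispls, MPI_INT,
                           recvbuf, recvcounts, rdispls, MPI_INT, ring);

    printf("rank %d: %d %d from %d, %d from %d\n",
           rank, recvbuf[0], recvbuf[1], left, recvbuf[2], right);

    MPI_Comm_free(&ring);
    MPI_Finalize();
    return 0;
}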

ompi/mpi/c/neighbor_alltoallw.c  (new file, 125 lines added)
@@ -0,0 +1,125 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2012 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/memchecker.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Neighbor_alltoallw = PMPI_Neighbor_alltoallw
#endif
#if OMPI_PROFILING_DEFINES
#include "ompi/mpi/c/profile/defines.h"
#endif
static const char FUNC_NAME[] = "MPI_Neighbor_alltoallw";
int MPI_Neighbor_alltoallw(void *sendbuf, int sendcounts[], MPI_Aint sdispls[],
MPI_Datatype *sendtypes, void *recvbuf,
int recvcounts[], MPI_Aint rdispls[],
MPI_Datatype *recvtypes, MPI_Comm comm)
{
int i, size, err;
MEMCHECKER(
ptrdiff_t recv_ext;
ptrdiff_t send_ext;
size = ompi_comm_remote_size(comm);
memchecker_comm(comm);
for ( i = 0; i < size; i++ ) {
memchecker_datatype(sendtypes[i]);
memchecker_datatype(recvtypes[i]);
ompi_datatype_type_extent(sendtypes[i], &send_ext);
ompi_datatype_type_extent(recvtypes[i], &recv_ext);
memchecker_call(&opal_memchecker_base_isdefined,
(char *)(sendbuf)+sdispls[i]*send_ext,
sendcounts[i], sendtypes[i]);
memchecker_call(&opal_memchecker_base_isaddressable,
(char *)(recvbuf)+rdispls[i]*recv_ext,
recvcounts[i], recvtypes[i]);
}
);
if (MPI_PARAM_CHECK) {
/* Unrooted operation -- same checks for all ranks */
err = MPI_SUCCESS;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if (ompi_comm_invalid(comm) || !(OMPI_COMM_IS_CART(comm) || OMPI_COMM_IS_GRAPH(comm) ||
OMPI_COMM_IS_DIST_GRAPH(comm))) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
FUNC_NAME);
}
if (MPI_IN_PLACE == sendbuf) {
sendcounts = recvcounts;
sdispls = rdispls;
sendtypes = recvtypes;
}
if ((NULL == sendcounts) || (NULL == sdispls) || (NULL == sendtypes) ||
(NULL == recvcounts) || (NULL == rdispls) || (NULL == recvtypes) ||
MPI_IN_PLACE == recvbuf) {
return OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_ARG, FUNC_NAME);
}
/* We always define the remote group to be the same as the local
group in the case of an intracommunicator, so it's safe to
get the size of the remote group here for both intra- and
intercommunicators */
size = ompi_comm_remote_size(comm);
for (i = 0; i < size; ++i) {
if (recvcounts[i] < 0) {
err = MPI_ERR_COUNT;
} else if (MPI_DATATYPE_NULL == recvtypes[i] || NULL == recvtypes[i]) {
err = MPI_ERR_TYPE;
} else {
OMPI_CHECK_DATATYPE_FOR_SEND(err, sendtypes[i], sendcounts[i]);
}
OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
}
}
OPAL_CR_ENTER_LIBRARY();
/* Invoke the coll component to perform the back-end operation */
err = comm->c_coll.coll_neighbor_alltoallw(sendbuf, sendcounts, sdispls, sendtypes,
recvbuf, recvcounts, rdispls, recvtypes,
comm, comm->c_coll.coll_neighbor_alltoallw_module);
OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
}
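A sketch of the "w" flavour above, which takes byte displacements (MPI_Aint) and one datatype per neighbor (the struct layout and ring topology are illustrative assumptions, not part of this changeset, and at least three ranks are needed): a double goes to the left neighbor and an int to the right one, so the receive types are the mirror image.

/* Illustrative usage sketch (not from the Open MPI sources above);
 * run with at least three ranks. */
#include <mpi.h>
#include <stdio.h>
#include <stddef.h>

struct sendpack { double d; int i; };   /* d goes left, i goes right */
struct recvpack { int i; double d; };   /* i arrives from left, d from right */

int main(int argc, char **argv)
{
    int rank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int left  = (rank + size - 1) % size;
    int right = (rank + 1) % size;
    int neighbors[2] = { left, right };

    MPI_Comm ring;
    MPI_Dist_graph_create_adjacent(MPI_COMM_WORLD,
                                   2, neighbors, MPI_UNWEIGHTED,
                                   2, neighbors, MPI_UNWEIGHTED,
                                   MPI_INFO_NULL, 0, &ring);

    struct sendpack sp = { rank + 0.5, rank };
    struct recvpack rp = { -1, -1.0 };

    int          counts[2]    = { 1, 1 };
    MPI_Aint     sdispls[2]   = { offsetof(struct sendpack, d),
                                  offsetof(struct sendpack, i) };
    MPI_Aint     rdispls[2]   = { offsetof(struct recvpack, i),
                                  offsetof(struct recvpack, d) };
    MPI_Datatype sendtypes[2] = { MPI_DOUBLE, MPI_INT };
    MPI_Datatype recvtypes[2] = { MPI_INT, MPI_DOUBLE };

    MPI_Neighbor_alltoallw(&sp, counts, sdispls, sendtypes,
                           &rp, counts, rdispls, recvtypes, ring);

    printf("rank %d: int %d from %d, double %g from %d\n",
           rank, rp.i, left, rp.d, right);

    MPI_Comm_free(&ring);
    MPI_Finalize();
    return 0;
}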

@@ -196,6 +196,16 @@ nodist_libmpi_c_pmpi_la_SOURCES = \
pmessage_c2f.c \
pmprobe.c \
pmrecv.c \
pneighbor_allgather.c \
pineighbor_allgather.c \
pneighbor_allgatherv.c \
pineighbor_allgatherv.c \
pneighbor_alltoall.c \
pineighbor_alltoall.c \
pneighbor_alltoallv.c \
pineighbor_alltoallv.c \
pneighbor_alltoallw.c \
pineighbor_alltoallw.c \
pop_c2f.c \
pop_create.c \
pop_commutative.c \

@@ -12,6 +12,8 @@
* Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2011 Sandia National Laboratories. All rights reserved.
* Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@@ -242,6 +244,16 @@
#define MPI_Mprobe PMPI_Mprobe
#define MPI_Mrecv PMPI_Mrecv
#define MPI_Message_cancel PMPI_Message_cancel
#define MPI_Neighbor_allgather PMPI_Neighbor_allgather
#define MPI_Ineighbor_allgather PMPI_Ineighbor_allgather
#define MPI_Neighbor_allgatherv PMPI_Neighbor_allgatherv
#define MPI_Ineighbor_allgatherv PMPI_Ineighbor_allgatherv
#define MPI_Neighbor_alltoall PMPI_Neighbor_alltoall
#define MPI_Ineighbor_alltoall PMPI_Ineighbor_alltoall
#define MPI_Neighbor_alltoallv PMPI_Neighbor_alltoallv
#define MPI_Ineighbor_alltoallv PMPI_Ineighbor_alltoallv
#define MPI_Neighbor_alltoallw PMPI_Neighbor_alltoallw
#define MPI_Ineighbor_alltoallw PMPI_Ineighbor_alltoallw
#define MPI_Op_c2f PMPI_Op_c2f
#define MPI_Op_commutative PMPI_Op_commutative
#define MPI_Op_create PMPI_Op_create

@@ -4254,4 +4254,203 @@ subroutine MPI_Mrecv_f08(buf,count,datatype,message,status,ierror &
end subroutine MPI_Mrecv_f08
end interface MPI_Mrecv
interface MPI_Neighbor_allgather
subroutine MPI_Neighbor_allgather_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype, &
comm,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Neighbor_allgather_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Neighbor_allgather_f08
end interface MPI_Neighbor_allgather
interface MPI_Ineighbor_allgather
subroutine MPI_Ineighbor_allgather_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype, &
comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Ineighbor_allgather_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Ineighbor_allgather_f08
end interface MPI_Ineighbor_allgather
interface MPI_Neighbor_allgatherv
subroutine MPI_Neighbor_allgatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,displs, &
recvtype,comm,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Neighbor_allgatherv_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount
INTEGER, INTENT(IN) :: recvcounts(*), displs(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Neighbor_allgatherv_f08
end interface MPI_Neighbor_allgatherv
interface MPI_Ineighbor_allgatherv
subroutine MPI_Ineighbor_allgatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,displs, &
recvtype,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Ineighbor_allgatherv_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount
INTEGER, INTENT(IN) :: recvcounts(*), displs(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Ineighbor_allgatherv_f08
end interface MPI_Ineighbor_allgatherv
interface MPI_Neighbor_alltoall
subroutine MPI_Neighbor_alltoall_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype, &
comm,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Neighbor_alltoall_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Neighbor_alltoall_f08
end interface MPI_Neighbor_alltoall
interface MPI_Ineighbor_alltoall
subroutine MPI_Ineighbor_alltoall_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype, &
comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Ineighbor_alltoall_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Ineighbor_alltoall_f08
end interface MPI_Ineighbor_alltoall
interface MPI_Neighbor_alltoallv
subroutine MPI_Neighbor_alltoallv_f08(sendbuf,sendcounts,sdispls,sendtype,recvbuf,recvcounts, &
rdispls,recvtype,comm,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Neighbor_alltoallv_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Neighbor_alltoallv_f08
end interface MPI_Neighbor_alltoallv
interface MPI_Ineighbor_alltoallv
subroutine MPI_Ineighbor_alltoallv_f08(sendbuf,sendcounts,sdispls,sendtype,recvbuf,recvcounts, &
rdispls,recvtype,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Ineighbor_alltoallv_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Ineighbor_alltoallv_f08
end interface MPI_Ineighbor_alltoallv
interface MPI_Neighbor_alltoallw
subroutine MPI_Neighbor_alltoallw_f08(sendbuf,sendcounts,sdispls,sendtypes,recvbuf,recvcounts, &
rdispls,recvtypes,comm,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Neighbor_alltoallw_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_ADDRESS_KIND
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), recvcounts(*)
INTEGER(MPI_ADDRESS_KIND), INTENT(IN) :: sdispls(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtypes(*), recvtypes(*)
TYPE(MPI_Comm), INTENT(IN) :: comm
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Neighbor_alltoallw_f08
end interface MPI_Neighbor_alltoallw
interface MPI_Ineighbor_alltoallw
subroutine MPI_Ineighbor_alltoallw_f08(sendbuf,sendcounts,sdispls,sendtypes,recvbuf,recvcounts, &
rdispls,recvtypes,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("MPI_Ineighbor_alltoallw_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request, MPI_ADDRESS_KIND
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), recvcounts(*)
INTEGER(MPI_ADDRESS_KIND), INTENT(IN) :: sdispls(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtypes(*), recvtypes(*)
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(IN) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine MPI_Ineighbor_alltoallw_f08
end interface MPI_Ineighbor_alltoallw
end module mpi_f08_interfaces

@@ -4254,4 +4254,203 @@ subroutine PMPI_Mrecv_f08(buf,count,datatype,message,status,ierror &
end subroutine PMPI_Mrecv_f08
end interface PMPI_Mrecv
interface PMPI_Neighbor_allgather
subroutine PMPI_Neighbor_allgather_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype, &
comm,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Neighbor_allgather_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Neighbor_allgather_f08
end interface PMPI_Neighbor_allgather
interface PMPI_Ineighbor_allgather
subroutine PMPI_Ineighbor_allgather_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype, &
comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Ineighbor_allgather_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Ineighbor_allgather_f08
end interface PMPI_Ineighbor_allgather
interface PMPI_Neighbor_allgatherv
subroutine PMPI_Neighbor_allgatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,displs, &
recvtype,comm,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Neighbor_allgatherv_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount
INTEGER, INTENT(IN) :: recvcounts(*), displs(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Neighbor_allgatherv_f08
end interface PMPI_Neighbor_allgatherv
interface PMPI_Ineighbor_allgatherv
subroutine PMPI_Ineighbor_allgatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,displs, &
recvtype,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Ineighbor_allgatherv_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount
INTEGER, INTENT(IN) :: recvcounts(*), displs(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Ineighbor_allgatherv_f08
end interface PMPI_Ineighbor_allgatherv
interface PMPI_Neighbor_alltoall
subroutine PMPI_Neighbor_alltoall_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype, &
comm,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Neighbor_alltoall_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Neighbor_alltoall_f08
end interface PMPI_Neighbor_alltoall
interface PMPI_Ineighbor_alltoall
subroutine PMPI_Ineighbor_alltoall_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype, &
comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Ineighbor_alltoall_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Ineighbor_alltoall_f08
end interface PMPI_Ineighbor_alltoall
interface PMPI_Neighbor_alltoallv
subroutine PMPI_Neighbor_alltoallv_f08(sendbuf,sendcounts,sdispls,sendtype,recvbuf,recvcounts, &
rdispls,recvtype,comm,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Neighbor_alltoallv_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Neighbor_alltoallv_f08
end interface PMPI_Neighbor_alltoallv
interface PMPI_Ineighbor_alltoallv
subroutine PMPI_Ineighbor_alltoallv_f08(sendbuf,sendcounts,sdispls,sendtype,recvbuf,recvcounts, &
rdispls,recvtype,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Ineighbor_alltoallv_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Ineighbor_alltoallv_f08
end interface PMPI_Ineighbor_alltoallv
interface PMPI_Neighbor_alltoallw
subroutine PMPI_Neighbor_alltoallw_f08(sendbuf,sendcounts,sdispls,sendtypes,recvbuf,recvcounts, &
rdispls,recvtypes,comm,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Neighbor_alltoallw_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_ADDRESS_KIND
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), recvcounts(*)
INTEGER(MPI_ADDRESS_KIND), INTENT(IN) :: sdispls(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtypes(*), recvtypes(*)
TYPE(MPI_Comm), INTENT(IN) :: comm
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Neighbor_alltoallw_f08
end interface PMPI_Neighbor_alltoallw
interface PMPI_Ineighbor_alltoallw
subroutine PMPI_Ineighbor_alltoallw_f08(sendbuf,sendcounts,sdispls,sendtypes,recvbuf,recvcounts, &
rdispls,recvtypes,comm,request,ierror &
) OMPI_F08_INTERFACE_BIND_C("PMPI_Ineighbor_alltoallw_f08")
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request, MPI_ADDRESS_KIND
implicit none
!DEC$ ATTRIBUTES NO_ARG_CHECK :: sendbuf, recvbuf
!$PRAGMA IGNORE_TKR sendbuf, recvbuf
!DIR$ IGNORE_TKR sendbuf, recvbuf
!IBM* IGNORE_TKR sendbuf, recvbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf
OMPI_FORTRAN_IGNORE_TKR_TYPE :: recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), recvcounts(*)
INTEGER(MPI_ADDRESS_KIND), INTENT(IN) :: sdispls(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtypes(*), recvtypes(*)
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(IN) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
end subroutine PMPI_Ineighbor_alltoallw_f08
end interface PMPI_Ineighbor_alltoallw
end module pmpi_f08_interfaces

@@ -195,6 +195,11 @@ libmpi_mpifh_la_SOURCES += \
igatherv_f.c \
improbe_f.c \
imrecv_f.c \
ineighbor_allgather_f.c \
ineighbor_allgatherv_f.c \
ineighbor_alltoall_f.c \
ineighbor_alltoallv_f.c \
ineighbor_alltoallw_f.c \
info_create_f.c \
info_delete_f.c \
info_dup_f.c \
@@ -226,6 +231,11 @@ libmpi_mpifh_la_SOURCES += \
lookup_name_f.c \
mprobe_f.c \
mrecv_f.c \
neighbor_allgather_f.c \
neighbor_allgatherv_f.c \
neighbor_alltoall_f.c \
neighbor_alltoallv_f.c \
neighbor_alltoallw_f.c \
op_commutative_f.c \
op_create_f.c \
open_port_f.c \

@@ -0,0 +1,98 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_INEIGHBOR_ALLGATHER = ompi_ineighbor_allgather_f
#pragma weak pmpi_ineighbor_allgather = ompi_ineighbor_allgather_f
#pragma weak pmpi_ineighbor_allgather_ = ompi_ineighbor_allgather_f
#pragma weak pmpi_ineighbor_allgather__ = ompi_ineighbor_allgather_f
#pragma weak PMPI_Ineighbor_allgather_f = ompi_ineighbor_allgather_f
#pragma weak PMPI_Ineighbor_allgather_f08 = ompi_ineighbor_allgather_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_INEIGHBOR_ALLGATHER,
pmpi_ineighbor_allgather,
pmpi_ineighbor_allgather_,
pmpi_ineighbor_allgather__,
pompi_ineighbor_allgather_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm, request, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_INEIGHBOR_ALLGATHER = ompi_ineighbor_allgather_f
#pragma weak mpi_ineighbor_allgather = ompi_ineighbor_allgather_f
#pragma weak mpi_ineighbor_allgather_ = ompi_ineighbor_allgather_f
#pragma weak mpi_ineighbor_allgather__ = ompi_ineighbor_allgather_f
#pragma weak MPI_Ineighbor_allgather_f = ompi_ineighbor_allgather_f
#pragma weak MPI_Ineighbor_allgather_f08 = ompi_ineighbor_allgather_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_INEIGHBOR_ALLGATHER,
mpi_ineighbor_allgather,
mpi_ineighbor_allgather_,
mpi_ineighbor_allgather__,
ompi_ineighbor_allgather_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm, request, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_ineighbor_allgather_f(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype,
char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype,
MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr)
{
int ierr_c;
MPI_Comm c_comm;
MPI_Request c_req;
MPI_Datatype c_sendtype, c_recvtype;
c_comm = MPI_Comm_f2c(*comm);
c_sendtype = MPI_Type_f2c(*sendtype);
c_recvtype = MPI_Type_f2c(*recvtype);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
ierr_c = MPI_Ineighbor_allgather(sendbuf,
OMPI_FINT_2_INT(*sendcount),
c_sendtype,
recvbuf,
OMPI_FINT_2_INT(*recvcount),
c_recvtype, c_comm, &c_req);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(ierr_c);
if (MPI_SUCCESS == ierr_c) *request = MPI_Request_c2f(c_req);
}

@@ -0,0 +1,108 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_INEIGHBOR_ALLGATHERV = ompi_ineighbor_allgatherv_f
#pragma weak pmpi_ineighbor_allgatherv = ompi_ineighbor_allgatherv_f
#pragma weak pmpi_ineighbor_allgatherv_ = ompi_ineighbor_allgatherv_f
#pragma weak pmpi_ineighbor_allgatherv__ = ompi_ineighbor_allgatherv_f
#pragma weak PMPI_Ineighbor_allgatherv_f = ompi_ineighbor_allgatherv_f
#pragma weak PMPI_Ineighbor_allgatherv_f08 = ompi_ineighbor_allgatherv_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_INEIGHBOR_ALLGATHERV,
pmpi_ineighbor_allgatherv,
pmpi_ineighbor_allgatherv_,
pmpi_ineighbor_allgatherv__,
pompi_ineighbor_allgatherv_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *displs, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm, request, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_INEIGHBOR_ALLGATHERV = ompi_ineighbor_allgatherv_f
#pragma weak mpi_ineighbor_allgatherv = ompi_ineighbor_allgatherv_f
#pragma weak mpi_ineighbor_allgatherv_ = ompi_ineighbor_allgatherv_f
#pragma weak mpi_ineighbor_allgatherv__ = ompi_ineighbor_allgatherv_f
#pragma weak MPI_Ineighbor_allgatherv_f = ompi_ineighbor_allgatherv_f
#pragma weak MPI_Ineighbor_allgatherv_f08 = ompi_ineighbor_allgatherv_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_INEIGHBOR_ALLGATHERV,
mpi_ineighbor_allgatherv,
mpi_ineighbor_allgatherv_,
mpi_ineighbor_allgatherv__,
ompi_ineighbor_allgatherv_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *displs, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm, request, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_ineighbor_allgatherv_f(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype,
char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *displs,
MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request,
MPI_Fint *ierr)
{
MPI_Comm c_comm;
MPI_Datatype c_sendtype, c_recvtype;
MPI_Request c_request;
int size, ierr_c;
OMPI_ARRAY_NAME_DECL(recvcounts);
OMPI_ARRAY_NAME_DECL(displs);
c_comm = MPI_Comm_f2c(*comm);
c_sendtype = MPI_Type_f2c(*sendtype);
c_recvtype = MPI_Type_f2c(*recvtype);
MPI_Comm_size(c_comm, &size);
OMPI_ARRAY_FINT_2_INT(recvcounts, size);
OMPI_ARRAY_FINT_2_INT(displs, size);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
ierr_c = MPI_Ineighbor_allgatherv(sendbuf,
OMPI_FINT_2_INT(*sendcount),
c_sendtype,
recvbuf,
OMPI_ARRAY_NAME_CONVERT(recvcounts),
OMPI_ARRAY_NAME_CONVERT(displs),
c_recvtype, c_comm, &c_request);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(ierr_c);
if (MPI_SUCCESS == ierr_c) *request = MPI_Request_c2f(c_request);
OMPI_ARRAY_FINT_2_INT_CLEANUP(recvcounts);
OMPI_ARRAY_FINT_2_INT_CLEANUP(displs);
}


@@ -0,0 +1,97 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_INEIGHBOR_ALLTOALL = ompi_ineighbor_alltoall_f
#pragma weak pmpi_ineighbor_alltoall = ompi_ineighbor_alltoall_f
#pragma weak pmpi_ineighbor_alltoall_ = ompi_ineighbor_alltoall_f
#pragma weak pmpi_ineighbor_alltoall__ = ompi_ineighbor_alltoall_f
#pragma weak PMPI_Ineighbor_alltoall_f = ompi_ineighbor_alltoall_f
#pragma weak PMPI_Ineighbor_alltoall_f08 = ompi_ineighbor_alltoall_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_INEIGHBOR_ALLTOALL,
pmpi_ineighbor_alltoall,
pmpi_ineighbor_alltoall_,
pmpi_ineighbor_alltoall__,
pompi_ineighbor_alltoall_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm, request, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_INEIGHBOR_ALLTOALL = ompi_ineighbor_alltoall_f
#pragma weak mpi_ineighbor_alltoall = ompi_ineighbor_alltoall_f
#pragma weak mpi_ineighbor_alltoall_ = ompi_ineighbor_alltoall_f
#pragma weak mpi_ineighbor_alltoall__ = ompi_ineighbor_alltoall_f
#pragma weak MPI_Ineighbor_alltoall_f = ompi_ineighbor_alltoall_f
#pragma weak MPI_Ineighbor_alltoall_f08 = ompi_ineighbor_alltoall_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_INEIGHBOR_ALLTOALL,
mpi_ineighbor_alltoall,
mpi_ineighbor_alltoall_,
mpi_ineighbor_alltoall__,
ompi_ineighbor_alltoall_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm, request, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_ineighbor_alltoall_f(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype,
char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype,
MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr)
{
int c_ierr;
MPI_Comm c_comm;
MPI_Request c_req;
MPI_Datatype c_sendtype, c_recvtype;
c_comm = MPI_Comm_f2c(*comm);
c_sendtype = MPI_Type_f2c(*sendtype);
c_recvtype = MPI_Type_f2c(*recvtype);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
c_ierr = MPI_Ineighbor_alltoall(sendbuf,
OMPI_FINT_2_INT(*sendcount),
c_sendtype,
recvbuf,
OMPI_FINT_2_INT(*recvcount),
c_recvtype, c_comm, &c_req);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
if (MPI_SUCCESS == c_ierr) *request = MPI_Request_c2f(c_req);
}


@@ -0,0 +1,114 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_INEIGHBOR_ALLTOALLV = ompi_ineighbor_alltoallv_f
#pragma weak pmpi_ineighbor_alltoallv = ompi_ineighbor_alltoallv_f
#pragma weak pmpi_ineighbor_alltoallv_ = ompi_ineighbor_alltoallv_f
#pragma weak pmpi_ineighbor_alltoallv__ = ompi_ineighbor_alltoallv_f
#pragma weak PMPI_Ineighbor_alltoallv_f = ompi_ineighbor_alltoallv_f
#pragma weak PMPI_Ineighbor_alltoallv_f08 = ompi_ineighbor_alltoallv_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_INEIGHBOR_ALLTOALLV,
pmpi_ineighbor_alltoallv,
pmpi_ineighbor_alltoallv_,
pmpi_ineighbor_alltoallv__,
pompi_ineighbor_alltoallv_f,
(char *sendbuf, MPI_Fint *sendcounts, MPI_Fint *sdispls, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *rdispls, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcounts, sdispls, sendtype, recvbuf, recvcounts, rdispls, recvtype, comm, request, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_INEIGHBOR_ALLTOALLV = ompi_ineighbor_alltoallv_f
#pragma weak mpi_ineighbor_alltoallv = ompi_ineighbor_alltoallv_f
#pragma weak mpi_ineighbor_alltoallv_ = ompi_ineighbor_alltoallv_f
#pragma weak mpi_ineighbor_alltoallv__ = ompi_ineighbor_alltoallv_f
#pragma weak MPI_Ineighbor_alltoallv_f = ompi_ineighbor_alltoallv_f
#pragma weak MPI_Ineighbor_alltoallv_f08 = ompi_ineighbor_alltoallv_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_INEIGHBOR_ALLTOALLV,
mpi_ineighbor_alltoallv,
mpi_ineighbor_alltoallv_,
mpi_ineighbor_alltoallv__,
ompi_ineighbor_alltoallv_f,
(char *sendbuf, MPI_Fint *sendcounts, MPI_Fint *sdispls, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *rdispls, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcounts, sdispls, sendtype, recvbuf, recvcounts, rdispls, recvtype, comm, request, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_ineighbor_alltoallv_f(char *sendbuf, MPI_Fint *sendcounts, MPI_Fint *sdispls,
MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts,
MPI_Fint *rdispls, MPI_Fint *recvtype,
MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr)
{
MPI_Comm c_comm;
MPI_Datatype c_sendtype, c_recvtype;
MPI_Request c_request;
int size, c_ierr;
OMPI_ARRAY_NAME_DECL(sendcounts);
OMPI_ARRAY_NAME_DECL(sdispls);
OMPI_ARRAY_NAME_DECL(recvcounts);
OMPI_ARRAY_NAME_DECL(rdispls);
c_comm = MPI_Comm_f2c(*comm);
c_sendtype = MPI_Type_f2c(*sendtype);
c_recvtype = MPI_Type_f2c(*recvtype);
MPI_Comm_size(c_comm, &size);
OMPI_ARRAY_FINT_2_INT(sendcounts, size);
OMPI_ARRAY_FINT_2_INT(sdispls, size);
OMPI_ARRAY_FINT_2_INT(recvcounts, size);
OMPI_ARRAY_FINT_2_INT(rdispls, size);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
c_ierr = MPI_Ineighbor_alltoallv(sendbuf,
OMPI_ARRAY_NAME_CONVERT(sendcounts),
OMPI_ARRAY_NAME_CONVERT(sdispls),
c_sendtype,
recvbuf,
OMPI_ARRAY_NAME_CONVERT(recvcounts),
OMPI_ARRAY_NAME_CONVERT(rdispls),
c_recvtype, c_comm, &c_request);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
if (MPI_SUCCESS == c_ierr) *request = MPI_Request_c2f(c_request);
OMPI_ARRAY_FINT_2_INT_CLEANUP(sendcounts);
OMPI_ARRAY_FINT_2_INT_CLEANUP(sdispls);
OMPI_ARRAY_FINT_2_INT_CLEANUP(recvcounts);
OMPI_ARRAY_FINT_2_INT_CLEANUP(rdispls);
}


@@ -0,0 +1,118 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_INEIGHBOR_ALLTOALLW = ompi_ineighbor_alltoallw_f
#pragma weak pmpi_ineighbor_alltoallw = ompi_ineighbor_alltoallw_f
#pragma weak pmpi_ineighbor_alltoallw_ = ompi_ineighbor_alltoallw_f
#pragma weak pmpi_ineighbor_alltoallw__ = ompi_ineighbor_alltoallw_f
#pragma weak PMPI_Ineighbor_alltoallw_f = ompi_ineighbor_alltoallw_f
#pragma weak PMPI_Ineighbor_alltoallw_f08 = ompi_ineighbor_alltoallw_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_INEIGHBOR_ALLTOALLW,
pmpi_ineighbor_alltoallw,
pmpi_ineighbor_alltoallw_,
pmpi_ineighbor_alltoallw__,
pompi_ineighbor_alltoallw_f,
(char *sendbuf, MPI_Fint *sendcounts, MPI_Aint *sdispls, MPI_Fint *sendtypes, char *recvbuf, MPI_Fint *recvcounts, MPI_Aint *rdispls, MPI_Fint *recvtypes, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, recvcounts, rdispls, recvtypes, comm, request, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_INEIGHBOR_ALLTOALLW = ompi_ineighbor_alltoallw_f
#pragma weak mpi_ineighbor_alltoallw = ompi_ineighbor_alltoallw_f
#pragma weak mpi_ineighbor_alltoallw_ = ompi_ineighbor_alltoallw_f
#pragma weak mpi_ineighbor_alltoallw__ = ompi_ineighbor_alltoallw_f
#pragma weak MPI_Ineighbor_alltoallw_f = ompi_ineighbor_alltoallw_f
#pragma weak MPI_Ineighbor_alltoallw_f08 = ompi_ineighbor_alltoallw_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_INEIGHBOR_ALLTOALLW,
mpi_ineighbor_alltoallw,
mpi_ineighbor_alltoallw_,
mpi_ineighbor_alltoallw__,
ompi_ineighbor_alltoallw_f,
(char *sendbuf, MPI_Fint *sendcounts, MPI_Aint *sdispls, MPI_Fint *sendtypes, char *recvbuf, MPI_Fint *recvcounts, MPI_Aint *rdispls, MPI_Fint *recvtypes, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, recvcounts, rdispls, recvtypes, comm, request, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_ineighbor_alltoallw_f(char *sendbuf, MPI_Fint *sendcounts,
MPI_Aint *sdispls, MPI_Fint *sendtypes,
char *recvbuf, MPI_Fint *recvcounts,
MPI_Aint *rdispls, MPI_Fint *recvtypes,
MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr)
{
MPI_Comm c_comm;
MPI_Datatype *c_sendtypes, *c_recvtypes;
MPI_Request c_request;
int size, c_ierr;
OMPI_ARRAY_NAME_DECL(sendcounts);
OMPI_ARRAY_NAME_DECL(recvcounts);
c_comm = MPI_Comm_f2c(*comm);
MPI_Comm_size(c_comm, &size);
c_sendtypes = (MPI_Datatype *) malloc(size * sizeof(MPI_Datatype));
c_recvtypes = (MPI_Datatype *) malloc(size * sizeof(MPI_Datatype));
OMPI_ARRAY_FINT_2_INT(sendcounts, size);
OMPI_ARRAY_FINT_2_INT(recvcounts, size);
while (size > 0) {
c_sendtypes[size - 1] = MPI_Type_f2c(sendtypes[size - 1]);
c_recvtypes[size - 1] = MPI_Type_f2c(recvtypes[size - 1]);
--size;
}
/* Ineighbor_alltoallw does not support MPI_IN_PLACE */
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
c_ierr = MPI_Ineighbor_alltoallw(sendbuf,
OMPI_ARRAY_NAME_CONVERT(sendcounts),
sdispls,
c_sendtypes,
recvbuf,
OMPI_ARRAY_NAME_CONVERT(recvcounts),
rdispls,
c_recvtypes, c_comm, &c_request);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
if (MPI_SUCCESS == c_ierr) *request = MPI_Request_c2f(c_request);
OMPI_ARRAY_FINT_2_INT_CLEANUP(sendcounts);
OMPI_ARRAY_FINT_2_INT_CLEANUP(recvcounts);
free(c_sendtypes);
free(c_recvtypes);
}


@@ -0,0 +1,95 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_NEIGHBOR_ALLGATHER = ompi_neighbor_allgather_f
#pragma weak pmpi_neighbor_allgather = ompi_neighbor_allgather_f
#pragma weak pmpi_neighbor_allgather_ = ompi_neighbor_allgather_f
#pragma weak pmpi_neighbor_allgather__ = ompi_neighbor_allgather_f
#pragma weak PMPI_Neighbor_allgather_f = ompi_neighbor_allgather_f
#pragma weak PMPI_Neighbor_allgather_f08 = ompi_neighbor_allgather_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_NEIGHBOR_ALLGATHER,
pmpi_neighbor_allgather,
pmpi_neighbor_allgather_,
pmpi_neighbor_allgather__,
pompi_neighbor_allgather_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_NEIGHBOR_ALLGATHER = ompi_neighbor_allgather_f
#pragma weak mpi_neighbor_allgather = ompi_neighbor_allgather_f
#pragma weak mpi_neighbor_allgather_ = ompi_neighbor_allgather_f
#pragma weak mpi_neighbor_allgather__ = ompi_neighbor_allgather_f
#pragma weak MPI_Neighbor_allgather_f = ompi_neighbor_allgather_f
#pragma weak MPI_Neighbor_allgather_f08 = ompi_neighbor_allgather_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_NEIGHBOR_ALLGATHER,
mpi_neighbor_allgather,
mpi_neighbor_allgather_,
mpi_neighbor_allgather__,
ompi_neighbor_allgather_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_neighbor_allgather_f(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype,
char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype,
MPI_Fint *comm, MPI_Fint *ierr)
{
int ierr_c;
MPI_Comm c_comm;
MPI_Datatype c_sendtype, c_recvtype;
c_comm = MPI_Comm_f2c(*comm);
c_sendtype = MPI_Type_f2c(*sendtype);
c_recvtype = MPI_Type_f2c(*recvtype);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
ierr_c = MPI_Neighbor_allgather(sendbuf,
OMPI_FINT_2_INT(*sendcount),
c_sendtype,
recvbuf,
OMPI_FINT_2_INT(*recvcount),
c_recvtype, c_comm);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(ierr_c);
}


@@ -0,0 +1,105 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_NEIGHBOR_ALLGATHERV = ompi_neighbor_allgatherv_f
#pragma weak pmpi_neighbor_allgatherv = ompi_neighbor_allgatherv_f
#pragma weak pmpi_neighbor_allgatherv_ = ompi_neighbor_allgatherv_f
#pragma weak pmpi_neighbor_allgatherv__ = ompi_neighbor_allgatherv_f
#pragma weak PMPI_Neighbor_allgatherv_f = ompi_neighbor_allgatherv_f
#pragma weak PMPI_Neighbor_allgatherv_f08 = ompi_neighbor_allgatherv_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_NEIGHBOR_ALLGATHERV,
pmpi_neighbor_allgatherv,
pmpi_neighbor_allgatherv_,
pmpi_neighbor_allgatherv__,
pompi_neighbor_allgatherv_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *displs, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_NEIGHBOR_ALLGATHERV = ompi_neighbor_allgatherv_f
#pragma weak mpi_neighbor_allgatherv = ompi_neighbor_allgatherv_f
#pragma weak mpi_neighbor_allgatherv_ = ompi_neighbor_allgatherv_f
#pragma weak mpi_neighbor_allgatherv__ = ompi_neighbor_allgatherv_f
#pragma weak MPI_Neighbor_allgatherv_f = ompi_neighbor_allgatherv_f
#pragma weak MPI_Neighbor_allgatherv_f08 = ompi_neighbor_allgatherv_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_NEIGHBOR_ALLGATHERV,
mpi_neighbor_allgatherv,
mpi_neighbor_allgatherv_,
mpi_neighbor_allgatherv__,
ompi_neighbor_allgatherv_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *displs, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_neighbor_allgatherv_f(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype,
char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *displs,
MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *ierr)
{
MPI_Comm c_comm;
MPI_Datatype c_sendtype, c_recvtype;
int size, ierr_c;
OMPI_ARRAY_NAME_DECL(recvcounts);
OMPI_ARRAY_NAME_DECL(displs);
c_comm = MPI_Comm_f2c(*comm);
c_sendtype = MPI_Type_f2c(*sendtype);
c_recvtype = MPI_Type_f2c(*recvtype);
MPI_Comm_size(c_comm, &size);
OMPI_ARRAY_FINT_2_INT(recvcounts, size);
OMPI_ARRAY_FINT_2_INT(displs, size);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
ierr_c = MPI_Neighbor_allgatherv(sendbuf,
OMPI_FINT_2_INT(*sendcount),
c_sendtype,
recvbuf,
OMPI_ARRAY_NAME_CONVERT(recvcounts),
OMPI_ARRAY_NAME_CONVERT(displs),
c_recvtype, c_comm);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(ierr_c);
OMPI_ARRAY_FINT_2_INT_CLEANUP(recvcounts);
OMPI_ARRAY_FINT_2_INT_CLEANUP(displs);
}


@@ -0,0 +1,94 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_NEIGHBOR_ALLTOALL = ompi_neighbor_alltoall_f
#pragma weak pmpi_neighbor_alltoall = ompi_neighbor_alltoall_f
#pragma weak pmpi_neighbor_alltoall_ = ompi_neighbor_alltoall_f
#pragma weak pmpi_neighbor_alltoall__ = ompi_neighbor_alltoall_f
#pragma weak PMPI_Neighbor_alltoall_f = ompi_neighbor_alltoall_f
#pragma weak PMPI_Neighbor_alltoall_f08 = ompi_neighbor_alltoall_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_NEIGHBOR_ALLTOALL,
pmpi_neighbor_alltoall,
pmpi_neighbor_alltoall_,
pmpi_neighbor_alltoall__,
pompi_neighbor_alltoall_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_NEIGHBOR_ALLTOALL = ompi_neighbor_alltoall_f
#pragma weak mpi_neighbor_alltoall = ompi_neighbor_alltoall_f
#pragma weak mpi_neighbor_alltoall_ = ompi_neighbor_alltoall_f
#pragma weak mpi_neighbor_alltoall__ = ompi_neighbor_alltoall_f
#pragma weak MPI_Neighbor_alltoall_f = ompi_neighbor_alltoall_f
#pragma weak MPI_Neighbor_alltoall_f08 = ompi_neighbor_alltoall_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_NEIGHBOR_ALLTOALL,
mpi_neighbor_alltoall,
mpi_neighbor_alltoall_,
mpi_neighbor_alltoall__,
ompi_neighbor_alltoall_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_neighbor_alltoall_f(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype,
char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype,
MPI_Fint *comm, MPI_Fint *ierr)
{
int c_ierr;
MPI_Comm c_comm;
MPI_Datatype c_sendtype, c_recvtype;
c_comm = MPI_Comm_f2c(*comm);
c_sendtype = MPI_Type_f2c(*sendtype);
c_recvtype = MPI_Type_f2c(*recvtype);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
c_ierr = MPI_Neighbor_alltoall(sendbuf,
OMPI_FINT_2_INT(*sendcount),
c_sendtype,
recvbuf,
OMPI_FINT_2_INT(*recvcount),
c_recvtype, c_comm);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
}


@@ -0,0 +1,112 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_NEIGHBOR_ALLTOALLV = ompi_neighbor_alltoallv_f
#pragma weak pmpi_neighbor_alltoallv = ompi_neighbor_alltoallv_f
#pragma weak pmpi_neighbor_alltoallv_ = ompi_neighbor_alltoallv_f
#pragma weak pmpi_neighbor_alltoallv__ = ompi_neighbor_alltoallv_f
#pragma weak PMPI_Neighbor_alltoallv_f = ompi_neighbor_alltoallv_f
#pragma weak PMPI_Neighbor_alltoallv_f08 = ompi_neighbor_alltoallv_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_NEIGHBOR_ALLTOALLV,
pmpi_neighbor_alltoallv,
pmpi_neighbor_alltoallv_,
pmpi_neighbor_alltoallv__,
pompi_neighbor_alltoallv_f,
(char *sendbuf, MPI_Fint *sendcounts, MPI_Fint *sdispls, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *rdispls, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *ierr),
(sendbuf, sendcounts, sdispls, sendtype, recvbuf, recvcounts, rdispls, recvtype, comm, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_NEIGHBOR_ALLTOALLV = ompi_neighbor_alltoallv_f
#pragma weak mpi_neighbor_alltoallv = ompi_neighbor_alltoallv_f
#pragma weak mpi_neighbor_alltoallv_ = ompi_neighbor_alltoallv_f
#pragma weak mpi_neighbor_alltoallv__ = ompi_neighbor_alltoallv_f
#pragma weak MPI_Neighbor_alltoallv_f = ompi_neighbor_alltoallv_f
#pragma weak MPI_Neighbor_alltoallv_f08 = ompi_neighbor_alltoallv_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_NEIGHBOR_ALLTOALLV,
mpi_neighbor_alltoallv,
mpi_neighbor_alltoallv_,
mpi_neighbor_alltoallv__,
ompi_neighbor_alltoallv_f,
(char *sendbuf, MPI_Fint *sendcounts, MPI_Fint *sdispls, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *rdispls, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *ierr),
(sendbuf, sendcounts, sdispls, sendtype, recvbuf, recvcounts, rdispls, recvtype, comm, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_neighbor_alltoallv_f(char *sendbuf, MPI_Fint *sendcounts, MPI_Fint *sdispls,
MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts,
MPI_Fint *rdispls, MPI_Fint *recvtype,
MPI_Fint *comm, MPI_Fint *ierr)
{
MPI_Comm c_comm;
MPI_Datatype c_sendtype, c_recvtype;
int size, c_ierr;
OMPI_ARRAY_NAME_DECL(sendcounts);
OMPI_ARRAY_NAME_DECL(sdispls);
OMPI_ARRAY_NAME_DECL(recvcounts);
OMPI_ARRAY_NAME_DECL(rdispls);
c_comm = MPI_Comm_f2c(*comm);
c_sendtype = MPI_Type_f2c(*sendtype);
c_recvtype = MPI_Type_f2c(*recvtype);
MPI_Comm_size(c_comm, &size);
OMPI_ARRAY_FINT_2_INT(sendcounts, size);
OMPI_ARRAY_FINT_2_INT(sdispls, size);
OMPI_ARRAY_FINT_2_INT(recvcounts, size);
OMPI_ARRAY_FINT_2_INT(rdispls, size);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
c_ierr = MPI_Neighbor_alltoallv(sendbuf,
OMPI_ARRAY_NAME_CONVERT(sendcounts),
OMPI_ARRAY_NAME_CONVERT(sdispls),
c_sendtype,
recvbuf,
OMPI_ARRAY_NAME_CONVERT(recvcounts),
OMPI_ARRAY_NAME_CONVERT(rdispls),
c_recvtype, c_comm);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
OMPI_ARRAY_FINT_2_INT_CLEANUP(sendcounts);
OMPI_ARRAY_FINT_2_INT_CLEANUP(sdispls);
OMPI_ARRAY_FINT_2_INT_CLEANUP(recvcounts);
OMPI_ARRAY_FINT_2_INT_CLEANUP(rdispls);
}
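
A hypothetical mpif.h-style caller for the vector wrapper above (not part of this commit): the count and displacement arrays carry one entry per neighbor, here two on a periodic 1-D Cartesian communicator, and displacements are in units of the send/receive datatype.

program neighbor_alltoallv_demo
    implicit none
    include 'mpif.h'
    integer :: cart, rank, nprocs, ierr
    integer :: sendbuf(2), recvbuf(2)
    integer :: scounts(2), sdispls(2), rcounts(2), rdispls(2)

    call MPI_INIT(ierr)
    call MPI_COMM_SIZE(MPI_COMM_WORLD, nprocs, ierr)
    call MPI_CART_CREATE(MPI_COMM_WORLD, 1, (/ nprocs /), (/ .true. /), &
                         .false., cart, ierr)
    call MPI_COMM_RANK(cart, rank, ierr)

    sendbuf = rank                       ! one element destined for each neighbor
    scounts = 1; rcounts = 1             ! one element per neighbor
    sdispls = (/ 0, 1 /); rdispls = (/ 0, 1 /)
    call MPI_NEIGHBOR_ALLTOALLV(sendbuf, scounts, sdispls, MPI_INTEGER, &
                                recvbuf, rcounts, rdispls, MPI_INTEGER, &
                                cart, ierr)

    call MPI_COMM_FREE(cart, ierr)
    call MPI_FINALIZE(ierr)
end program neighbor_alltoallv_demo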


@@ -0,0 +1,116 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OPAL_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_NEIGHBOR_ALLTOALLW = ompi_neighbor_alltoallw_f
#pragma weak pmpi_neighbor_alltoallw = ompi_neighbor_alltoallw_f
#pragma weak pmpi_neighbor_alltoallw_ = ompi_neighbor_alltoallw_f
#pragma weak pmpi_neighbor_alltoallw__ = ompi_neighbor_alltoallw_f
#pragma weak PMPI_Neighbor_alltoallw_f = ompi_neighbor_alltoallw_f
#pragma weak PMPI_Neighbor_alltoallw_f08 = ompi_neighbor_alltoallw_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_NEIGHBOR_ALLTOALLW,
pmpi_neighbor_alltoallw,
pmpi_neighbor_alltoallw_,
pmpi_neighbor_alltoallw__,
pompi_neighbor_alltoallw_f,
(char *sendbuf, MPI_Fint *sendcounts, MPI_Aint *sdispls, MPI_Fint *sendtypes, char *recvbuf, MPI_Fint *recvcounts, MPI_Aint *rdispls, MPI_Fint *recvtypes, MPI_Fint *comm, MPI_Fint *ierr),
(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, recvcounts, rdispls, recvtypes, comm, ierr) )
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_NEIGHBOR_ALLTOALLW = ompi_neighbor_alltoallw_f
#pragma weak mpi_neighbor_alltoallw = ompi_neighbor_alltoallw_f
#pragma weak mpi_neighbor_alltoallw_ = ompi_neighbor_alltoallw_f
#pragma weak mpi_neighbor_alltoallw__ = ompi_neighbor_alltoallw_f
#pragma weak MPI_Neighbor_alltoallw_f = ompi_neighbor_alltoallw_f
#pragma weak MPI_Neighbor_alltoallw_f08 = ompi_neighbor_alltoallw_f
#endif
#if ! OPAL_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_NEIGHBOR_ALLTOALLW,
mpi_neighbor_alltoallw,
mpi_neighbor_alltoallw_,
mpi_neighbor_alltoallw__,
ompi_neighbor_alltoallw_f,
(char *sendbuf, MPI_Fint *sendcounts, MPI_Aint *sdispls, MPI_Fint *sendtypes, char *recvbuf, MPI_Fint *recvcounts, MPI_Aint *rdispls, MPI_Fint *recvtypes, MPI_Fint *comm, MPI_Fint *ierr),
(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, recvcounts, rdispls, recvtypes, comm, ierr) )
#endif
#if OMPI_PROFILE_LAYER && ! OPAL_HAVE_WEAK_SYMBOLS
#include "ompi/mpi/fortran/mpif-h/profile/defines.h"
#endif
void ompi_neighbor_alltoallw_f(char *sendbuf, MPI_Fint *sendcounts,
MPI_Aint *sdispls, MPI_Fint *sendtypes,
char *recvbuf, MPI_Fint *recvcounts,
MPI_Aint *rdispls, MPI_Fint *recvtypes,
MPI_Fint *comm, MPI_Fint *ierr)
{
MPI_Comm c_comm;
MPI_Datatype *c_sendtypes, *c_recvtypes;
int size, c_ierr;
OMPI_ARRAY_NAME_DECL(sendcounts);
OMPI_ARRAY_NAME_DECL(recvcounts);
c_comm = MPI_Comm_f2c(*comm);
MPI_Comm_size(c_comm, &size);
c_sendtypes = (MPI_Datatype *) malloc(size * sizeof(MPI_Datatype));
c_recvtypes = (MPI_Datatype *) malloc(size * sizeof(MPI_Datatype));
OMPI_ARRAY_FINT_2_INT(sendcounts, size);
OMPI_ARRAY_FINT_2_INT(recvcounts, size);
while (size > 0) {
c_sendtypes[size - 1] = MPI_Type_f2c(sendtypes[size - 1]);
c_recvtypes[size - 1] = MPI_Type_f2c(recvtypes[size - 1]);
--size;
}
/* Neighbor_alltoallw does not support MPI_IN_PLACE */
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
c_ierr = MPI_Neighbor_alltoallw(sendbuf,
OMPI_ARRAY_NAME_CONVERT(sendcounts),
sdispls,
c_sendtypes,
recvbuf,
OMPI_ARRAY_NAME_CONVERT(recvcounts),
rdispls,
c_recvtypes, c_comm);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
OMPI_ARRAY_FINT_2_INT_CLEANUP(sendcounts);
OMPI_ARRAY_FINT_2_INT_CLEANUP(recvcounts);
free(c_sendtypes);
free(c_recvtypes);
}


@@ -165,6 +165,11 @@ nodist_libmpi_mpifh_pmpi_la_SOURCES = \
pigatherv_f.c \
pimprobe_f.c \
pimrecv_f.c \
pineighbor_allgather_f.c \
pineighbor_allgatherv_f.c \
pineighbor_alltoall_f.c \
pineighbor_alltoallv_f.c \
pineighbor_alltoallw_f.c \
pinfo_create_f.c \
pinfo_delete_f.c \
pinfo_dup_f.c \
@@ -196,6 +201,11 @@ nodist_libmpi_mpifh_pmpi_la_SOURCES = \
plookup_name_f.c \
pmprobe_f.c \
pmrecv_f.c \
pneighbor_allgather_f.c \
pneighbor_allgatherv_f.c \
pneighbor_alltoall_f.c \
pneighbor_alltoallv_f.c \
pneighbor_alltoallw_f.c \
pop_commutative_f.c \
pop_create_f.c \
popen_port_f.c \


@@ -12,6 +12,8 @@
* Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2011-2012 Inria. All rights reserved.
* Copyright (c) 2011-2012 Universite Bordeaux 1
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@@ -254,6 +256,11 @@ PN2(void, MPI_Igather, mpi_igather, MPI_IGATHER, (char *sendbuf, MPI_Fint *sendc
PN2(void, MPI_Igatherv, mpi_igatherv, MPI_IGATHERV, (char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *displs, MPI_Fint *recvtype, MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Improbe, mpi_improbe, MPI_IMPROBE, (MPI_Fint *source, MPI_Fint *tag, MPI_Fint *comm, ompi_fortran_logical_t *flag, MPI_Fint *message, MPI_Fint *status, MPI_Fint *ierr));
PN2(void, MPI_Imrecv,mpi_imrecv, MPI_IMRECV, (char *buf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *message, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Ineighbor_allgather, mpi_ineighbor_allgather, MPI_INEIGHBOR_ALLGATHER, (char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Ineighbor_allgatherv, mpi_ineighbor_allgatherv, MPI_INEIGHBOR_ALLGATHERV, (char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *displs, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Ineighbor_alltoall, mpi_ineighbor_alltoall, MPI_INEIGHBOR_ALLTOALL, (char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Ineighbor_alltoallv, mpi_ineighbor_alltoallv, MPI_INEIGHBOR_ALLTOALLV, (char *sendbuf, MPI_Fint *sendcounts, MPI_Fint *sdispls, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *rdispls, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Ineighbor_alltoallw, mpi_ineighbor_alltoallw, MPI_INEIGHBOR_ALLTOALLW, (char *sendbuf, MPI_Fint *sendcounts, MPI_Aint *sdispls, MPI_Fint *sendtypes, char *recvbuf, MPI_Fint *recvcounts, MPI_Aint *rdispls, MPI_Fint *recvtypes, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Ireduce, mpi_ireduce, MPI_IREDUCE, (char *sendbuf, char *recvbuf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *root, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Ireduce_scatter, mpi_ireduce_scatter, MPI_IREDUCE_SCATTER, (char *sendbuf, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
PN2(void, MPI_Ireduce_scatter_block, mpi_ireduce_scatter_block, MPI_IREDUCE_SCATTER_BLOCK, (char *sendbuf, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *datatype, MPI_Fint *op, MPI_Fint *comm, MPI_Fint *request, MPI_Fint *ierr));
@@ -285,6 +292,11 @@ PN2(void, MPI_Keyval_free, mpi_keyval_free, MPI_KEYVAL_FREE, (MPI_Fint *keyval,
PN2(void, MPI_Lookup_name, mpi_lookup_name, MPI_LOOKUP_NAME, (char *service_name, MPI_Fint *info, char *port_name, MPI_Fint *ierr, int service_name_len, int port_name_len));
PN2(void, MPI_Mprobe, mpi_mprobe, MPI_MPROBE, (MPI_Fint *source, MPI_Fint *tag, MPI_Fint *comm, MPI_Fint *message, MPI_Fint *status, MPI_Fint *ierr));
PN2(void, MPI_Mrecv, mpi_mrecv, MPI_MRECV, (char *buf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *message, MPI_Fint *status, MPI_Fint *ierr));
PN2(void, MPI_Neighbor_allgather, mpi_neighbor_allgather, MPI_NEIGHBOR_ALLGATHER, (char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *ierr));
PN2(void, MPI_Neighbor_allgatherv, mpi_neighbor_allgatherv, MPI_NEIGHBOR_ALLGATHERV, (char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *displs, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *ierr));
PN2(void, MPI_Neighbor_alltoall, mpi_neighbor_alltoall, MPI_NEIGHBOR_ALLTOALL, (char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *ierr));
PN2(void, MPI_Neighbor_alltoallv, mpi_neighbor_alltoallv, MPI_NEIGHBOR_ALLTOALLV, (char *sendbuf, MPI_Fint *sendcounts, MPI_Fint *sdispls, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcounts, MPI_Fint *rdispls, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *ierr));
PN2(void, MPI_Neighbor_alltoallw, mpi_neighbor_alltoallw, MPI_NEIGHBOR_ALLTOALLW, (char *sendbuf, MPI_Fint *sendcounts, MPI_Aint *sdispls, MPI_Fint *sendtypes, char *recvbuf, MPI_Fint *recvcounts, MPI_Aint *rdispls, MPI_Fint *recvtypes, MPI_Fint *comm, MPI_Fint *ierr));
PN2(void, MPI_Op_commutative, mpi_op_commutative, MPI_OP_COMMUTATIVE, (MPI_Fint *op, MPI_Fint *commute, MPI_Fint *ierr));
PN2(void, MPI_Op_create, mpi_op_create, MPI_OP_CREATE, (ompi_op_fortran_handler_fn_t* function, ompi_fortran_logical_t *commute, MPI_Fint *op, MPI_Fint *ierr));
PN2(void, MPI_Open_port, mpi_open_port, MPI_OPEN_PORT, (MPI_Fint *info, char *port_name, MPI_Fint *ierr, int port_name_len));


@@ -148,6 +148,11 @@ mpi_api_files = \
igatherv_f08.F90 \
improbe_f08.F90 \
imrecv_f08.F90 \
ineighbor_allgather_f08.F90 \
ineighbor_allgatherv_f08.F90 \
ineighbor_alltoall_f08.F90 \
ineighbor_alltoallv_f08.F90 \
ineighbor_alltoallw_f08.F90 \
info_create_f08.F90 \
info_delete_f08.F90 \
info_dup_f08.F90 \
@@ -177,6 +182,11 @@ mpi_api_files = \
lookup_name_f08.F90 \
mprobe_f08.F90 \
mrecv_f08.F90 \
neighbor_allgather_f08.F90 \
neighbor_allgatherv_f08.F90 \
neighbor_alltoall_f08.F90 \
neighbor_alltoallv_f08.F90 \
neighbor_alltoallw_f08.F90 \
op_commutative_f08.F90 \
op_create_f08.F90 \
open_port_f08.F90 \
@@ -361,6 +371,7 @@ pmpi_api_files = \
profile/padd_error_class_f08.F90 \
profile/padd_error_code_f08.F90 \
profile/padd_error_string_f08.F90 \
profile/pallgather_f08.F90 \
profile/pallgatherv_f08.F90 \
profile/palloc_mem_f08.F90 \
profile/pallreduce_f08.F90 \
@@ -464,6 +475,11 @@ pmpi_api_files = \
profile/piexscan_f08.F90 \
profile/pimprobe_f08.F90 \
profile/pimrecv_f08.F90 \
profile/pineighbor_allgather_f08.F90 \
profile/pineighbor_allgatherv_f08.F90 \
profile/pineighbor_alltoall_f08.F90 \
profile/pineighbor_alltoallv_f08.F90 \
profile/pineighbor_alltoallw_f08.F90 \
profile/pinfo_create_f08.F90 \
profile/pinfo_delete_f08.F90 \
profile/pinfo_dup_f08.F90 \
@@ -493,6 +509,11 @@ pmpi_api_files = \
profile/plookup_name_f08.F90 \
profile/pmprobe_f08.F90 \
profile/pmrecv_f08.F90 \
profile/pneighbor_allgather_f08.F90 \
profile/pneighbor_allgatherv_f08.F90 \
profile/pneighbor_alltoall_f08.F90 \
profile/pneighbor_alltoallv_f08.F90 \
profile/pneighbor_alltoallw_f08.F90 \
profile/pop_commutative_f08.F90 \
profile/pop_create_f08.F90 \
profile/popen_port_f08.F90 \


@@ -0,0 +1,27 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2013 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Ineighbor_allgather_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ineighbor_allgather_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ineighbor_allgather_f(sendbuf,sendcount,sendtype%MPI_VAL,&
recvbuf,recvcount,recvtype%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Ineighbor_allgather_f08


@@ -0,0 +1,29 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2013 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Ineighbor_allgatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,&
displs,recvtype,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ineighbor_allgatherv_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount
INTEGER, INTENT(IN) :: recvcounts(*), displs(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ineighbor_allgatherv_f(sendbuf,sendcount,sendtype%MPI_VAL,recvbuf,recvcounts,&
displs,recvtype%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Ineighbor_allgatherv_f08


@@ -0,0 +1,28 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2013 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Ineighbor_alltoall_f08(sendbuf,sendcount,sendtype,recvbuf,&
recvcount,recvtype,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ineighbor_alltoall_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ineighbor_alltoall_f(sendbuf,sendcount,sendtype%MPI_VAL,recvbuf,&
recvcount,recvtype%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Ineighbor_alltoall_f08


@@ -0,0 +1,29 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2013 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Ineighbor_alltoallv_f08(sendbuf,sendcounts,sdispls,sendtype,recvbuf,&
recvcounts,rdispls,recvtype,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ineighbor_alltoallv_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ineighbor_alltoallv_f(sendbuf,sendcounts,sdispls,sendtype%MPI_VAL,&
recvbuf,recvcounts,rdispls,recvtype%MPI_VAL,&
comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Ineighbor_alltoallv_f08


@@ -0,0 +1,30 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2013 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Ineighbor_alltoallw_f08(sendbuf,sendcounts,sdispls,sendtypes,&
recvbuf,recvcounts,rdispls,recvtypes,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request, MPI_ADDRESS_KIND
use :: mpi_f08, only : ompi_ineighbor_alltoallw_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), recvcounts(*)
INTEGER(MPI_ADDRESS_KIND), INTENT(IN) :: sdispls(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtypes
TYPE(MPI_Datatype), INTENT(IN) :: recvtypes
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ineighbor_alltoallw_f(sendbuf,sendcounts,sdispls,sendtypes%MPI_VAL,&
recvbuf,recvcounts,rdispls,recvtypes%MPI_VAL,&
comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Ineighbor_alltoallw_f08


@@ -0,0 +1,26 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2013 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Neighbor_allgather_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype,comm,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm
use :: mpi_f08, only : ompi_neighbor_allgather_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_neighbor_allgather_f(sendbuf,sendcount,sendtype%MPI_VAL,&
recvbuf,recvcount,recvtype%MPI_VAL,comm%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Neighbor_allgather_f08
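
A hypothetical use-mpi-f08 caller for the binding above (not shipped with this commit): handles are derived types, ierror is optional, and the receive buffer again holds one element per neighbor of a periodic 1-D Cartesian communicator.

program neighbor_allgather_f08_demo
    use mpi_f08
    implicit none
    type(MPI_Comm) :: cart
    integer :: nprocs, rank, sendval
    integer :: recvvals(2)               ! one element per neighbor

    call MPI_Init()
    call MPI_Comm_size(MPI_COMM_WORLD, nprocs)
    ! periodic 1-D torus: every rank has exactly two neighbors
    call MPI_Cart_create(MPI_COMM_WORLD, 1, [nprocs], [.true.], .false., cart)
    call MPI_Comm_rank(cart, rank)

    sendval = rank
    call MPI_Neighbor_allgather(sendval, 1, MPI_INTEGER, &
                                recvvals, 1, MPI_INTEGER, cart)

    call MPI_Comm_free(cart)
    call MPI_Finalize()
end program neighbor_allgather_f08_demo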


@@ -0,0 +1,28 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2013 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Neighbor_allgatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,&
displs,recvtype,comm,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm
use :: mpi_f08, only : ompi_neighbor_allgatherv_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount
INTEGER, INTENT(IN) :: recvcounts(*), displs(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_neighbor_allgatherv_f(sendbuf,sendcount,sendtype%MPI_VAL,recvbuf,recvcounts,&
displs,recvtype%MPI_VAL,comm%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Neighbor_allgatherv_f08


@@ -0,0 +1,27 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2013 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Neighbor_alltoall_f08(sendbuf,sendcount,sendtype,recvbuf,&
recvcount,recvtype,comm,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm
use :: mpi_f08, only : ompi_neighbor_alltoall_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_neighbor_alltoall_f(sendbuf,sendcount,sendtype%MPI_VAL,recvbuf,&
recvcount,recvtype%MPI_VAL,comm%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Neighbor_alltoall_f08


@@ -0,0 +1,28 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2013 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Neighbor_alltoallv_f08(sendbuf,sendcounts,sdispls,sendtype,recvbuf,&
recvcounts,rdispls,recvtype,comm,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm
use :: mpi_f08, only : ompi_neighbor_alltoallv_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_neighbor_alltoallv_f(sendbuf,sendcounts,sdispls,sendtype%MPI_VAL,&
recvbuf,recvcounts,rdispls,recvtype%MPI_VAL,&
comm%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Neighbor_alltoallv_f08


@@ -0,0 +1,29 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2013 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine MPI_Neighbor_alltoallw_f08(sendbuf,sendcounts,sdispls,sendtypes,&
recvbuf,recvcounts,rdispls,recvtypes,comm,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_ADDRESS_KIND
use :: mpi_f08, only : ompi_neighbor_alltoallw_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), recvcounts(*)
INTEGER(MPI_ADDRESS_KIND), INTENT(IN) :: sdispls(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtypes
TYPE(MPI_Datatype), INTENT(IN) :: recvtypes
TYPE(MPI_Comm), INTENT(IN) :: comm
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_neighbor_alltoallw_f(sendbuf,sendcounts,sdispls,sendtypes%MPI_VAL,&
recvbuf,recvcounts,rdispls,recvtypes%MPI_VAL,&
comm%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine MPI_Neighbor_alltoallw_f08


@@ -6,7 +6,7 @@
! $COPYRIGHT$
subroutine PMPI_Dist_graph_neighbors_f08(comm,maxindegree,sources,sourceweights,&
maxoutdegree,destinations,destweights,ierror)
use :: mpi_f08_types, only : MPI_Comm
use :: mpi_f08, only : ompi_dist_graph_neighbors_f
implicit none


@@ -0,0 +1,27 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2013 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Ineighbor_allgather_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ineighbor_allgather_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ineighbor_allgather_f(sendbuf,sendcount,sendtype%MPI_VAL,&
recvbuf,recvcount,recvtype%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Ineighbor_allgather_f08


@@ -0,0 +1,29 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2013 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Ineighbor_allgatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,&
displs,recvtype,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ineighbor_allgatherv_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount
INTEGER, INTENT(IN) :: recvcounts(*), displs(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ineighbor_allgatherv_f(sendbuf,sendcount,sendtype%MPI_VAL,recvbuf,recvcounts,&
displs,recvtype%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Ineighbor_allgatherv_f08


@@ -0,0 +1,28 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2013 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Ineighbor_alltoall_f08(sendbuf,sendcount,sendtype,recvbuf,&
recvcount,recvtype,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ineighbor_alltoall_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ineighbor_alltoall_f(sendbuf,sendcount,sendtype%MPI_VAL,recvbuf,&
recvcount,recvtype%MPI_VAL,comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Ineighbor_alltoall_f08


@@ -0,0 +1,29 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2013 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Ineighbor_alltoallv_f08(sendbuf,sendcounts,sdispls,sendtype,recvbuf,&
recvcounts,rdispls,recvtype,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request
use :: mpi_f08, only : ompi_ineighbor_alltoallv_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ineighbor_alltoallv_f(sendbuf,sendcounts,sdispls,sendtype%MPI_VAL,&
recvbuf,recvcounts,rdispls,recvtype%MPI_VAL,&
comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Ineighbor_alltoallv_f08


@@ -0,0 +1,30 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2013 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Ineighbor_alltoallw_f08(sendbuf,sendcounts,sdispls,sendtypes,&
recvbuf,recvcounts,rdispls,recvtypes,comm,request,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_Request, MPI_ADDRESS_KIND
use :: mpi_f08, only : ompi_ineighbor_alltoallw_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), recvcounts(*)
INTEGER(MPI_ADDRESS_KIND), INTENT(IN) :: sdispls(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtypes
TYPE(MPI_Datatype), INTENT(IN) :: recvtypes
TYPE(MPI_Comm), INTENT(IN) :: comm
TYPE(MPI_Request), INTENT(OUT) :: request
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_ineighbor_alltoallw_f(sendbuf,sendcounts,sdispls,sendtypes%MPI_VAL,&
recvbuf,recvcounts,rdispls,recvtypes%MPI_VAL,&
comm%MPI_VAL,request%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Ineighbor_alltoallw_f08


@ -0,0 +1,26 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2013 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Neighbor_allgather_f08(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype,comm,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm
use :: mpi_f08, only : ompi_neighbor_allgather_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_neighbor_allgather_f(sendbuf,sendcount,sendtype%MPI_VAL,&
recvbuf,recvcount,recvtype%MPI_VAL,comm%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Neighbor_allgather_f08


@ -0,0 +1,28 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2013 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Neighbor_allgatherv_f08(sendbuf,sendcount,sendtype,recvbuf,recvcounts,&
displs,recvtype,comm,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm
use :: mpi_f08, only : ompi_neighbor_allgatherv_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount
INTEGER, INTENT(IN) :: recvcounts(*), displs(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_neighbor_allgatherv_f(sendbuf,sendcount,sendtype%MPI_VAL,recvbuf,recvcounts,&
displs,recvtype%MPI_VAL,comm%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Neighbor_allgatherv_f08


@ -0,0 +1,27 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2013 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Neighbor_alltoall_f08(sendbuf,sendcount,sendtype,recvbuf,&
recvcount,recvtype,comm,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm
use :: mpi_f08, only : ompi_neighbor_alltoall_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcount, recvcount
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_neighbor_alltoall_f(sendbuf,sendcount,sendtype%MPI_VAL,recvbuf,&
recvcount,recvtype%MPI_VAL,comm%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Neighbor_alltoall_f08


@ -0,0 +1,28 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2013 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Neighbor_alltoallv_f08(sendbuf,sendcounts,sdispls,sendtype,recvbuf,&
recvcounts,rdispls,recvtype,comm,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm
use :: mpi_f08, only : ompi_neighbor_alltoallv_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), sdispls(*), recvcounts(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtype
TYPE(MPI_Datatype), INTENT(IN) :: recvtype
TYPE(MPI_Comm), INTENT(IN) :: comm
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_neighbor_alltoallv_f(sendbuf,sendcounts,sdispls,sendtype%MPI_VAL,&
recvbuf,recvcounts,rdispls,recvtype%MPI_VAL,&
comm%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Neighbor_alltoallv_f08


@ -0,0 +1,29 @@
! -*- f90 -*-
!
! Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
! Copyright (c) 2009-2013 Los Alamos National Security, LLC.
! All rights reserved.
! $COPYRIGHT$
#include "ompi/mpi/fortran/configure-fortran-output.h"
subroutine PMPI_Neighbor_alltoallw_f08(sendbuf,sendcounts,sdispls,sendtypes,&
recvbuf,recvcounts,rdispls,recvtypes,comm,ierror)
use :: mpi_f08_types, only : MPI_Datatype, MPI_Comm, MPI_ADDRESS_KIND
use :: mpi_f08, only : ompi_neighbor_alltoallw_f
implicit none
OMPI_FORTRAN_IGNORE_TKR_TYPE, INTENT(IN) :: sendbuf, recvbuf
INTEGER, INTENT(IN) :: sendcounts(*), recvcounts(*)
INTEGER(MPI_ADDRESS_KIND), INTENT(IN) :: sdispls(*), rdispls(*)
TYPE(MPI_Datatype), INTENT(IN) :: sendtypes
TYPE(MPI_Datatype), INTENT(IN) :: recvtypes
TYPE(MPI_Comm), INTENT(IN) :: comm
INTEGER, OPTIONAL, INTENT(OUT) :: ierror
integer :: c_ierror
call ompi_neighbor_alltoallw_f(sendbuf,sendcounts,sdispls,sendtypes%MPI_VAL,&
recvbuf,recvcounts,rdispls,recvtypes%MPI_VAL,&
comm%MPI_VAL,c_ierror)
if (present(ierror)) ierror = c_ierror
end subroutine PMPI_Neighbor_alltoallw_f08


@ -7,6 +7,8 @@
! of Tennessee Research Foundation. All rights
! reserved.
! Copyright (c) 2012 Inria. All rights reserved.
! Copyright (c) 2013 Los Alamos National Security, LLC. All rights
! reserved.
! $COPYRIGHT$
!
! Additional copyrights may follow
@ -1616,6 +1618,111 @@ end subroutine MPI_Imrecv
end interface
interface MPI_Ineighbor_allgather
subroutine MPI_Ineighbor_allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
recvtype, comm, request, ierror)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
integer, intent(in) :: sendcount
integer, intent(in) :: sendtype
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, intent(in) :: recvcount
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierror
end subroutine MPI_Ineighbor_allgather
end interface
interface MPI_Ineighbor_allgatherv
subroutine MPI_Ineighbor_allgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, &
displs, recvtype, comm, request, ierror)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
integer, intent(in) :: sendcount
integer, intent(in) :: sendtype
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer, dimension(*), intent(in) :: displs
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierror
end subroutine MPI_Ineighbor_allgatherv
end interface
interface MPI_Ineighbor_alltoall
subroutine MPI_Ineighbor_alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
recvtype, comm, request, ierror)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
integer, intent(in) :: sendcount
integer, intent(in) :: sendtype
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, intent(in) :: recvcount
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierror
end subroutine MPI_Ineighbor_alltoall
end interface
interface MPI_Ineighbor_alltoallv
subroutine MPI_Ineighbor_alltoallv(sendbuf, sendcounts, sdispls, sendtype, recvbuf, &
recvcounts, rdispls, recvtype, comm, request, ierror)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
integer, dimension(*), intent(in) :: sendcounts
integer, dimension(*), intent(in) :: sdispls
integer, intent(in) :: sendtype
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer, dimension(*), intent(in) :: rdispls
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierror
end subroutine MPI_Ineighbor_alltoallv
end interface
interface MPI_Ineighbor_alltoallw
subroutine MPI_Ineighbor_alltoallw(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, &
recvcounts, rdispls, recvtypes, comm, request, ierror)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
integer, dimension(*), intent(in) :: sendcounts
integer(kind=MPI_ADDRESS_KIND), intent(in) :: sdispls
integer, dimension(*), intent(in) :: sendtypes
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer(kind=MPI_ADDRESS_KIND), intent(in) :: rdispls
integer, dimension(*), intent(in) :: recvtypes
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierror
end subroutine MPI_Ineighbor_alltoallw
end interface
interface MPI_Info_create
subroutine MPI_Info_create(info, ierror)
@ -2063,6 +2170,105 @@ end subroutine MPI_Mrecv
end interface
interface MPI_Neighbor_allgather
subroutine MPI_Neighbor_allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
recvtype, comm, ierror)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
integer, intent(in) :: sendcount
integer, intent(in) :: sendtype
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, intent(in) :: recvcount
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: ierror
end subroutine MPI_Neighbor_allgather
end interface
interface MPI_Neighbor_allgatherv
subroutine MPI_Neighbor_allgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, &
displs, recvtype, comm, ierror)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
integer, intent(in) :: sendcount
integer, intent(in) :: sendtype
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer, dimension(*), intent(in) :: displs
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: ierror
end subroutine MPI_Neighbor_allgatherv
end interface
interface MPI_Neighbor_alltoall
subroutine MPI_Neighbor_alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
recvtype, comm, ierror)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
integer, intent(in) :: sendcount
integer, intent(in) :: sendtype
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, intent(in) :: recvcount
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: ierror
end subroutine MPI_Neighbor_alltoall
end interface
interface MPI_Neighbor_alltoallv
subroutine MPI_Neighbor_alltoallv(sendbuf, sendcounts, sdispls, sendtype, recvbuf, &
recvcounts, rdispls, recvtype, comm, ierror)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
integer, dimension(*), intent(in) :: sendcounts
integer, dimension(*), intent(in) :: sdispls
integer, intent(in) :: sendtype
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer, dimension(*), intent(in) :: rdispls
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: ierror
end subroutine MPI_Neighbor_alltoallv
end interface
interface MPI_Neighbor_alltoallw
subroutine MPI_Neighbor_alltoallw(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, &
recvcounts, rdispls, recvtypes, comm, ierror)
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ sendbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@, intent(in) :: sendbuf
integer, dimension(*), intent(in) :: sendcounts
integer(kind=MPI_ADDRESS_KIND), intent(in) :: sdispls
integer, dimension(*), intent(in) :: sendtypes
@OMPI_FORTRAN_IGNORE_TKR_PREDECL@ recvbuf
@OMPI_FORTRAN_IGNORE_TKR_TYPE@ :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer(kind=MPI_ADDRESS_KIND), intent(in) :: rdispls
integer, dimension(*), intent(in) :: recvtypes
integer, intent(in) :: comm
integer, intent(out) :: ierror
end subroutine MPI_Neighbor_alltoallw
end interface
interface MPI_Op_commutative
subroutine MPI_Op_commutative(op, commute, ierror)


@ -79,6 +79,11 @@ fortran_scripts = \
mpi_ibsend_f90.f90.sh \
mpi_iexscan_f90.f90.sh \
mpi_imrecv_f90.f90.sh \
mpi_ineighbor_allgather_f90.f90.sh \
mpi_ineighbor_allgatherv_f90.f90.sh \
mpi_ineighbor_alltoall_f90.f90.sh \
mpi_ineighbor_alltoallv_f90.f90.sh \
mpi_ineighbor_alltoallw_f90.f90.sh \
mpi_igather_f90.f90.sh \
mpi_igatherv_f90.f90.sh \
mpi_irecv_f90.f90.sh \
@ -92,6 +97,11 @@ fortran_scripts = \
mpi_isend_f90.f90.sh \
mpi_issend_f90.f90.sh \
mpi_mrecv_f90.f90.sh \
mpi_neighbor_allgather_f90.f90.sh \
mpi_neighbor_allgatherv_f90.f90.sh \
mpi_neighbor_alltoall_f90.f90.sh \
mpi_neighbor_alltoallv_f90.f90.sh \
mpi_neighbor_alltoallw_f90.f90.sh \
mpi_pack_external_f90.f90.sh \
mpi_pack_f90.f90.sh \
mpi_put_f90.f90.sh \


@ -11,6 +11,8 @@
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2012 FUJITSU LIMITED. All rights reserved.
# Copyright (c) 2012 Inria. All rights reserved.
# Copyright (c) 2013 Los Alamos National Security, LLC. All rights
# reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
@ -347,6 +349,36 @@ do
done
end MPI_Allgather
start MPI_Neighbor_allgather large
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output_7 MPI_Neighbor_allgather ${rank} CH "character${dim}"
output_7 MPI_Neighbor_allgather ${rank} L "logical${dim}"
for kind in $ikinds
do
output_7 MPI_Neighbor_allgather ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output_7 MPI_Neighbor_allgather ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output_7 MPI_Neighbor_allgather ${rank} C${kind} "complex*${kind}${dim}"
done
done
end MPI_Neighbor_allgather
#------------------------------------------------------------------------
output_7_nonblocking() {
@ -465,6 +497,36 @@ do
done
end MPI_Allgatherv
start MPI_Neighbor_allgatherv large
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output_8 MPI_Neighbor_allgatherv ${rank} CH "character${dim}"
output_8 MPI_Neighbor_allgatherv ${rank} L "logical${dim}"
for kind in $ikinds
do
output_8 MPI_Neighbor_allgatherv ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output_8 MPI_Neighbor_allgatherv ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output_8 MPI_Neighbor_allgatherv ${rank} C${kind} "complex*${kind}${dim}"
done
done
end MPI_Neighbor_allgatherv
#------------------------------------------------------------------------
output_8_nonblocking() {
@ -723,6 +785,36 @@ do
done
end MPI_Alltoall
start MPI_Neighbor_alltoall large
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output_11 MPI_Neighbor_alltoall ${rank} CH "character${dim}"
output_11 MPI_Neighbor_alltoall ${rank} L "logical${dim}"
for kind in $ikinds
do
output_11 MPI_Neighbor_alltoall ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output_11 MPI_Neighbor_alltoall ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output_11 MPI_Neighbor_alltoall ${rank} C${kind} "complex*${kind}${dim}"
done
done
end MPI_Neighbor_alltoall
#------------------------------------------------------------------------
output_11_nonblocking() {
@ -842,6 +934,36 @@ do
done
end MPI_Alltoallv
start MPI_Neighbor_alltoallv large
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output_12 MPI_Neighbor_alltoallv ${rank} CH "character${dim}"
output_12 MPI_Neighbor_alltoallv ${rank} L "logical${dim}"
for kind in $ikinds
do
output_12 MPI_Neighbor_alltoallv ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output_12 MPI_Neighbor_alltoallv ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output_12 MPI_Neighbor_alltoallv ${rank} C${kind} "complex*${kind}${dim}"
done
done
end MPI_Neighbor_alltoallv
#------------------------------------------------------------------------
output_12_nonblocking() {
@ -965,6 +1087,66 @@ end MPI_Alltoallw
#------------------------------------------------------------------------
output_13_neighbor() {
if test "$output" = "0"; then
return 0
fi
procedure=$1
rank=$2
type=$4
proc="$1$2D$3"
cat <<EOF
subroutine ${proc}(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, &
recvcounts, rdispls, recvtypes, comm, ierror)
${type}, intent(in) :: sendbuf
integer, dimension(*), intent(in) :: sendcounts
integer(kind=MPI_ADDRESS_KIND), intent(in) :: sdispls
integer, dimension(*), intent(in) :: sendtypes
${type} :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer(kind=MPI_ADDRESS_KIND), intent(in) :: rdispls
integer, dimension(*), intent(in) :: recvtypes
integer, intent(in) :: comm
integer, intent(out) :: ierror
end subroutine ${proc}
EOF
}
start MPI_Neighbor_alltoallw large
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output_13_neighbor MPI_Neighbor_alltoallw ${rank} CH "character${dim}"
output_13_neighbor MPI_Neighbor_alltoallw ${rank} L "logical${dim}"
for kind in $ikinds
do
output_13_neighbor MPI_Neighbor_alltoallw ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output_13_neighbor MPI_Neighbor_alltoallw ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output_13_neighbor MPI_Neighbor_alltoallw ${rank} C${kind} "complex*${kind}${dim}"
done
done
end MPI_Neighbor_alltoallw
#------------------------------------------------------------------------
output_13_nonblocking() {
if test "$output" = "0"; then
return 0
@ -1026,6 +1208,67 @@ end MPI_Ialltoallw
#------------------------------------------------------------------------
output_13_nonblocking_neighbor() {
if test "$output" = "0"; then
return 0
fi
procedure=$1
rank=$2
type=$4
proc="$1$2D$3"
cat <<EOF
subroutine ${proc}(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, &
recvcounts, rdispls, recvtypes, comm, request, ierror)
${type}, intent(in) :: sendbuf
integer, dimension(*), intent(in) :: sendcounts
integer(kind=MPI_ADDRESS_KIND), intent(in) :: sdispls
integer, dimension(*), intent(in) :: sendtypes
${type} :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer(kind=MPI_ADDRESS_KIND), intent(in) :: rdispls
integer, dimension(*), intent(in) :: recvtypes
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierror
end subroutine ${proc}
EOF
}
start MPI_Ineighbor_alltoallw large
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output_13_nonblocking_neighbor MPI_Ineighbor_alltoallw ${rank} CH "character${dim}"
output_13_nonblocking_neighbor MPI_Ineighbor_alltoallw ${rank} L "logical${dim}"
for kind in $ikinds
do
output_13_nonblocking_neighbor MPI_Ineighbor_alltoallw ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output_13_nonblocking_neighbor MPI_Ineighbor_alltoallw ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output_13_nonblocking_neighbor MPI_Ineighbor_alltoallw ${rank} C${kind} "complex*${kind}${dim}"
done
done
end MPI_Ineighbor_alltoallw
#------------------------------------------------------------------------
output_14() {
if test "$output" = "0"; then
return 0


@ -7,6 +7,8 @@
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2013 Los Alamos National Security, LLC. All rights
# reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
@ -75,18 +77,18 @@ do
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output MPI_Allgather ${rank} CH "character${dim}"
output MPI_Allgather ${rank} L "logical${dim}"
output MPI_Neighbor_allgather ${rank} CH "character${dim}"
output MPI_Neighbor_allgather ${rank} L "logical${dim}"
for kind in $ikinds
do
output MPI_Allgather ${rank} I${kind} "integer*${kind}${dim}"
output MPI_Neighbor_allgather ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output MPI_Allgather ${rank} R${kind} "real*${kind}${dim}"
output MPI_Neighbor_allgather ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output MPI_Allgather ${rank} C${kind} "complex*${kind}${dim}"
output MPI_Neighbor_allgather ${rank} C${kind} "complex*${kind}${dim}"
done
done


@ -7,6 +7,8 @@
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2013 Los Alamos National Security, LLC. All rights
# reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
@ -76,18 +78,18 @@ do
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output MPI_Allgatherv ${rank} CH "character${dim}"
output MPI_Allgatherv ${rank} L "logical${dim}"
output MPI_Neighbor_allgatherv ${rank} CH "character${dim}"
output MPI_Neighbor_allgatherv ${rank} L "logical${dim}"
for kind in $ikinds
do
output MPI_Allgatherv ${rank} I${kind} "integer*${kind}${dim}"
output MPI_Neighbor_allgatherv ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output MPI_Allgatherv ${rank} R${kind} "real*${kind}${dim}"
output MPI_Neighbor_allgatherv ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output MPI_Allgatherv ${rank} C${kind} "complex*${kind}${dim}"
output MPI_Neighbor_allgatherv ${rank} C${kind} "complex*${kind}${dim}"
done
done


@ -7,6 +7,8 @@
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2013 Los Alamos National Security, LLC. All rights
# reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow


@ -7,6 +7,8 @@
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2013 Los Alamos National Security, LLC. All rights
# reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow


@ -7,6 +7,8 @@
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2013 Los Alamos National Security, LLC. All rights
# reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
@ -51,11 +53,11 @@ subroutine ${proc}(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, &
include "mpif-config.h"
${type}, intent(in) :: sendbuf
integer, dimension(*), intent(in) :: sendcounts
integer, dimension(*), intent(in) :: sdispls
integer(kind=MPI_ADDRESS_KIND), intent(in) :: sdispls
integer, dimension(*), intent(in) :: sendtypes
${type} :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer, dimension(*), intent(in) :: rdispls
integer(kind=MPI_ADDRESS_KIND), intent(in) :: rdispls
integer, dimension(*), intent(in) :: recvtypes
integer, intent(in) :: comm
integer, intent(out) :: ierror


@ -0,0 +1,95 @@
#! /bin/sh
#
# Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2013 Los Alamos National Security, LLC. All rights
# reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
#
# This file generates a Fortran code to bridge between an explicit F90
# generic interface and the F77 implementation.
#
# This file is automatically generated by either of the scripts
# ../xml/create_mpi_f90_medium.f90.sh or
# ../xml/create_mpi_f90_large.f90.sh
#
. "$1/fortran_kinds.sh"
# This entire file is only generated in medium/large modules. So if
# we're not at least medium, bail now.
check_size large
if test "$output" = "0"; then
exit 0
fi
# Ok, we should continue.
allranks="0 $ranks"
output() {
procedure=$1
rank=$2
type=$4
proc="$1$2D$3"
cat <<EOF
subroutine ${proc}(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
recvtype, comm, request, ierror)
include "mpif-config.h"
${type}, intent(in) :: sendbuf
integer, intent(in) :: sendcount
integer, intent(in) :: sendtype
${type} :: recvbuf
integer, intent(in) :: recvcount
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierror
call ${procedure}(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
recvtype, comm, request, ierror)
end subroutine ${proc}
EOF
}
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output MPI_Ineighbor_allgather ${rank} CH "character${dim}"
output MPI_Ineighbor_allgather ${rank} L "logical${dim}"
for kind in $ikinds
do
output MPI_Ineighbor_allgather ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output MPI_Ineighbor_allgather ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output MPI_Ineighbor_allgather ${rank} C${kind} "complex*${kind}${dim}"
done
done


@ -0,0 +1,96 @@
#! /bin/sh
#
# Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2013 Los Alamos National Security, LLC. All rights
# reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
#
# This file generates a Fortran code to bridge between an explicit F90
# generic interface and the F77 implementation.
#
# This file is automatically generated by either of the scripts
# ../xml/create_mpi_f90_medium.f90.sh or
# ../xml/create_mpi_f90_large.f90.sh
#
. "$1/fortran_kinds.sh"
# This entire file is only generated in medium/large modules. So if
# we're not at least medium, bail now.
check_size large
if test "$output" = "0"; then
exit 0
fi
# Ok, we should continue.
allranks="0 $ranks"
output() {
procedure=$1
rank=$2
type=$4
proc="$1$2D$3"
cat <<EOF
subroutine ${proc}(sendbuf, sendcount, sendtype, recvbuf, recvcounts, &
displs, recvtype, comm, request, ierror)
include "mpif-config.h"
${type}, intent(in) :: sendbuf
integer, intent(in) :: sendcount
integer, intent(in) :: sendtype
${type} :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer, dimension(*), intent(in) :: displs
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierror
call ${procedure}(sendbuf, sendcount, sendtype, recvbuf, recvcounts, &
displs, recvtype, comm, request, ierror)
end subroutine ${proc}
EOF
}
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output MPI_Ineighbor_allgatherv ${rank} CH "character${dim}"
output MPI_Ineighbor_allgatherv ${rank} L "logical${dim}"
for kind in $ikinds
do
output MPI_Ineighbor_allgatherv ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output MPI_Ineighbor_allgatherv ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output MPI_Ineighbor_allgatherv ${rank} C${kind} "complex*${kind}${dim}"
done
done


@ -0,0 +1,95 @@
#! /bin/sh
#
# Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2013 Los Alamos National Security, LLC. All rights
# reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
#
# This file generates a Fortran code to bridge between an explicit F90
# generic interface and the F77 implementation.
#
# This file is automatically generated by either of the scripts
# ../xml/create_mpi_f90_medium.f90.sh or
# ../xml/create_mpi_f90_large.f90.sh
#
. "$1/fortran_kinds.sh"
# This entire file is only generated in medium/large modules. So if
# we're not at least medium, bail now.
check_size large
if test "$output" = "0"; then
exit 0
fi
# Ok, we should continue.
allranks="0 $ranks"
output() {
procedure=$1
rank=$2
type=$4
proc="$1$2D$3"
cat <<EOF
subroutine ${proc}(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
recvtype, comm, request, ierror)
include "mpif-config.h"
${type}, intent(in) :: sendbuf
integer, intent(in) :: sendcount
integer, intent(in) :: sendtype
${type} :: recvbuf
integer, intent(in) :: recvcount
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierror
call ${procedure}(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
recvtype, comm, request, ierror)
end subroutine ${proc}
EOF
}
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output MPI_Ineighbor_alltoall ${rank} CH "character${dim}"
output MPI_Ineighbor_alltoall ${rank} L "logical${dim}"
for kind in $ikinds
do
output MPI_Ineighbor_alltoall ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output MPI_Ineighbor_alltoall ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output MPI_Ineighbor_alltoall ${rank} C${kind} "complex*${kind}${dim}"
done
done


@ -0,0 +1,97 @@
#! /bin/sh
#
# Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2013 Los Alamos National Security, LLC. All rights
# reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
#
# This file generates a Fortran code to bridge between an explicit F90
# generic interface and the F77 implementation.
#
# This file is automatically generated by either of the scripts
# ../xml/create_mpi_f90_medium.f90.sh or
# ../xml/create_mpi_f90_large.f90.sh
#
. "$1/fortran_kinds.sh"
# This entire file is only generated in medium/large modules. So if
# we're not at least medium, bail now.
check_size large
if test "$output" = "0"; then
exit 0
fi
# Ok, we should continue.
allranks="0 $ranks"
output() {
procedure=$1
rank=$2
type=$4
proc="$1$2D$3"
cat <<EOF
subroutine ${proc}(sendbuf, sendcounts, sdispls, sendtype, recvbuf, &
recvcounts, rdispls, recvtype, comm, request, ierror)
include "mpif-config.h"
${type}, intent(in) :: sendbuf
integer, dimension(*), intent(in) :: sendcounts
integer, dimension(*), intent(in) :: sdispls
integer, intent(in) :: sendtype
${type} :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer, dimension(*), intent(in) :: rdispls
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierror
call ${procedure}(sendbuf, sendcounts, sdispls, sendtype, recvbuf, &
recvcounts, rdispls, recvtype, comm, request, ierror)
end subroutine ${proc}
EOF
}
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output MPI_Ineighbor_alltoallv ${rank} CH "character${dim}"
output MPI_Ineighbor_alltoallv ${rank} L "logical${dim}"
for kind in $ikinds
do
output MPI_Ineighbor_alltoallv ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output MPI_Ineighbor_alltoallv ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output MPI_Ineighbor_alltoallv ${rank} C${kind} "complex*${kind}${dim}"
done
done


@ -0,0 +1,95 @@
#! /bin/sh
#
# Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
#
# This file generates a Fortran code to bridge between an explicit F90
# generic interface and the F77 implementation.
#
# This file is automatically generated by either of the scripts
# ../xml/create_mpi_f90_medium.f90.sh or
# ../xml/create_mpi_f90_large.f90.sh
#
. "$1/fortran_kinds.sh"
# This entire file is only generated in medium/large modules. So if
# we're not at least medium, bail now.
check_size large
if test "$output" = "0"; then
exit 0
fi
# Ok, we should continue.
allranks="0 $ranks"
output() {
procedure=$1
rank=$2
type=$4
proc="$1$2D$3"
cat <<EOF
subroutine ${proc}(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, &
recvcounts, rdispls, recvtypes, comm, request, ierror)
include "mpif-config.h"
${type}, intent(in) :: sendbuf
integer, dimension(*), intent(in) :: sendcounts
integer(kind=MPI_ADDRESS_KIND), intent(in) :: sdispls
integer, dimension(*), intent(in) :: sendtypes
${type} :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer(kind=MPI_ADDRESS_KIND), intent(in) :: rdispls
integer, dimension(*), intent(in) :: recvtypes
integer, intent(in) :: comm
integer, intent(out) :: request
integer, intent(out) :: ierror
call ${procedure}(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, &
recvcounts, rdispls, recvtypes, comm, request, ierror)
end subroutine ${proc}
EOF
}
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output MPI_Ineighbor_alltoallw ${rank} CH "character${dim}"
output MPI_Ineighbor_alltoallw ${rank} L "logical${dim}"
for kind in $ikinds
do
output MPI_Ineighbor_alltoallw ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output MPI_Ineighbor_alltoallw ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output MPI_Ineighbor_alltoallw ${rank} C${kind} "complex*${kind}${dim}"
done
done


@ -0,0 +1,94 @@
#! /bin/sh
#
# Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2013 Los Alamos National Security, LLC. All rights
# reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
#
# This file generates a Fortran code to bridge between an explicit F90
# generic interface and the F77 implementation.
#
# This file is automatically generated by either of the scripts
# ../xml/create_mpi_f90_medium.f90.sh or
# ../xml/create_mpi_f90_large.f90.sh
#
. "$1/fortran_kinds.sh"
# This entire file is only generated in medium/large modules. So if
# we're not at least medium, bail now.
check_size large
if test "$output" = "0"; then
exit 0
fi
# Ok, we should continue.
allranks="0 $ranks"
output() {
procedure=$1
rank=$2
type=$4
proc="$1$2D$3"
cat <<EOF
subroutine ${proc}(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
recvtype, comm, ierror)
include "mpif-config.h"
${type}, intent(in) :: sendbuf
integer, intent(in) :: sendcount
integer, intent(in) :: sendtype
${type} :: recvbuf
integer, intent(in) :: recvcount
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: ierror
call ${procedure}(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
recvtype, comm, ierror)
end subroutine ${proc}
EOF
}
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output MPI_Neighbor_allgather ${rank} CH "character${dim}"
output MPI_Neighbor_allgather ${rank} L "logical${dim}"
for kind in $ikinds
do
output MPI_Neighbor_allgather ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output MPI_Neighbor_allgather ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output MPI_Neighbor_allgather ${rank} C${kind} "complex*${kind}${dim}"
done
done


@ -0,0 +1,95 @@
#! /bin/sh
#
# Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2013 Los Alamos National Security, LLC. All rights
# reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
#
# This file generates a Fortran code to bridge between an explicit F90
# generic interface and the F77 implementation.
#
# This file is automatically generated by either of the scripts
# ../xml/create_mpi_f90_medium.f90.sh or
# ../xml/create_mpi_f90_large.f90.sh
#
. "$1/fortran_kinds.sh"
# This entire file is only generated in medium/large modules. So if
# we're not at least medium, bail now.
check_size large
if test "$output" = "0"; then
exit 0
fi
# Ok, we should continue.
allranks="0 $ranks"
output() {
procedure=$1
rank=$2
type=$4
proc="$1$2D$3"
cat <<EOF
subroutine ${proc}(sendbuf, sendcount, sendtype, recvbuf, recvcounts, &
displs, recvtype, comm, ierror)
include "mpif-config.h"
${type}, intent(in) :: sendbuf
integer, intent(in) :: sendcount
integer, intent(in) :: sendtype
${type} :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer, dimension(*), intent(in) :: displs
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: ierror
call ${procedure}(sendbuf, sendcount, sendtype, recvbuf, recvcounts, &
displs, recvtype, comm, ierror)
end subroutine ${proc}
EOF
}
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output MPI_Neighbor_allgatherv ${rank} CH "character${dim}"
output MPI_Neighbor_allgatherv ${rank} L "logical${dim}"
for kind in $ikinds
do
output MPI_Neighbor_allgatherv ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output MPI_Neighbor_allgatherv ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output MPI_Neighbor_allgatherv ${rank} C${kind} "complex*${kind}${dim}"
done
done


@ -0,0 +1,94 @@
#! /bin/sh
#
# Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2013 Los Alamos National Security, LLC. All rights
# reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
#
# This file generates a Fortran code to bridge between an explicit F90
# generic interface and the F77 implementation.
#
# This file is automatically generated by either of the scripts
# ../xml/create_mpi_f90_medium.f90.sh or
# ../xml/create_mpi_f90_large.f90.sh
#
. "$1/fortran_kinds.sh"
# This entire file is only generated in medium/large modules. So if
# we're not at least medium, bail now.
check_size large
if test "$output" = "0"; then
exit 0
fi
# Ok, we should continue.
allranks="0 $ranks"
output() {
procedure=$1
rank=$2
type=$4
proc="$1$2D$3"
cat <<EOF
subroutine ${proc}(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
recvtype, comm, ierror)
include "mpif-config.h"
${type}, intent(in) :: sendbuf
integer, intent(in) :: sendcount
integer, intent(in) :: sendtype
${type} :: recvbuf
integer, intent(in) :: recvcount
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: ierror
call ${procedure}(sendbuf, sendcount, sendtype, recvbuf, recvcount, &
recvtype, comm, ierror)
end subroutine ${proc}
EOF
}
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output MPI_Neighbor_alltoall ${rank} CH "character${dim}"
output MPI_Neighbor_alltoall ${rank} L "logical${dim}"
for kind in $ikinds
do
output MPI_Neighbor_alltoall ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output MPI_Neighbor_alltoall ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output MPI_Neighbor_alltoall ${rank} C${kind} "complex*${kind}${dim}"
done
done


@ -0,0 +1,96 @@
#! /bin/sh
#
# Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2013 Los Alamos National Security, LLC. All rights
# reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
#
# This file generates a Fortran code to bridge between an explicit F90
# generic interface and the F77 implementation.
#
# This file is automatically generated by either of the scripts
# ../xml/create_mpi_f90_medium.f90.sh or
# ../xml/create_mpi_f90_large.f90.sh
#
. "$1/fortran_kinds.sh"
# This entire file is only generated in medium/large modules. So if
# we're not at least medium, bail now.
check_size large
if test "$output" = "0"; then
exit 0
fi
# Ok, we should continue.
allranks="0 $ranks"
output() {
procedure=$1
rank=$2
type=$4
proc="$1$2D$3"
cat <<EOF
subroutine ${proc}(sendbuf, sendcounts, sdispls, sendtype, recvbuf, &
recvcounts, rdispls, recvtype, comm, ierror)
include "mpif-config.h"
${type}, intent(in) :: sendbuf
integer, dimension(*), intent(in) :: sendcounts
integer, dimension(*), intent(in) :: sdispls
integer, intent(in) :: sendtype
${type} :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer, dimension(*), intent(in) :: rdispls
integer, intent(in) :: recvtype
integer, intent(in) :: comm
integer, intent(out) :: ierror
call ${procedure}(sendbuf, sendcounts, sdispls, sendtype, recvbuf, &
recvcounts, rdispls, recvtype, comm, ierror)
end subroutine ${proc}
EOF
}
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output MPI_Neighbor_alltoallv ${rank} CH "character${dim}"
output MPI_Neighbor_alltoallv ${rank} L "logical${dim}"
for kind in $ikinds
do
output MPI_Neighbor_alltoallv ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output MPI_Neighbor_alltoallv ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output MPI_Neighbor_alltoallv ${rank} C${kind} "complex*${kind}${dim}"
done
done


@ -0,0 +1,96 @@
#! /bin/sh
#
# Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2013 Los Alamos National Security, LLC. All rights
# reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
#
# This file generates a Fortran code to bridge between an explicit F90
# generic interface and the F77 implementation.
#
# This file is automatically generated by either of the scripts
# ../xml/create_mpi_f90_medium.f90.sh or
# ../xml/create_mpi_f90_large.f90.sh
#
. "$1/fortran_kinds.sh"
# This entire file is only generated in medium/large modules. So if
# we're not at least medium, bail now.
check_size large
if test "$output" = "0"; then
exit 0
fi
# Ok, we should continue.
allranks="0 $ranks"
output() {
procedure=$1
rank=$2
type=$4
proc="$1$2D$3"
cat <<EOF
subroutine ${proc}(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, &
recvcounts, rdispls, recvtypes, comm, ierror)
include "mpif-config.h"
${type}, intent(in) :: sendbuf
integer, dimension(*), intent(in) :: sendcounts
integer(kind=MPI_ADDRESS_KIND), intent(in) :: sdispls
integer, dimension(*), intent(in) :: sendtypes
${type} :: recvbuf
integer, dimension(*), intent(in) :: recvcounts
integer(kind=MPI_ADDRESS_KIND), intent(in) :: rdispls
integer, dimension(*), intent(in) :: recvtypes
integer, intent(in) :: comm
integer, intent(out) :: ierror
call ${procedure}(sendbuf, sendcounts, sdispls, sendtypes, recvbuf, &
recvcounts, rdispls, recvtypes, comm, ierror)
end subroutine ${proc}
EOF
}
for rank in $allranks
do
case "$rank" in 0) dim='' ; esac
case "$rank" in 1) dim=', dimension(*)' ; esac
case "$rank" in 2) dim=', dimension(1,*)' ; esac
case "$rank" in 3) dim=', dimension(1,1,*)' ; esac
case "$rank" in 4) dim=', dimension(1,1,1,*)' ; esac
case "$rank" in 5) dim=', dimension(1,1,1,1,*)' ; esac
case "$rank" in 6) dim=', dimension(1,1,1,1,1,*)' ; esac
case "$rank" in 7) dim=', dimension(1,1,1,1,1,1,*)' ; esac
output MPI_Neighbor_alltoallw ${rank} CH "character${dim}"
output MPI_Neighbor_alltoallw ${rank} L "logical${dim}"
for kind in $ikinds
do
output MPI_Neighbor_alltoallw ${rank} I${kind} "integer*${kind}${dim}"
done
for kind in $rkinds
do
output MPI_Neighbor_alltoallw ${rank} R${kind} "real*${kind}${dim}"
done
for kind in $ckinds
do
output MPI_Neighbor_alltoallw ${rank} C${kind} "complex*${kind}${dim}"
done
done

ompi/mpi/man/man3/MPI_Neighbor_allgather.3in (new file, 110 lines)

@ -0,0 +1,110 @@
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Neighbor_allgather 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Neighbor_allgather\fP \- Gathers and distributes data from and to all neighbors
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Neighbor_allgather(void\fI *sendbuf\fP, int \fI sendcount\fP,
MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, int\fI recvcount\fP,
MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP)
.fi
.SH Fortran Syntax
.nf
INCLUDE 'mpif.h'
MPI_NEIGHBOR_ALLGATHER(\fISENDBUF\fP,\fI SENDCOUNT\fP,\fI SENDTYPE\fP,\fI RECVBUF\fP,\fI RECVCOUNT\fP,\fI
RECVTYPE\fP,\fI COMM\fP,\fI IERROR\fP)
<type> \fISENDBUF\fP (*), \fIRECVBUF\fP (*)
INTEGER \fISENDCOUNT\fP,\fI SENDTYPE\fP,\fI RECVCOUNT\fP,\fI RECVTYPE\fP,\fI COMM\fP,
INTEGER \fIIERROR\fP
.fi
.SH INPUT PARAMETERS
.ft R
.TP 1i
sendbuf
Starting address of send buffer (choice).
.TP 1i
sendcount
Number of elements in send buffer (integer).
.TP 1i
sendtype
Datatype of send buffer elements (handle).
.TP 1i
recvbuf
Starting address of recv buffer (choice).
.TP 1i
recvcount
Number of elements received from each neighbor (integer).
.TP 1i
recvtype
Datatype of receive buffer elements (handle).
.TP 1i
comm
Communicator (handle).
.SH OUTPUT PARAMETERS
.ft R
.TP 1i
recvbuf
Address of receive buffer (choice).
.ft R
.TP 1i
IERROR
Fortran only: Error status (integer).
.SH DESCRIPTION
.ft R
MPI_Neighbor_allgather is similar to MPI_Allgather, except that each process gathers data only from its neighbors in the virtual topology, instead of from all processes. The neighbors and the buffer layout are determined by the topology of \fIcomm\fP.
.sp
The type signature associated with \fIsendcount\fP, \fIsendtype\fP at a process must be equal to the type signature associated with \fIrecvcount\fP, \fIrecvtype\fP at any neighboring process.
.fi
.sp
.SH NEIGHBOR ORDERING
For a distributed graph topology, created with MPI_Dist_graph_create, the sequence of neighbors
in the send and receive buffers at each process is defined as the sequence returned by MPI_Dist_graph_neighbors
for destinations and sources, respectively. For a general graph topology, created with MPI_Graph_create, the order of
neighbors in the send and receive buffers is defined as the sequence of neighbors as returned by MPI_Graph_neighbors.
Note that general graph topologies should generally be replaced by the distributed graph topologies.
For a Cartesian topology, created with MPI_Cart_create, the sequence of neighbors in the send and receive
buffers at each process is defined by order of the dimensions, first the neighbor in the negative direction
and then in the positive direction with displacement 1. The numbers of sources and destinations in the
communication routines are 2*ndims with ndims defined in MPI_Cart_create. If a neighbor does not exist, i.e., at
the border of a Cartesian topology in the case of a non-periodic virtual grid dimension (i.e.,
periods[...]==false), then this neighbor is defined to be MPI_PROC_NULL.
If a neighbor in any of the functions is MPI_PROC_NULL, then the neighborhood collective communication behaves
like a point-to-point communication with MPI_PROC_NULL in this direction. That is, the buffer is still part of
the sequence of neighbors but it is neither communicated nor updated.
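.SH EXAMPLE
.ft R
A minimal sketch (assuming a periodic two-dimensional Cartesian communicator;
the variable names and the use of MPI_INT data are illustrative only) in which
every process contributes its rank and receives one integer from each of its
2*ndims neighbors, in the dimension order described above:
.sp
.nf
#include <mpi.h>

int main(int argc, char *argv[])
{
    int size, rank, nbrs[4];          /* 2*ndims receive slots           */
    int dims[2]    = {0, 0};
    int periods[2] = {1, 1};          /* periodic: every neighbor exists */
    MPI_Comm cart;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Dims_create(size, 2, dims);
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 0, &cart);

    /* nbrs[0], nbrs[1]: -/+ neighbor in dimension 0
       nbrs[2], nbrs[3]: -/+ neighbor in dimension 1 */
    MPI_Neighbor_allgather(&rank, 1, MPI_INT, nbrs, 1, MPI_INT, cart);

    MPI_Comm_free(&cart);
    MPI_Finalize();
    return 0;
}
.fi
.sp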
.SH NOTES
.sp
The MPI_IN_PLACE option for \fIsendbuf\fP is not meaningful for this operation.
.SH ERRORS
Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument.
.sp
Before the error value is returned, the current MPI error handler is
called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler
may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error.
.SH SEE ALSO
.ft R
.sp
MPI_Neighbor_allgatherv
MPI_Cart_create
MPI_Graph_create
MPI_Dist_graph_create
.br
MPI_Gather

ompi/mpi/man/man3/MPI_Neighbor_allgatherv.3in (new file, 104 lines)

@ -0,0 +1,104 @@
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2007-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Neighbor_allgatherv 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Neighbor_allgatherv\fP \- Gathers and distributes data from and to all neighbors. Each process may contribute a different amount of data.
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Neighbor_allgatherv(void\fI *sendbuf\fP, int\fI sendcount\fP,
MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, int\fI recvcounts[]\fP,
int\fI displs[]\fP, MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP)
.fi
.SH Fortran Syntax
.nf
INCLUDE 'mpif.h'
MPI_NEIGHBOR_ALLGATHERV(\fISENDBUF\fP,\fI SENDCOUNT\fP, \fISENDTYPE\fP,\fI RECVBUF\fP,
\fIRECVCOUNT\fP,\fI DISPLS\fP, \fIRECVTYPE\fP,\fI COMM\fP,\fI IERROR\fP)
<type> \fISENDBUF\fP(*), \fIRECVBUF\fP(*)
INTEGER \fISENDCOUNT\fP,\fI SENDTYPE\fP, \fIRECVCOUNT\fP(*),
INTEGER \fIDISPLS\fP(*),\fI RECVTYPE\fP,\fI COMM\fP,\fI IERROR\fP
.fi
.SH INPUT PARAMETERS
.ft R
.TP 1i
sendbuf
Starting address of send buffer (choice).
.TP 1i
sendcount
Number of elements in send buffer (integer).
.TP 1i
sendtype
Datatype of send buffer elements (handle).
.TP 1i
recvcount
Integer array (of length equal to the number of neighbors) containing the number of elements that are received from each neighbor.
.TP 1i
displs
Integer array (of length equal to the number of neighbors). Entry i specifies the displacement (relative to recvbuf) at which to place the incoming data from neighbor i.
.TP 1i
recvtype
Datatype of receive buffer elements (handle).
.TP 1i
comm
Communicator (handle).
.sp
.SH OUTPUT PARAMETERS
.ft R
.TP 1i
recvbuf
Address of receive buffer (choice).
.ft R
.TP 1i
IERROR
Fortran only: Error status (integer).
.SH DESCRIPTION
.ft R
MPI_Neighbor_allgatherv is similar to MPI_Neighbor_allgather in that all processes gather data from all neighbors, except that each process can send a different amount of data. The block of data sent from the jth neighbor is received by every neighbor and placed in the jth block of the buffer \fIrecvbuf\fP. The neighbors and buffer layout are determined by the topology of \fIcomm\fP.
.sp
The type signature associated with \fIsendcount\fP and \fIsendtype\fP at a process must be equal to the type signature associated with the corresponding entry in \fIrecvcounts\fP and with \fIrecvtype\fP at its neighboring processes.
.sp
.SH NEIGHBOR ORDERING
For a distributed graph topology, created with MPI_Dist_graph_create, the sequence of neighbors
in the send and receive buffers at each process is defined as the sequence returned by MPI_Dist_graph_neighbors
for destinations and sources, respectively. For a general graph topology, created with MPI_Graph_create, the order of
neighbors in the send and receive buffers is defined as the sequence of neighbors as returned by MPI_Graph_neighbors.
Note that general graph topologies should generally be replaced by the distributed graph topologies.
For a Cartesian topology, created with MPI_Cart_create, the sequence of neighbors in the send and receive
buffers at each process is defined by order of the dimensions, first the neighbor in the negative direction
and then in the positive direction with displacement 1. The numbers of sources and destinations in the
communication routines are 2*ndims with ndims defined in MPI_Cart_create. If a neighbor does not exist, i.e., at
the border of a Cartesian topology in the case of a non-periodic virtual grid dimension (i.e.,
periods[...]==false), then this neighbor is defined to be MPI_PROC_NULL.
If a neighbor in any of the functions is MPI_PROC_NULL, then the neighborhood collective communication behaves
like a point-to-point communication with MPI_PROC_NULL in this direction. That is, the buffer is still part of
the sequence of neighbors but it is neither communicated nor updated.
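.SH EXAMPLE
.ft R
A minimal sketch, assuming a periodic one-dimensional Cartesian ring (the layout and variable names are illustrative, not part of the interface): each rank contributes a different number of ints, so the counts are exchanged first with MPI_Neighbor_allgather and then used to build \fIrecvcounts\fP and \fIdispls\fP.
.sp
.nf
#include <mpi.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    MPI_Comm ring;
    int size, rank, i, periodic = 1;
    int sendcount, recvcounts[2], displs[2];
    int senddata[3], *recvbuf;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Cart_create(MPI_COMM_WORLD, 1, &size, &periodic, 0, &ring);
    MPI_Comm_rank(ring, &rank);

    sendcount = rank % 3 + 1;                 /* 1, 2, or 3 elements */
    for (i = 0; i < sendcount; ++i) senddata[i] = rank;

    /* Step 1: tell both neighbors how many elements to expect. */
    MPI_Neighbor_allgather(&sendcount, 1, MPI_INT,
                           recvcounts, 1, MPI_INT, ring);

    /* Step 2: place the two incoming blocks back to back in recvbuf. */
    displs[0] = 0;
    displs[1] = recvcounts[0];
    recvbuf = malloc((recvcounts[0] + recvcounts[1]) * sizeof(int));

    MPI_Neighbor_allgatherv(senddata, sendcount, MPI_INT,
                            recvbuf, recvcounts, displs, MPI_INT, ring);

    free(recvbuf);
    MPI_Comm_free(&ring);
    MPI_Finalize();
    return 0;
}
.fi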
.SH NOTES
The MPI_IN_PLACE option for \fIsendbuf\fP is not meaningful for this operation.
.SH ERRORS
Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument.
.sp
Before the error value is returned, the current MPI error handler is
called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler
may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error.
.SH SEE ALSO
.ft R
.nf
MPI_Neighbor_allgather
MPI_Cart_create
MPI_Graph_create
MPI_Dist_graph_create

ompi/mpi/man/man3/MPI_Neighbor_alltoall.3in (new file)
@ -0,0 +1,151 @@
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Neighbor_alltoall 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Neighbor_alltoall\fP \- All processes send data to neighboring processes in a virtual topology communicator
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Neighbor_alltoall(void *\fIsendbuf\fP, int \fIsendcount\fP,
MPI_Datatype \fIsendtype\fP, void *\fIrecvbuf\fP, int \fIrecvcount\fP,
MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP)
.fi
.SH Fortran Syntax
.nf
INCLUDE 'mpif.h'
MPI_NEIGHBOR_ALLTOALL(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT,
RECVTYPE, COMM, IERROR\fP)
<type> \fISENDBUF(*), RECVBUF(*)\fP
INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE\fP
INTEGER \fICOMM, IERROR\fP
.fi
.SH INPUT PARAMETERS
.ft R
.TP 1.2i
sendbuf
Starting address of send buffer (choice).
.TP 1.2i
sendcount
Number of elements to send to each neighbor (integer).
.TP 1.2i
sendtype
Datatype of send buffer elements (handle).
.TP 1.2i
recvcount
Number of elements to receive from each neighbor (integer).
.TP 1.2i
recvtype
Datatype of receive buffer elements (handle).
.TP 1.2i
comm
Communicator over which data is to be exchanged (handle).
.SH OUTPUT PARAMETERS
.ft R
.TP 1.2i
recvbuf
Starting address of receive buffer (choice).
.ft R
.TP 1.2i
IERROR
Fortran only: Error status (integer).
.SH DESCRIPTION
.ft R
MPI_Neighbor_alltoall is a collective operation in which all processes send the same amount of data to, and receive the same amount of data from, each of their neighbors. The operation of this routine can be represented as follows, where each process performs 2n (n being the number of neighbors in communicator \fIcomm\fP) independent point-to-point communications. The neighbors and buffer layout are determined by the topology of \fIcomm\fP.
.sp
Example of MPI_Neighbor_alltoall semantics for Cartesian topologies:
.sp
.nf
MPI_Cart_get(\fIcomm\fP, maxdims, dims, periods, coords);
for (dim = 0, i = 0 ; dim < ndims ; ++dim) {
    MPI_Cart_shift(\fIcomm\fP, dim, 1, &r0, &r1);
    MPI_Isend(\fIsendbuf\fP + i * \fIsendcount\fP * extent(\fIsendtype\fP),
              \fIsendcount\fP, \fIsendtype\fP, r0, ..., \fIcomm\fP, ...);
    MPI_Irecv(\fIrecvbuf\fP + i * \fIrecvcount\fP * extent(\fIrecvtype\fP),
              \fIrecvcount\fP, \fIrecvtype\fP, r0, ..., \fIcomm\fP, ...);
    ++i;
    MPI_Isend(\fIsendbuf\fP + i * \fIsendcount\fP * extent(\fIsendtype\fP),
              \fIsendcount\fP, \fIsendtype\fP, r1, ..., \fIcomm\fP, &req[i]);
    MPI_Irecv(\fIrecvbuf\fP + i * \fIrecvcount\fP * extent(\fIrecvtype\fP),
              \fIrecvcount\fP, \fIrecvtype\fP, r1, ..., \fIcomm\fP, ...);
    ++i;
}
MPI_Waitall (...);
.fi
.sp
Each process breaks up its local \fIsendbuf\fP into n blocks \- each
containing \fIsendcount\fP elements of type \fIsendtype\fP \- and
divides its \fIrecvbuf\fP similarly according to \fIrecvcount\fP and
\fIrecvtype\fP. Process j sends the k-th block of its local
\fIsendbuf\fP to neighbor k, which places the data in the j-th block of
its local \fIrecvbuf\fP. The amount of data sent must be equal to the
amount of data received, pairwise, between every pair of processes.
.sp
.SH NEIGHBOR ORDERING
For a distributed graph topology, created with MPI_Dist_graph_create, the sequence of neighbors
in the send and receive buffers at each process is defined as the sequence returned by MPI_Dist_graph_neighbors
for destinations and sources, respectively. For a general graph topology, created with MPI_Graph_create, the order of
neighbors in the send and receive buffers is defined as the sequence of neighbors as returned by MPI_Graph_neighbors.
Note that general graph topologies should generally be replaced by the distributed graph topologies.
For a Cartesian topology, created with MPI_Cart_create, the sequence of neighbors in the send and receive
buffers at each process is defined by order of the dimensions, first the neighbor in the negative direction
and then in the positive direction with displacement 1. The numbers of sources and destinations in the
communication routines are 2*ndims with ndims defined in MPI_Cart_create. If a neighbor does not exist, i.e., at
the border of a Cartesian topology in the case of a non-periodic virtual grid dimension (i.e.,
periods[...]==false), then this neighbor is defined to be MPI_PROC_NULL.
If a neighbor in any of the functions is MPI_PROC_NULL, then the neighborhood collective communication behaves
like a point-to-point communication with MPI_PROC_NULL in this direction. That is, the buffer is still part of
the sequence of neighbors but it is neither communicated nor updated.
.sp
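.SH EXAMPLE
.ft R
A compilable variant of the pseudo-code above, given here only as a minimal sketch (the periodic two-dimensional grid and the variable names are illustrative assumptions): one int travels to and from each of the 2*ndims Cartesian neighbors in a single call.
.sp
.nf
#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_Comm cart;
    int size, rank, k;
    int dims[2] = {0, 0}, periods[2] = {1, 1};
    int sendbuf[4], recvbuf[4];

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Dims_create(size, 2, dims);
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 0, &cart);
    MPI_Comm_rank(cart, &rank);

    /* Block k of sendbuf goes to neighbor k; block k of recvbuf is
     * filled by neighbor k, in the order -dim0, +dim0, -dim1, +dim1. */
    for (k = 0; k < 4; ++k) sendbuf[k] = rank * 10 + k;

    MPI_Neighbor_alltoall(sendbuf, 1, MPI_INT,
                          recvbuf, 1, MPI_INT, cart);

    MPI_Comm_free(&cart);
    MPI_Finalize();
    return 0;
}
.fi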
.SH NOTES
.ft R
The MPI_IN_PLACE option for \fIsendbuf\fP is not meaningful for this function.
.sp
All arguments on all processes are significant. The \fIcomm\fP argument,
in particular, must describe the same communicator on all processes. \fIcomm\fP
must be either a Cartesian, graph, or distributed graph communicator.
.sp
There are two MPI library functions that are more general than
MPI_Neighbor_alltoall. MPI_Neighbor_alltoallv allows all-to-all communication to and
from buffers that need not be contiguous; different processes may
send and receive different amounts of data. MPI_Neighbor_alltoallw expands
MPI_Neighbor_alltoallv's functionality to allow the exchange of data with
different datatypes.
.SH ERRORS
.ft R
Almost all MPI routines return an error value; C routines as
the value of the function and Fortran routines in the last argument.
.sp
Before the error value is returned, the current MPI error handler is
called. By default, this error handler aborts the MPI job, except for
I/O function errors. The error handler may be changed with
MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN
may be used to cause error values to be returned. Note that MPI does not
guarantee that an MPI program can continue past an error.
.SH SEE ALSO
.ft R
.nf
MPI_Neighbor_alltoallv
MPI_Neighbor_alltoallw
MPI_Cart_create
MPI_Graph_create
MPI_Dist_graph_create
MPI_Dist_graph_create_adjacent

ompi/mpi/man/man3/MPI_Neighbor_alltoallv.3in (new file)
@ -0,0 +1,173 @@
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Neighbor_alltoallv 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Neighbor_alltoallv\fP \- All processes send different amounts of data to, and receive different amounts of data from, all neighbors
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Neighbor_alltoallv(void *\fIsendbuf\fP, int \fIsendcounts\fP[],
int \fIsdispls\fP[], MPI_Datatype \fIsendtype\fP,
void *\fIrecvbuf\fP, int\fI recvcounts\fP[],
int \fIrdispls\fP[], MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP)
.fi
.SH Fortran Syntax
.nf
INCLUDE 'mpif.h'
MPI_NEIGHBOR_ALLTOALLV(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPE,
RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPE, COMM, IERROR\fP)
<type> \fISENDBUF(*), RECVBUF(*)\fP
INTEGER \fISENDCOUNTS(*), SDISPLS(*), SENDTYPE\fP
INTEGER \fIRECVCOUNTS(*), RDISPLS(*), RECVTYPE\fP
INTEGER \fICOMM, IERROR\fP
.fi
.SH INPUT PARAMETERS
.ft R
.TP 1.2i
sendbuf
Starting address of send buffer.
.TP 1.2i
sendcounts
Integer array, where entry i specifies the number of elements to send
to neighbor i.
.TP 1.2i
sdispls
Integer array, where entry i specifies the displacement (offset from
\fIsendbuf\fP, in units of \fIsendtype\fP) from which to send data to
neighbor i.
.TP 1.2i
sendtype
Datatype of send buffer elements.
.TP 1.2i
recvcounts
Integer array, where entry j specifies the number of elements to
receive from neighbor j.
.TP 1.2i
rdispls
Integer array, where entry j specifies the displacement (offset from
\fIrecvbuf\fP, in units of \fIrecvtype\fP) to which data from neighbor j
should be written.
.TP 1.2i
recvtype
Datatype of receive buffer elements.
.TP 1.2i
comm
Communicator over which data is to be exchanged.
.SH OUTPUT PARAMETERS
.ft R
.TP 1.2i
recvbuf
Address of receive buffer.
.ft R
.TP 1.2i
IERROR
Fortran only: Error status.
.SH DESCRIPTION
.ft R
MPI_Neighbor_alltoallv is a generalized collective operation in which all
processes send data to and receive data from all neighbors. It
adds flexibility to MPI_Neighbor_alltoall by allowing the user to specify data
to send and receive vector-style (via a displacement and element
count). The operation of this routine can be thought of as follows,
where each process performs 2n (n being the number of neighbors in
the topology of communicator \fIcomm\fP) independent point-to-point communications.
The neighbors and buffer layout are determined by the topology of \fIcomm\fP.
.sp
.nf
MPI_Cart_get(\fIcomm\fP, maxdims, dims, periods, coords);
for (dim = 0, i = 0 ; dim < ndims ; ++dim) {
    MPI_Cart_shift(\fIcomm\fP, dim, 1, &r0, &r1);
    MPI_Isend(\fIsendbuf\fP + \fIsdispls\fP[i] * extent(\fIsendtype\fP),
              \fIsendcounts\fP[i], \fIsendtype\fP, r0, ..., \fIcomm\fP, ...);
    MPI_Irecv(\fIrecvbuf\fP + \fIrdispls\fP[i] * extent(\fIrecvtype\fP),
              \fIrecvcounts\fP[i], \fIrecvtype\fP, r0, ..., \fIcomm\fP, ...);
    ++i;
    MPI_Isend(\fIsendbuf\fP + \fIsdispls\fP[i] * extent(\fIsendtype\fP),
              \fIsendcounts\fP[i], \fIsendtype\fP, r1, ..., \fIcomm\fP, &req[i]);
    MPI_Irecv(\fIrecvbuf\fP + \fIrdispls\fP[i] * extent(\fIrecvtype\fP),
              \fIrecvcounts\fP[i], \fIrecvtype\fP, r1, ..., \fIcomm\fP, ...);
    ++i;
}
MPI_Waitall (...);
.fi
.sp
Process j sends the k-th block of its local \fIsendbuf\fP to neighbor
k, which places the data in the j-th block of its local
\fIrecvbuf\fP.
.sp
When a pair of processes exchanges data, each may pass different
element count and datatype arguments so long as the sender specifies
the same amount of data to send (in bytes) as the receiver expects
to receive.
.sp
Note that process i may send a different amount of data to process j
than it receives from process j. Also, a process may send entirely
different amounts of data to different processes in the communicator.
.sp
.SH NEIGHBOR ORDERING
For a distributed graph topology, created with MPI_Dist_graph_create, the sequence of neighbors
in the send and receive buffers at each process is defined as the sequence returned by MPI_Dist_graph_neighbors
for destinations and sources, respectively. For a general graph topology, created with MPI_Graph_create, the order of
neighbors in the send and receive buffers is defined as the sequence of neighbors as returned by MPI_Graph_neighbors.
Note that general graph topologies should generally be replaced by the distributed graph topologies.
For a Cartesian topology, created with MPI_Cart_create, the sequence of neighbors in the send and receive
buffers at each process is defined by order of the dimensions, first the neighbor in the negative direction
and then in the positive direction with displacement 1. The numbers of sources and destinations in the
communication routines are 2*ndims with ndims defined in MPI_Cart_create. If a neighbor does not exist, i.e., at
the border of a Cartesian topology in the case of a non-periodic virtual grid dimension (i.e.,
periods[...]==false), then this neighbor is defined to be MPI_PROC_NULL.
If a neighbor in any of the functions is MPI_PROC_NULL, then the neighborhood collective communication behaves
like a point-to-point communication with MPI_PROC_NULL in this direction. That is, the buffer is still part of
the sequence of neighbors but it is neither communicated nor updated.
.sp
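.SH EXAMPLE
.ft R
A minimal sketch, assuming a periodic one-dimensional Cartesian ring (the layout and variable names are illustrative): one element is sent toward the neighbor in the negative direction and rank+1 elements toward the neighbor in the positive direction, so the counts are first exchanged with MPI_Neighbor_alltoall and then used to size the receive side.
.sp
.nf
#include <mpi.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    MPI_Comm ring;
    int size, rank, i, periodic = 1;
    int sendcounts[2], recvcounts[2], sdispls[2], rdispls[2];
    int *sendbuf, *recvbuf;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Cart_create(MPI_COMM_WORLD, 1, &size, &periodic, 0, &ring);
    MPI_Comm_rank(ring, &rank);

    /* Different amounts of data for the two neighbors: one element to
     * the negative direction, rank+1 elements to the positive direction. */
    sendcounts[0] = 1;
    sendcounts[1] = rank + 1;
    sdispls[0] = 0;
    sdispls[1] = sendcounts[0];
    sendbuf = malloc((sendcounts[0] + sendcounts[1]) * sizeof(int));
    for (i = 0; i < sendcounts[0] + sendcounts[1]; ++i) sendbuf[i] = rank;

    /* Exchange the counts so each rank knows how much each neighbor sends. */
    MPI_Neighbor_alltoall(sendcounts, 1, MPI_INT,
                          recvcounts, 1, MPI_INT, ring);
    rdispls[0] = 0;
    rdispls[1] = recvcounts[0];
    recvbuf = malloc((recvcounts[0] + recvcounts[1]) * sizeof(int));

    MPI_Neighbor_alltoallv(sendbuf, sendcounts, sdispls, MPI_INT,
                           recvbuf, recvcounts, rdispls, MPI_INT, ring);

    free(sendbuf);
    free(recvbuf);
    MPI_Comm_free(&ring);
    MPI_Finalize();
    return 0;
}
.fi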
.SH NOTES
.ft R
The MPI_IN_PLACE option for \fIsendbuf\fP is not meaningful for this operation.
.sp
The specification of counts and displacements should not cause
any location to be written more than once.
.sp
All arguments on all processes are significant. The \fIcomm\fP argument,
in particular, must describe the same communicator on all processes.
.sp
The offsets of \fIsdispls\fP and \fIrdispls\fP are measured in units
of \fIsendtype\fP and \fIrecvtype\fP, respectively. Compare this to
MPI_Neighbor_alltoallw, where these offsets are measured in bytes.
.SH ERRORS
.ft R
Almost all MPI routines return an error value; C routines as
the value of the function and Fortran routines in the last argument.
.sp
Before the error value is returned, the current MPI error handler is
called. By default, this error handler aborts the MPI job, except for
I/O function errors. The error handler may be changed with
MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN
may be used to cause error values to be returned. Note that MPI does not
guarantee that an MPI program can continue past an error.
.SH SEE ALSO
.ft R
.nf
MPI_Neighbor_alltoall
MPI_Neighbor_alltoallw
MPI_Cart_create
MPI_Graph_create
MPI_Dist_graph_create

ompi/mpi/man/man3/MPI_Neighbor_alltoallw.3in (new file)
@ -0,0 +1,165 @@
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Neighbor_alltoallw 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Neighbor_alltoallw\fP \- All processes send data of different types to, and receive data of different types from, all neighbors
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Neighbor_alltoallw(void *\fIsendbuf\fP, int \fIsendcounts\fP[],
MPI_Aint \fIsdispls\fP[], MPI_Datatype \fIsendtypes\fP[],
void *\fIrecvbuf\fP, int \fIrecvcounts\fP[],
MPI_Aint \fIrdispls\fP[], MPI_Datatype \fIrecvtypes\fP[], MPI_Comm \fIcomm\fP)
.fi
.SH Fortran Syntax
.nf
INCLUDE 'mpif.h'
MPI_NEIGHBOR_ALLTOALLW(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPES,
RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPES, COMM, IERROR\fP)
<type> \fISENDBUF(*), RECVBUF(*)\fP
INTEGER \fISENDCOUNTS(*), SENDTYPES(*)\fP
INTEGER \fIRECVCOUNTS(*), RECVTYPES(*)\fP
INTEGER(KIND=MPI_ADDRESS_KIND) \fISDISPLS(*), RDISPLS(*)\fP
INTEGER \fICOMM, IERROR\fP
.fi
.SH INPUT PARAMETERS
.ft R
.TP 1.2i
sendbuf
Starting address of send buffer.
.TP 1.2i
sendcounts
Integer array, where entry i specifies the number of elements to send
to neighbor i.
.TP 1.2i
sdispls
Integer array, where entry i specifies the displacement (in bytes,
offset from \fIsendbuf\fP) from which to send data to neighbor i.
.TP 1.2i
sendtypes
Datatype array, where entry i specifies the datatype to use when
sending data to neighbor i.
.TP 1.2i
recvcounts
Integer array, where entry j specifies the number of elements to
receive from neighbor j.
.TP 1.2i
rdispls
Integer array, where entry j specifies the displacement (in bytes,
offset from \fIrecvbuf\fP) to which data from neighbor j should
be written.
.TP 1.2i
recvtypes
Datatype array, where entry j specifies the datatype to use when
receiving data from neighbor j.
.TP 1.2i
comm
Communicator over which data is to be exchanged.
.SH OUTPUT PARAMETERS
.ft R
.TP 1.2i
recvbuf
Address of receive buffer.
.ft R
.TP 1.2i
IERROR
Fortran only: Error status.
.SH DESCRIPTION
.ft R
MPI_Neighbor_alltoallw is a generalized collective operation in which all
processes send data to and receive data from all neighbors. It
adds flexibility to MPI_Neighbor_alltoallv by allowing the user to specify the
datatype of individual data blocks (in addition to displacement and
element count). Its operation can be thought of in the following way,
where each process performs 2n (n being the number of neighbors in
the topology of communicator \fIcomm\fP) independent point-to-point communications.
The neighbors and buffer layout are determined by the topology of \fIcomm\fP.
.sp
.nf
MPI_Cart_get(\fIcomm\fP, maxdims, dims, periods, coords);
for (dim = 0, i = 0 ; dim < ndims ; ++dim) {
    MPI_Cart_shift(\fIcomm\fP, dim, 1, &r0, &r1);
    MPI_Isend(\fIsendbuf\fP + \fIsdispls\fP[i],
              \fIsendcounts\fP[i], \fIsendtypes\fP[i], r0, ..., \fIcomm\fP, ...);
    MPI_Irecv(\fIrecvbuf\fP + \fIrdispls\fP[i],
              \fIrecvcounts\fP[i], \fIrecvtypes\fP[i], r0, ..., \fIcomm\fP, ...);
    ++i;
    MPI_Isend(\fIsendbuf\fP + \fIsdispls\fP[i],
              \fIsendcounts\fP[i], \fIsendtypes\fP[i], r1, ..., \fIcomm\fP, &req[i]);
    MPI_Irecv(\fIrecvbuf\fP + \fIrdispls\fP[i],
              \fIrecvcounts\fP[i], \fIrecvtypes\fP[i], r1, ..., \fIcomm\fP, ...);
    ++i;
}
MPI_Waitall (...);
.fi
.sp
Process j sends the k-th block of its local \fIsendbuf\fP to neighbor
k, which places the data in the j-th block of its local
\fIrecvbuf\fP.
.sp
When a pair of processes exchanges data, each may pass different
element count and datatype arguments so long as the sender specifies
the same amount of data to send (in bytes) as the receiver expects
to receive.
.sp
Note that process i may send a different amount of data to process j
than it receives from process j. Also, a process may send entirely
different amounts and types of data to different processes in the
communicator.
.sp
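.SH EXAMPLE
.ft R
A minimal sketch, assuming a periodic one-dimensional Cartesian ring (the block_t layout and all variable names are illustrative): each rank sends an int to the neighbor in the negative direction and a double to the neighbor in the positive direction, so a different datatype is used per neighbor and the displacements are given in bytes.
.sp
.nf
#include <stddef.h>
#include <mpi.h>

typedef struct { int i; double d; } block_t;

int main(int argc, char **argv)
{
    MPI_Comm ring;
    int size, rank, periodic = 1;
    block_t sbuf, rbuf;
    int scounts[2] = {1, 1}, rcounts[2] = {1, 1};
    MPI_Aint sdispls[2], rdispls[2];
    MPI_Datatype sendtypes[2] = {MPI_INT, MPI_DOUBLE};
    MPI_Datatype recvtypes[2] = {MPI_DOUBLE, MPI_INT};

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Cart_create(MPI_COMM_WORLD, 1, &size, &periodic, 0, &ring);
    MPI_Comm_rank(ring, &rank);

    sbuf.i = rank;
    sbuf.d = (double) rank;

    /* Byte offsets of the per-neighbor blocks inside the structs. */
    sdispls[0] = offsetof(block_t, i);  /* int to the negative-direction neighbor */
    sdispls[1] = offsetof(block_t, d);  /* double to the positive-direction neighbor */
    rdispls[0] = offsetof(block_t, d);  /* double arrives from the negative side */
    rdispls[1] = offsetof(block_t, i);  /* int arrives from the positive side */

    MPI_Neighbor_alltoallw(&sbuf, scounts, sdispls, sendtypes,
                           &rbuf, rcounts, rdispls, recvtypes, ring);

    MPI_Comm_free(&ring);
    MPI_Finalize();
    return 0;
}
.fi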
.SH NOTES
.ft R
The MPI_IN_PLACE option for \fIsendbuf\fP is not meaningful for this operation.
.sp
The specification of counts, types, and displacements should not cause
any location to be written more than once.
.sp
All arguments on all processes are significant. The \fIcomm\fP argument,
in particular, must describe the same communicator on all processes.
.sp
The offsets of \fIsdispls\fP and \fIrdispls\fP are measured in bytes.
Compare this to MPI_Neighbor_alltoallv, where these offsets are measured in units
of \fIsendtype\fP and \fIrecvtype\fP, respectively.
.SH ERRORS
.ft R
Almost all MPI routines return an error value; C routines as
the value of the function and Fortran routines in the last argument.
.sp
Before the error value is returned, the current MPI error handler is
called. By default, this error handler aborts the MPI job, except for
I/O function errors. The error handler may be changed with
MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN
may be used to cause error values to be returned. Note that MPI does not
guarantee that an MPI program can continue past an error.
.SH SEE ALSO
.ft R
.nf
MPI_Neighbor_alltoall
MPI_Neighbor_alltoallv
MPI_Cart_create
MPI_Graph_create
MPI_Dist_graph_create

@ -1,7 +1,7 @@
# -*- makefile -*-
# Copyright (c) 2006-2012 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved.
# Copyright (c) 2012 Los Alamos National Security, LLC. All rights reserved.
# Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
@ -207,6 +207,11 @@ mpi_api_man_pages = \
mpi/man/man3/MPI_Lookup_name.3 \
mpi/man/man3/MPI_Mprobe.3 \
mpi/man/man3/MPI_Mrecv.3 \
mpi/man/man3/MPI_Neighbor_allgather.3 \
mpi/man/man3/MPI_Neighbor_allgatherv.3 \
mpi/man/man3/MPI_Neighbor_alltoall.3 \
mpi/man/man3/MPI_Neighbor_alltoallv.3 \
mpi/man/man3/MPI_Neighbor_alltoallw.3 \
mpi/man/man3/MPI_Op_c2f.3 \
mpi/man/man3/MPI_Op_create.3 \
mpi/man/man3/MPI_Open_port.3 \