openmpi/ompi/mpi/f90/scripts/mpi_scatterv_f90.f90.sh
Jeff Squyres f8e634d6ca Bring over /tmp/f90-stuff branch to the trunk.
svn merge -r 9453:9609 https://svn.open-mpi.org/svn/ompi/tmp/f90-stuff .

Several improvements over the current F90 MPI bindings:

- The capability to make 4 sizes of the F90 bindings:
  - trivial: only the F90-specific MPI functions (sizeof and a few
    others)
  - small: (this is the default) all MPI functions that do not take
    choice buffers
  - medium: small + all MPI functions that take one choice buffer
    (e.g., MPI_SEND)
  - large: all MPI functions, but those that take 2 choice buffers
    (e.g., MPI_GATHER) only allow both buffers to be of the same type
- Remove all non-standard MPI types (LOGICAL*x, CHARACTER*x)
- Remove use of selected_*_kind() and only use MPI-defined types
  (INTEGER*x, etc.)
- Decrease complexity of the F90 configure and build system

This commit was SVN r9610.
2006-04-11 03:33:38 +00:00
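For context, the size of the generated F90 bindings was selected when configuring Open MPI. A sketch of a typical invocation (the --with-mpi-f90-size flag name is recalled from the Open MPI 1.x README and should be treated as an assumption for this exact revision):

    shell$ ./configure --with-mpi-f90-size=medium

Any of trivial, small, medium, or large may be given; "small" was the default, and only "large" builds the two-choice-buffer bindings produced by the script below.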

#! /bin/sh
#
# Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
#                         University Research and Technology
#                         Corporation.  All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
#                         All rights reserved.
# Copyright (c) 2006      Cisco Systems, Inc.  All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
#
# This file generates Fortran code to bridge between an explicit F90
# generic interface and the F77 implementation.
#
# This file is automatically generated by either of the scripts
# ../xml/create_mpi_f90_medium.f90.sh or
# ../xml/create_mpi_f90_large.f90.sh
#
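# fortran_kinds.sh supplies the kind lists ($ikinds, $rkinds, $ckinds) and
# the supported array ranks ($ranks) that are used below.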
. "$1/fortran_kinds.sh"
# This entire file is only generated in large modules. So if
# we're not large, bail now.
check_size large
if test "$output" = "0"; then
    exit 0
fi
# Ok, we should continue.
allranks="0 $ranks"
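# output() emits one type-specific bridging subroutine:
#   $1 = MPI procedure name, $2 = array rank of the choice buffers,
#   $3 = suffix used in the generated subroutine name,
#   $4 = Fortran type declaration for the choice buffers.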
output() {
    procedure=$1
    rank=$2
    type=$4
    proc="$1$2D$3"
    cat <<EOF
subroutine ${proc}(sendbuf, sendcounts, displs, sendtype, recvbuf, &
        recvcount, recvtype, root, comm, ierr)
  include "mpif.h"
  ${type}, intent(in) :: sendbuf
  integer, dimension(*), intent(in) :: sendcounts
  integer, dimension(*), intent(in) :: displs
  integer, intent(in) :: sendtype
  ${type}, intent(out) :: recvbuf
  integer, intent(in) :: recvcount
  integer, intent(in) :: recvtype
  integer, intent(in) :: root
  integer, intent(in) :: comm
  integer, intent(out) :: ierr
  call ${procedure}(sendbuf, sendcounts, displs, sendtype, recvbuf, &
        recvcount, recvtype, root, comm, ierr)
end subroutine ${proc}
EOF
}
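# Generate one subroutine for every rank (0 = scalar up through
# 7-dimensional arrays) crossed with every supported type: character,
# logical, and each configured integer, real, and complex kind.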
for rank in $allranks
do
    case "$rank" in 0) dim='' ; esac
    case "$rank" in 1) dim=', dimension(:)' ; esac
    case "$rank" in 2) dim=', dimension(:,:)' ; esac
    case "$rank" in 3) dim=', dimension(:,:,:)' ; esac
    case "$rank" in 4) dim=', dimension(:,:,:,:)' ; esac
    case "$rank" in 5) dim=', dimension(:,:,:,:,:)' ; esac
    case "$rank" in 6) dim=', dimension(:,:,:,:,:,:)' ; esac
    case "$rank" in 7) dim=', dimension(:,:,:,:,:,:,:)' ; esac
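    # Example: with rank=1 and integer kind 4, the integer loop below runs
    #   output MPI_Scatterv 1 I4 "integer*4, dimension(:)"
    # which emits a subroutine named MPI_Scatterv1DI4 with choice buffers
    # declared as "integer*4, dimension(:)".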
    output MPI_Scatterv ${rank} CH "character${dim}"
    output MPI_Scatterv ${rank} L "logical${dim}"
    for kind in $ikinds
    do
        output MPI_Scatterv ${rank} I${kind} "integer*${kind}${dim}"
    done
    for kind in $rkinds
    do
        output MPI_Scatterv ${rank} R${kind} "real*${kind}${dim}"
    done
    for kind in $ckinds
    do
        output MPI_Scatterv ${rank} C${kind} "complex*${kind}${dim}"
    done
done