1
1

Update man pages for MPI-3 and add some missing man pages for MPI-2.x

functions.

cmr=v1.7.4

This commit was SVN r29336.
Этот коммит содержится в:
Nathan Hjelm 2013-10-02 14:27:47 +00:00
родитель 63084b1003
Коммит 9235b290d5
126 изменённых файлов: 1499 добавлений и 284 удалений

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Accumulate(void *\fIorigin_addr\fP, int \fIorigin_count\fP,
int MPI_Accumulate(const void *\fIorigin_addr\fP, int \fIorigin_count\fP,
MPI_Datatype \fIorigin_datatype\fP, int \fItarget_rank\fP,
MPI_Aint \fItarget_disp\fP, int \fItarget_count\fP,
MPI_Datatype \fItarget_datatype\fP, MPI_Op \fIop\fP, MPI_Win \fIwin\fP)

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -14,7 +16,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Add_error_string(int \fIerrorcode\fP, char *\fIstring\fP)
int MPI_Add_error_string(int \fIerrorcode\fP, const char *\fIstring\fP)
.fi
.SH Fortran Syntax

Просмотреть файл

@ -1,19 +1,25 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Allgather 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Allgather\fP \- Gathers data from all processes and distributes it to all processes
\fBMPI_Allgather, MPI_Iallgather\fP \- Gathers data from all processes and distributes it to all processes
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Allgather(void\fI *sendbuf\fP, int \fI sendcount\fP,
MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, int\fI recvcount\fP,
int MPI_Allgather(const void\fI *sendbuf\fP, int \fI sendcount\fP,
MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, int\fI recvcount\fP,
MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP)
int MPI_Iallgather(const void\fI *sendbuf\fP, int \fI sendcount\fP,
MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, int\fI recvcount\fP,
MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP, MPI_Request \fI*request\fP)
.fi
.SH Fortran Syntax
.nf
@ -24,6 +30,12 @@ MPI_ALLGATHER(\fISENDBUF\fP,\fI SENDCOUNT\fP,\fI SENDTYPE\fP,\fI RECVBUF\fP,\fI
INTEGER \fISENDCOUNT\fP,\fI SENDTYPE\fP,\fI RECVCOUNT\fP,\fI RECVTYPE\fP,\fI COMM\fP,
INTEGER \fIIERROR\fP
MPI_IALLGATHER(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT,
RECVTYPE, COMM, REQUEST, IERROR\fP)
<type> \fISENDBUF\fP(*)\fI, RECVBUF\fP (*)
INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, COMM\fP
INTEGER \fIREQUEST, IERROR\fP
.fi
.SH C++ Syntax
.nf
@ -73,10 +85,13 @@ Communicator (handle).
.SH OUTPUT PARAMETERS
.ft R
.TP 1i
recvbuf
recvbuf
Address of receive buffer (choice).
.ft R
.TP 1i
request
Request (handle, non-blocking only).
.TP 1i
IERROR
Fortran only: Error status (integer).

Просмотреть файл

@ -1,18 +1,25 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2007-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Allgatherv 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Allgatherv\fP \- Gathers data from all processes and delivers it to all. Each process may contribute a different amount of data.
\fBMPI_Allgatherv, MPI_Iallgatherv\fP \- Gathers data from all processes and delivers it to all. Each process may contribute a different amount of data.
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Allgatherv(void\fI *sendbuf\fP, int\fI sendcount\fP,
MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, int\fI recvcounts[]\fP,
int\fI displs[]\fP, MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP)
int MPI_Allgatherv(const void\fI *sendbuf\fP, int\fI sendcount\fP,
MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, const int\fI recvcounts[]\fP,
const int\fI displs[]\fP, MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP)
int MPI_Iallgatherv(const void\fI *sendbuf\fP, int\fI sendcount\fP,
MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, const int\fI recvcounts[]\fP,
const int\fI displs[]\fP, MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP,
MPI_Request \fI*request\fP)
.fi
.SH Fortran Syntax
@ -21,9 +28,15 @@ INCLUDE 'mpif.h'
MPI_ALLGATHERV(\fISENDBUF\fP,\fI SENDCOUNT\fP, \fISENDTYPE\fP,\fI RECVBUF\fP,
\fIRECVCOUNT\fP,\fI DISPLS\fP, \fIRECVTYPE\fP,\fI COMM\fP,\fI IERROR\fP)
<type> \fISENDBUF\fP(*), \fIRECVBUF\fP(*)
INTEGER \fISENDCOUNT\fP,\fI SENDTYPE\fP, \fIRECVCOUNT\fP(*),
INTEGER \fISENDCOUNT\fP,\fI SENDTYPE\fP, \fIRECVCOUNT\fP(*)
INTEGER \fIDISPLS\fP(*),\fI RECVTYPE\fP,\fI COMM\fP,\fI IERROR\fP
MPI_IALLGATHERV(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF,
RECVCOUNT, DISPLS, RECVTYPE, COMM, REQUEST, IERROR\fP)
<type> \fISENDBUF\fP(*)\fI, RECVBUF\fP(*)
INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNT\fP(*)
INTEGER \fIDISPLS\fP(*)\fI, RECVTYPE, COMM, REQUEST, IERROR\fP
.fi
.SH C++ Syntax
.nf
@ -77,6 +90,9 @@ Communicator (handle).
.TP 1i
recvbuf
Address of receive buffer (choice).
.TP 1i
request
Request (handle, non-blocking only).
.ft R
.TP 1i
IERROR

Просмотреть файл

@ -1,26 +1,35 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2007-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Allreduce 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Allreduce\fP \- Combines values from all processes and distributes the result back to all processes.
\fBMPI_Allreduce, MPI_Iallreduce\fP \- Combines values from all processes and distributes the result back to all processes.
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Allreduce(void \fI*sendbuf\fP, void \fI*recvbuf\fP, int\fI count\fP,
MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, MPI_Comm\fI comm\fP)
int MPI_Allreduce(const void \fI*sendbuf\fP, void \fI*recvbuf\fP, int\fI count\fP,
MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, MPI_Comm\fI comm\fP)
int MPI_Iallreduce(const void \fI*sendbuf\fP, void \fI*recvbuf\fP, int\fI count\fP,
MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, MPI_Comm\fI comm\fP,
MPI_Request \fI*request\fP)
.fi
.SH Fortran Syntax
.nf
INCLUDE 'mpif.h'
MPI_ALLREDUCE(\fISENDBUF\fP,\fI RECVBUF\fP, \fICOUNT\fP,\fI DATATYPE\fP,\fI OP\fP,
\fICOMM\fP, \fIIERROR\fP)
MPI_ALLREDUCE(\fISENDBUF\fP,\fI RECVBUF\fP, \fICOUNT\fP,\fI DATATYPE\fP,\fI OP\fP, \fICOMM\fP, \fIIERROR\fP)
<type> \fISENDBUF\fP(*), \fIRECVBUF\fP(*)
INTEGER \fICOUNT\fP,\fI DATATYPE\fP,\fI OP\fP,\fI COMM\fP,\fI IERROR
INTEGER \fICOUNT\fP,\fI DATATYPE\fP,\fI OP\fP,\fI COMM\fP,\fI IERROR\fP
MPI_ALLREDUCE(\fISENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, REQUEST, IERROR\fP)
<type> \fISENDBUF\fP(*)\fI, RECVBUF\fP(*)
INTEGER \fICOUNT, DATATYPE, OP, COMM, REQUEST, IERROR\fP
.fi
.SH C++ Syntax
@ -68,6 +77,9 @@ Communicator (handle).
.TP 1i
recvbuf
Starting address of receive buffer (choice).
.TP 1i
request
Request (handle, non-blocking only).
.ft R
.TP 1i
IERROR

Просмотреть файл

@ -1,10 +1,12 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Alltoall 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Alltoall\fP \- All processes send data to all processes
\fBMPI_Alltoall, MPI_Ialltoall\fP \- All processes send data to all processes
.SH SYNTAX
.ft R
@ -12,10 +14,14 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Alltoall(void *\fIsendbuf\fP, int \fIsendcount\fP,
int MPI_Alltoall(const void *\fIsendbuf\fP, int \fIsendcount\fP,
MPI_Datatype \fIsendtype\fP, void *\fIrecvbuf\fP, int \fIrecvcount\fP,
MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP)
int MPI_Ialltoall(const void *\fIsendbuf\fP, int \fIsendcount\fP,
MPI_Datatype \fIsendtype\fP, void *\fIrecvbuf\fP, int \fIrecvcount\fP,
MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP, MPI_Request \fI*request\fP)
.fi
.SH Fortran Syntax
.nf
@ -27,6 +33,13 @@ MPI_ALLTOALL(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT,
INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE\fP
INTEGER \fICOMM, IERROR\fP
MPI_IALLTOALL(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT,
RECVTYPE, COMM, REQUEST, IERROR\fP)
<type> \fISENDBUF(*), RECVBUF(*)\fP
INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE\fP
INTEGER \fICOMM, REQUEST, IERROR\fP
.fi
.SH C++ Syntax
.nf
@ -74,6 +87,9 @@ Communicator over which data is to be exchanged (handle).
.TP 1.2i
recvbuf
Starting address of receive buffer (choice).
.TP 1.2i
request
Request (handle, non-blocking only).
.ft R
.TP 1.2i
IERROR
@ -108,10 +124,12 @@ When the communicator is an inter-communicator, the gather operation occurs in t
The first group defines the root process. The root process uses MPI_ROOT as the value of \fIroot\fR. All other processes in the first group use MPI_PROC_NULL as the value of \fIroot\fR. All processes in the second group use the rank of the root process in the first group as the value of \fIroot\fR.
.sp
When the communicator is an intra-communicator, these groups are the same, and the operation occurs in a single phase.
.SH USE OF IN-PLACE OPTION
When the communicator is an intracommunicator, you can perform an all-to-all operation in-place (the output buffer is used as the input buffer). Use the variable MPI_IN_PLACE as the value of \fIsendbuf\fR. In this case, \fIsendcount\fR and \fIsendtype\fR are ignored. The input data of each process is assumed to be in the area where that process would receive its own contribution to the receive buffer.
.sp
.SH NOTES
.ft R
The MPI_IN_PLACE option is not available for this function.
.sp
All arguments on all processes are significant. The \fIcomm\fP argument,
in particular, must describe the same communicator on all processes.

Просмотреть файл

@ -1,20 +1,28 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Alltoallv 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Alltoallv\fP \- All processes send different amount of data to, and receive different amount of data from, all processes
\fBMPI_Alltoallv, MPI_Ialltoallv\fP \- All processes send different amount of data to, and receive different amount of data from, all processes
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Alltoallv(void *\fIsendbuf\fP, int \fIsendcounts\fP[],
int \fIsdispls\f[]P, MPI_Datatype \fIsendtype\fP,
void *\fIrecvbuf\fP, int\fI recvcounts\fP[],
int \fIrdispls\fP[], MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP)
int MPI_Alltoallv(const void *\fIsendbuf\fP, const int \fIsendcounts\fP[],
const int \fIsdispls\fP[], MPI_Datatype \fIsendtype\fP,
void *\fIrecvbuf\fP, const int\fI recvcounts\fP[],
const int \fIrdispls\fP[], MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP)
int MPI_Ialltoallv(const void *\fIsendbuf\fP, const int \fIsendcounts\fP[],
const int \fIsdispls\fP[], MPI_Datatype \fIsendtype\fP,
void *\fIrecvbuf\fP, const int\fI recvcounts\fP[],
const int \fIrdispls\fP[], MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP,
MPI_Request \fI*request\fP)
.fi
.SH Fortran Syntax
@ -29,6 +37,14 @@ MPI_ALLTOALLV(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPE,
INTEGER \fIRECVCOUNTS(*), RDISPLS(*), RECVTYPE\fP
INTEGER \fICOMM, IERROR\fP
MPI_IALLTOALLV(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPE,
RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPE, COMM, REQUEST, IERROR\fP)
<type> \fISENDBUF(*), RECVBUF(*)\fP
INTEGER \fISENDCOUNTS(*), SDISPLS(*), SENDTYPE\fP
INTEGER \fIRECVCOUNTS(*), RDISPLS(*), RECVTYPE\fP
INTEGER \fICOMM, REQUEST, IERROR\fP
.fi
.SH C++ Syntax
.nf
@ -92,6 +108,9 @@ Communicator over which data is to be exchanged.
.TP 1.2i
recvbuf
Address of receive buffer.
.TP 1.2i
request
Request (handle, non-blocking only).
.ft R
.TP 1.2i
IERROR
@ -140,10 +159,11 @@ The first group defines the root process. The root process uses MPI_ROOT as the
.sp
When the communicator is an intra-communicator, these groups are the same, and the operation occurs in a single phase.
.sp
.SH USE OF IN-PLACE OPTION
When the communicator is an intracommunicator, you can perform an all-to-all operation in-place (the output buffer is used as the input buffer). Use the variable MPI_IN_PLACE as the value of \fIsendbuf\fR. In this case, \fIsendcounts\fR, \fIsdispls\fP, and \fIsendtype\fR are ignored. The input data of each process is assumed to be in the area where that process would receive its own contribution to the receive buffer.
.SH NOTES
.ft R
The MPI_IN_PLACE option is not available for any form of all-to-all
communication.
.sp
The specification of counts and displacements should not cause
any location to be written more than once.

Просмотреть файл

@ -1,10 +1,12 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Alltoallw 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Alltoallw\fP \- All processes send data of different types to, and receive data of different types from, all processes
\fBMPI_Alltoallw, MPI_Ialltoallw\fP \- All processes send data of different types to, and receive data of different types from, all processes
.SH SYNTAX
.ft R
@ -12,10 +14,16 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Alltoallw(void *\fIsendbuf\fP, int *\fIsendcounts\fP,
int *\fIsdispls\fP, MPI_Datatype *\fIsendtypes\fP,
void *\fIrecvbuf\fP, int *\fIrecvcounts\fP,
int *\fIrdispls\fP, MPI_Datatype *\fIrecvtypes\fP, MPI_Comm \fIcomm\fP)
int MPI_Alltoallw(const void *\fIsendbuf\fP, const int \fIsendcounts\fP[],
const int \fIsdispls\fP[], const MPI_Datatype \fIsendtypes\fP[],
void *\fIrecvbuf\fP, const int \fIrecvcounts\fP[], const int \fIrdispls\fP[],
const MPI_Datatype \fIrecvtypes\fP[], MPI_Comm \fIcomm\fP)
int MPI_Ialltoallw(const void *\fIsendbuf\fP, const int \fIsendcounts\fP[],
const int \fIsdispls\fP[], const MPI_Datatype \fIsendtypes\fP[],
void *\fIrecvbuf\fP, const int \fIrecvcounts\fP[], const int \fIrdispls\fP[],
const MPI_Datatype \fIrecvtypes\fP[], MPI_Comm \fIcomm\fP,
MPI_Request \fI*request\fP)
.fi
.SH Fortran Syntax
@ -29,6 +37,14 @@ MPI_ALLTOALLW(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPES,
INTEGER \fIRECVCOUNTS(*), RDISPLS(*), RECVTYPES(*)\fP
INTEGER \fICOMM, IERROR\fP
MPI_IALLTOALLW(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPES,
RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPES, COMM, REQUEST, IERROR\fP)
<type> \fISENDBUF(*), RECVBUF(*)\fP
INTEGER \fISENDCOUNTS(*), SDISPLS(*), SENDTYPES(*)\fP
INTEGER \fIRECVCOUNTS(*), RDISPLS(*), RECVTYPES(*)\fP
INTEGER \fICOMM, REQUEST, IERROR\fP
.fi
.SH C++ Syntax
.nf
@ -79,6 +95,9 @@ Communicator over which data is to be exchanged.
.TP 1.2i
recvbuf
Address of receive buffer.
.TP 1.2i
request
Request (handle, non-blocking only).
.ft R
.TP 1.2i
IERROR
@ -127,10 +146,11 @@ The first group defines the root process. The root process uses MPI_ROOT as the
.sp
When the communicator is an intra-communicator, these groups are the same, and the operation occurs in a single phase.
.sp
.SH USE OF IN-PLACE OPTION
When the communicator is an intracommunicator, you can perform an all-to-all operation in-place (the output buffer is used as the input buffer). Use the variable MPI_IN_PLACE as the value of \fIsendbuf\fR. In this case, \fIsendcounts\fR, \fIsdispls\fP, and \fIsendtypes\fR are ignored. The input data of each process is assumed to be in the area where that process would receive its own contribution to the receive buffer.
.SH NOTES
.ft R
The MPI_IN_PLACE option is not available for any form of all-to-all
communication.
.sp
The specification of counts, types, and displacements should not cause
any location to be written more than once.

Просмотреть файл

@ -3,7 +3,7 @@
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Bcast 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Bcast\fP \- Broadcasts a message from the process with rank \fIroot\fP to all other processes of the group.
\fBMPI_Bcast, MPI_Ibcast\fP \- Broadcasts a message from the process with rank \fIroot\fP to all other processes of the group.
.SH SYNTAX
.ft R
@ -13,6 +13,9 @@
int MPI_Bcast(void \fI*buffer\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP,
int\fI root\fP, MPI_Comm\fI comm\fP)
int MPI_Ibcast(void \fI*buffer\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP,
int\fI root\fP, MPI_Comm\fI comm\fP, MPI_Request \fI*request\fP)
.fi
.SH Fortran Syntax
.nf
@ -21,6 +24,10 @@ MPI_BCAST(\fIBUFFER\fP,\fI COUNT\fP, \fIDATATYPE\fP,\fI ROOT\fP,\fI COMM\fP,\fI
<type> \fIBUFFER\fP(*)
INTEGER \fICOUNT\fP,\fI DATATYPE\fP,\fI ROOT\fP,\fI COMM\fP,\fI IERROR\fP
MPI_IBCAST(\fIBUFFER\fP,\fI COUNT\fP, \fIDATATYPE\fP,\fI ROOT\fP,\fI COMM\fP, \fIREQUEST\fP,\fI IERROR\fP)
<type> \fIBUFFER\fP(*)
INTEGER \fICOUNT\fP,\fI DATATYPE\fP,\fI ROOT\fP,\fI COMM\fP, \fIREQUEST\fP,\fI IERROR\fP
.fi
.SH C++ Syntax
.nf
@ -57,9 +64,12 @@ Rank of broadcast root (integer).
comm
Communicator (handle).
.SH OUTPUT PARAMETER
.SH OUTPUT PARAMETERS
.ft R
.TP 1i
request
Request (handle, non-blocking only).
.TP 1i
IERROR
Fortran only: Error status (integer).

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Bsend(void \fI*buf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP,
int MPI_Bsend(const void \fI*buf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP,
int\fI dest\fP, int\fI tag\fP, MPI_Comm\fI comm\fP)
.fi

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Bsend_init(void \fI*buf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP,
int MPI_Bsend_init(const void \fI*buf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP,
int\fI dest\fP, int\fI tag\fP, MPI_Comm\fI comm\fP, MPI_Request\fI *request\fP)
.fi

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,8 +12,8 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Cart_create(MPI_Comm\fI comm_old\fP, int\fI ndims\fP, int\fI dims\fP[],
int\fI periods\fP[], int\fI reorder\fP, MPI_Comm\fI *comm_cart\fP)
int MPI_Cart_create(MPI_Comm\fI comm_old\fP, int\fI ndims\fP, const int\fI dims\fP[],
const int\fI periods\fP[], int\fI reorder\fP, MPI_Comm\fI *comm_cart\fP)
.fi
.SH Fortran Syntax

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,8 +12,8 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Cart_map(MPI_Comm \fIcomm\fP, int\fI ndims\fP, int\fI dims\fP[],
int\fI periods\fP[], int\fI *newrank\fP)
int MPI_Cart_map(MPI_Comm \fIcomm\fP, int\fI ndims\fP, const int\fI dims\fP[],
const int\fI periods\fP[], int\fI *newrank\fP)
.fi
.SH Fortran Syntax

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Cart_sub(MPI_Comm \fIcomm\fP, int\fI remain_dims\fP[], MPI_Comm\fI *comm_new\fP)
int MPI_Cart_sub(MPI_Comm \fIcomm\fP, const int\fI remain_dims\fP[], MPI_Comm\fI *comm_new\fP)
.fi
.SH Fortran Syntax

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Close_port(char *\fIport_name\fP)
int MPI_Close_port(const char *\fIport_name\fP)
.fi
.SH Fortran Syntax

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2009-2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2007, Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Comm_accept(char *\fIport_name\fP, MPI_Info \fIinfo\fP, int \fIroot\fP, MPI_Comm \fIcomm\fP, MPI_Comm *\fInewcomm\fP)
int MPI_Comm_accept(const char *\fIport_name\fP, MPI_Info \fIinfo\fP, int \fIroot\fP, MPI_Comm \fIcomm\fP, MPI_Comm *\fInewcomm\fP)
.fi
.SH Fortran Syntax

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2007-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Comm_connect(char *\fIport_name\fP, MPI_Info \fIinfo\fP, int \fIroot\fP,
int MPI_Comm_connect(const char *\fIport_name\fP, MPI_Info \fIinfo\fP, int \fIroot\fP,
MPI_Comm \fIcomm\fP, MPI_Comm *\fInewcomm\fP)
.fi

Просмотреть файл

@ -1,3 +1,4 @@
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -17,7 +18,7 @@ int MPI_Comm_create(MPI_Comm \fIcomm\fP, MPI_Group\fI group\fP, MPI_Comm\fI *new
.nf
INCLUDE 'mpif.h'
MPI_COMM_CREATE(\fICOMM, GROUP, NEWCOMM, IERROR\fP)
INTEGER \fICOMM, GROUP, NEW, IERROR\fP
INTEGER \fICOMM, GROUP, NEWCOMM, IERROR\fP
.fi
.SH C++ Syntax
@ -80,4 +81,4 @@ called. By default, this error handler aborts the MPI job, except for I/O functi
MPI_Comm_split
.sp
MPI_Intercomm_create
MPI_Comm_create_group

80
ompi/mpi/man/man3/MPI_Comm_create_group.3in Обычный файл
Просмотреть файл

@ -0,0 +1,80 @@
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Comm_create_group 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Comm_create_group\fP \- Creates a new communicator.
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Comm_create_group(MPI_Comm \fIcomm\fP, MPI_Group\fI group\fP, int\fI tag\fP, MPI_Comm\fI *newcomm\fP)
.fi
.SH Fortran Syntax
.nf
INCLUDE 'mpif.h'
MPI_COMM_CREATE_GROUP(\fICOMM, GROUP, TAG, NEWCOMM, IERROR\fP)
INTEGER \fICOMM, GROUP, TAG, NEWCOMM, IERROR\fP
.fi
.SH INPUT PARAMETERS
.ft R
.TP 1i
comm
Communicator (handle).
.TP 1i
group
Group, which is a subset of the group of comm (handle).
.TP 1i
tag
Tag (integer).
.SH OUTPUT PARAMETERS
.ft R
.TP 1i
newcomm
New communicator (handle).
.ft R
.TP 1i
IERROR
Fortran only: Error status (integer).
.SH DESCRIPTION
.ft R
MPI_Comm_create_group is similar to MPI_Comm_create; however,
MPI_Comm_create must be called by all processes in the group of
comm, whereas MPI_Comm_create_group must be called by all processes in group,
which is a subgroup of the group of \fIcomm\fP. In addition, MPI_Comm_create_group
requires that \fIcomm\fP is an intracommunicator. MPI_Comm_create_group returns a new
intracommunicator, \fInewcomm\fP, for which the group argument defines the communication
group. No cached information propagates from \fIcomm\fP to \fInewcomm\fP.
Each process must provide a group argument that is a subgroup of the group associated with \fIcomm\fP;
this could be MPI_GROUP_EMPTY. If a non-empty group is specified, then all processes in that
group must call the function, and each of these processes must provide the same arguments,
including a group that contains the same members with the same ordering. Otherwise
the call is erroneous. If the calling process is a member of the group given as the \fIgroup\fP
argument, then newcomm is a communicator with group as its associated group. If the
calling process is not a member of group, e.g., \fIgroup\fP is MPI_GROUP_EMPTY, then the call
is a local operation and MPI_COMM_NULL is returned as \fInewcomm\fP.
.sp
.LP
.SH NOTES
MPI_Comm_create_group provides a means of making a subset of processes for the purpose of separate MIMD computation, with separate communication space. \fInewcomm\fR, which is created by MPI_Comm_create_group, can be used in subsequent calls to MPI_Comm_create_group (or other communicator constructors) to further subdivide a computation into parallel sub-computations. A more general service is provided by MPI_Comm_split.
.SH ERRORS
Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument.
.sp
Before the error value is returned, the current MPI error handler is
called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error.
.SH SEE ALSO
.ft R
MPI_Comm_create

Просмотреть файл

@ -0,0 +1,65 @@
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Comm_dup_with_info 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Comm_dup_with_info \fP \- Duplicates an existing communicator using provided info.
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Comm_dup_with_info(MPI_Comm \fIcomm\fP, MPI_Info \fIinfo\fP, MPI_Comm\fI *newcomm\fP)
.fi
.SH Fortran Syntax
.nf
INCLUDE 'mpif.h'
MPI_COMM_DUP_WITH_INFO(\fICOMM, INFO, NEWCOMM, IERROR\fP)
INTEGER \fICOMM, INFO, NEWCOMM, IERROR\fP
.fi
.SH INPUT PARAMETER
.ft R
.TP 1i
comm
Communicator (handle).
.ft R
.TP 1i
info
Info argument (handle).
.SH OUTPUT PARAMETERS
.ft R
.TP 1i
newcomm
Copy of comm (handle).
.ft R
.TP 1i
IERROR
Fortran only: Error status (integer).
.SH DESCRIPTION
.ft R
MPI_Comm_dup_with_info acts exactly like MPI_Comm_dup except that the
info hints associated with the communicator \fIcomm\fP are not duplicated in \fInewcomm\fP. The
hints provided by the argument \fIinfo\fP are associated with the output communicator \fInewcomm\fP
instead.
.SH NOTES
This operation is used to provide a parallel
library call with a duplicate communication space that has the same properties as the original communicator. This includes any attributes (see below) and topologies (see Chapter 6, "Process Topologies," in the MPI-1 Standard). This call is valid even if there are pending point-to-point communications involving the communicator comm. A typical call might involve an MPI_Comm_dup_with_info at the beginning of the parallel call, and an MPI_Comm_free of that duplicated communicator at the end of the call. Other models of communicator management are also possible.
.sp
This call applies to both intra- and intercommunicators.
.SH ERRORS
Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument.
.sp
Before the error value is returned, the current MPI error handler is
called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error.
.SH SEE ALSO
MPI_Comm_dup
MPI_Comm_idup

66
ompi/mpi/man/man3/MPI_Comm_idup.3in Обычный файл
Просмотреть файл

@ -0,0 +1,66 @@
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Comm_idup 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Comm_idup \fP \- Start the nonblocking duplication of an existing communicator with all its cached information.
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Comm_idup(MPI_Comm \fIcomm\fP, MPI_Comm\fI *newcomm\fP, MPI_Request\fI *request\fP)
.fi
.SH Fortran Syntax
.nf
INCLUDE 'mpif.h'
MPI_COMM_IDUP(\fICOMM, NEWCOMM, REQUEST, IERROR\fP)
INTEGER \fICOMM, NEWCOMM, REQUEST, IERROR\fP
.fi
.SH INPUT PARAMETER
.ft R
.TP 1i
comm
Communicator (handle).
.SH OUTPUT PARAMETERS
.ft R
.TP 1i
newcomm
Copy of comm (handle).
.ft R
.TP 1i
request
Communication request (handle).
.ft R
.TP 1i
IERROR
Fortran only: Error status (integer).
.SH DESCRIPTION
.ft R
MPI_Comm_idup starts the nonblocking duplication of an existing communicator comm with associated key
values. For each key value, the respective copy callback function determines the attribute value associated with this key in the new communicator; one particular action that a copy callback may take is to delete the attribute from the new communicator. Returns in newcomm a new communicator with the same group, any copied cached information, but a new context (see Section 5.7.1 of the MPI-1 Standard, "Functionality"). The communicator returned in \fInewcomm\fP will not be available until the request is complete.
.sp
The completion of a communicator duplication request can be determined by calling any of MPI_Wait, MPI_Waitany, MPI_Test, or MPI_Testany with the request returned by this function.
.SH NOTES
This operation is used to provide a parallel
library call with a duplicate communication space that has the same properties as the original communicator. This includes any attributes (see below) and topologies (see Chapter 6, "Process Topologies," in the MPI-1 Standard). This call is valid even if there are pending point-to-point communications involving the communicator comm. A typical call might involve an MPI_Comm_idup at the beginning of the parallel call, and an MPI_Comm_free of that duplicated communicator at the end of the call. Other models of communicator management are also possible.
.sp
This call applies to both intra- and intercommunicators.
.SH ERRORS
Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument.
.sp
Before the error value is returned, the current MPI error handler is
called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error.
.SH SEE ALSO
MPI_Comm_dup
MPI_Comm_dup_with_info

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines
@ -10,7 +12,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Comm_set_name(MPI_Comm \fIcomm\fP, char *\fIcomm_name\fP)
int MPI_Comm_set_name(MPI_Comm \fIcomm\fP, const char *\fIcomm_name\fP)
.fi
.SH Fortran Syntax

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Comm_spawn(char *\fIcommand\fP, char *\fIargv\fP[], int \fImaxprocs\fP,
int MPI_Comm_spawn(const char *\fIcommand\fP, char *\fIargv\fP[], int \fImaxprocs\fP,
MPI_Info \fIinfo\fP, int \fIroot\fP, MPI_Comm \fIcomm\fP,
MPI_Comm *\fIintercomm\fP, int \fIarray_of_errcodes\fP[])

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright (c) 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,9 +12,9 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Comm_spawn_multiple(int \fIcount\fP, char *\fIarray_of_commands\fP[],
char **\fIarray_of_argv\fP[], int \fIarray_of_maxprocs\fP[], MPI_Info
\fIarray_of_info\fP[], int \fIroot\fP, MPI_Comm \fIcomm\fP, MPI_Comm *\fIintercomm\fP,
int MPI_Comm_spawn_multiple(int \fIcount\fP, char *\fIarray_of_commands\fP[],
char **\fIarray_of_argv\fP[], const int \fIarray_of_maxprocs\fP[], const MPI_Info
\fIarray_of_info\fP[], int \fIroot\fP, MPI_Comm \fIcomm\fP, MPI_Comm *\fIintercomm\fP,
int \fIarray_of_errcodes\fP[])
.fi

100
ompi/mpi/man/man3/MPI_Comm_split_type.3in Обычный файл
Просмотреть файл

@ -0,0 +1,100 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Comm_split_type 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Comm_split_type \fP \- Creates new communicators based on split types and keys.
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Comm_split_type(MPI_Comm \fIcomm\fP, int\fI split_type\fP, int\fI key\fP,
MPI_Info info, MPI_Comm *\fInewcomm\fP)
.fi
.SH Fortran Syntax
.nf
INCLUDE 'mpif.h'
MPI_COMM_SPLIT_TYPE(\fICOMM, SPLIT_TYPE, KEY, INFO, NEWCOMM, IERROR\fP)
INTEGER \fICOMM, SPLIT_TYPE, KEY, INFO, NEWCOMM, IERROR\fP
.fi
.SH INPUT PARAMETERS
.ft R
.TP 1i
comm
Communicator (handle).
.TP 1i
split_type
Type of processes to be grouped together (integer).
.TP 1i
key
Control of rank assignment (integer).
.TP 1i
info
Info argument (handle).
.SH OUTPUT PARAMETERS
.ft R
.TP 1i
newcomm
New communicator (handle).
.ft R
.TP 1i
IERROR
Fortran only: Error status (integer).
.SH DESCRIPTION
.ft R
This function partitions the group associated with \fIcomm\fP into disjoint subgroups, based on
the type specified by \fIsplit_type\fP. Each subgroup contains all processes of the same type.
Within each subgroup, the processes are ranked in the order defined by the value of the
argument \fIkey\fP, with ties broken according to their rank in the old group. A new communicator
is created for each subgroup and returned in newcomm. This is a collective call;
all processes must provide the same \fIsplit_type\fP, but each process is permitted to provide
different values for key. An exception to this rule is that a process may supply the type
value MPI_UNDEFINED, in which case newcomm returns MPI_COMM_NULL.
.SH SPLIT TYPES
.ft R
.TP 1i
MPI_COMM_TYPE_SHARED
This type splits the communicator into subcommunicators, each of which can create a shared memory region.
.SH NOTES
.ft R
This is an extremely powerful mechanism for
dividing a single communicating group of processes into k subgroups, with k
chosen implicitly by the user (by the number of colors asserted over all
the processes). Each resulting communicator will be nonoverlapping. Such a division could be useful for defining a hierarchy of computations, such as for multigrid or linear algebra.
.sp
Multiple calls to MPI_Comm_split_type can be used to overcome the requirement that any call have no overlap of the resulting communicators (each process is of only one color per call). In this way, multiple overlapping communication structures can be created. Creative use of the color and key in such splitting operations is encouraged.
.sp
Note that keys need not be unique. It is MPI_Comm_split_type's responsibility to sort processes in ascending order according to this key, and to break ties in a consistent way. If all the keys are specified in the same way, then all the processes in a given color will have the same relative rank order as they did in their parent group. (In general, they will have different ranks.)
.sp
Essentially, making the key value zero for all processes of a given split_type means that one needn't really pay attention to the rank-order of the processes in the new communicator.
.SH ERRORS
Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument.
.sp
Before the error value is returned, the current MPI error handler is
called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error.
.SH SEE ALSO
.ft R
.sp
MPI_Comm_create
.br
MPI_Intercomm_create
.br
MPI_Comm_dup
.br
MPI_Comm_free
.br
MPI_Comm_split

121
ompi/mpi/man/man3/MPI_Dist_graph_create.3in Обычный файл
Просмотреть файл

@ -0,0 +1,121 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Dist_graph_create 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Dist_graph_create \fP \- Makes a new communicator to which topology information has been attached.
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Dist_graph_create(MPI_Comm \fIcomm_old\fP, int\fI n\fP, const int\fI sources[]\fP,
const int\fI degrees[]\fP, const int\fI destinations\fP[], const int\fI weights\fP[],
MPI_Info info, int\fI reorder\fP, MPI_Comm\fI *comm_dist_graph\fP)
.fi
.SH Fortran Syntax
.nf
INCLUDE 'mpif.h'
MPI_DIST_GRAPH_CREATE(\fICOMM_OLD, N, SOURCES, DEGREES, DESTINATIONS, WEIGHTS,
INFO, REORDER, COMM_DIST_GRAPH, IERROR\fP)
        INTEGER \fICOMM_OLD, N, SOURCES(*), DEGREES(*), DESTINATIONS(*), WEIGHTS(*), INFO\fP
INTEGER \fICOMM_DIST_GRAPH, IERROR\fP
LOGICAL \fIREORDER\fP
.fi
.SH INPUT PARAMETERS
.ft R
.TP 1i
comm_old
Input communicator without topology (handle).
.TP 1i
n
Number of source nodes for which this process specifies edges (non-negative integer).
.TP 1i
sources
Array containing the \fIn\fP source nodes for which this process specifies edges (array of non-negative integers).
.TP 1i
degrees
Array specifying the number of destinations for each source node in the source node array (array of non-negative integers).
.TP 1i
destinations
Destination nodes for the source nodes in the source node array (array of non-negative integers).
.TP 1i
weights
Weights for source to destination edges (array of non-negative integers).
.TP 1i
info
Hints on optimization and interpretation of weights (handle).
.TP 1i
reorder
Ranking may be reordered (true) or not (false) (logical).
.SH OUTPUT PARAMETERS
.ft R
.TP 1i
comm_dist_graph
Communicator with distributed graph topology added (handle).
.ft R
.TP 1i
IERROR
Fortran only: Error status (integer).
.SH DESCRIPTION
.ft R
MPI_Dist_graph_create creates a new communicator \fIcomm_dist_graph\fP with distributed
graph topology and returns a handle to the new communicator. The number of processes in
\fIcomm_dist_graph\fP is identical to the number of processes in \fIcomm_old\fP. Concretely, each process calls the
constructor with a set of directed (source,destination) communication edges as described below.
Every process passes an array of \fIn\fP source nodes in the \fIsources\fP array. For each source node, a
non-negative number of destination nodes is specified in the \fIdegrees\fP array. The destination
nodes are stored in the corresponding consecutive segment of the \fIdestinations\fP array. More
precisely, if the i-th node in sources is s, this specifies \fIdegrees\fP[i] \fIedges\fP (s,d) with d of the j-th
such edge stored in \fIdestinations\fP[\fIdegrees\fP[0]+...+\fIdegrees\fP[i-1]+j]. The weight of this edge is
stored in \fIweights\fP[\fIdegrees\fP[0]+...+\fIdegrees\fP[i-1]+j]. Both the \fIsources\fP and the \fIdestinations\fP arrays
may contain the same node more than once, and the order in which nodes are listed as
destinations or sources is not significant. Similarly, different processes may specify edges
with the same source and destination nodes. Source and destination nodes must be process
ranks of comm_old. Different processes may specify different numbers of source and
destination nodes, as well as different source to destination edges. This allows a fully distributed
specification of the communication graph. Isolated processes (i.e., processes with
no outgoing or incoming edges, that is, processes that do not occur as source or destination
node in the graph specification) are allowed. The call to MPI_Dist_graph_create is collective.
If reorder = false, all processes will have the same rank in comm_dist_graph as in
comm_old. If reorder = true then the MPI library is free to remap to other processes (of
comm_old) in order to improve communication on the edges of the communication graph.
The weight associated with each edge is a hint to the MPI library about the amount or
intensity of communication on that edge, and may be used to compute a "best" reordering.
.SH WEIGHTS
.ft R
Weights are specified as non-negative integers and can be used to influence the process
remapping strategy and other internal MPI optimizations. For instance, approximate count
arguments of later communication calls along specific edges could be used as their edge
weights. Multiplicity of edges can likewise indicate more intense communication between
pairs of processes. However, the exact meaning of edge weights is not specified by the MPI
standard and is left to the implementation. An application can supply the special value
MPI_UNWEIGHTED for the weight array to indicate that all edges have the same (effectively no)
weight. It is erroneous to supply MPI_UNWEIGHTED for some but not
all processes of comm_old. If the graph is weighted but \fIn\fP = 0, then MPI_WEIGHTS_EMPTY
or any arbitrary array may be passed to weights. Note that MPI_UNWEIGHTED and
MPI_WEIGHTS_EMPTY are not special weight values; rather they are special values for the
total array argument. In Fortran, MPI_UNWEIGHTED and MPI_WEIGHTS_EMPTY are objects
like MPI_BOTTOM (not usable for initialization or assignment). See MPI-3 § 2.5.4.
.SH ERRORS
Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument.
.sp
Before the error value is returned, the current MPI error handler is
called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error.
.SH SEE ALSO
.ft R
.sp
MPI_Dist_graph_create_adjacent
MPI_Dist_graph_neighbors
MPI_Dist_graph_neighbors_count

Просмотреть файл

@ -0,0 +1,115 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Dist_graph_create_adjacent 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Dist_graph_create_adjacent \fP \- Makes a new communicator to which topology information has been attached.
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Dist_graph_create_adjacent(MPI_Comm \fIcomm_old\fP, int\fI indegree\fP, const int\fI sources[]\fP,
const int\fI sourceweights[]\fP, int\fI outdegree\fP, const int\fI destinations\fP[], const int\fI destweights\fP[],
MPI_Info info, int\fI reorder\fP, MPI_Comm\fI *comm_dist_graph\fP)
.fi
.SH Fortran Syntax
.nf
INCLUDE 'mpif.h'
MPI_DIST_GRAPH_CREATE_ADJACENT(\fICOMM_OLD, INDEGREE, SOURCES, SOURCEWEIGHTS, OUTDEGREE,
DESTINATIONS, DESTWEIGHTS, INFO, REORDER, COMM_DIST_GRAPH, IERROR\fP)
INTEGER \fICOMM_OLD, INDEGREE, SOURCES(*), SOURCEWEIGHTS(*), OUTDEGREE, DESTINATIONS(*), DESTWEIGHTS(*), INFO\fP
INTEGER \fICOMM_DIST_GRAPH, IERROR\fP
LOGICAL \fIREORDER\fP
.fi
.SH INPUT PARAMETERS
.ft R
.TP 1i
comm_old
Input communicator without topology (handle).
.TP 1i
indegree
Size of \fIsources\fP and \fIsourceweights\fP arrays (non-negative integer).
.TP 1i
sources
Ranks of processes for which the calling process is a destination (array of non-negative integers).
.TP 1i
sourceweights
Weights of the edges into the calling process (array of non-negative integers).
.TP 1i
outdegree
Size of \fIdestinations\fP and \fIdestweights\fP arrays (non-negative integer).
.TP 1i
destinations
Ranks of processes for which the calling process is a source (array of non-negative integers).
.TP 1i
destweights
Weights of the edges out of the calling process (array of non-negative integers).
.TP 1i
info
Hints on optimization and interpretation of weights (handle).
.TP 1i
reorder
Ranking may be reordered (true) or not (false) (logical).
.SH OUTPUT PARAMETERS
.ft R
.TP 1i
comm_dist_graph
Communicator with distributed graph topology added (handle).
.ft R
.TP 1i
IERROR
Fortran only: Error status (integer).
.SH DESCRIPTION
.ft R
MPI_Dist_graph_create_adjacent creates a new communicator \fIcomm_dist_graph\fP with distributed
graph topology and returns a handle to the new communicator. The number of processes in
\fIcomm_dist_graph\fP is identical to the number of processes in \fIcomm_old\fP. Each process passes all
information about its incoming and outgoing edges in the virtual distributed graph topology.
The calling processes must ensure that each edge of the graph is described in the source
and in the destination process with the same weights. If there are multiple edges for a given
(source,dest) pair, then the sequence of the weights of these edges does not matter. The
complete communication topology is the combination of all edges shown in the \fIsources\fP arrays
of all processes in comm_old, which must be identical to the combination of all edges shown
in the \fIdestinations\fP arrays. Source and destination ranks must be process ranks of comm_old.
This allows a fully distributed specification of the communication graph. Isolated processes
(i.e., processes with no outgoing or incoming edges, that is, processes that have specified
indegree and outdegree as zero and thus do not occur as source or destination rank in the
graph specification) are allowed. The call to MPI_Dist_graph_create_adjacent is collective.
.SH WEIGHTS
.ft R
Weights are specified as non-negative integers and can be used to influence the process
remapping strategy and other internal MPI optimizations. For instance, approximate count
arguments of later communication calls along specific edges could be used as their edge
weights. Multiplicity of edges can likewise indicate more intense communication between
pairs of processes. However, the exact meaning of edge weights is not specified by the MPI
standard and is left to the implementation. An application can supply the special value
MPI_UNWEIGHTED for the weight array to indicate that all edges have the same (effectively
no) weight. It is erroneous to supply MPI_UNWEIGHTED for some but not all processes of
comm_old. If the graph is weighted but \fIindegree\fP or \fIoutdegree\fP is zero, then
MPI_WEIGHTS_EMPTY or any arbitrary array may be passed to sourceweights or destweights
respectively. Note that MPI_UNWEIGHTED and MPI_WEIGHTS_EMPTY are not special weight values;
rather they are special values for the total array argument. In Fortran, MPI_UNWEIGHTED
and MPI_WEIGHTS_EMPTY are objects like MPI_BOTTOM (not usable for initialization or
assignment). See MPI-3 § 2.5.4.
.SH ERRORS
Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument.
.sp
Before the error value is returned, the current MPI error handler is
called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error.
.SH SEE ALSO
.ft R
.sp
MPI_Dist_graph_create
MPI_Dist_graph_neighbors
MPI_Dist_graph_neighbors_count

Просмотреть файл

@ -0,0 +1,81 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Dist_graph_neighbors 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Dist_graph_neighbors \fP \- Returns the neighbors of the calling process in a distributed graph topology.
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Dist_graph_neighbors(MPI_Comm \fIcomm\fP, int \fImaxindegree\fP, int \fIsources\fP[], int \fIsourceweights\fP[],
int \fImaxoutdegree\fP, int \fIdestinations\fP[], int \fIdestweights\fP[])
.fi
.SH Fortran Syntax
.nf
INCLUDE 'mpif.h'
MPI_DIST_GRAPH_NEIGHBORS(COMM, MAXINDEGREE, SOURCES, SOURCEWEIGHTS,
MAXOUTDEGREE, DESTINATIONS, DESTWEIGHTS, IERROR)
INTEGER COMM, MAXINDEGREE, SOURCES(*), SOURCEWEIGHTS(*), MAXOUTDEGREE,
DESTINATIONS(*), DESTWEIGHTS(*), IERROR
.fi
.SH INPUT PARAMETERS
.ft R
.TP 1i
comm
Communicator with distributed graph topology (handle).
.TP 1i
maxindegree
Size of \fIsources\fP and \fIsourceweights\fP arrays (non-negative integer).
.TP 1i
maxoutdegree
Size of \fIdestinations\fP and \fIdestweights\fP arrays (non-negative integer).
.SH OUTPUT PARAMETERS
.ft R
.TP 1i
sources
Processes for which the calling process is a destination (array of non-negative integers).
.TP 1i
sourceweights
Weights of the edges into the calling process (array of non-negative integers).
.TP 1i
destinations
Processes for which the calling process is a source (array of non-negative integers).
.TP 1i
destweights
Weights of the edges out of the calling process (array of non-negative integers).
.ft R
.TP 1i
IERROR
Fortran only: Error status (integer).
.SH DESCRIPTION
.ft R
MPI_Dist_graph_neighbors returns the source and destination ranks in a distributed graph topology
for the calling process. This call will return up to \fImaxindegree\fP source ranks in the \fIsources\fP array
and up to \fImaxoutdegree\fP destination ranks in the \fIdestinations\fP array. If weights were
specified at the time of the communicator's creation then the associated weights
are returned in the \fIsourceweights\fP and \fI destweights\fP arrays. If the communicator
was created with MPI_Dist_graph_create_adjacent then the order of the values in \fIsources\fP and
\fIdestinations\fP is identical to the input that was used by the process with the same rank in
comm_old in the creation call.
.fi
.SH ERRORS
Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument.
.sp
Before the error value is returned, the current MPI error handler is
called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error.
.SH SEE ALSO
.ft R
.sp
MPI_Dist_graph_neighbors_count

Просмотреть файл

@ -0,0 +1,62 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Dist_graph_neighbors_count 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Dist_graph_neighbors_count \fP \- Returns the number of in and out edges for the calling processes in a distributed graph topology and a flag indicating whether the distributed graph is weighted.
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Dist_graph_neighbors_count(MPI_Comm \fIcomm\fP, int\fI *indegree\fP,
int\fI *outdegree\fP, int\fI *weighted\fP)
.fi
.SH Fortran Syntax
.nf
INCLUDE 'mpif.h'
MPI_DIST_GRAPH_NEIGHBORS_COUNT(\fICOMM, INDEGREE, OUTDEGREE, WEIGHTED, IERROR\fP)
INTEGER \fICOMM, INDEGREE, OUTDEGREE, IERROR\fP
LOGICAL \fIWEIGHTED\fP
.fi
.SH INPUT PARAMETERS
.ft R
.TP 1i
comm
Communicator with distributed graph topology (handle).
.SH OUTPUT PARAMETERS
.ft R
.TP 1i
indegree
Number of edges into this process (non-negative integer).
.TP 1i
outdegree
Number of edges out of this process (non-negative integer).
.TP 1i
weighted
False if MPI_UNWEIGHTED was supplied during creation, true otherwise (logical).
.ft R
.TP 1i
IERROR
Fortran only: Error status (integer).
.SH DESCRIPTION
.ft R
MPI_Dist_graph_neighbors_count and MPI_Dist_graph_neighbors provide adjacency information for a distributed graph topology. MPI_Dist_graph_neighbors_count returns the number of sources and destinations for the calling process.
.SH ERRORS
Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument.
.sp
Before the error value is returned, the current MPI error handler is
called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error.
.SH SEE ALSO
.ft R
.sp
MPI_Dist_graph_neighbors

Просмотреть файл

@ -1,10 +1,12 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Exscan 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Exscan\fP \- Computes an exclusive scan (partial reduction)
\fBMPI_Exscan, MPI_Iexscan\fP \- Computes an exclusive scan (partial reduction)
.SH SYNTAX
.ft R
@ -12,9 +14,13 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Exscan(void *\fIsendbuf\fP, void *\fIrecvbuf\fP, int \fIcount\fP,
int MPI_Exscan(const void *\fIsendbuf\fP, void *\fIrecvbuf\fP, int \fIcount\fP,
MPI_Datatype \fIdatatype\fP, MPI_Op \fIop\fP, MPI_Comm \fIcomm\fP)
int MPI_Iexscan(const void *\fIsendbuf\fP, void *\fIrecvbuf\fP, int \fIcount\fP,
MPI_Datatype \fIdatatype\fP, MPI_Op \fIop\fP, MPI_Comm \fIcomm\fP,
MPI_Request \fI*request\fP)
.fi
.SH Fortran Syntax
.nf
@ -23,6 +29,10 @@ MPI_EXSCAN(\fISENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, IERROR\fP)
<type> \fISENDBUF(*), RECVBUF(*)\fP
INTEGER \fICOUNT, DATATYPE, OP, COMM, IERROR\fP
MPI_IEXSCAN(\fISENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, REQUEST, IERROR\fP)
<type> \fISENDBUF(*), RECVBUF(*)\fP
INTEGER \fICOUNT, DATATYPE, OP, COMM, REQUEST, IERROR\fP
.fi
.SH C++ Syntax
.nf
@ -55,6 +65,9 @@ Communicator (handle).
.TP 1i
recvbuf
Receive buffer (choice).
.TP 1i
request
Request (handle, non-blocking only).
.ft R
.TP 1i
IERROR

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.nf
C Syntax
#include <mpi.h>
int MPI_File_delete(char \fI*filename\fP, MPI_Info \fIinfo\fP)
int MPI_File_delete(const char \fI*filename\fP, MPI_Info \fIinfo\fP)
.fi
.SH Fortran Syntax

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.nf
C Syntax
#include <mpi.h>
int MPI_File_iwrite(MPI_File \fIfh\fP, void \fI*buf\fP, int \fIcount\fP,
int MPI_File_iwrite(MPI_File \fIfh\fP, const void \fI*buf\fP, int \fIcount\fP,
MPI_Datatype \fIdatatype\fP, MPI_Request \fI*request\fP)
.fi

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -11,7 +13,7 @@
C Syntax
#include <mpi.h>
int MPI_File_iwrite_at(MPI_File \fIfh\fP, MPI_Offset \fIoffset\fP,
void \fI*buf\fP, int \fIcount\fP, MPI_Datatype \fIdatatype\fP, MPI_Request \fI*request\fP)
const void \fI*buf\fP, int \fIcount\fP, MPI_Datatype \fIdatatype\fP, MPI_Request \fI*request\fP)
.fi
.SH Fortran Syntax (see FORTRAN 77 NOTES)

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -12,7 +14,7 @@
.nf
C Syntax
#include <mpi.h>
int MPI_File_(MPI_File \fIfh\fP, void \fI*buf\fP, int \fIcount\fP, MPI_Datatype
int MPI_File_(MPI_File \fIfh\fP, const void \fI*buf\fP, int \fIcount\fP, MPI_Datatype
\fIdatatype\fP, MPI_Request \fI*request\fP)
.fi

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -9,7 +11,7 @@
.nf
C Syntax
#include <mpi.h>
int MPI_File_open(MPI_Comm \fIcomm\fP, char \fI*filename\fP,
int MPI_File_open(MPI_Comm \fIcomm\fP, const char \fI*filename\fP,
int \fIamode\fP, MPI_Info \fIinfo\fP,
MPI_File \fI*fh\fP)

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -11,7 +13,7 @@ C Syntax
#include <mpi.h>
int MPI_File_set_view(MPI_File \fIfh\fP, MPI_Offset \fIdisp\fP,
MPI_Datatype \fIetype\fP, MPI_Datatype \fIfiletype\fP,
char \fI*datarep\fP, MPI_Info \fIinfo\fP)
const char \fI*datarep\fP, MPI_Info \fIinfo\fP)
.fi
.SH Fortran Syntax (see FORTRAN 77 NOTES)

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.nf
C Syntax
#include <mpi.h>
int MPI_File_write(MPI_File \fIfh\fP, void \fI*buf\fP,
int MPI_File_write(MPI_File \fIfh\fP, const void \fI*buf\fP,
int \fIcount\fP, MPI_Datatype \fIdatatype\fP,
MPI_Status \fI*status\fP)

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.nf
C Syntax
#include <mpi.h>
int MPI_File_write_all(MPI_File \fIfh\fP, void \fI*buf\fP,
int MPI_File_write_all(MPI_File \fIfh\fP, const void \fI*buf\fP,
int \fIcount\fP, MPI_Datatype \fIdatatype\fP, MPI_Status \fI*status\fP)
.fi

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.nf
C Syntax
#include <mpi.h>
int MPI_File_write_all_begin(MPI_File \fIfh\fP, void \fI*buf\fP,
int MPI_File_write_all_begin(MPI_File \fIfh\fP, const void \fI*buf\fP,
int \fIcount\fP, MPI_Datatype \fIdatatype\fP)
.fi

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.nf
C Syntax
#include <mpi.h>
int MPI_File_write_all_end(MPI_File \fIfh\fP, void \fI*buf\fP, MPI_Status \fI*status\fP)
int MPI_File_write_all_end(MPI_File \fIfh\fP, const void \fI*buf\fP, MPI_Status \fI*status\fP)
.fi
.SH Fortran Syntax

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.nf
C Syntax
#include <mpi.h>
int MPI_File_write_at(MPI_File \fIfh\fP, MPI_Offset \fIoffset\fP, void \fI*buf\fP,
int MPI_File_write_at(MPI_File \fIfh\fP, MPI_Offset \fIoffset\fP, const void \fI*buf\fP,
int \fIcount\fP, MPI_Datatype \fIdatatype\fP, MPI_Status \fI*status\fP)
.fi

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.nf
C Syntax
#include <mpi.h>
int MPI_File_write_at_all(MPI_File \fIfh\fP, MPI_Offset \fIoffset\fP, void \fI*buf\fP,
int MPI_File_write_at_all(MPI_File \fIfh\fP, MPI_Offset \fIoffset\fP, const void \fI*buf\fP,
int \fIcount\fP, MPI_Datatype \fIdatatype\fP, MPI_Status \fI*status\fP)
.fi

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -11,7 +13,7 @@
C Syntax
#include <mpi.h>
int MPI_File_write_at_all_begin(MPI_File \fIfh\fP, MPI_Offset \fIoffset\fP,
void \fI*buf\fP, int \fIcount\fP, MPI_Datatype \fIdatatype\fP)
const void \fI*buf\fP, int \fIcount\fP, MPI_Datatype \fIdatatype\fP)
.fi
.SH Fortran Syntax (see FORTRAN 77 NOTES)

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,8 +12,8 @@
.nf
C Syntax
#include <mpi.h>
int MPI_File_write_at_all_end(MPI_File \fIfh\fP, void *buf,
MPI_Status *status)
int MPI_File_write_at_all_end(MPI_File \fIfh\fP, const void \fI*buf\fP,
MPI_Status \fI*status\fP)
.fi
.SH Fortran Syntax

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
C Syntax
.nf
#include <mpi.h>
int MPI_File_write_ordered(MPI_File \fIfh\fP, void \fI*buf\fP,
int MPI_File_write_ordered(MPI_File \fIfh\fP, const void \fI*buf\fP,
int \fIcount\fP, MPI_Datatype \fIdatatype\fP,
MPI_Status \fI*status\fP)

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.nf
C Syntax
#include <mpi.h>
int MPI_File_write_ordered_begin(MPI_File \fIfh\fP, void \fI*buf\fP,
int MPI_File_write_ordered_begin(MPI_File \fIfh\fP, const void \fI*buf\fP,
int \fIcount\fP, MPI_Datatype \fIdatatype\fP)
.fi

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.nf
C Syntax
#include <mpi.h>
int MPI_File_write_ordered_end(MPI_File \fIfh\fP, void \fI*buf\fP,
int MPI_File_write_ordered_end(MPI_File \fIfh\fP, const void \fI*buf\fP,
MPI_Status \fI*status\fP)
.fi

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.nf
C Syntax
#include <mpi.h>
int MPI_File_write_shared(MPI_File \fIfh\fP, void \fI*buf\fP, int \fIcount\fP,
int MPI_File_write_shared(MPI_File \fIfh\fP, const void \fI*buf\fP, int \fIcount\fP,
MPI_Datatype \fIdatatype\fP, MPI_Status \fI*status\fP)
.fi

Просмотреть файл

@ -1,19 +1,25 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Gather 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Gather\fP \- Gathers values from a group of processes.
\fBMPI_Gather, MPI_Igather\fP \- Gathers values from a group of processes.
.SH SYNOPSIS
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Gather(void \fI*sendbuf\fP, int\fI sendcount\fP, MPI_Datatype\fI sendtype\fP,
int MPI_Gather(const void \fI*sendbuf\fP, int\fI sendcount\fP, MPI_Datatype\fI sendtype\fP,
void\fI *recvbuf\fP, int\fI recvcount\fP, MPI_Datatype\fI recvtype\fP, int \fIroot\fP,
MPI_Comm\fI comm\fP)
int MPI_Igather(const void \fI*sendbuf\fP, int\fI sendcount\fP, MPI_Datatype\fI sendtype\fP,
void\fI *recvbuf\fP, int\fI recvcount\fP, MPI_Datatype\fI recvtype\fP, int \fIroot\fP,
MPI_Comm\fI comm\fP, MPI_Request \fI*request\fP)
.fi
.SH Fortran Syntax
.nf
@ -24,6 +30,12 @@ MPI_GATHER(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT,
INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, ROOT\fP
INTEGER \fICOMM, IERROR\fP
MPI_IGATHER(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT,
RECVTYPE, ROOT, COMM, REQUEST, IERROR\fP)
<type> \fISENDBUF(*), RECVBUF(*)\fP
INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE, ROOT\fP
INTEGER \fICOMM, REQUEST, IERROR\fP
.fi
.SH C++ Syntax
.nf
@ -63,6 +75,9 @@ Communicator (handle).
.TP 1i
recvbuf
Address of receive buffer (choice, significant only at root).
.TP 1i
request
Request (handle, non-blocking only).
.ft R
.TP 1i
IERROR

Просмотреть файл

@ -1,19 +1,25 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Gatherv 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Gatherv\fP \- Gathers varying amounts of data from all processes to the root process
\fBMPI_Gatherv, MPI_Igatherv\fP \- Gathers varying amounts of data from all processes to the root process
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Gatherv(void *\fIsendbuf\fP, int\fI sendcount\fP, MPI_Datatype\fI sendtype\fP,
void\fI *recvbuf\fP, int\fI recvcounts[]\fP, int\fI displs[]\fP, MPI_Datatype\fI recvtype\fP,
int MPI_Gatherv(const void *\fIsendbuf\fP, int\fI sendcount\fP, MPI_Datatype\fI sendtype\fP,
void\fI *recvbuf\fP, const int\fI recvcounts[]\fP, const int\fI displs[]\fP, MPI_Datatype\fI recvtype\fP,
int \fIroot\fP, MPI_Comm\fI comm\fP)
int MPI_Igatherv(const void *\fIsendbuf\fP, int\fI sendcount\fP, MPI_Datatype\fI sendtype\fP,
void\fI *recvbuf\fP, const int\fI recvcounts[]\fP, const int\fI displs[]\fP, MPI_Datatype\fI recvtype\fP,
int \fIroot\fP, MPI_Comm\fI comm\fP, MPI_Request \fI*request\fP)
.fi
.SH Fortran Syntax
.nf
@ -24,6 +30,12 @@ MPI_GATHERV(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNTS,
INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNTS(*), DISPLS(*)\fP
INTEGER \fIRECVTYPE, ROOT, COMM, IERROR\fP
MPI_IGATHERV(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNTS,
DISPLS, RECVTYPE, ROOT, COMM, REQUEST, IERROR\fP)
<type> \fISENDBUF(*), RECVBUF(*)\fP
INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNTS(*), DISPLS(*)\fP
INTEGER \fIRECVTYPE, ROOT, COMM, REQUEST, IERROR\fP
.fi
.SH C++ Syntax
.nf
@ -68,6 +80,9 @@ Communicator (handle).
.TP 1i
recvbuf
Address of receive buffer (choice, significant only at root).
.TP 1i
request
Request (handle, non-blocking only).
.ft R
.TP 1i
IERROR

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines
@ -10,7 +12,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Get_address(void *\fIlocation\fP, MPI_Aint *\fIaddress\fP)
int MPI_Get_address(const void *\fIlocation\fP, MPI_Aint *\fIaddress\fP)
.fi
.SH Fortran Syntax

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Get_count 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
@ -9,7 +11,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Get_count(MPI_Status *\fIstatus\fP, MPI_Datatype\fI datatype\fP,
int MPI_Get_count(const MPI_Status *\fIstatus\fP, MPI_Datatype\fI datatype\fP,
int\fI *count\fP)
.fi

Просмотреть файл

@ -1,3 +1,4 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
@ -11,9 +12,9 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Get_elements(MPI_Status *\fIstatus\fP, MPI_Datatype\fI datatype\fP,
int MPI_Get_elements(const MPI_Status *\fIstatus\fP, MPI_Datatype\fI datatype\fP,
int\fI *count\fP)
int MPI_Get_elements_x(MPI_Status *\fIstatus\fP, MPI_Datatype\fI datatype\fP,
int MPI_Get_elements_x(const MPI_Status *\fIstatus\fP, MPI_Datatype\fI datatype\fP,
MPI_Count\fI *count\fP)
.fi

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,8 +12,8 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Graph_create(MPI_Comm \fIcomm_old\fP, int\fI nnodes\fP, int\fI index[]\fP,
int\fI edges[]\fP, int\fI reorder\fP, MPI_Comm\fI *comm_graph\fP)
int MPI_Graph_create(MPI_Comm \fIcomm_old\fP, int\fI nnodes\fP, const int\fI index[]\fP,
const int\fI edges[]\fP, int\fI reorder\fP, MPI_Comm\fI *comm_graph\fP)
.fi
.SH Fortran Syntax

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,8 +12,8 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Graph_map(MPI_Comm \fIcomm\fP, int\fI nnodes\fP, int\fI index\fP[],
int\fI edges\fP[], int\fI *newrank\fP)
int MPI_Graph_map(MPI_Comm \fIcomm\fP, int\fI nnodes\fP, const int\fI index\fP[],
const int\fI edges\fP[], int\fI *newrank\fP)
.fi
.SH Fortran Syntax

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Group_excl(MPI_Group \fIgroup\fP, int\fI n\fP, int\fI ranks\fP[],
int MPI_Group_excl(MPI_Group \fIgroup\fP, int\fI n\fP, const int\fI ranks\fP[],
MPI_Group\fI *newgroup\fP)
.fi

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Group_incl(MPI_Group \fIgroup\fP, int\fI n\fP, int\fI ranks\fP[],
int MPI_Group_incl(MPI_Group \fIgroup\fP, int\fI n\fP, const int\fI ranks\fP[],
MPI_Group\fI *newgroup\fP)
.fi

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -11,7 +13,7 @@
.nf
#include <mpi.h>
int MPI_Group_translate_ranks(MPI_Group \fIgroup1\fP, int\fI n\fP,
int\fI ranks1\fP[], MPI_Group\fI group2\fP, int\fI ranks2\fP[])
const int\fI ranks1\fP[], MPI_Group\fI group2\fP, int\fI ranks2\fP[])
.fi
.SH Fortran Syntax

1
ompi/mpi/man/man3/MPI_Iallgather.3in Обычный файл
Просмотреть файл

@ -0,0 +1 @@
.so man3/MPI_Allgather.3

1
ompi/mpi/man/man3/MPI_Iallgatherv.3in Обычный файл
Просмотреть файл

@ -0,0 +1 @@
.so man3/MPI_Allgatherv.3

1
ompi/mpi/man/man3/MPI_Iallreduce.3in Обычный файл
Просмотреть файл

@ -0,0 +1 @@
.so man3/MPI_Allreduce.3

1
ompi/mpi/man/man3/MPI_Ialltoall.3in Обычный файл
Просмотреть файл

@ -0,0 +1 @@
.so man3/MPI_Alltoall.3

1
ompi/mpi/man/man3/MPI_Ialltoallv.3in Обычный файл
Просмотреть файл

@ -0,0 +1 @@
.so man3/MPI_Alltoallv.3

1
ompi/mpi/man/man3/MPI_Ialltoallw.3in Обычный файл
Просмотреть файл

@ -0,0 +1 @@
.so man3/MPI_Alltoallw.3

1
ompi/mpi/man/man3/MPI_Ibcast.3in Обычный файл
Просмотреть файл

@ -0,0 +1 @@
.so man3/MPI_Bcast.3

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Ibsend(void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP,
int MPI_Ibsend(const void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP,
int\fI dest\fP, int\fI tag\fP, MPI_Comm\fI comm\fP, MPI_Request\fI *request\fP)
.fi

1
ompi/mpi/man/man3/MPI_Iexscan.3in Обычный файл
Просмотреть файл

@ -0,0 +1 @@
.so man3/MPI_Exscan.3

1
ompi/mpi/man/man3/MPI_Igather.3in Обычный файл
Просмотреть файл

@ -0,0 +1 @@
.so man3/MPI_Gather.3

1
ompi/mpi/man/man3/MPI_Igatherv.3in Обычный файл
Просмотреть файл

@ -0,0 +1 @@
.so man3/MPI_Gatherv.3

Просмотреть файл

@ -0,0 +1 @@
.so man3/MPI_Neighbor_allgather.3

Просмотреть файл

@ -0,0 +1 @@
.so man3/MPI_Neighbor_allgatherv.3

Просмотреть файл

@ -0,0 +1 @@
.so man3/MPI_Neighbor_alltoall.3

Просмотреть файл

@ -0,0 +1 @@
.so man3/MPI_Neighbor_alltoallv.3

Просмотреть файл

@ -0,0 +1 @@
.so man3/MPI_Neighbor_alltoallw.3

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Info_delete(MPI_Info \fIinfo\fP, char \fI*key\fP)
int MPI_Info_delete(MPI_Info \fIinfo\fP, const char \fI*key\fP)
.fi
.SH Fortran Syntax

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Info_get(MPI_Info \fIinfo\fP, char \fI*key\fP, int \fIvaluelen\fP, char \fI*value\fP, int *\fIflag\fP)
int MPI_Info_get(MPI_Info \fIinfo\fP, const char \fI*key\fP, int \fIvaluelen\fP, char \fI*value\fP, int *\fIflag\fP)
.fi
.SH Fortran Syntax

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Info_get_valuelen(MPI_Info \fIinfo\fP, char \fI*key\fP,
int MPI_Info_get_valuelen(MPI_Info \fIinfo\fP, const char \fI*key\fP,
int \fI*valuelen\fP, int \fI*flag\fP)
.fi

1
ompi/mpi/man/man3/MPI_Ireduce.3in Обычный файл
Просмотреть файл

@ -0,0 +1 @@
.so man3/MPI_Reduce.3

1
ompi/mpi/man/man3/MPI_Ireduce_scatter.3in Обычный файл
Просмотреть файл

@ -0,0 +1 @@
.so man3/MPI_Reduce_scatter.3

Просмотреть файл

@ -0,0 +1 @@
.so man3/MPI_Reduce_scatter_block.3

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Irsend(void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, int\fI dest\fP,
int MPI_Irsend(const void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, int\fI dest\fP,
int\fI tag\fP, MPI_Comm\fI comm\fP, MPI_Request\fI *request\fP)
.fi

1
ompi/mpi/man/man3/MPI_Iscan.3in Обычный файл
Просмотреть файл

@ -0,0 +1 @@
.so man3/MPI_Scan.3

1
ompi/mpi/man/man3/MPI_Iscatter.3in Обычный файл
Просмотреть файл

@ -0,0 +1 @@
.so man3/MPI_Scatter.3

1
ompi/mpi/man/man3/MPI_Iscatterv.3in Обычный файл
Просмотреть файл

@ -0,0 +1 @@
.so man3/MPI_Scatterv.3

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2007-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Isend(void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, int\fI dest\fP,
int MPI_Isend(const void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, int\fI dest\fP,
int\fI tag\fP, MPI_Comm\fI comm\fP, MPI_Request\fI *request\fP)
.fi

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Issend(void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, int\fI dest\fP,
int MPI_Issend(const void *\fIbuf\fP, int\fI count\fP, MPI_Datatype\fI datatype\fP, int\fI dest\fP,
int\fI tag\fP, MPI_Comm\fI comm\fP, MPI_Request\fI *request\fP)
.fi

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2007-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -14,7 +16,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Lookup_name(char *\fIservice_name\fP, MPI_Info \fIinfo\fP,
int MPI_Lookup_name(const char *\fIservice_name\fP, MPI_Info \fIinfo\fP,
char *\fIport_name\fP)
.fi

Просмотреть файл

@ -1,30 +1,41 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Neighbor_allgather 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Neighbor_allgather\fP \- Gathers and distributes data from and to all neighbors
\fBMPI_Neighbor_allgather, MPI_Ineighbor_allgather\fP \- Gathers and distributes data from and to all neighbors
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Neighbor_allgather(void\fI *sendbuf\fP, int \fI sendcount\fP,
int MPI_Neighbor_allgather(const void\fI *sendbuf\fP, int \fI sendcount\fP,
MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, int\fI recvcount\fP,
MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP)
int MPI_Ineighbor_allgather(const void\fI *sendbuf\fP, int \fI sendcount\fP,
MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, int\fI recvcount\fP,
	MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP, MPI_Request \fI*request\fP)
.fi
.SH Fortran Syntax
.nf
INCLUDE 'mpif.h'
MPI_ALLGATHER(\fISENDBUF\fP,\fI SENDCOUNT\fP,\fI SENDTYPE\fP,\fI RECVBUF\fP,\fI RECVCOUNT\fP,\fI
MPI_NEIGHBOR_ALLGATHER(\fISENDBUF\fP,\fI SENDCOUNT\fP,\fI SENDTYPE\fP,\fI RECVBUF\fP,\fI RECVCOUNT\fP,\fI
RECVTYPE\fP,\fI COMM\fP,\fI IERROR\fP)
<type> \fISENDBUF\fP (*), \fIRECVBUF\fP (*)
INTEGER \fISENDCOUNT\fP,\fI SENDTYPE\fP,\fI RECVCOUNT\fP,\fI RECVTYPE\fP,\fI COMM\fP,
INTEGER \fIIERROR\fP
MPI_INEIGHBOR_ALLGATHER(\fISENDBUF\fP,\fI SENDCOUNT\fP,\fI SENDTYPE\fP,\fI RECVBUF\fP,\fI RECVCOUNT\fP,\fI
		RECVTYPE\fP,\fI COMM\fP,\fI REQUEST\fP,\fI IERROR\fP)
<type> \fISENDBUF\fP (*), \fIRECVBUF\fP (*)
INTEGER \fISENDCOUNT\fP,\fI SENDTYPE\fP,\fI RECVCOUNT\fP,\fI RECVTYPE\fP,\fI COMM\fP,
INTEGER \fIREQUEST, IERROR\fP
.fi
.SH INPUT PARAMETERS
.ft R
@ -55,6 +66,9 @@ Communicator (handle).
.TP 1i
recvbuf
Address of receive buffer (choice).
.TP 1i
request
Request (handle, non-blocking only).
.ft R
.TP 1i
IERROR

Просмотреть файл

@ -4,27 +4,38 @@
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Neighbor_allgatherv 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Neighbor_allgatherv\fP \- Gathers and distributes data from and to all neighbors. Each process may contribute a different amount of data.
\fBMPI_Neighbor_allgatherv, MPI_Ineighbor_allgatherv\fP \- Gathers and distributes data from and to all neighbors. Each process may contribute a different amount of data.
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Neighbor_allgatherv(void\fI *sendbuf\fP, int\fI sendcount\fP,
MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, int\fI recvcounts[]\fP,
int\fI displs[]\fP, MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP)
int MPI_Neighbor_allgatherv(const void\fI *sendbuf\fP, int\fI sendcount\fP,
MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, const int\fI recvcounts[]\fP,
const int\fI displs[]\fP, MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP)
int MPI_Ineighbor_allgatherv(const void\fI *sendbuf\fP, int\fI sendcount\fP,
MPI_Datatype\fI sendtype\fP, void\fI *recvbuf\fP, const int\fI recvcounts[]\fP,
const int\fI displs[]\fP, MPI_Datatype\fI recvtype\fP, MPI_Comm\fI comm\fP,
MPI_Request \fI*request\fP)
.fi
.SH Fortran Syntax
.nf
INCLUDE 'mpif.h'
MPI_ALLGATHERV(\fISENDBUF\fP,\fI SENDCOUNT\fP, \fISENDTYPE\fP,\fI RECVBUF\fP,
MPI_NEIGHBOR_ALLGATHERV(\fISENDBUF\fP,\fI SENDCOUNT\fP, \fISENDTYPE\fP,\fI RECVBUF\fP,
\fIRECVCOUNT\fP,\fI DISPLS\fP, \fIRECVTYPE\fP,\fI COMM\fP,\fI IERROR\fP)
<type> \fISENDBUF\fP(*), \fIRECVBUF\fP(*)
INTEGER \fISENDCOUNT\fP,\fI SENDTYPE\fP, \fIRECVCOUNT\fP(*),
INTEGER \fIDISPLS\fP(*),\fI RECVTYPE\fP,\fI COMM\fP,\fI IERROR\fP
MPI_INEIGHBOR_ALLGATHERV(\fISENDBUF\fP,\fI SENDCOUNT\fP, \fISENDTYPE\fP,\fI RECVBUF\fP,
\fIRECVCOUNT\fP,\fI DISPLS\fP, \fIRECVTYPE\fP,\fI COMM\fP,\fI REQUEST\fP,\fI IERROR\fP)
<type> \fISENDBUF\fP(*), \fIRECVBUF\fP(*)
INTEGER \fISENDCOUNT\fP,\fI SENDTYPE\fP, \fIRECVCOUNT\fP(*),
	INTEGER \fIDISPLS\fP(*),\fI RECVTYPE\fP,\fI COMM\fP,\fI REQUEST\fP,\fI IERROR\fP
.fi
.SH INPUT PARAMETERS
.ft R
@ -55,6 +66,9 @@ Communicator (handle).
.TP 1i
recvbuf
Address of receive buffer (choice).
.TP 1i
request
Request (handle, non-blocking only).
.ft R
.TP 1i
IERROR

Просмотреть файл

@ -1,3 +1,4 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
@ -5,7 +6,7 @@
.TH MPI_Neighbor_alltoall 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Neighbor_alltoall\fP \- All processes send data to neighboring processes in a virtual topology communicator
\fBMPI_Neighbor_alltoall, MPI_Ineighbor_alltoall\fP \- All processes send data to neighboring processes in a virtual topology communicator
.SH SYNTAX
.ft R
@ -13,10 +14,14 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Neighbor_alltoall(void *\fIsendbuf\fP, int \fIsendcount\fP,
int MPI_Neighbor_alltoall(const void *\fIsendbuf\fP, int \fIsendcount\fP,
MPI_Datatype \fIsendtype\fP, void *\fIrecvbuf\fP, int \fIrecvcount\fP,
MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP)
int MPI_Ineighbor_alltoall(const void *\fIsendbuf\fP, int \fIsendcount\fP,
MPI_Datatype \fIsendtype\fP, void *\fIrecvbuf\fP, int \fIrecvcount\fP,
MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP, MPI_Request \fI*request\fP)
.fi
.SH Fortran Syntax
.nf
@ -28,6 +33,13 @@ MPI_NEIGHBOR_ALLTOALL(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT,
INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE\fP
INTEGER \fICOMM, IERROR\fP
MPI_INEIGHBOR_ALLTOALL(\fISENDBUF, SENDCOUNT, SENDTYPE, RECVBUF, RECVCOUNT,
RECVTYPE, COMM, REQUEST, IERROR\fP)
<type> \fISENDBUF(*), RECVBUF(*)\fP
INTEGER \fISENDCOUNT, SENDTYPE, RECVCOUNT, RECVTYPE\fP
INTEGER \fICOMM, REQUEST, IERROR\fP
.fi
.SH INPUT PARAMETERS
.ft R
@ -55,6 +67,9 @@ Communicator over which data is to be exchanged (handle).
.TP 1.2i
recvbuf
Starting address of receive buffer (choice).
.TP 1i
request
Request (handle, non-blocking only).
.ft R
.TP 1.2i
IERROR

Просмотреть файл

@ -1,3 +1,4 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
@ -5,24 +6,30 @@
.TH MPI_Neighbor_alltoallv 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Neighbor_alltoallv\fP \- All processes send different amounts of data to, and receive different amounts of data from, all neighbors
\fBMPI_Neighbor_alltoallv, MPI_Ineighbor_alltoallv\fP \- All processes send different amounts of data to, and receive different amounts of data from, all neighbors
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Neighbor_alltoallv(void *\fIsendbuf\fP, int \fIsendcounts\fP[],
int \fIsdispls\f[]P, MPI_Datatype \fIsendtype\fP,
void *\fIrecvbuf\fP, int\fI recvcounts\fP[],
int \fIrdispls\fP[], MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP)
int MPI_Neighbor_alltoallv(const void *\fIsendbuf\fP, const int \fIsendcounts\fP[],
	const int \fIsdispls\fP[], MPI_Datatype \fIsendtype\fP,
void *\fIrecvbuf\fP, const int\fI recvcounts\fP[],
const int \fIrdispls\fP[], MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP)
int MPI_Ineighbor_alltoallv(const void *\fIsendbuf\fP, const int \fIsendcounts\fP[],
	const int \fIsdispls\fP[], MPI_Datatype \fIsendtype\fP,
void *\fIrecvbuf\fP, const int\fI recvcounts\fP[],
const int \fIrdispls\fP[], MPI_Datatype \fIrecvtype\fP, MPI_Comm \fIcomm\fP,
MPI_Request \fI*request\fP)
.fi
.SH Fortran Syntax
.nf
INCLUDE 'mpif.h'
MPI_ALLTOALLV(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPE,
MPI_NEIGHBOR_ALLTOALLV(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPE,
RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPE, COMM, IERROR\fP)
<type> \fISENDBUF(*), RECVBUF(*)\fP
@ -30,6 +37,14 @@ MPI_ALLTOALLV(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPE,
INTEGER \fIRECVCOUNTS(*), RDISPLS(*), RECVTYPE\fP
INTEGER \fICOMM, IERROR\fP
MPI_INEIGHBOR_ALLTOALLV(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPE,
RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPE, COMM, REQUEST, IERROR\fP)
<type> \fISENDBUF(*), RECVBUF(*)\fP
INTEGER \fISENDCOUNTS(*), SDISPLS(*), SENDTYPE\fP
INTEGER \fIRECVCOUNTS(*), RDISPLS(*), RECVTYPE\fP
INTEGER \fICOMM, REQUEST, IERROR\fP
.fi
.SH INPUT PARAMETERS
.ft R
@ -69,6 +84,9 @@ Communicator over which data is to be exchanged.
.TP 1.2i
recvbuf
Address of receive buffer.
.TP 1i
request
Request (handle, non-blocking only).
.ft R
.TP 1.2i
IERROR

Просмотреть файл

@ -1,3 +1,4 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
@ -5,7 +6,7 @@
.TH MPI_Neighbor_alltoallw 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Neighbor_alltoallw\fP \- All processes send data of different types to, and receive data of different types from, all processes
\fBMPI_Neighbor_alltoallw, MPI_Ineighbor_alltoallw\fP \- All processes send data of different types to, and receive data of different types from, all processes
.SH SYNTAX
.ft R
@ -13,16 +14,21 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Neighbor_alltoallw(void *\fIsendbuf\fP, int *\fIsendcounts\fP,
int *\fIsdispls\fP, MPI_Datatype *\fIsendtypes\fP,
void *\fIrecvbuf\fP, int *\fIrecvcounts\fP,
int *\fIrdispls\fP, MPI_Datatype *\fIrecvtypes\fP, MPI_Comm \fIcomm\fP)
int MPI_Neighbor_alltoallw(const void *\fIsendbuf\fP, const int \fIsendcounts\fP[],
const int \fIsdispls\fP[], const MPI_Datatype \fIsendtypes\fP[],
void *\fIrecvbuf\fP, const int \fIrecvcounts\fP[], const int \fIrdispls\fP[],
const MPI_Datatype \fIrecvtypes\fP[], MPI_Comm \fIcomm\fP)
int MPI_Ineighbor_alltoallw(const void *\fIsendbuf\fP, const int \fIsendcounts\fP[],
const int \fIsdispls\fP[], const MPI_Datatype \fIsendtypes\fP[],
void *\fIrecvbuf\fP, const int \fIrecvcounts\fP[], const int \fIrdispls\fP[],
const MPI_Datatype \fIrecvtypes\fP[], MPI_Comm \fIcomm\fP, MPI_Request \fI*request\fP)
.fi
.SH Fortran Syntax
.nf
INCLUDE 'mpif.h'
MPI_ALLTOALLW(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPES,
MPI_NEIGHBOR_ALLTOALLW(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPES,
RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPES, COMM, IERROR\fP)
<type> \fISENDBUF(*), RECVBUF(*)\fP
@ -30,6 +36,14 @@ MPI_ALLTOALLW(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPES,
INTEGER \fIRECVCOUNTS(*), RDISPLS(*), RECVTYPES(*)\fP
INTEGER \fICOMM, IERROR\fP
MPI_INEIGHBOR_ALLTOALLW(\fISENDBUF, SENDCOUNTS, SDISPLS, SENDTYPES,
RECVBUF, RECVCOUNTS, RDISPLS, RECVTYPES, COMM, REQUEST, IERROR\fP)
<type> \fISENDBUF(*), RECVBUF(*)\fP
INTEGER \fISENDCOUNTS(*), SDISPLS(*), SENDTYPES(*)\fP
INTEGER \fIRECVCOUNTS(*), RDISPLS(*), RECVTYPES(*)\fP
INTEGER \fICOMM, REQUEST, IERROR\fP
.fi
.SH INPUT PARAMETERS
.ft R
@ -70,6 +84,9 @@ Communicator over which data is to be exchanged.
.TP 1.2i
recvbuf
Address of receive buffer.
.TP 1i
request
Request (handle, non-blocking only).
.ft R
.TP 1.2i
IERROR

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Pack(void *\fIinbuf\fP, int\fI incount\fP, MPI_Datatype\fI datatype\fP,
int MPI_Pack(const void *\fIinbuf\fP, int\fI incount\fP, MPI_Datatype\fI datatype\fP,
void\fI *outbuf\fP, int\fI outsize\fP, int\fI *position\fP, MPI_Comm\fI comm\fP)
.fi

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -12,7 +14,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Pack_external(char *\fIdatarep\fP, void *\fIinbuf\fP,
int MPI_Pack_external(const char *\fIdatarep\fP, const void *\fIinbuf\fP,
int \fIincount\fP, MPI_Datatype\fI datatype\fP,
void *\fIoutbuf\fP, MPI_Aint \fIoutsize\fP,
MPI_Aint *\fIposition\fP)

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2007-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -14,8 +16,8 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Publish_name(char *\fIservice_name\fP, MPI_Info \fIinfo\fP,
char *\fIport_name\fP)
int MPI_Publish_name(const char *\fIservice_name\fP, MPI_Info \fIinfo\fP,
const char *\fIport_name\fP)
.fi
.SH Fortran Syntax

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.SH C Syntax
.nf
#include <mpi.h>
MPI_Put(void *\fIorigin_addr\fP, int \fIorigin_count\fP, MPI_Datatype
MPI_Put(const void *\fIorigin_addr\fP, int \fIorigin_count\fP, MPI_Datatype
\fIorigin_datatype\fP, int \fItarget_rank\fP, MPI_Aint \fItarget_disp\fP,
int \fItarget_count\fP, MPI_Datatype \fItarget_datatype\fP, MPI_Win \fIwin\fP)

Просмотреть файл

@ -1,17 +1,24 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Reduce 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Reduce\fP \- Reduces values on all processes within a group.
\fBMPI_Reduce, MPI_Ireduce\fP \- Reduces values on all processes within a group.
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Reduce(void *\fIsendbuf\fP, void *\fIrecvbuf\fP, int\fI count\fP,
MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, int\fI root\fP, MPI_Comm\fI comm\fP)
int MPI_Reduce(const void *\fIsendbuf\fP, void *\fIrecvbuf\fP, int\fI count\fP,
MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, int\fI root\fP,
MPI_Comm\fI comm\fP)
int MPI_Ireduce(const void *\fIsendbuf\fP, void *\fIrecvbuf\fP, int\fI count\fP,
MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, int\fI root\fP,
MPI_Comm\fI comm\fP, MPI_Request \fI*request\fP)
.fi
.SH Fortran Syntax
@ -22,6 +29,11 @@ MPI_REDUCE(\fISENDBUF, RECVBUF, COUNT, DATATYPE, OP, ROOT, COMM,
<type> \fISENDBUF(*), RECVBUF(*)\fP
INTEGER \fICOUNT, DATATYPE, OP, ROOT, COMM, IERROR\fP
MPI_IREDUCE(\fISENDBUF, RECVBUF, COUNT, DATATYPE, OP, ROOT, COMM,
REQUEST, IERROR\fP)
<type> \fISENDBUF(*), RECVBUF(*)\fP
INTEGER \fICOUNT, DATATYPE, OP, ROOT, COMM, REQUEST, IERROR\fP
.fi
.SH C++ Syntax
.nf
@ -57,6 +69,9 @@ Communicator (handle).
.TP 1i
recvbuf
Address of receive buffer (choice, significant only at root).
.TP 1i
request
Request (handle, non-blocking only).
.ft R
.TP 1i
IERROR

Просмотреть файл

@ -1,3 +1,5 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright 2009-2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright (c) 1996 Thinking Machines Corporation
@ -10,7 +12,7 @@
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Reduce_local(void *\fIinbuf\fP, void *\fIinoutbuf\fP, int\fI count\fP,
int MPI_Reduce_local(const void *\fIinbuf\fP, void *\fIinoutbuf\fP, int\fI count\fP,
MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP)
.fi

Просмотреть файл

@ -1,17 +1,22 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Reduce_scatter 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Reduce_scatter\fP \- Combines values and scatters the results.
\fBMPI_Reduce_scatter, MPI_Ireduce_scatter\fP \- Combines values and scatters the results.
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Reduce_scatter(void *\fIsendbuf\fP, void\fI *recvbuf\fP, int\fI recvcounts\fP[],
MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, MPI_Comm\fI comm\fP)
int MPI_Reduce_scatter(const void *\fIsendbuf\fP, void\fI *recvbuf\fP, const int\fI recvcounts\fP[],
MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, MPI_Comm\fI comm\fP)
int MPI_Ireduce_scatter(const void *\fIsendbuf\fP, void\fI *recvbuf\fP, const int\fI recvcounts\fP[],
MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, MPI_Comm\fI comm\fP, MPI_Request \fI*request\fP)
.fi
.SH Fortran Syntax
@ -22,6 +27,11 @@ MPI_REDUCE_SCATTER(\fISENDBUF, RECVBUF, RECVCOUNTS, DATATYPE, OP,
<type> \fISENDBUF(*), RECVBUF(*)\fP
INTEGER \fIRECVCOUNTS(*), DATATYPE, OP, COMM, IERROR \fP
MPI_IREDUCE_SCATTER(\fISENDBUF, RECVBUF, RECVCOUNTS, DATATYPE, OP,
COMM, REQUEST, IERROR\fP)
<type> \fISENDBUF(*), RECVBUF(*)\fP
INTEGER \fIRECVCOUNTS(*), DATATYPE, OP, COMM, REQUEST, IERROR \fP
.fi
.SH C++ Syntax
.nf
@ -55,6 +65,9 @@ Communicator (handle).
.TP 1i
recvbuf
Starting address of receive buffer (choice).
.TP 1i
request
Request (handle, non-blocking only).
.ft R
.TP 1i
IERROR

Просмотреть файл

@ -0,0 +1,110 @@
.\" -*- nroff -*-
.\" Copyright 2013 Los Alamos National Security, LLC. All rights reserved.
.\" Copyright 2010 Cisco Systems, Inc. All rights reserved.
.\" Copyright 2006-2008 Sun Microsystems, Inc.
.\" Copyright (c) 1996 Thinking Machines Corporation
.TH MPI_Reduce_scatter_block 3 "#OMPI_DATE#" "#PACKAGE_VERSION#" "#PACKAGE_NAME#"
.SH NAME
\fBMPI_Reduce_scatter_block, MPI_Ireduce_scatter_block\fP \- Combines values and scatters the results in blocks.
.SH SYNTAX
.ft R
.SH C Syntax
.nf
#include <mpi.h>
int MPI_Reduce_scatter_block(const void *\fIsendbuf\fP, void\fI *recvbuf\fP, int\fI recvcount\fP,
MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, MPI_Comm\fI comm\fP)
int MPI_Ireduce_scatter_block(const void *\fIsendbuf\fP, void\fI *recvbuf\fP, int\fI recvcount\fP,
MPI_Datatype\fI datatype\fP, MPI_Op\fI op\fP, MPI_Comm\fI comm\fP, MPI_Request \fI*request\fP)
.fi
.SH Fortran Syntax
.nf
INCLUDE 'mpif.h'
MPI_REDUCE_SCATTER_BLOCK(\fISENDBUF, RECVBUF, RECVCOUNT, DATATYPE, OP,
COMM, IERROR\fP)
<type> \fISENDBUF(*), RECVBUF(*)\fP
INTEGER \fIRECVCOUNT, DATATYPE, OP, COMM, IERROR \fP
MPI_IREDUCE_SCATTER_BLOCK(\fISENDBUF, RECVBUF, RECVCOUNT, DATATYPE, OP,
COMM, REQUEST, IERROR\fP)
<type> \fISENDBUF(*), RECVBUF(*)\fP
INTEGER \fIRECVCOUNT, DATATYPE, OP, COMM, REQUEST, IERROR \fP
.fi
.SH INPUT PARAMETERS
.ft R
.TP 1i
sendbuf
Starting address of send buffer (choice).
.TP 1i
recvcount
Element count per block (non-negative integer).
.TP 1i
datatype
Datatype of elements of input buffer (handle).
.TP 1i
op
Operation (handle).
.TP 1i
comm
Communicator (handle).
.SH OUTPUT PARAMETERS
.ft R
.TP 1i
recvbuf
Starting address of receive buffer (choice).
.TP 1i
request
Request (handle, non-blocking only).
.ft R
.TP 1i
IERROR
Fortran only: Error status (integer).
.SH DESCRIPTION
.ft R
MPI_Reduce_scatter_block first does an element-wise reduction on a vector of \fIcount\fP\
=\ n * \fIrecvcount\fP elements in the send buffer defined by \fIsendbuf\fP, \fIcount\fP, and
\fIdatatype\fP, using the operation \fIop\fP, where n is the number of
processes in the group of \fIcomm\fP. Next, the resulting vector of results is split into n disjoint
segments, where n is the number of processes in the group. Each segment contains \fIrecvcount\fP
elements. The ith segment is sent to process i and stored in the receive buffer defined by
\fIrecvbuf\fP, \fIrecvcount\fP, and \fIdatatype\fP.
.SH USE OF IN-PLACE OPTION
When the communicator is an intracommunicator, you can perform a reduce-scatter operation in-place (the output buffer is used as the input buffer). Use the variable MPI_IN_PLACE as the value of the \fIsendbuf\fR. In this case, the input data is taken from the top of the receive buffer. The area occupied by the input data may be either longer or shorter than the data filled by the output data.
.sp
.SH WHEN COMMUNICATOR IS AN INTER-COMMUNICATOR
.sp
When the communicator is an inter-communicator, the reduce-scatter operation occurs in two phases. First, the result of the reduction performed on the data provided by the processes in the first group is scattered among the processes in the second group. Then the reverse occurs: the reduction performed on the data provided by the processes in the second group is scattered among the processes in the first group. For each group, all processes provide the same \fIrecvcount\fR argument, and the total number of elements (n\ *\ \fIrecvcount\fR) should be the same for both groups.
.sp
.SH NOTES ON COLLECTIVE OPERATIONS
The reduction functions (
.I MPI_Op
) do not return an error value. As a result,
if the functions detect an error, all they can do is either call
.I MPI_Abort
or silently skip the problem. Thus, if you change the error handler from
.I MPI_ERRORS_ARE_FATAL
to something else, for example,
.I MPI_ERRORS_RETURN
,
then no error may be indicated.
The reason for this is the performance problems in ensuring that
all collective routines return the same error value.
.SH ERRORS
Almost all MPI routines return an error value; C routines as the value of the function and Fortran routines in the last argument.
.sp
Before the error value is returned, the current MPI error handler is
called. By default, this error handler aborts the MPI job, except for I/O function errors. The error handler may be changed with MPI_Comm_set_errhandler; the predefined error handler MPI_ERRORS_RETURN may be used to cause error values to be returned. Note that MPI does not guarantee that an MPI program can continue past an error.
.SH SEE ALSO
MPI_Reduce_scatter

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше