
A better solution than the previous commit -- RETAIN/RELEASE the MPI_Op
at the top-level MPI API function.  This allows two kinds of
scenarios:

1. MPI_Ireduce(..., op, ...);
   MPI_Op_free(&op);
   MPI_Wait(...);

This supports the non-blocking collectives that we plan to add
someday, making them analogous to the non-blocking point-to-point
operations.

2. Thread 1:
   MPI_Reduce(..., op, ...);
   Thread 2:
   MPI_Op_free(&op);

Granted, scenario #2 treads a fine line between a correct and an
erroneous MPI program, but it is possible (as long as the
MPI_Op_free() happens *after* MPI_Reduce() has started to execute).
Scenario #1 is more realistic: the MPI_Op_free() could be executed in
the same thread or in a different thread.
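
To make scenario #1 concrete, here is a minimal sketch using the
MPI-3 MPI_Ireduce() (which, at the time of this commit, was still
only planned); the my_sum() callback and the buffers are invented
purely for illustration.  Because the top-level API retains the op,
freeing it before MPI_Wait() is safe:

    #include <mpi.h>

    /* User-defined reduction callback -- the name my_sum() is made up
     * for this illustration only. */
    static void my_sum(void *in, void *inout, int *len, MPI_Datatype *dtype)
    {
        int *a = (int *) in;
        int *b = (int *) inout;
        for (int i = 0; i < *len; ++i) {
            b[i] += a[i];
        }
    }

    int main(int argc, char *argv[])
    {
        MPI_Init(&argc, &argv);

        MPI_Op op;
        MPI_Op_create(my_sum, 1, &op);

        int sendval = 1, result = 0;
        MPI_Request req;

        /* Scenario #1: free the op while the non-blocking reduction is
         * still in flight.  The RETAIN taken inside the MPI call keeps
         * the op alive until the corresponding RELEASE at completion. */
        MPI_Ireduce(&sendval, &result, 1, MPI_INT, op, 0, MPI_COMM_WORLD, &req);
        MPI_Op_free(&op);
        MPI_Wait(&req, MPI_STATUS_IGNORE);

        MPI_Finalize();
        return 0;
    }

With the RETAIN/RELEASE moved into the top-level API functions, this
pattern works without the caller having to know when the back-end
coll component is finished with the op.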

This commit was SVN r7870.
This commit is contained in:
Jeff Squyres 2005-10-25 19:20:42 +00:00
parent d009d8de57
commit 23ab9e0277
6 changed files with 10 additions and 2 deletions

View File

@@ -69,8 +69,10 @@ int MPI_Allreduce(void *sendbuf, void *recvbuf, int count,
     /* Invoke the coll component to perform the back-end operation */
+    OBJ_RETAIN(op);
     err = comm->c_coll.coll_allreduce(sendbuf, recvbuf, count,
                                       datatype, op, comm);
+    OBJ_RELEASE(op);
     OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
 }

View File

@@ -61,7 +61,9 @@ int MPI_Exscan(void *sendbuf, void *recvbuf, int count,
     /* Invoke the coll component to perform the back-end operation */
+    OBJ_RETAIN(op);
     err = comm->c_coll.coll_exscan(sendbuf, recvbuf, count,
                                    datatype, op, comm);
+    OBJ_RELEASE(op);
     OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
 }

View File

@@ -88,7 +88,9 @@ int MPI_Reduce(void *sendbuf, void *recvbuf, int count,
     /* Invoke the coll component to perform the back-end operation */
+    OBJ_RETAIN(op);
     err = comm->c_coll.coll_reduce(sendbuf, recvbuf, count,
                                    datatype, op, root, comm);
+    OBJ_RELEASE(op);
     OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
 }

View File

@@ -88,7 +88,9 @@ int MPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
     /* Invoke the coll component to perform the back-end operation */
+    OBJ_RETAIN(op);
     err = comm->c_coll.coll_reduce_scatter(sendbuf, recvbuf, recvcounts,
                                            datatype, op, comm);
+    OBJ_RELEASE(op);
     OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
 }

View File

@@ -75,7 +75,9 @@ int MPI_Scan(void *sendbuf, void *recvbuf, int count,
     /* Call the coll component to actually perform the allgather */
+    OBJ_RETAIN(op);
     err = comm->c_coll.coll_scan(sendbuf, recvbuf, count,
                                  datatype, op, comm);
+    OBJ_RELEASE(op);
     OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
 }

View File

@@ -501,7 +501,6 @@ static inline void ompi_op_reduce(ompi_op_t *op, void *source, void *target,
      * :-)
      */
-    OBJ_RETAIN(op);
     if (0 != (op->o_flags & OMPI_OP_FLAGS_INTRINSIC) &&
         dtype->id < DT_MAX_PREDEFINED) {
         if (0 != (op->o_flags & OMPI_OP_FLAGS_FORTRAN_FUNC)) {
@@ -524,7 +523,6 @@ static inline void ompi_op_reduce(ompi_op_t *op, void *source, void *target,
     } else {
         op->o_func[0].c_fn(source, target, &count, &dtype);
     }
-    OBJ_RELEASE(op);
 }
 #endif /* OMPI_OP_H */