1
1

Replace the MPI API functions used in this file with the corresponding OMPI-internal functionality.

This commit was SVN r25703.
Этот коммит содержится в:
Edgar Gabriel 2012-01-10 19:55:05 +00:00
родитель f65f6f5c39
Коммит fb4d1a7099

Просмотреть файл

@@ -24,6 +24,7 @@
#include "ompi/runtime/params.h" #include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h" #include "ompi/communicator/communicator.h"
#include "ompi/mca/pml/pml.h" #include "ompi/mca/pml/pml.h"
#include "ompi/mca/topo/topo.h"
#include "opal/datatype/opal_convertor.h" #include "opal/datatype/opal_convertor.h"
#include "opal/datatype/opal_datatype.h" #include "opal/datatype/opal_datatype.h"
#include "ompi/datatype/ompi_datatype.h" #include "ompi/datatype/ompi_datatype.h"
@@ -98,12 +99,11 @@ int ompi_io_ompio_set_file_defaults (mca_io_ompio_file_t *fh)
/*Create a derived datatype for the created iovec */ /*Create a derived datatype for the created iovec */
types[0] = &ompi_mpi_long.dt; types[0] = &ompi_mpi_long.dt;
types[1] = &ompi_mpi_long.dt; types[1] = &ompi_mpi_long.dt;
MPI_Address( fh->f_decoded_iov, d); d[0] = (OPAL_PTRDIFF_TYPE) fh->f_decoded_iov;
MPI_Address( &fh->f_decoded_iov[0].iov_len, d+1); d[1] = (OPAL_PTRDIFF_TYPE) &fh->f_decoded_iov[0].iov_len;
base = d[0]; base = d[0];
for (i=0 ; i<2 ; i++) { for (i=0 ; i<2 ; i++) {
@@ -656,7 +656,7 @@ int ompi_io_ompio_set_aggregator_props (mca_io_ompio_file_t *fh,
if (-1 == num_aggregators) { if (-1 == num_aggregators) {
/* Determine Topology Information */ /* Determine Topology Information */
if (fh->f_comm->c_flags & OMPI_COMM_CART) { if (fh->f_comm->c_flags & OMPI_COMM_CART) {
MPI_Cartdim_get(fh->f_comm, &ndims); fh->f_comm->c_topo->topo_cartdim_get(fh->f_comm, &ndims);
dims = (int*)malloc (ndims * sizeof(int)); dims = (int*)malloc (ndims * sizeof(int));
if (NULL == dims) { if (NULL == dims) {
@@ -682,7 +682,7 @@ int ompi_io_ompio_set_aggregator_props (mca_io_ompio_file_t *fh,
return OMPI_ERR_OUT_OF_RESOURCE; return OMPI_ERR_OUT_OF_RESOURCE;
} }
MPI_Cart_get(fh->f_comm, ndims, dims, periods, coords); fh->f_comm->c_topo->topo_cart_get(fh->f_comm, ndims, dims, periods, coords);
/* /*
printf ("NDIMS = %d\n", ndims); printf ("NDIMS = %d\n", ndims);
@@ -745,7 +745,7 @@ int ompi_io_ompio_set_aggregator_props (mca_io_ompio_file_t *fh,
} }
for (j=0 ; j<fh->f_size ; j++) { for (j=0 ; j<fh->f_size ; j++) {
MPI_Cart_coords (fh->f_comm, j, ndims, coords_tmp); fh->f_comm->c_topo->topo_cart_coords (fh->f_comm, j, ndims, coords_tmp);
if (coords_tmp[0]/i == coords[0]/i) { if (coords_tmp[0]/i == coords[0]/i) {
if ((coords_tmp[1]/root_offset)*root_offset == if ((coords_tmp[1]/root_offset)*root_offset ==
(coords[1]/root_offset)*root_offset) { (coords[1]/root_offset)*root_offset) {
@@ -1123,30 +1123,34 @@ int ompi_io_ompio_calc_others_requests(mca_io_ompio_file_t *fh,
{ {
int *count_others_req_per_proc, count_others_req_procs; int *count_others_req_per_proc=NULL, count_others_req_procs;
int i,j ; int i,j, ret=OMPI_SUCCESS;
MPI_Request *requests; MPI_Request *requests=NULL;
MPI_Status *statuses; mca_io_ompio_access_array_t *others_req=NULL;
mca_io_ompio_access_array_t *others_req;
count_others_req_per_proc = (int *)malloc(fh->f_size*sizeof(int)); count_others_req_per_proc = (int *)malloc(fh->f_size*sizeof(int));
if ( NULL == count_others_req_per_proc ) {
return OMPI_ERR_OUT_OF_RESOURCE;
}
/* Change it to the ompio specific alltoall in coll module : VVN*/ /* Change it to the ompio specific alltoall in coll module : VVN*/
MPI_Alltoall (count_my_req_per_proc, fh->f_comm->c_coll.coll_alltoall (
count_my_req_per_proc,
1, 1,
MPI_INT, MPI_INT,
count_others_req_per_proc, count_others_req_per_proc,
1, 1,
MPI_INT, MPI_INT,
fh->f_comm); fh->f_comm,
fh->f_comm->c_coll.coll_alltoall_module);
#if 0 #if 0
for( i = 0; i< fh->f_size; i++){ for( i = 0; i< fh->f_size; i++){
printf("my: %d, others: %d\n",count_my_req_per_proc[i], printf("my: %d, others: %d\n",count_my_req_per_proc[i],
count_others_req_per_proc[i]); count_others_req_per_proc[i]);
} }
#endif #endif
*others_req_ptr = (mca_io_ompio_access_array_t *) malloc *others_req_ptr = (mca_io_ompio_access_array_t *) malloc
(fh->f_size*sizeof(mca_io_ompio_access_array_t)); (fh->f_size*sizeof(mca_io_ompio_access_array_t));
@@ -1172,28 +1176,37 @@ int ompi_io_ompio_calc_others_requests(mca_io_ompio_file_t *fh,
requests = (MPI_Request *) requests = (MPI_Request *)
malloc(1+2*(count_my_req_procs+count_others_req_procs)* malloc(1+2*(count_my_req_procs+count_others_req_procs)*
sizeof(MPI_Request)); sizeof(MPI_Request));
if ( NULL == requests ) {
ret = OMPI_ERR_OUT_OF_RESOURCE;
goto exit;
}
j = 0; j = 0;
for (i=0; i<fh->f_size; i++){ for (i=0; i<fh->f_size; i++){
if (others_req[i].count){ if (others_req[i].count){
ret = MCA_PML_CALL(irecv(others_req[i].offsets,
MPI_Irecv(others_req[i].offsets,
others_req[i].count, others_req[i].count,
MPI_OFFSET, MPI_OFFSET,
i, i,
i+fh->f_rank, i+fh->f_rank,
fh->f_comm, fh->f_comm,
&requests[j]); &requests[j]));
if ( OMPI_SUCCESS != ret ) {
goto exit;
}
j++; j++;
MPI_Irecv(others_req[i].lens, ret = MCA_PML_CALL(irecv(others_req[i].lens,
others_req[i].count, others_req[i].count,
MPI_INT, MPI_INT,
i, i,
i+fh->f_rank+1, i+fh->f_rank+1,
fh->f_comm, fh->f_comm,
&requests[j]); &requests[j]));
if ( OMPI_SUCCESS != ret ) {
goto exit;
}
j++; j++;
} }
@@ -1202,31 +1215,54 @@ int ompi_io_ompio_calc_others_requests(mca_io_ompio_file_t *fh,
for (i=0; i < fh->f_size; i++) { for (i=0; i < fh->f_size; i++) {
if (my_req[i].count) { if (my_req[i].count) {
MPI_Isend(my_req[i].offsets, my_req[i].count, ret = MCA_PML_CALL(isend(my_req[i].offsets,
MPI_OFFSET, i, i+fh->f_rank, fh->f_comm, &requests[j]); my_req[i].count,
MPI_OFFSET,
i,
i+fh->f_rank,
MCA_PML_BASE_SEND_STANDARD,
fh->f_comm,
&requests[j]));
if ( OMPI_SUCCESS != ret ) {
goto exit;
}
j++; j++;
MPI_Isend(my_req[i].lens, my_req[i].count, ret = MCA_PML_CALL(isend(my_req[i].lens,
MPI_INT, i, i+fh->f_rank+1, fh->f_comm, &requests[j]); my_req[i].count,
MPI_INT,
i,
i+fh->f_rank+1,
MCA_PML_BASE_SEND_STANDARD,
fh->f_comm,
&requests[j]));
if ( OMPI_SUCCESS != ret ) {
goto exit;
}
j++; j++;
} }
} }
if (j) { if (j) {
ret = ompi_request_wait_all ( j, requests, MPI_STATUS_IGNORE );
statuses = (MPI_Status *) malloc(j * sizeof(MPI_Status)); if ( OMPI_SUCCESS != ret ) {
MPI_Waitall(j, requests, statuses); return ret;
free(statuses); }
} }
free(requests);
free(count_others_req_per_proc);
*count_others_req_procs_ptr = count_others_req_procs; *count_others_req_procs_ptr = count_others_req_procs;
exit:
return OMPI_SUCCESS; if ( NULL != requests ) {
free(requests);
}
if ( NULL != count_others_req_per_proc ) {
free(count_others_req_per_proc);
}
return ret;
} }