Merge pull request #4527 from clementFoyer/osc-no-includes
Remove inter-dependencies between OSC modules.
Commit e7f91f8068
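The change repeated throughout the hunks below is the signature of mca_common_monitoring_get_world_rank(): the lookup now takes an ompi_group_t instead of a communicator, so the OSC monitoring templates can pass win->w_group directly and no longer need each OSC component's private _get_comm() accessor (which is what pulled the other OSC modules' headers into osc/monitoring). A minimal sketch of the call-site change, assuming the common-monitoring declarations are in scope; the wrapper function itself is hypothetical and only illustrates the pattern used in the collective hunks:

    /* Hypothetical helper, not part of the patch: record the traffic sent to
     * peer 'i' of 'comm', written against the new group-based signature. */
    static void record_coll_peer(struct ompi_communicator_t *comm, int i, size_t data_size)
    {
        int rank;
        /* Old call shape:  mca_common_monitoring_get_world_rank(i, comm, &rank)                 */
        /* New call shape:  mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) */
        if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
            mca_common_monitoring_record_coll(rank, data_size);
        }
    }
    /* The one-sided (OSC) templates use the window's group instead:
     *     mca_common_monitoring_get_world_rank(target_rank, win->w_group, &world_rank)          */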
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -34,7 +34,7 @@ int mca_coll_monitoring_allgather(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  }
@@ -63,7 +63,7 @@ int mca_coll_monitoring_iallgather(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  }

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -34,7 +34,7 @@ int mca_coll_monitoring_allgatherv(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  }
@@ -63,7 +63,7 @@ int mca_coll_monitoring_iallgatherv(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  }

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -34,7 +34,7 @@ int mca_coll_monitoring_allreduce(const void *sbuf, void *rbuf, int count,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  }
@@ -62,7 +62,7 @@ int mca_coll_monitoring_iallreduce(const void *sbuf, void *rbuf, int count,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  }

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -32,7 +32,7 @@ int mca_coll_monitoring_alltoall(const void *sbuf, int scount, struct ompi_datat
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  }
@@ -61,7 +61,7 @@ int mca_coll_monitoring_ialltoall(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  }

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -33,7 +33,7 @@ int mca_coll_monitoring_alltoallv(const void *sbuf, const int *scounts, const in
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  data_size_aggreg += data_size;
  }
@@ -65,7 +65,7 @@ int mca_coll_monitoring_ialltoallv(const void *sbuf, const int *scounts,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  data_size_aggreg += data_size;
  }

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -35,7 +35,7 @@ int mca_coll_monitoring_alltoallw(const void *sbuf, const int *scounts,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  data_size_aggreg += data_size;
  }
@@ -67,7 +67,7 @@ int mca_coll_monitoring_ialltoallw(const void *sbuf, const int *scounts,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  data_size_aggreg += data_size;
  }

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -25,7 +25,7 @@ int mca_coll_monitoring_barrier(struct ompi_communicator_t *comm,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, 0);
  }
  }
@@ -47,7 +47,7 @@ int mca_coll_monitoring_ibarrier(struct ompi_communicator_t *comm,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, 0);
  }
  }

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * Copyright (c) 2017 Research Organization for Information Science
  * and Technology (RIST). All rights reserved.
  * $COPYRIGHT$
@@ -35,7 +35,7 @@ int mca_coll_monitoring_bcast(void *buff, int count,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  }
@@ -64,7 +64,7 @@ int mca_coll_monitoring_ibcast(void *buff, int count,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  }

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -33,7 +33,7 @@ int mca_coll_monitoring_exscan(const void *sbuf, void *rbuf, int count,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  }
@@ -60,7 +60,7 @@ int mca_coll_monitoring_iexscan(const void *sbuf, void *rbuf, int count,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  }

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -32,7 +32,7 @@ int mca_coll_monitoring_gather(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  }
@@ -61,7 +61,7 @@ int mca_coll_monitoring_igather(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  }

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -34,7 +34,7 @@ int mca_coll_monitoring_gatherv(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  data_size_aggreg += data_size;
  }
@@ -66,7 +66,7 @@ int mca_coll_monitoring_igatherv(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  data_size_aggreg += data_size;
  }

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -43,7 +43,7 @@ int mca_coll_monitoring_neighbor_allgather(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(srank, comm, &world_rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(srank, comm->c_remote_group, &world_rank) ) {
  mca_common_monitoring_record_coll(world_rank, data_size);
  data_size_aggreg += data_size;
  }
@@ -54,7 +54,7 @@ int mca_coll_monitoring_neighbor_allgather(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(drank, comm, &world_rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(drank, comm->c_remote_group, &world_rank) ) {
  mca_common_monitoring_record_coll(world_rank, data_size);
  data_size_aggreg += data_size;
  }
@@ -96,7 +96,7 @@ int mca_coll_monitoring_ineighbor_allgather(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(srank, comm, &world_rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(srank, comm->c_remote_group, &world_rank) ) {
  mca_common_monitoring_record_coll(world_rank, data_size);
  data_size_aggreg += data_size;
  }
@@ -107,7 +107,7 @@ int mca_coll_monitoring_ineighbor_allgather(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(drank, comm, &world_rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(drank, comm->c_remote_group, &world_rank) ) {
  mca_common_monitoring_record_coll(world_rank, data_size);
  data_size_aggreg += data_size;
  }

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * Copyright (c) 2017 Research Organization for Information Science
  * and Technology (RIST). All rights reserved.
  * $COPYRIGHT$
@@ -46,7 +46,7 @@ int mca_coll_monitoring_neighbor_allgatherv(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(srank, comm, &world_rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(srank, comm->c_remote_group, &world_rank) ) {
  mca_common_monitoring_record_coll(world_rank, data_size);
  data_size_aggreg += data_size;
  }
@@ -57,7 +57,7 @@ int mca_coll_monitoring_neighbor_allgatherv(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(drank, comm, &world_rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(drank, comm->c_remote_group, &world_rank) ) {
  mca_common_monitoring_record_coll(world_rank, data_size);
  data_size_aggreg += data_size;
  }
@@ -100,7 +100,7 @@ int mca_coll_monitoring_ineighbor_allgatherv(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(srank, comm, &world_rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(srank, comm->c_remote_group, &world_rank) ) {
  mca_common_monitoring_record_coll(world_rank, data_size);
  data_size_aggreg += data_size;
  }
@@ -111,7 +111,7 @@ int mca_coll_monitoring_ineighbor_allgatherv(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(drank, comm, &world_rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(drank, comm->c_remote_group, &world_rank) ) {
  mca_common_monitoring_record_coll(world_rank, data_size);
  data_size_aggreg += data_size;
  }

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -44,7 +44,7 @@ int mca_coll_monitoring_neighbor_alltoall(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(srank, comm, &world_rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(srank, comm->c_remote_group, &world_rank) ) {
  mca_common_monitoring_record_coll(world_rank, data_size);
  data_size_aggreg += data_size;
  }
@@ -55,7 +55,7 @@ int mca_coll_monitoring_neighbor_alltoall(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(drank, comm, &world_rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(drank, comm->c_remote_group, &world_rank) ) {
  mca_common_monitoring_record_coll(world_rank, data_size);
  data_size_aggreg += data_size;
  }
@@ -98,7 +98,7 @@ int mca_coll_monitoring_ineighbor_alltoall(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(srank, comm, &world_rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(srank, comm->c_remote_group, &world_rank) ) {
  mca_common_monitoring_record_coll(world_rank, data_size);
  data_size_aggreg += data_size;
  }
@@ -109,7 +109,7 @@ int mca_coll_monitoring_ineighbor_alltoall(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(drank, comm, &world_rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(drank, comm->c_remote_group, &world_rank) ) {
  mca_common_monitoring_record_coll(world_rank, data_size);
  data_size_aggreg += data_size;
  }

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -44,7 +44,7 @@ int mca_coll_monitoring_neighbor_alltoallv(const void *sbuf, const int *scounts,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(srank, comm, &world_rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(srank, comm->c_remote_group, &world_rank) ) {
  mca_common_monitoring_record_coll(world_rank, data_size);
  data_size_aggreg += data_size;
  }
@@ -57,7 +57,7 @@ int mca_coll_monitoring_neighbor_alltoallv(const void *sbuf, const int *scounts,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(drank, comm, &world_rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(drank, comm->c_remote_group, &world_rank) ) {
  mca_common_monitoring_record_coll(world_rank, data_size);
  data_size_aggreg += data_size;
  }
@@ -103,7 +103,7 @@ int mca_coll_monitoring_ineighbor_alltoallv(const void *sbuf, const int *scounts
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(srank, comm, &world_rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(srank, comm->c_remote_group, &world_rank) ) {
  mca_common_monitoring_record_coll(world_rank, data_size);
  data_size_aggreg += data_size;
  }
@@ -116,7 +116,7 @@ int mca_coll_monitoring_ineighbor_alltoallv(const void *sbuf, const int *scounts
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(drank, comm, &world_rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(drank, comm->c_remote_group, &world_rank) ) {
  mca_common_monitoring_record_coll(world_rank, data_size);
  data_size_aggreg += data_size;
  }

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -45,7 +45,7 @@ int mca_coll_monitoring_neighbor_alltoallw(const void *sbuf, const int *scounts,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(srank, comm, &world_rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(srank, comm->c_remote_group, &world_rank) ) {
  mca_common_monitoring_record_coll(world_rank, data_size);
  data_size_aggreg += data_size;
  }
@@ -59,7 +59,7 @@ int mca_coll_monitoring_neighbor_alltoallw(const void *sbuf, const int *scounts,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(drank, comm, &world_rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(drank, comm->c_remote_group, &world_rank) ) {
  mca_common_monitoring_record_coll(world_rank, data_size);
  data_size_aggreg += data_size;
  }
@@ -104,7 +104,7 @@ int mca_coll_monitoring_ineighbor_alltoallw(const void *sbuf, const int *scounts
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(srank, comm, &world_rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(srank, comm->c_remote_group, &world_rank) ) {
  mca_common_monitoring_record_coll(world_rank, data_size);
  data_size_aggreg += data_size;
  }
@@ -118,7 +118,7 @@ int mca_coll_monitoring_ineighbor_alltoallw(const void *sbuf, const int *scounts
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(drank, comm, &world_rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(drank, comm->c_remote_group, &world_rank) ) {
  mca_common_monitoring_record_coll(world_rank, data_size);
  data_size_aggreg += data_size;
  }

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -34,7 +34,7 @@ int mca_coll_monitoring_reduce(const void *sbuf, void *rbuf, int count,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  }
@@ -64,7 +64,7 @@ int mca_coll_monitoring_ireduce(const void *sbuf, void *rbuf, int count,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  }

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -34,7 +34,7 @@ int mca_coll_monitoring_reduce_scatter(const void *sbuf, void *rbuf,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  data_size_aggreg += data_size;
@@ -64,7 +64,7 @@ int mca_coll_monitoring_ireduce_scatter(const void *sbuf, void *rbuf,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  data_size_aggreg += data_size;

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -34,7 +34,7 @@ int mca_coll_monitoring_reduce_scatter_block(const void *sbuf, void *rbuf,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  }
@@ -63,7 +63,7 @@ int mca_coll_monitoring_ireduce_scatter_block(const void *sbuf, void *rbuf,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  }

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -33,7 +33,7 @@ int mca_coll_monitoring_scan(const void *sbuf, void *rbuf, int count,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  }
@@ -60,7 +60,7 @@ int mca_coll_monitoring_iscan(const void *sbuf, void *rbuf, int count,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  }

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -35,7 +35,7 @@ int mca_coll_monitoring_scatter(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  }
@@ -68,7 +68,7 @@ int mca_coll_monitoring_iscatter(const void *sbuf, int scount,
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  }
  }

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -32,7 +32,7 @@ int mca_coll_monitoring_scatterv(const void *sbuf, const int *scounts, const int
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  data_size_aggreg += data_size;
  }
@@ -62,7 +62,7 @@ int mca_coll_monitoring_iscatterv(const void *sbuf, const int *scounts, const in
  * If this fails the destination is not part of my MPI_COM_WORLD
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
  */
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm, &rank) ) {
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, comm->c_remote_group, &rank) ) {
  mca_common_monitoring_record_coll(rank, data_size);
  data_size_aggreg += data_size;
  }

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -66,14 +66,14 @@ OMPI_DECLSPEC void mca_common_monitoring_record_osc(int world_rank, size_t data_
  /* Records COLL communications. */
  OMPI_DECLSPEC void mca_common_monitoring_record_coll(int world_rank, size_t data_size);

- /* Translate the rank from the given communicator of a process to its rank in MPI_COMM_RANK. */
- static inline int mca_common_monitoring_get_world_rank(int dst, struct ompi_communicator_t*comm,
- int*world_rank)
+ /* Translate the rank from the given rank of a process to its rank in MPI_COMM_RANK. */
+ static inline int mca_common_monitoring_get_world_rank(int dest, ompi_group_t *group,
+ int *world_rank)
  {
  opal_process_name_t tmp;

  /* find the processor of the destination */
- ompi_proc_t *proc = ompi_group_get_proc_ptr(comm->c_remote_group, dst, true);
+ ompi_proc_t *proc = ompi_group_get_proc_ptr(group, dest, true);
  if( ompi_proc_is_sentinel(proc) ) {
  tmp = ompi_proc_sentinel_to_name((uintptr_t)proc);
  } else {

@@ -2,7 +2,7 @@
  * Copyright (c) 2013-2016 The University of Tennessee and The University
  * of Tennessee Research Foundation. All rights
  * reserved.
- * Copyright (c) 2013-2017 Inria. All rights reserved.
+ * Copyright (c) 2013-2018 Inria. All rights reserved.
  * Copyright (c) 2015 Bull SAS. All rights reserved.
  * Copyright (c) 2016-2017 Research Organization for Information Science
  * and Technology (RIST). All rights reserved.
@@ -56,7 +56,8 @@ static inline void mca_common_monitoring_coll_cache(mca_monitoring_coll_data_t*d
  {
  if( -1 == data->world_rank ) {
  /* Get current process world_rank */
- mca_common_monitoring_get_world_rank(ompi_comm_rank(data->p_comm), data->p_comm,
+ mca_common_monitoring_get_world_rank(ompi_comm_rank(data->p_comm),
+ data->p_comm->c_remote_group,
  &data->world_rank);
  }
  /* Only list procs if the hashtable is already initialized,
@@ -76,7 +77,7 @@ static inline void mca_common_monitoring_coll_cache(mca_monitoring_coll_data_t*d
  tmp_procs[0] = '\0';
  /* Build procs list */
  for(i = 0; i < size; ++i) {
- if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, data->p_comm, &world_rank) )
+ if( OPAL_SUCCESS == mca_common_monitoring_get_world_rank(i, data->p_comm->c_remote_group, &world_rank) )
  pos += sprintf(&tmp_procs[pos], "%d,", world_rank);
  }
  tmp_procs[pos - 1] = '\0'; /* Remove final coma */

@@ -1,5 +1,5 @@
  #
- # Copyright (c) 2016 Inria. All rights reserved.
+ # Copyright (c) 2016-2018 Inria. All rights reserved.
  # Copyright (c) 2017 IBM Corporation. All rights reserved.
  # $COPYRIGHT$
  #
@@ -32,8 +32,10 @@ mcacomponent_LTLIBRARIES = $(component_install)
  mca_osc_monitoring_la_SOURCES = $(monitoring_sources)
  mca_osc_monitoring_la_LDFLAGS = -module -avoid-version
  mca_osc_monitoring_la_LIBADD = $(top_builddir)/ompi/lib@OMPI_LIBMPI_NAME@.la \
- $(OMPI_TOP_BUILDDIR)/ompi/mca/common/monitoring/libmca_common_monitoring.la
+ $(OMPI_TOP_BUILDDIR)/ompi/mca/common/monitoring/libmca_common_monitoring.la

  noinst_LTLIBRARIES = $(component_noinst)
  libmca_osc_monitoring_la_SOURCES = $(monitoring_sources)
  libmca_osc_monitoring_la_LDFLAGS = -module -avoid-version
+
+ DISTCLEANFILES = osc_monitoring_template_gen.h

@@ -1,22 +1,100 @@
- # -*- shell-script -*-
+ dnl -*- shell-script -*-
+ dnl
+ dnl Copyright (c) 2016-2018 Inria. All rights reserved.
+ dnl $COPYRIGHT$
+ dnl
+ dnl Additional copyrights may follow
+ dnl
+ dnl $HEADER$
+ dnl

+ # mca_ompi_osc_monitoring_generate_templates
  #
- # Copyright (c) 2016 Inria. All rights reserved.
- # $COPYRIGHT$
+ # Overwrite $1. $1 is where the different templates are brought
+ # together and compose an array of components by listing component
+ # names in $2.
  #
- # Additional copyrights may follow
  #
- # $HEADER$
+ # $1 = filename
+ # $2 = osc component names
  #
+ AC_DEFUN(
+ [MCA_OMPI_OSC_MONITORING_GENERATE_TEMPLATES],
+ [m4_ifval(
+ [$1],
+ [AC_CONFIG_COMMANDS(
+ [$1],
+ [filename="$1"
+ components=`echo "$2" | sed -e 's/,/ /g' -e 's/monitoring//'`
+ cat <<EOF >$filename
+ /* $filename
+ *
+ * This file was generated from ompi/mca/osc/monitoring/configure.m4
+ *
+ * DO NOT EDIT THIS FILE.
+ *
+ */
+ /*
+ * Copyright (c) 2017-2018 Inria. All rights reserved.
+ * \$COPYRIGHT$
+ *
+ * Additional copyrights may follow
+ *
+ * \$HEADER$
+ */
+
+ #ifndef MCA_OSC_MONITORING_GEN_TEMPLATE_H
+ #define MCA_OSC_MONITORING_GEN_TEMPLATE_H
+
+ #include <ompi_config.h>
+ #include <ompi/mca/osc/osc.h>
+ #include <ompi/mca/osc/monitoring/osc_monitoring_template.h>
+
+ /************************************************************/
+ /* Include template generating macros and inlined functions */
+
+ EOF
+ # Generate each case in order to register the proper template functions
+ for comp in $components
+ do
+ echo "OSC_MONITORING_MODULE_TEMPLATE_GENERATE(${comp})" >>$filename
+ done
+ cat <<EOF >>$filename
+
+ /************************************************************/
+
+ typedef struct {
+ const char * name;
+ ompi_osc_base_module_t * (*fct) (ompi_osc_base_module_t *);
+ } osc_monitoring_components_list_t;
+
+ static const osc_monitoring_components_list_t osc_monitoring_components_list[[]] = {
+ EOF
+ for comp in $components
+ do
+ echo " { .name = \"${comp}\", .fct = OSC_MONITORING_SET_TEMPLATE_FCT_NAME(${comp}) }," >>$filename
+ done
+ cat <<EOF >>$filename
+ { .name = NULL, .fct = NULL }
+ };
+
+ #endif /* MCA_OSC_MONITORING_GEN_TEMPLATE_H */
+ EOF
+ unset filename components
+ ])
+ ])dnl
+ ])dnl

  # MCA_ompi_osc_monitoring_CONFIG()
  # ------------------------------------------------
- AC_DEFUN([MCA_ompi_osc_monitoring_CONFIG],[
- AC_CONFIG_FILES([ompi/mca/osc/monitoring/Makefile])
+ AC_DEFUN(
+ [MCA_ompi_osc_monitoring_CONFIG],
+ [AC_CONFIG_FILES([ompi/mca/osc/monitoring/Makefile])

- AS_IF([test "$MCA_BUILD_ompi_common_monitoring_DSO_TRUE" = ''],
- [$1],
- [$2])
- OPAL_CHECK_PORTALS4([osc_monitoring],
- [AC_DEFINE([OMPI_WITH_OSC_PORTALS4], [1], [Whether or not to generate template for osc_portals4])],
- [])
- ])dnl
+ AS_IF([test "$MCA_BUILD_ompi_common_monitoring_DSO_TRUE" = ''],
+ [$1],
+ [$2])
+
+ MCA_OMPI_OSC_MONITORING_GENERATE_TEMPLATES(
+ [ompi/mca/osc/monitoring/osc_monitoring_template_gen.h],
+ [mca_ompi_osc_m4_config_component_list, mca_ompi_osc_no_config_component_list])dnl
+ ])dnl

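For reference, assuming the component lists passed above expand to rdma, sm and pt2pt (the real list is whatever the build configures, so these names are only an example), the generated osc_monitoring_template_gen.h boils down to something like:

    /* Sketch of the generator's output for an assumed component list
     * "rdma sm pt2pt"; the real file is produced at configure time and
     * must not be edited by hand. */
    #ifndef MCA_OSC_MONITORING_GEN_TEMPLATE_H
    #define MCA_OSC_MONITORING_GEN_TEMPLATE_H

    #include <ompi_config.h>
    #include <ompi/mca/osc/osc.h>
    #include <ompi/mca/osc/monitoring/osc_monitoring_template.h>

    OSC_MONITORING_MODULE_TEMPLATE_GENERATE(rdma)
    OSC_MONITORING_MODULE_TEMPLATE_GENERATE(sm)
    OSC_MONITORING_MODULE_TEMPLATE_GENERATE(pt2pt)

    typedef struct {
        const char * name;
        ompi_osc_base_module_t * (*fct) (ompi_osc_base_module_t *);
    } osc_monitoring_components_list_t;

    static const osc_monitoring_components_list_t osc_monitoring_components_list[] = {
        { .name = "rdma",  .fct = OSC_MONITORING_SET_TEMPLATE_FCT_NAME(rdma)  },
        { .name = "sm",    .fct = OSC_MONITORING_SET_TEMPLATE_FCT_NAME(sm)    },
        { .name = "pt2pt", .fct = OSC_MONITORING_SET_TEMPLATE_FCT_NAME(pt2pt) },
        { .name = NULL,    .fct = NULL }
    };

    #endif /* MCA_OSC_MONITORING_GEN_TEMPLATE_H */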
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -29,7 +29,7 @@
  * If this fails the destination is not part of my MPI_COM_WORLD \
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank \
  */ \
- if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(target_rank, ompi_osc_monitoring_## template ##_get_comm(win), &world_rank)) { \
+ if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(target_rank, win->w_group, &world_rank)) { \
  size_t type_size; \
  ompi_datatype_type_size(dt, &type_size); \
  mca_common_monitoring_record_osc(world_rank, type_size, SEND); \
@@ -56,7 +56,7 @@
  * If this fails the destination is not part of my MPI_COM_WORLD \
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank \
  */ \
- if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(target_rank, ompi_osc_monitoring_## template ##_get_comm(win), &world_rank)) { \
+ if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(target_rank, win->w_group, &world_rank)) { \
  size_t type_size, data_size; \
  ompi_datatype_type_size(origin_datatype, &type_size); \
  data_size = origin_count*type_size; \
@@ -88,7 +88,7 @@
  * If this fails the destination is not part of my MPI_COM_WORLD \
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank \
  */ \
- if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(target_rank, ompi_osc_monitoring_## template ##_get_comm(win), &world_rank)) { \
+ if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(target_rank, win->w_group, &world_rank)) { \
  size_t type_size, data_size; \
  ompi_datatype_type_size(origin_datatype, &type_size); \
  data_size = origin_count*type_size; \
@@ -116,7 +116,7 @@
  * If this fails the destination is not part of my MPI_COM_WORLD \
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank \
  */ \
- if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(target_rank, ompi_osc_monitoring_## template ##_get_comm(win), &world_rank)) { \
+ if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(target_rank, win->w_group, &world_rank)) { \
  size_t type_size, data_size; \
  ompi_datatype_type_size(origin_datatype, &type_size); \
  data_size = origin_count*type_size; \
@@ -140,7 +140,7 @@
  * If this fails the destination is not part of my MPI_COM_WORLD \
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank \
  */ \
- if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(target_rank, ompi_osc_monitoring_## template ##_get_comm(win), &world_rank)) { \
+ if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(target_rank, win->w_group, &world_rank)) { \
  size_t type_size, data_size; \
  ompi_datatype_type_size(origin_datatype, &type_size); \
  data_size = origin_count*type_size; \
@@ -162,7 +162,7 @@
  * If this fails the destination is not part of my MPI_COM_WORLD \
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank \
  */ \
- if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(target_rank, ompi_osc_monitoring_## template ##_get_comm(win), &world_rank)) { \
+ if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(target_rank, win->w_group, &world_rank)) { \
  size_t type_size; \
  ompi_datatype_type_size(dt, &type_size); \
  mca_common_monitoring_record_osc(world_rank, type_size, SEND); \

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2017 Inria. All rights reserved.
+ * Copyright (c) 2016-2018 Inria. All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -30,7 +30,7 @@
  * If this fails the destination is not part of my MPI_COM_WORLD \
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank \
  */ \
- if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(target_rank, ompi_osc_monitoring_## template ##_get_comm(win), &world_rank)) { \
+ if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(target_rank, win->w_group, &world_rank)) { \
  size_t type_size, data_size; \
  ompi_datatype_type_size(origin_datatype, &type_size); \
  data_size = origin_count*type_size; \
@@ -55,7 +55,7 @@
  * If this fails the destination is not part of my MPI_COM_WORLD \
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank \
  */ \
- if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(target_rank, ompi_osc_monitoring_## template ##_get_comm(win), &world_rank)) { \
+ if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(target_rank, win->w_group, &world_rank)) { \
  size_t type_size, data_size; \
  ompi_datatype_type_size(origin_datatype, &type_size); \
  data_size = origin_count*type_size; \
@@ -78,7 +78,7 @@
  * If this fails the destination is not part of my MPI_COM_WORLD \
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank \
  */ \
- if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(source_rank, ompi_osc_monitoring_## template ##_get_comm(win), &world_rank)) { \
+ if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(source_rank, win->w_group, &world_rank)) { \
  size_t type_size, data_size; \
  ompi_datatype_type_size(origin_datatype, &type_size); \
  data_size = origin_count*type_size; \
@@ -103,7 +103,7 @@
  * If this fails the destination is not part of my MPI_COM_WORLD \
  * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank \
  */ \
- if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(source_rank, ompi_osc_monitoring_## template ##_get_comm(win), &world_rank)) { \
+ if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(source_rank, win->w_group, &world_rank)) { \
  size_t type_size, data_size; \
  ompi_datatype_type_size(origin_datatype, &type_size); \
  data_size = origin_count*type_size; \

@@ -8,6 +8,7 @@
  */

  #include <ompi_config.h>
+ #include <string.h>
  #include "osc_monitoring.h"
  #include <ompi/constants.h>
  #include <ompi/communicator/communicator.h>
@@ -18,29 +19,12 @@
  #include <ompi/mca/osc/base/base.h>
  #include <opal/mca/base/mca_base_component_repository.h>

- /***************************************/
- /* Include template generating macros */
- #include "osc_monitoring_template.h"
+ /**************************************************/
+ /* Include templated macros and inlined functions */

- #include <ompi/mca/osc/rdma/osc_rdma.h>
- OSC_MONITORING_MODULE_TEMPLATE_GENERATE(rdma, ompi_osc_rdma_module_t, comm)
- #undef GET_MODULE
+ #include "osc_monitoring_template_gen.h"

- #include <ompi/mca/osc/sm/osc_sm.h>
- OSC_MONITORING_MODULE_TEMPLATE_GENERATE(sm, ompi_osc_sm_module_t, comm)
- #undef GET_MODULE
-
- #include <ompi/mca/osc/pt2pt/osc_pt2pt.h>
- OSC_MONITORING_MODULE_TEMPLATE_GENERATE(pt2pt, ompi_osc_pt2pt_module_t, comm)
- #undef GET_MODULE
-
- #ifdef OMPI_WITH_OSC_PORTALS4
- #include <ompi/mca/osc/portals4/osc_portals4.h>
- OSC_MONITORING_MODULE_TEMPLATE_GENERATE(portals4, ompi_osc_portals4_module_t, comm)
- #undef GET_MODULE
- #endif /* OMPI_WITH_OSC_PORTALS4 */

- /***************************************/
+ /**************************************************/

  static int mca_osc_monitoring_component_init(bool enable_progress_threads,
  bool enable_mpi_threads)
@@ -69,6 +53,20 @@ static int mca_osc_monitoring_component_query(struct ompi_win_t *win, void **bas
  return mca_osc_monitoring_component.priority;
  }

+ static inline int
+ ompi_mca_osc_monitoring_set_template(ompi_osc_base_component_t *best_component,
+ ompi_osc_base_module_t *module)
+ {
+ osc_monitoring_components_list_t comp = osc_monitoring_components_list[0];
+ for (unsigned i = 0; NULL != comp.name; comp = osc_monitoring_components_list[++i]) {
+ if ( 0 == strcmp(comp.name, best_component->osc_version.mca_component_name) ) {
+ comp.fct(module);
+ return OMPI_SUCCESS;
+ }
+ }
+ return OMPI_ERR_NOT_SUPPORTED;
+ }
+
  static int mca_osc_monitoring_component_select(struct ompi_win_t *win, void **base, size_t size, int disp_unit,
  struct ompi_communicator_t *comm, struct opal_info_t *info,
  int flavor, int *model)
@@ -108,20 +106,11 @@ static int mca_osc_monitoring_component_select(struct ompi_win_t *win, void **ba
  ret = best_component->osc_select(win, base, size, disp_unit, comm, info, flavor, model);
  if( OMPI_SUCCESS == ret ) {
  /* Intercept module functions with ours, based on selected component */
- if( 0 == strcmp("rdma", best_component->osc_version.mca_component_name) ) {
- OSC_MONITORING_SET_TEMPLATE(rdma, win->w_osc_module);
- } else if( 0 == strcmp("sm", best_component->osc_version.mca_component_name) ) {
- OSC_MONITORING_SET_TEMPLATE(sm, win->w_osc_module);
- } else if( 0 == strcmp("pt2pt", best_component->osc_version.mca_component_name) ) {
- OSC_MONITORING_SET_TEMPLATE(pt2pt, win->w_osc_module);
- #ifdef OMPI_WITH_OSC_PORTALS4
- } else if( 0 == strcmp("portals4", best_component->osc_version.mca_component_name) ) {
- OSC_MONITORING_SET_TEMPLATE(portals4, win->w_osc_module);
- #endif /* OMPI_WITH_OSC_PORTALS4 */
- } else {
+ ret = ompi_mca_osc_monitoring_set_template(best_component, win->w_osc_module);
+ if (OMPI_ERR_NOT_SUPPORTED == ret) {
  OPAL_MONITORING_PRINT_WARN("osc: monitoring disabled: no module for this component "
  "(%s)", best_component->osc_version.mca_component_name);
- return ret;
+ return OMPI_SUCCESS;
  }
  }
  return ret;
@@ -150,4 +139,3 @@ ompi_osc_monitoring_component_t mca_osc_monitoring_component = {
  },
  .priority = INT_MAX
  };
-

@ -22,63 +22,18 @@
|
||||
#define OMPI_OSC_MONITORING_MODULE_VARIABLE(template) \
|
||||
ompi_osc_monitoring_module_## template ##_template
|
||||
|
||||
/* Define once and for all the module_template variable name */
|
||||
#define OMPI_OSC_MONITORING_MODULE_INIT(template) \
|
||||
ompi_osc_monitoring_module_## template ##_init_done
|
||||
|
||||
/* Define once and for all the template variable name */
|
||||
#define OMPI_OSC_MONITORING_TEMPLATE_VARIABLE(template) \
|
||||
ompi_osc_monitoring_## template ##_template
|
||||
|
||||
/* Define the ompi_osc_monitoring_module_## template ##_template variable */
|
||||
#define OMPI_OSC_MONITORING_MODULE_GENERATE(template) \
|
||||
static ompi_osc_base_module_t OMPI_OSC_MONITORING_MODULE_VARIABLE(template)
|
||||
|
||||
/* Define the ompi_osc_monitoring_module_## template ##_init_done variable */
|
||||
#define OMPI_OSC_MONITORING_MODULE_INIT_GENERATE(template) \
|
||||
static int32_t OMPI_OSC_MONITORING_MODULE_INIT(template)
|
||||
|
||||
/* Define and set the ompi_osc_monitoring_## template ##_template
|
||||
* variable. The functions recorded here are linked to the original
|
||||
* functions of the original {template} module that were replaced.
|
||||
/* Define once and for all the
|
||||
* ompi_osc_monitoring_## template ##_set_template function name
|
||||
*/
|
||||
#define MCA_OSC_MONITORING_MODULE_TEMPLATE_GENERATE(template) \
|
||||
static ompi_osc_base_module_t OMPI_OSC_MONITORING_TEMPLATE_VARIABLE(template) = { \
|
||||
.osc_win_attach = ompi_osc_monitoring_## template ##_attach, \
|
||||
.osc_win_detach = ompi_osc_monitoring_## template ##_detach, \
|
||||
.osc_free = ompi_osc_monitoring_## template ##_free, \
|
||||
\
|
||||
.osc_put = ompi_osc_monitoring_## template ##_put, \
|
||||
.osc_get = ompi_osc_monitoring_## template ##_get, \
|
||||
.osc_accumulate = ompi_osc_monitoring_## template ##_accumulate, \
|
||||
.osc_compare_and_swap = ompi_osc_monitoring_## template ##_compare_and_swap, \
|
||||
.osc_fetch_and_op = ompi_osc_monitoring_## template ##_fetch_and_op, \
|
||||
.osc_get_accumulate = ompi_osc_monitoring_## template ##_get_accumulate, \
|
||||
\
|
||||
.osc_rput = ompi_osc_monitoring_## template ##_rput, \
|
||||
.osc_rget = ompi_osc_monitoring_## template ##_rget, \
|
||||
.osc_raccumulate = ompi_osc_monitoring_## template ##_raccumulate, \
|
||||
.osc_rget_accumulate = ompi_osc_monitoring_## template ##_rget_accumulate, \
|
||||
\
|
||||
.osc_fence = ompi_osc_monitoring_## template ##_fence, \
|
||||
\
|
||||
.osc_start = ompi_osc_monitoring_## template ##_start, \
|
||||
.osc_complete = ompi_osc_monitoring_## template ##_complete, \
|
||||
.osc_post = ompi_osc_monitoring_## template ##_post, \
|
||||
.osc_wait = ompi_osc_monitoring_## template ##_wait, \
|
||||
.osc_test = ompi_osc_monitoring_## template ##_test, \
|
||||
\
|
||||
.osc_lock = ompi_osc_monitoring_## template ##_lock, \
|
||||
.osc_unlock = ompi_osc_monitoring_## template ##_unlock, \
|
||||
.osc_lock_all = ompi_osc_monitoring_## template ##_lock_all, \
|
||||
.osc_unlock_all = ompi_osc_monitoring_## template ##_unlock_all, \
|
||||
\
|
||||
.osc_sync = ompi_osc_monitoring_## template ##_sync, \
|
||||
.osc_flush = ompi_osc_monitoring_## template ##_flush, \
|
||||
.osc_flush_all = ompi_osc_monitoring_## template ##_flush_all, \
|
||||
.osc_flush_local = ompi_osc_monitoring_## template ##_flush_local, \
|
||||
.osc_flush_local_all = ompi_osc_monitoring_## template ##_flush_local_all, \
|
||||
}
|
||||
#define OSC_MONITORING_SET_TEMPLATE_FCT_NAME(template) \
|
||||
ompi_osc_monitoring_## template ##_set_template
|
||||
|
||||
/* Define the ompi_osc_monitoring_module_## template ##_template
|
||||
* variable
|
||||
*/
|
||||
#define OMPI_OSC_MONITORING_MODULE_GENERATE(template) \
|
||||
/* Define the ompi_osc_monitoring_module_## template ##_template */ \
|
||||
static ompi_osc_base_module_t OMPI_OSC_MONITORING_MODULE_VARIABLE(template);
|
||||
|
||||
#define OSC_MONITORING_GENERATE_TEMPLATE_MODULE(template) \
|
||||
\
|
||||
@ -87,5 +42,68 @@
        return OMPI_OSC_MONITORING_MODULE_VARIABLE(template).osc_free(win); \
    }

#define MCA_OSC_MONITORING_MODULE_TEMPLATE_GENERATE(template) \
    /* Generate template specific module initialization function: \
     * ompi_osc_monitoring_## template ##_set_template(ompi_osc_base_module_t*module) \
     */ \
    static inline ompi_osc_base_module_t * \
    OSC_MONITORING_SET_TEMPLATE_FCT_NAME(template) (ompi_osc_base_module_t*module) \
    { \
        /* Define the ompi_osc_monitoring_module_## template ##_init_done variable */ \
        static int32_t init_done = 0; \
        /* Define and set the ompi_osc_monitoring_## template \
         * ##_template variable. The functions recorded here are \
         * linked to the original functions of the original \
         * {template} module that was replaced. \
         */ \
        static const ompi_osc_base_module_t module_specific_interception_layer = { \
            .osc_win_attach = ompi_osc_monitoring_## template ##_attach, \
            .osc_win_detach = ompi_osc_monitoring_## template ##_detach, \
            .osc_free = ompi_osc_monitoring_## template ##_free, \
            \
            .osc_put = ompi_osc_monitoring_## template ##_put, \
            .osc_get = ompi_osc_monitoring_## template ##_get, \
            .osc_accumulate = ompi_osc_monitoring_## template ##_accumulate, \
            .osc_compare_and_swap = ompi_osc_monitoring_## template ##_compare_and_swap, \
            .osc_fetch_and_op = ompi_osc_monitoring_## template ##_fetch_and_op, \
            .osc_get_accumulate = ompi_osc_monitoring_## template ##_get_accumulate, \
            \
            .osc_rput = ompi_osc_monitoring_## template ##_rput, \
            .osc_rget = ompi_osc_monitoring_## template ##_rget, \
            .osc_raccumulate = ompi_osc_monitoring_## template ##_raccumulate, \
            .osc_rget_accumulate = ompi_osc_monitoring_## template ##_rget_accumulate, \
            \
            .osc_fence = ompi_osc_monitoring_## template ##_fence, \
            \
            .osc_start = ompi_osc_monitoring_## template ##_start, \
            .osc_complete = ompi_osc_monitoring_## template ##_complete, \
            .osc_post = ompi_osc_monitoring_## template ##_post, \
            .osc_wait = ompi_osc_monitoring_## template ##_wait, \
            .osc_test = ompi_osc_monitoring_## template ##_test, \
            \
            .osc_lock = ompi_osc_monitoring_## template ##_lock, \
            .osc_unlock = ompi_osc_monitoring_## template ##_unlock, \
            .osc_lock_all = ompi_osc_monitoring_## template ##_lock_all, \
            .osc_unlock_all = ompi_osc_monitoring_## template ##_unlock_all, \
            \
            .osc_sync = ompi_osc_monitoring_## template ##_sync, \
            .osc_flush = ompi_osc_monitoring_## template ##_flush, \
            .osc_flush_all = ompi_osc_monitoring_## template ##_flush_all, \
            .osc_flush_local = ompi_osc_monitoring_## template ##_flush_local, \
            .osc_flush_local_all = ompi_osc_monitoring_## template ##_flush_local_all, \
        }; \
        if ( 1 == opal_atomic_add_fetch_32(&init_done, 1) ) { \
            /* Saves the original module functions in \
             * ompi_osc_monitoring_module_## template ##_template \
             */ \
            memcpy(&OMPI_OSC_MONITORING_MODULE_VARIABLE(template), \
                   module, sizeof(ompi_osc_base_module_t)); \
        } \
        /* Replace the original functions with our generated ones */ \
        memcpy(module, &module_specific_interception_layer, \
               sizeof(ompi_osc_base_module_t)); \
        return module; \
    }

#endif /* MCA_OSC_MONITORING_MODULE_H */

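To see the logic of the generated set_template function outside of macro expansion, here is a compilable sketch of the same save-then-replace idiom: a one-time copy of the original function table guarded by an atomic counter, followed by an unconditional overwrite with the interception layer, whose wrappers forward to the saved table. The two-entry stub table, the fake functions, and the use of C11 atomics in place of opal_atomic_add_fetch_32 are all assumptions for illustration.

```c
/* Sketch of the save-then-replace idiom used by the generated
 * ompi_osc_monitoring_<template>_set_template functions. C11 atomics stand
 * in for opal_atomic_add_fetch_32; the module struct is a two-entry stub. */
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

typedef struct {
    int (*osc_put)(const void *buf, int count);
    int (*osc_get)(void *buf, int count);
} module_stub_t;

static module_stub_t saved_original;   /* plays the *_module_*_template role */

static int monitoring_put(const void *buf, int count)
{
    printf("monitor: put of %d elements\n", count);
    return saved_original.osc_put(buf, count);   /* forward to the real module */
}

static int monitoring_get(void *buf, int count)
{
    printf("monitor: get of %d elements\n", count);
    return saved_original.osc_get(buf, count);
}

static module_stub_t *set_template(module_stub_t *module)
{
    static atomic_int init_done = 0;
    static const module_stub_t interception_layer = {
        .osc_put = monitoring_put,
        .osc_get = monitoring_get,
    };
    if (1 == atomic_fetch_add(&init_done, 1) + 1) {
        /* First caller saves the original functions. */
        memcpy(&saved_original, module, sizeof(*module));
    }
    /* Every caller overwrites the window's table with the wrappers. */
    memcpy(module, &interception_layer, sizeof(*module));
    return module;
}

/* Tiny fake module to exercise the mechanism. */
static int real_put(const void *buf, int count) { (void)buf; return count; }
static int real_get(void *buf, int count)       { (void)buf; return count; }

int main(void)
{
    module_stub_t win_module = { .osc_put = real_put, .osc_get = real_get };
    set_template(&win_module);
    win_module.osc_put(NULL, 4);   /* routed through monitoring_put */
    return 0;
}
```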
@ -23,31 +23,21 @@
#include "osc_monitoring_module.h"
#include "osc_monitoring_passive_target.h"

/* module_type correspond to the ompi_osc_## template ##_module_t type
 * comm correspond to the comm field name in ompi_osc_## template ##_module_t
 *
 * The magic used here is that for a given module type (given with the
/* The magic used here is that for a given module type (given with the
 * {template} parameter), we generate a set of every functions defined
 * in ompi_osc_base_module_t, the ompi_osc_monitoring_module_##
 * template ##_template variable recording the original set of
 * functions, and the ompi_osc_monitoring_## template ##_template
 * variable that record the generated set of functions. When a
 * function is called from the original module, we route the call to
 * our generated function that does the monitoring, and then we call
 * the original function that had been saved in the
 * functions, and the generated set of functions is recorded as a
 * static variable inside the initialization function. When a function
 * is called from the original module, we route the call to our
 * generated function that does the monitoring, and then we call the
 * original function that had been saved in the
 * ompi_osc_monitoring_module_## template ##_template variable.
 */
#define OSC_MONITORING_MODULE_TEMPLATE_GENERATE(template, module_type, comm) \
#define OSC_MONITORING_MODULE_TEMPLATE_GENERATE(template) \
    /* Generate the proper symbol for the \
       ompi_osc_monitoring_module_## template ##_template variable */ \
    OMPI_OSC_MONITORING_MODULE_GENERATE(template); \
    OMPI_OSC_MONITORING_MODULE_INIT_GENERATE(template); \
    /* Generate module specific module->comm accessor */ \
    static inline struct ompi_communicator_t* \
    ompi_osc_monitoring_## template ##_get_comm(ompi_win_t*win) \
    { \
        return ((module_type*)win->w_osc_module)->comm; \
    } \
    OMPI_OSC_MONITORING_MODULE_GENERATE(template) \
    /* Generate each module specific functions */ \
    OSC_MONITORING_GENERATE_TEMPLATE_ACCUMULATE(template) \
    OSC_MONITORING_GENERATE_TEMPLATE_ACTIVE_TARGET(template) \
@ -55,27 +45,9 @@
    OSC_MONITORING_GENERATE_TEMPLATE_DYNAMIC(template) \
    OSC_MONITORING_GENERATE_TEMPLATE_MODULE(template) \
    OSC_MONITORING_GENERATE_TEMPLATE_PASSIVE_TARGET(template) \
    /* Set the mca_osc_monitoring_## template ##_template variable */ \
    MCA_OSC_MONITORING_MODULE_TEMPLATE_GENERATE(template); \
    /* Generate template specific module initialization function */ \
    static inline void* \
    ompi_osc_monitoring_## template ##_set_template (ompi_osc_base_module_t*module) \
    { \
        if( 1 == opal_atomic_add_fetch_32(&(OMPI_OSC_MONITORING_MODULE_INIT(template)), 1) ) { \
            /* Saves the original module functions in \
             * ompi_osc_monitoring_module_## template ##_template \
             */ \
            memcpy(&OMPI_OSC_MONITORING_MODULE_VARIABLE(template), \
                   module, sizeof(ompi_osc_base_module_t)); \
        } \
        /* Replace the original functions with our generated ones */ \
        memcpy(module, &OMPI_OSC_MONITORING_TEMPLATE_VARIABLE(template), \
               sizeof(ompi_osc_base_module_t)); \
        return module; \
    }

#define OSC_MONITORING_SET_TEMPLATE(template, module) \
    ompi_osc_monitoring_## template ##_set_template(module)
    /* Generate template specific module initialization function: \
     * ompi_osc_monitoring_## template ##_set_template(ompi_osc_base_module_t*module) \
     */ \
    MCA_OSC_MONITORING_MODULE_TEMPLATE_GENERATE(template)

#endif /* MCA_OSC_MONITORING_TEMPLATE_H */

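The removed get_comm accessor above is what used to tie osc/monitoring to every concrete module's private headers: casting win->w_osc_module to the module-specific type requires that type's full definition. The sketch below (stand-in names only, with an rdma-like module used purely as an example) reproduces the shape of that accessor to show the dependency this patch eliminates by dropping the module_type and comm parameters from the template macro.

```c
/* Sketch of the accessor the old template generated and why it forced
 * cross-module includes. All types and names here are stand-ins. */
#include <stdio.h>

typedef struct { int world; } comm_stub_t;

/* Concrete module layout, normally visible only in that module's own headers. */
typedef struct {
    int          unrelated_state;
    comm_stub_t *comm;
} rdma_module_stub_t;

typedef struct { void *w_osc_module; } win_stub_t;

/* Roughly what the removed "module->comm accessor" expanded to for one template:
 * the cast needs the full rdma_module_stub_t definition. */
static inline comm_stub_t *monitoring_rdma_get_comm(win_stub_t *win)
{
    return ((rdma_module_stub_t *)win->w_osc_module)->comm;
}

int main(void)
{
    comm_stub_t world = { .world = 42 };
    rdma_module_stub_t mod = { .unrelated_state = 0, .comm = &world };
    win_stub_t win = { .w_osc_module = &mod };
    printf("comm->world = %d\n", monitoring_rdma_get_comm(&win)->world);
    return 0;
}
```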
@ -2,7 +2,7 @@
 * Copyright (c) 2013-2015 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2013-2017 Inria. All rights reserved.
 * Copyright (c) 2013-2018 Inria. All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@ -40,7 +40,7 @@ int mca_pml_monitoring_isend(const void *buf,
     * If this fails the destination is not part of my MPI_COM_WORLD
     * Lookup its name in the rank hastable to get its MPI_COMM_WORLD rank
     */
    if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(dst, comm, &world_rank)) {
    if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(dst, comm->c_remote_group, &world_rank)) {
        size_t type_size, data_size;
        ompi_datatype_type_size(datatype, &type_size);
        data_size = count*type_size;
@ -61,7 +61,7 @@ int mca_pml_monitoring_send(const void *buf,
{
    int world_rank;
    /* Are we sending to a peer from my own MPI_COMM_WORLD? */
    if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(dst, comm, &world_rank)) {
    if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(dst, comm->c_remote_group, &world_rank)) {
        size_t type_size, data_size;
        ompi_datatype_type_size(datatype, &type_size);
        data_size = count*type_size;

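For context on the call sites above, this standalone sketch models the accounting they implement: the destination rank is translated to its MPI_COMM_WORLD rank (here via a mock standing in for mca_common_monitoring_get_world_rank, which after this patch takes the communicator's remote group rather than the communicator itself), and count * type_size bytes are charged to that peer. The table size, the mock translation, and the success code are assumptions for the example.

```c
/* Self-contained model of the monitoring accounting done in the
 * mca_pml_monitoring_send()/isend() paths. get_world_rank() is a mock
 * standing in for mca_common_monitoring_get_world_rank(peer, group, &rank). */
#include <stddef.h>
#include <stdio.h>

#define MOCK_SUCCESS    0
#define MOCK_WORLD_SIZE 4

static size_t sent_bytes[MOCK_WORLD_SIZE];   /* per-peer byte counters */

typedef struct { int world_ranks[MOCK_WORLD_SIZE]; int size; } group_stub_t;

/* Mock translation: look the peer up in the (remote) group's rank table. */
static int get_world_rank(int peer, const group_stub_t *group, int *world_rank)
{
    if (peer < 0 || peer >= group->size) return -1;   /* not in my world */
    *world_rank = group->world_ranks[peer];
    return MOCK_SUCCESS;
}

static void record_send(int dst, const group_stub_t *remote_group,
                        size_t count, size_t type_size)
{
    int world_rank;
    if (MOCK_SUCCESS == get_world_rank(dst, remote_group, &world_rank)) {
        sent_bytes[world_rank] += count * type_size;   /* data_size */
    }
}

int main(void)
{
    group_stub_t remote_group = { .world_ranks = {0, 1, 2, 3}, .size = 4 };
    record_send(2, &remote_group, 10, sizeof(double));
    printf("bytes to world rank 2: %zu\n", sent_bytes[2]);
    return 0;
}
```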
@ -2,7 +2,7 @@
 * Copyright (c) 2013-2015 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2013-2017 Inria. All rights reserved.
 * Copyright (c) 2013-2018 Inria. All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@ -38,7 +38,8 @@ int mca_pml_monitoring_start(size_t count,
     * If this fails the destination is not part of my MPI_COM_WORLD
     */
    if(OPAL_SUCCESS == mca_common_monitoring_get_world_rank(pml_request->req_peer,
                                                            pml_request->req_comm, &world_rank)) {
                                                            pml_request->req_comm->c_remote_group,
                                                            &world_rank)) {
        size_t type_size, data_size;
        ompi_datatype_type_size(pml_request->req_datatype, &type_size);
        data_size = pml_request->req_count * type_size;