/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2006 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#include "ompi_config.h"
#include "mpi.h"
#include "ompi/constants.h"
#include "ompi/datatype/datatype.h"
#include "ompi/communicator/communicator.h"
#include "ompi/mca/coll/coll.h"
#include "ompi/mca/coll/base/coll_tags.h"
#include "ompi/mca/pml/pml.h"
#include "ompi/op/op.h"
#include "coll_tuned.h"
#include "coll_tuned_topo.h"

/**
 * This is a generic implementation of the reduce protocol. It uses the tree
 * provided as an argument and executes all operations using a segment of
 * count times a datatype.
 * For the last communication it will update the count in order to limit
 * the number of datatypes to the original count (original_count).
 */
int ompi_coll_tuned_reduce_generic( void* sendbuf, void* recvbuf, int original_count,
                                    ompi_datatype_t* datatype, ompi_op_t* op,
                                    int root, ompi_communicator_t* comm,
                                    ompi_coll_tree_t* tree, int count_by_segment )
{
    char *inbuf[2] = {(char*)NULL, (char*)NULL};
    char *local_op_buffer, *accumbuf = NULL, *sendtmpbuf;
    ptrdiff_t extent, lower_bound;
    size_t typelng, realsegsize;
    ompi_request_t* reqs[2] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL};
    int num_segments, line, ret, segindex, i, rank;
    int recvcount, prevcount, inbi, previnbi;

    /**
     * Determine number of segments and number of elements
     * sent per operation
     */
    ompi_ddt_get_extent( datatype, &lower_bound, &extent );
    ompi_ddt_type_size( datatype, &typelng );
    num_segments = (original_count + count_by_segment - 1) / count_by_segment;
    realsegsize = count_by_segment * extent;
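    /*
     * Added commentary, illustrative numbers only: with original_count = 1000
     * and count_by_segment = 256, num_segments = 4; the first three segments
     * carry 256 elements each and the last carries the remaining 232, which is
     * why the final-segment step below shrinks recvcount.
     */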
2005-09-06 09:21:57 +04:00
2006-10-20 02:20:33 +04:00
sendtmpbuf = ( char * ) sendbuf ;
if ( sendbuf = = MPI_IN_PLACE ) {
sendtmpbuf = ( char * ) recvbuf ;
2005-10-14 04:00:37 +04:00
}
2006-10-27 02:53:05 +04:00
rank = ompi_comm_rank ( comm ) ;
    /* non-leaf nodes - wait for children to send me data & forward up (if needed) */
    if( tree->tree_nextsize > 0 ) {
        /* handle non-existent recv buffer (i.e. it is NULL, as the basic allreduce uses!) */
        accumbuf = (char*)recvbuf;
        if( NULL == accumbuf ) {
            accumbuf = (char*)malloc(realsegsize * num_segments);  /* TO BE OPTIMIZED */
            if (accumbuf == NULL) { line = __LINE__; ret = -1; goto error_hndl; }
        }

        /* Allocate two buffers for incoming segments */
        inbuf[0] = (char*) malloc(realsegsize);
        if( inbuf[0] == NULL ) { line = __LINE__; ret = -1; goto error_hndl; }
        /* if there is a chance to overlap communication -
           allocate a second buffer */
        if( (num_segments > 1) || (tree->tree_nextsize > 1) ) {
            inbuf[1] = (char*) malloc(realsegsize);
            if( inbuf[1] == NULL ) { line = __LINE__; ret = -1; goto error_hndl; }
        } else {
            inbuf[1] = NULL;
        }

        /* reset input buffer index and receive count */
        inbi = 0;
        recvcount = 0;

        /* for each segment */
        for( segindex = 0; segindex <= num_segments; segindex++ ) {
            prevcount = recvcount;
            /* recvcount - number of elements in current segment */
            recvcount = count_by_segment;
            if( segindex == (num_segments - 1) )
                recvcount = original_count - count_by_segment * segindex;

            /* for each child */
            for( i = 0; i < tree->tree_nextsize; i++ ) {
                /**
                 * We try to overlap communication:
                 * either with next segment or with the next child
                 */
                /* post irecv for current segindex on current child */
                if( segindex < num_segments ) {
                    void* local_recvbuf = inbuf[inbi];
                    if( 0 == i ) {
                        /* for the first step (1st child per segment) we might be able to
                         * irecv directly into the accumulate buffer so that we can
                         * reduce (op) this with our sendbuf in one step, as ompi_op_reduce
                         * only has two buffer pointers; this avoids an extra memory copy.
                         *
                         * BUT if we are root and are USING MPI_IN_PLACE this is wrong!
                         * check for root might not be needed as it should be checked higher up
                         */
                        if( !((MPI_IN_PLACE == sendbuf) && (rank == tree->tree_root)) ) {
                            local_recvbuf = accumbuf + segindex * realsegsize;
                        }
                    }
                    ret = MCA_PML_CALL( irecv(local_recvbuf, recvcount, datatype, tree->tree_next[i],
                                              MCA_COLL_BASE_TAG_REDUCE, comm, &reqs[inbi]) );
                    if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
                }
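                /*
                 * Added commentary: inbuf[0] and inbuf[1] act as a double
                 * buffer; while the reduction below consumes the segment that
                 * arrived in inbuf[previnbi], the irecv above is already
                 * filling inbuf[inbi] (or the accumulation buffer) with the
                 * next segment/child, which provides the communication and
                 * computation overlap.
                 */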
                /* wait for previous req to complete, if any */
                previnbi = (inbi + 1) % 2;
                /* wait on data from last child for previous segment */
                ret = ompi_request_wait_all( 1, &reqs[previnbi], MPI_STATUSES_IGNORE );
                if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
                local_op_buffer = inbuf[previnbi];
                if( i > 0 ) {
                    /* our first operation is to combine our own [sendbuf] data with the data
                     * we received from downstream (but only if we are not root and not using
                     * MPI_IN_PLACE)
                     */
                    if( 1 == i ) {
                        if( !((MPI_IN_PLACE == sendbuf) && (rank == tree->tree_root)) ) {
                            local_op_buffer = sendtmpbuf + segindex * realsegsize;
                        }
                    }
                    /* apply operation */
                    ompi_op_reduce( op, local_op_buffer, accumbuf + segindex * realsegsize,
                                    recvcount, datatype );
                } else if( segindex > 0 ) {
                    void* accumulator = accumbuf + (segindex - 1) * realsegsize;
                    if( tree->tree_nextsize <= 1 ) {
                        if( !((MPI_IN_PLACE == sendbuf) && (rank == tree->tree_root)) ) {
                            local_op_buffer = sendtmpbuf + (segindex - 1) * realsegsize;
                        }
                    }
                    ompi_op_reduce( op, local_op_buffer, accumulator, prevcount, datatype );

                    /* all reductions on the available data for this step (i) are complete,
                     * so pass the result up to the next process unless you are the root
                     */
                    if (rank != tree->tree_root) {
                        /* send combined/accumulated data to parent */
                        ret = MCA_PML_CALL( send( accumulator, prevcount, datatype,
                                                  tree->tree_prev, MCA_COLL_BASE_TAG_REDUCE,
                                                  MCA_PML_BASE_SEND_STANDARD, comm ) );
                        if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
                    }

                    /* we stop when segindex == num_segments (i.e. we do num_segments+1 steps to allow for pipelining) */
                    if (segindex == num_segments) break;
                }

                /* update input buffer index */
                inbi = previnbi;
            } /* end of for each child */
        } /* end of for each segment */

        /* clean up */
        if( inbuf[0] != NULL ) free(inbuf[0]);
        if( inbuf[1] != NULL ) free(inbuf[1]);
        if( NULL == recvbuf ) free(accumbuf);
    }

    /* leaf nodes */
    else {
        /* Send segmented data to parents */
        segindex = 0;
        while( original_count > 0 ) {
            if( original_count < count_by_segment ) count_by_segment = original_count;
            ret = MCA_PML_CALL( send((char*)sendbuf + segindex * realsegsize, count_by_segment,
                                     datatype, tree->tree_prev,
                                     MCA_COLL_BASE_TAG_REDUCE, MCA_PML_BASE_SEND_STANDARD, comm) );
            if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
            segindex++;
            original_count -= count_by_segment;
        }
    }

    return OMPI_SUCCESS;

 error_hndl:    /* error handler */
    OPAL_OUTPUT(( ompi_coll_tuned_stream, "ERROR_HNDL: node %d file %s line %d error %d\n",
                  rank, __FILE__, line, ret ));
    if( inbuf[0] != NULL ) free(inbuf[0]);
    if( inbuf[1] != NULL ) free(inbuf[1]);
    if( (NULL == recvbuf) && (NULL != accumbuf) ) free(accumbuf);
    return ret;
}

/* Attention: this version of the reduce operation does not
   work for:
   - non-commutative operations
   - segment sizes which are not multiples of the extent of the datatype,
     meaning that at least one datatype must fit in the segment!
*/
int ompi_coll_tuned_reduce_intra_chain( void* sendbuf, void* recvbuf, int count,
                                        ompi_datatype_t* datatype, ompi_op_t* op,
                                        int root, ompi_communicator_t* comm,
                                        uint32_t segsize, int fanout )
{
    int segcount;
    size_t typelng;

    OPAL_OUTPUT(( ompi_coll_tuned_stream, "coll:tuned:reduce_intra_chain rank %d fo %d ss %5d",
                  ompi_comm_rank(comm), fanout, segsize ));

    COLL_TUNED_UPDATE_CHAIN( comm, root, fanout );
    /**
     * Determine number of segments and number of elements
     * sent per operation
     */
    ompi_ddt_type_size( datatype, &typelng );
    if( segsize > typelng ) {
        segcount = (int)(segsize / typelng);
    } else {
        segcount = count;
    }

    return ompi_coll_tuned_reduce_generic( sendbuf, recvbuf, count, datatype, op, root, comm,
                                           comm->c_coll_selected_data->cached_chain, segcount );
}
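
/*
 * Added commentary on segment sizing (illustrative): segsize is given in bytes
 * and converted to an element count, e.g. segsize = 32768 with an 8-byte
 * datatype yields segcount = 4096 elements per segment, while a segsize that
 * does not exceed the size of one element (including 0) leaves the whole
 * message in a single segment. A hypothetical direct call could look like
 *     ompi_coll_tuned_reduce_intra_chain(sbuf, rbuf, count, dtype, op,
 *                                        root, comm, 32768, 4);
 * i.e. 32 KB segments pushed up a chain of fanout 4.
 */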

int ompi_coll_tuned_reduce_intra_pipeline( void* sendbuf, void* recvbuf,
                                           int count, ompi_datatype_t* datatype,
                                           ompi_op_t* op, int root,
                                           ompi_communicator_t* comm, uint32_t segsize )
{
    int segcount;
    size_t typelng;

    OPAL_OUTPUT(( ompi_coll_tuned_stream, "coll:tuned:reduce_intra_pipeline rank %d ss %5d",
                  ompi_comm_rank(comm), segsize ));

    COLL_TUNED_UPDATE_PIPELINE( comm, root );
    /**
     * Determine number of segments and number of elements
     * sent per operation
     */
    ompi_ddt_type_size( datatype, &typelng );
    if( segsize > typelng ) {
        segcount = (int)(segsize / typelng);
    } else {
        segcount = count;
    }

    return ompi_coll_tuned_reduce_generic( sendbuf, recvbuf, count, datatype, op, root, comm,
                                           comm->c_coll_selected_data->cached_pipeline, segcount );
}

int ompi_coll_tuned_reduce_intra_binary( void* sendbuf, void* recvbuf,
                                         int count, ompi_datatype_t* datatype,
                                         ompi_op_t* op, int root,
                                         ompi_communicator_t* comm, uint32_t segsize )
{
    int segcount;
    size_t typelng;

    OPAL_OUTPUT(( ompi_coll_tuned_stream, "coll:tuned:reduce_intra_binary rank %d ss %5d",
                  ompi_comm_rank(comm), segsize ));

    COLL_TUNED_UPDATE_BINTREE( comm, root );
    /**
     * Determine number of segments and number of elements
     * sent per operation
     */
    ompi_ddt_type_size( datatype, &typelng );
    if( segsize > typelng ) {
        segcount = (int)(segsize / typelng);
    } else {
        segcount = count;
    }

    return ompi_coll_tuned_reduce_generic( sendbuf, recvbuf, count, datatype, op, root, comm,
                                           comm->c_coll_selected_data->cached_bintree, segcount );
}

int ompi_coll_tuned_reduce_intra_binomial( void* sendbuf, void* recvbuf,
                                           int count, ompi_datatype_t* datatype,
                                           ompi_op_t* op, int root,
                                           ompi_communicator_t* comm, uint32_t segsize )
{
    int segcount;
    size_t typelng;

    OPAL_OUTPUT(( ompi_coll_tuned_stream, "coll:tuned:reduce_intra_binomial rank %d ss %5d",
                  ompi_comm_rank(comm), segsize ));

    COLL_TUNED_UPDATE_BMTREE( comm, root );
    /**
     * Determine number of segments and number of elements
     * sent per operation
     */
    ompi_ddt_type_size( datatype, &typelng );
    if( segsize > typelng ) {
        segcount = (int)(segsize / typelng);
    } else {
        segcount = count;
    }

    return ompi_coll_tuned_reduce_generic( sendbuf, recvbuf, count, datatype, op, root, comm,
                                           comm->c_coll_selected_data->cached_bmtree, segcount );
}

/*
 * Linear functions are copied from the BASIC coll module.
 * They do not segment the message and are simple implementations,
 * but for some small number of nodes and/or small data sizes they
 * are just as fast as tuned/tree based segmenting operations
 * and as such may be selected by the decision functions.
 * These are copied into this module due to the way we select modules
 * in V1, i.e. in V2 we will handle this differently and so will not
 * have to duplicate code.
 * GEF Oct05 after asking Jeff.
 */

/* copied function (with appropriate renaming) starts here */

/*
 *  reduce_lin_intra
 *
 *  Function:   - reduction using O(N) algorithm
 *  Accepts:    - same as MPI_Reduce()
 *  Returns:    - MPI_SUCCESS or error code
 */
int
ompi_coll_tuned_reduce_intra_basic_linear( void* sbuf, void* rbuf, int count,
                                           struct ompi_datatype_t* dtype,
                                           struct ompi_op_t* op,
                                           int root, struct ompi_communicator_t* comm )
{
    int i, rank, err, size;
    ptrdiff_t true_lb, true_extent, lb, extent;
    char *free_buffer = NULL;
    char *pml_buffer = NULL;
    char *inplace_temp = NULL;
    char *inbuf;

    /* Initialize */
    rank = ompi_comm_rank(comm);
    size = ompi_comm_size(comm);

    OPAL_OUTPUT(( ompi_coll_tuned_stream, "coll:tuned:reduce_intra_basic_linear rank %d", rank ));

    /* If not root, send data to the root. */
    if (rank != root) {
        err = MCA_PML_CALL( send(sbuf, count, dtype, root,
                                 MCA_COLL_BASE_TAG_REDUCE,
                                 MCA_PML_BASE_SEND_STANDARD, comm) );
        return err;
    }

    /* see discussion in ompi_coll_basic_reduce_lin_intra about extent and true extent */
    /* for reducing buffer allocation lengths.... */
    ompi_ddt_get_extent( dtype, &lb, &extent );
    ompi_ddt_get_true_extent( dtype, &true_lb, &true_extent );
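
    /*
     * Added commentary: the temporaries allocated below are sized as
     * true_extent + (count - 1) * extent, i.e. just enough for count elements
     * laid out with the datatype's stride, and each resulting pointer is
     * shifted by -lb so that the datatype's lower bound maps onto the start
     * of the allocation.
     */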
    if (MPI_IN_PLACE == sbuf) {
        sbuf = rbuf;
        inplace_temp = (char*)malloc(true_extent + (count - 1) * extent);
        if (NULL == inplace_temp) {
            return OMPI_ERR_OUT_OF_RESOURCE;
        }
        rbuf = inplace_temp - lb;
    }

    if (size > 1) {
        free_buffer = (char*)malloc(true_extent + (count - 1) * extent);
        if (NULL == free_buffer) {
            return OMPI_ERR_OUT_OF_RESOURCE;
        }
        pml_buffer = free_buffer - lb;
    }

    /* Initialize the receive buffer. */
    if (rank == (size - 1)) {
        err = ompi_ddt_copy_content_same_ddt(dtype, count, (char*)rbuf, (char*)sbuf);
    } else {
        err = MCA_PML_CALL( recv(rbuf, count, dtype, size - 1,
                                 MCA_COLL_BASE_TAG_REDUCE, comm,
                                 MPI_STATUS_IGNORE) );
    }
    if (MPI_SUCCESS != err) {
        if (NULL != free_buffer) {
            free(free_buffer);
        }
        return err;
    }

    /* Loop receiving and calling reduction function (C or Fortran). */
    for (i = size - 2; i >= 0; --i) {
        if (rank == i) {
            inbuf = (char*)sbuf;
        } else {
            err = MCA_PML_CALL( recv(pml_buffer, count, dtype, i,
                                     MCA_COLL_BASE_TAG_REDUCE, comm,
                                     MPI_STATUS_IGNORE) );
            if (MPI_SUCCESS != err) {
                if (NULL != free_buffer) {
                    free(free_buffer);
                }
                return err;
            }
            inbuf = pml_buffer;
        }

        /* Perform the reduction */
        ompi_op_reduce(op, inbuf, rbuf, count, dtype);
    }

    if (NULL != inplace_temp) {
        err = ompi_ddt_copy_content_same_ddt(dtype, count, (char*)sbuf, inplace_temp);
        free(inplace_temp);
    }
    if (NULL != free_buffer) {
        free(free_buffer);
    }

    /* All done */
    return MPI_SUCCESS;
}
/* copied function (with appropriate renaming) ends here */

/**
 * The following are used by dynamic and forced rules.
 *
 * Publish details of each algorithm and whether it is forced/fixed/locked in.
 * As you add methods/algorithms you must update this and the query/map routines.
 *
 * This routine is called by the component only.
 * It makes sure that the MCA parameters are set to their initial values and perms.
 * Modules do not call this; they call the forced_getvalues routine instead.
 */
int ompi_coll_tuned_reduce_intra_check_forced_init( coll_tuned_force_algorithm_mca_param_indices_t* mca_param_indices )
{
    int rc;
    int max_alg = 3;

    ompi_coll_tuned_forced_max_algorithms[REDUCE] = max_alg;

    rc = mca_base_param_reg_int( &mca_coll_tuned_component.super.collm_version,
                                 "reduce_algorithm_count",
                                 "Number of reduce algorithms available",
                                 false, true, max_alg, NULL );

    mca_param_indices->algorithm_param_index
        = mca_base_param_reg_int( &mca_coll_tuned_component.super.collm_version,
                                  "reduce_algorithm",
                                  "Which reduce algorithm is used. Can be locked down to choice of: 0 ignore, 1 linear, 2 chain, 3 pipeline",
                                  false, false, 0, NULL );

    mca_param_indices->segsize_param_index
        = mca_base_param_reg_int( &mca_coll_tuned_component.super.collm_version,
                                  "reduce_algorithm_segmentsize",
                                  "Segment size in bytes used by default for reduce algorithms. Only has meaning if algorithm is forced and supports segmenting. 0 bytes means no segmentation.",
                                  false, false, 0, NULL );

    mca_param_indices->tree_fanout_param_index
        = mca_base_param_reg_int( &mca_coll_tuned_component.super.collm_version,
                                  "reduce_algorithm_tree_fanout",
                                  "Fanout for n-tree used for reduce algorithms. Only has meaning if algorithm is forced and supports n-tree topo based operation.",
                                  false, false,
                                  ompi_coll_tuned_init_tree_fanout, /* get system wide default */
                                  NULL );

    mca_param_indices->chain_fanout_param_index
        = mca_base_param_reg_int( &mca_coll_tuned_component.super.collm_version,
                                  "reduce_algorithm_chain_fanout",
                                  "Fanout for chains used for reduce algorithms. Only has meaning if algorithm is forced and supports chain topo based operation.",
                                  false, false,
                                  ompi_coll_tuned_init_chain_fanout, /* get system wide default */
                                  NULL );

    return (MPI_SUCCESS);
}
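
/*
 * Illustrative usage (added commentary; the full coll_tuned_ prefix is assumed
 * from the usual MCA naming, and the use_dynamic_rules switch is registered
 * elsewhere in the component): the parameters registered above can be forced
 * at run time, e.g.
 *     mpirun --mca coll_tuned_use_dynamic_rules 1 \
 *            --mca coll_tuned_reduce_algorithm 3 \
 *            --mca coll_tuned_reduce_algorithm_segmentsize 32768 ...
 * which would select the pipeline variant (algorithm 3) with 32 KB segments.
 */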

int ompi_coll_tuned_reduce_intra_do_forced( void* sbuf, void* rbuf, int count,
                                            struct ompi_datatype_t* dtype,
                                            struct ompi_op_t* op, int root,
                                            struct ompi_communicator_t* comm )
{
    OPAL_OUTPUT(( ompi_coll_tuned_stream, "coll:tuned:reduce_intra_do_forced selected algorithm %d",
                  comm->c_coll_selected_data->user_forced[REDUCE].algorithm ));

    switch (comm->c_coll_selected_data->user_forced[REDUCE].algorithm) {
    case (0):   return ompi_coll_tuned_reduce_intra_dec_fixed( sbuf, rbuf, count, dtype, op, root, comm );
    case (1):   return ompi_coll_tuned_reduce_intra_basic_linear( sbuf, rbuf, count, dtype, op, root, comm );
    case (2):   return ompi_coll_tuned_reduce_intra_chain( sbuf, rbuf, count, dtype, op, root, comm,
                                                           comm->c_coll_selected_data->user_forced[REDUCE].segsize,
                                                           comm->c_coll_selected_data->user_forced[REDUCE].chain_fanout );
    case (3):   return ompi_coll_tuned_reduce_intra_pipeline( sbuf, rbuf, count, dtype, op, root, comm,
                                                              comm->c_coll_selected_data->user_forced[REDUCE].segsize );
    default:
        OPAL_OUTPUT(( ompi_coll_tuned_stream, "coll:tuned:reduce_intra_do_forced attempt to select algorithm %d when only 0-%d is valid?",
                      comm->c_coll_selected_data->user_forced[REDUCE].algorithm,
                      ompi_coll_tuned_forced_max_algorithms[REDUCE] ));
        return (MPI_ERR_ARG);
    } /* switch */
}

int ompi_coll_tuned_reduce_intra_do_this( void* sbuf, void* rbuf, int count,
                                          struct ompi_datatype_t* dtype,
                                          struct ompi_op_t* op, int root,
                                          struct ompi_communicator_t* comm,
                                          int algorithm, int faninout, int segsize )
{
    OPAL_OUTPUT(( ompi_coll_tuned_stream, "coll:tuned:reduce_intra_do_this selected algorithm %d topo faninout %d segsize %d",
                  algorithm, faninout, segsize ));

    switch (algorithm) {
    case (0):   return ompi_coll_tuned_reduce_intra_dec_fixed( sbuf, rbuf, count, dtype, op, root, comm );
    case (1):   return ompi_coll_tuned_reduce_intra_basic_linear( sbuf, rbuf, count, dtype, op, root, comm );
    case (2):   return ompi_coll_tuned_reduce_intra_chain( sbuf, rbuf, count, dtype, op, root, comm,
                                                           segsize, faninout );
    case (3):   return ompi_coll_tuned_reduce_intra_pipeline( sbuf, rbuf, count, dtype, op, root, comm,
                                                              segsize );
    default:
        OPAL_OUTPUT(( ompi_coll_tuned_stream, "coll:tuned:reduce_intra_do_this attempt to select algorithm %d when only 0-%d is valid?",
                      algorithm, ompi_coll_tuned_forced_max_algorithms[REDUCE] ));
        return (MPI_ERR_ARG);
    } /* switch */
}