##################################
# ML collective configuration file
##################################
# NOTE (by Pasha):
# Since the ML configuration infrastructure is still limited at this stage, some tunings are not
# supported: the parser understands the corresponding keys and values, but there is no place to
# load them yet.
# threshold - the ML infrastructure does not handle multiple thresholds.
# fragmentation - the ML infrastructure does not support per-collective fragmentation tuning.
##################################
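##################################
# File layout (informal sketch only, mirroring the sections below; this commented block is not
# parsed): every collective gets a [SECTION], one or more message-size subsections
# (<small>, <large>), and per-subsection "algorithm" and "hierarchy" keys.
#
# [COLLECTIVE_NAME]
# <small>
# algorithm = <algorithm name>
# hierarchy = <full_hr | full_hr_no_basesocket | ptp_only | iboffload_only>
# <large>
# algorithm = <algorithm name>
# hierarchy = <full_hr | full_hr_no_basesocket | ptp_only | iboffload_only>
##################################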
# Defining collective section
[BARRIER]
# Message size section. Currently only small/large are supported; more options may be added in
# the future. Barrier is a special case: it is the only collective that transfers no data, so
# for it we use only the small section.
<small>
# Since ML does not define any algorithm for BARRIER, we just use the default. Later we may
# introduce a dedicated algorithm name for Barrier.
algorithm = ML_BARRIER_DEFAULT
# Hierarchy setup:
#
# full_hr - all possible levels of hierarchy (the list of levels is defined on the
#           user command line)
# full_hr_no_basesocket - all possible levels of hierarchy except the basesocket subgroup
# ptp_only - only the ptp hierarchy
# iboffload_only - only the iboffload hierarchy
# (see the commented command-line sketch after this section for how the level list is supplied)
hierarchy = full_hr
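#
# Example (illustrative sketch only): the hierarchy levels referred to above come from the
# sbgp/bcol component lists given on the mpirun command line. The exact component names below
# are assumptions for illustration; verify the available ones on your build with ompi_info.
#   mpirun --mca coll_ml_priority 90 \
#          --mca sbgp basesmsocket,basesmuma,p2p \
#          --mca bcol basesmuma,ptpcoll \
#          ./a.out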
[IBARRIER]
<small>
algorithm = ML_BARRIER_DEFAULT
hierarchy = full_hr
[BCAST]
<small>
# bcast supports: ML_BCAST_SMALL_DATA_KNOWN, ML_BCAST_SMALL_DATA_UNKNOWN, ML_BCAST_SMALL_DATA_SEQUENTIAL
algorithm = ML_BCAST_SMALL_DATA_KNOWN
hierarchy = full_hr
<large>
# bcast supports: ML_BCAST_LARGE_DATA_KNOWN, ML_BCAST_LARGE_DATA_UNKNOWN, ML_BCAST_LARGE_DATA_SEQUENTIAL
algorithm = ML_BCAST_LARGE_DATA_KNOWN
hierarchy = full_hr
[IBCAST]
<small>
algorithm = ML_BCAST_SMALL_DATA_KNOWN
hierarchy = full_hr
<large>
algorithm = ML_BCAST_LARGE_DATA_KNOWN
hierarchy = full_hr
[GATHER]
<small>
# gather supports: ML_SMALL_DATA_GATHER
algorithm = ML_SMALL_DATA_GATHER
hierarchy = full_hr
<large>
# gather supports: ML_LARGE_DATA_GATHER
algorithm = ML_LARGE_DATA_GATHER
hierarchy = full_hr
[IGATHER]
<small>
# gather supports: ML_SMALL_DATA_GATHER
algorithm = ML_SMALL_DATA_GATHER
hierarchy = full_hr
<large>
# gather supports: ML_LARGE_DATA_GATHER
algorithm = ML_LARGE_DATA_GATHER
hierarchy = full_hr
[ALLGATHER]
<small>
# allgather supports: ML_SMALL_DATA_ALLGATHER
algorithm = ML_SMALL_DATA_ALLGATHER
hierarchy = full_hr
<large>
# allgather supports: ML_LARGE_DATA_ALLGATHER
algorithm = ML_LARGE_DATA_ALLGATHER
hierarchy = full_hr
[IALLGATHER]
<small>
# allgather supports: ML_SMALL_DATA_ALLGATHER
algorithm = ML_SMALL_DATA_ALLGATHER
hierarchy = full_hr
<large>
# allgather supports: ML_LARGE_DATA_ALLGATHER
algorithm = ML_LARGE_DATA_ALLGATHER
hierarchy = full_hr
[ALLTOALL]
<small>
# alltoall supports: ML_SMALL_DATA_ALLTOALL
algorithm = ML_SMALL_DATA_ALLTOALL
hierarchy = ptp_only
<large>
# alltoall supports: ML_LARGE_DATA_ALLTOALL
algorithm = ML_LARGE_DATA_ALLTOALL
hierarchy = ptp_only
[IALLTOALL]
<small>
# alltoall supports: ML_SMALL_DATA_ALLTOALL
algorithm = ML_SMALL_DATA_ALLTOALL
hierarchy = ptp_only
<large>
# alltoall supports: ML_LARGE_DATA_ALLTOALL
algorithm = ML_LARGE_DATA_ALLTOALL
hierarchy = ptp_only
[ALLREDUCE]
<small>
# allreduce supports: ML_SMALL_DATA_ALLREDUCE
algorithm = ML_SMALL_DATA_ALLREDUCE
hierarchy = full_hr
<large>
# allreduce supports: ML_LARGE_DATA_ALLREDUCE
algorithm = ML_LARGE_DATA_ALLREDUCE
hierarchy = full_hr
[IALLREDUCE]
<small>
# allreduce supports: ML_SMALL_DATA_ALLREDUCE
algorithm = ML_SMALL_DATA_ALLREDUCE
hierarchy = full_hr
<large>
# allreduce supports: ML_LARGE_DATA_ALLREDUCE
algorithm = ML_LARGE_DATA_ALLREDUCE
hierarchy = full_hr
[SCATTER]
<small>
# scatter supports: ML_SCATTER_SMALL_DATA_SEQUENTIAL
algorithm = ML_SCATTER_SMALL_DATA_SEQUENTIAL
hierarchy = full_hr
<large>
# scatter supports: ML_SCATTER_SMALL_DATA_SEQUENTIAL
algorithm = ML_SCATTER_SMALL_DATA_SEQUENTIAL
hierarchy = full_hr
[ISCATTER]
<small>
# scatter supports: ML_SCATTER_SMALL_DATA_SEQUENTIAL
algorithm = ML_SCATTER_SMALL_DATA_SEQUENTIAL
hierarchy = full_hr
<large>
# scatter supports: ML_SCATTER_SMALL_DATA_SEQUENTIAL
algorithm = ML_SCATTER_SMALL_DATA_SEQUENTIAL
hierarchy = full_hr