From f7664b3814508d95309e323b582b428cc9078aa5 Mon Sep 17 00:00:00 2001 From: Pavel Shamis Date: Mon, 2 Jul 2012 15:20:12 +0000 Subject: [PATCH] 1. Adding 2 new components: ofacm - generic connection manager for IB interconnects. ofautils - IB common utilities and compatibility code 2. Updating OpenIB configure code - ORNL & Mellanox Teams This commit was SVN r26707. --- ...ck_openib.m4 => ompi_check_openfabrics.m4} | 222 ++- ompi/mca/btl/ofud/configure.m4 | 2 +- ompi/mca/btl/openib/Makefile.am | 4 +- ompi/mca/btl/openib/btl_openib_component.c | 49 +- ompi/mca/btl/openib/configure.m4 | 5 +- ompi/mca/common/ofacm/Makefile.am | 76 + ompi/mca/common/ofacm/base.h | 193 ++ ompi/mca/common/ofacm/common_ofacm_base.c | 678 +++++++ ompi/mca/common/ofacm/common_ofacm_empty.c | 48 + ompi/mca/common/ofacm/common_ofacm_empty.h | 22 + ompi/mca/common/ofacm/common_ofacm_oob.c | 1672 +++++++++++++++++ ompi/mca/common/ofacm/common_ofacm_oob.h | 20 + ompi/mca/common/ofacm/common_ofacm_xoob.c | 1537 +++++++++++++++ ompi/mca/common/ofacm/common_ofacm_xoob.h | 69 + ompi/mca/common/ofacm/configure.m4 | 63 + ompi/mca/common/ofacm/configure.params | 26 + ompi/mca/common/ofacm/connect.h | 541 ++++++ .../ofacm/help-mpi-common-ofacm-base.txt | 41 + .../ofacm/help-mpi-common-ofacm-oob.txt | 20 + ompi/mca/common/ofautils/Makefile.am | 68 + ompi/mca/common/ofautils/common_ofautils.c | 89 + ompi/mca/common/ofautils/common_ofautils.h | 26 + ompi/mca/common/ofautils/configure.m4 | 43 + ompi/mca/common/ofautils/configure.params | 26 + ompi/mca/dpm/dpm.h | 4 + 25 files changed, 5419 insertions(+), 125 deletions(-) rename ompi/config/{ompi_check_openib.m4 => ompi_check_openfabrics.m4} (82%) create mode 100644 ompi/mca/common/ofacm/Makefile.am create mode 100644 ompi/mca/common/ofacm/base.h create mode 100644 ompi/mca/common/ofacm/common_ofacm_base.c create mode 100644 ompi/mca/common/ofacm/common_ofacm_empty.c create mode 100644 ompi/mca/common/ofacm/common_ofacm_empty.h create mode 100644 ompi/mca/common/ofacm/common_ofacm_oob.c create mode 100644 ompi/mca/common/ofacm/common_ofacm_oob.h create mode 100644 ompi/mca/common/ofacm/common_ofacm_xoob.c create mode 100644 ompi/mca/common/ofacm/common_ofacm_xoob.h create mode 100644 ompi/mca/common/ofacm/configure.m4 create mode 100644 ompi/mca/common/ofacm/configure.params create mode 100644 ompi/mca/common/ofacm/connect.h create mode 100644 ompi/mca/common/ofacm/help-mpi-common-ofacm-base.txt create mode 100644 ompi/mca/common/ofacm/help-mpi-common-ofacm-oob.txt create mode 100644 ompi/mca/common/ofautils/Makefile.am create mode 100644 ompi/mca/common/ofautils/common_ofautils.c create mode 100644 ompi/mca/common/ofautils/common_ofautils.h create mode 100644 ompi/mca/common/ofautils/configure.m4 create mode 100644 ompi/mca/common/ofautils/configure.params diff --git a/ompi/config/ompi_check_openib.m4 b/ompi/config/ompi_check_openfabrics.m4 similarity index 82% rename from ompi/config/ompi_check_openib.m4 rename to ompi/config/ompi_check_openfabrics.m4 index 1f25dde6da..cec202b829 100644 --- a/ompi/config/ompi_check_openib.m4 +++ b/ompi/config/ompi_check_openfabrics.m4 @@ -15,6 +15,7 @@ # reserved. # Copyright (c) 2006-2009 Mellanox Technologies. All rights reserved. # Copyright (c) 2010-2012 Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved. 
# $COPYRIGHT$ # # Additional copyrights may follow @@ -23,12 +24,12 @@ # -# OMPI_CHECK_OPENIB(prefix, [action-if-found], [action-if-not-found]) +# OMPI_CHECK_OPENFABRICS(prefix, [action-if-found], [action-if-not-found]) # -------------------------------------------------------- # check if OPENIB support can be found. sets prefix_{CPPFLAGS, # LDFLAGS, LIBS} as needed and runs action-if-found if there is # support, otherwise executes action-if-not-found -AC_DEFUN([OMPI_CHECK_OPENIB],[ +AC_DEFUN([OMPI_CHECK_OPENFABRICS],[ OPAL_VAR_SCOPE_PUSH([$1_msg]) # Setup the --with switches to allow users to specify where @@ -69,29 +70,6 @@ AC_DEFUN([OMPI_CHECK_OPENIB],[ AC_DEFINE_UNQUOTED([OMPI_OPENIB_PAD_HDR], [$ompi_openib_pad_hdr], [Add padding bytes to the openib BTL control header]) - # - # ConnectX XRC support - # - AC_ARG_ENABLE([openib-connectx-xrc], - [AC_HELP_STRING([--enable-openib-connectx-xrc], - [Enable ConnectX XRC support in the openib BTL. If you do not have InfiniBand ConnectX adapters, you may disable the ConnectX XRC support. If you do not know which InfiniBand adapter is installed on your cluster, leave this option enabled (default: enabled)])], - [enable_connectx_xrc="$enableval"], [enable_connectx_xrc="yes"]) - - # - # Unconnect Datagram (UD) based connection manager - # - AC_ARG_ENABLE([openib-udcm], - [AC_HELP_STRING([--enable-openib-udcm], - [Enable datagram connection support in openib BTL (default: enabled)])], - [enable_openib_udcm="$enableval"], [enable_openib_udcm="yes"]) - - # - # Openfabrics RDMACM - # - AC_ARG_ENABLE([openib-rdmacm], - [AC_HELP_STRING([--enable-openib-rdmacm], - [Enable Open Fabrics RDMACM support in openib BTL (default: enabled)])]) - AS_IF([test "$opal_want_verbs" = "no"], [ompi_check_openib_happy="no"], [ompi_check_openib_happy="yes"]) @@ -170,8 +148,6 @@ AC_DEFUN([OMPI_CHECK_OPENIB],[ # Set these up so that we can do an AC_DEFINE below # (unconditionally) $1_have_xrc=0 - $1_have_udcm=0 - $1_have_rdmacm=0 $1_have_opensm_devel=0 # If we have the openib stuff available, find out what we've got @@ -189,11 +165,6 @@ AC_DEFUN([OMPI_CHECK_OPENIB],[ AC_CHECK_FUNCS([ibv_create_xrc_rcv_qp], [$1_have_xrc=1]) fi - # is udcm enabled - if test "$enable_openib_udcm" = "yes"; then - $1_have_udcm=1 - fi - if test "no" != "$enable_openib_dynamic_sl"; then # We need ib_types.h file, which is installed with opensm-devel # package. However, ib_types.h has a bad include directive, @@ -219,30 +190,6 @@ AC_DEFUN([OMPI_CHECK_OPENIB],[ AC_MSG_ERROR([Cannot continue])])]) fi - # Do we have a recent enough RDMA CM? Need to have the - # rdma_get_peer_addr (inline) function (originally appeared - # in OFED v1.3). 
- if test "$enable_openib_rdmacm" != "no"; then - AC_CHECK_HEADERS([rdma/rdma_cma.h], - [AC_CHECK_LIB([rdmacm], [rdma_create_id], - [AC_MSG_CHECKING([for rdma_get_peer_addr]) - $1_msg=no - AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include "rdma/rdma_cma.h" - ]], [[void *ret = (void*) rdma_get_peer_addr((struct rdma_cm_id*)0);]])], - [$1_have_rdmacm=1 - $1_msg=yes]) - AC_MSG_RESULT([$$1_msg])])]) - - if test "1" = "$$1_have_rdmacm"; then - $1_LIBS="-lrdmacm $$1_LIBS" - else - AS_IF([test "$enable_openib_rdmacm" = "yes"], - [AC_MSG_WARN([--enable-openib-rdmacm was specified but the]) - AC_MSG_WARN([appropriate files could not be found]) - AC_MSG_WARN([Please install librdmacm and librdmacm-devel or disable rdmacm support]) - AC_MSG_ERROR([Cannot continue])]) - fi - fi # Check support for RDMAoE devices $1_have_rdmaoe=0 @@ -291,24 +238,6 @@ AC_DEFUN([OMPI_CHECK_OPENIB],[ AC_MSG_RESULT([no]) fi - AC_MSG_CHECKING([if UD CM is enabled]) - AC_DEFINE_UNQUOTED([OMPI_HAVE_UDCM], [$$1_have_udcm], - [Whether UD CM is available or not]) - if test "1" = "$$1_have_udcm"; then - AC_MSG_RESULT([yes]) - else - AC_MSG_RESULT([no]) - fi - - AC_MSG_CHECKING([if OpenFabrics RDMACM support is enabled]) - AC_DEFINE_UNQUOTED([OMPI_HAVE_RDMACM], [$$1_have_rdmacm], - [Whether RDMA CM is available or not]) - if test "1" = "$$1_have_rdmacm"; then - AC_MSG_RESULT([yes]) - else - AC_MSG_RESULT([no]) - fi - AS_IF([test -z "$opal_verbs_dir"], [openib_include_dir="/usr/include"], [openib_include_dir="$opal_verbs_dir/include"]) @@ -327,3 +256,148 @@ AC_DEFUN([OMPI_CHECK_OPENIB],[ OPAL_VAR_SCOPE_POP ]) + +AC_DEFUN([OMPI_CHECK_OPENFABRICS_CM_ARGS],[ + # + # ConnectX XRC support + # + AC_ARG_ENABLE([openib-connectx-xrc], + [AC_HELP_STRING([--enable-openib-connectx-xrc], + [Enable ConnectX XRC support in the openib BTL. If you do not have InfiniBand ConnectX adapters, you may disable the ConnectX XRC support. If you do not know which InfiniBand adapter is installed on your cluster, leave this option enabled (default: enabled)])], + [enable_connectx_xrc="$enableval"], [enable_connectx_xrc="yes"]) + # + # Unconnect Datagram (UD) based connection manager + # + AC_ARG_ENABLE([openib-udcm], + [AC_HELP_STRING([--enable-openib-udcm], + [Enable datagram connection support in openib BTL (default: enabled)])], + [enable_openib_udcm="$enableval"], [enable_openib_udcm="yes"]) + + # + # Openfabrics RDMACM + # + AC_ARG_ENABLE([openib-rdmacm], + [AC_HELP_STRING([--enable-openib-rdmacm], + [Enable Open Fabrics RDMACM support in openib BTL (default: enabled)])]) +])dnl + +AC_DEFUN([OMPI_CHECK_OPENFABRICS_CM],[ + AC_REQUIRE([OMPI_CHECK_OPENFABRICS_CM_ARGS]) + $1_have_udcm=0 + $1_have_rdmacm=0 + + AS_IF([test "$ompi_check_openib_happy" = "yes"], + [# Do we have a recent enough RDMA CM? Need to have the + # rdma_get_peer_addr (inline) function (originally appeared + # in OFED v1.3). 
+ if test "$enable_openib_rdmacm" != "no"; then + AC_CHECK_HEADERS([rdma/rdma_cma.h], + [AC_CHECK_LIB([rdmacm], [rdma_create_id], + [AC_MSG_CHECKING([for rdma_get_peer_addr]) + $1_msg=no + AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include "rdma/rdma_cma.h" + ]], [[void *ret = (void*) rdma_get_peer_addr((struct rdma_cm_id*)0);]])], + [$1_have_rdmacm=1 + $1_msg=yes]) + AC_MSG_RESULT([$$1_msg])])]) + + if test "1" = "$$1_have_rdmacm"; then + $1_LIBS="-lrdmacm $$1_LIBS" + else + AS_IF([test "$enable_openib_rdmacm" = "yes"], + [AC_MSG_WARN([--enable-openib-rdmacm was specified but the]) + AC_MSG_WARN([appropriate files could not be found]) + AC_MSG_WARN([Please install librdmacm and librdmacm-devel or disable rdmacm support]) + AC_MSG_ERROR([Cannot continue])]) + fi + fi + + # is udcm enabled + if test "$enable_openib_udcm" = "yes"; then + $1_have_udcm=1 + fi + ]) + + AC_MSG_CHECKING([if UD CM is enabled]) + AC_DEFINE_UNQUOTED([OMPI_HAVE_UDCM], [$$1_have_udcm], + [Whether UD CM is available or not]) + if test "1" = "$$1_have_udcm"; then + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + fi + + AC_MSG_CHECKING([if OpenFabrics RDMACM support is enabled]) + AC_DEFINE_UNQUOTED([OMPI_HAVE_RDMACM], [$$1_have_rdmacm], + [Whether RDMA CM is available or not]) + if test "1" = "$$1_have_rdmacm"; then + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + fi +])dnl + +AC_DEFUN([OMPI_CHECK_MLNX_OPENFABRICS],[ + $1_have_mverbs=0 + $1_have_mqe=0 + + AS_IF([test "$ompi_check_openib_happy" = "yes"], + [OMPI_CHECK_PACKAGE([$1], + [infiniband/mverbs.h], + [mverbs], + [ibv_m_query_device], + ["$$1_LIBS"], + [$opal_verbs_dir], + [$opal_verbs_libdir], + [$1_have_mverbs=1], + [])]) + + AS_IF([test "$ompi_check_openib_happy" = "yes"], + [OMPI_CHECK_PACKAGE([$1], + [infiniband/mqe.h], + [mqe], + [mqe_context_create], + ["$$1_LIBS"], + [$opal_verbs_dir], + [$opal_verbs_libdir], + [$1_have_mqe=1], + [])]) + + AC_MSG_CHECKING([if Mellanox OpenFabrics VERBS is enabled]) + AC_DEFINE_UNQUOTED([OMPI_HAVE_MVERBS], [$$1_have_mverbs], + [Whether MVERBS is available or not]) + AS_IF([test "1" = "$$1_have_mverbs"], + [AC_MSG_RESULT([yes])], + [AC_MSG_RESULT([no])]) + + # save the CPPFLAGS since we would have to update it for next test + ompi_check_mellanox_openfabrics_$1_save_CPPFLAGS="$CPPFLAGS" + + # If openfabrics custom directory have been defined, we have + # to use it for MACRO test that uses mverbs.h file. + # + if test ! 
-z "$ompi_check_verbs_dir" ; then + CPPFLAGS="-I${opal_verbs_dir}/include $CPPFLAGS" + fi + + AS_IF([test "1" = "$$1_have_mverbs"], + [AC_CHECK_DECLS([IBV_M_WR_CALC_RDMA_WRITE_WITH_IMM], + [AC_DEFINE_UNQUOTED([OMPI_HAVE_IBOFFLOAD_CALC_RDMA], [1], + [Whether IBV_M_WR_CALC_SEND is defined or not])], + [AC_DEFINE_UNQUOTED([OMPI_HAVE_IBOFFLOAD_CALC_RDMA], [0], + [Whether IBV_M_WR_CALC_SEND is defined or not])], + [#include ])]) + + # restoring the CPPFLAGS + CPPFLAGS="$ompi_check_mellanox_openfabrics_$1_save_CPPFLAGS" + + AC_MSG_CHECKING([if Mellanox OpenFabrics MQE is enabled]) + AC_DEFINE_UNQUOTED([OMPI_HAVE_MQE], [$$1_have_mqe], + [Whether MQE is available or not]) + AS_IF([test "1" = "$$1_have_mqe"], + [AC_MSG_RESULT([yes])], + [AC_MSG_RESULT([no])]) + + AS_IF([test "1" = "$$1_have_mverbs" -a "1" = $$1_have_mqe], + [$2], [$3]) +])dnl diff --git a/ompi/mca/btl/ofud/configure.m4 b/ompi/mca/btl/ofud/configure.m4 index 3d59755e77..5765f1e7a6 100644 --- a/ompi/mca/btl/ofud/configure.m4 +++ b/ompi/mca/btl/ofud/configure.m4 @@ -27,7 +27,7 @@ AC_DEFUN([MCA_ompi_btl_ofud_CONFIG],[ AC_CONFIG_FILES([ompi/mca/btl/ofud/Makefile]) - OMPI_CHECK_OPENIB([btl_ofud], + OMPI_CHECK_OPENFABRICS([btl_ofud], [btl_ofud_happy="yes"], [btl_ofud_happy="no"]) diff --git a/ompi/mca/btl/openib/Makefile.am b/ompi/mca/btl/openib/Makefile.am index 2c6e0473cb..654b855ba2 100644 --- a/ompi/mca/btl/openib/Makefile.am +++ b/ompi/mca/btl/openib/Makefile.am @@ -13,6 +13,7 @@ # Copyright (c) 2010 Oracle and/or its affiliates. All rights reserved. # Copyright (c) 2011 NVIDIA Corporation. All rights reserved. # Copyright (c) 2011 Mellanox Technologies. All rights reserved. +# Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved # $COPYRIGHT$ # # Additional copyrights may follow @@ -126,7 +127,8 @@ mcacomponentdir = $(pkglibdir) mcacomponent_LTLIBRARIES = $(component) mca_btl_openib_la_SOURCES = $(component_sources) mca_btl_openib_la_LDFLAGS = -module -avoid-version $(btl_openib_LDFLAGS) -mca_btl_openib_la_LIBADD = $(btl_openib_LIBS) +mca_btl_openib_la_LIBADD = $(btl_openib_LIBS) \ + $(top_ompi_builddir)/ompi/mca/common/ofautils/libmca_common_ofautils.la if MCA_ompi_cuda_support mca_btl_openib_la_LIBADD += \ $(top_ompi_builddir)/ompi/mca/common/cuda/libmca_common_cuda.la diff --git a/ompi/mca/btl/openib/btl_openib_component.c b/ompi/mca/btl/openib/btl_openib_component.c index 495d84c2c6..e4d172607c 100644 --- a/ompi/mca/btl/openib/btl_openib_component.c +++ b/ompi/mca/btl/openib/btl_openib_component.c @@ -17,6 +17,7 @@ * Copyright (c) 2006-2007 Voltaire All rights reserved. * Copyright (c) 2009-2012 Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011 NVIDIA Corporation. All rights reserved. + * Copyright (c) 2012 Oak Ridge National Laboratory. All rights reserved * $COPYRIGHT$ * * Additional copyrights may follow @@ -60,6 +61,7 @@ const char *ibv_get_sysfs_path(void); #include "opal/util/argv.h" #include "opal/memoryhooks/memory.h" #include "opal/mca/base/mca_base_param.h" +#include "ompi/mca/common/ofautils/common_ofautils.h" /* Define this before including hwloc.h so that we also get the hwloc verbs helper header file, too. 
We have to do this level of indirection because the hwloc subsystem is a component -- we don't @@ -2341,53 +2343,6 @@ static int finish_btl_init(mca_btl_openib_module_t *openib_btl) return OMPI_SUCCESS; } -static struct ibv_device **ibv_get_device_list_compat(int *num_devs) -{ - struct ibv_device **ib_devs; - -#ifdef HAVE_IBV_GET_DEVICE_LIST - ib_devs = ibv_get_device_list(num_devs); -#else - struct dlist *dev_list; - struct ibv_device *ib_dev; - *num_devs = 0; - - /* Determine the number of device's available on the host */ - dev_list = ibv_get_devices(); - if (NULL == dev_list) - return NULL; - - dlist_start(dev_list); - - dlist_for_each_data(dev_list, ib_dev, struct ibv_device) - (*num_devs)++; - - /* Allocate space for the ib devices */ - ib_devs = (struct ibv_device**)malloc(*num_devs * sizeof(struct ibv_dev*)); - if(NULL == ib_devs) { - *num_devs = 0; - BTL_ERROR(("Failed malloc: %s:%d", __FILE__, __LINE__)); - return NULL; - } - - dlist_start(dev_list); - - dlist_for_each_data(dev_list, ib_dev, struct ibv_device) - *(++ib_devs) = ib_dev; -#endif - - return ib_devs; -} - -static void ibv_free_device_list_compat(struct ibv_device **ib_devs) -{ -#ifdef HAVE_IBV_GET_DEVICE_LIST - ibv_free_device_list(ib_devs); -#else - free(ib_devs); -#endif -} - struct dev_distance { struct ibv_device *ib_dev; int distance; diff --git a/ompi/mca/btl/openib/configure.m4 b/ompi/mca/btl/openib/configure.m4 index 2e439d0ec3..e3670219b0 100644 --- a/ompi/mca/btl/openib/configure.m4 +++ b/ompi/mca/btl/openib/configure.m4 @@ -39,8 +39,9 @@ AC_DEFUN([MCA_ompi_btl_openib_CONFIG],[ OPAL_VAR_SCOPE_PUSH([cpcs have_threads]) cpcs="oob" - OMPI_CHECK_OPENIB([btl_openib], - [btl_openib_happy="yes"], + OMPI_CHECK_OPENFABRICS([btl_openib], + [btl_openib_happy="yes"] + OMPI_CHECK_OPENFABRICS_CM([btl_openib]), [btl_openib_happy="no"]) AS_IF([test "$btl_openib_happy" = "yes"], diff --git a/ompi/mca/common/ofacm/Makefile.am b/ompi/mca/common/ofacm/Makefile.am new file mode 100644 index 0000000000..4f6a9213cc --- /dev/null +++ b/ompi/mca/common/ofacm/Makefile.am @@ -0,0 +1,76 @@ +# +# Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved. +# Copyright (c) 2009 Cisco Systems, Inc. All rights reserved. +# Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved. +# $COPYRIGHT$ +# +# Additional copyrights may follow +# +# $HEADER$ +# + +AM_CPPFLAGS = $(common_ofacm_CPPFLAGS) + +dist_pkgdata_DATA = \ + help-mpi-common-ofacm-base.txt \ + help-mpi-common-ofacm-oob.txt +headers = \ + base.h \ + common_ofacm_oob.h \ + common_ofacm_empty.h \ + connect.h + +sources = \ + common_ofacm_base.c \ + common_ofacm_oob.c \ + common_ofacm_empty.c + +# If we have XRC support, build XOOB connection module +if MCA_common_ofacm_have_xrc +sources += \ + common_ofacm_xoob.c \ + common_ofacm_xoob.h +endif + +# See ompi/mca/common/sm/Makefile.am for an explanation of the +# LTLIBRARIES values listed below. 
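(Editorial aside on the btl_openib_component.c hunk above: the removed ibv_get_device_list_compat()/ibv_free_device_list_compat() wrappers now live in the new common/ofautils component -- common_ofautils.c is added by this commit but its contents are not shown in this excerpt. A rough sketch of the equivalent shim, with assumed names:)

/* Illustrative only; the real ofautils implementation and its function
   names are not shown in this patch excerpt and may differ. */
#include <stdlib.h>
#include <infiniband/verbs.h>

struct ibv_device **ofautils_get_device_list_sketch(int *num_devs)
{
#ifdef HAVE_IBV_GET_DEVICE_LIST
    /* Modern libibverbs provides the official API. */
    return ibv_get_device_list(num_devs);
#else
    /* Very old libibverbs only offered ibv_get_devices() plus dlist
       walking, as in the code removed above; omitted here for brevity. */
    *num_devs = 0;
    return NULL;
#endif
}

void ofautils_free_device_list_sketch(struct ibv_device **ib_devs)
{
#ifdef HAVE_IBV_GET_DEVICE_LIST
    ibv_free_device_list(ib_devs);
#else
    free(ib_devs);
#endif
}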
+
+lib_LTLIBRARIES =
+noinst_LTLIBRARIES =
+comp_inst = libmca_common_ofacm.la
+comp_noinst = libmca_common_ofacm_noinst.la
+
+if MCA_BUILD_ompi_common_ofacm_DSO
+lib_LTLIBRARIES += $(comp_inst)
+else
+noinst_LTLIBRARIES += $(comp_noinst)
+endif
+
+libmca_common_ofacm_la_SOURCES = $(headers) $(sources)
+libmca_common_ofacm_la_CPPFLAGS = $(common_ofacm_CPPFLAGS)
+libmca_common_ofacm_la_LDFLAGS = $(common_ofacm_LDFLAGS)
+libmca_common_ofacm_la_LIBADD = $(common_ofacm_LIBS)
+libmca_common_ofacm_noinst_la_SOURCES = $(libmca_common_ofacm_la_SOURCES)
+
+# Conditionally install the header files
+
+if WANT_INSTALL_HEADERS
+ompidir = $(includedir)/openmpi/ompi/mca/common/ofacm
+ompi_HEADERS = $(headers)
+else
+ompidir = $(includedir)
+endif
+
+# See ompi/mca/common/sm/Makefile.am for an explanation of the
+# *-local rules, below.
+
+all-local:
+	if test -z "$(lib_LTLIBRARIES)"; then \
+	    rm -f "$(comp_inst)"; \
+	    $(LN_S) "$(comp_noinst)" "$(comp_inst)"; \
+	fi
+
+clean-local:
+	if test -z "$(lib_LTLIBRARIES)"; then \
+	    rm -f "$(comp_inst)"; \
+	fi
diff --git a/ompi/mca/common/ofacm/base.h b/ompi/mca/common/ofacm/base.h
new file mode 100644
index 0000000000..5b69076c03
--- /dev/null
+++ b/ompi/mca/common/ofacm/base.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2007-2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright (c) 2009-2012 Mellanox Technologies.  All rights reserved.
+ *
+ * Copyright (c) 2009-2012 Oak Ridge National Laboratory.  All rights reserved.
+ * $COPYRIGHT$
+ *
+ * Additional copyrights may follow
+ *
+ * $HEADER$
+ */
+
+#ifndef OMPI_COMMON_OFACM_BASE_H
+#define OMPI_COMMON_OFACM_BASE_H
+#include "ompi_config.h"
+
+#include <stdarg.h>
+#include <stdio.h>
+
+#include "orte/runtime/orte_globals.h"
+#include "orte/util/proc_info.h"
+#include "orte/util/name_fns.h"
+#include "connect.h"
+
+BEGIN_C_DECLS
+
+#define HAVE_XRC (1 == OMPI_HAVE_CONNECTX_XRC)
+
+extern int ompi_common_ofacm_base_output;
+extern int ompi_common_ofacm_base_verbose; /* disabled by default */
+/* File for sl data produced only for a 3D-Torus Cluster */
+extern char* ompi_common_ofacm_three_dim_torus;
+
+static inline int ompi_common_ofacm_base_err(const char* fmt, ...)
+{
+    va_list list;
+    int ret;
+
+    va_start(list, fmt);
+    ret = vfprintf(stderr, fmt, list);
+    va_end(list);
+    return ret;
+}
+
+#define OFACM_ERROR(args)                                       \
+    do {                                                        \
+        ompi_common_ofacm_base_err("[%s]%s[%s:%d:%s] ",         \
+            orte_process_info.nodename,                         \
+            ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),                 \
+            __FILE__, __LINE__, __func__);                      \
+        ompi_common_ofacm_base_err args;                        \
+        ompi_common_ofacm_base_err("\n");                       \
+    } while(0);
+
+#if OPAL_ENABLE_DEBUG
+#define OFACM_VERBOSE(args)                                     \
+    do {                                                        \
+        if(ompi_common_ofacm_base_verbose > 0) {                \
+            ompi_common_ofacm_base_err("[%s]%s[%s:%d:%s] ",     \
+                orte_process_info.nodename,                     \
+                ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),             \
+                __FILE__, __LINE__, __func__);                  \
+            ompi_common_ofacm_base_err args;                    \
+            ompi_common_ofacm_base_err("\n");                   \
+        }                                                       \
+    } while(0);
+#else
+#define OFACM_VERBOSE(args)
+#endif
+
+/*
+ * PUBLIC functions
+ * ****************
+ */
+
+/*
+ * Register function
+ */
+OMPI_DECLSPEC int ompi_common_ofacm_base_register(mca_base_component_t *base);
+
+/*
+ * Query CPCs to see if they want to run on a specific port.
+ * Input:
+ *   dev - device and port information
+ * Output:
+ *   cpcs - list of available cpcs
+ *   num_cpcs - number of cpcs
+ */
+OMPI_DECLSPEC int ompi_common_ofacm_base_select_for_local_port
+    (ompi_common_ofacm_base_dev_desc_t *dev,
+     ompi_common_ofacm_base_module_t ***cpcs, int *num_cpcs);
+
+/*
+ * Select function
+ * Input:
+ *   local_cpcs - local cpc modules
+ *   num_local_cpcs - number of local cpc modules
+ *   remote_cpc_data - cpc information from remote peer
+ *   remote_cpc_data_count - number of remote cpc data entries from the remote peer
+ * Output:
+ *   ret_local_cpc - matched cpc module
+ *   ret_remote_cpc_data - matched remote cpc data
+ */
+OMPI_DECLSPEC int ompi_common_ofacm_base_find_match
+    (ompi_common_ofacm_base_module_t **local_cpcs, int num_local_cpcs,
+     ompi_common_ofacm_base_module_data_t *remote_cpc_data, int remote_cpc_data_count,
+     ompi_common_ofacm_base_module_t **ret_local_cpc,
+     ompi_common_ofacm_base_module_data_t **ret_remote_cpc_data);
+
+/*
+ * Find a CPC's index so that we can send it in the modex
+ */
+OMPI_DECLSPEC int ompi_common_ofacm_base_get_cpc_index
+    (ompi_common_ofacm_base_component_t *cpc);
+
+/*
+ * Start a new connection to an endpoint
+ */
+OMPI_DECLSPEC int ompi_common_ofacm_base_start_connect
+    (struct ompi_common_ofacm_base_local_connection_context_t *context);
+
+/*
+ * Component-wide CPC finalize
+ */
+OMPI_DECLSPEC void ompi_common_ofacm_base_finalize(void);
+
+/*
+ * Component-wide CPC init
+ */
+OMPI_DECLSPEC int ompi_common_ofacm_base_init(void);
+
+/*
+ * Lookup a CPC by its index (received from the modex)
+ */
+OMPI_DECLSPEC ompi_common_ofacm_base_component_t *
+    ompi_common_ofacm_base_get_cpc_byindex(uint8_t index);
+
+/*
+ * PRIVATE functions (called only by cpcs)
+ * ***************************************
+ */
+
+/*
+ * Proc initialization function
+ */
+void ompi_common_ofacm_base_proc_setup
+    (ompi_common_ofacm_base_proc_t *proc,
+     ompi_common_ofacm_base_local_connection_context_t *context,
+     ompi_proc_t *proc_ompi);
+/*
+ * Context initialization function
+ */
+int ompi_common_ofacm_base_context_init
+    (ompi_common_ofacm_base_local_connection_context_t *context,
+     ompi_common_ofacm_base_module_t *cpc,
+     ompi_common_ofacm_base_context_connect_cb_fn_t connect_cb,
+     ompi_common_ofacm_base_context_error_cb_fn_t error_cb,
+     ompi_common_ofacm_base_context_prepare_recv_cb_fn_t prepare_recv_cb,
+     ompi_common_ofacm_base_proc_t *proc,
+     ompi_common_ofacm_base_qp_config_t *qp_config,
+     struct ibv_pd *pd, uint64_t subnet_id, int cpc_type,
+     uint16_t lid, uint16_t rem_lid,
+     int32_t user_context_index, void *user_context);
+
+/*
+ * Remote context initialization.
+ * Returns operation status
+ */
+int ompi_common_ofacm_base_remote_context_init
+    (ompi_common_ofacm_base_remote_connection_context_t *context,
+     int num_qps, int num_srqs);
+
+/* Find OFACM proc on specific component */
+ompi_common_ofacm_base_proc_t* ompi_common_ofacm_base_find_proc
+    (ompi_common_ofacm_base_component_t *component, ompi_proc_t *proc);
+
+#if 0
+/*
+ * Allocate a CTS frag
+ */
+int ompi_common_ofacm_base_alloc_cts(
+    struct mca_btl_base_endpoint_t *endpoint);
+
+/*
+ * Free a CTS frag
+ */
+int ompi_common_ofacm_base_free_cts(
+    struct mca_btl_base_endpoint_t *endpoint);
+#endif
+
+END_C_DECLS
+
+#endif
diff --git a/ompi/mca/common/ofacm/common_ofacm_base.c b/ompi/mca/common/ofacm/common_ofacm_base.c
new file mode 100644
index 0000000000..5c58338fc0
--- /dev/null
+++ b/ompi/mca/common/ofacm/common_ofacm_base.c
@@ -0,0 +1,678 @@
+/*
+ * Copyright (c) 2007-2009 Cisco Systems, Inc.
All rights reserved. + * Copyright (c) 2007-2012 Mellanox Technologies. All rights reserved. + * + * Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved. + * $COPYRIGHT$ + * + * Additional copyrights may follow + * + * $HEADER$ + */ +#include "ompi_config.h" + +#include +#include "base.h" +#include "common_ofacm_oob.h" +#include "common_ofacm_empty.h" +#if HAVE_XRC +#include "common_ofacm_xoob.h" +#endif + +#include "ompi/constants.h" +#include "orte/util/show_help.h" +#include "opal/class/opal_list.h" +#include "opal/util/argv.h" +#include "opal/util/output.h" +#include "infiniband/verbs.h" + +/* Global variables */ +static ompi_common_ofacm_base_component_t **available = NULL; +static int num_available = 0; +int ompi_common_ofacm_base_verbose = 0; /* disabled by default */ +char* ompi_common_ofacm_three_dim_torus = NULL; +bool cpc_explicitly_defined = false; +int ompi_common_ofacm_base_output = 1; +bool ompi_common_ofacm_base_register_was_called = false; +bool ompi_common_ofacm_base_init_was_called = false; +/* + * Array of all possible connection functions + */ +static ompi_common_ofacm_base_component_t *all[] = { + &ompi_common_ofacm_oob, + + /* Always have an entry here so that the CP indexes will always be + the same: if XRC is not available, use the "empty" CPC */ +#if HAVE_XRC + &ompi_common_ofacm_xoob, +#else + &ompi_common_ofacm_empty, +#endif + NULL +}; + +static void ofacm_base_proc_contructor (ompi_common_ofacm_base_proc_t *proc) +{ + proc->proc_ompi = NULL; + OBJ_CONSTRUCT(&proc->all_contexts, opal_list_t); +} + +static void ofacm_base_proc_destructor (ompi_common_ofacm_base_proc_t *proc) +{ + OBJ_DESTRUCT(&proc->all_contexts); +} + +void ompi_common_ofacm_base_proc_setup(ompi_common_ofacm_base_proc_t *proc, + ompi_common_ofacm_base_local_connection_context_t *context, + ompi_proc_t *proc_ompi) +{ + if (NULL == proc->proc_ompi) { + /* first init for the proc, lets set ompi proc */ + proc->proc_ompi = proc_ompi; + } + /* put the context on the proc list */ + opal_list_append(&proc->all_contexts, (opal_list_item_t *)context); +} + +OBJ_CLASS_INSTANCE(ompi_common_ofacm_base_proc_t, + opal_list_item_t, + ofacm_base_proc_contructor, + ofacm_base_proc_destructor); + +/* Constructors / Destructors */ +static void base_local_context_constructor + (ompi_common_ofacm_base_local_connection_context_t *context) +{ + context->proc = NULL; + context->state = MCA_COMMON_OFACM_CLOSED; + context->subnet_id = 0; + context->lid = 0; + context->num_of_qps = 0; + context->init_attr = NULL; + context->attr = NULL; + context->ib_pd = NULL; + context->qps = NULL; + context->user_context = NULL; + context->initiator = 0; + context->index = 0; + context->xrc_recv_qp_num = 0; + /* remote info we will allocate and fill during qp creation */ + memset(&context->remote_info, 0, sizeof(context->remote_info)); + OBJ_CONSTRUCT(&context->context_lock, opal_mutex_t); +} + +static void base_local_context_destructor + (ompi_common_ofacm_base_local_connection_context_t *context) +{ + /* Release remote data arrays */ + if (NULL != context->remote_info.rem_qps) { + free(context->remote_info.rem_qps); + } + if (NULL != context->remote_info.rem_srqs) { + free(context->remote_info.rem_srqs); + } + OBJ_DESTRUCT(&context->context_lock); +} + +OBJ_CLASS_INSTANCE(ompi_common_ofacm_base_local_connection_context_t, + opal_list_item_t, + base_local_context_constructor, + base_local_context_destructor); + +int ompi_common_ofacm_base_context_init(ompi_common_ofacm_base_local_connection_context_t *context, 
+                                        ompi_common_ofacm_base_module_t *cpc,
+                                        ompi_common_ofacm_base_context_connect_cb_fn_t connect_cb,
+                                        ompi_common_ofacm_base_context_error_cb_fn_t error_cb,
+                                        ompi_common_ofacm_base_context_prepare_recv_cb_fn_t prepare_recv_cb,
+                                        ompi_common_ofacm_base_proc_t *proc,
+                                        ompi_common_ofacm_base_qp_config_t *qp_config,
+                                        struct ibv_pd *pd, uint64_t subnet_id, int cpc_type,
+                                        uint16_t lid, uint16_t rem_lid,
+                                        int32_t user_context_index, void *user_context)
+{
+    context->proc = proc;
+    context->cpc = cpc;
+    context->subnet_id = subnet_id;
+    context->cpc_type = cpc_type;
+    context->lid = lid;
+    context->rem_lid = rem_lid;
+    context->num_of_qps = qp_config->num_qps;
+    /* If upper layer defines the QPs we do not want to overwrite it */
+    if (NULL == context->qps) {
+        context->qps = calloc(context->num_of_qps, sizeof(ompi_common_ofacm_base_qp_t));
+        if(NULL == context->qps) {
+            OFACM_ERROR(("Failed to allocate memory for qps"));
+            return OMPI_ERR_OUT_OF_RESOURCE;
+        }
+    }
+
+    context->num_of_srqs = qp_config->num_srqs;
+    context->srq_num = qp_config->srq_num;
+    context->init_attr = qp_config->init_attr;
+    context->attr = qp_config->attr;
+    context->custom_init_attr_mask = qp_config->init_attr_mask;
+    context->custom_rtr_attr_mask = qp_config->rtr_attr_mask;
+    context->custom_rts_attr_mask = qp_config->rts_attr_mask;
+    context->ib_pd = pd;
+    context->connect_cb = connect_cb;
+    context->error_cb = error_cb;
+    context->prepare_recv_cb = prepare_recv_cb;
+    context->index = user_context_index;
+    context->user_context = user_context;
+    return OMPI_SUCCESS;
+}
+
+int ompi_common_ofacm_base_remote_context_init(ompi_common_ofacm_base_remote_connection_context_t *context,
+                                               int num_qps, int num_srqs)
+{
+    context->rem_qps = (ompi_common_ofacm_base_rem_qp_info_t *)
+        calloc(num_qps, sizeof(ompi_common_ofacm_base_rem_qp_info_t));
+    if (NULL == context->rem_qps) {
+        return OMPI_ERROR;
+    }
+
+    /* Sized by the number of SRQs, not the number of QPs */
+    context->rem_srqs = (ompi_common_ofacm_base_rem_srq_info_t *)
+        calloc(num_srqs, sizeof(ompi_common_ofacm_base_rem_srq_info_t));
+    if (NULL == context->rem_srqs) {
+        return OMPI_ERROR;
+    }
+
+    return OMPI_SUCCESS;
+}
+
+ompi_common_ofacm_base_proc_t* ompi_common_ofacm_base_find_proc
+    (ompi_common_ofacm_base_component_t *component, ompi_proc_t *proc)
+{
+    ompi_common_ofacm_base_proc_t *ret = NULL;
+    opal_list_item_t *item;
+    opal_list_t *list = &component->all_procs;
+
+    for (item = opal_list_get_first(list);
+         item != opal_list_get_end(list);
+         item = opal_list_get_next(item)) {
+        if (proc == ((ompi_common_ofacm_base_proc_t *)item)->proc_ompi){
+            ret = (ompi_common_ofacm_base_proc_t *)item;
+        }
+    }
+    return ret;
+}
+/*
+ * Register MCA parameters
+ */
+int ompi_common_ofacm_base_register(mca_base_component_t *base)
+{
+    int i, j, save;
+    char **temp = NULL, *string = NULL, *all_cpc_names = NULL;
+    char *cpc_include = NULL, *cpc_exclude = NULL;
+
+    if (ompi_common_ofacm_base_register_was_called) {
+        return OMPI_SUCCESS;
+    }
+
+    ompi_common_ofacm_base_register_was_called = true;
+
+    /* Make an MCA parameter to select which connect module to use */
+    for (i = 0; NULL != all[i]; ++i) {
+        /* The CPC name "empty" is reserved for "fake" CPC modules */
+        if (0 != strcmp(all[i]->cbc_name, "empty")) {
+            opal_argv_append_nosize(&temp, all[i]->cbc_name);
+        }
+    }
+    all_cpc_names = opal_argv_join(temp, ',');
+    opal_argv_free(temp);
+    asprintf(&string,
+             "Method used to select OpenFabrics connections (valid values: %s)",
+             all_cpc_names);
+
+    mca_base_param_reg_string(base, "ofacm_cpc_include", string, false, false,
+                              NULL, &cpc_include);
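+
+    /* Example usage (hypothetical spelling -- the registered name is
+       prefixed by the component passed in as "base"; for the openib
+       BTL it would be something like:
+
+           mpirun --mca btl_openib_ofacm_cpc_include oob ./app
+
+       which would restrict selection to the oob connection manager). */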
+    free(string);
+
+    asprintf(&string,
+             "Method used to exclude OpenFabrics connections (valid values: %s)",
+             all_cpc_names);
+
+    mca_base_param_reg_string(base, "ofacm_cpc_exclude", string, false, false,
+                              NULL, &cpc_exclude);
+    free(string);
+
+    /* Register the name of the file containing the fabric's Service Levels (SL) */
+    mca_base_param_reg_string_name("common", "ofacm_three_dim_torus",
+                                   "The name of the file containing Service Level (SL) data for 3D-Torus cluster",
+                                   false, false, NULL, &ompi_common_ofacm_three_dim_torus);
+
+
+    mca_base_param_reg_int_name("common",
+                                "ofacm_base_verbose",
+                                "Verbosity level of the OFACM framework",
+                                false, false,
+                                0,
+                                &ompi_common_ofacm_base_verbose);
+
+
+    /* Parse the if_[in|ex]clude parameters to come up with a list of
+       CPCs that are available */
+    available = calloc(1, sizeof(all));
+
+    /* If we have an "include" list, then find all those CPCs and put
+       them in available[] */
+    if (NULL != cpc_include) {
+        cpc_explicitly_defined = true;
+        temp = opal_argv_split(cpc_include, ',');
+        for (save = j = 0; NULL != temp[j]; ++j) {
+            for (i = 0; NULL != all[i]; ++i) {
+                if (0 == strcmp(temp[j], all[i]->cbc_name)) {
+                    OFACM_VERBOSE(("include: saving %s", all[i]->cbc_name));
+                    available[save++] = all[i];
+                    ++num_available;
+                    break;
+                }
+            }
+            if (NULL == all[i]) {
+                orte_show_help("help-mpi-common-ofacm-cpc-base.txt",
+                               "cpc name not found", true,
+                               "include", orte_process_info.nodename,
+                               "include", cpc_include, temp[j],
+                               all_cpc_names);
+                opal_argv_free(temp);
+                free(all_cpc_names);
+                return OMPI_ERR_NOT_FOUND;
+            }
+        }
+        opal_argv_free(temp);
+    }
+
+    /* Otherwise, if we have an "exclude" list, take all the CPCs that
+       are not in that list and put them in available[] */
+    else if (NULL != cpc_exclude) {
+        cpc_explicitly_defined = true;
+        temp = opal_argv_split(cpc_exclude, ',');
+        /* First: error check -- ensure that all the names are valid */
+        for (j = 0; NULL != temp[j]; ++j) {
+            for (i = 0; NULL != all[i]; ++i) {
+                if (0 == strcmp(temp[j], all[i]->cbc_name)) {
+                    break;
+                }
+            }
+            if (NULL == all[i]) {
+                orte_show_help("help-mpi-common-ofacm-cpc-base.txt",
+                               "cpc name not found", true,
+                               "exclude", orte_process_info.nodename,
+                               "exclude", cpc_exclude, temp[j],
+                               all_cpc_names);
+                opal_argv_free(temp);
+                free(all_cpc_names);
+                return OMPI_ERR_NOT_FOUND;
+            }
+        }
+
+        /* Now do the exclude */
+        for (save = i = 0; NULL != all[i]; ++i) {
+            for (j = 0; NULL != temp[j]; ++j) {
+                if (0 == strcmp(temp[j], all[i]->cbc_name)) {
+                    break;
+                }
+            }
+            if (NULL == temp[j]) {
+                OFACM_VERBOSE(("exclude: saving %s", all[i]->cbc_name));
+                available[save++] = all[i];
+                ++num_available;
+            }
+        }
+        opal_argv_free(temp);
+    }
+
+    /* If there's no include/exclude list, copy all[] into available[] */
+    else {
+        OFACM_VERBOSE(("no include or exclude: saving all"));
+        memcpy(available, all, sizeof(all));
+        num_available = (sizeof(all) /
+                         sizeof(ompi_common_ofacm_base_module_t *)) - 1;
+    }
+
+    /* Call the register function on all the CPCs so that they may
+       setup any MCA params specific to the connection type */
+    for (i = 0; NULL != available[i]; ++i) {
+        if (NULL != available[i]->cbc_register) {
+            available[i]->cbc_register();
+        }
+    }
+
+    return OMPI_SUCCESS;
+}
+
+/*
+ * Called once during openib BTL component initialization to allow CPC
+ * components to initialize.
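+ *
+ * A rough sketch of the expected call order from an upper layer
+ * (inferred from the declarations in base.h; not spelled out in this
+ * patch):
+ *
+ *   ompi_common_ofacm_base_register(&component->super);   -- MCA registration
+ *   ompi_common_ofacm_base_init();                        -- component init
+ *   ompi_common_ofacm_base_select_for_local_port(dev, &cpcs, &num_cpcs);
+ *   ompi_common_ofacm_base_find_match(...);               -- after the modex
+ *   ompi_common_ofacm_base_start_connect(context);        -- on first send
+ *   ompi_common_ofacm_base_finalize();                    -- component close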
+ */ +int ompi_common_ofacm_base_init(void) +{ + int i, rc; + + if (ompi_common_ofacm_base_init_was_called) { + return OMPI_SUCCESS; + } + + ompi_common_ofacm_base_init_was_called = true; + + /* Call each available CPC component's open function, if it has + one. If the CPC component open function returns OMPI_SUCCESS, + keep it. If it returns ERR_NOT_SUPPORTED, remove it from the + available[] array. If it returns something else, return that + error upward. */ + for (i = num_available = 0; NULL != available[i]; ++i) { + if (NULL == available[i]->cbc_init) { + available[num_available++] = available[i]; + OFACM_VERBOSE(("found available cpc (NULL init): %s", + all[i]->cbc_name)); + continue; + } + + rc = available[i]->cbc_init(); + if (OMPI_SUCCESS == rc) { + available[num_available++] = available[i]; + OFACM_VERBOSE(("found available cpc (SUCCESS init): %s", + all[i]->cbc_name)); + continue; + } else if (OMPI_ERR_NOT_SUPPORTED == rc) { + continue; + } else { + return rc; + } + } + available[num_available] = NULL; + + return (num_available > 0) ? OMPI_SUCCESS : OMPI_ERR_NOT_AVAILABLE; +} + + +/* + * Find all the CPCs that are eligible for a single local port (i.e., + * openib module). + */ +int ompi_common_ofacm_base_select_for_local_port(ompi_common_ofacm_base_dev_desc_t *dev, + ompi_common_ofacm_base_module_t ***cpcs, int *num_cpcs) +{ + char *msg = NULL; + int i, rc, cpc_index, len; + ompi_common_ofacm_base_module_t **tmp_cpcs; + + tmp_cpcs = calloc(num_available, + sizeof(ompi_common_ofacm_base_module_t *)); + if (NULL == tmp_cpcs) { + return OMPI_ERR_OUT_OF_RESOURCE; + } + + /* Go through all available CPCs and query them to see if they + want to run on this module. If they do, save them to a running + array. */ + for (len = 1, i = 0; NULL != available[i]; ++i) { + len += strlen(available[i]->cbc_name) + 2; + } + msg = malloc(len); + if (NULL == msg) { + return OMPI_ERR_OUT_OF_RESOURCE; + } + msg[0] = '\0'; + for (cpc_index = i = 0; NULL != available[i]; ++i) { + if (i > 0) { + strcat(msg, ", "); + } + strcat(msg, available[i]->cbc_name); + + rc = available[i]->cbc_query(dev, &tmp_cpcs[cpc_index]); + if (OMPI_ERR_NOT_SUPPORTED == rc || OMPI_ERR_UNREACH == rc) { + continue; + } else if (OMPI_SUCCESS != rc) { + free(tmp_cpcs); + free(msg); + return rc; + } + OFACM_VERBOSE(("match cpc for local port: %s", + available[i]->cbc_name)); + + /* If the CPC wants to use the CTS protocol, check to ensure + that QP 0 is PP; if it's not, we can't use this CPC (or the + CTS protocol) */ + /* Pasha: Wrong place to check qp type, should be moved to CMs + if (cpcs[cpc_index]->cbm_uses_cts && + !BTL_OPENIB_QP_TYPE_PP(0)) { + OFACM_VERBOSE(("this CPC only supports when the first btl_openib_receive_queues QP is a PP QP")); + continue; + } + */ + + /* This CPC has indicated that it wants to run on this openib + BTL module. Woo hoo! */ + ++cpc_index; + } + + /* If we got an empty array, then no CPCs were eligible. Doh! */ + if (0 == cpc_index) { + orte_show_help("help-mpi-common-ofacm-cpc-base.txt", + "no cpcs for port", true, + orte_process_info.nodename, + ibv_get_device_name(dev->ib_dev), + msg); + free(tmp_cpcs); + free(msg); + return OMPI_ERR_NOT_SUPPORTED; + } + free(msg); + + /* We got at least one eligible CPC; save the array into the + module's port_info */ + *num_cpcs = cpc_index; + *cpcs = tmp_cpcs; + + return OMPI_SUCCESS; +} + +/* + * This function is invoked when determining whether we have a CPC in + * common with a specific remote port. 
We already know that the
+ * subnet ID is the same between a specific local port and the target
+ * remote port; now we need to know if we can find a CPC in common
+ * between the two.
+ *
+ * If yes, be sure to find the *same* CPC on both sides. We know
+ * which CPCs are available on each side, and we know the priorities
+ * that were assigned on both sides. So find a CPC that is common to
+ * both sides and has the highest overall priority (between both
+ * sides). For example, if both sides have oob (priority 50) and only
+ * the local side has xoob (priority 60), then oob is the only common
+ * CPC and is selected; among common CPCs, the highest priority
+ * advertised by either side wins.
+ *
+ * Return the matching CPC, or NULL if not found.
+ */
+int
+ompi_common_ofacm_base_find_match(ompi_common_ofacm_base_module_t **local_cpcs, int num_local_cpcs,
+                                  ompi_common_ofacm_base_module_data_t *remote_cpc_data, int remote_cpc_data_count,
+                                  ompi_common_ofacm_base_module_t **ret_local_cpc,
+                                  ompi_common_ofacm_base_module_data_t **ret_remote_cpc_data)
+{
+    int i, j, max = -1;
+    ompi_common_ofacm_base_module_t *local_cpc, *local_selected = NULL;
+    ompi_common_ofacm_base_module_data_t *local_cpcd, *remote_cpcd,
+        *remote_selected = NULL;
+
+    /* Iterate over all the CPCs on the local module */
+    for (i = 0; i < num_local_cpcs; ++i) {
+        local_cpc = local_cpcs[i];
+        local_cpcd = &(local_cpc->data);
+
+        /* Iterate over all the CPCs on the remote port */
+        for (j = 0; j < remote_cpc_data_count; ++j) {
+            remote_cpcd = &(remote_cpc_data[j]);
+
+            /* Are the components the same? */
+            if (local_cpcd->cbm_component == remote_cpcd->cbm_component) {
+                /* If so, update the max priority found so far */
+                if (max < local_cpcd->cbm_priority) {
+                    max = local_cpcd->cbm_priority;
+                    local_selected = local_cpc;
+                    remote_selected = remote_cpcd;
+                }
+                if (max < remote_cpcd->cbm_priority) {
+                    max = remote_cpcd->cbm_priority;
+                    local_selected = local_cpc;
+                    remote_selected = remote_cpcd;
+                }
+            }
+        }
+    }
+
+    /* All done! */
+    if (NULL != local_selected) {
+        *ret_local_cpc = local_selected;
+        *ret_remote_cpc_data = remote_selected;
+        OFACM_VERBOSE(("find_match: found match!"));
+        return OMPI_SUCCESS;
+    } else {
+        OFACM_VERBOSE(("find_match: did NOT find match!"));
+        return OMPI_ERR_NOT_FOUND;
+    }
+}
+
+/*
+ * Lookup a CPC component's index in the all[] array so that we can
+ * send it in the modex
+ */
+int ompi_common_ofacm_base_get_cpc_index(ompi_common_ofacm_base_component_t *cpc)
+{
+    int i;
+    for (i = 0; NULL != all[i]; ++i) {
+        if (all[i] == cpc) {
+            return i;
+        }
+    }
+
+    /* Not found */
+    return -1;
+}
+
+/*
+ * Lookup a CPC by its index (received from the modex)
+ */
+ompi_common_ofacm_base_component_t *
+ompi_common_ofacm_base_get_cpc_byindex(uint8_t index)
+{
+    return (index >= (sizeof(all) /
+                      sizeof(ompi_common_ofacm_base_module_t *))) ?
+        NULL : all[index];
+}
+
+/*
+ * We never call this function from the BTL, so there is no reason to
+ * expose it in base.
+ */
+#if 0
+int ompi_common_ofacm_base_alloc_cts(mca_btl_base_endpoint_t *endpoint)
+{
+    ompi_free_list_item_t *fli;
+    int length = sizeof(mca_btl_openib_header_t) +
+        sizeof(mca_btl_openib_header_coalesced_t) +
+        sizeof(mca_btl_openib_control_header_t) +
+        sizeof(mca_btl_openib_footer_t) +
+        mca_btl_openib_component.qp_infos[mca_btl_openib_component.credits_qp].size;
+
+    /* Explicitly don't use the mpool registration */
+    fli = &(endpoint->endpoint_cts_frag.super.super.base.super);
+    fli->registration = NULL;
+    fli->ptr = malloc(length);
+    if (NULL == fli->ptr) {
+        BTL_ERROR(("malloc failed"));
+        return OMPI_ERR_OUT_OF_RESOURCE;
+    }
+
+    endpoint->endpoint_cts_mr =
+        ibv_reg_mr(endpoint->endpoint_btl->device->ib_pd,
+                   fli->ptr, length,
+                   IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE |
+                   IBV_ACCESS_REMOTE_READ);
+    OPAL_OUTPUT((-1, "registered memory %p, length %d", fli->ptr, length));
+    if (NULL == endpoint->endpoint_cts_mr) {
+        free(fli->ptr);
+        BTL_ERROR(("Failed to reg mr!"));
+        return OMPI_ERR_OUT_OF_RESOURCE;
+    }
+
+    /* Copy the lkey where it needs to go */
+    endpoint->endpoint_cts_frag.super.sg_entry.lkey =
+        endpoint->endpoint_cts_frag.super.super.segment.seg_key.key32[0] =
+        endpoint->endpoint_cts_mr->lkey;
+    endpoint->endpoint_cts_frag.super.sg_entry.length = length;
+
+    /* Construct the rest of the recv_frag_t */
+    OBJ_CONSTRUCT(&(endpoint->endpoint_cts_frag), mca_btl_openib_recv_frag_t);
+    endpoint->endpoint_cts_frag.super.super.base.order =
+        mca_btl_openib_component.credits_qp;
+    endpoint->endpoint_cts_frag.super.endpoint = endpoint;
+    OPAL_OUTPUT((-1, "Got a CTS frag for peer %s, addr %p, length %d, lkey %d",
+                 endpoint->endpoint_proc->proc_ompi->proc_hostname,
+                 (void*) endpoint->endpoint_cts_frag.super.sg_entry.addr,
+                 endpoint->endpoint_cts_frag.super.sg_entry.length,
+                 endpoint->endpoint_cts_frag.super.sg_entry.lkey));
+
+    return OMPI_SUCCESS;
+}
+#endif
+/* This function is needed for CTS packet release on completion,
+ * which is a bad idea... there are 2 possible solutions:
+ * - make the send operation blocking (simple and not optimal).
+ * - rdmacm should add its own progress function (best but not trivial).
+ */
+#if 0
+int ompi_common_ofacm_base_free_cts(mca_btl_base_endpoint_t *endpoint)
+{
+    if (NULL != endpoint->endpoint_cts_mr) {
+        ibv_dereg_mr(endpoint->endpoint_cts_mr);
+        endpoint->endpoint_cts_mr = NULL;
+    }
+    if (NULL != endpoint->endpoint_cts_frag.super.super.base.super.ptr) {
+        free(endpoint->endpoint_cts_frag.super.super.base.super.ptr);
+        endpoint->endpoint_cts_frag.super.super.base.super.ptr = NULL;
+        OPAL_OUTPUT((-1, "Freeing CTS frag"));
+    }
+
+    return OMPI_SUCCESS;
+}
+#endif
+
+/*
+ * Called to start a connection
+ */
+int ompi_common_ofacm_base_start_connect(
+    ompi_common_ofacm_base_local_connection_context_t *context)
+{
+#if 0
+    /* If the CPC uses the CTS protocol, provide a frag buffer for the
+       CPC to post. Must allocate these frags up here in the main
+       thread because the FREE_LIST_WAIT is not thread safe.
*/ + if (cpc->cbm_uses_cts) { + int rc; + rc = ompi_common_ofacm_base_alloc_cts(context); + if (OMPI_SUCCESS != rc) { + return rc; + } + } +#endif + + return context->cpc->cbm_start_connect(context); +} + +/* + * Called during openib btl component close + */ +void ompi_common_ofacm_base_finalize(void) +{ + int i; + + if (NULL != available) { + for (i = 0; NULL != available[i]; ++i) { + if (NULL != available[i]->cbc_finalize) { + available[i]->cbc_finalize(); + } + } + free(available); + available = NULL; + } +} diff --git a/ompi/mca/common/ofacm/common_ofacm_empty.c b/ompi/mca/common/ofacm/common_ofacm_empty.c new file mode 100644 index 0000000000..3f2eadff0c --- /dev/null +++ b/ompi/mca/common/ofacm/common_ofacm_empty.c @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2008 Cisco Systems, Inc. All rights reserved. + * + * Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved. + * Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved. + * $COPYRIGHT$ + * + * Additional copyrights may follow + * + * $HEADER$ + */ + +#include "ompi_config.h" + +#include "base.h" +#include "connect.h" +#include "ompi/constants.h" + +static void empty_component_register(void); +static int empty_component_init(void); +static int empty_component_query(ompi_common_ofacm_base_dev_desc_t *dev, + ompi_common_ofacm_base_module_t **cpc); + +ompi_common_ofacm_base_component_t ompi_common_ofacm_empty = { + "empty", + empty_component_register, + empty_component_init, + empty_component_query, + NULL +}; + +static void empty_component_register(void) +{ + /* Nothing to do */ +} + +static int empty_component_init(void) +{ + /* Never let this CPC run */ + return OMPI_ERR_NOT_SUPPORTED; +} + +static int empty_component_query(ompi_common_ofacm_base_dev_desc_t *dev, + ompi_common_ofacm_base_module_t **cpc) +{ + /* Never let this CPC run */ + return OMPI_ERR_NOT_SUPPORTED; +} diff --git a/ompi/mca/common/ofacm/common_ofacm_empty.h b/ompi/mca/common/ofacm/common_ofacm_empty.h new file mode 100644 index 0000000000..a6465a5232 --- /dev/null +++ b/ompi/mca/common/ofacm/common_ofacm_empty.h @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2007-2008 Cisco Systems, Inc. All rights reserved. + * + * Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved. + * Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved. + * $COPYRIGHT$ + * + * Additional copyrights may follow + * + * $HEADER$ + */ + +#ifndef BTL_OPENIB_CONNECT_EMPTY_H +#define BTL_OPENIB_CONNECT_EMPTY_H + +#include "ompi_config.h" + +#include "connect.h" + +extern ompi_common_ofacm_base_component_t ompi_common_ofacm_empty; + +#endif diff --git a/ompi/mca/common/ofacm/common_ofacm_oob.c b/ompi/mca/common/ofacm/common_ofacm_oob.c new file mode 100644 index 0000000000..7fc636f2ba --- /dev/null +++ b/ompi/mca/common/ofacm/common_ofacm_oob.c @@ -0,0 +1,1672 @@ +/* + * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana + * University Research and Technology + * Corporation. All rights reserved. + * Copyright (c) 2004-2005 The University of Tennessee and The University + * of Tennessee Research Foundation. All rights + * reserved. + * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, + * University of Stuttgart. All rights reserved. + * Copyright (c) 2004-2005 The Regents of the University of California. + * All rights reserved. + * Copyright (c) 2006-2008 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2006 Los Alamos National Security, LLC. All rights + * reserved. 
+ * Copyright (c) 2008-2012 Mellanox Technologies. All rights reserved. + * + * Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved. + * $COPYRIGHT$ + * + * Additional copyrights may follow + * + * $HEADER$ + */ + +#include "ompi_config.h" + +#include "opal/dss/dss.h" +#include "orte/util/show_help.h" +#include "opal/util/error.h" +#include "opal/util/output.h" +#include "orte/mca/rml/rml.h" +#include "orte/mca/rml/rml_types.h" +#include "orte/mca/errmgr/errmgr.h" +#include "orte/util/name_fns.h" +#include "orte/runtime/orte_globals.h" +#include "ompi/mca/dpm/dpm.h" +#include "connect.h" +#include "base.h" +#include "orte/util/show_help.h" +#include "opal/class/opal_hash_table.h" +#include "opal/class/opal_object.h" + +#include + +#define MAX_LINE_LEN 80 +#define NUM_OF_TOKENS 7 + + +typedef enum { + ENDPOINT_CONNECT_REQUEST, + ENDPOINT_CONNECT_RESPONSE, + ENDPOINT_CONNECT_ACK +} connect_message_type_t; + +typedef struct port_to_switch_lids{ + uint16_t port_lid; + uint16_t switch_lid; + struct port_to_switch_lids* next; +} port_to_switch_lids; + +typedef struct switch_to_switch_sl{ + uint16_t switch_lid; + uint8_t service_level; + struct switch_to_switch_sl* next; +} switch_to_switch_sl; + +static int oob_priority = 50; +static bool rml_recv_posted = false; + +static void oob_component_register(void); +static int oob_component_query(ompi_common_ofacm_base_dev_desc_t *dev, + ompi_common_ofacm_base_module_t **cpc); +static int oob_component_finalize(void); + +static int oob_module_start_connect(ompi_common_ofacm_base_local_connection_context_t* context); +static int reply_start_connect(ompi_common_ofacm_base_local_connection_context_t* context, + ompi_common_ofacm_base_remote_connection_context_t *remote_info); +static int set_remote_info(ompi_common_ofacm_base_local_connection_context_t *context, + ompi_common_ofacm_base_remote_connection_context_t *remote_info); +static int qp_connect_all(ompi_common_ofacm_base_local_connection_context_t* context); +static int qp_create_all(ompi_common_ofacm_base_local_connection_context_t* context); +static int qp_create_one(ompi_common_ofacm_base_local_connection_context_t* context, int qp); +static int send_connect_data(ompi_common_ofacm_base_local_connection_context_t* context, + uint8_t message_type); +static ompi_common_ofacm_base_local_connection_context_t* + oob_endpoint_init(ompi_proc_t *proc, + ompi_common_ofacm_base_qp_config_t *qp_config, + struct ibv_pd *pd, uint64_t subnet_id, int cpc_type, + uint16_t lid, uint16_t rem_lid, + int32_t user_context_index, void *user_context, + ompi_common_ofacm_base_module_t *cpc, + ompi_common_ofacm_base_context_connect_cb_fn_t connect_cb, + ompi_common_ofacm_base_context_error_cb_fn_t error_cb, + ompi_common_ofacm_base_context_prepare_recv_cb_fn_t prepare_recv_cb); +static int oob_endpoint_finalize(ompi_common_ofacm_base_local_connection_context_t *context); + +static void report_error(ompi_common_ofacm_base_local_connection_context_t* context); + +static void rml_send_cb(int status, orte_process_name_t* endpoint, + opal_buffer_t* buffer, orte_rml_tag_t tag, + void* cbdata); +static void rml_recv_cb(int status, orte_process_name_t* process_name, + opal_buffer_t* buffer, orte_rml_tag_t tag, + void* cbdata); + +/* Build service level hashtables per port */ +static int create_service_level_table_for_port(uint16_t lid, + opal_hash_table_t* port_to_switch_hash_table, + opal_hash_table_t* switch_to_switch_hash_table); + +/* Pick the service level of path between to endpoints */ +static int 
pick_service_level(uint16_t src_port_lid, uint16_t dst_port_lid, + uint8_t* service_level, + opal_hash_table_t* port_to_switch_hash_table, + opal_hash_table_t* switch_to_switch_hash_table); + +/* + * The "component" struct -- the top-level function pointers for the + * oob connection scheme. + */ +ompi_common_ofacm_base_component_t ompi_common_ofacm_oob = { + "oob", + /* Register */ + oob_component_register, + /* Init */ + NULL, + /* Query */ + oob_component_query, + /* Finalize */ + oob_component_finalize, +}; + +/* Open - this functions sets up any oob specific commandline params */ +static void oob_component_register(void) +{ + mca_base_param_reg_int_name("common", + "ofacm_connect_oob_priority", + "The selection method priority for oob", + false, false, oob_priority, &oob_priority); + + if (oob_priority > 100) { + oob_priority = 100; + } else if (oob_priority < -1) { + oob_priority = -1; + } +} + +/* + * Init function. Post non-blocking RML receive to accept incoming + * connection requests. + */ +static int oob_component_query(ompi_common_ofacm_base_dev_desc_t *dev, + ompi_common_ofacm_base_module_t **cpc) +{ + int rc; + + /* If we have the transport_type member, check to ensure we're on + IB (this CPC will not work with iWarp). If we do not have the + transport_type member, then we must be < OFED v1.2, and + therefore we must be IB. */ +#if defined(HAVE_STRUCT_IBV_DEVICE_TRANSPORT_TYPE) + if (IBV_TRANSPORT_IB != dev->ib_dev->transport_type) { + OFACM_VERBOSE(("OFACM: oob CPC only supported on InfiniBand; skipped on device %s", + ibv_get_device_name(dev->ib_dev))); + return OMPI_ERR_NOT_SUPPORTED; + } +#endif + + if (dev->capabilities & OMPI_COMMON_OFACM_XRC_ONLY) { + OFACM_VERBOSE(("OFACM: oob CPC not supported with XRC receive queues, please try xoob CPC; skipped")); + return OMPI_ERR_NOT_SUPPORTED; + } + /* If this btl supports OOB, then post the RML message. But + ensure to only post it *once*, because another btl may have + come in before this and already posted it. 
*/ + if (!rml_recv_posted) { + rc = orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD, + OMPI_RML_TAG_OFACM, + ORTE_RML_PERSISTENT, + rml_recv_cb, + NULL); + if (ORTE_SUCCESS != rc) { + OFACM_VERBOSE(("OFACM: oob CPC system error %d (%s)", + rc, opal_strerror(rc))); + return rc; + } + rml_recv_posted = true; + } + + *cpc = malloc(sizeof(ompi_common_ofacm_base_module_t)); + if (NULL == *cpc) { + orte_rml.recv_cancel(ORTE_NAME_WILDCARD, OMPI_RML_TAG_OFACM); + rml_recv_posted = false; + OFACM_VERBOSE(("openib BTL: oob CPC system error (malloc failed)")); + return OMPI_ERR_OUT_OF_RESOURCE; + } + /* Init global list of all connection contexts */ + OBJ_CONSTRUCT(&ompi_common_ofacm_oob.all_procs, opal_list_t); + (*cpc)->data.cbm_component = &ompi_common_ofacm_oob; + (*cpc)->data.cbm_priority = oob_priority; + (*cpc)->data.cbm_modex_message = NULL; + (*cpc)->data.cbm_modex_message_len = 0; + + (*cpc)->cbm_endpoint_init = oob_endpoint_init; + (*cpc)->cbm_start_connect = oob_module_start_connect; + (*cpc)->cbm_endpoint_finalize = oob_endpoint_finalize; + (*cpc)->cbm_finalize = NULL; + (*cpc)->cbm_uses_cts = false; + + OFACM_VERBOSE(("openib BTL: oob CPC available for use on %s", + ibv_get_device_name(dev->ib_dev))); + return OMPI_SUCCESS; +} + +static ompi_common_ofacm_base_proc_t* find_proc(ompi_proc_t *proc) +{ + ompi_common_ofacm_base_proc_t *ret = NULL; + opal_list_item_t *item; + opal_list_t *list = &ompi_common_ofacm_oob.all_procs; + + for (item = opal_list_get_first(list); + item != opal_list_get_end(list); + item = opal_list_get_next(item)) { + if (proc == ((ompi_common_ofacm_base_proc_t *)item)->proc_ompi){ + ret = (ompi_common_ofacm_base_proc_t *)item; + } + } + return ret; +} + +/* OOB connection context init */ +static ompi_common_ofacm_base_local_connection_context_t* + oob_endpoint_init(ompi_proc_t *proc, + ompi_common_ofacm_base_qp_config_t *qp_config, + struct ibv_pd *pd, uint64_t subnet_id, int cpc_type, + uint16_t lid, uint16_t rem_lid, + int32_t user_context_index, void *user_context, + ompi_common_ofacm_base_module_t *cpc, + ompi_common_ofacm_base_context_connect_cb_fn_t connect_cb, + ompi_common_ofacm_base_context_error_cb_fn_t error_cb, + ompi_common_ofacm_base_context_prepare_recv_cb_fn_t prepare_recv_cb) +{ + int ret; + bool new_proc; + ompi_common_ofacm_base_local_connection_context_t *context; + ompi_common_ofacm_base_proc_t *context_proc; + + context = (ompi_common_ofacm_base_local_connection_context_t*) + OBJ_NEW(ompi_common_ofacm_base_local_connection_context_t); + context_proc = find_proc(proc); + + if (NULL == context_proc) { + new_proc = true; + /* constructing new proc */ + context_proc = (ompi_common_ofacm_base_proc_t *) + OBJ_NEW(ompi_common_ofacm_base_proc_t ); + } else { + new_proc = false; + OBJ_RETAIN(context_proc); + } + + ompi_common_ofacm_base_proc_setup(context_proc, context, proc); + ret = ompi_common_ofacm_base_context_init(context, cpc, connect_cb, error_cb, + prepare_recv_cb, context_proc, qp_config, + pd, subnet_id, cpc_type, lid, rem_lid, user_context_index, user_context); + if (OMPI_SUCCESS != ret) { + OBJ_DESTRUCT(context_proc); + OBJ_DESTRUCT(context); + return NULL; + } + + if (new_proc) { + opal_list_append(&ompi_common_ofacm_oob.all_procs, (opal_list_item_t *)context_proc); + } + + return context; +} + +/* OOB connection context finalization */ +static int oob_endpoint_finalize + (ompi_common_ofacm_base_local_connection_context_t *context) +{ + opal_list_item_t *proc_item, *cntx_item, *cntx_item_next; + bool found = false; + bool pfound = false; + int 
qp;
+    opal_list_t *proc_list = &ompi_common_ofacm_oob.all_procs;
+
+    /* Proc cleanup. Find the context's proc in the global proc list and
+     * remove our context from that proc's context list. After that we
+     * try to release the proc. */
+    for (proc_item = opal_list_get_first(proc_list);
+         proc_item != opal_list_get_end(proc_list);
+         proc_item = opal_list_get_next(proc_item)) {
+        if (context->proc == ((ompi_common_ofacm_base_proc_t *)proc_item)){
+            ompi_common_ofacm_base_proc_t *proc =
+                (ompi_common_ofacm_base_proc_t *)proc_item;
+            opal_list_t *cntx_list = &proc->all_contexts;
+            pfound = true;
+
+            /* Remove the context from proc list */
+            cntx_item = opal_list_get_first(cntx_list);
+            while(cntx_item != opal_list_get_end(cntx_list)) {
+                /* take the next before removing from the list */
+                cntx_item_next = opal_list_get_next(cntx_item);
+                if (context == (ompi_common_ofacm_base_local_connection_context_t *)cntx_item) {
+                    found = true;
+                    opal_list_remove_item(cntx_list, cntx_item);
+                }
+                cntx_item = cntx_item_next;
+            }
+
+            /* Remove our proc from the global list */
+            if (opal_list_is_empty(cntx_list)) {
+                opal_list_remove_item(proc_list, (opal_list_item_t *)proc);
+            }
+            OBJ_RELEASE(proc);
+        }
+    }
+
+    /* Release QPs */
+    for (qp = 0; qp < context->num_of_qps; qp++) {
+        if(NULL != context->qps[qp].lcl_qp) {
+            if(ibv_destroy_qp(context->qps[qp].lcl_qp)) {
+                OFACM_ERROR(("Failed to destroy QP:%d\n", qp));
+            }
+        }
+    }
+
+    assert(true == found);
+    assert(true == pfound);
+
+    /* We are done with the proc release and now we may destroy the context */
+    OBJ_RELEASE(context);
+
+    return OMPI_SUCCESS;
+}
+
+/*
+ * Connect function. Start initiation of connections to a remote
+ * peer. We send our Queue Pair information over the RML/OOB
+ * communication mechanism. On completion of our send, a send
+ * completion handler is called.
+ */
+static int oob_module_start_connect(ompi_common_ofacm_base_local_connection_context_t *context)
+{
+    int rc;
+
+    if (OMPI_SUCCESS != (rc = qp_create_all(context))) {
+        return rc;
+    }
+
+    /* Send connection info over to remote endpoint */
+    context->state = MCA_COMMON_OFACM_CONNECTING;
+    if (OMPI_SUCCESS !=
+        (rc = send_connect_data(context, ENDPOINT_CONNECT_REQUEST))) {
+        OFACM_ERROR(("error sending connect request, error code %d", rc));
+        return rc;
+    }
+
+    return OMPI_SUCCESS;
+}
+
+/*
+ * Component finalize function. Cleanup RML non-blocking receive.
+ */ +static int oob_component_finalize(void) +{ + if (rml_recv_posted) { + orte_rml.recv_cancel(ORTE_NAME_WILDCARD, OMPI_RML_TAG_OFACM); + rml_recv_posted = false; + } + + return OMPI_SUCCESS; +} + +/**************************************************************************/ + +/* + * Reply to a `start - connect' message + */ +static int reply_start_connect(ompi_common_ofacm_base_local_connection_context_t* context, + ompi_common_ofacm_base_remote_connection_context_t *remote_info) +{ + int rc; + + OFACM_VERBOSE(("Initialized QPs, LID = %d", context->lid)); + + /* Create local QP's and post receive resources */ + if (OMPI_SUCCESS != (rc = qp_create_all(context))) { + return rc; + } + + /* Set the remote side info */ + set_remote_info(context, remote_info); + + /* Connect to remote endpoint qp's */ + if (OMPI_SUCCESS != (rc = qp_connect_all(context))) { + return rc; + } + + /* Send connection info over to remote endpoint */ + context->state = MCA_COMMON_OFACM_CONNECT_ACK; + if (OMPI_SUCCESS != + (rc = send_connect_data(context, ENDPOINT_CONNECT_RESPONSE))) { + OFACM_ERROR(("error in endpoint send connect request error code is %d", + rc)); + return rc; + } + return OMPI_SUCCESS; +} + + +static int set_remote_info(ompi_common_ofacm_base_local_connection_context_t *context, + ompi_common_ofacm_base_remote_connection_context_t *remote_info) +{ + /* copy the remote_info stuff */ + memcpy(&context->remote_info, + remote_info, sizeof(ompi_common_ofacm_base_remote_connection_context_t )); + + OFACM_VERBOSE(("Setting QP info, LID = %d", context->remote_info.rem_lid)); + return OMPI_SUCCESS; + +} + + +/* + * Connect the local ends of all qp's to the remote side + */ +static int qp_connect_all(ompi_common_ofacm_base_local_connection_context_t* context) +{ + int i; + uint8_t service_level = 0; + uint32_t rtr_mask = 0, rts_mask = 0; + int rc = OMPI_SUCCESS; + + static bool is_hash_table_initialized = false; + static opal_hash_table_t switch_to_switch_hash_table; + static opal_hash_table_t port_to_switch_hash_table; + + + /* Create two hash tables for a given port in order to allow + * an efficient search of service level on any route exiting + * from it */ + if((NULL != ompi_common_ofacm_three_dim_torus) && + (false == is_hash_table_initialized)){ + + rc = create_service_level_table_for_port(context->lid, &port_to_switch_hash_table, + &switch_to_switch_hash_table); + if(OMPI_SUCCESS != rc){ + /* Failed to create service table for port */ + return OMPI_ERROR; + } + is_hash_table_initialized = true; + } + + + /* Pick the Service Level of each route from the table */ + if(is_hash_table_initialized){ + rc = pick_service_level(context->lid, context->remote_info.rem_lid, &service_level, + &port_to_switch_hash_table, &switch_to_switch_hash_table); + if(OMPI_SUCCESS != rc){ + /* Failed to retrieve service level on the route */ + return OMPI_ERROR; + } + /*printf("Debug: qp_connect_all: lid %hu rem lid %hu num_qps %d SL %c\n", context->lid, + context->remote_info.rem_lid, context->num_of_qps, service_level);*/ + } + + + for (i = 0; i < context->num_of_qps; i++) { + struct ibv_qp_attr attr; + struct ibv_qp* qp = context->qps[i].lcl_qp; + enum ibv_mtu mtu = (context->attr[i].path_mtu < context->remote_info.rem_mtu) ? 
+ context->attr[i].path_mtu : context->remote_info.rem_mtu; + + memset(&attr, 0, sizeof(attr)); + memcpy(&attr, context->attr, sizeof(struct ibv_qp_attr)); + attr.qp_state = IBV_QPS_RTR; + attr.path_mtu = mtu; + attr.dest_qp_num = context->remote_info.rem_qps[i].rem_qp_num; + attr.rq_psn = context->remote_info.rem_qps[i].rem_psn; + attr.ah_attr.dlid = context->remote_info.rem_lid; + + if(is_hash_table_initialized){ + attr.ah_attr.sl = service_level; + } + /* JMS to be filled in later dynamically */ + attr.ah_attr.static_rate = 0; + rtr_mask = IBV_QP_STATE | + IBV_QP_AV | + IBV_QP_PATH_MTU | + IBV_QP_DEST_QPN | + IBV_QP_RQ_PSN | + IBV_QP_MAX_DEST_RD_ATOMIC | + IBV_QP_MIN_RNR_TIMER; + + /* applying user specified rtr mask */ + if (NULL != context->custom_rtr_attr_mask) { + rtr_mask |= context->custom_rtr_attr_mask[i]; + } + + OFACM_VERBOSE(("Set MTU to IBV value %d (%s bytes)", mtu, + (mtu == IBV_MTU_256) ? "256" : + (mtu == IBV_MTU_512) ? "512" : + (mtu == IBV_MTU_1024) ? "1024" : + (mtu == IBV_MTU_2048) ? "2048" : + (mtu == IBV_MTU_4096) ? "4096" : + "unknown (!)")); + + if (ibv_modify_qp(qp, &attr, rtr_mask)) { + OFACM_ERROR(("Error modifing QP to RTR errno says %s", + strerror(errno))); + return OMPI_ERROR; + } + attr.qp_state = IBV_QPS_RTS; + /* On PP QPs we have SW flow control, no need for rnr retries. Setting + * it to zero helps to catch bugs */ + /* + attr.rnr_retry = BTL_OPENIB_QP_TYPE_PP(i) ? 0 : + mca_btl_openib_component.ib_rnr_retry; + */ + attr.sq_psn = context->qps[i].lcl_psn; + rts_mask = IBV_QP_STATE | + IBV_QP_TIMEOUT | + IBV_QP_RETRY_CNT | + IBV_QP_RNR_RETRY | + IBV_QP_SQ_PSN | + IBV_QP_MAX_QP_RD_ATOMIC; + + /* applying user specified rts mask */ + if (NULL != context->custom_rts_attr_mask) { + rts_mask |= context->custom_rts_attr_mask[i]; + } + + if (ibv_modify_qp(qp, &attr, rts_mask)) { + OFACM_ERROR(("error modifying QP to RTS errno says %s", + strerror(errno))); + return OMPI_ERROR; + } + } + + return OMPI_SUCCESS; +} + + +/* + * Create the local side of all the qp's. The remote sides will be + * connected later. + */ +static int qp_create_all(ompi_common_ofacm_base_local_connection_context_t* context) +{ + int qp, rc; + + for (qp = 0; qp < context->num_of_qps; ++qp) { + rc = qp_create_one(context, qp); + if (OMPI_SUCCESS != rc) { + return rc; + } + } + /* Now that all the qp's are created locally, post some receive + buffers, setup credits, etc. */ + return context->prepare_recv_cb(context->user_context); +} + +/* + * Create the local side of one qp. The remote side will be connected + * later. 
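+ *
+ * For orientation: an IB QP must walk the verbs state machine
+ * RESET -> INIT -> RTR -> RTS before it can send. This function only
+ * performs the RESET -> INIT step; qp_connect_all() above drives the
+ * RTR and RTS transitions once the remote QP numbers are known. A
+ * minimal sketch of the essential call made here (the real code also
+ * copies user-supplied attributes first):
+ *
+ *   struct ibv_qp_attr attr;
+ *   memset(&attr, 0, sizeof(attr));
+ *   attr.qp_state = IBV_QPS_INIT;
+ *   ibv_modify_qp(qp, &attr, IBV_QP_STATE | IBV_QP_PKEY_INDEX |
+ *                 IBV_QP_PORT | IBV_QP_ACCESS_FLAGS);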
+ */
+static int qp_create_one(ompi_common_ofacm_base_local_connection_context_t *context, int qp)
+{
+ struct ibv_qp *my_qp;
+ struct ibv_qp_init_attr init_attr;
+ struct ibv_qp_attr attr;
+ size_t req_inline = context->init_attr[qp].cap.max_inline_data;
+ uint32_t init_mask = 0;
+
+ /* Take the default init attributes from the user */
+ memcpy(&init_attr, &context->init_attr[qp], sizeof(init_attr));
+ my_qp = ibv_create_qp(context->ib_pd, &init_attr);
+
+ if (NULL == my_qp) {
+ OFACM_ERROR(("error creating qp errno says %s", strerror(errno)));
+ return OMPI_ERROR;
+ }
+ context->qps[qp].lcl_qp = my_qp;
+
+ if (init_attr.cap.max_inline_data < req_inline) {
+ context->qps[qp].ib_inline_max = init_attr.cap.max_inline_data;
+ orte_show_help("help-mpi-common-ofacm-cpc-base.txt",
+ "inline truncated", true, orte_process_info.nodename,
+ req_inline, init_attr.cap.max_inline_data);
+ } else {
+ context->qps[qp].ib_inline_max = req_inline;
+ }
+
+ /* Take the default attributes from the user */
+ memcpy(&attr, &context->attr[qp], sizeof(attr));
+ attr.qp_state = IBV_QPS_INIT;
+ attr.qp_access_flags = IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ;
+ init_mask = IBV_QP_STATE |
+ IBV_QP_PKEY_INDEX |
+ IBV_QP_PORT |
+ IBV_QP_ACCESS_FLAGS;
+ /* apply the user specified init mask */
+ if (NULL != context->custom_init_attr_mask) {
+ init_mask |= context->custom_init_attr_mask[qp];
+ }
+
+ if (ibv_modify_qp(context->qps[qp].lcl_qp,
+ &attr, init_mask)) {
+ OFACM_ERROR(("Error modifying qp to INIT errno says %s", strerror(errno)));
+ return OMPI_ERROR;
+ }
+
+ /* Set up metadata on the endpoint */
+ context->qps[qp].lcl_psn = lrand48() & 0xffffff;
+
+ return OMPI_SUCCESS;
+}
+
+
+/*
+ * RML send connect information to the remote endpoint
+ */
+static int send_connect_data(ompi_common_ofacm_base_local_connection_context_t* context,
+ uint8_t message_type)
+{
+ opal_buffer_t* buffer = OBJ_NEW(opal_buffer_t);
+ int rc;
+
+ if (NULL == buffer) {
+ ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
+ return ORTE_ERR_OUT_OF_RESOURCE;
+ }
+
+ /* pack the info in the send buffer */
+ OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT8));
+ OFACM_VERBOSE(("type %d\n", message_type));
+ rc = opal_dss.pack(buffer, &message_type, 1, OPAL_UINT8);
+ if (ORTE_SUCCESS != rc) {
+ ORTE_ERROR_LOG(rc);
+ return rc;
+ }
+
+ OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT64));
+ rc = opal_dss.pack(buffer, &context->subnet_id, 1, OPAL_UINT64);
+ if (ORTE_SUCCESS != rc) {
+ ORTE_ERROR_LOG(rc);
+ return rc;
+ }
+
+ if (message_type != ENDPOINT_CONNECT_REQUEST) {
+ /* send the QP connect request info we respond to */
+ OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
+ rc = opal_dss.pack(buffer,
+ &context->remote_info.rem_qps[0].rem_qp_num, 1,
+ OPAL_UINT32);
+ if (ORTE_SUCCESS != rc) {
+ ORTE_ERROR_LOG(rc);
+ return rc;
+ }
+ OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT16));
+ rc = opal_dss.pack(buffer, &context->remote_info.rem_lid, 1, OPAL_UINT16);
+ if (ORTE_SUCCESS != rc) {
+ ORTE_ERROR_LOG(rc);
+ return rc;
+ }
+ }
+
+ if (message_type != ENDPOINT_CONNECT_ACK) {
+ int qp;
+ /* send CM type/family */
+ OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_INT));
+ rc = opal_dss.pack(buffer, &context->cpc_type, 1, OPAL_INT);
+ if (ORTE_SUCCESS != rc) {
+ ORTE_ERROR_LOG(rc);
+ return rc;
+ }
+ /* Pasha: Send the number of QPs here. We do not strictly have to send
+ * it, BUT the recv side callback code is pretty complicated and I
+ * don't want to touch it now. So the best workaround at this stage is
+ * to send one extra byte with the number of QPs.
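+ *
+ * For reference, the wire format of a connect message, in pack order as
+ * recovered from the pack calls in this function, is roughly:
+ *
+ *   uint8_t  message_type;
+ *   uint64_t subnet_id;
+ *   if (message_type != ENDPOINT_CONNECT_REQUEST):
+ *       uint32_t rem_qp_num; uint16_t rem_lid;
+ *   if (message_type != ENDPOINT_CONNECT_ACK):
+ *       int cpc_type; uint8_t num_of_qps;
+ *       for each QP: uint32_t qp_num; uint32_t psn;
+ *       uint16_t lid; uint32_t path_mtu; uint32_t index;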
+ */ + OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT8)); + rc = opal_dss.pack(buffer, &context->num_of_qps, 1, OPAL_UINT8); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + return rc; + } + /* stuff all the QP info into the buffer */ + for (qp = 0; qp < context->num_of_qps; qp++) { + OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32)); + rc = opal_dss.pack(buffer, &context->qps[qp].lcl_qp->qp_num, + 1, OPAL_UINT32); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + return rc; + } + OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32)); + rc = opal_dss.pack(buffer, &context->qps[qp].lcl_psn, 1, + OPAL_UINT32); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + return rc; + } + } + + OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT16)); + rc = opal_dss.pack(buffer, &context->lid, 1, OPAL_UINT16); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + return rc; + } + OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32)); + rc = opal_dss.pack(buffer, &context->attr[0].path_mtu, 1, + OPAL_UINT32); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + return rc; + } + OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32)); + rc = opal_dss.pack(buffer, &context->index, 1, OPAL_UINT32); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + return rc; + } + } + + /* send to remote endpoint */ + rc = orte_rml.send_buffer_nb(&context->proc->proc_ompi->proc_name, + buffer, OMPI_RML_TAG_OFACM, 0, + rml_send_cb, NULL); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + return rc; + } + OFACM_VERBOSE(("Sent QP Info, LID = %d, SUBNET = %lx\n", + context->lid, + context->subnet_id)); + + return OMPI_SUCCESS; +} + +static void report_error(ompi_common_ofacm_base_local_connection_context_t* context) +{ + if (NULL == context || NULL == context->error_cb) { + /* The context is undefined and we can not print specific error */ + orte_show_help("help-mpi-common-ofacm-oob.txt", + "ofacm oob fatal error", true, + orte_process_info.nodename, + __FILE__, __LINE__); + exit(1); + } + + /* Other way, call to user error callback */ + context->error_cb(context->user_context); +} + +/* + * Callback when we have finished RML sending the connect data to a + * remote peer + */ +static void rml_send_cb(int status, orte_process_name_t* endpoint, + opal_buffer_t* buffer, orte_rml_tag_t tag, + void* cbdata) +{ + OBJ_RELEASE(buffer); +} + + +/* + * Non blocking RML recv callback. 
Read incoming QP and other info, + * and if this endpoint is trying to connect, reply with our QP info, + * otherwise try to modify QP's and establish reliable connection + */ +static void rml_recv_cb(int status, orte_process_name_t* process_name, + opal_buffer_t* buffer, orte_rml_tag_t tag, + void* cbdata) +{ + int context_state; + int rc; + uint32_t lcl_qp = 0; + uint16_t lcl_lid = 0; + int32_t cnt = 1; + ompi_common_ofacm_base_remote_connection_context_t remote_info; + ompi_common_ofacm_base_local_connection_context_t *l_context; + ompi_common_ofacm_base_proc_t *proc; + uint8_t message_type, num_qps; + int cpc_type; + opal_list_t *procs_list = &ompi_common_ofacm_oob.all_procs; + opal_list_t *context_list; + bool master; + + /* start by unpacking data first so we know who is knocking at + our door */ + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT8)); + rc = opal_dss.unpack(buffer, &message_type, &cnt, OPAL_UINT8); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + report_error(NULL); + return; + } + + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT64)); + rc = opal_dss.unpack(buffer, &remote_info.rem_subnet_id, &cnt, OPAL_UINT64); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + report_error(NULL); + return; + } + + if (ENDPOINT_CONNECT_REQUEST != message_type) { + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32)); + rc = opal_dss.unpack(buffer, &lcl_qp, &cnt, OPAL_UINT32); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + report_error(NULL); + return; + } + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT16)); + rc = opal_dss.unpack(buffer, &lcl_lid, &cnt, OPAL_UINT16); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + report_error(NULL); + return; + } + } + + if (ENDPOINT_CONNECT_ACK != message_type) { + int qp; + + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_INT)); + rc = opal_dss.unpack(buffer, &cpc_type, &cnt, OPAL_INT); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + report_error(NULL); + return; + } + /* Pasha: Reading number of qps, in original code we tool it from + * btl component. In future we may change order of operations here. We may start + * lookup for connection descriptor after receiving subnet_id and lid. 
But in order + * to do it here I need totally to rewrite the recv callback...next time ;) + */ + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT8)); + rc = opal_dss.unpack(buffer, &num_qps, &cnt, OPAL_UINT8); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + report_error(NULL); + return; + } + /* get ready for the data */ + ompi_common_ofacm_base_remote_context_init(&remote_info, + num_qps, 0); + + /* unpack all the qp info */ + for (qp = 0; qp < num_qps; ++qp) { + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32)); + rc = opal_dss.unpack(buffer, &remote_info.rem_qps[qp].rem_qp_num, &cnt, + OPAL_UINT32); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + report_error(NULL); + return; + } + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32)); + rc = opal_dss.unpack(buffer, &remote_info.rem_qps[qp].rem_psn, &cnt, + OPAL_UINT32); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + report_error(NULL); + return; + } + } + + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT16)); + rc = opal_dss.unpack(buffer, &remote_info.rem_lid, &cnt, OPAL_UINT16); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + report_error(NULL); + return; + } + + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32)); + rc = opal_dss.unpack(buffer, &remote_info.rem_mtu, &cnt, OPAL_UINT32); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + report_error(NULL); + return; + } + + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32)); + rc = opal_dss.unpack(buffer, &remote_info.rem_index, &cnt, OPAL_UINT32); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + report_error(NULL); + return; + } + } + + OFACM_VERBOSE(("Received QP Info, LID = %d, SUBNET = %lx, CPC_TYPE = %d", + remote_info.rem_lid, + remote_info.rem_subnet_id, + cpc_type)); + + master = orte_util_compare_name_fields(ORTE_NS_CMP_ALL, ORTE_PROC_MY_NAME, + process_name) >= 0 ? true : false; + for (proc = (ompi_common_ofacm_base_proc_t *)opal_list_get_first(procs_list); + proc != (ompi_common_ofacm_base_proc_t *)opal_list_get_end(procs_list); + proc = (ompi_common_ofacm_base_proc_t *)opal_list_get_next(proc)){ + bool found = false; + if (orte_util_compare_name_fields(ORTE_NS_CMP_ALL, + &proc->proc_ompi->proc_name, + process_name) != OPAL_EQUAL) { + continue; + } + context_list = &proc->all_contexts; + if (ENDPOINT_CONNECT_REQUEST != message_type) { + /* This is a reply message. Try to get the endpoint + instance the reply belongs to */ + for (l_context = (ompi_common_ofacm_base_local_connection_context_t *)opal_list_get_first(context_list); + l_context != (ompi_common_ofacm_base_local_connection_context_t *)opal_list_get_end(context_list); + l_context = (ompi_common_ofacm_base_local_connection_context_t *)opal_list_get_next(l_context)) { + if (l_context->qps[0].lcl_qp != NULL && + lcl_lid == l_context->lid && + lcl_qp == l_context->qps[0].lcl_qp->qp_num && + remote_info.rem_subnet_id == l_context->subnet_id) { + found = true; + break; + } + } + } else { + /* This is new connection request. If this is master try + to find endpoint in a connecting state. 
If this is
+ the slave, try to find an endpoint in the closed state and
+ initiate the connection back */
+ ompi_common_ofacm_base_local_connection_context_t *context_found = NULL;
+ for (l_context = (ompi_common_ofacm_base_local_connection_context_t *)opal_list_get_first(context_list);
+ l_context != (ompi_common_ofacm_base_local_connection_context_t *)opal_list_get_end(context_list);
+ l_context = (ompi_common_ofacm_base_local_connection_context_t *)opal_list_get_next(l_context)) {
+ if (l_context->subnet_id != remote_info.rem_subnet_id ||
+ l_context->cpc_type != cpc_type ||
+ (l_context->state != MCA_COMMON_OFACM_CONNECTING
+ && l_context->state != MCA_COMMON_OFACM_CLOSED))
+ continue;
+ found = true;
+ context_found = l_context;
+ if ((master &&
+ MCA_COMMON_OFACM_CONNECTING == l_context->state) ||
+ (!master &&
+ MCA_COMMON_OFACM_CLOSED == l_context->state))
+ break; /* Found one. No point in continuing */
+ }
+ l_context = context_found;
+
+ /* if this is the slave and there are no endpoints in the closed
+ state then all connections are already in progress, so
+ just ignore this connection request */
+ if (found && !master &&
+ MCA_COMMON_OFACM_CLOSED != l_context->state) {
+ return;
+ }
+ }
+
+ if (!found) {
+ OFACM_ERROR(("can't find a suitable endpoint for this peer\n"));
+ report_error(NULL);
+ return;
+ }
+
+ OPAL_THREAD_LOCK(&l_context->context_lock);
+ context_state = l_context->state;
+
+ /* Update status */
+ switch (context_state) {
+ case MCA_COMMON_OFACM_CLOSED:
+ /* We had this connection closed before. The endpoint is
+ trying to connect. Move the status of this connection
+ to CONNECTING, and then reply with our QP
+ information */
+ if (master) {
+ rc = reply_start_connect(l_context, &remote_info);
+ } else {
+ rc = oob_module_start_connect(l_context);
+ }
+
+ if (OMPI_SUCCESS != rc) {
+ OFACM_ERROR(("error in endpoint reply start connect"));
+ report_error(l_context);
+ break;
+ }
+
+ /* As long as we expect a message from the peer (in order
+ to set up the connection) let the event engine poll the
+ RML events. Note: we increment the count once per active
+ connection. 
*/ + opal_progress_event_users_increment(); + break; + + case MCA_COMMON_OFACM_CONNECTING: + /* preparing remote info for this context */ + ompi_common_ofacm_base_remote_context_init(&l_context->remote_info, + l_context->num_of_qps, 0); + /* need to check status here */ + set_remote_info(l_context, &remote_info); + if (OMPI_SUCCESS != (rc = qp_connect_all(l_context))) { + OFACM_ERROR(("endpoint connect error: %d", rc)); + report_error(l_context); + break; + } + + if (master) { + l_context->state = MCA_COMMON_OFACM_WAITING_ACK; + + /* Send him an ACK */ + send_connect_data(l_context, ENDPOINT_CONNECT_RESPONSE); + } else { + send_connect_data(l_context, ENDPOINT_CONNECT_ACK); + /* Tell main BTL that we're done */ + l_context->state = MCA_COMMON_OFACM_CONNECTED; + l_context->connect_cb(l_context->user_context); + } + break; + + case MCA_COMMON_OFACM_WAITING_ACK: + /* Tell main BTL that we're done */ + l_context->state = MCA_COMMON_OFACM_CONNECTED; + l_context->connect_cb(l_context->user_context); + break; + + case MCA_COMMON_OFACM_CONNECT_ACK: + send_connect_data(l_context, ENDPOINT_CONNECT_ACK); + /* Tell main BTL that we're done */ + l_context->state = MCA_COMMON_OFACM_CONNECTED; + l_context->connect_cb(l_context->user_context); + break; + + case MCA_COMMON_OFACM_CONNECTED: + break; + + default : + OFACM_ERROR(("Invalid endpoint state %d", context_state)); + report_error(l_context); + } + OPAL_THREAD_UNLOCK(&l_context->context_lock); + break; + } +} + +/* + * Get the service level on the route between + * source port LID and destination port LID. + * @Param src_port_lid - LID of the source port. + * @Param dst_port_lid - LID of destination port. + * @Param service_level - Returned value. + * The service level on the route between source port + * to destination port. + * @return - Error Code. Non Zero value on error. + */ +static int pick_service_level(uint16_t src_port_lid, uint16_t dst_port_lid, uint8_t* service_level, + opal_hash_table_t* port_to_switch_hash_table, opal_hash_table_t* switch_to_switch_hash_table) +{ + uint8_t* sl; + uint16_t* dst_switch_lid; + void* p_src_switch_lid = NULL; + void* p_dst_switch_lid = NULL; + void* p_service_level = NULL; + int rc = OMPI_SUCCESS; + + /* Get the switch LID connected tothe source HCA LID */ + rc = opal_hash_table_get_value_ptr(port_to_switch_hash_table, &src_port_lid, sizeof(uint16_t), &p_src_switch_lid); + if(OMPI_SUCCESS != rc){ + /* Could not find source port LID */ + rc = OMPI_ERROR; + return rc; + } + + + /* Get the switch LID connected to the destination HCA LID */ + rc = opal_hash_table_get_value_ptr(port_to_switch_hash_table, &dst_port_lid, sizeof(uint16_t), &p_dst_switch_lid); + if(OMPI_SUCCESS != rc){ + /* Could not find destination port LID */ + rc = OMPI_ERROR; + return rc; + } + dst_switch_lid = (uint16_t*)p_dst_switch_lid; + + + /* Get the service level of the route beween the source HCA LID and destination HCA LID */ + rc = opal_hash_table_get_value_ptr(switch_to_switch_hash_table, dst_switch_lid, sizeof(uint16_t), &p_service_level); + if(OMPI_SUCCESS != rc){ + /* Could not find destination switch LID in hashtable*/ + rc = OMPI_ERROR; + return rc; + } + sl = (uint8_t*)p_service_level; + *service_level = *sl; + + return rc; +} + + +/* + * Get the size of the port to switch hashtable from a file. + + * @Params fp - Descriptor of the input file. + * @Param hash_table_size - Pointer to the size of + * the port to switch hashtable. + * @param head - pointer to a linked list containing + * the pairs to be stored in the hashtable. 
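+ *
+ * The per-port record layout this parser expects (a sketch recovered
+ * from the fscanf/strcmp calls below; separators shown loosely):
+ *
+ *   Channel Adapter <guid> : base LID <lid hex> : LMC <lmc> : ... port <n>
+ *   # LID : MTU : RATE
+ *   <switch_lid hex> : <mtu> : <rate>
+ *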
+ * @return - Error code. Non zero value for failure. + */ +static int get_port_to_switch_hashtable_data_from_file(FILE* fp, int* hash_table_size, port_to_switch_lids** head) +{ + int i; + char c; + int num_items; + int rc = OMPI_SUCCESS; + int ret = OMPI_SUCCESS; + + uint64_t guid; + uint16_t port_lid; + uint16_t switch_lid; + uint16_t mtu, rate, lmc; /* TODO: Check binary representation */ + int port_number; + + port_to_switch_lids* item = NULL; + port_to_switch_lids* p_head = *head; + port_to_switch_lids* p_next_item = NULL; + + char str[MAX_LINE_LEN] = "\0"; + char input_str[NUM_OF_TOKENS][MAX_LINE_LEN] = {"\0"}; + char expected_str[NUM_OF_TOKENS][MAX_LINE_LEN] = {"\0"}; + + + c = fgetc(fp); + fseek(fp, -1, SEEK_CUR); + + /* Init expected input strings */ + strcpy(expected_str[0], "Channel"); + strcpy(expected_str[1], "Adapter"); + strcpy(expected_str[2], "base"); + strcpy(expected_str[3], "LID"); + strcpy(expected_str[4], "LMC"); + strcpy(expected_str[5], "port"); + + /* Create list */ + p_head = (port_to_switch_lids*)calloc(1, sizeof(port_to_switch_lids)); + if(NULL == p_head){ + rc = OMPI_ERR_OUT_OF_RESOURCE; + return rc; + } + *head = p_head; + /* Pre-process the port-to-switch table */ + while(EOF != c) + { + ret = fscanf(fp, "%s %s %" PRIx64 " %c", input_str[0], input_str[1], &guid, &c); + ret += fscanf(fp, "%s %s %hx %c", input_str[2], input_str[3], &port_lid, &c); + ret += fscanf(fp, "%s %hu %c", input_str[4], &lmc, &c); + ret += fscanf(fp, "%s %s %d", input_str[6], input_str[5], &port_number); + + + if(14 != ret){ + rc = OMPI_ERR_FILE_READ_FAILURE; + return rc; + } + + for(i = 0; i < 6; i++) + { + /*if(strncmp(str, table_header, hash_table_header_size)){*/ + if(strcmp(input_str[i], expected_str[i])){ + /* Incorrect table header */ + rc = OMPI_ERROR; + return rc; + } + } + + c = fgetc(fp); + fgets(str, MAX_LINE_LEN, fp); + if(strncmp(str, "# LID : MTU : RATE", strlen(str) - 1)){ + /* Incorrect table header */ + rc = OMPI_ERROR; + return rc; + } + + c = fgetc(fp); + fseek(fp, -1, SEEK_CUR); + + + /* Read next line */ + fgets(str, MAX_LINE_LEN, fp); + + /* Update the port to switch hashtable size if read valid data */ + num_items = sscanf(str, "%hx %c %hu %c %hu", &switch_lid, &c, &mtu, &c, &rate); + if(5 == num_items){ + (*hash_table_size)++; + } + else{ + /* Wrong file format */ + rc = OMPI_ERROR; + return rc; + } + /* Store port LID and switch LID */ + item = calloc(1, sizeof(port_to_switch_lids)); + if(NULL == item){ + rc = OMPI_ERR_OUT_OF_RESOURCE; + return rc; + } + item->port_lid = port_lid; + item->switch_lid = switch_lid; + + /* Insert the item to the head of the list */ + p_next_item = p_head->next; + p_head->next = item; + item->next = p_next_item; + + + /* Get Next char */ + c = fgetc(fp); + fseek(fp, -1, SEEK_CUR); + } + + return rc; + } + +/* + * Get from the input file the size of the + * switch-to-switch hashtable dedicated for + * the input switch LID. + + * @Params fp - Descriptor of the input file. + * @Param switch_lid - the source switch local ID (LID). + * @Param hash_table_size - Pointer to the hashtable size. + * Value returned by this routine. + * @Param head - pointer to a linked list containing the pairs + * to be stored in the hashtable. + * @return - Error code. Non zero value for failure. 
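+ *
+ * The per-switch record layout this parser expects (a sketch recovered
+ * from the fscanf/strcmp calls below; separators shown loosely):
+ *
+ *   Switch <guid> : base LID <lid hex> : ... port <n>
+ *   # LID : SL : MTU : RATE
+ *   <dest_lid hex> : <sl> : ...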
+ */
+static int get_switch_to_switch_hashtable_size_from_file(FILE* fp, uint16_t switch_lid, int* hash_table_size, switch_to_switch_sl** head)
+{
+ int i;
+ char c;
+ int num_items;
+
+ int port;
+ uint64_t guid;
+ uint16_t source_lid;
+ uint16_t dest_lid;
+
+ int rc = OMPI_SUCCESS;
+ int ret = OMPI_SUCCESS;
+ uint8_t service_level;
+
+ switch_to_switch_sl* item = NULL;
+ switch_to_switch_sl* p_head = NULL;
+ switch_to_switch_sl* p_next_item = NULL;
+
+ int table_offset = 0;
+ int offset_in_table = 0;
+
+ char str[MAX_LINE_LEN] = "\0";
+ char input_str[NUM_OF_TOKENS][MAX_LINE_LEN] = {"\0"};
+ char expected_str[NUM_OF_TOKENS][MAX_LINE_LEN] = {"\0"};
+
+
+ /* Init expected strings */
+ strcpy(expected_str[0], "Switch");
+ strcpy(expected_str[1], "base");
+ strcpy(expected_str[2], "LID");
+ strcpy(expected_str[3], "port");
+
+
+ /* Allocate an empty list */
+ p_head = (switch_to_switch_sl*)calloc(1, sizeof(switch_to_switch_sl));
+ if(NULL == p_head){
+ rc = OMPI_ERR_OUT_OF_RESOURCE;
+ return rc;
+ }
+ *head = p_head;
+
+ c = fgetc(fp);
+ fseek(fp, -1, SEEK_CUR);
+
+ /* Read info */
+ while(EOF != c){
+
+ /* Go over the switch-to-switch routing tables until the requested
+ * table dedicated to the input switch_lid is found */
+ ret = fscanf(fp, "%s %" PRIx64 " %c", input_str[0], &guid, &c);
+ ret += fscanf(fp, "%s %s %hx %c", input_str[1], input_str[2], &source_lid, &c);
+ ret += fscanf(fp, "%s %s %d", input_str[4], input_str[3], &port);
+ c = fgetc(fp);
+
+ if(10 != ret)
+ {
+ rc = OMPI_ERR_FILE_READ_FAILURE;
+ return rc;
+ }
+
+ for(i = 0; i < 4; i++){
+ /* Validate the table header correctness */
+ if(strncmp(input_str[i], expected_str[i], strlen(input_str[i]))){
+ /* Incorrect table header */
+ rc = OMPI_ERROR;
+ return rc;
+ }
+ }
+
+ /* Get the next line according to the current structure of the file */
+ fgets(str, MAX_LINE_LEN, fp);
+ if(strncmp(str, "# LID : SL : MTU : RATE", strlen(str) - 1)){
+ rc = OMPI_ERROR;
+ return rc;
+ }
+
+ /* Test if this is the requested table,
+ * dedicated to the input source switch lid */
+ if(source_lid != switch_lid){
+ /* Skip to the next table */
+
+ while(EOF != c)
+ {
+ offset_in_table = ftell(fp);
+ fgets(str, MAX_LINE_LEN, fp);
+ if(!strncmp(str, "Switch", strlen("Switch"))){
+ /* Found a new table - start over */
+ fseek(fp, offset_in_table, SEEK_SET);
+ break;
+ }
+ /* Read the next character */
+ c = fgetc(fp);
+ fseek(fp, -1, SEEK_CUR);
+ }
+ if(EOF == c){
+ /* Reached end-of-file without
+ * finding the required routing table */
+ rc = OMPI_ERROR;
+ }
+ }
+ else{
+ /* The right table was found */
+ while(EOF != c){
+
+ fgets(str, MAX_LINE_LEN, fp);
+
+ /* Test if a new table was found */
+ if(!strncmp(str, "Switch", strlen("Switch"))){
+ /* Quit the search - the table was fully read */
+ return rc;
+ }
+ /* Still in the required switch route table */
+ else{
+ /* Check the correctness of the data and update the table size */
+ num_items = sscanf(str, "%hx %c %c", &dest_lid, &c, &service_level);
+ if(3 != num_items){
+ /* Failed to read input data / wrong input format */
+ rc = OMPI_ERROR;
+ return rc;
+ }
+ (*hash_table_size)++;
+
+ /* Add the data to the list */
+ item = (switch_to_switch_sl*)calloc(1, sizeof(switch_to_switch_sl));
+ if(NULL == item){
+ rc = OMPI_ERR_OUT_OF_RESOURCE;
+ return rc;
+ }
+ item->switch_lid = dest_lid;
+ item->service_level = service_level;
+
+ p_next_item = p_head->next;
+ p_head->next = item;
+ item->next = p_next_item;
+ }
+ /* Get the next character */
+ c = fgetc(fp);
+ fseek(fp, -1, SEEK_CUR);
+ }
+ /* Set the file descriptor to the 
beginning
+ * of the required table */
+ fseek(fp, table_offset, SEEK_SET);
+ }
+ }
+ return rc;
+}
+
+/*
+ * Set the port to switch hashtable according to data read from an input file.
+ * The hashtable Key is the port local ID (uint16_t).
+ * The hashtable Value is the local ID (uint16_t) of the switch connected to the port in the fabric.
+ *
+ * @Param hashtable - the hashtable to set.
+ * @Param hashtable_size - the number of hashtable elements.
+ * @Param head - Pointer to a linked list containing
+ * the pairs to be stored in the hashtable.
+ * @return - Error code. Non Zero value on error.
+ */
+static int set_port_to_switch_hash_table(opal_hash_table_t* hashtable, size_t hashtable_size, port_to_switch_lids** p_head)
+{
+ int ret;
+ uint16_t key;
+ uint16_t* value = NULL;
+ unsigned int i;
+ int rc = OMPI_SUCCESS;
+
+ port_to_switch_lids* head = NULL;
+ port_to_switch_lids* p_item = NULL;
+ port_to_switch_lids* p_item_next = NULL;
+
+
+ if((NULL == p_head) || (NULL == *p_head)){
+ rc = OMPI_ERROR;
+ return rc;
+ }
+ head = *p_head;
+
+ for(i = 0; i < hashtable_size; i++){
+
+ /* Read pairs of port-lid and switch-lid from the
+ * file and store them in the input hashtable */
+ value = (uint16_t*)calloc(1, sizeof(uint16_t));
+ if(NULL == value){
+ rc = OMPI_ERR_OUT_OF_RESOURCE;
+ return rc;
+ }
+
+ /* Get the next pair to store */
+ p_item = head->next;
+ if(NULL == p_item){
+ rc = OMPI_ERROR;
+ return rc;
+ }
+ key = p_item->port_lid;
+ *value = p_item->switch_lid;
+ /* Remove the item from the list */
+ p_item_next = p_item->next;
+ head->next = p_item_next;
+ free(p_item);
+
+ /* Set the port to switch LIDS hashtable */
+ ret = opal_hash_table_set_value_ptr(hashtable, &key, sizeof(uint16_t), (void*)value);
+ if(OPAL_SUCCESS != ret){
+ OFACM_ERROR(("Failed to set port2switch hashtable\n"));
+ rc = OMPI_ERROR;
+ break;
+ }
+ }
+
+ free(*p_head);
+ *p_head = NULL;
+ return rc;
+}
+
+/*
+ * Set the switch to switch hashtable according to data read from an input file.
+ * The hashtable Key is a switch local ID (uint16_t).
+ * The hashtable Value is the service level (uint8_t) of the route in the
+ * fabric between the local switch LID (represented by the key) and the remote switch LID.
+ *
+ * @Param hashtable - The hashtable to set.
+ * @Param hashtable_size - The number of hashtable elements.
+ * @Param head - Pointer to a list of all the data
+ * pairs to be inserted into the hashtable.
+ * @return - Error code. Non Zero value on error.
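+ *
+ * A minimal lookup sketch against the finished table (mirroring
+ * pick_service_level() above; the variable names are illustrative):
+ *
+ *   void *p_sl = NULL;
+ *   if (OPAL_SUCCESS == opal_hash_table_get_value_ptr(hashtable,
+ *           &dst_switch_lid, sizeof(uint16_t), &p_sl)) {
+ *       uint8_t sl = *(uint8_t*)p_sl;
+ *   }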
+ */
+static int set_switch_to_switch_hash_table(opal_hash_table_t* hashtable, size_t hashtable_size, switch_to_switch_sl** p_head)
+{
+ uint16_t key; /* switch lid */
+ uint8_t* value = NULL;
+ unsigned int i;
+ int rc = OMPI_SUCCESS;
+ int ret = OMPI_SUCCESS;
+
+ switch_to_switch_sl* head = NULL;
+ switch_to_switch_sl* item = NULL;
+ switch_to_switch_sl* p_next_item = NULL;
+
+
+ if((NULL == p_head) || (NULL == *p_head)){
+ rc = OMPI_ERROR;
+ return rc;
+ }
+ head = *p_head;
+
+ /* Read pairs of remote switch (LID) and
+ * route service level (SL) from the file
+ * and store them in the input hashtable */
+ for(i = 0; i < hashtable_size; i++)
+ {
+
+ value = (uint8_t*)calloc(1, sizeof(uint8_t));
+ if(NULL == value){
+ rc = OMPI_ERR_OUT_OF_RESOURCE;
+ return rc;
+ }
+
+ /* Get data from the list */
+ item = head->next;
+ if(NULL == item){
+ rc = OMPI_ERROR;
+ return rc;
+ }
+ key = item->switch_lid;
+ *value = item->service_level;
+
+ /* Remove the data item from the list */
+ p_next_item = item->next;
+ head->next = p_next_item;
+ free(item);
+
+ ret = opal_hash_table_set_value_ptr(hashtable, &key, sizeof(uint16_t), value);
+ if(OPAL_SUCCESS != ret){
+ OFACM_ERROR(("Failed to set sw2sw hashtable\n"));
+ rc = OMPI_ERROR;
+ break;
+ }
+ }
+
+ free(*p_head);
+ *p_head = NULL;
+ return rc;
+}
+
+/*
+ * An efficient method for finding the service level of any
+ * route from an input port to any other port in the fabric.
+ *
+ * Create two hashtables according to data read from an input file.
+ * The first table maps any port LID in the fabric to the LID of
+ * the switch it is connected to.
+ * The second table is dedicated to the switch LID to which the
+ * local port is connected.
+ *
+ * The table maps a remote switch LID to the service level
+ * of the route between the table's LID and this remote LID.
+ *
+ * @Param lid - the local ID of the port.
+ * @return - Error Code. Non Zero value in case of error.
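+ *
+ * File naming: the port-to-switch table is read from the file named by
+ * ompi_common_ofacm_three_dim_torus, and the switch-to-switch file name
+ * is derived from it by the code below, e.g. (hypothetical path):
+ *
+ *   /tmp/fabric-peer-paths.dump -> /tmp/fabric-sw2sw-path-records.dump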
+ */ +static int create_service_level_table_for_port(uint16_t lid, opal_hash_table_t* port_to_switch_hash_table, + opal_hash_table_t* switch_to_switch_hash_table) +{ + FILE* fp = NULL; + uint16_t* switch_lid; + void* p_switch_lid = NULL; + + int rc = OMPI_SUCCESS; + int ret = OMPI_SUCCESS; + + int file_name_len; + char* switch_to_sl = NULL; + + int port_to_switch_hash_table_size = 0; + int switch_to_switch_hash_table_size = 0; + + port_to_switch_lids* port_switch_lids = NULL; + switch_to_switch_sl* switch_sl = NULL; + + + + /* Open input configuration file */ + fp = fopen(ompi_common_ofacm_three_dim_torus, "rt"); + if(NULL == fp){ + /* File Opening failed */ + fprintf(stderr, "Failed to open the input file for the fabric's service level\n"); + rc = OMPI_ERR_FILE_OPEN_FAILURE; + goto ERROR; + } + + /* Get port-to-switch hashtable size */ + rc = get_port_to_switch_hashtable_data_from_file(fp, &port_to_switch_hash_table_size, &port_switch_lids); + if(OMPI_SUCCESS != rc){ + goto ERROR; + } + fclose(fp); + fp = NULL; + + /* Build and initialize the port-to-swich hashtable */ + OBJ_CONSTRUCT(port_to_switch_hash_table, opal_hash_table_t); + opal_hash_table_init(port_to_switch_hash_table, port_to_switch_hash_table_size); + + /* Set the port-to-switch hashtable */ + rc = set_port_to_switch_hash_table(port_to_switch_hash_table, port_to_switch_hash_table_size, &port_switch_lids); + if(OMPI_SUCCESS != rc){ + goto ERROR; + } + + /* Get the LID of the switch connected to the port's LID */ + ret = opal_hash_table_get_value_ptr(port_to_switch_hash_table, &lid, sizeof(uint16_t), &p_switch_lid); + if(OPAL_SUCCESS != ret){ + rc = OMPI_ERROR; + goto ERROR; + } + + + /* Open the file containing the mapping from switch-to-switch route to service level */ + file_name_len = strlen(ompi_common_ofacm_three_dim_torus); + switch_to_sl = (char*)calloc(file_name_len + 7, sizeof(char)); + if(NULL == switch_to_sl){ + rc = OMPI_ERR_OUT_OF_RESOURCE; + goto ERROR; + } + /* Build the switch-to-switch file name based on the port-to-switch file name */ + strncpy(switch_to_sl, ompi_common_ofacm_three_dim_torus, + strlen(ompi_common_ofacm_three_dim_torus) - strlen("peer-paths.dump") - 1); + strcat(switch_to_sl, "-sw2sw-path-records.dump"); + + /* Open path-to-SL file */ + fp = fopen(switch_to_sl, "rt"); + if(NULL == fp){ + /* File Opening failed */ + fprintf(stderr, "Failed to open the input file for the fabric's service level\n"); + rc = OMPI_ERR_FILE_OPEN_FAILURE; + goto ERROR; + } + free(switch_to_sl); + + switch_lid = (uint16_t*)p_switch_lid; + rc = get_switch_to_switch_hashtable_size_from_file(fp, *(uint16_t*)switch_lid, + &switch_to_switch_hash_table_size, &switch_sl); + if(OMPI_SUCCESS != rc){ + + goto ERROR; + } + fclose(fp); + fp = NULL; + + /* Build and initialize the switch-to-switch hashtable */ + OBJ_CONSTRUCT(switch_to_switch_hash_table, opal_hash_table_t); + opal_hash_table_init(switch_to_switch_hash_table, switch_to_switch_hash_table_size); + + /* Set the switch-to-switch hashtable */ + rc = set_switch_to_switch_hash_table(switch_to_switch_hash_table, + switch_to_switch_hash_table_size, &switch_sl); + if(OMPI_SUCCESS != rc){ + goto ERROR; + } + + + /* Use: opal_hash_table_get_value_uint64 */ + return OMPI_SUCCESS; +ERROR: + /* Close open files */ + if(NULL != fp){ + fclose(fp); + } + /* Release allocated resources */ + if(NULL != port_switch_lids){ + port_to_switch_lids* p_list = port_switch_lids; + port_to_switch_lids* p_item = NULL; + while(p_list->next != NULL){ + p_item = p_list->next; + if(NULL != p_item){ + 
p_list->next = p_item->next; + free(p_item); + } + } + free(p_list); + } + if(NULL != switch_sl){ + switch_to_switch_sl* p_list = switch_sl; + switch_to_switch_sl* p_item = NULL; + while(p_list->next != NULL){ + p_item = p_list->next; + if(NULL != p_item){ + p_list->next = p_item->next; + free(p_item); + } + } + free(p_list); + } + return rc; +} + diff --git a/ompi/mca/common/ofacm/common_ofacm_oob.h b/ompi/mca/common/ofacm/common_ofacm_oob.h new file mode 100644 index 0000000000..e596f0c72b --- /dev/null +++ b/ompi/mca/common/ofacm/common_ofacm_oob.h @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2007-2008 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved. + * + * Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved. + * $COPYRIGHT$ + * + * Additional copyrights may follow + * + * $HEADER$ + */ + +#ifndef COMMON_OFACM_OOB_H +#define COMMON_OFACM_OOB_H + +#include "connect.h" + +extern ompi_common_ofacm_base_component_t ompi_common_ofacm_oob; + +#endif diff --git a/ompi/mca/common/ofacm/common_ofacm_xoob.c b/ompi/mca/common/ofacm/common_ofacm_xoob.c new file mode 100644 index 0000000000..831d0aa54b --- /dev/null +++ b/ompi/mca/common/ofacm/common_ofacm_xoob.c @@ -0,0 +1,1537 @@ +/* + * Copyright (c) 2007-2012 Mellanox Technologies. All rights reserved. + * Copyright (c) 2008 Cisco Systems, Inc. All rights reserved. + * + * Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved. + * $COPYRIGHT$ + * + * Additional copyrights may follow + * + * $HEADER$ + */ + +#include "ompi_config.h" + +#include "opal/dss/dss.h" +#include "opal/util/error.h" +#include "opal/util/output.h" +#include "orte/util/show_help.h" +#include "orte/util/name_fns.h" +#include "orte/mca/rml/rml.h" +#include "orte/mca/rml/rml_types.h" +#include "orte/mca/errmgr/errmgr.h" +#include "ompi/mca/dpm/dpm.h" +#include "common_ofacm_xoob.h" +#include "orte/util/show_help.h" +#include "opal/class/opal_hash_table.h" +#include "base.h" +#include "connect.h" + +#define SIZE_OF3(A, B, C) (sizeof(A) + sizeof(B) + sizeof(C)) +#define BASE_TO_XOOB(context) (ompi_common_ofacm_xoob_local_connection_context_t *)context +#define XOOB_TO_BASE(xcontext) (ompi_common_ofacm_base_local_connection_context_t *)xcontext + +static void xoob_component_register(void); +static int xoob_component_query(ompi_common_ofacm_base_dev_desc_t *dev, + ompi_common_ofacm_base_module_t **cpc); +static int xoob_component_finalize(void); + +static int xoob_module_start_connect + (ompi_common_ofacm_base_local_connection_context_t *context); + +static void xoob_ib_address_constructor(ib_address_t *ib_addr); +static void xoob_ib_address_destructor(ib_address_t *ib_addr); + +OBJ_CLASS_INSTANCE(ib_address_t, + opal_list_item_t, + xoob_ib_address_constructor, + xoob_ib_address_destructor); +/* + * The "component" struct -- the top-level function pointers for the + * xoob connection scheme. 
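+ *
+ * Unlike the oob CPC, xoob keys its connections by the remote
+ * (subnet id, lid, job id) triple: the first context to reach a given
+ * ib_address_t becomes the XRC "master" and creates the shared send QP,
+ * while later contexts are queued on ib_addr->pending_contexts until
+ * the master's connection completes.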
+ */ +ompi_common_ofacm_base_component_t ompi_common_ofacm_xoob = { + "xoob", + /* Register */ + xoob_component_register, + /* Init */ + NULL, + /* Query */ + xoob_component_query, + /* Finalize */ + xoob_component_finalize, +}; + +typedef enum { + ENDPOINT_XOOB_CONNECT_REQUEST, + ENDPOINT_XOOB_CONNECT_RESPONSE, + ENDPOINT_XOOB_CONNECT_XRC_REQUEST, + ENDPOINT_XOOB_CONNECT_XRC_RESPONSE, + ENDPOINT_XOOB_CONNECT_XRC_NR_RESPONSE /* The xrc recv qp already was destroyed */ +} connect_message_type_t; + +static int xoob_priority = 60; +static bool rml_recv_posted = false; + +#define XOOB_SET_REMOTE_INFO(EP, INFO) \ +do { \ + /* copy the rem_info stuff */ \ + EP.rem_lid = INFO.rem_lid; \ + EP.rem_subnet_id = INFO.rem_subnet_id; \ + EP.rem_mtu = INFO.rem_mtu; \ + EP.rem_index = INFO.rem_index; \ + memcpy((void*)EP.rem_qps, (void*)INFO.rem_qps, \ + sizeof(mca_btl_openib_rem_qp_info_t)); \ + /* copy the rem_info stuff */ \ + memcpy((void*)EP.rem_srqs, (void*)INFO.rem_srqs, \ + sizeof(mca_btl_openib_rem_srq_info_t) * \ + mca_btl_openib_component.num_xrc_qps); \ +} while (0) + +/* Constructor destructor for xoob context. */ +static void xoob_local_context_constructor + (ompi_common_ofacm_xoob_local_connection_context_t *context) +{ + context->addr = NULL; + context->xrc_recv_psn = 0; +} + +static void xoob_local_context_destructor + (ompi_common_ofacm_xoob_local_connection_context_t *context) +{ + if(NULL != context->addr) { + OBJ_RELEASE(context->addr); + } +} + +OBJ_CLASS_INSTANCE(ompi_common_ofacm_xoob_local_connection_context_t, + ompi_common_ofacm_base_local_connection_context_t, + xoob_local_context_constructor, + xoob_local_context_destructor); + +static void xoob_pending_context_constructor(pending_context_t *pcontext) +{ + pcontext->xcontext = NULL; +} + +static void xoob_pending_context_destructor(pending_context_t *pcontext) +{ + /* I have nothing to do !*/ +} + +static void xoob_pending_context_init(pending_context_t *pcontext, + ompi_common_ofacm_xoob_local_connection_context_t *xcontext) +{ + pcontext->xcontext = xcontext; +} + +OBJ_CLASS_INSTANCE(pending_context_t, + opal_list_item_t, + xoob_pending_context_constructor, + xoob_pending_context_destructor); + +static void xoob_ib_address_constructor(ib_address_t *ib_addr) +{ + ib_addr->key = NULL; + ib_addr->subnet_id = 0; + ib_addr->lid = 0; + ib_addr->status = XOOB_ADDR_CLOSED; + ib_addr->qps = NULL; + OBJ_CONSTRUCT(&ib_addr->addr_lock, opal_mutex_t); + OBJ_CONSTRUCT(&ib_addr->pending_contexts, opal_list_t); +} + +static void xoob_ib_address_destructor(ib_address_t *ib_addr) +{ + if(NULL != ib_addr->qps && NULL != ib_addr->qps[0].lcl_qp) { + if(ibv_destroy_qp(ib_addr->qps[0].lcl_qp)) { + OFACM_ERROR(("Failed to destroy QP:%d\n", 0)); + } + } + if (NULL != ib_addr->key) { + free(ib_addr->key); + } + OBJ_DESTRUCT(&ib_addr->addr_lock); + OBJ_DESTRUCT(&ib_addr->pending_contexts); +} + +static int xoob_ib_address_init(ib_address_t *ib_addr, uint16_t lid, uint64_t s_id, orte_jobid_t ep_jobid) +{ + ib_addr->key = malloc(SIZE_OF3(s_id, lid, ep_jobid)); + if (NULL == ib_addr->key) { + OFACM_ERROR(("Failed to allocate memory for key\n")); + return OMPI_ERROR; + } + memset(ib_addr->key, 0, SIZE_OF3(s_id, lid, ep_jobid)); + /* creating the key = lid + s_id + ep_jobid */ + memcpy(ib_addr->key, &lid, sizeof(lid)); + memcpy((void*)((char*)ib_addr->key + sizeof(lid)), &s_id, sizeof(s_id)); + memcpy((void*)((char*)ib_addr->key + sizeof(lid) + sizeof(s_id)), + &ep_jobid, sizeof(ep_jobid)); + /* caching lid and subnet id */ + ib_addr->subnet_id = s_id; + 
ib_addr->lid = lid; + + return OMPI_SUCCESS; +} + +/* Create new entry in hash table for subnet_id and lid, + * update the context pointer. + * Before call to this function you need to protect with + */ +static ib_address_t* xoob_ib_address_add_new (ompi_common_ofacm_xoob_module_t *xcpc, + uint16_t lid, uint64_t s_id, orte_jobid_t ep_jobid) +{ + void *tmp; + int ret; + struct ib_address_t *ib_addr = OBJ_NEW(ib_address_t); + + ret = xoob_ib_address_init(ib_addr, lid, s_id, ep_jobid); + if (OMPI_SUCCESS != ret ) { + OFACM_ERROR(("XRC Internal error. Failed to init ib_addr\n")); + OBJ_DESTRUCT(ib_addr); + return NULL; + } + /* is it already in the table ?*/ + if (OPAL_SUCCESS != opal_hash_table_get_value_ptr(&xcpc->ib_addr_table, + ib_addr->key, + SIZE_OF3(s_id, lid, ep_jobid), &tmp)) { + /* It is new one, lets put it on the table */ + ret = opal_hash_table_set_value_ptr(&xcpc->ib_addr_table, + ib_addr->key, SIZE_OF3(s_id, lid, ep_jobid), (void*)ib_addr); + if (OPAL_SUCCESS != ret) { + OFACM_ERROR(("XRC Internal error." + " Failed to add element to ib_addr_table\n")); + OBJ_DESTRUCT(ib_addr); + return NULL; + } + } else { + /* so we have this one in the table, just return the pointer */ + OBJ_DESTRUCT(ib_addr); + ib_addr = (ib_address_t *)tmp; + OBJ_RETAIN(ib_addr); + assert(lid == ib_addr->lid && s_id == ib_addr->subnet_id); + } + + /* update the context with pointer to ib address */ + return ib_addr; +} + +static void xoob_connection_complete(ompi_common_ofacm_xoob_local_connection_context_t *xcontext) +{ + bool master = false; + pending_context_t *pcon; + ompi_common_ofacm_base_local_connection_context_t *con; + ompi_common_ofacm_base_local_connection_context_t *context = + XOOB_TO_BASE(xcontext); + + OFACM_VERBOSE(("Now we are CONNECTED")); + OPAL_THREAD_LOCK(&xcontext->addr->addr_lock); + if (XOOB_ADDR_CONNECTED == xcontext->addr->status) { + /* We are not xrc master */ + /* set our qp pointer to master qp */ + master = false; + } else { + /* I'm master of XRC */ + xcontext->addr->status = XOOB_ADDR_CONNECTED; + master = true; + } + + /* The status was moved down to cpc */ + context->state = MCA_COMMON_OFACM_CONNECTED; + + while(master && !opal_list_is_empty(&xcontext->addr->pending_contexts)) { + pcon = (pending_context_t *)opal_list_remove_first(&xcontext->addr->pending_contexts); + con = XOOB_TO_BASE(pcon->xcontext); + OBJ_RELEASE(pcon); + if (OMPI_SUCCESS != + xoob_module_start_connect(con)) { + OFACM_ERROR(("Failed to connect pending endpoint\n")); + } + } + OPAL_THREAD_UNLOCK(&xcontext->addr->addr_lock); + + context->connect_cb(context->user_context); +} + +static int xoob_init_rem_info_alloc_qp(ompi_common_ofacm_base_remote_connection_context_t *rem_info) +{ + rem_info->rem_qps = (ompi_common_ofacm_base_rem_qp_info_t *) + malloc(sizeof(ompi_common_ofacm_base_rem_qp_info_t)); + if (NULL == rem_info->rem_qps) { + OFACM_ERROR(("Failed to allocate memory for remote QP data\n")); + return OMPI_ERROR; + } + return OMPI_SUCCESS; +} + +static int xoob_init_rem_info_alloc_srq(ompi_common_ofacm_base_remote_connection_context_t *rem_info, uint8_t num_srqs) +{ + rem_info->rem_srqs = (ompi_common_ofacm_base_rem_srq_info_t*) + calloc(num_srqs, sizeof(ompi_common_ofacm_base_rem_srq_info_t)); + if (NULL == rem_info->rem_srqs) { + OFACM_ERROR(("Failed to allocate memory for remote SRQ data\n")); + return OMPI_ERROR; + } + return OMPI_SUCCESS; +} + +/* Free remote information structs */ +static void xoob_free_rem_info(ompi_common_ofacm_base_remote_connection_context_t *rem_info) +{ + if (NULL != 
rem_info->rem_qps) { + free(rem_info->rem_qps); + } + if (NULL != rem_info->rem_srqs) { + free(rem_info->rem_srqs); + } +} + +static int xoob_set_remote_info(ompi_common_ofacm_xoob_local_connection_context_t *xcontext, + ompi_common_ofacm_base_remote_connection_context_t *remote_info) +{ + ompi_common_ofacm_base_local_connection_context_t *context = XOOB_TO_BASE(xcontext); + + /* If we got qp information - copy it */ + if (NULL != remote_info->rem_qps) { + xoob_init_rem_info_alloc_qp(&context->remote_info); + memcpy(context->remote_info.rem_qps, + remote_info->rem_qps, + sizeof(ompi_common_ofacm_base_rem_qp_info_t)); + } + + if (NULL != remote_info->rem_srqs) { + xoob_init_rem_info_alloc_srq(&context->remote_info, context->num_of_srqs); + memcpy(context->remote_info.rem_srqs, remote_info->rem_srqs, + sizeof(ompi_common_ofacm_base_rem_srq_info_t)*context->num_of_srqs); + } + + context->remote_info.rem_lid = remote_info->rem_lid; + context->remote_info.rem_subnet_id = remote_info->rem_subnet_id; + context->remote_info.rem_mtu = remote_info->rem_mtu; + context->remote_info.rem_index = remote_info->rem_index; + + OFACM_VERBOSE(("Setting QP info, LID = %d", context->remote_info.rem_lid)); + return OMPI_SUCCESS; + +} + +static void xoob_report_error(ompi_common_ofacm_xoob_local_connection_context_t *xcontext) +{ + if (NULL == xcontext || NULL == (XOOB_TO_BASE(xcontext))->error_cb) { + /* The context is undefined and we can not print specific error */ + orte_show_help("help-mpi-common-ofacm-oob.txt", + "ofacm oob fatal error", true, + orte_process_info.nodename, + __FILE__, __LINE__); + exit(1); + } + + /* Other way, call to user error callback */ + (XOOB_TO_BASE(xcontext))->error_cb((XOOB_TO_BASE(xcontext))->user_context); +} + +static int xoob_context_init(ompi_common_ofacm_xoob_local_connection_context_t *xcontext, + ompi_common_ofacm_xoob_module_t *xcpc, + ompi_common_ofacm_base_context_connect_cb_fn_t connect_cb, + ompi_common_ofacm_base_context_error_cb_fn_t error_cb, + ompi_common_ofacm_base_context_prepare_recv_cb_fn_t prepare_recv_cb, + ompi_common_ofacm_base_proc_t *proc, + ompi_common_ofacm_base_qp_config_t *qp_config, + struct ibv_pd *pd, uint64_t subnet_id, int cpc_type, + uint16_t lid, uint16_t rem_lid, + int32_t user_context_index, void *user_context) +{ + int ret; + ompi_common_ofacm_base_local_connection_context_t *context = + XOOB_TO_BASE(xcontext); + ompi_common_ofacm_base_module_t *cpc = + (ompi_common_ofacm_base_module_t *)xcpc; + + /* Set IB address for this context */ + xcontext->addr = xoob_ib_address_add_new(xcpc, rem_lid, subnet_id, proc->proc_ompi->proc_name.jobid); + if (NULL == xcontext->addr) { + OFACM_ERROR(("Failed to allocate or found xoob ib address")); + return OMPI_ERROR; + } + + /* Allocate memory for QPs */ + if (NULL == xcontext->addr->qps) { + xcontext->addr->qps = + calloc(qp_config->num_qps, sizeof(ompi_common_ofacm_base_qp_t)); + if(NULL == xcontext->addr->qps) { + OFACM_ERROR(("Failed to allocate memory for qps")); + return OMPI_ERR_OUT_OF_RESOURCE; + } + } + /* Update QP pointers */ + context->qps = xcontext->addr->qps; + + /* Init base context */ + ret = ompi_common_ofacm_base_context_init(context, cpc, connect_cb, error_cb, + prepare_recv_cb, proc, qp_config, + pd, subnet_id, cpc_type, lid, rem_lid, user_context_index, user_context); + if (OMPI_SUCCESS != ret) { + return ret; + } + + return OMPI_SUCCESS; +} + +/* XOOB connection context init */ +static ompi_common_ofacm_base_local_connection_context_t* + xoob_endpoint_init(ompi_proc_t *proc, + 
ompi_common_ofacm_base_qp_config_t *qp_config, + struct ibv_pd *pd, uint64_t subnet_id, int cpc_type, + uint16_t lid, uint16_t rem_lid, int32_t user_context_index, void *user_context, + ompi_common_ofacm_base_module_t *cpc, + ompi_common_ofacm_base_context_connect_cb_fn_t connect_cb, + ompi_common_ofacm_base_context_error_cb_fn_t error_cb, + ompi_common_ofacm_base_context_prepare_recv_cb_fn_t prepare_recv_cb) +{ + int ret; + bool new_proc; + ompi_common_ofacm_xoob_local_connection_context_t *xcontext; + ompi_common_ofacm_base_proc_t *context_proc; + ompi_common_ofacm_xoob_module_t *xcpc = + (ompi_common_ofacm_xoob_module_t *)cpc; + + xcontext = (ompi_common_ofacm_xoob_local_connection_context_t*) + OBJ_NEW(ompi_common_ofacm_xoob_local_connection_context_t); + context_proc = ompi_common_ofacm_base_find_proc(&ompi_common_ofacm_xoob, proc); + + if (NULL == context_proc) { + new_proc = true; + /* constructing new proc */ + context_proc = (ompi_common_ofacm_base_proc_t *) + OBJ_NEW(ompi_common_ofacm_base_proc_t ); + } else { + new_proc = false; + OBJ_RETAIN(context_proc); + } + + OFACM_VERBOSE(("Xoob endpoint init: cpc_type %d, rem_lid %d, my_lid %d, subnet id %d", + cpc_type, rem_lid, lid, subnet_id)); + + ompi_common_ofacm_base_proc_setup(context_proc, XOOB_TO_BASE(xcontext), proc); + ret = xoob_context_init(xcontext, xcpc, connect_cb, error_cb, + prepare_recv_cb, context_proc, qp_config, + pd, subnet_id, cpc_type, lid, rem_lid, user_context_index, user_context); + if (OMPI_SUCCESS != ret) { + OBJ_DESTRUCT(context_proc); + OBJ_DESTRUCT(xcontext); + return NULL; + } + if(new_proc) { + opal_list_append(&ompi_common_ofacm_xoob.all_procs, + (opal_list_item_t *)context_proc); + } + + return &xcontext->super; +} + +static int xoob_endpoint_finalize + (ompi_common_ofacm_base_local_connection_context_t *context) +{ + opal_list_item_t *proc_item, *cntx_item, *cntx_item_next; + opal_list_t *proc_list = &ompi_common_ofacm_xoob.all_procs; + ompi_common_ofacm_xoob_local_connection_context_t *xcontext; + + /* Proc cleanup. We should find the context proc in all proc list and remove + * from the proc list our context. 
After it we try to release the proc context */ + for (proc_item = opal_list_get_first(proc_list); + proc_item != opal_list_get_end(proc_list); + proc_item = opal_list_get_next(proc_item)) { + if (context->proc == ((ompi_common_ofacm_base_proc_t *)proc_item)){ + ompi_common_ofacm_base_proc_t *proc = + (ompi_common_ofacm_base_proc_t *)proc_item; + opal_list_t *cntx_list = &proc->all_contexts; + + /* Remove the context from proc list */ + cntx_item = opal_list_get_first(cntx_list); + while(cntx_item != opal_list_get_end(cntx_list)) { + /* take the next before removing from the list */ + cntx_item_next = opal_list_get_next(cntx_item); + if (context == (ompi_common_ofacm_base_local_connection_context_t *)cntx_item) { + opal_list_remove_item(cntx_list, cntx_item); + } + cntx_item = cntx_item_next; + } + /* Remove our proc from all list */ + if (opal_list_is_empty(cntx_list)) { + opal_list_remove_item(proc_list, (opal_list_item_t *)proc); + } + OBJ_RELEASE(proc); + } + } + + if (0 != context->xrc_recv_qp_num) { + if(ibv_unreg_xrc_rcv_qp(context->init_attr[0].xrc_domain, + context->xrc_recv_qp_num)) { + OFACM_ERROR(("Failed to unregister XRC recv QP:%d\n", context->xrc_recv_qp_num)); + } + } + + xcontext = BASE_TO_XOOB(context); + + /* We done with proc release and now we way destroy the context */ + OBJ_DESTRUCT(xcontext); + + return OMPI_SUCCESS; +} + +/* + * Callback when we have finished RML sending the connect data to a + * remote peer + */ +static void xoob_rml_send_cb(int status, orte_process_name_t* context, + opal_buffer_t* buffer, orte_rml_tag_t tag, + void* cbdata) +{ + OBJ_RELEASE(buffer); +} + +/* Receive connect information to remote context */ +static int xoob_receive_connect_data(ompi_common_ofacm_base_remote_connection_context_t *info, uint16_t *lid, int *cpc_type, + uint8_t *message_type, opal_buffer_t* buffer) +{ + int cnt = 1, rc, srq; + uint8_t num_srqs; + + /* Recv standart header */ + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT8)); + rc = opal_dss.unpack(buffer, message_type, &cnt, OPAL_UINT8); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + return OMPI_ERROR; + } + OFACM_VERBOSE(("Recv unpack Message type = %d", *message_type)); + + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT64)); + rc = opal_dss.unpack(buffer, &info->rem_subnet_id, &cnt, OPAL_UINT64); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + return OMPI_ERROR; + } + OFACM_VERBOSE(("Recv unpack sid = %d", info->rem_subnet_id)); + + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT16)); + rc = opal_dss.unpack(buffer, &info->rem_lid, &cnt, OPAL_UINT16); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + return OMPI_ERROR; + } + OFACM_VERBOSE(("Recv unpack lid = %d", info->rem_lid)); + + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_INT)); + rc = opal_dss.unpack(buffer, cpc_type, &cnt, OPAL_INT); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + return OMPI_ERROR; + } + OFACM_VERBOSE(("Recv unpack cpc_type = %d", *cpc_type)); + + /* Till now we got the standart header, now we continue to recieve data for + * different packet types + */ + if (ENDPOINT_XOOB_CONNECT_REQUEST == *message_type || + ENDPOINT_XOOB_CONNECT_RESPONSE == *message_type) { + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32)); + rc = opal_dss.unpack(buffer, &info->rem_qps->rem_qp_num, &cnt, + OPAL_UINT32); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + return OMPI_ERROR; + } + OFACM_VERBOSE(("Recv unpack remote qp = %x", info->rem_qps->rem_qp_num)); + + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, 
OPAL_UINT32)); + rc = opal_dss.unpack(buffer, &info->rem_qps->rem_psn, &cnt, + OPAL_UINT32); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + return OMPI_ERROR; + } + OFACM_VERBOSE(("Recv unpack remote psn = %d", info->rem_qps->rem_psn)); + + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32)); + rc = opal_dss.unpack(buffer, &info->rem_mtu, &cnt, OPAL_UINT32); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + return OMPI_ERROR; + } + OFACM_VERBOSE(("Recv unpack remote mtu = %d", info->rem_mtu)); + } + + if (ENDPOINT_XOOB_CONNECT_REQUEST == *message_type || + ENDPOINT_XOOB_CONNECT_XRC_REQUEST == *message_type) { + /* unpack requested lid info */ + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT16)); + rc = opal_dss.unpack(buffer, lid, &cnt, OPAL_UINT16); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + return OMPI_ERROR; + } + OFACM_VERBOSE(("Recv unpack requested lid = %d", *lid)); + } + + /* Unpack requested recv qp number */ + if (ENDPOINT_XOOB_CONNECT_XRC_REQUEST == *message_type) { + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32)); + /* In XRC request case we will use rem_qp_num as container for requested qp number */ + rc = opal_dss.unpack(buffer, &info->rem_qps->rem_qp_num, &cnt, + OPAL_UINT32); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + return rc; + } + OFACM_VERBOSE(("Recv unpack requested qp = %x", info->rem_qps->rem_qp_num)); + } + + if (ENDPOINT_XOOB_CONNECT_RESPONSE == *message_type || + ENDPOINT_XOOB_CONNECT_XRC_RESPONSE == *message_type) { + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT32)); + rc = opal_dss.unpack(buffer, &info->rem_index, &cnt, OPAL_UINT32); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + return OMPI_ERROR; + } + OFACM_VERBOSE(("Recv unpack remote index = %d", info->rem_index)); + + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT8)); + rc = opal_dss.unpack(buffer, &num_srqs, &cnt, OPAL_UINT8); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + return OMPI_ERROR; + } + OFACM_VERBOSE(("Recv unpack remote num of srqs = %d", num_srqs)); + + rc = xoob_init_rem_info_alloc_srq(info, num_srqs); + if (OMPI_SUCCESS != rc) { + return OMPI_ERROR; + } + for (srq = 0; srq < num_srqs; srq++) { + OFACM_VERBOSE(("unpacking %d of %d\n", cnt, OPAL_UINT8)); + rc = opal_dss.unpack(buffer, &info->rem_srqs[srq].rem_srq_num, &cnt, OPAL_UINT32); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + return OMPI_ERROR; + } + OFACM_VERBOSE(("Recv unpack remote index srq num[%d]= %d", srq, info->rem_srqs[srq].rem_srq_num)); + } + } + return OMPI_SUCCESS; +} + +/* + * send connect information to remote context + */ +static int xoob_send_connect_data(ompi_common_ofacm_xoob_local_connection_context_t* xcontext, + uint8_t message_type) +{ + opal_buffer_t* buffer = OBJ_NEW(opal_buffer_t); + int rc, srq; + ompi_common_ofacm_base_local_connection_context_t *context = XOOB_TO_BASE(xcontext); + + if (NULL == buffer) { + ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE); + return ORTE_ERR_OUT_OF_RESOURCE; + } + + /* Bulding standart header that we use in all messages: + * - Message type, + * - Our subnet id + * - Our LID + */ + /* pack the info in the send buffer */ + OFACM_VERBOSE(("Send pack Message type = %d", message_type)); + OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT8)); + rc = opal_dss.pack(buffer, &message_type, 1, OPAL_UINT8); + if (ORTE_SUCCESS != rc) { + ORTE_ERROR_LOG(rc); + return rc; + } + + OFACM_VERBOSE(("Send pack sid = %d", context->subnet_id)); + OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT64)); + rc = 
+    OFACM_VERBOSE(("Send pack sid = %d", context->subnet_id));
+    OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT64));
+    rc = opal_dss.pack(buffer, &context->subnet_id, 1, OPAL_UINT64);
+    if (ORTE_SUCCESS != rc) {
+        ORTE_ERROR_LOG(rc);
+        return rc;
+    }
+
+    OFACM_VERBOSE(("Send pack lid = %d", context->lid));
+    OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT16));
+    rc = opal_dss.pack(buffer, &context->lid, 1, OPAL_UINT16);
+    if (ORTE_SUCCESS != rc) {
+        ORTE_ERROR_LOG(rc);
+        return rc;
+    }
+
+    OFACM_VERBOSE(("Send pack cpc type = %d", context->cpc_type));
+    OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_INT));
+    rc = opal_dss.pack(buffer, &context->cpc_type, 1, OPAL_INT);
+    if (ORTE_SUCCESS != rc) {
+        ORTE_ERROR_LOG(rc);
+        return rc;
+    }
+
+    /* Now we append to the standard header the additional information
+     * that is required for a full (open qp, etc.) connect request and response:
+     * - qp_num of the first qp
+     * - psn of the first qp
+     * - MTU
+     */
+    if (ENDPOINT_XOOB_CONNECT_REQUEST == message_type ||
+        ENDPOINT_XOOB_CONNECT_RESPONSE == message_type) {
+        uint32_t psn, qp_num;
+
+        if (ENDPOINT_XOOB_CONNECT_REQUEST == message_type) {
+            qp_num = context->qps[0].lcl_qp->qp_num;
+            psn = context->qps[0].lcl_psn;
+        } else {
+            qp_num = context->xrc_recv_qp_num;
+            psn = xcontext->xrc_recv_psn;
+        }
+        /* stuff all the QP info into the buffer */
+        /* we need to send only one QP */
+        OFACM_VERBOSE(("Send pack qp num = %x", qp_num));
+        OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
+        rc = opal_dss.pack(buffer, &qp_num, 1, OPAL_UINT32);
+        if (ORTE_SUCCESS != rc) {
+            ORTE_ERROR_LOG(rc);
+            return rc;
+        }
+        OFACM_VERBOSE(("Send pack lpsn = %d", psn));
+        OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
+        rc = opal_dss.pack(buffer, &psn, 1, OPAL_UINT32);
+        if (ORTE_SUCCESS != rc) {
+            ORTE_ERROR_LOG(rc);
+            return rc;
+        }
+
+        OFACM_VERBOSE(("Send pack mtu = %d", context->attr[0].path_mtu));
+        OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
+        rc = opal_dss.pack(buffer, &context->attr[0].path_mtu, 1,
+                           OPAL_UINT32);
+        if (ORTE_SUCCESS != rc) {
+            ORTE_ERROR_LOG(rc);
+            return rc;
+        }
+    }
+
+    /* We append to the header above the additional information
+     * that is required for full & XRC connect requests:
+     * - The LID of the BTL on the remote side that we want to connect to
+     */
+    if (ENDPOINT_XOOB_CONNECT_REQUEST == message_type ||
+        ENDPOINT_XOOB_CONNECT_XRC_REQUEST == message_type) {
+        /* when we are sending a request we add the remote lid that we want to connect to */
+
+        OFACM_VERBOSE(("Send pack remote lid = %d", context->rem_lid));
+        OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT16));
+        rc = opal_dss.pack(buffer, &context->rem_lid, 1, OPAL_UINT16);
+        if (ORTE_SUCCESS != rc) {
+            ORTE_ERROR_LOG(rc);
+            return rc;
+        }
+    }
+
+    /* When we are sending an XRC request we also add the remote
+     * recv qp number that we want to connect to.
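+     *
+     * For reference, the full wire layout of an XRC request is the
+     * following (the field order must match xoob_receive_connect_data();
+     * the sizes are the DSS-packed types, not raw struct sizes):
+     *
+     *   OPAL_UINT8   message_type  (ENDPOINT_XOOB_CONNECT_XRC_REQUEST)
+     *   OPAL_UINT64  subnet_id
+     *   OPAL_UINT16  lid
+     *   OPAL_INT     cpc_type
+     *   OPAL_UINT16  rem_lid       (requested LID on the remote side)
+     *   OPAL_UINT32  rem_qp_num    (requested XRC recv QP number)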
+     */
+    if (ENDPOINT_XOOB_CONNECT_XRC_REQUEST == message_type) {
+        OFACM_VERBOSE(("Send pack remote qp = %x", xcontext->addr->remote_xrc_rcv_qp_num));
+        OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
+        rc = opal_dss.pack(buffer, &xcontext->addr->remote_xrc_rcv_qp_num,
+                           1, OPAL_UINT32);
+        if (ORTE_SUCCESS != rc) {
+            ORTE_ERROR_LOG(rc);
+            return rc;
+        }
+    }
+    /* We append to the header above the additional information
+     * that is required for full & XRC connect responses:
+     * - index of our context
+     * - array of xrc-srq numbers
+     */
+    if (ENDPOINT_XOOB_CONNECT_RESPONSE == message_type ||
+        ENDPOINT_XOOB_CONNECT_XRC_RESPONSE == message_type) {
+        /* we need to send the context index for immediate send */
+        OFACM_VERBOSE(("Send pack index = %d", context->index));
+        OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
+        rc = opal_dss.pack(buffer, &context->index, 1, OPAL_UINT32);
+        if (ORTE_SUCCESS != rc) {
+            ORTE_ERROR_LOG(rc);
+            return rc;
+        }
+
+        OFACM_VERBOSE(("Send pack number of srqs = %d", context->num_of_srqs));
+        OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT8));
+        rc = opal_dss.pack(buffer, &context->num_of_srqs, 1, OPAL_UINT8);
+        if (ORTE_SUCCESS != rc) {
+            ORTE_ERROR_LOG(rc);
+            return rc;
+        }
+        /* on response we add all SRQ numbers */
+        for (srq = 0; srq < context->num_of_srqs; srq++) {
+            OFACM_VERBOSE(("Send pack srq[%d] num = %d", srq, context->srq_num[srq]));
+            OFACM_VERBOSE(("packing %d of %d\n", 1, OPAL_UINT32));
+            rc = opal_dss.pack(buffer, &context->srq_num[srq],
+                               1, OPAL_UINT32);
+            if (ORTE_SUCCESS != rc) {
+                ORTE_ERROR_LOG(rc);
+                return rc;
+            }
+        }
+    }
+
+    /* send to the remote endpoint */
+    rc = orte_rml.send_buffer_nb(&context->proc->proc_ompi->proc_name,
+                                 buffer, OMPI_RML_TAG_XOFACM, 0,
+                                 xoob_rml_send_cb, NULL);
+    if (ORTE_SUCCESS != rc) {
+        ORTE_ERROR_LOG(rc);
+        return rc;
+    }
+
+    OFACM_VERBOSE(("Send QP Info, LID = %d, SUBNET = %d, Message type = %d",
+                   context->lid,
+                   context->subnet_id,
+                   message_type));
+
+    return OMPI_SUCCESS;
+}
+
+/* Create the XRC send qp */
+static int xoob_send_qp_create
+    (ompi_common_ofacm_xoob_local_connection_context_t* xcontext)
+{
+    struct ibv_qp *qp;
+    struct ibv_qp_init_attr init_attr;
+    struct ibv_qp_attr attr;
+    int ret;
+    size_t req_inline;
+    uint32_t init_mask = 0;
+    ompi_common_ofacm_base_local_connection_context_t *context = XOOB_TO_BASE(xcontext);
+
+    /* Prepare QP structs */
+    memcpy(&init_attr, &context->init_attr[0], sizeof(init_attr));
+    req_inline = init_attr.cap.max_inline_data;
+    qp = ibv_create_qp(context->ib_pd, &init_attr);
+    if (NULL == qp) {
+        OFACM_ERROR(("Error creating QP, errno says: %s", strerror(errno)));
+        return OMPI_ERROR;
+    }
+
+    context->qps[0].lcl_qp = qp;
+
+    if (init_attr.cap.max_inline_data < req_inline) {
+        context->qps[0].ib_inline_max = init_attr.cap.max_inline_data;
+        orte_show_help("help-mpi-common-ofacm-base.txt",
+                       "inline truncated", true, orte_process_info.nodename,
+                       req_inline, init_attr.cap.max_inline_data);
+    } else {
+        context->qps[0].ib_inline_max = req_inline;
+    }
+
+    memcpy(&attr, &context->attr[0], sizeof(attr));
+    attr.qp_state = IBV_QPS_INIT;
+    attr.qp_access_flags = IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ;
+    init_mask = IBV_QP_STATE |
+                IBV_QP_PKEY_INDEX |
+                IBV_QP_PORT |
+                IBV_QP_ACCESS_FLAGS;
+
+    /* applying the user-specified init mask */
+    if (NULL != context->custom_init_attr_mask) {
+        init_mask |= context->custom_init_attr_mask[0];
+    }
+
+    ret = ibv_modify_qp(qp, &attr, init_mask);
+    if (ret) {
+        OFACM_ERROR(("Error modifying QP[%x] to IBV_QPS_INIT errno says: %s [%d]",
+ qp->qp_num, strerror(ret), ret)); + return OMPI_ERROR; + } + + /* Setup meta data on the context */ + context->qps[0].lcl_psn = lrand48() & 0xffffff; + + /* Now that all the qp's are created locally, post some receive + buffers, setup credits, etc. */ + return context->prepare_recv_cb(context->user_context); +} + +/* Send qp connect */ +static int xoob_send_qp_connect(ompi_common_ofacm_xoob_local_connection_context_t *xcontext) +{ + struct ibv_qp* qp; + struct ibv_qp_attr attr; + uint32_t psn, rtr_mask = 0, rts_mask = 0; + int ret; + ompi_common_ofacm_base_local_connection_context_t *context = XOOB_TO_BASE(xcontext); + enum ibv_mtu mtu = (context->attr[0].path_mtu < context->remote_info.rem_mtu) ? + context->attr[0].path_mtu : context->remote_info.rem_mtu; + + OFACM_VERBOSE(("Connecting Send QP\n")); + assert(NULL != context->qps); + qp = context->qps[0].lcl_qp; + psn = context->qps[0].lcl_psn; + + memset(&attr, 0, sizeof(attr)); + memcpy(&attr, context->attr, sizeof(struct ibv_qp_attr)); + attr.qp_state = IBV_QPS_RTR; + attr.path_mtu = mtu; + attr.dest_qp_num = context->remote_info.rem_qps[0].rem_qp_num; + attr.rq_psn = context->remote_info.rem_qps[0].rem_psn; + attr.ah_attr.dlid = context->remote_info.rem_lid; + attr.ah_attr.static_rate = 0; + rtr_mask = IBV_QP_STATE | + IBV_QP_AV | + IBV_QP_PATH_MTU | + IBV_QP_DEST_QPN | + IBV_QP_RQ_PSN | + IBV_QP_MAX_DEST_RD_ATOMIC | + IBV_QP_MIN_RNR_TIMER; + + /* applying user specified rtr mask */ + if (NULL != context->custom_rtr_attr_mask) { + rtr_mask |= context->custom_rtr_attr_mask[0]; + } + + OFACM_VERBOSE(("Set MTU to IBV value %d (%s bytes)", attr.path_mtu, + (attr.path_mtu == IBV_MTU_256) ? "256" : + (attr.path_mtu == IBV_MTU_512) ? "512" : + (attr.path_mtu == IBV_MTU_1024) ? "1024" : + (attr.path_mtu == IBV_MTU_2048) ? "2048" : + (attr.path_mtu == IBV_MTU_4096) ? "4096" : + "unknown (!)")); + + ret = ibv_modify_qp(qp, &attr, rtr_mask); + if (ret) { + OFACM_ERROR(("Error modifying QP[%x] to IBV_QPS_RTR errno says: %s [%d]", + qp->qp_num, strerror(ret), ret)); + return OMPI_ERROR; + } + + attr.qp_state = IBV_QPS_RTS; + attr.sq_psn = context->qps[0].lcl_psn; + /* applying user specified rts mask */ + rts_mask = IBV_QP_STATE | + IBV_QP_TIMEOUT | + IBV_QP_RETRY_CNT | + IBV_QP_RNR_RETRY | + IBV_QP_SQ_PSN | + IBV_QP_MAX_QP_RD_ATOMIC; + + /* applying user specified rts mask */ + + if (NULL != context->custom_rts_attr_mask) { + rts_mask |= context->custom_rts_attr_mask[0]; + } + + ret = ibv_modify_qp(qp, &attr, rts_mask); + if (ret) { + OFACM_ERROR(("Error modifying QP[%x] to IBV_QPS_RTS errno says: %s [%d]", + qp->qp_num, strerror(ret), ret)); + return OMPI_ERROR; + } + + return OMPI_SUCCESS; +} + +/* Recv qp create */ +static int xoob_recv_qp_create(ompi_common_ofacm_xoob_local_connection_context_t *xcontext, + ompi_common_ofacm_base_remote_connection_context_t *remote_info) +{ + struct ibv_qp_init_attr init_attr; + struct ibv_qp_attr attr; + int ret; + uint32_t init_mask = 0, rtr_mask = 0; + struct ibv_xrc_domain *xrc_domain; + ompi_common_ofacm_base_local_connection_context_t *context = XOOB_TO_BASE(xcontext); + enum ibv_mtu mtu = (context->attr[0].path_mtu < remote_info->rem_mtu) ? 
+                       context->attr[0].path_mtu : remote_info->rem_mtu;
+
+    OFACM_VERBOSE(("Connecting Recv QP\n"));
+
+    memcpy(&init_attr, &context->init_attr[0], sizeof(init_attr));
+    xrc_domain = init_attr.xrc_domain;
+    /* Only xrc_domain is required, all others are ignored */
+    ret = ibv_create_xrc_rcv_qp(&init_attr, &context->xrc_recv_qp_num);
+    if (ret) {
+        OFACM_ERROR(("Error creating XRC recv QP[%x], errno says: %s [%d]",
+                     context->xrc_recv_qp_num, strerror(ret), ret));
+        return OMPI_ERROR;
+    }
+
+    memcpy(&attr, &context->attr[0], sizeof(attr));
+    attr.qp_state = IBV_QPS_INIT;
+    attr.qp_access_flags = IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ;
+    init_mask = IBV_QP_STATE |
+                IBV_QP_PKEY_INDEX |
+                IBV_QP_PORT |
+                IBV_QP_ACCESS_FLAGS;
+
+    /* applying the user-specified init mask */
+    if (NULL != context->custom_init_attr_mask) {
+        init_mask |= context->custom_init_attr_mask[0];
+    }
+
+    ret = ibv_modify_xrc_rcv_qp(xrc_domain, context->xrc_recv_qp_num,
+                                &attr, init_mask);
+    if (ret) {
+        OFACM_ERROR(("Error modifying XRC recv QP[%x] to IBV_QPS_INIT, errno says: %s [%d]",
+                     context->xrc_recv_qp_num, strerror(ret), ret));
+        return OMPI_ERROR;
+    }
+
+    memcpy(&attr, &context->attr[0], sizeof(attr));
+    attr.qp_state = IBV_QPS_RTR;
+    attr.path_mtu = mtu;
+    attr.dest_qp_num = remote_info->rem_qps[0].rem_qp_num;
+    attr.rq_psn = remote_info->rem_qps[0].rem_psn;
+    attr.ah_attr.dlid = remote_info->rem_lid;
+    attr.ah_attr.static_rate = 0;
+    rtr_mask = IBV_QP_STATE |
+               IBV_QP_AV |
+               IBV_QP_PATH_MTU |
+               IBV_QP_DEST_QPN |
+               IBV_QP_RQ_PSN |
+               IBV_QP_MAX_DEST_RD_ATOMIC |
+               IBV_QP_MIN_RNR_TIMER;
+
+    /* applying the user-specified rtr mask */
+    if (NULL != context->custom_rtr_attr_mask) {
+        rtr_mask |= context->custom_rtr_attr_mask[0];
+    }
+
+    ret = ibv_modify_xrc_rcv_qp(xrc_domain, context->xrc_recv_qp_num,
+                                &attr, rtr_mask);
+    if (ret) {
+        OFACM_ERROR(("Error modifying XRC recv QP[%x] to IBV_QPS_RTR, errno says: %s [%d]",
+                     context->xrc_recv_qp_num, strerror(ret), ret));
+        return OMPI_ERROR;
+    }
+
+    return OMPI_SUCCESS;
+}
+
+/* Recv qp connect */
+static int xoob_recv_qp_connect(ompi_common_ofacm_xoob_local_connection_context_t *xcontext,
+                                ompi_common_ofacm_base_remote_connection_context_t *rem_info)
+{
+    int ret;
+    ompi_common_ofacm_base_local_connection_context_t *context = XOOB_TO_BASE(xcontext);
+
+    struct ibv_xrc_domain *xrc_domain = context->init_attr[0].xrc_domain;
+
+    OFACM_VERBOSE(("Connecting Recv QP\n"));
+    ret = ibv_reg_xrc_rcv_qp(xrc_domain, rem_info->rem_qps->rem_qp_num);
+    if (ret) { /* failed to register the qp, so it has already died and we should create a new one */
+        /* Return NOT READY !!! */
+        OFACM_ERROR(("Failed to register qp_num: %d, got error: %s (%d)."
+                     " Replying with RNR\n",
+                     rem_info->rem_qps->rem_qp_num, strerror(ret), ret));
+        return OMPI_ERROR;
+    } else {
+        /* save the qp number for unregister */
+        context->xrc_recv_qp_num = rem_info->rem_qps->rem_qp_num;
+        return OMPI_SUCCESS;
+    }
+}
+
+/*
+ * Reply to a `start connect' message
+ */
+static int xoob_reply_first_connect(ompi_common_ofacm_xoob_local_connection_context_t *xcontext,
+                                    ompi_common_ofacm_base_remote_connection_context_t *remote_info)
+{
+    int rc;
+    ompi_common_ofacm_base_local_connection_context_t *context =
+        XOOB_TO_BASE(xcontext);
+
+    OFACM_VERBOSE(("Initialized QPs, LID = %d", (XOOB_TO_BASE(xcontext))->lid));
+
+    /* Create the local QPs and post receive resources */
+    if (OMPI_SUCCESS != (rc = xoob_recv_qp_create(xcontext, remote_info))) {
+        return rc;
+    }
+
+    /* prepost data on the receiver side */
+    if (OMPI_SUCCESS != (rc = context->prepare_recv_cb(context->user_context))) {
+        OFACM_ERROR(("Failed to post on XRC SRQs"));
+        xoob_report_error(xcontext);
+        return rc;
+    }
+
+    if (OMPI_SUCCESS !=
+        (rc = xoob_send_connect_data(xcontext, ENDPOINT_XOOB_CONNECT_RESPONSE))) {
+        OFACM_ERROR(("Error sending the connect response, error code is %d",
+                     rc));
+        return rc;
+    }
+
+    return OMPI_SUCCESS;
+}
+
+/* Find the context for a specific subnet/lid/message/cpc type */
+static ompi_common_ofacm_xoob_local_connection_context_t* xoob_find_context
+    (orte_process_name_t* process_name, uint64_t subnet_id,
+     uint16_t lid, uint8_t message_type, int cpc_type)
+{
+    ompi_common_ofacm_xoob_local_connection_context_t *xcontext = NULL;
+    ompi_common_ofacm_base_proc_t *context_proc = NULL;
+    bool found = false;
+    opal_list_t *all_procs =
+        &ompi_common_ofacm_xoob.all_procs;
+
+    OFACM_VERBOSE(("Searching for an ep and proc with the following parameters:"
+                   " jobid %d, vpid %d, sid %d, lid %d, cpc type %d",
+                   process_name->jobid, process_name->vpid, subnet_id, lid, cpc_type));
+    /* find the ibproc */
+    for (context_proc = (ompi_common_ofacm_base_proc_t*)opal_list_get_first(all_procs);
+         context_proc != (ompi_common_ofacm_base_proc_t*)opal_list_get_end(all_procs);
+         context_proc = (ompi_common_ofacm_base_proc_t*)opal_list_get_next(context_proc)) {
+        if (orte_util_compare_name_fields(ORTE_NS_CMP_ALL,
+                    &context_proc->proc_ompi->proc_name, process_name) == OPAL_EQUAL) {
+            found = true;
+            break;
+        }
+    }
+
+    /* we found our context_proc, let's find the context now */
+    if (found) {
+        opal_list_t *context_list = &context_proc->all_contexts;
+        ompi_common_ofacm_base_local_connection_context_t *context;
+        for (context = (ompi_common_ofacm_base_local_connection_context_t *)
+                 opal_list_get_first(context_list);
+             context != (ompi_common_ofacm_base_local_connection_context_t *)
+                 opal_list_get_end(context_list);
+             context = (ompi_common_ofacm_base_local_connection_context_t *)
+                 opal_list_get_next(context)) {
+            /* we need to check a different
+             * lid for different message types */
+            if (ENDPOINT_XOOB_CONNECT_RESPONSE == message_type ||
+                ENDPOINT_XOOB_CONNECT_XRC_RESPONSE == message_type) {
+                /* response message */
+                if (context->subnet_id == subnet_id &&
+                    context->rem_lid == lid) {
+                    xcontext = BASE_TO_XOOB(context);
+                    break; /* Found one */
+                }
+            } else {
+                /* request message */
+                if (context->subnet_id == subnet_id &&
+                    context->lid == lid) {
+                    xcontext = BASE_TO_XOOB(context);
+                    break; /* Found one */
+                }
+            }
+        }
+        if (NULL == xcontext) {
+            OFACM_ERROR(("can't find a suitable context for this peer\n"));
+        }
+    } else {
+        OFACM_ERROR(("can't find a suitable context for this peer\n"));
+    }
+    return xcontext;
+}
+
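+/* For orientation, a sketch of the XOOB handshake that the handlers below
+ * implement (initiator on the left, target on the right):
+ *
+ *   first connection to an ib_addr:
+ *     CONNECT_REQUEST      -->  create XRC recv QP, prepost buffers
+ *                          <--  CONNECT_RESPONSE (qp num, psn, SRQ numbers)
+ *     connect the send QP, connection complete
+ *
+ *   later connections to the same ib_addr (send QP already exists):
+ *     CONNECT_XRC_REQUEST  -->  ibv_reg_xrc_rcv_qp() on the existing QP
+ *                          <--  CONNECT_XRC_RESPONSE (SRQ numbers), or
+ *                          <--  CONNECT_XRC_NR_RESPONSE if the recv QP is
+ *                               gone; the initiator then restarts from
+ *                               scratch via xoob_restart_connect().
+ */
+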
+/* If the XRC recv qp was closed and the sender does not know about it yet,
+ * we need to close the qp, reset the ib_addr status to CLOSED and start
+ * everything from scratch.
+ */
+static void xoob_restart_connect
+    (ompi_common_ofacm_xoob_local_connection_context_t *xcontext)
+{
+    ompi_common_ofacm_base_local_connection_context_t *context =
+        XOOB_TO_BASE(xcontext);
+    OFACM_VERBOSE(("Restarting the connection for the context"));
+    OPAL_THREAD_LOCK(&xcontext->addr->addr_lock);
+    switch (xcontext->addr->status) {
+    case XOOB_ADDR_CONNECTED:
+        /* so we have the send qp, we just need the receive side.
+         * Send a request for the SRQ numbers */
+        OFACM_VERBOSE(("Restarting the IB addr: sid %d lid %d "
+                       "in XOOB_ADDR_CONNECTED status,"
+                       " changing to XOOB_ADDR_CLOSED and starting from scratch\n",
+                       context->subnet_id, context->lid));
+        /* Switching back to closed and starting from scratch */
+        xcontext->addr->status = XOOB_ADDR_CLOSED;
+        /* destroy the qp */
+        if (ibv_destroy_qp(context->qps[0].lcl_qp))
+            OFACM_ERROR(("Failed to destroy QP"));
+        /* fall through to restart from scratch */
+    case XOOB_ADDR_CLOSED:
+    case XOOB_ADDR_CONNECTING:
+        OFACM_VERBOSE(("Restarting the IB addr: sid %d lid %d "
+                       "in XOOB_ADDR_CONNECTING or XOOB_ADDR_CLOSED status,"
+                       " starting from scratch\n",
+                       context->subnet_id, context->lid));
+        OPAL_THREAD_UNLOCK(&xcontext->addr->addr_lock);
+        /* xoob_module_start_connect() should automatically handle all other cases */
+        if (OMPI_SUCCESS != xoob_module_start_connect(XOOB_TO_BASE(xcontext)))
+            OFACM_ERROR(("Failed to restart connection from XOOB_ADDR_CONNECTING/CLOSED"));
+        break;
+    default:
+        OFACM_ERROR(("Invalid context status %d", xcontext->addr->status));
+        OPAL_THREAD_UNLOCK(&xcontext->addr->addr_lock);
+    }
+}
+
+/*
+ * Non-blocking RML recv callback.  Read the incoming QP and other info,
+ * and if this endpoint is trying to connect, reply with our QP info;
+ * otherwise try to modify the QPs and establish a reliable connection.
+ */
+static void xoob_rml_recv_cb(int status, orte_process_name_t* process_name,
+                             opal_buffer_t* buffer, orte_rml_tag_t tag,
+                             void* cbdata)
+{
+    int rc;
+    uint8_t message_type;
+    uint16_t requested_lid = 0;
+    int cpc_type = -1;
+    ompi_common_ofacm_base_local_connection_context_t *context;
+    ompi_common_ofacm_xoob_local_connection_context_t *xcontext;
+    ompi_common_ofacm_base_remote_connection_context_t remote_info;
+
+    /* Init the remote info */
+    memset(&remote_info, 0,
+           sizeof(ompi_common_ofacm_base_remote_connection_context_t));
+
+    if (OMPI_SUCCESS != xoob_init_rem_info_alloc_qp(&remote_info)) {
+        return;
+    }
+
+    /* Get the data. */
+    if (OMPI_SUCCESS !=
+        xoob_receive_connect_data(&remote_info, &requested_lid, &cpc_type, &message_type, buffer)) {
+        OFACM_ERROR(("Failed to read data\n"));
+        xoob_report_error(NULL);
+        return;
+    }
+
+    /* Process the message */
+    switch (message_type) {
+    case ENDPOINT_XOOB_CONNECT_REQUEST:
+        OFACM_VERBOSE(("Received ENDPOINT_XOOB_CONNECT_REQUEST: lid %d, sid %d, rlid %d\n",
+                       remote_info.rem_lid,
+                       remote_info.rem_subnet_id,
+                       requested_lid));
+        xcontext = xoob_find_context(process_name, remote_info.rem_subnet_id,
+                                     requested_lid, message_type, cpc_type);
+        if (NULL == xcontext) {
+            OFACM_ERROR(("Got ENDPOINT_XOOB_CONNECT_REQUEST."
+ " Failed to find context with subnet %d and LID %d", + remote_info.rem_subnet_id, requested_lid)); + xoob_free_rem_info(&remote_info); + xoob_report_error(xcontext); + return; + } + context = XOOB_TO_BASE(xcontext); + OPAL_THREAD_LOCK(&context->context_lock); + /* we should create qp and send the info + srq to requestor */ + rc = xoob_reply_first_connect(xcontext, &remote_info); + if (OMPI_SUCCESS != rc) { + OFACM_ERROR(("error in context reply start connect")); + xoob_free_rem_info(&remote_info); + xoob_report_error(xcontext); + return; + } + /* enable pooling for this btl */ + OPAL_THREAD_UNLOCK(&context->context_lock); + break; + case ENDPOINT_XOOB_CONNECT_XRC_REQUEST: + OFACM_VERBOSE(("Received ENDPOINT_XOOB_CONNECT_XRC_REQUEST: lid %d, sid %d\n", + remote_info.rem_lid, + remote_info.rem_subnet_id)); + xcontext = xoob_find_context(process_name, remote_info.rem_subnet_id, + requested_lid, message_type, cpc_type); + if (NULL == xcontext) { + OFACM_ERROR(("Got ENDPOINT_XOOB_CONNECT_XRC_REQUEST." + " Failed to find context with subnet %d and LID %d", + remote_info.rem_subnet_id, requested_lid)); + xoob_free_rem_info(&remote_info); + xoob_report_error(xcontext); + return; + } + + context = XOOB_TO_BASE(xcontext); + + if (OMPI_SUCCESS == xoob_recv_qp_connect(xcontext, &remote_info)) { + if (OMPI_SUCCESS != context->prepare_recv_cb(context->user_context)) { + OFACM_ERROR(("Failed to post on XRC SRQs")); + xoob_free_rem_info(&remote_info); + xoob_report_error(xcontext); + return; + } + OPAL_THREAD_LOCK(&context->context_lock); + rc = xoob_send_connect_data(xcontext, ENDPOINT_XOOB_CONNECT_XRC_RESPONSE); + if (OMPI_SUCCESS != rc) { + OFACM_ERROR(("error in context reply start connect")); + xoob_free_rem_info(&remote_info); + xoob_report_error(xcontext); + return; + } + OPAL_THREAD_UNLOCK(&context->context_lock); + } else { + /* The XRC recv qp was destroyed */ + OPAL_THREAD_LOCK(&context->context_lock); + rc = xoob_send_connect_data(xcontext, ENDPOINT_XOOB_CONNECT_XRC_NR_RESPONSE); + if (OMPI_SUCCESS != rc) { + OFACM_ERROR(("error in context reply start connect")); + xoob_free_rem_info(&remote_info); + xoob_report_error(xcontext); + return; + } + OPAL_THREAD_UNLOCK(&context->context_lock); + } + break; + case ENDPOINT_XOOB_CONNECT_RESPONSE: + OFACM_VERBOSE(("Received ENDPOINT_XOOB_CONNECT_RESPONSE: lid %d, sid %d\n", + remote_info.rem_lid, + remote_info.rem_subnet_id)); + xcontext = xoob_find_context(process_name, remote_info.rem_subnet_id, + remote_info.rem_lid, message_type, cpc_type); + if (NULL == xcontext) { + OFACM_ERROR(("Got ENDPOINT_XOOB_CONNECT_RESPONSE." + " Failed to find context with subnet %d and LID %d", + remote_info.rem_subnet_id, remote_info.rem_lid)); + xoob_free_rem_info(&remote_info); + xoob_report_error(xcontext); + return; + } + + context = XOOB_TO_BASE(xcontext); + OPAL_THREAD_LOCK(&context->context_lock); + /* we got all the data srq. 
+         * switch the context to connected mode */
+        xoob_set_remote_info(xcontext, &remote_info);
+        /* update ib_addr with the remote qp number */
+        xcontext->addr->remote_xrc_rcv_qp_num =
+            remote_info.rem_qps->rem_qp_num;
+        OFACM_VERBOSE(("rem_info: lid %d, sid %d ep %d %d",
+                       remote_info.rem_lid,
+                       remote_info.rem_subnet_id,
+                       context->remote_info.rem_lid,
+                       context->remote_info.rem_subnet_id));
+        if (OMPI_SUCCESS != xoob_send_qp_connect(xcontext)) {
+            OFACM_ERROR(("Failed to connect context\n"));
+            xoob_free_rem_info(&remote_info);
+            xoob_report_error(xcontext);
+            return;
+        }
+        xoob_connection_complete(xcontext);
+        OPAL_THREAD_UNLOCK(&context->context_lock);
+        break;
+    case ENDPOINT_XOOB_CONNECT_XRC_RESPONSE:
+        OFACM_VERBOSE(("Received ENDPOINT_XOOB_CONNECT_XRC_RESPONSE: lid %d, sid %d\n",
+                       remote_info.rem_lid,
+                       remote_info.rem_subnet_id));
+        xcontext = xoob_find_context(process_name, remote_info.rem_subnet_id,
+                                     remote_info.rem_lid, message_type, cpc_type);
+        if (NULL == xcontext) {
+            OFACM_ERROR(("Got ENDPOINT_XOOB_CONNECT_XRC_RESPONSE."
+                         " Failed to find context with subnet %d and LID %d",
+                         remote_info.rem_subnet_id, remote_info.rem_lid));
+            xoob_report_error(xcontext);
+            return;
+        }
+        context = XOOB_TO_BASE(xcontext);
+        OPAL_THREAD_LOCK(&context->context_lock);
+        /* we got the srq numbers in reply to our request */
+        xoob_set_remote_info(xcontext, &remote_info);
+        xoob_connection_complete(xcontext);
+        OPAL_THREAD_UNLOCK(&context->context_lock);
+        break;
+    case ENDPOINT_XOOB_CONNECT_XRC_NR_RESPONSE:
+        /* The XRC recv side was already destroyed, so we need
+         * to bring up the connection from scratch */
+        OFACM_VERBOSE(("Received ENDPOINT_XOOB_CONNECT_XRC_NR_RESPONSE: lid %d, sid %d\n",
+                       remote_info.rem_lid,
+                       remote_info.rem_subnet_id));
+        xcontext = xoob_find_context(process_name, remote_info.rem_subnet_id,
+                                     remote_info.rem_lid, message_type, cpc_type);
+        if (NULL == xcontext) {
+            OFACM_ERROR(("Got ENDPOINT_XOOB_CONNECT_XRC_NR_RESPONSE."
+                         " Failed to find context with subnet %d and LID %d",
+                         remote_info.rem_subnet_id, remote_info.rem_lid));
+            xoob_report_error(xcontext);
+            return;
+        }
+        xoob_restart_connect(xcontext);
+        break;
+    default:
+        OFACM_ERROR(("Invalid message type %d", message_type));
+    }
+
+    xoob_free_rem_info(&remote_info);
+}
+
+/*
+ * XOOB interface functions
+ */
+
+/* Query for the XOOB priority - it will be the highest in the XRC case */
+static int xoob_component_query(ompi_common_ofacm_base_dev_desc_t *dev,
+                                ompi_common_ofacm_base_module_t **cpc)
+{
+    int rc;
+    ompi_common_ofacm_xoob_module_t *xcpc; /* xoob cpc module */
+    ompi_common_ofacm_base_module_t *bcpc; /* base cpc module */
+
+    if (!(dev->capabilities & OMPI_COMMON_OFACM_XRC_ONLY)) {
+        OFACM_VERBOSE(("openib BTL: xoob CPC only supported with XRC receive queues; skipped on device %s",
+                       ibv_get_device_name(dev->ib_dev)));
+        return OMPI_ERR_NOT_SUPPORTED;
+    }
+
+    xcpc = malloc(sizeof(ompi_common_ofacm_xoob_module_t));
+    if (NULL == xcpc) {
+        OFACM_VERBOSE(("openib BTL: xoob CPC system error (malloc failed)"));
+        return OMPI_ERR_OUT_OF_RESOURCE;
+    }
+
+    bcpc = &xcpc->super;
+
+    /* If this btl supports XOOB, then post the RML message.  But
+       ensure to only post it *once*, because another btl may have
+       come in before this and already posted it.
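+       The receive is posted persistently (ORTE_RML_PERSISTENT) on the
+       wildcard peer name, so this single xoob_rml_recv_cb() instance
+       serves every xoob endpoint in the process for the whole job.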
+    */
+    if (!rml_recv_posted) {
+        rc = orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD,
+                                     OMPI_RML_TAG_XOFACM,
+                                     ORTE_RML_PERSISTENT,
+                                     xoob_rml_recv_cb,
+                                     NULL);
+        if (ORTE_SUCCESS != rc) {
+            OFACM_VERBOSE(("OFACM: xoob CPC system error %d (%s)",
+                           rc, opal_strerror(rc)));
+            return rc;
+        }
+        rml_recv_posted = true;
+    }
+
+    OBJ_CONSTRUCT(&ompi_common_ofacm_xoob.all_procs, opal_list_t);
+    bcpc->data.cbm_component = &ompi_common_ofacm_xoob;
+    bcpc->data.cbm_priority = xoob_priority;
+    bcpc->data.cbm_modex_message = NULL;
+    bcpc->data.cbm_modex_message_len = 0;
+
+    bcpc->cbm_endpoint_init = xoob_endpoint_init;
+    bcpc->cbm_start_connect = xoob_module_start_connect;
+    bcpc->cbm_endpoint_finalize = xoob_endpoint_finalize;
+    bcpc->cbm_finalize = NULL;
+    bcpc->cbm_uses_cts = false;
+
+    /* Build our hash table for subnetid-lid */
+    OBJ_CONSTRUCT(&xcpc->ib_addr_table, opal_hash_table_t);
+
+    assert(orte_process_info.num_procs > 1);
+    if (NULL == xcpc->ib_addr_table.ht_table) {
+        if (OPAL_SUCCESS != opal_hash_table_init(
+                                &xcpc->ib_addr_table, orte_process_info.num_procs)) {
+            OFACM_ERROR(("XRC internal error. Failed to allocate the ib_table"));
+            return OMPI_ERROR;
+        }
+    }
+
+    *cpc = bcpc;
+
+    OFACM_VERBOSE(("openib BTL: xoob CPC available for use on %s",
+                   ibv_get_device_name(dev->ib_dev)));
+
+    return OMPI_SUCCESS;
+}
+
+/* Open - this function sets up any xoob-specific command line params */
+static void xoob_component_register(void)
+{
+    mca_base_param_reg_int_name("common",
+                                "ofacm_connect_xoob_priority",
+                                "The selection method priority for xoob",
+                                false, false, xoob_priority, &xoob_priority);
+
+    if (xoob_priority > 100) {
+        xoob_priority = 100;
+    } else if (xoob_priority < -1) {
+        xoob_priority = -1;
+    }
+}
+
+/*
+ * Connect function.  Start the initiation of a connection to a remote
+ * peer.  We send our Queue Pair information over the RML/OOB
+ * communication mechanism.  On completion of our send, a send
+ * completion handler is called.
+ */
+static int xoob_module_start_connect
+    (ompi_common_ofacm_base_local_connection_context_t *context)
+{
+    int rc = OMPI_SUCCESS;
+    ompi_common_ofacm_xoob_local_connection_context_t *xcontext =
+        (ompi_common_ofacm_xoob_local_connection_context_t *)context;
+    pending_context_t *pcontext;
+
+    OPAL_THREAD_LOCK(&xcontext->addr->addr_lock);
+    switch (xcontext->addr->status) {
+    case XOOB_ADDR_CLOSED:
+        OFACM_VERBOSE(("The IB addr: sid %d lid %d "
+                       "is in XOOB_ADDR_CLOSED status,"
+                       " sending ENDPOINT_XOOB_CONNECT_REQUEST\n",
+                       xcontext->addr->subnet_id, xcontext->addr->lid));
+        if (OMPI_SUCCESS != (rc = xoob_send_qp_create(xcontext))) {
+            break;
+        }
+
+        /* Send the connection info over to the remote endpoint */
+        xcontext->super.state = MCA_COMMON_OFACM_CONNECTING;
+        xcontext->addr->status = XOOB_ADDR_CONNECTING;
+        if (OMPI_SUCCESS !=
+            (rc = xoob_send_connect_data(xcontext, ENDPOINT_XOOB_CONNECT_REQUEST))) {
+            OFACM_ERROR(("Error sending connect request, error code %d", rc));
+        }
+        break;
+    case XOOB_ADDR_CONNECTING:
+        OFACM_VERBOSE(("The IB addr: sid %d lid %d "
+                       "is in XOOB_ADDR_CONNECTING status,"
+                       " subscribing to this address\n",
+                       xcontext->addr->subnet_id, xcontext->addr->lid));
+        pcontext = OBJ_NEW(pending_context_t);
+        xoob_pending_context_init(pcontext, xcontext);
+        /* somebody is already connecting to this machine, let's wait */
+        opal_list_append(&xcontext->addr->pending_contexts,
+                         (opal_list_item_t *)pcontext);
+        xcontext->super.state = MCA_COMMON_OFACM_CONNECTING;
+        break;
+    case XOOB_ADDR_CONNECTED:
+        /* so we have the send qp, we just need the receive side.
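+         * (XRC shares one send QP per remote ib_addr, so a later endpoint
+         * to the same subnet/LID reuses the already-connected send QP and
+         * only needs the peer's SRQ numbers.)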
+ * Send request for SRQ numbers */ + OFACM_VERBOSE(("The IB addr: sid %d lid %d" + "in XOOB_ADDR_CONNECTED status," + " sending ENDPOINT_XOOB_CONNECT_XRC_REQUEST\n", + context->subnet_id, context->lid)); + xcontext->super.state = MCA_COMMON_OFACM_CONNECTING; + if (OMPI_SUCCESS != + (rc = xoob_send_connect_data(xcontext, ENDPOINT_XOOB_CONNECT_XRC_REQUEST))) { + OFACM_ERROR(("error sending xrc connect request, error code %d", rc)); + } + break; + default : + OFACM_ERROR(("Invalid context status %d", xcontext->addr->status)); + + } + OPAL_THREAD_UNLOCK(&xcontext->addr->addr_lock); + return rc; +} + + +/* + * Finalize function. Cleanup RML non-blocking receive. + */ +static int xoob_component_finalize(void) +{ + if (rml_recv_posted) { + orte_rml.recv_cancel(ORTE_NAME_WILDCARD, OMPI_RML_TAG_XOFACM); + rml_recv_posted = false; + } + return OMPI_SUCCESS; +} diff --git a/ompi/mca/common/ofacm/common_ofacm_xoob.h b/ompi/mca/common/ofacm/common_ofacm_xoob.h new file mode 100644 index 0000000000..0790387786 --- /dev/null +++ b/ompi/mca/common/ofacm/common_ofacm_xoob.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2007-2012 Mellanox Technologies. All rights reserved. + * Copyright (c) 2008 Cisco Systems, Inc. All rights reserved. + * + * Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved. + * $COPYRIGHT$ + * + * Additional copyrights may follow + * + * $HEADER$ + */ + +#ifndef COMMON_OFACM_XOOB_H +#define COMMON_OFACM_XOOB_H + +#include "opal/class/opal_hash_table.h" +#include "connect.h" + +extern ompi_common_ofacm_base_component_t ompi_common_ofacm_xoob; + +typedef enum { + XOOB_ADDR_CONNECTING = 100, + XOOB_ADDR_CONNECTED, + XOOB_ADDR_CLOSED +} ompi_common_ofacm_ib_addr_state_t; + +struct ib_address_t { + opal_list_item_t super; + void *key; /* the key with size 80bit - [subnet(64) LID(16bit)] */ + uint64_t subnet_id; /* caching subnet_id */ + uint16_t lid; /* caching lid */ + opal_list_t pending_contexts; /* list of endpoints that use this ib_address */ + struct ompi_common_ofacm_base_qp_t *qps; /* pointer to qp that will be used + for communication with the + destination */ + uint32_t remote_xrc_rcv_qp_num; /* remote xrc qp number */ + opal_mutex_t addr_lock; /* protection */ + ompi_common_ofacm_ib_addr_state_t status; /* ib port status */ +}; +typedef struct ib_address_t + ib_address_t; + +struct ompi_common_ofacm_xoob_local_connection_context_t { + ompi_common_ofacm_base_local_connection_context_t super; + ib_address_t *addr; + uint32_t xrc_recv_qp_num; /* in xrc we will use it as recv qp */ + uint32_t xrc_recv_psn; +}; +typedef struct ompi_common_ofacm_xoob_local_connection_context_t + ompi_common_ofacm_xoob_local_connection_context_t; +OMPI_DECLSPEC OBJ_CLASS_DECLARATION(ompi_common_ofacm_xoob_local_connection_context_t); + +struct ompi_common_ofacm_xoob_module_t { + ompi_common_ofacm_base_module_t super; + opal_hash_table_t ib_addr_table; /**< used only for xrc.hash-table that + keeps table of all lids/subnets */ +}; +typedef struct ompi_common_ofacm_xoob_module_t + ompi_common_ofacm_xoob_module_t; + +struct pending_context_t { + opal_list_item_t super; + ompi_common_ofacm_xoob_local_connection_context_t *xcontext; +}; +typedef struct pending_context_t + pending_context_t; +OMPI_DECLSPEC OBJ_CLASS_DECLARATION(pending_context_t); + +#endif diff --git a/ompi/mca/common/ofacm/configure.m4 b/ompi/mca/common/ofacm/configure.m4 new file mode 100644 index 0000000000..76a12f4ec8 --- /dev/null +++ b/ompi/mca/common/ofacm/configure.m4 @@ -0,0 +1,63 @@ +# -*- shell-script -*- +# +# 
Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana +# University Research and Technology +# Corporation. All rights reserved. +# Copyright (c) 2004-2005 The University of Tennessee and The University +# of Tennessee Research Foundation. All rights +# reserved. +# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, +# University of Stuttgart. All rights reserved. +# Copyright (c) 2004-2005 The Regents of the University of California. +# All rights reserved. +# Copyright (c) 2007-2009 Cisco Systems, Inc. All rights reserved. +# Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved. +# Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved. +# $COPYRIGHT$ +# +# Additional copyrights may follow +# +# $HEADER$ +# + +# MCA_ompi_common_ofacm_CONFIG([should_build]) +# ------------------------------------------ +AC_DEFUN([MCA_ompi_common_ofacm_POST_CONFIG], [ + AM_CONDITIONAL([MCA_common_ofacm_have_xrc], [test $1 -eq 1 -a "x$common_ofacm_have_xrc" = "x1"]) +]) + + +# MCA_ompi_common_ofacm_CONFIG([action-if-can-compile], +# [action-if-cant-compile]) +# ------------------------------------------------ +AC_DEFUN([MCA_ompi_common_ofacm_CONFIG],[ + AC_CONFIG_FILES([ompi/mca/common/ofacm/Makefile]) + OPAL_VAR_SCOPE_PUSH([modules ofacm_have_threads]) + modules="oob" + + common_ofacm_happy="no" + OMPI_CHECK_OPENFABRICS([common_ofacm], + [common_ofacm_happy="yes" + OMPI_CHECK_OPENFABRICS_CM([common_ofacm])]) + + AS_IF([test "$common_ofacm_happy" = "yes"], + [common_ofacm_WRAPPER_EXTRA_LDFLAGS="$common_ofacm_LDFLAGS" + common_ofacm_WRAPPER_EXTRA_LIBS="$common_ofacm_LIBS" + $1], + [$2]) + + AS_IF([test "$common_ofacm_happy" = "yes"], + [if test "x$common_ofacm_have_xrc" = "x1"; then + modules="$modules xoob" + fi + AC_MSG_CHECKING([which OpenFabrics CM modules will be built]) + AC_MSG_RESULT([$modules])]) + + # substitute in the things needed to build openib + AC_SUBST([common_ofacm_CFLAGS]) + AC_SUBST([common_ofacm_CPPFLAGS]) + AC_SUBST([common_ofacm_LDFLAGS]) + AC_SUBST([common_ofacm_LIBS]) + + OPAL_VAR_SCOPE_POP +])dnl diff --git a/ompi/mca/common/ofacm/configure.params b/ompi/mca/common/ofacm/configure.params new file mode 100644 index 0000000000..b74c3bfb3f --- /dev/null +++ b/ompi/mca/common/ofacm/configure.params @@ -0,0 +1,26 @@ +# -*- shell-script -*- +# +# Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana +# University Research and Technology +# Corporation. All rights reserved. +# Copyright (c) 2004-2005 The University of Tennessee and The University +# of Tennessee Research Foundation. All rights +# reserved. +# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, +# University of Stuttgart. All rights reserved. +# Copyright (c) 2004-2005 The Regents of the University of California. +# All rights reserved. +# Copyright (c) 2007 Los Alamos National Security, LLC. All rights +# reserved. +# Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved. +# Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved. +# $COPYRIGHT$ +# +# Additional copyrights may follow +# +# $HEADER$ +# + +# Specific to this module + +PARAM_CONFIG_FILES="Makefile" diff --git a/ompi/mca/common/ofacm/connect.h b/ompi/mca/common/ofacm/connect.h new file mode 100644 index 0000000000..4d2aceb82e --- /dev/null +++ b/ompi/mca/common/ofacm/connect.h @@ -0,0 +1,541 @@ +/* + * Copyright (c) 2007-2008 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2009 Mellanox Technogies, Inc. 
All rights reserved. + * + * Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved. + * Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved. + * $COPYRIGHT$ + * + * Additional copyrights may follow + * + * $HEADER$ + */ + +/** + * @file + * + * This interface is designed to hide the back-end details of how IB + * RC connections are made from the rest of the openib BTL. There are + * module-like instances of the implemented functionality (dlopen and + * friends are not used, but all the functionality is accessed through + * struct's of function pointers, so you can swap between multiple + * different implementations at run time, just like real components). + * Hence, these entities are referred to as "Connect + * Pseudo-Components" (CPCs). + * + * The CPCs are referenced by their names (e.g., "oob", "rdma_cm"). + * + * CPCs are split into components and modules, similar to all other + * MCA frameworks in this code base. + * + * Before diving into the CPC interface, let's discuss some + * terminology and mappings of data structures: + * + * - a BTL module represents a network port (in the case of the openib + * BTL, a LID) + * - a CPC module represents one way to make connections to a BTL module + * - hence, a BTL module has potentially multiple CPC modules + * associated with it + * - an endpoint represnts a connection between a local BTL module and + * a remote BTL module (in the openib BTL, because of BSRQ, an + * endpoint can contain multiple QPs) + * - when an endpoint is created, one of the CPC modules associated + * with the local BTL is selected and associated with the endpoint + * (obviously, it is a CPC module that is common between the local + * and remote BTL modules) + * - endpoints may be created and destroyed during the MPI job + * - endpoints are created lazily, during the first communication + * between two peers + * - endpoints are destroyed when two MPI processes become + * disconnected (e.g., MPI-2 dynamics or MPI_FINALIZE) + * - hence, BTL modules and CPC modules outlive endpoints. + * Specifically, BTL modules and CPC modules live from MPI_INIT to + * MPI_FINALIZE. endpoints come and go as MPI semantics demand it. + * - therefore, CPC modules need to cache information on endpoints that + * are specific to that connection. + * + * Component interface: + * + * - component_register(): The openib BTL's component_open() function + * calls the connect_base_register() function, which scans all + * compiled-in CPC's. If they have component_register() functions, + * they are called (component_register() functions are only allowed to + * register MCA parameters). + * + * NOTE: The connect_base_register() function will process the + * btl_openib_cpc_include and btl_openib_cpc_exclude MCA parameters + * and automatically include/exclude CPCs as relevant. If a CPC is + * excluded, none of its other interface functions will be invoked for + * the duration of the process. + * + * - component_init(): The openib BTL's component_init() function + * calls connect_base_init(), which will invoke this query function on + * each CPC to see if it wants to run at all. CPCs can gracefully + * remove themselves from consideration in this process by returning + * OMPI_ERR_NOT_SUPPORTED. + * + * - component_query(): The openib BTL's init_one_port() calls the + * connect_base_select_for_local_port() function, which, for each LID + * on that port, calls the component_query() function on every + * available CPC on that LID. 
This function is intended to see if a
+ * CPC can run on a specific openib BTL module (i.e., LID).  If it
+ * can, the CPC is supposed to create a CPC module that is specific to
+ * that BTL/LID and return it.  If it cannot, it should return
+ * OMPI_ERR_NOT_SUPPORTED and be gracefully skipped for this
+ * OpenFabrics port.
+ *
+ * component_finalize(): The openib BTL's component_close() function
+ * calls connect_base_finalize(), which, in turn, calls the
+ * component_finalize() function on all available CPCs.  Note that all
+ * CPC modules will have been finalized by this point; the CPC
+ * component_finalize() function is a chance for the CPC to clean up
+ * any component-specific resources.
+ *
+ * Module interface:
+ *
+ * cbm_component member: A pointer pointing to the single, global
+ * instance of the CPC component.  This member is used for creating a
+ * unique index representing the modules' component so that it can be
+ * shared with remote peer processes.
+ *
+ * cbm_priority member: An integer between 0 and 100, inclusive,
+ * representing the priority of this CPC.
+ *
+ * cbm_modex_message member: A pointer to a blob buffer that will be
+ * included in the modex message for this port for this CPC (it is
+ * assumed that this blob is a) only understandable by the
+ * corresponding CPC in the peer process, and b) contains specific
+ * addressing/contact information for *this* port's CPC module).
+ *
+ * cbm_modex_message_len member: The length of the cbm_modex_message
+ * blob, in bytes.
+ *
+ * cbm_endpoint_init(): Called during endpoint creation, allowing a
+ * CPC module to cache information on the endpoint.  A pointer to the
+ * endpoint's CPC module is already cached on the endpoint.
+ *
+ * cbm_start_connect(): initiate a connection to a remote peer.  The
+ * CPC is responsible for setting itself up for asynchronous operation
+ * for progressing the outgoing connection request.
+ *
+ * cbm_endpoint_finalize(): Called during the endpoint destruction,
+ * allowing the CPC module to destroy anything that it cached on the
+ * endpoint.
+ *
+ * cbm_finalize(): shut down all asynchronous handling and clean up
+ * any state that was setup for this CPC module/BTL.  Some CPCs setup
+ * asynchronous support on a per-HCA/NIC basis (vs. per-port/LID).  It
+ * is the responsibility of the CPC to figure out such issues (e.g.,
+ * via reference counting) -- there is no notification from the
+ * upper-level BTL about when an entire HCA/NIC is no longer being
+ * used.  There is only this function, which tells when a specific
+ * CPC/BTL module is no longer being used.
+ *
+ * cbm_uses_cts: a bool that indicates whether the CPC will use the
+ * CTS protocol or not.
+ *   - if true: the CPC will post the fragment on
+ *     endpoint->endpoint_cts_frag as a receive buffer and will *not*
+ *     call ompi_btl_openib_post_recvs().
+ *   - if false: the CPC will call ompi_btl_openib_post_recvs() before
+ *     calling ompi_btl_openib_cpc_complete().
+ *
+ * There are two functions in the main openib BTL that the CPC may
+ * call:
+ *
+ * - ompi_btl_openib_post_recvs(endpoint): once a QP is locally
+ *   connected to the remote side (but we don't know if the remote side
+ *   is connected to us yet), this function is invoked to post buffers
+ *   on the QP, setup credits for the endpoint, etc.  This function is
+ *   *only* invoked if the CPC's cbm_uses_cts is false.
+ * - ompi_btl_openib_cpc_complete(endpoint): once a CPC knows
+ *   that a QP is connected on *both* sides, this function is invoked to
+ *   tell the main openib BTL "ok, you can use this connection now."
+ *   (e.g., the main openib BTL will either invoke the CTS protocol or
+ *   start sending out fragments that were queued while the connection
+ *   was establishing, etc.).
+ */
+#ifndef OMPI_COMMON_OFACM_CONNECT_H
+#define OMPI_COMMON_OFACM_CONNECT_H
+
+/* System includes */
+#include <infiniband/verbs.h>
+
+/* Open MPI includes */
+#include "ompi/proc/proc.h"
+
+BEGIN_C_DECLS
+
+#define BCF_MAX_NAME 64
+
+/**
+ * Must forward declare these structs to avoid include file loops.
+ */
+
+/**
+ * This struct is defined below
+ */
+struct ompi_common_ofacm_base_module_t;
+
+/* special capabilities */
+#define OMPI_COMMON_OFACM_XRC_ONLY   1
+#define OMPI_COMMON_OFACM_IWARP_ONLY (1 << 1)
+
+/**
+ * State of an OFACM connection.
+ */
+
+typedef enum {
+    /* Defines the state in which this BTL instance
+     * has started the process of connection */
+    MCA_COMMON_OFACM_CONNECTING,
+
+    /* Waiting for an ack from the endpoint */
+    MCA_COMMON_OFACM_CONNECT_ACK,
+
+    /* Waiting for the final connection ACK from the endpoint */
+    MCA_COMMON_OFACM_WAITING_ACK,
+
+    /* Connected ... both sender & receiver have
+     * buffers associated with this connection */
+    MCA_COMMON_OFACM_CONNECTED,
+
+    /* Connection is closed, there are no resources
+     * associated with this */
+    MCA_COMMON_OFACM_CLOSED,
+
+    /* Maximum number of retries have been used.
+     * Report failure on send to the upper layer */
+    MCA_COMMON_OFACM_FAILED,
+
+    /* We found it useful to have one more
+     * state that may be utilized for user needs */
+    MCA_COMMON_OFACM_USER_CUSTOM
+} ompi_common_ofacm_connection_state_t;
+
+typedef enum {
+    MCA_COMMON_OFACM_BTL = 0,
+    MCA_COMMON_OFACM_COLL = 100
+} ompi_common_ofacm_type;
+
+typedef struct ompi_common_ofacm_base_dev_desc_t {
+    struct ibv_device* ib_dev;          /* device */
+    struct ibv_context* ib_dev_context; /* device context */
+    int capabilities;                   /* Special capabilities, e.g. XRC, iWARP, etc.
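+                                           A sketch of the intended use
+                                           (as assumed from the xoob CPC):
+                                           the device owner sets e.g.
+                                           dev->capabilities |= OMPI_COMMON_OFACM_XRC_ONLY
+                                           and xoob's component_query()
+                                           skips any device without that bit.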
+                                         */
+} ompi_common_ofacm_base_dev_desc_t;
+
+/* QPs configuration container that should be filled by the
+ * upper layer, for example - a btl */
+typedef struct ompi_common_ofacm_base_qp_config_t {
+    int num_qps;
+    int num_srqs;
+    struct ibv_qp_init_attr *init_attr;
+    struct ibv_qp_attr *attr;
+    uint32_t *srq_num;
+    uint32_t *init_attr_mask;
+    uint32_t *rtr_attr_mask;
+    uint32_t *rts_attr_mask;
+} ompi_common_ofacm_base_qp_config_t;
+
+/* QP base data */
+typedef struct ompi_common_ofacm_base_qp_t {
+    struct ibv_qp *lcl_qp;
+    size_t ib_inline_max; /**< max size of IB inline send */
+    uint32_t lcl_psn;
+    int32_t sd_wqe;       /**< number of available send wqe entries */
+    int users;
+    opal_mutex_t lock;
+} ompi_common_ofacm_base_qp_t;
+
+/* Remote QP info */
+typedef struct ompi_common_ofacm_base_rem_qp_info_t {
+    uint32_t rem_qp_num;
+    /* Remote QP number */
+    uint32_t rem_psn;
+    /* Remote process's port sequence number */
+} ompi_common_ofacm_base_rem_qp_info_t;
+
+/* Remote SRQ info */
+typedef struct ompi_common_ofacm_base_rem_srq_info_t {
+    /* Remote SRQ number */
+    uint32_t rem_srq_num;
+} ompi_common_ofacm_base_rem_srq_info_t;
+
+/* Remote connection context */
+typedef struct ompi_common_ofacm_base_remote_connection_context_t {
+    opal_object_t super;
+    /* Local identifier of the remote process */
+    uint16_t rem_lid;
+    /* subnet id of the remote process */
+    uint64_t rem_subnet_id;
+    /* MTU of the remote process */
+    uint32_t rem_mtu;   /* TBD: not sure that I need this one */
+    /* index of the remote endpoint in the endpoint array */
+    uint32_t rem_index; /* TBD: the index we use as immediate data */
+    /* Remote QPs */
+    ompi_common_ofacm_base_rem_qp_info_t *rem_qps;
+    /* Remote xrc_srq info, used only with XRC connections */
+    ompi_common_ofacm_base_rem_srq_info_t *rem_srqs;
+} ompi_common_ofacm_base_remote_connection_context_t;
+
+typedef struct ompi_common_ofacm_base_proc_t {
+    opal_list_item_t super;
+    ompi_proc_t *proc_ompi;   /* target proc */
+    opal_list_t all_contexts; /* list of all contexts connected to
+                                 this endpoint */
+} ompi_common_ofacm_base_proc_t;
+OMPI_DECLSPEC OBJ_CLASS_DECLARATION(ompi_common_ofacm_base_proc_t);
+
+/* Connection callback function that is called on connection setup */
+typedef void (*ompi_common_ofacm_base_context_connect_cb_fn_t)(void *);
+
+/* Connection callback function that is called on a context error */
+typedef void (*ompi_common_ofacm_base_context_error_cb_fn_t)(void *);
+/* Prepare-receive callback function that is called when the recv side
+ * should be prepared, for example to prepost receive packets */
+typedef int (*ompi_common_ofacm_base_context_prepare_recv_cb_fn_t)(void *);
+
+/* Basic connection context
+ * ========================
+ * The initial connection context is created during the endpoint
+ * initialization call.  Each CPC will return an
+ * ompi_common_ofacm_base_local_connection_context_t that
+ * is based on the CPC connection context.
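+ * (The CPC-specific context embeds this base context as its first member;
+ * see e.g. ompi_common_ofacm_xoob_local_connection_context_t, whose "super"
+ * field is what the XOOB_TO_BASE()/BASE_TO_XOOB() casts rely on.)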
+ * + * As Input for context creation user must provide: + * ================================================ + * number of QPs + * qp init atributes + * qp standart attribute + * pointer to protection domain + * pointer to user context (for example pointer to endpoint in case of btl) + */ +typedef struct ompi_common_ofacm_base_local_connection_context_t { + opal_list_item_t super; + struct ompi_common_ofacm_base_proc_t *proc; /* target proc */ + struct ompi_common_ofacm_base_module_t *cpc; /* Pointer to context cpc */ + ompi_common_ofacm_connection_state_t state; /* Connection context status */ + uint64_t subnet_id; /* caching subnet_id */ + int cpc_type; /* connection manager family: openib, coll, etc..*/ + uint16_t lid; /* caching lid */ + uint16_t rem_lid; /* remote lid */ + uint8_t num_of_qps; /* Number of qps that we want to open */ + struct ompi_common_ofacm_base_qp_t *qps; /* qps data */ + uint8_t num_of_srqs; /* Number of qps that we want to open */ + uint32_t *srq_num; /* srq numbers for recv on this context */ + struct ibv_qp_init_attr *init_attr; /* list of initial attr for each qp */ + struct ibv_qp_attr *attr; /* qp attributes */ + struct ibv_pd* ib_pd; /* protection domain */ + uint32_t *custom_init_attr_mask; /* in additional to standard attr_mask we want allow to user + specify special custom masks for init */ + uint32_t *custom_rtr_attr_mask; /* in additional to standard attr_mask we want allow to user + specify special custom masks for rtr */ + uint32_t *custom_rts_attr_mask; /* in additional to standard attr_mask we want allow to user + specify special custom masks for rts */ + void *user_context; /* back pointer to endpoint */ + ompi_common_ofacm_base_context_connect_cb_fn_t connect_cb; /* Connection callback function */ + ompi_common_ofacm_base_context_error_cb_fn_t error_cb; /* Error callback function */ + ompi_common_ofacm_base_context_prepare_recv_cb_fn_t prepare_recv_cb; /* Prepare recv side + (prepost) callback function */ + /* TBD: Need to check when we can update the index. I think during endpoint creation we do not + * have it. It mean that BTL should some how to update it later ...*/ + int32_t index; /* user context index */ + bool initiator; /* initiator of connection ? */ + ompi_common_ofacm_base_remote_connection_context_t remote_info; /* data about remote side of this + connection*/ + uint32_t xrc_recv_qp_num ; /* in xrc we will use it as recv qp */ + + opal_mutex_t context_lock; /* protection */ +} ompi_common_ofacm_base_local_connection_context_t; +OMPI_DECLSPEC OBJ_CLASS_DECLARATION(ompi_common_ofacm_base_local_connection_context_t); +/* Constructor and destructor are located in common_ofacm_base.c */ + +/************************************************************************/ + +/** + * Function to register MCA params in the connect functions. It + * returns no value, so it cannot fail. + */ +typedef void (*ompi_common_ofacm_base_component_register_fn_t)(void); + +/** + * This function is invoked once by the openib BTL component during + * startup. It is intended to have CPC component-wide startup. + * + * Return value: + * + * - OMPI_SUCCESS: this CPC component will be used in selection during + * this process. + * + * - OMPI_ERR_NOT_SUPPORTED: this CPC component will be silently + * ignored in this process. + * + * - Other OMPI_ERR_* values: the error will be propagated upwards, + * likely causing a fatal error (and/or the openib BTL component + * being ignored). 
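+ *
+ * For illustration, a hypothetical CPC that is always willing to run
+ * could use the following minimal init (a sketch, not an existing
+ * function):
+ *
+ *   static int my_cpc_component_init(void)
+ *   {
+ *       return OMPI_SUCCESS;   /* participate in CPC selection */
+ *   }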
+ */ +typedef int (*ompi_common_ofacm_base_component_init_fn_t)(void); + +/** + * Query the CPC to see if it wants to run on a specific port (i.e., a + * specific BTL module). If the component init function previously + * returned OMPI_SUCCESS, this function is invoked once per BTL module + * creation (i.e., for each port found by an MPI process). If this + * CPC wants to be used on this BTL module, it returns a CPC module + * that is specific to this BTL module. + * + * The BTL module in question is passed to the function; all of its + * attributes can be used to query to see if it's eligible for this + * CPC. + * + * If it is eligible, the CPC is responsible for creating a + * corresponding CPC module, filling in all the relevant fields on the + * modules, and for setting itself up to run (per above) and returning + * a CPC module (this is effectively the "module_init" function). + * Note that the module priority must be between 0 and 100 + * (inclusive). When multiple CPCs are eligible for a single module, + * the CPC with the highest priority will be used. + * + * Return value: + * + * - OMPI_SUCCESS if this CPC is eligible for and was able to be setup + * for this BTL module. It is assumed that the CPC is now completely + * setup to run on this openib module (per description above). + * + * - OMPI_ERR_NOT_SUPPORTED if this CPC cannot support this BTL + * module. This is not an error; it's just the CPC saying "sorry, I + * cannot support this BTL module." + * + * - Other OMPI_ERR_* code: an error occurred. + */ +typedef int (*ompi_common_ofacm_base_func_component_query_t) + (struct ompi_common_ofacm_base_dev_desc_t *dev, + struct ompi_common_ofacm_base_module_t **cpc); + +/** + * This function is invoked once by the openib BTL component during + * shutdown. It is intended to have CPC component-wide shutdown. + */ +typedef int (*ompi_common_ofacm_base_component_finalize_fn_t)(void); + +/** + * CPC component struct + */ +typedef struct ompi_common_ofacm_base_component_t { + /** Name of this set of connection functions */ + char cbc_name[BCF_MAX_NAME]; + + /** Register function. Can be NULL. */ + ompi_common_ofacm_base_component_register_fn_t cbc_register; + + /** CPC component init function. Can be NULL. */ + ompi_common_ofacm_base_component_init_fn_t cbc_init; + + /** Query the CPC component to get a CPC module corresponding to + an openib BTL module. Cannot be NULL. */ + ompi_common_ofacm_base_func_component_query_t cbc_query; + + /** CPC component finalize function. Can be NULL. */ + ompi_common_ofacm_base_component_finalize_fn_t cbc_finalize; + /** All connection contexts that are using this CPC **/ + opal_list_t all_procs; +} ompi_common_ofacm_base_component_t; + +/************************************************************************/ + +/** + * Function called when an endpoint has been created and has been + * associated with a CPC. + */ +typedef ompi_common_ofacm_base_local_connection_context_t* + (*ompi_common_ofacm_base_module_endpoint_init_fn_t) + (ompi_proc_t *proc, + ompi_common_ofacm_base_qp_config_t *qp_config, struct ibv_pd *pd, + uint64_t subnet_id, int cpc_type, uint16_t lid, uint16_t rem_lid, + int32_t user_context_index, void *user_context, + struct ompi_common_ofacm_base_module_t *cpc, + ompi_common_ofacm_base_context_connect_cb_fn_t connect_cb, + ompi_common_ofacm_base_context_error_cb_fn_t error_cb, + ompi_common_ofacm_base_context_prepare_recv_cb_fn_t prepare_recv_cb); + +/** + * Function to initiate a connection to a remote process. 
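+ *
+ * A sketch of how an upper layer (e.g. a BTL) is expected to drive this,
+ * assuming "ctx" was returned earlier by cbm_endpoint_init():
+ *
+ *   if (OMPI_SUCCESS != ctx->cpc->cbm_start_connect(ctx)) {
+ *       ctx->error_cb(ctx->user_context);   /* assumed error path */
+ *   }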
+ */ +typedef int (*ompi_common_ofacm_base_module_start_connect_fn_t) + (struct ompi_common_ofacm_base_local_connection_context_t *context); + +/** + * Function called when an endpoint is being destroyed. + */ +typedef int (*ompi_common_ofacm_base_module_endpoint_finalize_fn_t) + (struct ompi_common_ofacm_base_local_connection_context_t *context); + +/** + * Function to finalize the CPC module. It is called once when the + * CPC module's corresponding openib BTL module is being finalized. + */ +typedef int (*ompi_common_ofacm_base_module_finalize_fn_t)(void); + +/** + * Error callback that is called by cpc module on error. + * The callback should be set on upper layer (for example BTL) + */ +typedef int (*ompi_common_ofacm_base_module_error_cb_fn_t)(void *); +/** + * Meta data about a CPC module. This is in a standalone struct + * because it is used in both the CPC module struct and the + * openib_btl_proc_t struct to hold information received from the + * modex. + */ +typedef struct ompi_common_ofacm_base_module_data_t { + /** Pointer back to the component. Used by the base and openib + btl to calculate this module's index for the modex. */ + ompi_common_ofacm_base_component_t *cbm_component; + + /** Priority of the CPC module (must be >=0 and <=100) */ + uint8_t cbm_priority; + + /** Blob that the CPC wants to include in the openib modex message + for a specific port, or NULL if the CPC does not want to + include a message in the modex. */ + void *cbm_modex_message; + + /** Length of the cbm_modex_message blob (0 if + cbm_modex_message==NULL). The message is intended to be short + (because the size of the modex broadcast is a function of + sum(cbm_modex_message_len[i]) for + i=(0...total_num_ports_in_MPI_job) -- e.g., IBCM imposes its + own [very short] limits (per IBTA volume 1, chapter 12). */ + uint8_t cbm_modex_message_len; +} ompi_common_ofacm_base_module_data_t; + +/** + * Struct for holding CPC module and associated meta data + */ +typedef struct ompi_common_ofacm_base_module_t { + /** Meta data about the module */ + ompi_common_ofacm_base_module_data_t data; + + /** Endpoint initialization function */ + ompi_common_ofacm_base_module_endpoint_init_fn_t cbm_endpoint_init; + + /** Connect function */ + ompi_common_ofacm_base_module_start_connect_fn_t cbm_start_connect; + + /** Endpoint finalization function */ + ompi_common_ofacm_base_module_endpoint_finalize_fn_t cbm_endpoint_finalize; + + /** Finalize the cpc module */ + ompi_common_ofacm_base_module_finalize_fn_t cbm_finalize; + + /** Whether this module will use the CTS protocol or not. This + directly states whether this module will call + mca_btl_openib_endpoint_post_recvs() or not: true = this + module will *not* call _post_recvs() and instead will post the + receive buffer provided at endpoint->endpoint_cts_frag on qp + 0. */ + bool cbm_uses_cts; +} ompi_common_ofacm_base_module_t; + +END_C_DECLS + +#endif diff --git a/ompi/mca/common/ofacm/help-mpi-common-ofacm-base.txt b/ompi/mca/common/ofacm/help-mpi-common-ofacm-base.txt new file mode 100644 index 0000000000..de4dfa5587 --- /dev/null +++ b/ompi/mca/common/ofacm/help-mpi-common-ofacm-base.txt @@ -0,0 +1,41 @@ +# -*- text -*- +# +# Copyright (c) 2008 Cisco Systems, Inc. All rights reserved. +# Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved. +# Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved. 
diff --git a/ompi/mca/common/ofacm/help-mpi-common-ofacm-base.txt b/ompi/mca/common/ofacm/help-mpi-common-ofacm-base.txt
new file mode 100644
index 0000000000..de4dfa5587
--- /dev/null
+++ b/ompi/mca/common/ofacm/help-mpi-common-ofacm-base.txt
@@ -0,0 +1,41 @@
+# -*- text -*-
+#
+# Copyright (c) 2008 Cisco Systems, Inc.  All rights reserved.
+# Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
+# Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
+# $COPYRIGHT$
+#
+# Additional copyrights may follow
+#
+# $HEADER$
+#
+# This is the US/English help file for Open MPI's OpenFabrics IB CPC
+# support.
+#
+[no cpcs for port]
+No OpenFabrics connection schemes reported that they were able to be
+used on a specific port.  As such, the openib BTL (OpenFabrics
+support) will be disabled for this port.
+
+  Local host:     %s
+  Local device:   %s
+  CPCs attempted: %s
+#
+[cpc name not found]
+An invalid CPC name was specified via the btl_openib_cpc_%s MCA
+parameter.
+
+  Local host:               %s
+  btl_openib_cpc_%s value:  %s
+  Invalid name:             %s
+  All possible valid names: %s
+#
+[inline truncated]
+WARNING: The btl_openib_max_inline_data MCA parameter was used to
+specify how much inline data should be used, but a device reduced this
+value.  This is not an error; it simply means that your run will use
+a smaller inline data value than was requested.
+
+  Local host:           %s
+  Requested value:      %d
+  Value used by device: %d
diff --git a/ompi/mca/common/ofacm/help-mpi-common-ofacm-oob.txt b/ompi/mca/common/ofacm/help-mpi-common-ofacm-oob.txt
new file mode 100644
index 0000000000..8b1de30af1
--- /dev/null
+++ b/ompi/mca/common/ofacm/help-mpi-common-ofacm-oob.txt
@@ -0,0 +1,20 @@
+# -*- text -*-
+#
+# Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
+# Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
+# $COPYRIGHT$
+#
+# Additional copyrights may follow
+#
+# $HEADER$
+#
+[ofacm oob fatal error]
+The OOB OpenFabrics Connection Manager module tried to raise a fatal
+error, but failed.
+
+  Local host:  %s
+  Source file: %s
+  Source line: %d
+
+Your job is now going to abort, sorry.
+#
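The [inline truncated] message above reflects standard verbs behavior: the caller requests a size via ibv_qp_init_attr.cap.max_inline_data, and on success ibv_create_qp() writes the capabilities the device actually granted back into the cap struct. A sketch of detecting the truncation (the function name is invented; an existing protection domain and completion queue are assumed, and error handling is trimmed):

    /* Sketch: request inline data and observe what the device grants. */
    #include <string.h>
    #include <stdint.h>
    #include <infiniband/verbs.h>
    #include "opal/util/output.h"

    static struct ibv_qp *create_qp_with_inline(struct ibv_pd *pd,
                                                struct ibv_cq *cq,
                                                uint32_t requested_inline)
    {
        struct ibv_qp_init_attr attr;
        struct ibv_qp *qp;

        memset(&attr, 0, sizeof(attr));
        attr.send_cq = cq;
        attr.recv_cq = cq;
        attr.qp_type = IBV_QPT_RC;
        attr.cap.max_send_wr  = 16;
        attr.cap.max_recv_wr  = 16;
        attr.cap.max_send_sge = 1;
        attr.cap.max_recv_sge = 1;
        attr.cap.max_inline_data = requested_inline;

        qp = ibv_create_qp(pd, &attr);
        if (NULL != qp && attr.cap.max_inline_data < requested_inline) {
            /* This is the condition the help message warns about */
            opal_output(0, "inline data truncated: requested %u, got %u",
                        requested_inline, attr.cap.max_inline_data);
        }
        return qp;
    }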
diff --git a/ompi/mca/common/ofautils/Makefile.am b/ompi/mca/common/ofautils/Makefile.am
new file mode 100644
index 0000000000..f467889096
--- /dev/null
+++ b/ompi/mca/common/ofautils/Makefile.am
@@ -0,0 +1,68 @@
+#
+# Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
+# Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
+# $COPYRIGHT$
+#
+# Additional copyrights may follow
+#
+# $HEADER$
+#
+
+AM_CPPFLAGS = $(common_ofautils_CPPFLAGS)
+
+headers = \
+        common_ofautils.h
+
+sources = \
+        common_ofautils.c
+
+# To simplify components that link to this library, we will *always*
+# have an output libtool library named libmca_<type>_<name>.la -- even
+# when we build only a noinst version (i.e., so there's no conditional
+# logic necessary in component Makefile.am's that link to this
+# library).  Hence, if we're creating a noinst version of this
+# library, we sym link it to the libmca_<type>_<name>.la name
+# (libtool will do the Right Things under the covers).  See the
+# all-local and clean-local rules, below, for how this is effected.
+
+lib_LTLIBRARIES =
+noinst_LTLIBRARIES =
+comp_inst = libmca_common_ofautils.la
+comp_noinst = libmca_common_ofautils_noinst.la
+
+if MCA_BUILD_ompi_common_ofautils_DSO
+lib_LTLIBRARIES += $(comp_inst)
+else
+noinst_LTLIBRARIES += $(comp_noinst)
+endif
+
+libmca_common_ofautils_la_SOURCES = $(headers) $(sources)
+# TBD: create own config file
+libmca_common_ofautils_la_CPPFLAGS = $(common_ofautils_CPPFLAGS)
+libmca_common_ofautils_la_LDFLAGS = $(common_ofautils_LDFLAGS)
+libmca_common_ofautils_la_LIBADD = $(common_ofautils_LIBS)
+libmca_common_ofautils_noinst_la_SOURCES = $(libmca_common_ofautils_la_SOURCES)
+
+# Conditionally install the header files
+
+if WANT_INSTALL_HEADERS
+ompidir = $(includedir)/openmpi/ompi/mca/common/ofautils
+ompi_HEADERS = $(headers)
+else
+ompidir = $(includedir)
+endif
+
+# These two rules will sym link the "noinst" libtool library filename
+# to the installable libtool library filename in the case where we are
+# compiling this component statically (the noinst case described
+# above).
+
+all-local:
+	if test -z "$(lib_LTLIBRARIES)"; then \
+	  rm -f "$(comp_inst)"; \
+	  $(LN_S) "$(comp_noinst)" "$(comp_inst)"; \
+	fi
+
+clean-local:
+	if test -z "$(lib_LTLIBRARIES)"; then \
+	  rm -f "$(comp_inst)"; \
+	fi
diff --git a/ompi/mca/common/ofautils/common_ofautils.c b/ompi/mca/common/ofautils/common_ofautils.c
new file mode 100644
index 0000000000..a242460057
--- /dev/null
+++ b/ompi/mca/common/ofautils/common_ofautils.c
@@ -0,0 +1,89 @@
+/* -*- Mode: C; c-basic-offset:4 ; -*- */
+/*
+ * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
+ *                         University Research and Technology
+ *                         Corporation.  All rights reserved.
+ * Copyright (c) 2004-2008 The University of Tennessee and The University
+ *                         of Tennessee Research Foundation.  All rights
+ *                         reserved.
+ * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
+ *                         University of Stuttgart.  All rights reserved.
+ * Copyright (c) 2004-2005 The Regents of the University of California.
+ *                         All rights reserved.
+ * Copyright (c) 2006-2009 Cisco Systems, Inc.  All rights reserved.
+ * Copyright (c) 2006-2012 Mellanox Technologies.  All rights reserved.
+ * Copyright (c) 2006-2007 Los Alamos National Security, LLC.  All rights
+ *                         reserved.
+ * Copyright (c) 2006-2007 Voltaire All rights reserved.
+ * Copyright (c) 2009      Sun Microsystems, Inc.  All rights reserved.
+ * Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
+ * $COPYRIGHT$
+ *
+ * Additional copyrights may follow
+ *
+ * $HEADER$
+ */
+
+#include "ompi_config.h"
+
+#include <stdlib.h>
+
+#include <infiniband/verbs.h>
+/* This is crummy, but <infiniband/driver.h> doesn't work on all
+   platforms with all compilers.  Specifically, trying to include it
+   on RHEL4U3 with the PGI 32 bit compiler will cause problems because
+   certain 64 bit types are not defined.  Per advice from Roland D.,
+   just include the one prototype that we need in this case
+   (ibv_get_sysfs_path()). */
+#ifdef HAVE_INFINIBAND_DRIVER_H
+#include <infiniband/driver.h>
+#else
+const char *ibv_get_sysfs_path(void);
+#endif
+#include "opal/util/output.h"
+#include "common_ofautils.h"
+
+struct ibv_device **ibv_get_device_list_compat(int *num_devs)
+{
+    struct ibv_device **ib_devs;
+
+#ifdef HAVE_IBV_GET_DEVICE_LIST
+    ib_devs = ibv_get_device_list(num_devs);
+#else
+    struct dlist *dev_list;
+    struct ibv_device *ib_dev;
+    int i = 0;
+    *num_devs = 0;
+
+    /* Determine the number of devices available on the host */
+    dev_list = ibv_get_devices();
+    if (NULL == dev_list) {
+        return NULL;
+    }
+
+    dlist_start(dev_list);
+
+    dlist_for_each_data(dev_list, ib_dev, struct ibv_device)
+        (*num_devs)++;
+
+    /* Allocate space for the ib devices */
+    ib_devs = (struct ibv_device**)malloc(*num_devs *
+                                          sizeof(struct ibv_device*));
+    if (NULL == ib_devs) {
+        *num_devs = 0;
+        opal_output(0, "Failed malloc: %s:%d", __FILE__, __LINE__);
+        return NULL;
+    }
+
+    dlist_start(dev_list);
+
+    /* Fill the array from index 0 so that the returned base pointer
+       is valid (pre-incrementing the array pointer here would skip
+       the first slot and corrupt the value passed to free() later) */
+    dlist_for_each_data(dev_list, ib_dev, struct ibv_device)
+        ib_devs[i++] = ib_dev;
+#endif
+
+    return ib_devs;
+}
+
+void ibv_free_device_list_compat(struct ibv_device **ib_devs)
+{
+#ifdef HAVE_IBV_GET_DEVICE_LIST
+    ibv_free_device_list(ib_devs);
+#else
+    free(ib_devs);
+#endif
+}
diff --git a/ompi/mca/common/ofautils/common_ofautils.h b/ompi/mca/common/ofautils/common_ofautils.h
new file mode 100644
index 0000000000..f9a8c0d9a8
--- /dev/null
+++ b/ompi/mca/common/ofautils/common_ofautils.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
+ * $COPYRIGHT$
+ *
+ * Additional copyrights may follow
+ *
+ * $HEADER$
+ */
+
+#ifndef _COMMON_OFAUTILS_H_
+#define _COMMON_OFAUTILS_H_
+
+#include "ompi_config.h"
+
+BEGIN_C_DECLS
+
+struct ibv_device;
+
+OMPI_DECLSPEC extern
+struct ibv_device **ibv_get_device_list_compat(int *num_devs);
+
+OMPI_DECLSPEC extern
+void ibv_free_device_list_compat(struct ibv_device **ib_devs);
+
+END_C_DECLS
+
+#endif
+
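The point of the compat wrappers above is that callers are insulated from whether the installed libibverbs provides ibv_get_device_list() or only the older ibv_get_devices()/dlist interface. A brief usage sketch (the function name is invented; ibv_get_device_name() is standard verbs):

    /* Sketch: enumerate OpenFabrics devices via the compat wrappers. */
    #include <stdio.h>
    #include <infiniband/verbs.h>
    #include "ompi/mca/common/ofautils/common_ofautils.h"

    static void list_devices(void)
    {
        int i, num_devs = 0;
        struct ibv_device **devs = ibv_get_device_list_compat(&num_devs);

        if (NULL == devs) {
            printf("no OpenFabrics devices found\n");
            return;
        }
        for (i = 0; i < num_devs; ++i) {
            printf("device %d: %s\n", i, ibv_get_device_name(devs[i]));
        }
        /* Matches whichever allocation path the wrapper took */
        ibv_free_device_list_compat(devs);
    }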
diff --git a/ompi/mca/common/ofautils/configure.m4 b/ompi/mca/common/ofautils/configure.m4
new file mode 100644
index 0000000000..0fe8b9295f
--- /dev/null
+++ b/ompi/mca/common/ofautils/configure.m4
@@ -0,0 +1,43 @@
+# -*- shell-script -*-
+#
+# Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
+#                         University Research and Technology
+#                         Corporation.  All rights reserved.
+# Copyright (c) 2004-2005 The University of Tennessee and The University
+#                         of Tennessee Research Foundation.  All rights
+#                         reserved.
+# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
+#                         University of Stuttgart.  All rights reserved.
+# Copyright (c) 2004-2005 The Regents of the University of California.
+#                         All rights reserved.
+# Copyright (c) 2007-2009 Cisco Systems, Inc.  All rights reserved.
+# Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
+# Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
+# $COPYRIGHT$
+#
+# Additional copyrights may follow
+#
+# $HEADER$
+#
+
+# MCA_ompi_common_ofautils_CONFIG([action-if-can-compile],
+#                                 [action-if-cant-compile])
+# ------------------------------------------------
+AC_DEFUN([MCA_ompi_common_ofautils_CONFIG],[
+    AC_CONFIG_FILES([ompi/mca/common/ofautils/Makefile])
+    common_ofautils_happy="no"
+    OMPI_CHECK_OPENFABRICS([common_ofautils],
+                           [common_ofautils_happy="yes"])
+
+    AS_IF([test "$common_ofautils_happy" = "yes"],
+          [common_ofautils_WRAPPER_EXTRA_LDFLAGS="$common_ofautils_LDFLAGS"
+           common_ofautils_WRAPPER_EXTRA_LIBS="$common_ofautils_LIBS"
+           $1],
+          [$2])
+
+    # substitute in the things needed to build ofautils
+    AC_SUBST([common_ofautils_CFLAGS])
+    AC_SUBST([common_ofautils_CPPFLAGS])
+    AC_SUBST([common_ofautils_LDFLAGS])
+    AC_SUBST([common_ofautils_LIBS])
+])dnl
diff --git a/ompi/mca/common/ofautils/configure.params b/ompi/mca/common/ofautils/configure.params
new file mode 100644
index 0000000000..b74c3bfb3f
--- /dev/null
+++ b/ompi/mca/common/ofautils/configure.params
@@ -0,0 +1,26 @@
+# -*- shell-script -*-
+#
+# Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
+#                         University Research and Technology
+#                         Corporation.  All rights reserved.
+# Copyright (c) 2004-2005 The University of Tennessee and The University
+#                         of Tennessee Research Foundation.  All rights
+#                         reserved.
+# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
+#                         University of Stuttgart.  All rights reserved.
+# Copyright (c) 2004-2005 The Regents of the University of California.
+#                         All rights reserved.
+# Copyright (c) 2007      Los Alamos National Security, LLC.  All rights
+#                         reserved.
+# Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
+# Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
+# $COPYRIGHT$
+#
+# Additional copyrights may follow
+#
+# $HEADER$
+#
+
+# Specific to this module
+
+PARAM_CONFIG_FILES="Makefile"
diff --git a/ompi/mca/dpm/dpm.h b/ompi/mca/dpm/dpm.h
index c030b22219..6a343cf2c6 100644
--- a/ompi/mca/dpm/dpm.h
+++ b/ompi/mca/dpm/dpm.h
@@ -56,6 +56,10 @@ BEGIN_C_DECLS
 /* common sm component query result index */
 #define OMPI_RML_TAG_COMMON_SM_COMP_INDEX   OMPI_RML_TAG_BASE+10
 
+/* OFACM RML TAGs */
+#define OMPI_RML_TAG_OFACM                  OMPI_RML_TAG_BASE+11
+#define OMPI_RML_TAG_XOFACM                 OMPI_RML_TAG_BASE+12
+
 #define OMPI_RML_TAG_DYNAMIC                OMPI_RML_TAG_BASE+200
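The two new tags give the OOB and XOOB connection managers dedicated out-of-band channels over the runtime's messaging layer. As a rough sketch of how a CPC consumes such a tag with the ORTE RML API of this era (the callback name is invented, and the exact recv_buffer_nb signature should be verified against orte/mca/rml):

    /* Sketch: post a persistent non-blocking receive on the OFACM tag. */
    #include "orte/mca/rml/rml.h"
    #include "ompi/mca/dpm/dpm.h"

    static void ofacm_oob_recv_cb(int status, orte_process_name_t *sender,
                                  opal_buffer_t *buffer, orte_rml_tag_t tag,
                                  void *cbdata)
    {
        /* Unpack the connection request/reply carried in 'buffer' and
           advance the matching connection context's state machine */
    }

    static int post_ofacm_recv(void)
    {
        /* Persistent: the receive stays posted for subsequent messages */
        return orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD,
                                       OMPI_RML_TAG_OFACM,
                                       ORTE_RML_PERSISTENT,
                                       ofacm_oob_recv_cb,
                                       NULL);
    }

Keeping OFACM and XOFACM on distinct tags lets the OOB and XOOB wire-up protocols coexist in one job without demultiplexing inside a shared callback.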