Signed-off-by: Barton Chittenden <bartonski@gmail.com>
This commit is contained in:
Barton Chittenden 2019-10-09 20:35:27 -04:00, committed by Jeff Squyres
parent 451cbdcc7a
commit 47816ef83b
27 changed files with 2 additions and 4485 deletions

README (36 changed lines)
View File

@@ -614,7 +614,6 @@ MPI Functionality and Features
- cm (see list (1) of supported MTLs, below)
- ob1 (see list (2) of supported BTLs, below)
- ucx
- yalla
(1) The cm PML and the following MTLs support MPI_THREAD_MULTIPLE:
- ofi (Libfabric)
@@ -693,7 +692,7 @@ Network Support
---------------
- There are several main MPI network models available: "ob1", "cm",
"ucx", and "yalla". "ob1" uses BTL ("Byte Transfer Layer")
and "ucx". "ob1" uses BTL ("Byte Transfer Layer")
components for each supported network. "cm" uses MTL ("Matching
Transport Layer") components for each supported network. "ucx" uses
the OpenUCX transport.
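A quick way to see which of these components are present in a given Open MPI build is to query ompi_info; a sketch (the exact output format varies between releases):

shell$ ompi_info | grep -E "MCA (pml|mtl|btl)"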
@@ -745,10 +744,7 @@ Network Support
transport devices are available (e.g., Cray uGNI), use the "cm"
PML and a single appropriate corresponding "mtl" module.
3. If MXM/InfiniBand devices are available, use the "yalla" PML
(NOTE: the "yalla"/MXM PML is deprecated -- see below).
4. Otherwise, use the ob1 PML and one or more appropriate "btl"
3. Otherwise, use the ob1 PML and one or more appropriate "btl"
modules.
Users can override Open MPI's default selection algorithms and force
@@ -762,24 +758,9 @@ Network Support
or
shell$ mpirun --mca pml ucx ...
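The same MCA parameters can also be set in the environment rather than on each mpirun command line; a sketch, assuming a Bourne-style shell:

shell$ export OMPI_MCA_pml=ucx
shell$ mpirun ...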
As alluded to above, there is actually a fourth MPI point-to-point
transport, but it is deprecated and will likely be removed in a
future Open MPI release:
- "yalla" uses the Mellanox MXM transport library. MXM is the
deprecated Mellanox Messaging Accelerator library, utilizing a
full range of IB transports to provide the following messaging
services to the upper level MPI/OpenSHMEM libraries. MXM is only
included in this release of Open MPI for backwards compatibility;
the "ucx" PML should be used insead.
- The main OpenSHMEM network model is "ucx"; it interfaces directly
with UCX.
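The UCX model can also be requested explicitly when launching OpenSHMEM programs; a sketch using Open MPI's oshrun launcher and the spml framework selection parameter:

shell$ oshrun --mca spml ucx ...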
The "ikrit" OpenSHMEM network model is also available, but is
deprecated; it uses the deprecated Mellanox Message Accelerator
(MXM) library.
- In prior versions of Open MPI, InfiniBand and RoCE support was
provided through the openib BTL and ob1 PML plugins. Starting with
Open MPI 4.0.0, InfiniBand support through the openib plugin is both
@@ -1115,19 +1096,6 @@ NETWORKING SUPPORT / OPTIONS
directory>/lib64, which covers most cases. This option is only
needed for special configurations.
--with-mxm=<directory>
Specify the directory where the Mellanox MXM library and header
files are located. This option is generally only necessary if the
MXM headers and libraries are not in default compiler/linker search
paths.
MXM is the support library for Mellanox Network adapters.
--with-mxm-libdir=<directory>
Look in directory for the MXM libraries. By default, Open MPI will
look in <mxm directory>/lib and <mxm directory>/lib64, which covers
most cases. This option is only needed for special configurations.
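With the MXM options above removed, a build that previously pointed configure at MXM would instead point it at UCX through the analogous --with-ucx option; a sketch with placeholder install paths:

shell$ ./configure --prefix=/opt/openmpi --with-ucx=/opt/ucx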
--with-portals4=<directory>
Specify the directory where the Portals4 libraries and header files
are located. This option is generally only necessary if the Portals4

View File

@@ -1,94 +0,0 @@
dnl -*- shell-script -*-
dnl
dnl Copyright (c) 2001-2011 Mellanox Technologies Ltd. ALL RIGHTS RESERVED.
dnl Copyright (c) 2015 Research Organization for Information Science
dnl and Technology (RIST). All rights reserved.
dnl Copyright (c) 2016 Los Alamos National Security, LLC. All rights
dnl reserved.
dnl Copyright (c) 2016 Cisco Systems, Inc. All rights reserved.
dnl $COPYRIGHT$
dnl
dnl Additional copyrights may follow
dnl
dnl $HEADER$
dnl
# OMPI_CHECK_MXM(prefix, [action-if-found], [action-if-not-found])
# --------------------------------------------------------
# check if MXM support can be found. sets prefix_{CPPFLAGS,
# LDFLAGS, LIBS} as needed and runs action-if-found if there is
# support, otherwise executes action-if-not-found
AC_DEFUN([OMPI_CHECK_MXM],[
if test -z "$ompi_check_mxm_happy" ; then
AC_ARG_WITH([mxm],
[AC_HELP_STRING([--with-mxm(=DIR)],
[Build Mellanox Messaging support, optionally adding
DIR/include and DIR/lib or DIR/lib64 to the search path for headers and libraries])])
AC_ARG_WITH([mxm-libdir],
[AC_HELP_STRING([--with-mxm-libdir=DIR],
[Search for Mellanox MXM libraries in DIR])])
OPAL_CHECK_WITHDIR([mxm-libdir], [$with_mxm_libdir], [libmxm.*])
ompi_check_mxm_$1_save_CPPFLAGS="$CPPFLAGS"
ompi_check_mxm_$1_save_LDFLAGS="$LDFLAGS"
ompi_check_mxm_$1_save_LIBS="$LIBS"
AS_IF([test "$with_mxm" != "no"],
[AS_IF([test ! -z "$with_mxm" && test "$with_mxm" != "yes"],
[
ompi_check_mxm_dir="$with_mxm"
])
AS_IF([test ! -z "$with_mxm_libdir" && test "$with_mxm_libdir" != "yes"],
[ompi_check_mxm_libdir="$with_mxm_libdir"])
OPAL_CHECK_PACKAGE([ompi_check_mxm],
[mxm/api/mxm_api.h],
[mxm],
[mxm_cleanup],
[],
[$ompi_check_mxm_dir],
[$ompi_check_mxm_libdir],
[ompi_check_mxm_happy="yes"],
[ompi_check_mxm_happy="no"])],
[ompi_check_mxm_happy="no"])
CPPFLAGS="$ompi_check_mxm_$1_save_CPPFLAGS"
LDFLAGS="$ompi_check_mxm_$1_save_LDFLAGS"
LIBS="$ompi_check_mxm_$1_save_LIBS"
AC_MSG_CHECKING(for MXM version compatibility)
AC_REQUIRE_CPP
old_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS -I$ompi_check_mxm_dir/include"
AC_COMPILE_IFELSE(
[AC_LANG_PROGRAM([[#include <mxm/api/mxm_version.h>]],
[[
#ifndef MXM_VERSION
#error "MXM Version is less than 2.1, please upgrade"
#endif
#
#if MXM_API < MXM_VERSION(2,1)
#error "MXM Version is less than 2.1, please upgrade"
#endif
]])],
[ompi_mxm_version_ok="yes"],
[ompi_mxm_version_ok="no"])
AC_MSG_RESULT([$ompi_mxm_version_ok])
CFLAGS=$old_CFLAGS
AS_IF([test "$ompi_mxm_version_ok" = "no"], [ompi_check_mxm_happy="no"])
OPAL_SUMMARY_ADD([[Transports]],[[Mellanox MXM]],[$1],[$ompi_check_mxm_happy])
fi
AS_IF([test "$ompi_check_mxm_happy" = "yes"],
[$1_LDFLAGS="[$]$1_LDFLAGS $ompi_check_mxm_LDFLAGS"
$1_LIBS="[$]$1_LIBS $ompi_check_mxm_LIBS"
$1_CPPFLAGS="[$]$1_CPPFLAGS $ompi_check_mxm_CPPFLAGS"
$2],
[AS_IF([test ! -z "$with_mxm" && test "$with_mxm" != "no"],
[AC_MSG_ERROR([MXM support requested but not found. Aborting])])
$3])
])
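For reference, the configure switches implemented by this macro were driven from the command line roughly as follows (the MXM install path is a placeholder):

shell$ ./configure --with-mxm=/opt/mellanox/mxm --with-mxm-libdir=/opt/mellanox/mxm/lib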

View File

@@ -1,49 +0,0 @@
#
# Copyright (c) 2001-2014 Mellanox Technologies Ltd. ALL RIGHTS RESERVED.
# Copyright (c) 2015 Research Organization for Information Science
# and Technology (RIST). All rights reserved.
# Copyright (c) 2017 IBM Corporation. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
# Make the output library in this directory, and name it either
# mca_<type>_<name>.la (for DSO builds) or libmca_<type>_<name>.la
# (for static builds).
AM_CPPFLAGS = $(pml_yalla_CPPFLAGS)
local_sources = \
pml_yalla.h \
pml_yalla.c \
pml_yalla_request.h \
pml_yalla_request.c \
pml_yalla_datatype.h \
pml_yalla_datatype.c \
pml_yalla_freelist.h \
pml_yalla_component.c
if MCA_BUILD_ompi_pml_yalla_DSO
component_noinst =
component_install = mca_pml_yalla.la
else
component_noinst = libmca_pml_yalla.la
component_install =
endif
mcacomponentdir = $(ompilibdir)
mcacomponent_LTLIBRARIES = $(component_install)
mca_pml_yalla_la_SOURCES = $(local_sources)
mca_pml_yalla_la_LIBADD = $(top_builddir)/ompi/lib@OMPI_LIBMPI_NAME@.la \
$(pml_yalla_LIBS)
mca_pml_yalla_la_LDFLAGS = -module -avoid-version $(pml_yalla_LDFLAGS)
noinst_LTLIBRARIES = $(component_noinst)
libmca_pml_yalla_la_SOURCES = $(local_sources)
libmca_pml_yalla_la_LIBADD = $(pml_yalla_LIBS)
libmca_pml_yalla_la_LDFLAGS = -module -avoid-version $(pml_yalla_LDFLAGS)
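As the comment at the top of this Makefile notes, a DSO build produces a standalone loadable module rather than code linked into the main library; a sketch of checking for it in an installed tree, assuming the usual layout in which MCA DSOs land under <prefix>/lib/openmpi:

shell$ ls <prefix>/lib/openmpi | grep mca_pml_yalla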

View File

@@ -1,30 +0,0 @@
#
# Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
AC_DEFUN([MCA_ompi_pml_yalla_POST_CONFIG], [
AS_IF([test "$1" = "1"], [OMPI_REQUIRE_ENDPOINT_TAG([PML])])
])
AC_DEFUN([MCA_ompi_pml_yalla_CONFIG], [
AC_CONFIG_FILES([ompi/mca/pml/yalla/Makefile])
OMPI_CHECK_MXM([pml_yalla],
[pml_yalla_happy="yes"],
[pml_yalla_happy="no"])
AS_IF([test "$pml_yalla_happy" = "yes"],
[$1],
[$2])
# substitute in the things needed to build mxm
AC_SUBST([pml_yalla_CPPFLAGS])
AC_SUBST([pml_yalla_LDFLAGS])
AC_SUBST([pml_yalla_LIBS])
])
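Independently of whether MXM is detected, the component could also be excluded outright at configure time; a sketch using Open MPI's generic component-exclusion switch:

shell$ ./configure --enable-mca-no-build=pml-yalla ...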

View File

@@ -1,7 +0,0 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: MELLANOX
status: maintenance

View File

@@ -1,747 +0,0 @@
/*
* Copyright (C) 2001-2011 Mellanox Technologies Ltd. ALL RIGHTS RESERVED.
* Copyright (c) 2015 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2018 Cisco Systems, Inc. All rights reserved
* Copyright (c) 2018 IBM Corporation. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifdef HAVE_ALLOCA_H
#include <alloca.h>
#endif
#include "pml_yalla.h"
#include "pml_yalla_request.h"
#include "opal/runtime/opal.h"
#include "opal/memoryhooks/memory.h"
#include "opal/mca/memory/base/base.h"
#include "opal/mca/pmix/pmix.h"
#include "ompi/mca/pml/base/pml_base_bsend.h"
#include "ompi/message/message.h"
#define MODEX_KEY "yalla-mxm"
mca_pml_yalla_module_t ompi_pml_yalla = {
{
mca_pml_yalla_add_procs,
mca_pml_yalla_del_procs,
mca_pml_yalla_enable,
NULL,
mca_pml_yalla_add_comm,
mca_pml_yalla_del_comm,
mca_pml_yalla_irecv_init,
mca_pml_yalla_irecv,
mca_pml_yalla_recv,
mca_pml_yalla_isend_init,
mca_pml_yalla_isend,
mca_pml_yalla_send,
mca_pml_yalla_iprobe,
mca_pml_yalla_probe,
mca_pml_yalla_start,
mca_pml_yalla_improbe,
mca_pml_yalla_mprobe,
mca_pml_yalla_imrecv,
mca_pml_yalla_mrecv,
mca_pml_yalla_dump,
NULL, /* FT */
1ul << ((sizeof(mxm_ctxid_t)*8) - 1),
1ul << ((sizeof(mxm_tag_t)*8 - 1) - 1),
0 /* flags */
},
NULL,
NULL,
NULL,
NULL
};
static int send_ep_address(void)
{
mxm_error_t error;
void *address;
size_t addrlen;
int rc;
addrlen = 0;
error = mxm_ep_get_address(ompi_pml_yalla.mxm_ep, NULL, &addrlen);
PML_YALLA_ASSERT(error == MXM_ERR_BUFFER_TOO_SMALL);
address = alloca(addrlen);
error = mxm_ep_get_address(ompi_pml_yalla.mxm_ep, address, &addrlen);
if (MXM_OK != error) {
PML_YALLA_ERROR("%s", "Failed to get EP address");
return OMPI_ERROR;
}
OPAL_MODEX_SEND(rc, OPAL_PMIX_GLOBAL,
&mca_pml_yalla_component.pmlm_version, address, addrlen);
if (OMPI_SUCCESS != rc) {
PML_YALLA_ERROR("%s", "Open MPI couldn't distribute EP connection details");
return OMPI_ERROR;
}
return OMPI_SUCCESS;
}
static int recv_ep_address(ompi_proc_t *proc, void **address_p, size_t *addrlen_p)
{
int rc;
OPAL_MODEX_RECV(rc, &mca_pml_yalla_component.pmlm_version, &proc->super.proc_name,
address_p, addrlen_p);
if (rc < 0) {
PML_YALLA_ERROR("%s", "Failed to receive EP address");
}
return rc;
}
static void mca_pml_yalla_mem_release_cb(void *buf, size_t length,
void *cbdata, bool from_alloc)
{
mxm_mem_unmap(ompi_pml_yalla.mxm_context, buf, length,
from_alloc ? MXM_MEM_UNMAP_MARK_INVALID : 0);
}
int mca_pml_yalla_open(void)
{
mxm_error_t error;
PML_YALLA_VERBOSE(1, "%s", "mca_pml_yalla_open");
(void)mca_base_framework_open(&opal_memory_base_framework, 0);
/* Set memory hooks */
if ((OPAL_MEMORY_FREE_SUPPORT | OPAL_MEMORY_MUNMAP_SUPPORT) ==
((OPAL_MEMORY_FREE_SUPPORT | OPAL_MEMORY_MUNMAP_SUPPORT) &
opal_mem_hooks_support_level()))
{
PML_YALLA_VERBOSE(1, "%s", "enabling on-demand memory mapping");
opal_setenv("MXM_MPI_MEM_ON_DEMAND_MAP", "y", false, &environ);
ompi_pml_yalla.using_mem_hooks = 1;
} else {
PML_YALLA_VERBOSE(1, "%s", "disabling on-demand memory mapping");
ompi_pml_yalla.using_mem_hooks = 0;
}
opal_setenv("MXM_MPI_SINGLE_THREAD", ompi_mpi_thread_multiple ? "n" : "y",
false, &environ);
/* Read options */
error = mxm_config_read_opts(&ompi_pml_yalla.ctx_opts, &ompi_pml_yalla.ep_opts,
"MPI", NULL, 0);
if (MXM_OK != error) {
return OMPI_ERROR;
}
error = mxm_init(ompi_pml_yalla.ctx_opts, &ompi_pml_yalla.mxm_context);
if (MXM_OK != error) {
return OMPI_ERROR;
}
return OMPI_SUCCESS;
}
int mca_pml_yalla_close(void)
{
PML_YALLA_VERBOSE(1, "%s", "mca_pml_yalla_close");
if (ompi_pml_yalla.ctx_opts != NULL) {
mxm_config_free_context_opts(ompi_pml_yalla.ctx_opts);
}
if (ompi_pml_yalla.ep_opts != NULL) {
mxm_config_free_ep_opts(ompi_pml_yalla.ep_opts);
}
if (ompi_pml_yalla.mxm_context != NULL) {
mxm_cleanup(ompi_pml_yalla.mxm_context);
ompi_pml_yalla.mxm_context = NULL;
}
mca_base_framework_close(&opal_memory_base_framework);
return 0;
}
int mca_pml_yalla_init(void)
{
mxm_error_t error;
int rc;
PML_YALLA_VERBOSE(1, "%s", "mca_pml_yalla_init");
if (ompi_pml_yalla.using_mem_hooks) {
opal_mem_hooks_register_release(mca_pml_yalla_mem_release_cb, NULL);
}
error = mxm_ep_create(ompi_pml_yalla.mxm_context, ompi_pml_yalla.ep_opts,
&ompi_pml_yalla.mxm_ep);
if (MXM_OK != error) {
return OMPI_ERROR;
}
rc = send_ep_address();
if (rc < 0) {
return rc;
}
OBJ_CONSTRUCT(&ompi_pml_yalla.send_reqs, mca_pml_yalla_freelist_t);
OBJ_CONSTRUCT(&ompi_pml_yalla.bsend_reqs, mca_pml_yalla_freelist_t);
OBJ_CONSTRUCT(&ompi_pml_yalla.recv_reqs, mca_pml_yalla_freelist_t);
OBJ_CONSTRUCT(&ompi_pml_yalla.convs, mca_pml_yalla_freelist_t);
opal_progress_register(mca_pml_yalla_progress);
ompi_pml_yalla.super.pml_flags |= MCA_PML_BASE_FLAG_REQUIRE_WORLD;
PML_YALLA_VERBOSE(2, "created mxm context %p ep %p", (void *)ompi_pml_yalla.mxm_context,
(void *)ompi_pml_yalla.mxm_ep);
return OMPI_SUCCESS;
}
int mca_pml_yalla_cleanup(void)
{
PML_YALLA_VERBOSE(1, "%s", "mca_pml_yalla_cleanup");
opal_progress_unregister(mca_pml_yalla_progress);
OBJ_DESTRUCT(&ompi_pml_yalla.convs);
OBJ_DESTRUCT(&ompi_pml_yalla.recv_reqs);
OBJ_DESTRUCT(&ompi_pml_yalla.bsend_reqs);
OBJ_DESTRUCT(&ompi_pml_yalla.send_reqs);
if (ompi_pml_yalla.mxm_ep) {
mxm_ep_destroy(ompi_pml_yalla.mxm_ep);
ompi_pml_yalla.mxm_ep = NULL;
}
if (ompi_pml_yalla.using_mem_hooks) {
opal_mem_hooks_unregister_release(mca_pml_yalla_mem_release_cb);
}
return OMPI_SUCCESS;
}
int mca_pml_yalla_add_procs(struct ompi_proc_t **procs, size_t nprocs)
{
size_t i;
int ret;
void *address;
mxm_conn_h conn;
size_t addrlen;
mxm_error_t error;
if (OMPI_SUCCESS != (ret = mca_pml_base_pml_check_selected("yalla",
procs,
nprocs))) {
return ret;
}
for (i = 0; i < nprocs; ++i) {
ret = recv_ep_address(procs[i], &address, &addrlen);
if (ret < 0) {
return ret;
}
if (procs[i]->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_PML]) {
PML_YALLA_VERBOSE(3, "already connected to proc. %s",
OPAL_NAME_PRINT(procs[i]->super.proc_name));
continue;
}
PML_YALLA_VERBOSE(2, "connecting to proc. %s",
OPAL_NAME_PRINT(procs[i]->super.proc_name));
error = mxm_ep_connect(ompi_pml_yalla.mxm_ep, address, &conn);
free(address);
if (MXM_OK != error) {
PML_YALLA_ERROR("%s", "Failed to connect");
return OMPI_ERROR;
}
procs[i]->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_PML] = conn;
}
return OMPI_SUCCESS;
}
int mca_pml_yalla_del_procs(struct ompi_proc_t **procs, size_t nprocs)
{
size_t i;
int ret;
if (ompi_mpi_state >= OMPI_MPI_STATE_FINALIZE_STARTED) {
PML_YALLA_VERBOSE(3, "%s", "using bulk powerdown");
mxm_ep_powerdown(ompi_pml_yalla.mxm_ep);
}
for (i = 0; i < nprocs; ++i) {
mxm_ep_disconnect(procs[i]->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_PML]);
PML_YALLA_VERBOSE(2, "disconnected from rank %s", OPAL_NAME_PRINT(procs[i]->super.proc_name));
procs[i]->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_PML] = NULL;
}
if (OMPI_SUCCESS != (ret = opal_pmix.fence(NULL, 0))) {
return ret;
}
return OMPI_SUCCESS;
}
int mca_pml_yalla_enable(bool enable)
{
mca_pml_yalla_init_reqs();
mca_pml_yalla_init_datatype();
return OMPI_SUCCESS;
}
int mca_pml_yalla_progress(void)
{
mxm_progress(ompi_pml_yalla.mxm_context);
return OMPI_SUCCESS;
}
int mca_pml_yalla_add_comm(struct ompi_communicator_t* comm)
{
mxm_error_t error;
mxm_mq_h mq;
error = mxm_mq_create(ompi_pml_yalla.mxm_context, comm->c_contextid, &mq);
if (MXM_OK != error) {
return OMPI_ERROR;
}
comm->c_pml_comm = (void*)mq;
PML_YALLA_VERBOSE(2, "created mq ctxid %d for comm %s", comm->c_contextid,
comm->c_name);
return OMPI_SUCCESS;
}
int mca_pml_yalla_del_comm(struct ompi_communicator_t* comm)
{
mxm_mq_h mq = (void*)comm->c_pml_comm;
if (ompi_pml_yalla.mxm_context == NULL) {
PML_YALLA_ERROR("%s", "Destroying communicator after MXM context is destroyed");
return OMPI_ERROR;
}
PML_YALLA_VERBOSE(2, "destroying mq ctxid %d of comm %s", comm->c_contextid,
comm->c_name);
mxm_mq_destroy(mq);
return OMPI_SUCCESS;
}
int mca_pml_yalla_irecv_init(void *buf, size_t count, ompi_datatype_t *datatype,
int src, int tag, struct ompi_communicator_t* comm,
struct ompi_request_t **request)
{
mca_pml_yalla_recv_request_t *rreq;
rreq = MCA_PML_YALLA_RREQ_INIT(buf, count, datatype, src, tag, comm,
OMPI_REQUEST_INACTIVE);
rreq->super.ompi.req_persistent = true;
rreq->super.flags = 0;
*request = &rreq->super.ompi;
PML_YALLA_VERBOSE(9, "init recv request %p src %d tag %d comm %s", (void*)(*request),
src, tag, comm->c_name);
return OMPI_SUCCESS;
}
int mca_pml_yalla_irecv(void *buf, size_t count, ompi_datatype_t *datatype,
int src, int tag, struct ompi_communicator_t* comm,
struct ompi_request_t **request)
{
mca_pml_yalla_recv_request_t *rreq;
mxm_error_t error;
rreq = MCA_PML_YALLA_RREQ_INIT(buf, count, datatype, src, tag, comm,
OMPI_REQUEST_ACTIVE);
rreq->super.ompi.req_persistent = false;
rreq->super.flags = 0;
PML_YALLA_VERBOSE(8, "receive request *%p=%p from %d tag %d dtype %s count %zu",
(void *)request, (void *)rreq, src, tag, datatype->name, count);
error = mxm_req_recv(&rreq->mxm);
if (MXM_OK != error) {
return OMPI_ERROR;
}
*request = &rreq->super.ompi;
return OMPI_SUCCESS;
}
int mca_pml_yalla_recv(void *buf, size_t count, ompi_datatype_t *datatype, int src,
int tag, struct ompi_communicator_t* comm,
ompi_status_public_t* status)
{
mxm_recv_req_t rreq;
mxm_error_t error;
int rc;
PML_YALLA_INIT_MXM_RECV_REQ(&rreq, buf, count, datatype, src, tag, comm, recv);
PML_YALLA_INIT_BLOCKING_MXM_RECV_REQ(&rreq);
PML_YALLA_VERBOSE(8, "receive from %d tag %d dtype %s count %zu", src, tag,
datatype->name, count);
error = mxm_req_recv(&rreq);
if (MXM_OK != error) {
return OMPI_ERROR;
}
PML_YALLA_WAIT_MXM_REQ(&rreq.base);
PML_YALLA_VERBOSE(8, "receive completed with status %s source %d rtag %d(%d/0x%x) len %zu",
mxm_error_string(rreq.base.error),
rreq.completion.sender_imm, rreq.completion.sender_tag,
rreq.tag, rreq.tag_mask,
rreq.completion.actual_len);
rc = PML_YALLA_SET_RECV_STATUS(&rreq, rreq.completion.actual_len, status);
PML_YALLA_FREE_BLOCKING_MXM_REQ(&rreq.base);
return rc;
}
int mca_pml_yalla_isend_init(const void *buf, size_t count, ompi_datatype_t *datatype,
int dst, int tag, mca_pml_base_send_mode_t mode,
struct ompi_communicator_t* comm,
struct ompi_request_t **request)
{
mca_pml_yalla_send_request_t *sreq;
sreq = MCA_PML_YALLA_SREQ_INIT((void *)buf, count, datatype, dst, tag, mode, comm,
OMPI_REQUEST_INACTIVE);
sreq->super.ompi.req_persistent = true;
sreq->super.flags = MCA_PML_YALLA_REQUEST_FLAG_SEND;
if (mode == MCA_PML_BASE_SEND_BUFFERED) {
sreq->super.flags |= MCA_PML_YALLA_REQUEST_FLAG_BSEND;
}
*request = &sreq->super.ompi;
PML_YALLA_VERBOSE(9, "init send request %p dst %d tag %d comm %s", (void *)*request,
dst, tag, comm->c_name);
return OMPI_SUCCESS;
}
static int mca_pml_yalla_bsend(mxm_send_req_t *mxm_sreq)
{
mca_pml_yalla_bsend_request_t *bsreq = (mca_pml_yalla_bsend_request_t *)PML_YALLA_FREELIST_GET(&ompi_pml_yalla.bsend_reqs);
mxm_error_t error;
size_t length;
/* Create a new send request using MPI internal buffer */
bsreq->mxm.base.state = mxm_sreq->base.state;
bsreq->mxm.base.mq = mxm_sreq->base.mq;
bsreq->mxm.base.conn = mxm_sreq->base.conn;
bsreq->mxm.base.data_type = MXM_REQ_DATA_BUFFER;
switch (mxm_sreq->base.data_type) {
case MXM_REQ_DATA_BUFFER:
length = mxm_sreq->base.data.buffer.length;
bsreq->mxm.base.data.buffer.ptr = mca_pml_base_bsend_request_alloc_buf(length);
bsreq->mxm.base.data.buffer.length = length;
memcpy(bsreq->mxm.base.data.buffer.ptr, mxm_sreq->base.data.buffer.ptr, length);
break;
case MXM_REQ_DATA_STREAM:
length = mxm_sreq->base.data.stream.length;
bsreq->mxm.base.data.buffer.ptr = mca_pml_base_bsend_request_alloc_buf(length);
bsreq->mxm.base.data.buffer.length = length;
mxm_sreq->base.data.stream.cb(bsreq->mxm.base.data.buffer.ptr, length,
0, mxm_sreq->base.context);
break;
default:
return OMPI_ERROR;
}
bsreq->mxm.opcode = mxm_sreq->opcode;
bsreq->mxm.flags = mxm_sreq->flags;
bsreq->mxm.op.send = mxm_sreq->op.send;
error = mxm_req_send(&bsreq->mxm);
if (MXM_OK != error) {
return OMPI_ERROR;
}
/* Make the completion handler believe it's ok to release the original request */
mxm_sreq->base.state = MXM_REQ_COMPLETED;
return OMPI_SUCCESS;
}
int mca_pml_yalla_isend(const void *buf, size_t count, ompi_datatype_t *datatype,
int dst, int tag, mca_pml_base_send_mode_t mode,
struct ompi_communicator_t* comm,
struct ompi_request_t **request)
{
mca_pml_yalla_send_request_t *sreq;
mxm_error_t error;
int rc;
sreq = MCA_PML_YALLA_SREQ_INIT((void *)buf, count, datatype, dst, tag, mode, comm,
OMPI_REQUEST_ACTIVE);
sreq->super.ompi.req_persistent = false;
sreq->super.flags = 0;
PML_YALLA_VERBOSE(8, "send request *%p=%p to %d mode %d tag %d dtype %s count %zu",
(void *)request, (void *)sreq, dst, mode, tag, datatype->name, count);
if (mode == MCA_PML_BASE_SEND_BUFFERED) {
rc = mca_pml_yalla_bsend(&sreq->mxm);
sreq->super.ompi.req_status.MPI_ERROR = rc;
ompi_request_complete(&sreq->super.ompi, true);
*request = &sreq->super.ompi;
return rc;
}
error = mxm_req_send(&sreq->mxm);
if (MXM_OK != error) {
return OMPI_ERROR;
}
*request = &sreq->super.ompi;
return OMPI_SUCCESS;
}
int mca_pml_yalla_send(const void *buf, size_t count, ompi_datatype_t *datatype, int dst,
int tag, mca_pml_base_send_mode_t mode,
struct ompi_communicator_t* comm)
{
mxm_send_req_t sreq;
mxm_error_t error;
PML_YALLA_INIT_MXM_SEND_REQ(&sreq, (void *)buf, count, datatype, dst, tag, mode, comm, send);
PML_YALLA_INIT_BLOCKING_MXM_SEND_REQ(&sreq);
PML_YALLA_VERBOSE(8, "send to %d tag %d dtype %s count %zu", dst, tag,
datatype->name, count);
if (mode == MCA_PML_BASE_SEND_BUFFERED) {
return mca_pml_yalla_bsend(&sreq);
}
error = mxm_req_send(&sreq);
if (MXM_OK != error) {
return OMPI_ERROR;
}
PML_YALLA_WAIT_MXM_REQ(&sreq.base);
if (MXM_OK != sreq.base.error) {
return OMPI_ERROR;
}
PML_YALLA_FREE_BLOCKING_MXM_REQ(&sreq.base);
return OMPI_SUCCESS;
}
int mca_pml_yalla_iprobe(int src, int tag, struct ompi_communicator_t* comm,
int *matched, ompi_status_public_t* status)
{
mxm_recv_req_t rreq;
mxm_error_t error;
PML_YALLA_INIT_MXM_PROBE_REQ(&rreq, src, tag, comm);
error = mxm_req_probe(&rreq);
switch (error) {
case MXM_OK:
*matched = 1;
PML_YALLA_SET_RECV_STATUS(&rreq, rreq.completion.sender_len, status);
return OMPI_SUCCESS;
case MXM_ERR_NO_MESSAGE:
*matched = 0;
return OMPI_SUCCESS;
default:
return OMPI_ERROR;
}
return OMPI_SUCCESS;
}
int mca_pml_yalla_probe(int src, int tag, struct ompi_communicator_t* comm,
ompi_status_public_t* status)
{
mxm_recv_req_t rreq;
mxm_error_t error;
PML_YALLA_INIT_MXM_PROBE_REQ(&rreq, src, tag, comm);
for (;;) {
error = mxm_req_probe(&rreq);
switch (error) {
case MXM_OK:
PML_YALLA_SET_RECV_STATUS(&rreq, rreq.completion.sender_len, status);
return OMPI_SUCCESS;
case MXM_ERR_NO_MESSAGE:
break;
default:
return OMPI_ERROR;
}
opal_progress();
}
}
int mca_pml_yalla_improbe(int src, int tag, struct ompi_communicator_t* comm,
int *matched, struct ompi_message_t **message,
ompi_status_public_t* status)
{
mxm_recv_req_t rreq;
mxm_message_h mxm_msg;
mxm_error_t error;
PML_YALLA_INIT_MXM_PROBE_REQ(&rreq, src, tag, comm);
error = mxm_req_mprobe(&rreq, &mxm_msg);
switch (error) {
case MXM_OK:
*matched = 1;
PML_YALLA_SET_RECV_STATUS(&rreq, rreq.completion.sender_len, status);
PML_YALLA_SET_MESSAGE(&rreq, comm, mxm_msg, message);
return OMPI_SUCCESS;
case MXM_ERR_NO_MESSAGE:
*matched = 0;
return OMPI_SUCCESS;
default:
return OMPI_ERROR;
}
return OMPI_SUCCESS;
}
int mca_pml_yalla_mprobe(int src, int tag, struct ompi_communicator_t* comm,
struct ompi_message_t **message,
ompi_status_public_t* status)
{
mxm_recv_req_t rreq;
mxm_message_h mxm_msg;
mxm_error_t error;
PML_YALLA_INIT_MXM_PROBE_REQ(&rreq, src, tag, comm);
for (;;) {
error = mxm_req_mprobe(&rreq, &mxm_msg);
switch (error) {
case MXM_OK:
PML_YALLA_SET_RECV_STATUS(&rreq, rreq.completion.sender_len, status);
PML_YALLA_SET_MESSAGE(&rreq, comm, mxm_msg, message);
return OMPI_SUCCESS;
case MXM_ERR_NO_MESSAGE:
break;
default:
return OMPI_ERROR;
}
opal_progress();
}
}
int mca_pml_yalla_imrecv(void *buf, size_t count, ompi_datatype_t *datatype,
struct ompi_message_t **message,
struct ompi_request_t **request)
{
mca_pml_yalla_recv_request_t *rreq;
mxm_error_t error;
rreq = MCA_PML_YALLA_RREQ_INIT(buf, count, datatype, -1, 0, (*message)->comm,
OMPI_REQUEST_ACTIVE);
rreq->super.ompi.req_persistent = false;
rreq->super.flags = 0;
PML_YALLA_VERBOSE(8, "receive request *%p=%p message *%p=%p dtype %s count %zu",
(void *)request, (void *)rreq, (void *)message, (void *)(*message), datatype->name, count);
error = mxm_message_recv(&rreq->mxm, (*message)->req_ptr);
if (MXM_OK != error) {
return OMPI_ERROR;
}
PML_YALLA_MESSAGE_RELEASE(message);
*request = &rreq->super.ompi;
return OMPI_SUCCESS;
}
int mca_pml_yalla_mrecv(void *buf, size_t count, ompi_datatype_t *datatype,
struct ompi_message_t **message,
ompi_status_public_t* status)
{
mxm_recv_req_t rreq;
mxm_error_t error;
PML_YALLA_INIT_MXM_RECV_REQ(&rreq, buf, count, datatype, -1, 0, (*message)->comm, recv);
PML_YALLA_INIT_BLOCKING_MXM_RECV_REQ(&rreq);
PML_YALLA_VERBOSE(8, "receive message *%p=%p dtype %s count %zu", (void *)message,
(void *)*message, datatype->name, count);
error = mxm_message_recv(&rreq, (*message)->req_ptr);
if (MXM_OK != error) {
return OMPI_ERROR;
}
PML_YALLA_MESSAGE_RELEASE(message);
PML_YALLA_WAIT_MXM_REQ(&rreq.base);
PML_YALLA_VERBOSE(8, "receive completed with status %s source %d rtag %d(%d/0x%x) len %zu",
mxm_error_string(rreq.base.error),
rreq.completion.sender_imm, rreq.completion.sender_tag,
rreq.tag, rreq.tag_mask,
rreq.completion.actual_len);
return PML_YALLA_SET_RECV_STATUS(&rreq, rreq.completion.actual_len, status);
}
int mca_pml_yalla_start(size_t count, ompi_request_t** requests)
{
mca_pml_yalla_base_request_t *req;
mxm_error_t error;
size_t i;
int rc;
for (i = 0; i < count; ++i) {
req = (mca_pml_yalla_base_request_t *)requests[i];
if ((req == NULL) || (OMPI_REQUEST_PML != req->ompi.req_type)) {
/* Skip irrelevant requests */
continue;
}
PML_YALLA_ASSERT(req->ompi.req_state != OMPI_REQUEST_INVALID);
PML_YALLA_RESET_OMPI_REQ(&req->ompi, OMPI_REQUEST_ACTIVE);
if (req->flags & MCA_PML_YALLA_REQUEST_FLAG_SEND) {
mca_pml_yalla_send_request_t *sreq;
sreq = (mca_pml_yalla_send_request_t *)req;
PML_YALLA_RESET_PML_REQ(req, PML_YALLA_MXM_REQBASE(sreq));
if (req->flags & MCA_PML_YALLA_REQUEST_FLAG_BSEND) {
PML_YALLA_VERBOSE(8, "start bsend request %p", (void *)sreq);
rc = mca_pml_yalla_bsend(&sreq->mxm);
sreq->super.ompi.req_status.MPI_ERROR = rc;
ompi_request_complete(&sreq->super.ompi, true);
if (OMPI_SUCCESS != rc) {
return rc;
}
} else {
PML_YALLA_VERBOSE(8, "start send request %p", (void *)sreq);
error = mxm_req_send(&sreq->mxm);
if (MXM_OK != error) {
return OMPI_ERROR;
}
}
} else {
mca_pml_yalla_recv_request_t *rreq;
rreq = (mca_pml_yalla_recv_request_t *)req;
PML_YALLA_RESET_PML_REQ(req, PML_YALLA_MXM_REQBASE(rreq));
PML_YALLA_VERBOSE(8, "start recv request %p", (void *)req);
error = mxm_req_recv(&rreq->mxm);
if (MXM_OK != error) {
return OMPI_ERROR;
}
}
}
return OMPI_SUCCESS;
}
int mca_pml_yalla_dump(struct ompi_communicator_t* comm, int verbose)
{
return OMPI_SUCCESS;
}

View File

@@ -1,150 +0,0 @@
/*
* Copyright (C) Mellanox Technologies Ltd. 2001-2011. ALL RIGHTS RESERVED.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef PML_YALLA_H_
#define PML_YALLA_H_
#include "pml_yalla_freelist.h"
#include "ompi_config.h"
#include "ompi/request/request.h"
#include "ompi/mca/pml/pml.h"
#include "ompi/mca/pml/base/base.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/communicator/communicator.h"
#include "ompi/request/request.h"
#include <mxm/api/mxm_api.h>
typedef struct mca_pml_yalla_module mca_pml_yalla_module_t;
typedef struct pml_yalla_base_request mca_pml_yalla_base_request_t;
typedef struct pml_yalla_send_request mca_pml_yalla_send_request_t;
typedef struct pml_yalla_bsend_request mca_pml_yalla_bsend_request_t;
typedef struct pml_yalla_recv_request mca_pml_yalla_recv_request_t;
typedef struct pml_yalla_convertor mca_pml_yalla_convertor_t;
#if MXM_API < MXM_VERSION(2,0)
# error "MXM 2.0 or above is required"
#endif
struct mca_pml_yalla_module {
mca_pml_base_module_t super;
/* MXM global objects */
mxm_context_opts_t *ctx_opts;
mxm_ep_opts_t *ep_opts;
mxm_h mxm_context;
mxm_ep_h mxm_ep;
/* MXM requests */
mca_pml_yalla_freelist_t send_reqs;
mca_pml_yalla_freelist_t bsend_reqs;
mca_pml_yalla_freelist_t recv_reqs;
/* Convertors pool */
mca_pml_yalla_freelist_t convs;
int using_mem_hooks;
int priority;
int verbose;
int output;
};
extern mca_pml_base_component_2_0_0_t mca_pml_yalla_component;
extern mca_pml_yalla_module_t ompi_pml_yalla;
/* Debugging */
#define PML_YALLA_ENABLE_DEBUG OPAL_ENABLE_DEBUG
#if PML_YALLA_ENABLE_DEBUG
# define PML_YALLA_MAX_VERBOSE 9
# define PML_YALLA_ASSERT(_x) assert(_x)
#else
# define PML_YALLA_MAX_VERBOSE 2
# define PML_YALLA_ASSERT(_x)
#endif
#define PML_YALLA_ERROR(format, ... ) \
opal_output_verbose(0, ompi_pml_yalla.output, "Error: %s:%d - %s() " format, \
__FILE__, __LINE__, __FUNCTION__, ## __VA_ARGS__)
#define PML_YALLA_VERBOSE(_level, format, ... ) \
if (((_level) <= PML_YALLA_MAX_VERBOSE) && ((_level) <= ompi_pml_yalla.verbose)) { \
opal_output_verbose(_level, ompi_pml_yalla.output, "%s:%d - %s() " format, \
__FILE__, __LINE__, __FUNCTION__, ## __VA_ARGS__); \
}
int mca_pml_yalla_open(void);
int mca_pml_yalla_close(void);
int mca_pml_yalla_init(void);
int mca_pml_yalla_cleanup(void);
int mca_pml_yalla_add_procs(struct ompi_proc_t **procs, size_t nprocs);
int mca_pml_yalla_del_procs(struct ompi_proc_t **procs, size_t nprocs);
int mca_pml_yalla_enable(bool enable);
int mca_pml_yalla_progress(void);
int mca_pml_yalla_add_comm(struct ompi_communicator_t* comm);
int mca_pml_yalla_del_comm(struct ompi_communicator_t* comm);
int mca_pml_yalla_irecv_init(void *buf, size_t count, ompi_datatype_t *datatype,
int src, int tag, struct ompi_communicator_t* comm,
struct ompi_request_t **request);
int mca_pml_yalla_irecv(void *buf, size_t count, ompi_datatype_t *datatype,
int src, int tag, struct ompi_communicator_t* comm,
struct ompi_request_t **request);
int mca_pml_yalla_recv(void *buf, size_t count, ompi_datatype_t *datatype, int src,
int tag, struct ompi_communicator_t* comm,
ompi_status_public_t* status);
int mca_pml_yalla_isend_init(const void *buf, size_t count, ompi_datatype_t *datatype,
int dst, int tag, mca_pml_base_send_mode_t mode,
struct ompi_communicator_t* comm,
struct ompi_request_t **request);
int mca_pml_yalla_isend(const void *buf, size_t count, ompi_datatype_t *datatype,
int dst, int tag, mca_pml_base_send_mode_t mode,
struct ompi_communicator_t* comm,
struct ompi_request_t **request);
int mca_pml_yalla_send(const void *buf, size_t count, ompi_datatype_t *datatype, int dst,
int tag, mca_pml_base_send_mode_t mode,
struct ompi_communicator_t* comm);
int mca_pml_yalla_iprobe(int src, int tag, struct ompi_communicator_t* comm,
int *matched, ompi_status_public_t* status);
int mca_pml_yalla_probe(int src, int tag, struct ompi_communicator_t* comm,
ompi_status_public_t* status);
int mca_pml_yalla_improbe(int src, int tag, struct ompi_communicator_t* comm,
int *matched, struct ompi_message_t **message,
ompi_status_public_t* status);
int mca_pml_yalla_mprobe(int src, int tag, struct ompi_communicator_t* comm,
struct ompi_message_t **message,
ompi_status_public_t* status);
int mca_pml_yalla_imrecv(void *buf, size_t count, ompi_datatype_t *datatype,
struct ompi_message_t **message,
struct ompi_request_t **request);
int mca_pml_yalla_mrecv(void *buf, size_t count, ompi_datatype_t *datatype,
struct ompi_message_t **message,
ompi_status_public_t* status);
int mca_pml_yalla_start(size_t count, ompi_request_t** requests);
int mca_pml_yalla_dump(struct ompi_communicator_t* comm, int verbose);
#endif /* PML_YALLA_H_ */

View File

@@ -1,112 +0,0 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (C) Mellanox Technologies Ltd. 2001-2011. ALL RIGHTS RESERVED.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2015 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "pml_yalla.h"
#include "ompi/runtime/mpiruntime.h"
static int mca_pml_yalla_component_register(void);
static int mca_pml_yalla_component_open(void);
static int mca_pml_yalla_component_close(void);
static mca_pml_base_module_t*
mca_pml_yalla_component_init(int* priority, bool enable_progress_threads,
bool enable_mpi_threads);
static int mca_pml_yalla_component_fini(void);
mca_pml_base_component_2_0_0_t mca_pml_yalla_component = {
/* First, the mca_base_component_t struct containing meta
* information about the component itself */
.pmlm_version = {
MCA_PML_BASE_VERSION_2_0_0,
.mca_component_name = "yalla",
MCA_BASE_MAKE_VERSION(component, OMPI_MAJOR_VERSION, OMPI_MINOR_VERSION,
OMPI_RELEASE_VERSION),
.mca_open_component = mca_pml_yalla_component_open,
.mca_close_component = mca_pml_yalla_component_close,
.mca_register_component_params = mca_pml_yalla_component_register,
},
.pmlm_data = {
/* This component is not checkpoint ready */
MCA_BASE_METADATA_PARAM_NONE
},
.pmlm_init = mca_pml_yalla_component_init,
.pmlm_finalize = mca_pml_yalla_component_fini,
};
static int mca_pml_yalla_component_register(void)
{
ompi_pml_yalla.verbose = 0;
(void) mca_base_component_var_register(&mca_pml_yalla_component.pmlm_version, "verbose",
"Verbose level of the yalla component",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_9,
MCA_BASE_VAR_SCOPE_LOCAL,
&ompi_pml_yalla.verbose);
ompi_pml_yalla.priority = 50;
(void) mca_base_component_var_register(&mca_pml_yalla_component.pmlm_version, "priority",
"Priority of the yalla component",
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_3,
MCA_BASE_VAR_SCOPE_LOCAL,
&ompi_pml_yalla.priority);
return 0;
}
static int mca_pml_yalla_component_open(void)
{
ompi_pml_yalla.output = opal_output_open(NULL);
opal_output_set_verbosity(ompi_pml_yalla.output, ompi_pml_yalla.verbose);
return mca_pml_yalla_open();
}
static int mca_pml_yalla_component_close(void)
{
int rc;
rc = mca_pml_yalla_close();
if (rc != 0) {
return rc;
}
opal_output_close(ompi_pml_yalla.output);
return 0;
}
static mca_pml_base_module_t*
mca_pml_yalla_component_init(int* priority, bool enable_progress_threads,
bool enable_mpi_threads)
{
int ret;
if ( (ret = mca_pml_yalla_init()) != 0) {
return NULL;
}
ompi_mpi_dynamics_disable("the Yalla (MXM) PML does not support MPI dynamic process functionality");
*priority = ompi_pml_yalla.priority;
return &ompi_pml_yalla.super;
}
static int mca_pml_yalla_component_fini(void)
{
return mca_pml_yalla_cleanup();
}

View File

@@ -1,166 +0,0 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (C) Mellanox Technologies Ltd. 2001-2011. ALL RIGHTS RESERVED.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "pml_yalla_datatype.h"
#include "pml_yalla_request.h"
static mca_pml_yalla_convertor_t *mca_pml_yalla_get_send_convertor(void *buf, size_t count,
ompi_datatype_t *datatype)
{
mca_pml_yalla_convertor_t *convertor = (mca_pml_yalla_convertor_t *)PML_YALLA_FREELIST_GET(&ompi_pml_yalla.convs);
convertor->datatype = datatype;
OMPI_DATATYPE_RETAIN(datatype);
opal_convertor_copy_and_prepare_for_send(ompi_proc_local_proc->super.proc_convertor,
&datatype->super, count, buf, 0,
&convertor->convertor);
return convertor;
}
static mca_pml_yalla_convertor_t *mca_pml_yalla_get_recv_convertor(void *buf, size_t count,
ompi_datatype_t *datatype)
{
mca_pml_yalla_convertor_t *convertor = (mca_pml_yalla_convertor_t *)PML_YALLA_FREELIST_GET(&ompi_pml_yalla.convs);
convertor->datatype = datatype;
OMPI_DATATYPE_RETAIN(datatype);
opal_convertor_copy_and_prepare_for_recv(ompi_proc_local_proc->super.proc_convertor,
&datatype->super, count, buf, 0,
&convertor->convertor);
return convertor;
}
static void mca_pml_yalla_noncontig_req_init(mxm_req_base_t *mxm_req,
mca_pml_yalla_convertor_t *convertor,
mxm_stream_cb_t stream_cb)
{
mxm_req->data_type = MXM_REQ_DATA_STREAM;
mxm_req->data.stream.cb = stream_cb;
opal_convertor_get_packed_size(&convertor->convertor, &mxm_req->data.stream.length);
}
static size_t mca_pml_yalla_stream_unpack(void *buffer, size_t length, size_t offset,
opal_convertor_t *convertor)
{
uint32_t iov_count;
struct iovec iov;
iov_count = 1;
iov.iov_base = buffer;
iov.iov_len = length;
opal_convertor_set_position(convertor, &offset);
opal_convertor_unpack(convertor, &iov, &iov_count, &length);
return length;
}
static size_t mca_pml_yalla_stream_pack(void *buffer, size_t length, size_t offset,
opal_convertor_t *convertor)
{
uint32_t iov_count;
struct iovec iov;
iov_count = 1;
iov.iov_base = buffer;
iov.iov_len = length;
opal_convertor_set_position(convertor, &offset);
opal_convertor_pack(convertor, &iov, &iov_count, &length);
return length;
}
static size_t mxm_pml_yalla_irecv_stream_cb(void *buffer, size_t length,
size_t offset, void *context)
{
mca_pml_yalla_base_request_t *req = context;
return mca_pml_yalla_stream_unpack(buffer, length, offset, &req->convertor->convertor);
}
static size_t mxm_pml_yalla_recv_stream_cb(void *buffer, size_t length,
size_t offset, void *context)
{
mca_pml_yalla_convertor_t *convertor = context;
return mca_pml_yalla_stream_unpack(buffer, length, offset, &convertor->convertor);
}
static size_t mxm_pml_yalla_isend_stream_cb(void *buffer, size_t length,
size_t offset, void *context)
{
mca_pml_yalla_base_request_t *req = context;
return mca_pml_yalla_stream_pack(buffer, length, offset, &req->convertor->convertor);
}
static size_t mxm_pml_yalla_send_stream_cb(void *buffer, size_t length,
size_t offset, void *context)
{
mca_pml_yalla_convertor_t *convertor = context;
return mca_pml_yalla_stream_pack(buffer, length, offset, &convertor->convertor);
}
void mca_pml_yalla_set_noncontig_data_irecv(mxm_req_base_t *mxm_req, void *buf,
size_t count, ompi_datatype_t *datatype,
mca_pml_yalla_recv_request_t *rreq)
{
rreq->super.convertor = mca_pml_yalla_get_recv_convertor(buf, count, datatype);
mca_pml_yalla_noncontig_req_init(mxm_req, rreq->super.convertor, mxm_pml_yalla_irecv_stream_cb);
}
void mca_pml_yalla_set_noncontig_data_recv(mxm_req_base_t *mxm_req, void *buf,
size_t count, ompi_datatype_t *datatype)
{
mca_pml_yalla_convertor_t *convertor;
convertor = mca_pml_yalla_get_recv_convertor(buf, count, datatype);
mca_pml_yalla_noncontig_req_init(mxm_req, convertor, mxm_pml_yalla_recv_stream_cb);
mxm_req->context = convertor;
}
void mca_pml_yalla_set_noncontig_data_isend(mxm_req_base_t *mxm_req, void *buf,
size_t count, ompi_datatype_t *datatype,
mca_pml_yalla_send_request_t *sreq)
{
sreq->super.convertor = mca_pml_yalla_get_send_convertor(buf, count, datatype);
mca_pml_yalla_noncontig_req_init(mxm_req, sreq->super.convertor, mxm_pml_yalla_isend_stream_cb);
}
void mca_pml_yalla_set_noncontig_data_send(mxm_req_base_t *mxm_req, void *buf,
size_t count, ompi_datatype_t *datatype)
{
mca_pml_yalla_convertor_t *convertor;
convertor = mca_pml_yalla_get_send_convertor(buf, count, datatype);
mca_pml_yalla_noncontig_req_init(mxm_req, convertor, mxm_pml_yalla_send_stream_cb);
mxm_req->context = convertor;
}
static void mca_pml_yalla_convertor_construct(mca_pml_yalla_convertor_t *convertor)
{
OBJ_CONSTRUCT(&convertor->convertor, opal_convertor_t);
}
static void mca_pml_yalla_convertor_destruct(mca_pml_yalla_convertor_t *convertor)
{
OBJ_DESTRUCT(&convertor->convertor);
}
void mca_pml_yalla_init_datatype(void)
{
PML_YALLA_FREELIST_INIT(&ompi_pml_yalla.convs, mca_pml_yalla_convertor_t,
128, -1, 128);
}
OBJ_CLASS_INSTANCE(mca_pml_yalla_convertor_t,
opal_free_list_item_t,
mca_pml_yalla_convertor_construct,
mca_pml_yalla_convertor_destruct);

View File

@@ -1,76 +0,0 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (C) Mellanox Technologies Ltd. 2001-2011. ALL RIGHTS RESERVED.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2017 IBM Corporation. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef PML_YALLA_DATATYPE_H_
#define PML_YALLA_DATATYPE_H_
#include "pml_yalla.h"
struct pml_yalla_convertor {
opal_free_list_item_t super;
ompi_datatype_t *datatype;
opal_convertor_t convertor;
};
OBJ_CLASS_DECLARATION(mca_pml_yalla_convertor_t);
#define PML_YALLA_INIT_MXM_REQ_DATA(_req_base, _buf, _count, _dtype, _stream_type, ...) \
{ \
ptrdiff_t span, gap; \
\
if (opal_datatype_is_contiguous_memory_layout(&(_dtype)->super, _count)) { \
span = opal_datatype_span(&(_dtype)->super, (_count), &gap); \
(_req_base)->data_type = MXM_REQ_DATA_BUFFER; \
(_req_base)->data.buffer.ptr = (char *)_buf + gap; \
(_req_base)->data.buffer.length = span; \
} else { \
mca_pml_yalla_set_noncontig_data_ ## _stream_type(_req_base, \
_buf, _count, \
_dtype, ## __VA_ARGS__); \
} \
}
#define PML_YALLA_RESET_PML_REQ_DATA(_pml_req) \
{ \
if ((_pml_req)->convertor != NULL) { \
size_t _position = 0; \
opal_convertor_set_position(&(_pml_req)->convertor->convertor, &_position); \
} \
}
static inline void mca_pml_yalla_convertor_free(mca_pml_yalla_convertor_t *convertor)
{
opal_convertor_cleanup(&convertor->convertor);
OMPI_DATATYPE_RELEASE(convertor->datatype);
PML_YALLA_FREELIST_RETURN(&ompi_pml_yalla.convs, &convertor->super);
}
void mca_pml_yalla_set_noncontig_data_irecv(mxm_req_base_t *mxm_req, void *buf,
size_t count, ompi_datatype_t *datatype,
mca_pml_yalla_recv_request_t *rreq);
void mca_pml_yalla_set_noncontig_data_recv(mxm_req_base_t *mxm_req, void *buf,
size_t count, ompi_datatype_t *datatype);
void mca_pml_yalla_set_noncontig_data_isend(mxm_req_base_t *mxm_req, void *buf,
size_t count, ompi_datatype_t *datatype,
mca_pml_yalla_send_request_t *sreq);
void mca_pml_yalla_set_noncontig_data_send(mxm_req_base_t *mxm_req, void *buf,
size_t count, ompi_datatype_t *datatype);
void mca_pml_yalla_init_datatype(void);
#endif /* PML_YALLA_DATATYPE_H_ */

View File

@@ -1,34 +0,0 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (C) Mellanox Technologies Ltd. 2001-2011. ALL RIGHTS RESERVED.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef PML_YALLA_FREELIST_H_
#define PML_YALLA_FREELIST_H_
#include "ompi_config.h"
#include "opal/class/opal_free_list.h"
#define mca_pml_yalla_freelist_t opal_free_list_t
#define PML_YALLA_FREELIST_GET(_freelist) \
opal_free_list_get (_freelist);\
#define PML_YALLA_FREELIST_RETURN(_freelist, _item) \
{ \
opal_free_list_return (_freelist, _item); \
}
#define PML_YALLA_FREELIST_INIT(_fl, _type, _initial, _max, _batch) \
opal_free_list_init(_fl, sizeof(_type), 8, OBJ_CLASS(_type), \
0, 0, _initial, _max, _batch, NULL, 0, NULL, NULL, NULL)
#endif /* PML_YALLA_FREELIST_H_ */

View File

@@ -1,279 +0,0 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (C) Mellanox Technologies Ltd. 2001-2011. ALL RIGHTS RESERVED.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "pml_yalla_request.h"
#include "ompi/mca/pml/base/pml_base_bsend.h"
#include "ompi/message/message.h"
static inline void mca_pml_yalla_request_release(mca_pml_yalla_base_request_t *req,
mca_pml_yalla_freelist_t *fl)
{
if (req->convertor != NULL) {
mca_pml_yalla_convertor_free(req->convertor);
req->convertor = NULL;
}
OBJ_RELEASE(req->ompi.req_mpi_object.comm);
#if PML_YALLA_ENABLE_DEBUG
req->ompi.req_state = OMPI_REQUEST_INVALID;
#endif
PML_YALLA_FREELIST_RETURN(fl, &req->ompi.super);
}
static inline int
mca_pml_yalla_check_request_state(mca_pml_yalla_base_request_t *req, mxm_req_base_t *mxm_base)
{
if (mxm_base->state != MXM_REQ_COMPLETED) {
PML_YALLA_VERBOSE(8, "request %p free called before completed", (void*)req);
req->flags |= MCA_PML_YALLA_REQUEST_FLAG_FREE_CALLED;
return 0;
}
return 1;
}
static int mca_pml_yalla_send_request_free(ompi_request_t **request)
{
mca_pml_yalla_send_request_t *sreq = (mca_pml_yalla_send_request_t*)(*request);
PML_YALLA_VERBOSE(9, "free send request *%p=%p", (void *)request, (void *)*request);
if (mca_pml_yalla_check_request_state(&sreq->super, PML_YALLA_MXM_REQBASE(sreq))) {
mca_pml_yalla_request_release(&sreq->super, &ompi_pml_yalla.send_reqs);
}
*request = MPI_REQUEST_NULL;
return OMPI_SUCCESS;
}
static int mca_pml_yalla_send_request_cancel(ompi_request_t *request, int flag)
{
mca_pml_yalla_send_request_t *sreq = (mca_pml_yalla_send_request_t*)request;
mxm_error_t error;
if (REQUEST_COMPLETE(request)) {
/*
* This might be a buffered send request which has completed anyway, so
* we cannot cancel it anymore. Just hope for the best.
*/
PML_YALLA_VERBOSE(7, "not canceling a completed send request %p", (void *)request);
return OMPI_SUCCESS;
}
error = mxm_req_cancel_send(&sreq->mxm);
if ((error != MXM_OK) && (error != MXM_ERR_NO_PROGRESS)) {
PML_YALLA_ERROR("failed to cancel send request %p: %s", (void *)request,
mxm_error_string(error));
return OMPI_ERROR;
}
PML_YALLA_VERBOSE(9, "canceled send request %p", (void *)request);
return OMPI_SUCCESS;
}
static int mca_pml_yalla_recv_request_free(ompi_request_t **request)
{
mca_pml_yalla_recv_request_t *rreq = (mca_pml_yalla_recv_request_t*)(*request);
PML_YALLA_VERBOSE(9, "free receive request *%p=%p", (void *)request, (void *)*request);
if (mca_pml_yalla_check_request_state(&rreq->super, PML_YALLA_MXM_REQBASE(rreq))) {
mca_pml_yalla_request_release(&rreq->super, &ompi_pml_yalla.recv_reqs);
}
*request = MPI_REQUEST_NULL;
return OMPI_SUCCESS;
}
static int mca_pml_yalla_recv_request_cancel(ompi_request_t *request, int flag)
{
mca_pml_yalla_recv_request_t *rreq = (mca_pml_yalla_recv_request_t*)request;
mxm_error_t error;
error = mxm_req_cancel_recv(&rreq->mxm);
if ((error != MXM_OK) && (error != MXM_ERR_NO_PROGRESS)) {
PML_YALLA_ERROR("failed to cancel receive request %p: %s", (void *)request,
mxm_error_string(error));
return OMPI_ERROR;
}
PML_YALLA_VERBOSE(9, "canceled receive request %p", (void *)request);
return OMPI_SUCCESS;
}
static void init_mxm_base_req(mxm_req_base_t *mxm_req_base)
{
mxm_req_base->state = MXM_REQ_NEW;
mxm_req_base->mq = NULL;
mxm_req_base->conn = NULL;
mxm_req_base->data_type = MXM_REQ_DATA_BUFFER;
mxm_req_base->data.buffer.ptr = NULL;
mxm_req_base->data.buffer.length = 0;
mxm_req_base->data.buffer.memh = 0;
mxm_req_base->context = NULL;
mxm_req_base->completed_cb = NULL;
}
static void init_mxm_send_req(mxm_send_req_t *mxm_sreq)
{
init_mxm_base_req(&mxm_sreq->base);
mxm_sreq->opcode = MXM_REQ_OP_SEND;
mxm_sreq->op.send.imm_data = 0;
mxm_sreq->op.send.tag = 0;
#if defined(MXM_REQ_SEND_FLAG_REENTRANT)
mxm_sreq->flags = MXM_REQ_SEND_FLAG_REENTRANT;
#else
mxm_sreq->flags = 0;
#endif
}
static void init_mxm_recv_req(mxm_recv_req_t *mxm_rreq)
{
init_mxm_base_req(&mxm_rreq->base);
mxm_rreq->tag = 0;
mxm_rreq->tag_mask = 0x7fffffff;
}
static void init_base_req(mca_pml_yalla_base_request_t *req)
{
OMPI_REQUEST_INIT(&req->ompi, false);
req->ompi.req_type = OMPI_REQUEST_PML;
req->ompi.req_start = mca_pml_yalla_start;
req->ompi.req_cancel = NULL;
req->ompi.req_complete_cb = NULL;
req->ompi.req_complete_cb_data = NULL;
req->convertor = NULL;
}
static void mca_pml_yalla_send_completion_cb(void *context)
{
mca_pml_yalla_send_request_t* sreq = context;
switch (sreq->mxm.base.error) {
case MXM_OK:
sreq->super.ompi.req_status.MPI_ERROR = OMPI_SUCCESS;
break;
case MXM_ERR_CANCELED:
sreq->super.ompi.req_status._cancelled = true;
break;
default:
sreq->super.ompi.req_status.MPI_ERROR = MPI_ERR_INTERN;
break;
}
PML_YALLA_VERBOSE(8, "send request %p completed with status %s", (void *)sreq,
mxm_error_string(sreq->mxm.base.error));
ompi_request_complete(&sreq->super.ompi, true);
if (sreq->super.flags & MCA_PML_YALLA_REQUEST_FLAG_FREE_CALLED) {
PML_YALLA_VERBOSE(7, "release request %p because free was already called", (void *)sreq);
mca_pml_yalla_request_release(&sreq->super, &ompi_pml_yalla.send_reqs);
}
}
static void mca_pml_yalla_bsend_completion_cb(void *context)
{
mca_pml_yalla_bsend_request_t *bsreq = context;
PML_YALLA_VERBOSE(8, "bsend request %p completed with status %s", (void *)bsreq,
mxm_error_string(bsreq->mxm.base.error));
mca_pml_base_bsend_request_free(bsreq->mxm.base.data.buffer.ptr);
PML_YALLA_FREELIST_RETURN(&ompi_pml_yalla.bsend_reqs, &bsreq->super);
}
static void mca_pml_yalla_recv_completion_cb(void *context)
{
mca_pml_yalla_recv_request_t* rreq = context;
PML_YALLA_SET_RECV_STATUS(&rreq->mxm, rreq->mxm.completion.actual_len,
&rreq->super.ompi.req_status);
PML_YALLA_VERBOSE(8, "receive request %p completed with status %s source %d rtag %d(%d/0x%x) len %zu",
(void *)rreq, mxm_error_string(rreq->mxm.base.error),
rreq->mxm.completion.sender_imm, rreq->mxm.completion.sender_tag,
rreq->mxm.tag, rreq->mxm.tag_mask,
rreq->mxm.completion.actual_len);
ompi_request_complete(&rreq->super.ompi, true);
if (rreq->super.flags & MCA_PML_YALLA_REQUEST_FLAG_FREE_CALLED) {
PML_YALLA_VERBOSE(7, "release request %p because free was already called", (void *)rreq);
mca_pml_yalla_request_release(&rreq->super, &ompi_pml_yalla.recv_reqs);
}
}
static void mca_pml_yalla_send_request_construct(mca_pml_yalla_send_request_t* sreq)
{
init_base_req(&sreq->super);
init_mxm_send_req(&sreq->mxm);
sreq->super.ompi.req_free = mca_pml_yalla_send_request_free;
sreq->super.ompi.req_cancel = mca_pml_yalla_send_request_cancel;
sreq->mxm.base.context = sreq;
sreq->mxm.base.completed_cb = mca_pml_yalla_send_completion_cb;
}
static void mca_pml_yalla_send_request_destruct(mca_pml_yalla_send_request_t *sreq)
{
OMPI_REQUEST_FINI(&sreq->super.ompi);
}
static void mca_pml_yalla_bsend_request_construct(mca_pml_yalla_bsend_request_t* bsreq)
{
init_mxm_send_req(&bsreq->mxm);
bsreq->mxm.base.context = bsreq;
bsreq->mxm.base.completed_cb = mca_pml_yalla_bsend_completion_cb;
}
static void mca_pml_yalla_recv_request_construct(mca_pml_yalla_recv_request_t* rreq)
{
init_base_req(&rreq->super);
init_mxm_recv_req(&rreq->mxm);
rreq->super.ompi.req_free = mca_pml_yalla_recv_request_free;
rreq->super.ompi.req_cancel = mca_pml_yalla_recv_request_cancel;
rreq->mxm.base.context = rreq;
rreq->mxm.base.completed_cb = mca_pml_yalla_recv_completion_cb;
}
static void mca_pml_yalla_recv_request_destruct(mca_pml_yalla_recv_request_t *rreq)
{
OMPI_REQUEST_FINI(&rreq->super.ompi);
}
void mca_pml_yalla_init_reqs(void)
{
PML_YALLA_FREELIST_INIT(&ompi_pml_yalla.send_reqs, mca_pml_yalla_send_request_t,
128, -1, 128);
PML_YALLA_FREELIST_INIT(&ompi_pml_yalla.bsend_reqs, mca_pml_yalla_bsend_request_t,
128, -1, 128);
PML_YALLA_FREELIST_INIT(&ompi_pml_yalla.recv_reqs, mca_pml_yalla_recv_request_t,
128, -1, 128);
}
OBJ_CLASS_INSTANCE(mca_pml_yalla_send_request_t,
ompi_request_t,
mca_pml_yalla_send_request_construct,
mca_pml_yalla_send_request_destruct);
OBJ_CLASS_INSTANCE(mca_pml_yalla_bsend_request_t,
opal_free_list_item_t,
mca_pml_yalla_bsend_request_construct,
NULL);
OBJ_CLASS_INSTANCE(mca_pml_yalla_recv_request_t,
ompi_request_t,
mca_pml_yalla_recv_request_construct,
mca_pml_yalla_recv_request_destruct);

View File

@@ -1,227 +0,0 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (C) Mellanox Technologies Ltd. 2001-2011. ALL RIGHTS RESERVED.
* Copyright (c) 2015-2016 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef PML_YALLA_REQUEST_H_
#define PML_YALLA_REQUEST_H_
#include "pml_yalla.h"
#include "pml_yalla_datatype.h"
#define MCA_PML_YALLA_REQUEST_FLAG_SEND 0x1 /* Persistent send */
#define MCA_PML_YALLA_REQUEST_FLAG_BSEND 0x2 /* Persistent buffered send */
#define MCA_PML_YALLA_REQUEST_FLAG_FREE_CALLED 0x4
struct pml_yalla_base_request {
ompi_request_t ompi;
mca_pml_yalla_convertor_t *convertor;
int flags;
};
struct pml_yalla_send_request {
mca_pml_yalla_base_request_t super;
mxm_send_req_t mxm;
};
struct pml_yalla_bsend_request {
opal_free_list_item_t super;
mxm_send_req_t mxm;
};
struct pml_yalla_recv_request {
mca_pml_yalla_base_request_t super;
mxm_recv_req_t mxm;
};
OBJ_CLASS_DECLARATION(mca_pml_yalla_send_request_t);
OBJ_CLASS_DECLARATION(mca_pml_yalla_bsend_request_t);
OBJ_CLASS_DECLARATION(mca_pml_yalla_recv_request_t);
void mca_pml_yalla_init_reqs(void);
#define PML_YALLA_MXM_REQBASE( x ) ( &((x)->mxm.base) )
#define PML_YALLA_RESET_OMPI_REQ(_ompi_req, _state) \
{ \
(_ompi_req)->req_state = _state; \
(_ompi_req)->req_complete = REQUEST_PENDING; \
(_ompi_req)->req_status._cancelled = false; \
}
#define PML_YALLA_INIT_OMPI_REQ(_ompi_req, _comm, _state) \
{ \
PML_YALLA_RESET_OMPI_REQ(_ompi_req, _state); \
(_ompi_req)->req_mpi_object.comm = _comm; \
OBJ_RETAIN(_comm); \
}
#define PML_YALLA_RESET_PML_REQ(_pml_req, mxm_base) \
{ \
mxm_base->state = MXM_REQ_NEW; \
PML_YALLA_RESET_PML_REQ_DATA(_pml_req); \
}
#define PML_YALLA_INIT_MXM_REQ_BASE(_req_base, _comm) \
{ \
(_req_base)->state = MXM_REQ_NEW; \
(_req_base)->mq = (mxm_mq_h)(_comm)->c_pml_comm; \
}
#define PML_YALLA_PEER_CONN(_comm, _rank) \
ompi_comm_peer_lookup(_comm, _rank)->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_PML]
#define PML_YALLA_INIT_MXM_SEND_REQ(_sreq, _buf, _count, _dtype, _rank, _tag, _mode, _comm, _stream_type, ...) \
{ \
PML_YALLA_INIT_MXM_REQ_BASE(&(_sreq)->base, _comm); \
PML_YALLA_INIT_MXM_REQ_DATA(&(_sreq)->base, _buf, _count, _dtype, _stream_type, ## __VA_ARGS__); \
(_sreq)->base.conn = PML_YALLA_PEER_CONN(_comm, _rank); \
(_sreq)->opcode = ((_mode) == MCA_PML_BASE_SEND_SYNCHRONOUS) ? MXM_REQ_OP_SEND_SYNC : MXM_REQ_OP_SEND; \
(_sreq)->op.send.tag = _tag; \
(_sreq)->op.send.imm_data = ompi_comm_rank(_comm); \
}
#define PML_YALLA_INIT_MXM_RECV_REQ_ENVELOPE(_rreq, _rank, _tag, _comm) \
{ \
(_rreq)->base.conn = ((_rank) == MPI_ANY_SOURCE) ? NULL : PML_YALLA_PEER_CONN(_comm, _rank); \
if ((_tag) == MPI_ANY_TAG) { \
(_rreq)->tag = 0; \
(_rreq)->tag_mask = 0x80000000u; \
} else { \
(_rreq)->tag = _tag; \
(_rreq)->tag_mask = 0xffffffffu; \
} \
}
#define PML_YALLA_INIT_MXM_RECV_REQ(_rreq, _buf, _count, _dtype, _rank, _tag, _comm, _stream_type, ...) \
{ \
PML_YALLA_INIT_MXM_REQ_BASE(&(_rreq)->base, _comm); \
PML_YALLA_INIT_MXM_REQ_DATA(&(_rreq)->base, _buf, _count, _dtype, _stream_type, ## __VA_ARGS__); \
PML_YALLA_INIT_MXM_RECV_REQ_ENVELOPE(_rreq, _rank, _tag, _comm); \
}
#define PML_YALLA_INIT_BLOCKING_MXM_SEND_REQ(_sreq) \
{ \
(_sreq)->base.completed_cb = NULL; \
(_sreq)->flags = MXM_REQ_SEND_FLAG_BLOCKING; \
}
#define PML_YALLA_INIT_BLOCKING_MXM_RECV_REQ(_rreq) \
{ \
(_rreq)->base.completed_cb = NULL; \
}
#define PML_YALLA_FREE_BLOCKING_MXM_REQ(_req) \
{ \
if ((_req)->data_type == MXM_REQ_DATA_STREAM) { \
mca_pml_yalla_convertor_free((mca_pml_yalla_convertor_t*)((_req)->context)); \
} \
}
static inline mca_pml_yalla_recv_request_t* MCA_PML_YALLA_RREQ_INIT(void *_buf, size_t _count, ompi_datatype_t *_datatype,
int _src, int _tag, struct ompi_communicator_t* _comm, int _state)
{
mca_pml_yalla_recv_request_t *rreq = (mca_pml_yalla_recv_request_t *)PML_YALLA_FREELIST_GET(&ompi_pml_yalla.recv_reqs);
PML_YALLA_INIT_OMPI_REQ(&rreq->super.ompi, _comm, _state);
PML_YALLA_INIT_MXM_RECV_REQ(&rreq->mxm, _buf, _count, _datatype, _src, _tag, _comm, irecv, rreq);
return rreq;
}
static inline mca_pml_yalla_send_request_t* MCA_PML_YALLA_SREQ_INIT(void *_buf, size_t _count, ompi_datatype_t *_datatype,
int _dst, int _tag, mca_pml_base_send_mode_t _mode, struct ompi_communicator_t* _comm, int _state)
{
mca_pml_yalla_send_request_t *sreq = (mca_pml_yalla_send_request_t *)PML_YALLA_FREELIST_GET(&ompi_pml_yalla.send_reqs);
PML_YALLA_INIT_OMPI_REQ(&sreq->super.ompi, _comm, _state);
PML_YALLA_INIT_MXM_SEND_REQ(&sreq->mxm, _buf, _count, _datatype, _dst, _tag, _mode, _comm, isend, sreq);
sreq->super.ompi.req_status.MPI_TAG = _tag;
sreq->super.ompi.req_status.MPI_SOURCE = (_comm)->c_my_rank;
sreq->super.ompi.req_status._ucount = _count;
return sreq;
}
#define PML_YALLA_INIT_MXM_PROBE_REQ(_rreq, _rank, _tag, _comm) \
{ \
PML_YALLA_INIT_MXM_REQ_BASE(&(_rreq)->base, _comm); \
PML_YALLA_INIT_MXM_RECV_REQ_ENVELOPE(_rreq, _rank, _tag, _comm); \
}
/*
* For multi-threaded MPI, avoid blocking inside mxm_wait(), since it prevents
* other threads from making progress.
*/
#define PML_YALLA_WAIT_MXM_REQ(_req_base) \
{ \
if (opal_using_threads()) { \
while (!mxm_req_test(_req_base)) { \
sched_yield(); \
opal_progress(); \
} \
} else if (!mxm_req_test(_req_base)) { \
mxm_wait_t wait; \
wait.progress_cb = (mxm_progress_cb_t)opal_progress; \
wait.progress_arg = NULL; \
wait.req = (_req_base); \
wait.state = MXM_REQ_COMPLETED; \
mxm_wait(&wait); \
} \
}
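/*
* Illustrative sketch (hypothetical helper, not part of the yalla code): a
* blocking send path would post the MXM request and then spin in the macro
* above, which yields and drives opal_progress() instead of blocking inside
* mxm_wait() when multiple threads are active.
*/
static inline int example_blocking_send(mca_pml_yalla_send_request_t *sreq)
{
    mxm_error_t error = mxm_req_send(&sreq->mxm);   /* post the MXM send */
    if (MXM_OK != error) {
        return OMPI_ERROR;
    }
    PML_YALLA_WAIT_MXM_REQ(&sreq->mxm.base);        /* poll/progress to completion */
    return OMPI_SUCCESS;
}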
static inline int PML_YALLA_SET_RECV_STATUS(mxm_recv_req_t *_rreq,
size_t _length,
ompi_status_public_t *_mpi_status)
{
int rc;
switch (_rreq->base.error) {
case MXM_OK:
rc = OMPI_SUCCESS;
break;
case MXM_ERR_CANCELED:
rc = OMPI_SUCCESS;
break;
case MXM_ERR_MESSAGE_TRUNCATED:
rc = MPI_ERR_TRUNCATE;
break;
default:
rc = MPI_ERR_INTERN;
break;
}
/* If status is not ignored, fill what is needed */
if (_mpi_status != MPI_STATUS_IGNORE) {
_mpi_status->MPI_ERROR = rc;
if (MXM_ERR_CANCELED == _rreq->base.error) {
_mpi_status->_cancelled = true;
}
_mpi_status->MPI_TAG = _rreq->completion.sender_tag;
_mpi_status->MPI_SOURCE = _rreq->completion.sender_imm;
_mpi_status->_ucount = _length;
}
return rc;
}
#define PML_YALLA_SET_MESSAGE(_rreq, _comm, _mxm_msg, _message) \
{ \
*(_message) = ompi_message_alloc(); \
(*(_message))->comm = (_comm); \
(*(_message))->count = (_rreq)->completion.sender_len; \
(*(_message))->peer = (_rreq)->completion.sender_imm; \
(*(_message))->req_ptr = (_mxm_msg); \
}
#define PML_YALLA_MESSAGE_RELEASE(_message) \
{ \
ompi_message_return(*(_message)); \
*(_message) = MPI_MESSAGE_NULL; \
}
#endif /* PML_YALLA_REQUEST_H_ */


@ -1,43 +0,0 @@
#
# Copyright (c) 2013 Mellanox Technologies, Inc.
# All rights reserved.
# Copyright (c) 2017 IBM Corporation. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
AM_CPPFLAGS = $(atomic_mxm_CPPFLAGS)
mxm_sources = \
atomic_mxm.h \
atomic_mxm_module.c \
atomic_mxm_component.c \
atomic_mxm_fadd.c \
atomic_mxm_cswap.c
# Make the output library in this directory, and name it either
# mca_<type>_<name>.la (for DSO builds) or libmca_<type>_<name>.la
# (for static builds).
if MCA_BUILD_oshmem_atomic_mxm_DSO
component_noinst =
component_install = mca_atomic_mxm.la
else
component_noinst = libmca_atomic_mxm.la
component_install =
endif
mcacomponentdir = $(oshmemlibdir)
mcacomponent_LTLIBRARIES = $(component_install)
mca_atomic_mxm_la_SOURCES = $(mxm_sources)
mca_atomic_mxm_la_LIBADD = $(top_builddir)/oshmem/liboshmem.la \
$(atomic_mxm_LIBS) $(top_builddir)/oshmem/mca/spml/libmca_spml.la
mca_atomic_mxm_la_LDFLAGS = -module -avoid-version $(atomic_mxm_LDFLAGS)
noinst_LTLIBRARIES = $(component_noinst)
libmca_atomic_mxm_la_SOURCES = $(mxm_sources)
libmca_atomic_mxm_la_LDFLAGS = -module -avoid-version $(atomic_mxm_LDFLAGS)


@ -1,148 +0,0 @@
/*
* Copyright (c) 2013 Mellanox Technologies, Inc.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef MCA_ATOMIC_MXM_H
#define MCA_ATOMIC_MXM_H
#include "oshmem_config.h"
#include "oshmem/mca/mca.h"
#include "oshmem/mca/atomic/atomic.h"
#include "oshmem/util/oshmem_util.h"
/* This component uses SPML:IKRIT */
#include "oshmem/mca/spml/ikrit/spml_ikrit.h"
#include "oshmem/runtime/runtime.h"
BEGIN_C_DECLS
/* Globally exported variables */
OSHMEM_MODULE_DECLSPEC extern mca_atomic_base_component_1_0_0_t
mca_atomic_mxm_component;
/* this component works with spml:ikrit only */
extern mca_spml_ikrit_t *mca_atomic_mxm_spml_self;
OSHMEM_DECLSPEC void atomic_mxm_lock(int pe);
OSHMEM_DECLSPEC void atomic_mxm_unlock(int pe);
/* API functions */
int mca_atomic_mxm_startup(bool enable_progress_threads, bool enable_threads);
int mca_atomic_mxm_finalize(void);
mca_atomic_base_module_t*
mca_atomic_mxm_query(int *priority);
int mca_atomic_mxm_add(shmem_ctx_t ctx,
void *target,
uint64_t value,
size_t nlong,
int pe);
int mca_atomic_mxm_fadd(shmem_ctx_t ctx,
void *target,
void *prev,
uint64_t value,
size_t nlong,
int pe);
int mca_atomic_mxm_swap(shmem_ctx_t ctx,
void *target,
void *prev,
uint64_t value,
size_t nlong,
int pe);
int mca_atomic_mxm_cswap(shmem_ctx_t ctx,
void *target,
uint64_t *prev,
uint64_t cond,
uint64_t value,
size_t nlong,
int pe);
struct mca_atomic_mxm_module_t {
mca_atomic_base_module_t super;
};
typedef struct mca_atomic_mxm_module_t mca_atomic_mxm_module_t;
OBJ_CLASS_DECLARATION(mca_atomic_mxm_module_t);
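/* Map an operand size in bytes to the MXM atomic "order" (log2 of the size):
* 1 -> 0, 2 -> 1, 4 -> 2, 8 -> 3; any other size aborts the job. */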
static inline uint8_t mca_atomic_mxm_order(size_t nlong)
{
if (OPAL_LIKELY(8 == nlong)) {
return 3;
}
if (OPAL_LIKELY(4 == nlong)) {
return 2;
}
if (2 == nlong) {
return 1;
}
if (1 == nlong) {
return 0;
}
ATOMIC_ERROR("Type size must be 1/2/4 or 8 bytes.");
oshmem_shmem_abort(-1);
return OSHMEM_ERR_BAD_PARAM;
}
static inline void mca_atomic_mxm_req_init(mxm_send_req_t *sreq, int pe, void *target, size_t nlong)
{
uint8_t nlong_order;
void *remote_addr;
mxm_mem_key_t *mkey;
nlong_order = mca_atomic_mxm_order(nlong);
mkey = mca_spml_ikrit_get_mkey(pe, target, MXM_PTL_RDMA, &remote_addr, mca_atomic_mxm_spml_self);
/* mxm request init */
sreq->base.state = MXM_REQ_NEW;
sreq->base.mq = mca_atomic_mxm_spml_self->mxm_mq;
sreq->base.conn = mca_atomic_mxm_spml_self->mxm_peers[pe].mxm_hw_rdma_conn;
sreq->base.completed_cb = NULL;
sreq->base.data_type = MXM_REQ_DATA_BUFFER;
sreq->base.data.buffer.memh = MXM_INVALID_MEM_HANDLE;
sreq->base.data.buffer.length = nlong;
sreq->op.atomic.remote_vaddr = (uintptr_t) remote_addr;
sreq->op.atomic.remote_mkey = mkey;
sreq->op.atomic.order = nlong_order;
sreq->flags = 0;
}
static inline void mca_atomic_mxm_post(mxm_send_req_t *sreq)
{
mxm_error_t mxm_err;
mxm_err = mxm_req_send(sreq);
if (OPAL_UNLIKELY(MXM_OK != mxm_err)) {
ATOMIC_ERROR("mxm_req_send failed, mxm_error = %d",
mxm_err);
oshmem_shmem_abort(-1);
}
mxm_req_wait(&sreq->base);
if (OPAL_UNLIKELY(MXM_OK != sreq->base.error)) {
ATOMIC_ERROR("mxm_req_wait got non MXM_OK error: %d",
sreq->base.error);
oshmem_shmem_abort(-1);
}
}
END_C_DECLS
#endif /* MCA_ATOMIC_MXM_H */


@ -1,107 +0,0 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2013 Mellanox Technologies, Inc.
* All rights reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "oshmem_config.h"
#include "oshmem/constants.h"
#include "oshmem/mca/atomic/atomic.h"
#include "oshmem/mca/atomic/base/base.h"
#include "oshmem/mca/spml/base/base.h"
#include "atomic_mxm.h"
/*
* Public string showing the atomic mxm component version number
*/
const char *mca_atomic_mxm_component_version_string =
"Open SHMEM mxm atomic MCA component version " OSHMEM_VERSION;
/*
* Global variable
*/
mca_spml_ikrit_t *mca_atomic_mxm_spml_self = NULL;
/*
* Local function
*/
static int _mxm_register(void);
static int _mxm_open(void);
/*
* Instantiate the public struct with all of our public information
* and pointers to our public functions in it
*/
mca_atomic_base_component_t mca_atomic_mxm_component = {
/* First, the mca_component_t struct containing meta information
about the component itself */
.atomic_version = {
MCA_ATOMIC_BASE_VERSION_2_0_0,
/* Component name and version */
.mca_component_name = "mxm",
MCA_BASE_MAKE_VERSION(component, OSHMEM_MAJOR_VERSION, OSHMEM_MINOR_VERSION,
OSHMEM_RELEASE_VERSION),
.mca_open_component = _mxm_open,
.mca_register_component_params = _mxm_register,
},
.atomic_data = {
/* The component is checkpoint ready */
MCA_BASE_METADATA_PARAM_CHECKPOINT
},
/* Initialization / querying functions */
.atomic_startup = mca_atomic_mxm_startup,
.atomic_finalize = mca_atomic_mxm_finalize,
.atomic_query = mca_atomic_mxm_query,
};
static int _mxm_register(void)
{
mca_atomic_mxm_component.priority = 100;
mca_base_component_var_register (&mca_atomic_mxm_component.atomic_version,
"priority", "Priority of the atomic:mxm "
"component (default: 100)", MCA_BASE_VAR_TYPE_INT,
NULL, 0, MCA_BASE_VAR_FLAG_SETTABLE,
OPAL_INFO_LVL_3,
MCA_BASE_VAR_SCOPE_ALL_EQ,
&mca_atomic_mxm_component.priority);
return OSHMEM_SUCCESS;
}
static int _mxm_open(void)
{
/*
* This component can only work with the spml:ikrit component
* (this check is used instead of !mca_spml_ikrit.enabled)
*/
if (strcmp(mca_spml_base_selected_component.spmlm_version.mca_component_name, "ikrit")) {
ATOMIC_VERBOSE(5,
"Can not use atomic/mxm because spml ikrit component disabled");
return OSHMEM_ERR_NOT_AVAILABLE;
}
mca_atomic_mxm_spml_self = (mca_spml_ikrit_t *) mca_spml.self;
return OSHMEM_SUCCESS;
}
OBJ_CLASS_INSTANCE(mca_atomic_mxm_module_t,
mca_atomic_base_module_t,
NULL,
NULL);


@ -1,68 +0,0 @@
/*
* Copyright (c) 2013 Mellanox Technologies, Inc.
* All rights reserved.
* Copyright (c) 2016 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "oshmem_config.h"
#include <stdio.h>
#include <stdlib.h>
#include "oshmem/constants.h"
#include "oshmem/mca/spml/spml.h"
#include "oshmem/mca/atomic/atomic.h"
#include "oshmem/mca/atomic/base/base.h"
#include "oshmem/mca/memheap/memheap.h"
#include "oshmem/mca/memheap/base/base.h"
#include "oshmem/runtime/runtime.h"
#include "atomic_mxm.h"
int mca_atomic_mxm_swap(shmem_ctx_t ctx,
void *target,
void *prev,
uint64_t value,
size_t nlong,
int pe)
{
mxm_send_req_t sreq;
mca_atomic_mxm_req_init(&sreq, pe, target, nlong);
memcpy(prev, &value, nlong);
sreq.base.data.buffer.ptr = prev;
sreq.opcode = MXM_REQ_OP_ATOMIC_SWAP;
mca_atomic_mxm_post(&sreq);
return OSHMEM_SUCCESS;
}
int mca_atomic_mxm_cswap(shmem_ctx_t ctx,
void *target,
uint64_t *prev,
uint64_t cond,
uint64_t value,
size_t nlong,
int pe)
{
mxm_send_req_t sreq;
mca_atomic_mxm_req_init(&sreq, pe, target, nlong);
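/* MXM takes the compare value in op.atomic.value and the swap value through
* the data buffer; the buffer (prev) also receives the fetched old value. */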
*prev = value;
sreq.op.atomic.value = cond;
sreq.base.data.buffer.ptr = prev;
sreq.opcode = MXM_REQ_OP_ATOMIC_CSWAP;
mca_atomic_mxm_post(&sreq);
return OSHMEM_SUCCESS;
}


@ -1,66 +0,0 @@
/*
* Copyright (c) 2013 Mellanox Technologies, Inc.
* All rights reserved.
* Copyright (c) 2016 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "oshmem_config.h"
#include <stdio.h>
#include <stdlib.h>
#include "oshmem/constants.h"
#include "oshmem/op/op.h"
#include "oshmem/mca/spml/spml.h"
#include "oshmem/mca/atomic/atomic.h"
#include "oshmem/mca/atomic/base/base.h"
#include "oshmem/mca/memheap/memheap.h"
#include "oshmem/mca/memheap/base/base.h"
#include "oshmem/runtime/runtime.h"
#include "atomic_mxm.h"
int mca_atomic_mxm_add(shmem_ctx_t ctx,
void *target,
uint64_t value,
size_t size,
int pe)
{
mxm_send_req_t sreq;
static char dummy_buf[8];
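/* Plain add is implemented as a fetch-and-add whose fetched result is
* discarded into dummy_buf. */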
mca_atomic_mxm_req_init(&sreq, pe, target, size);
sreq.op.atomic.value = value;
sreq.opcode = MXM_REQ_OP_ATOMIC_FADD;
sreq.base.data.buffer.ptr = dummy_buf;
mca_atomic_mxm_post(&sreq);
return OSHMEM_SUCCESS;
}
int mca_atomic_mxm_fadd(shmem_ctx_t ctx,
void *target,
void *prev,
uint64_t value,
size_t size,
int pe)
{
mxm_send_req_t sreq;
mca_atomic_mxm_req_init(&sreq, pe, target, size);
sreq.op.atomic.value = value;
sreq.opcode = MXM_REQ_OP_ATOMIC_FADD;
sreq.base.data.buffer.ptr = prev;
mca_atomic_mxm_post(&sreq);
return OSHMEM_SUCCESS;
}


@ -1,77 +0,0 @@
/*
* Copyright (c) 2013 Mellanox Technologies, Inc.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "oshmem_config.h"
#include <stdio.h>
#include "oshmem/constants.h"
#include "oshmem/mca/atomic/atomic.h"
#include "oshmem/mca/spml/spml.h"
#include "oshmem/mca/memheap/memheap.h"
#include "oshmem/proc/proc.h"
#include "atomic_mxm.h"
/*
* Startup function invoked during initialization; the thread-support
* arguments are accepted but not used by this component.
*/
int mca_atomic_mxm_startup(bool enable_progress_threads, bool enable_threads)
{
return OSHMEM_SUCCESS;
}
int mca_atomic_mxm_finalize(void)
{
return OSHMEM_SUCCESS;
}
static int mca_atomic_mxm_op_not_implemented(shmem_ctx_t ctx,
void *target,
uint64_t value,
size_t size,
int pe)
{
return OSHMEM_ERR_NOT_IMPLEMENTED;
}
static int mca_atomic_mxm_fop_not_implemented(shmem_ctx_t ctx,
void *target,
void *prev,
uint64_t value,
size_t size,
int pe)
{
return OSHMEM_ERR_NOT_IMPLEMENTED;
}
mca_atomic_base_module_t *
mca_atomic_mxm_query(int *priority)
{
mca_atomic_mxm_module_t *module;
*priority = mca_atomic_mxm_component.priority;
module = OBJ_NEW(mca_atomic_mxm_module_t);
if (module) {
module->super.atomic_add = mca_atomic_mxm_add;
module->super.atomic_and = mca_atomic_mxm_op_not_implemented;
module->super.atomic_or = mca_atomic_mxm_op_not_implemented;
module->super.atomic_xor = mca_atomic_mxm_op_not_implemented;
module->super.atomic_fadd = mca_atomic_mxm_fadd;
module->super.atomic_fand = mca_atomic_mxm_fop_not_implemented;
module->super.atomic_for = mca_atomic_mxm_fop_not_implemented;
module->super.atomic_fxor = mca_atomic_mxm_fop_not_implemented;
module->super.atomic_swap = mca_atomic_mxm_swap;
module->super.atomic_cswap = mca_atomic_mxm_cswap;
return &(module->super);
}
return NULL;
}


@ -1,60 +0,0 @@
/*
* Copyright (c) 2013 Mellanox Technologies, Inc.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
# MCA_oshmem_atomic_mxm_CONFIG([action-if-can-compile],
# [action-if-cant-compile])
# ------------------------------------------------
AC_DEFUN([MCA_oshmem_atomic_mxm_CONFIG],[
AC_CONFIG_FILES([oshmem/mca/atomic/mxm/Makefile])
OMPI_CHECK_MXM([atomic_mxm],
[save_CPPFLAGS="$CPPFLAGS"
save_LDFLAGS="$LDFLAGS"
save_LIBS="$LIBS"
CPPFLAGS="$CPPFLAGS -I$ompi_check_mxm_dir/include"
LDFLAGS="$LDFLAGS -L$ompi_check_mxm_dir/lib"
LIBS="$LIBS -lmxm"
AC_COMPILE_IFELSE([AC_LANG_SOURCE([[
#include <mxm/api/mxm_api.h>
int main() {
if (mxm_get_version() < MXM_VERSION(1,5) )
return 1;
/* if the compiler sees these constants then mxm has atomic support */
int add_index = MXM_REQ_OP_ATOMIC_ADD;
int swap_index = MXM_REQ_OP_ATOMIC_SWAP;
return 0;
}]])],
[AC_DEFINE([OSHMEM_HAS_ATOMIC_MXM], [1], [mxm support is available]) atomic_mxm_happy="yes"],
[atomic_mxm_happy="no"],
[atomic_mxm_happy="no"])
CPPFLAGS=$save_CPPFLAGS
LDFLAGS=$save_LDFLAGS
LIBS=$save_LIBS
],
[atomic_mxm_happy="no"])
AS_IF([test "$atomic_mxm_happy" = "yes"],
[atomic_mxm_WRAPPER_EXTRA_LDFLAGS="$atomic_mxm_LDFLAGS"
atomic_mxm_WRAPPER_EXTRA_LIBS="$atomic_mxm_LIBS"
$1],
[$2])
# substitute in the things needed to build mxm
AC_SUBST([atomic_mxm_CFLAGS])
AC_SUBST([atomic_mxm_CPPFLAGS])
AC_SUBST([atomic_mxm_LDFLAGS])
AC_SUBST([atomic_mxm_LIBS])
AC_MSG_CHECKING([if oshmem/atomic/mxm component can be compiled])
AC_MSG_RESULT([$atomic_mxm_happy])
])dnl


@ -1,43 +0,0 @@
#
# Copyright (c) 2013 Mellanox Technologies, Inc.
# All rights reserved.
#
# Copyright (c) 2017 IBM Corporation. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
dist_oshmemdata_DATA = \
help-oshmem-spml-ikrit.txt
AM_CPPFLAGS = $(spml_ikrit_CPPFLAGS)
ikrit_sources = \
spml_ikrit.c \
spml_ikrit.h \
spml_ikrit_component.c \
spml_ikrit_component.h
if MCA_BUILD_oshmem_spml_ikrit_DSO
component_noinst =
component_install = mca_spml_ikrit.la
else
component_noinst = libmca_spml_ikrit.la
component_install =
endif
mcacomponentdir = $(oshmemlibdir)
mcacomponent_LTLIBRARIES = $(component_install)
mca_spml_ikrit_la_SOURCES = $(ikrit_sources)
mca_spml_ikrit_la_LIBADD = $(top_builddir)/oshmem/liboshmem.la \
$(spml_ikrit_LIBS)
mca_spml_ikrit_la_LDFLAGS = -module -avoid-version $(spml_ikrit_LDFLAGS)
noinst_LTLIBRARIES = $(component_noinst)
libmca_spml_ikrit_la_SOURCES = $(ikrit_sources)
libmca_spml_ikrit_la_LIBADD = $(spml_ikrit_LIBS)
libmca_spml_ikrit_la_LDFLAGS = -module -avoid-version $(spml_ikrit_LDFLAGS)


@ -1,32 +0,0 @@
/*
* Copyright (c) 2013 Mellanox Technologies, Inc.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
# MCA_oshmem_spml_ikrit_CONFIG([action-if-can-compile],
# [action-if-cant-compile])
# ------------------------------------------------
AC_DEFUN([MCA_oshmem_spml_ikrit_CONFIG],[
AC_CONFIG_FILES([oshmem/mca/spml/ikrit/Makefile])
OMPI_CHECK_MXM([spml_ikrit],
[spml_ikrit_happy="yes"],
[spml_ikrit_happy="no"])
AS_IF([test "$spml_ikrit_happy" = "yes"],
[$1],
[$2])
# substitute in the things needed to build mxm
AC_SUBST([spml_ikrit_CFLAGS])
AC_SUBST([spml_ikrit_CPPFLAGS])
AC_SUBST([spml_ikrit_LDFLAGS])
AC_SUBST([spml_ikrit_LIBS])
])dnl


@ -1,45 +0,0 @@
#
# Copyright (c) 2013 Mellanox Technologies, Inc.
# All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
[unable to create endpoint]
MXM was unable to create an endpoint. Please make sure that the network link is
active on the node and the hardware is functioning.
Error: %s
#
[unable to get endpoint address]
MXM was unable to get the endpoint address.
Error: %s
#
[mxm mq create]
Failed to create MQ for endpoint
Error: %s
#
[errors during mxm_progress]
Error %s occurred in attempting to make network progress (mxm_progress).
#
[mxm init]
Initialization of MXM library failed.
Error: %s
#
[mxm shm tls]
ERROR: The MXM shared memory transport cannot be used
because it is not fully compliant with the OSHMEM spec.
MXM transport setting: %s
#
[mxm tls]
ERROR: valid MXM transports are:
"ud", "ud,self", "rc", or "dc"
transport setting is: %s=%s
#

The diff for this file is not shown because it is too large.


@ -1,222 +0,0 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2013 Mellanox Technologies, Inc.
* All rights reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2016-2019 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
/**
* @file
*/
#ifndef MCA_SPML_UD_MXM_H
#define MCA_SPML_UD_MXM_H
#include "oshmem_config.h"
#include "oshmem/request/request.h"
#include "oshmem/mca/spml/spml.h"
#include "oshmem/util/oshmem_util.h"
#include "oshmem/mca/spml/base/spml_base_putreq.h"
#include "oshmem/proc/proc.h"
#include "oshmem/mca/spml/base/spml_base_request.h"
#include "oshmem/mca/spml/base/spml_base_getreq.h"
#include "ompi/mca/bml/base/base.h"
#include "opal/class/opal_free_list.h"
#include "opal/class/opal_list.h"
#include "oshmem/mca/memheap/base/base.h"
#include <mxm/api/mxm_api.h>
#ifndef MXM_VERSION
#define MXM_VERSION(major, minor) (((major)<<MXM_MAJOR_BIT)|((minor)<<MXM_MINOR_BIT))
#endif
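/* MXM_VERSION packs a (major, minor) pair into one integer so it can be
* compared directly against MXM_API or mxm_get_version(). */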
#define MXM_SHMEM_MQ_ID 0x7119
/* start requesting explicit acks once our buffer pool drops below this low-water mark */
#define SPML_IKRIT_PUT_LOW_WATER 16
/* request an explicit ack (SYNC) after every X put requests per connection */
#define SPML_IKRIT_PACKETS_PER_SYNC 64
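/*
* Illustrative sketch (hypothetical helper, not part of spml/ikrit): the two
* thresholds above control when a put asks MXM for an explicit remote
* completion (SYNC): either the send buffer pool has dropped below the
* low-water mark, or one connection has accumulated a full batch of
* unacknowledged puts.
*/
static inline int example_put_needs_explicit_ack(int free_buffers, int puts_on_connection)
{
    return (free_buffers < SPML_IKRIT_PUT_LOW_WATER) ||
           (puts_on_connection > 0 &&
            0 == (puts_on_connection % SPML_IKRIT_PACKETS_PER_SYNC));
}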
#define spml_ikrit_container_of(ptr, type, member) ( \
(type *)( ((char *)(ptr)) - offsetof(type,member) ))
#define MXM_MAX_ADDR_LEN 512
#define MXM_PTL_RDMA 0
#define MXM_PTL_SHM 1
#define MXM_PTL_LAST 2
BEGIN_C_DECLS
/**
* MXM SPML module
*/
/* TODO: move va_xx to base struct */
struct spml_ikrit_mkey {
mkey_segment_t super;
mxm_mem_key_t key;
};
typedef struct spml_ikrit_mkey spml_ikrit_mkey_t;
struct mxm_peer {
mxm_conn_h mxm_conn;
mxm_conn_h mxm_hw_rdma_conn;
uint8_t ptl_id;
uint8_t need_fence;
int32_t n_active_puts;
opal_list_item_t link;
spml_ikrit_mkey_t mkeys[MCA_MEMHEAP_SEG_COUNT];
};
typedef struct mxm_peer mxm_peer_t;
typedef mxm_mem_key_t *(*mca_spml_ikrit_get_mkey_slow_fn_t)(int pe, void *va, int ptl_id, void **rva);
struct mca_spml_ikrit_ctx {
int temp;
};
typedef struct mca_spml_ikrit_ctx mca_spml_ikrit_ctx_t;
extern mca_spml_ikrit_ctx_t mca_spml_ikrit_ctx_default;
struct mca_spml_ikrit_t {
mca_spml_base_module_t super;
mca_spml_ikrit_get_mkey_slow_fn_t get_mkey_slow;
mxm_context_opts_t *mxm_ctx_opts;
mxm_ep_opts_t *mxm_ep_opts;
mxm_ep_opts_t *mxm_ep_hw_rdma_opts;
mxm_h mxm_context;
mxm_ep_h mxm_ep;
mxm_ep_h mxm_hw_rdma_ep;
mxm_mq_h mxm_mq;
mxm_peer_t *mxm_peers;
int32_t n_active_puts;
int32_t n_active_gets;
int32_t n_mxm_fences;
int priority; /* component priority */
int free_list_num; /* initial size of free list */
int free_list_max; /* maximum size of free list */
int free_list_inc; /* number of elements to grow free list */
int bulk_connect; /* use bulk connect */
int bulk_disconnect; /* use bulk disconnect */
bool enabled;
opal_list_t active_peers;
int n_relays; /* number of procs/node serving as relays */
char *mxm_tls;
int ud_only; /* only the UD transport is used; in this case
it is possible to speed up mkey exchange
and to skip registering the memheap */
int hw_rdma_channel; /* true if we provide a separate channel that
has true one-sided capability */
int np;
int unsync_conn_max;
size_t put_zcopy_threshold; /* enable zcopy in put if message size is
greater than the threshold */
};
typedef struct mca_spml_ikrit_t mca_spml_ikrit_t;
typedef struct spml_ikrit_mxm_ep_conn_info_t {
union {
struct sockaddr_storage ptl_addr[MXM_PTL_LAST];
char ep_addr[MXM_MAX_ADDR_LEN];
} addr;
} spml_ikrit_mxm_ep_conn_info_t;
extern mca_spml_ikrit_t mca_spml_ikrit;
extern int mca_spml_ikrit_enable(bool enable);
extern int mca_spml_ikrit_ctx_create(long options,
shmem_ctx_t *ctx);
extern void mca_spml_ikrit_ctx_destroy(shmem_ctx_t ctx);
extern int mca_spml_ikrit_get(shmem_ctx_t ctx,
void* dst_addr,
size_t size,
void* src_addr,
int src);
extern int mca_spml_ikrit_get_nb(shmem_ctx_t ctx,
void* src_addr,
size_t size,
void* dst_addr,
int src,
void **handle);
extern int mca_spml_ikrit_put(shmem_ctx_t ctx,
void* dst_addr,
size_t size,
void* src_addr,
int dst);
extern int mca_spml_ikrit_put_nb(shmem_ctx_t ctx,
void* dst_addr,
size_t size,
void* src_addr,
int dst,
void **handle);
extern int mca_spml_ikrit_recv(void* buf, size_t size, int src);
extern int mca_spml_ikrit_send(void* buf,
size_t size,
int dst,
mca_spml_base_put_mode_t mode);
extern sshmem_mkey_t *mca_spml_ikrit_register(void* addr,
size_t size,
uint64_t shmid,
int *count);
extern int mca_spml_ikrit_deregister(sshmem_mkey_t *mkeys);
extern int mca_spml_ikrit_oob_get_mkeys(shmem_ctx_t ctx, int pe,
uint32_t segno,
sshmem_mkey_t *mkeys);
extern int mca_spml_ikrit_add_procs(ompi_proc_t** procs, size_t nprocs);
extern int mca_spml_ikrit_del_procs(ompi_proc_t** procs, size_t nprocs);
extern int mca_spml_ikrit_fence(shmem_ctx_t ctx);
extern int spml_ikrit_progress(void);
/* The function returns NULL if the data can be copied directly via shared memory;
* otherwise it returns an mxm mem key.
*
* The function will abort() if va is not a symmetric variable address.
*/
static inline mxm_mem_key_t *mca_spml_ikrit_get_mkey(int pe, void *va, int ptl_id, void **rva,
mca_spml_ikrit_t *module)
{
spml_ikrit_mkey_t *mkey;
if (OPAL_UNLIKELY(MXM_PTL_RDMA != ptl_id)) {
assert(module->get_mkey_slow);
return module->get_mkey_slow(pe, va, ptl_id, rva);
}
mkey = module->mxm_peers[pe].mkeys;
mkey = (spml_ikrit_mkey_t *)map_segment_find_va(&mkey->super.super, sizeof(*mkey), va);
if (OPAL_UNLIKELY(NULL == mkey)) {
assert(module->get_mkey_slow);
return module->get_mkey_slow(pe, va, ptl_id, rva);
}
*rva = map_segment_va2rva(&mkey->super, va);
return &mkey->key;
}
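/*
* Illustrative usage sketch (hypothetical helper, not part of spml/ikrit):
* per the comment above, a NULL mkey means the target can be reached through
* shared memory and *rva is a directly usable local address; otherwise
* rva/mkey describe the remote buffer for an MXM RDMA operation.
*/
static inline void example_put_via_mkey(int pe, void *dst, const void *src, size_t size)
{
    void *rva;
    mxm_mem_key_t *mkey = mca_spml_ikrit_get_mkey(pe, dst,
                                                  mca_spml_ikrit.mxm_peers[pe].ptl_id,
                                                  &rva, &mca_spml_ikrit);
    if (NULL == mkey) {
        memcpy(rva, src, size);  /* shared-memory fast path (assumes <string.h>) */
    } else {
        /* build an mxm_send_req_t that targets rva with remote_mkey = mkey and
         * post it with mxm_req_send() */
    }
}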
END_C_DECLS
#endif


@ -1,433 +0,0 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2013-2015 Mellanox Technologies, Inc.
* All rights reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2018 Amazon.com, Inc. or its affiliates. All Rights reserved.
* Copyright (c) 2019 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>
#include "opal/util/printf.h"
#include "opal/util/show_help.h"
#include "oshmem_config.h"
#include "shmem.h"
#include "oshmem/runtime/params.h"
#include "oshmem/mca/spml/spml.h"
#include "oshmem/mca/spml/base/base.h"
#include "spml_ikrit_component.h"
#include "oshmem/mca/spml/ikrit/spml_ikrit.h"
static int mca_spml_ikrit_component_register(void);
static int mca_spml_ikrit_component_open(void);
static int mca_spml_ikrit_component_close(void);
static mca_spml_base_module_t*
mca_spml_ikrit_component_init(int* priority,
bool enable_progress_threads,
bool enable_mpi_threads);
static int mca_spml_ikrit_component_fini(void);
mca_spml_base_component_2_0_0_t mca_spml_ikrit_component = {
/* First, the mca_base_component_t struct containing meta
information about the component itself */
.spmlm_version = {
MCA_SPML_BASE_VERSION_2_0_0,
.mca_component_name = "ikrit",
MCA_BASE_MAKE_VERSION(component, OSHMEM_MAJOR_VERSION, OSHMEM_MINOR_VERSION,
OSHMEM_RELEASE_VERSION),
.mca_open_component = mca_spml_ikrit_component_open,
.mca_close_component = mca_spml_ikrit_component_close,
.mca_register_component_params = mca_spml_ikrit_component_register,
},
.spmlm_data = {
/* The component is checkpoint ready */
MCA_BASE_METADATA_PARAM_CHECKPOINT
},
.spmlm_init = mca_spml_ikrit_component_init,
.spmlm_finalize = mca_spml_ikrit_component_fini,
};
#if MXM_API >= MXM_VERSION(2,1)
static inline int check_mxm_tls(char *var)
{
char *str;
str = getenv(var);
if (NULL == str) {
return OSHMEM_SUCCESS;
}
if (NULL != strstr(str, "shm")) {
if (0 < opal_asprintf(&str,
"%s=%s",
var, getenv(var)
)) {
opal_show_help("help-oshmem-spml-ikrit.txt", "mxm shm tls", true,
str);
free(str);
}
return OSHMEM_ERROR;
}
if (NULL == strstr(str, "rc") && NULL == strstr(str, "dc")) {
mca_spml_ikrit.ud_only = 1;
} else {
mca_spml_ikrit.ud_only = 0;
}
return OSHMEM_SUCCESS;
}
static inline int set_mxm_tls(void)
{
char *tls;
/*
* Set DC defaults optimized for shmem
*/
opal_setenv("MXM_OSHMEM_DC_QP_LIMIT", "2", 0, &environ);
opal_setenv("MXM_OSHMEM_DC_RNDV_QP_LIMIT", "2", 0, &environ);
opal_setenv("MXM_OSHMEM_DC_MSS", "8196", 0, &environ);
tls = getenv("MXM_OSHMEM_TLS");
if (NULL != tls) {
return check_mxm_tls("MXM_OSHMEM_TLS");
}
tls = getenv("MXM_TLS");
if (NULL == tls) {
opal_setenv("MXM_OSHMEM_TLS", mca_spml_ikrit.mxm_tls, 1, &environ);
return check_mxm_tls("MXM_OSHMEM_TLS");
}
if (OSHMEM_SUCCESS == check_mxm_tls("MXM_TLS")) {
opal_setenv("MXM_OSHMEM_TLS", tls, 1, &environ);
return OSHMEM_SUCCESS;
}
return OSHMEM_ERROR;
}
static inline int check_mxm_hw_tls(char *v, char *tls)
{
if (v && tls) {
if ((0 == strcmp(tls, "rc") || 0 == strcmp(tls, "dc"))) {
mca_spml_ikrit.ud_only = 0;
return OSHMEM_SUCCESS;
}
if (strstr(tls, "ud") &&
(NULL == strstr(tls, "rc") && NULL == strstr(tls, "dc") &&
NULL == strstr(tls, "shm"))) {
return OSHMEM_SUCCESS;
}
}
opal_show_help("help-oshmem-spml-ikrit.txt", "mxm tls", true,
v, tls);
return OSHMEM_ERROR;
}
static inline int set_mxm_hw_rdma_tls(void)
{
if (!mca_spml_ikrit.hw_rdma_channel) {
return check_mxm_hw_tls("MXM_OSHMEM_TLS", getenv("MXM_OSHMEM_TLS"));
}
opal_setenv("MXM_OSHMEM_HW_RDMA_RC_QP_LIMIT", "-1", 0, &environ);
opal_setenv("MXM_OSHMEM_HW_RDMA_TLS", "rc", 0, &environ);
SPML_VERBOSE(5, "Additional communication channel is enabled. Transports are: %s",
getenv("MXM_OSHMEM_HW_RDMA_TLS"));
return check_mxm_hw_tls("MXM_OSHMEM_HW_RDMA_TLS",
getenv("MXM_OSHMEM_HW_RDMA_TLS"));
}
#endif
static inline void mca_spml_ikrit_param_register_int(const char* param_name,
int default_value,
const char *help_msg,
int *storage)
{
*storage = default_value;
(void) mca_base_component_var_register(&mca_spml_ikrit_component.spmlm_version,
param_name,
help_msg,
MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
OPAL_INFO_LVL_9,
MCA_BASE_VAR_SCOPE_READONLY,
storage);
}
static inline void mca_spml_ikrit_param_register_size_t(const char* param_name,
size_t default_value,
const char *help_msg,
size_t *storage)
{
*storage = default_value;
(void) mca_base_component_var_register(&mca_spml_ikrit_component.spmlm_version,
param_name,
help_msg,
MCA_BASE_VAR_TYPE_SIZE_T, NULL, 0, 0,
OPAL_INFO_LVL_9,
MCA_BASE_VAR_SCOPE_READONLY,
storage);
}
static inline void mca_spml_ikrit_param_register_string(const char* param_name,
char* default_value,
const char *help_msg,
char **storage)
{
*storage = default_value;
(void) mca_base_component_var_register(&mca_spml_ikrit_component.spmlm_version,
param_name,
help_msg,
MCA_BASE_VAR_TYPE_STRING, NULL, 0, 0,
OPAL_INFO_LVL_9,
MCA_BASE_VAR_SCOPE_READONLY,
storage);
}
static int mca_spml_ikrit_component_register(void)
{
char *v;
mca_spml_ikrit_param_register_int("free_list_num", 1024,
0,
&mca_spml_ikrit.free_list_num);
mca_spml_ikrit_param_register_int("free_list_max", 1024,
0,
&mca_spml_ikrit.free_list_max);
mca_spml_ikrit_param_register_int("free_list_inc", 16,
0,
&mca_spml_ikrit.free_list_inc);
mca_spml_ikrit_param_register_int("bulk_connect", 1,
0,
&mca_spml_ikrit.bulk_connect);
mca_spml_ikrit_param_register_int("bulk_disconnect", 1,
0,
&mca_spml_ikrit.bulk_disconnect);
mca_spml_ikrit_param_register_int("priority", 20,
"[integer] ikrit priority",
&mca_spml_ikrit.priority);
mca_spml_ikrit_param_register_int("hw_rdma_channel", 0,
"create separate reliable connection channel",
&mca_spml_ikrit.hw_rdma_channel);
if (!mca_spml_ikrit.hw_rdma_channel)
v = "ud,self";
else
v = "rc,ud,self";
mca_spml_ikrit_param_register_string("mxm_tls",
v,
"[string] TL channels for MXM",
&mca_spml_ikrit.mxm_tls);
mca_spml_ikrit_param_register_int("np",
0,
"[integer] Minimal allowed job's NP to activate ikrit", &mca_spml_ikrit.np);
mca_spml_ikrit_param_register_int("unsync_conn_max", 8,
"[integer] Max number of connections that do not require notification of PUT operation remote completion. Increasing this number improves efficiency of p2p communication but increases overhead of shmem_fence/shmem_quiet/shmem_barrier",
&mca_spml_ikrit.unsync_conn_max);
mca_spml_ikrit_param_register_size_t("put_zcopy_threshold", 16384ULL,
"[size_t] Use zero copy put if message size is greater than the threshold",
&mca_spml_ikrit.put_zcopy_threshold);
if (oshmem_num_procs() < mca_spml_ikrit.np) {
SPML_VERBOSE(1,
"Not enough ranks (%d<%d), disqualifying spml/ikrit",
oshmem_num_procs(), mca_spml_ikrit.np);
return OSHMEM_ERR_NOT_AVAILABLE;
}
return OSHMEM_SUCCESS;
}
int spml_ikrit_progress(void)
{
mxm_error_t err;
err = mxm_progress(mca_spml_ikrit.mxm_context);
if ((MXM_OK != err) && (MXM_ERR_NO_PROGRESS != err)) {
opal_show_help("help-oshmem-spml-ikrit.txt",
"errors during mxm_progress",
true,
mxm_error_string(err));
}
return 1;
}
static int mca_spml_ikrit_component_open(void)
{
mxm_error_t err;
unsigned long cur_ver;
cur_ver = mxm_get_version();
if (cur_ver != MXM_API) {
SPML_WARNING(
"OSHMEM was compiled with MXM version %d.%d but version %ld.%ld detected.",
MXM_VERNO_MAJOR, MXM_VERNO_MINOR,
(cur_ver >> MXM_MAJOR_BIT) & 0xff,
(cur_ver >> MXM_MINOR_BIT) & 0xff);
}
mca_spml_ikrit.mxm_mq = NULL;
mca_spml_ikrit.mxm_context = NULL;
mca_spml_ikrit.ud_only = 0;
#if MXM_API < MXM_VERSION(2,1)
mca_spml_ikrit.hw_rdma_channel = 0;
if ((MXM_OK != mxm_config_read_context_opts(&mca_spml_ikrit.mxm_ctx_opts)) ||
(MXM_OK != mxm_config_read_ep_opts(&mca_spml_ikrit.mxm_ep_opts)))
#else
if (OSHMEM_SUCCESS != set_mxm_tls()) {
return OSHMEM_ERROR;
}
if (OSHMEM_SUCCESS != set_mxm_hw_rdma_tls()) {
return OSHMEM_ERROR;
}
if ((mca_spml_ikrit.hw_rdma_channel && MXM_OK != mxm_config_read_opts(&mca_spml_ikrit.mxm_ctx_opts,
&mca_spml_ikrit.mxm_ep_hw_rdma_opts,
"OSHMEM_HW_RDMA", NULL, 0)) ||
MXM_OK != mxm_config_read_opts(&mca_spml_ikrit.mxm_ctx_opts,
&mca_spml_ikrit.mxm_ep_opts,
"OSHMEM", NULL, 0))
#endif
{
SPML_ERROR("Failed to parse MXM configuration");
return OSHMEM_ERROR;
}
SPML_VERBOSE(5, "UD only mode is %s",
mca_spml_ikrit.ud_only ? "enabled" : "disabled");
err = mxm_init(mca_spml_ikrit.mxm_ctx_opts, &mca_spml_ikrit.mxm_context);
if (MXM_OK != err) {
if (MXM_ERR_NO_DEVICE == err) {
SPML_VERBOSE(1,
"No supported device found, disqualifying spml/ikrit");
} else {
opal_show_help("help-oshmem-spml-ikrit.txt",
"mxm init",
true,
mxm_error_string(err));
}
return OSHMEM_ERR_NOT_AVAILABLE;
}
err = mxm_mq_create(mca_spml_ikrit.mxm_context,
MXM_SHMEM_MQ_ID,
&mca_spml_ikrit.mxm_mq);
if (MXM_OK != err) {
opal_show_help("help-oshmem-spml-ikrit.txt",
"mxm mq create",
true,
mxm_error_string(err));
return OSHMEM_ERROR;
}
return OSHMEM_SUCCESS;
}
static int mca_spml_ikrit_component_close(void)
{
if (mca_spml_ikrit.mxm_mq) {
mxm_mq_destroy(mca_spml_ikrit.mxm_mq);
}
if (mca_spml_ikrit.mxm_context) {
mxm_cleanup(mca_spml_ikrit.mxm_context);
mxm_config_free_ep_opts(mca_spml_ikrit.mxm_ep_opts);
mxm_config_free_context_opts(mca_spml_ikrit.mxm_ctx_opts);
if (mca_spml_ikrit.hw_rdma_channel)
mxm_config_free_ep_opts(mca_spml_ikrit.mxm_ep_hw_rdma_opts);
}
mca_spml_ikrit.mxm_mq = NULL;
mca_spml_ikrit.mxm_context = NULL;
return OSHMEM_SUCCESS;
}
static int spml_ikrit_mxm_init(void)
{
mxm_error_t err;
/* Open MXM endpoint */
err = mxm_ep_create(mca_spml_ikrit.mxm_context,
mca_spml_ikrit.mxm_ep_opts,
&mca_spml_ikrit.mxm_ep);
if (MXM_OK != err) {
opal_show_help("help-oshmem-spml-ikrit.txt",
"unable to create endpoint",
true,
mxm_error_string(err));
return OSHMEM_ERROR;
}
if (mca_spml_ikrit.hw_rdma_channel) {
err = mxm_ep_create(mca_spml_ikrit.mxm_context,
mca_spml_ikrit.mxm_ep_hw_rdma_opts,
&mca_spml_ikrit.mxm_hw_rdma_ep);
if (MXM_OK != err) {
opal_show_help("help-oshmem-spml-ikrit.txt",
"unable to create endpoint",
true,
mxm_error_string(err));
return OSHMEM_ERROR;
}
} else {
mca_spml_ikrit.mxm_hw_rdma_ep = mca_spml_ikrit.mxm_ep;
}
oshmem_ctx_default = (shmem_ctx_t) &mca_spml_ikrit_ctx_default;
return OSHMEM_SUCCESS;
}
static mca_spml_base_module_t*
mca_spml_ikrit_component_init(int* priority,
bool enable_progress_threads,
bool enable_mpi_threads)
{
SPML_VERBOSE( 10, "in ikrit, my priority is %d\n", mca_spml_ikrit.priority);
if ((*priority) > mca_spml_ikrit.priority) {
*priority = mca_spml_ikrit.priority;
return NULL;
}
*priority = mca_spml_ikrit.priority;
if (OSHMEM_SUCCESS != spml_ikrit_mxm_init())
return NULL;
mca_spml_ikrit.n_active_puts = 0;
mca_spml_ikrit.n_active_gets = 0;
mca_spml_ikrit.n_mxm_fences = 0;
SPML_VERBOSE(50, "*** ikrit initialized ****");
return &mca_spml_ikrit.super;
}
static int mca_spml_ikrit_component_fini(void)
{
opal_progress_unregister(spml_ikrit_progress);
if (NULL != mca_spml_ikrit.mxm_ep) {
mxm_ep_destroy(mca_spml_ikrit.mxm_ep);
}
if (mca_spml_ikrit.hw_rdma_channel) {
mxm_ep_destroy(mca_spml_ikrit.mxm_hw_rdma_ep);
}
if(!mca_spml_ikrit.enabled)
return OSHMEM_SUCCESS; /* never selected; return success */
mca_spml_ikrit.enabled = false; /* not anymore */
return OSHMEM_SUCCESS;
}


@ -1,25 +0,0 @@
/*
* Copyright (c) 2013 Mellanox Technologies, Inc.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
/**
* @file
*/
#ifndef MCA_SPML_IKRIT_COMPONENT_H
#define MCA_SPML_IKRIT_COMPONENT_H
BEGIN_C_DECLS
/*
* SPML module functions.
*/
OSHMEM_MODULE_DECLSPEC extern mca_spml_base_component_2_0_0_t mca_spml_ikrit_component;
END_C_DECLS
#endif