
Merge branch 'master' of github.com:open-mpi/ompi

George Bosilca 2015-02-25 12:01:35 -05:00
parents 0871c5c489 efbb57430b
commit f3b58006c8
530 changed files with 5025 additions and 2841 deletions

17
HACKING
View file

@@ -8,7 +8,7 @@ Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 University of Stuttgart. All rights reserved.
 Copyright (c) 2004-2005 The Regents of the University of California.
 All rights reserved.
-Copyright (c) 2008-2014 Cisco Systems, Inc. All rights reserved.
+Copyright (c) 2008-2015 Cisco Systems, Inc. All rights reserved.
 Copyright (c) 2013 Intel, Inc. All rights reserved.
 $COPYRIGHT$
@@ -83,18 +83,19 @@ developer's checkout, you have three main options:
 Use of GNU Autoconf, Automake, and Libtool (and m4)
 ===================================================
-This procedure is *ONLY* necessary if you are building from a
-developer's tree. If you have an Open MPI distribution tarball, this
-procedure is unnecessary -- you can (and should) skip reading this
-section.
+You need to read/care about this section *ONLY* if you are building
+from a developer's tree (i.e., a Git clone of the Open MPI source
+tree). If you have an Open MPI distribution tarball, the contents of
+this section are optional -- you can (and probably should) skip
+reading this section.
 If you are building Open MPI from a developer's tree, you must first
 install fairly recent versions of the GNU tools Autoconf, Automake,
 and Libtool (and possibly GNU m4, because recent versions of Autoconf
 have specific GNU m4 version requirements). The specific versions
-required depend on if you are using the trunk or a release branch (and
-which release branch you are using). The specific versions can be
-found at:
+required depend on if you are using the Git master branch or a release
+branch (and which release branch you are using). The specific
+versions can be found here:
 http://www.open-mpi.org/source/building.php

View file

@@ -6,7 +6,7 @@ dnl Corporation. All rights reserved.
 dnl Copyright (c) 2004-2005 The University of Tennessee and The University
 dnl of Tennessee Research Foundation. All rights
 dnl reserved.
 dnl Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 dnl University of Stuttgart. All rights reserved.
 dnl Copyright (c) 2004-2005 The Regents of the University of California.
 dnl All rights reserved.
@@ -15,9 +15,9 @@ dnl Copyright (c) 2009-2015 Cisco Systems, Inc. All rights reserved.
 dnl Copyright (c) 2015 Research Organization for Information Science
 dnl and Technology (RIST). All rights reserved.
 dnl $COPYRIGHT$
 dnl
 dnl Additional copyrights may follow
 dnl
 dnl $HEADER$
 dnl
@@ -45,8 +45,8 @@ AC_DEFUN([OPAL_WRAPPER_FLAGS_ADD], [
 # OPAL_SETUP_WRAPPER_INIT()
 # -------------------------
 # Setup wrapper compiler configuration information. Should be called early to
 # prevent lots of calculations and then an abort for a silly user typo. This
 # macro works in pair with OPAL_SETUP_WRAPPER_FINAL, which should be called
 # almost at the end of configure (after the last call to OPAL_WRAPPER_FLAGS_ADD
 # and after the MCA system has been setup).
@@ -74,50 +74,50 @@ AC_DEFUN([OPAL_WRAPPER_FLAGS_ADD], [
 # <flag>_prefix, configure is not. There's no known use case for
 # doing so, and we'd like to force the issue.
 AC_DEFUN([OPAL_SETUP_WRAPPER_INIT],[
 AC_ARG_WITH([wrapper-cflags],
 [AC_HELP_STRING([--with-wrapper-cflags],
 [Extra flags to add to CFLAGS when using mpicc])])
 AS_IF([test "$with_wrapper_cflags" = "yes" || test "$with_wrapper_cflags" = "no"],
 [AC_MSG_ERROR([--with-wrapper-cflags must have an argument.])])
 AC_ARG_WITH([wrapper-cflags-prefix],
 [AC_HELP_STRING([--with-wrapper-cflags-prefix],
 [Extra flags (before user flags) to add to CFLAGS when using mpicc])])
 AS_IF([test "$with_wrapper_cflags_prefix" = "yes" || test "$with_wrapper_cflags_prefix" = "no"],
 [AC_MSG_ERROR([--with-wrapper-cflags-prefix must have an argument.])])
 AC_ARG_WITH([wrapper-cxxflags],
 [AC_HELP_STRING([--with-wrapper-cxxflags],
 [Extra flags to add to CXXFLAGS when using mpiCC/mpic++])])
 AS_IF([test "$with_wrapper_cxxflags" = "yes" || test "$with_wrapper_cxxflags" = "no"],
 [AC_MSG_ERROR([--with-wrapper-cxxflags must have an argument.])])
 AC_ARG_WITH([wrapper-cxxflags-prefix],
 [AC_HELP_STRING([--with-wrapper-cxxflags-prefix],
 [Extra flags to add to CXXFLAGS when using mpiCC/mpic++])])
 AS_IF([test "$with_wrapper_cxxflags_prefix" = "yes" || test "$with_wrapper_cxxflags_prefix" = "no"],
 [AC_MSG_ERROR([--with-wrapper-cxxflags-prefix must have an argument.])])
 m4_ifdef([project_ompi], [
 AC_ARG_WITH([wrapper-fcflags],
 [AC_HELP_STRING([--with-wrapper-fcflags],
 [Extra flags to add to FCFLAGS when using mpifort])])
 AS_IF([test "$with_wrapper_fcflags" = "yes" || test "$with_wrapper_fcflags" = "no"],
 [AC_MSG_ERROR([--with-wrapper-fcflags must have an argument.])])
 AC_ARG_WITH([wrapper-fcflags-prefix],
 [AC_HELP_STRING([--with-wrapper-fcflags-prefix],
 [Extra flags (before user flags) to add to FCFLAGS when using mpifort])])
 AS_IF([test "$with_wrapper_fcflags_prefix" = "yes" || test "$with_wrapper_fcflags_prefix" = "no"],
 [AC_MSG_ERROR([--with-wrapper-fcflags-prefix must have an argument.])])])
 AC_ARG_WITH([wrapper-ldflags],
 [AC_HELP_STRING([--with-wrapper-ldflags],
 [Extra flags to add to LDFLAGS when using wrapper compilers])])
 AS_IF([test "$with_wrapper_ldflags" = "yes" || test "$with_wrapper_ldflags" = "no"],
 [AC_MSG_ERROR([--with-wrapper-ldflags must have an argument.])])
 AC_ARG_WITH([wrapper-libs],
 [AC_HELP_STRING([--with-wrapper-libs],
 [Extra flags to add to LIBS when using wrapper compilers])])
 AS_IF([test "$with_wrapper_libs" = "yes" || test "$with_wrapper_libs" = "no"],
@@ -186,7 +186,7 @@ EOF
 # Check to see if the linker supports the DT_RUNPATH flags via
 # --enable-new-dtags (a GNU ld-specific option). These flags are more
 # social than DT_RPATH -- they can be overridden by LD_LIBRARY_PATH
 # (where a regular DT_RPATH cannot).
 #
 # If DT_RUNPATH is supported, then we'll use *both* the RPATH and
 # RUNPATH flags in the LDFLAGS.
@@ -236,6 +236,17 @@ AC_DEFUN([RPATHIFY_LDFLAGS],[
 ])
+dnl
+dnl Avoid some repetitive code below
+dnl
+AC_DEFUN([_OPAL_SETUP_WRAPPER_FINAL_PKGCONFIG],[
+AC_MSG_CHECKING([for $1 pkg-config LDFLAGS])
+$1_PKG_CONFIG_LDFLAGS=`echo "$$1_WRAPPER_EXTRA_LDFLAGS" | sed -e 's/@{libdir}/\${libdir}/g'`
+AC_SUBST([$1_PKG_CONFIG_LDFLAGS])
+AC_MSG_RESULT([$$1_PKG_CONFIG_LDFLAGS])
+])
 # OPAL_SETUP_WRAPPER_FINAL()
 # ---------------------------
 AC_DEFUN([OPAL_SETUP_WRAPPER_FINAL],[
@@ -291,10 +302,7 @@ AC_DEFUN([OPAL_SETUP_WRAPPER_FINAL],[
 AC_MSG_RESULT([$OPAL_WRAPPER_EXTRA_LDFLAGS])
 # Convert @{libdir} to ${libdir} for pkg-config
-AC_MSG_CHECKING([for OPAL pkg-config LDFLAGS])
-OPAL_PKG_CONFIG_LDFLAGS=`echo "$OPAL_WRAPPER_EXTRA_LDFLAGS" | sed -e 's/@{libdir}/\${libdir}/g'`
-AC_SUBST([OPAL_PKG_CONFIG_LDFLAGS])
-AC_MSG_RESULT([$OPAL_PKG_CONFIG_LDFLAGS])
+_OPAL_SETUP_WRAPPER_FINAL_PKGCONFIG([OPAL])
 # wrapper_extra_libs doesn't really get populated until after the mca system runs
 # since most of the libs come from libtool. So this is the first time we can
@@ -335,10 +343,7 @@ AC_DEFUN([OPAL_SETUP_WRAPPER_FINAL],[
 AC_MSG_RESULT([$ORTE_WRAPPER_EXTRA_LDFLAGS])
 # Convert @{libdir} to ${libdir} for pkg-config
-AC_MSG_CHECKING([for ORTE pkg-config LDFLAGS])
-ORTE_PKG_CONFIG_LDFLAGS=`echo "$ORTE_WRAPPER_EXTRA_LDFLAGS" | sed -e 's/@{libdir}/\${libdir}/g'`
-AC_SUBST([ORTE_PKG_CONFIG_LDFLAGS])
-AC_MSG_RESULT([$ORTE_PKG_CONFIG_LDFLAGS])
+_OPAL_SETUP_WRAPPER_FINAL_PKGCONFIG([ORTE])
 AC_MSG_CHECKING([for ORTE LIBS])
 ORTE_WRAPPER_EXTRA_LIBS="$orte_mca_wrapper_extra_libs"
@@ -409,10 +414,7 @@ AC_DEFUN([OPAL_SETUP_WRAPPER_FINAL],[
 AC_MSG_RESULT([$OMPI_WRAPPER_EXTRA_LDFLAGS])
 # Convert @{libdir} to ${libdir} for pkg-config
-AC_MSG_CHECKING([for OMPI pkg-config LDFLAGS])
-OMPI_PKG_CONFIG_LDFLAGS=`echo "$OMPI_WRAPPER_EXTRA_LDFLAGS" | sed -e 's/@{libdir}/\${libdir}/g'`
-AC_SUBST([OMPI_PKG_CONFIG_LDFLAGS])
-AC_MSG_RESULT([$OMPI_PKG_CONFIG_LDFLAGS])
+_OPAL_SETUP_WRAPPER_FINAL_PKGCONFIG([OMPI])
 AC_MSG_CHECKING([for OMPI LIBS])
 OMPI_WRAPPER_EXTRA_LIBS="$ompi_mca_wrapper_extra_libs"

View file

@@ -283,16 +283,9 @@ m4_ifdef([project_orte],
 ############################################################################
 #
-# Part one of libtool magic. Enable static so that we have the --with
-# tests done up here and can check for OS. Save the values of
-# $enable_static and $enable_shared before setting the defaults,
-# because if the user specified --[en|dis]able-[static|shared] on the
-# command line, they'll already be set. In this way, we can tell if
-# the user requested something or if the default was set here.
+# Part one of libtool magic. Default to: enable shared, disable static.
 #
-ompi_enable_shared="$enable_shared"
-ompi_enable_static="$enable_static"
 AM_ENABLE_SHARED
 AM_DISABLE_STATIC

View file

@@ -32,8 +32,9 @@ static int ompi_comm_request_progress (void);
 void ompi_comm_request_init (void)
 {
 OBJ_CONSTRUCT(&ompi_comm_requests, opal_free_list_t);
-(void) opal_free_list_init (&ompi_comm_requests, sizeof (ompi_comm_request_t),
-OBJ_CLASS(ompi_comm_request_t), 0, -1, 8);
+(void) opal_free_list_init (&ompi_comm_requests, sizeof (ompi_comm_request_t), 8,
+OBJ_CLASS(ompi_comm_request_t), 0, 0, 0, -1, 8,
+NULL, 0, NULL, NULL, NULL);
 OBJ_CONSTRUCT(&ompi_comm_requests_active, opal_list_t);
 ompi_comm_request_progress_active = false;
@@ -237,10 +238,11 @@ OBJ_CLASS_INSTANCE(ompi_comm_request_item_t, opal_list_item_t, NULL, NULL);
 ompi_comm_request_t *ompi_comm_request_get (void)
 {
 opal_free_list_item_t *item;
-int rc;
-OPAL_FREE_LIST_GET(&ompi_comm_requests, item, rc);
-(void) rc;
+item = opal_free_list_get (&ompi_comm_requests);
+if (OPAL_UNLIKELY(NULL == item)) {
+return NULL;
+}
 OMPI_REQUEST_INIT((ompi_request_t *) item, false);
@@ -254,6 +256,6 @@ void ompi_comm_request_return (ompi_comm_request_t *request)
 request->context = NULL;
 }
-OPAL_FREE_LIST_RETURN(&ompi_comm_requests, (opal_free_list_item_t *) request);
+opal_free_list_return (&ompi_comm_requests, (opal_free_list_item_t *) request);
 }
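
The hunks above are the template for most of this merge: the old ompi_free_list macros are replaced by the opal_free_list function API. A minimal sketch of that usage pattern, assuming the argument layout of opal_free_list_init shown in this diff; my_item_t, my_list, and the helper functions are illustrative names only, not part of the commit:

    #include "opal/prefetch.h"
    #include "opal/class/opal_free_list.h"

    /* a list item embeds opal_free_list_item_t as its first member */
    typedef struct my_item_t {
        opal_free_list_item_t super;
        int payload;
    } my_item_t;
    OBJ_CLASS_INSTANCE(my_item_t, opal_free_list_item_t, NULL, NULL);

    static opal_free_list_t my_list;

    static int my_list_setup (void)
    {
        OBJ_CONSTRUCT(&my_list, opal_free_list_t);
        /* item size, alignment, class, payload size/alignment,
         * initial/max/increment element counts, then mpool, rcache
         * flags, rcache, item-init callback, and callback context */
        return opal_free_list_init (&my_list, sizeof (my_item_t), 8,
                                    OBJ_CLASS(my_item_t), 0, 0, 4, -1, 4,
                                    NULL, 0, NULL, NULL, NULL);
    }

    static my_item_t *my_item_get (void)
    {
        /* non-blocking: NULL means the list is empty and could not grow */
        opal_free_list_item_t *item = opal_free_list_get (&my_list);
        if (OPAL_UNLIKELY(NULL == item)) {
            return NULL;
        }
        return (my_item_t *) item;
    }

    static void my_item_put (my_item_t *item)
    {
        opal_free_list_return (&my_list, (opal_free_list_item_t *) item);
    }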

View file

@@ -1,3 +1,4 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
 /*
 * Copyright (c) 2007-2014 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2004-2013 The University of Tennessee and The University
@@ -8,6 +9,8 @@
 * Copyright (c) 2014 Research Organization for Information Science
 * and Technology (RIST). All rights reserved.
 * Copyright (c) 2014 Intel, Inc. All rights reserved.
+* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
+* reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@@ -76,7 +79,7 @@ static int host_is_big_endian = 0;
 * internal objects. We have to make sure we're able to find all of
 * them in the image and compute their ofset in order to be able to
 * parse them later. We need to find the opal_list_item_t, the
-* opal_list_t, the ompi_free_list_item_t, and the ompi_free_list_t.
+* opal_list_t, the opal_free_list_item_t, and the opal_free_list_t.
 *
 * Once we have these offsets, we should make sure that we have access
 * to all requests lists and types. We're looking here only at the
@@ -111,41 +114,41 @@ int ompi_fill_in_type_info(mqs_image *image, char **message)
 qh_type, opal_list_t, opal_list_sentinel);
 }
 {
-mqs_type* qh_type = mqs_find_type( image, "ompi_free_list_item_t", mqs_lang_c );
+mqs_type* qh_type = mqs_find_type( image, "opal_free_list_item_t", mqs_lang_c );
 if( !qh_type ) {
-missing_in_action = "ompi_free_list_item_t";
+missing_in_action = "opal_free_list_item_t";
 goto type_missing;
 }
 /* This is just an overloaded opal_list_item_t */
-i_info->ompi_free_list_item_t.type = qh_type;
-i_info->ompi_free_list_item_t.size = mqs_sizeof(qh_type);
+i_info->opal_free_list_item_t.type = qh_type;
+i_info->opal_free_list_item_t.size = mqs_sizeof(qh_type);
 }
 {
-mqs_type* qh_type = mqs_find_type( image, "ompi_free_list_t", mqs_lang_c );
+mqs_type* qh_type = mqs_find_type( image, "opal_free_list_t", mqs_lang_c );
 if( !qh_type ) {
-missing_in_action = "ompi_free_list_t";
+missing_in_action = "opal_free_list_t";
 goto type_missing;
 }
-i_info->ompi_free_list_t.type = qh_type;
-i_info->ompi_free_list_t.size = mqs_sizeof(qh_type);
-ompi_field_offset(i_info->ompi_free_list_t.offset.fl_mpool,
-qh_type, ompi_free_list_t, fl_mpool);
-ompi_field_offset(i_info->ompi_free_list_t.offset.fl_allocations,
-qh_type, ompi_free_list_t, fl_allocations);
-ompi_field_offset(i_info->ompi_free_list_t.offset.fl_frag_class,
-qh_type, ompi_free_list_t, fl_frag_class);
-ompi_field_offset(i_info->ompi_free_list_t.offset.fl_frag_size,
-qh_type, ompi_free_list_t, fl_frag_size);
-ompi_field_offset(i_info->ompi_free_list_t.offset.fl_frag_alignment,
-qh_type, ompi_free_list_t, fl_frag_alignment);
-ompi_field_offset(i_info->ompi_free_list_t.offset.fl_max_to_alloc,
-qh_type, ompi_free_list_t, fl_max_to_alloc);
-ompi_field_offset(i_info->ompi_free_list_t.offset.fl_num_per_alloc,
-qh_type, ompi_free_list_t, fl_num_per_alloc);
-ompi_field_offset(i_info->ompi_free_list_t.offset.fl_num_allocated,
-qh_type, ompi_free_list_t, fl_num_allocated);
+i_info->opal_free_list_t.type = qh_type;
+i_info->opal_free_list_t.size = mqs_sizeof(qh_type);
+ompi_field_offset(i_info->opal_free_list_t.offset.fl_mpool,
+qh_type, opal_free_list_t, fl_mpool);
+ompi_field_offset(i_info->opal_free_list_t.offset.fl_allocations,
+qh_type, opal_free_list_t, fl_allocations);
+ompi_field_offset(i_info->opal_free_list_t.offset.fl_frag_class,
+qh_type, opal_free_list_t, fl_frag_class);
+ompi_field_offset(i_info->opal_free_list_t.offset.fl_frag_size,
+qh_type, opal_free_list_t, fl_frag_size);
+ompi_field_offset(i_info->opal_free_list_t.offset.fl_frag_alignment,
+qh_type, opal_free_list_t, fl_frag_alignment);
+ompi_field_offset(i_info->opal_free_list_t.offset.fl_max_to_alloc,
+qh_type, opal_free_list_t, fl_max_to_alloc);
+ompi_field_offset(i_info->opal_free_list_t.offset.fl_num_per_alloc,
+qh_type, opal_free_list_t, fl_num_per_alloc);
+ompi_field_offset(i_info->opal_free_list_t.offset.fl_num_allocated,
+qh_type, opal_free_list_t, fl_num_allocated);
 }
 {
 mqs_type* qh_type = mqs_find_type( image, "opal_hash_table_t", mqs_lang_c );

View file

@@ -1,9 +1,12 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
 /*
 * Copyright (c) 2007-2014 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2004-2013 The University of Tennessee and The University
 * of Tennessee Research Foundation. All rights
 * reserved.
 * Copyright (c) 2012-2013 Inria. All rights reserved.
+* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
+* reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@@ -65,7 +68,7 @@ typedef struct
 struct {
 mqs_type *type;
 int size;
-} ompi_free_list_item_t;
+} opal_free_list_item_t;
 struct {
 mqs_type *type;
 int size;
@@ -79,7 +82,7 @@ typedef struct
 int fl_num_per_alloc; /* size_t */
 int fl_num_allocated; /* size_t */
 } offset;
-} ompi_free_list_t;
+} opal_free_list_t;
 struct {
 mqs_type *type;
 int size;

View file

@@ -1,4 +1,4 @@
-/* -*- Mode: C; c-basic-offset:4 ; -*- */
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
 /*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 * University Research and Technology
@@ -11,8 +11,8 @@
 * Copyright (c) 2004-2005 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 2007-2011 Cisco Systems, Inc. All rights reserved.
-* Copyright (c) 2012 Los Alamos National Security, LLC.
-* All rights reserved.
+* Copyright (c) 2012-2015 Los Alamos National Security, LLC. All rights
+* reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@@ -62,7 +62,7 @@
 * debuggers will be unable to initialize the Open MPI debug library.
 */
 #include "opal/class/opal_list.h"
-#include "opal/class/ompi_free_list.h"
+#include "opal/class/opal_free_list.h"
 #include "ompi/request/request.h"
 #include "ompi/mca/pml/base/pml_base_request.h"
 #include "ompi/mca/pml/base/pml_base_sendreq.h"
@@ -110,8 +110,8 @@ OMPI_DECLSPEC int MPIR_debug_typedefs_sizeof[] = {
 */
 OMPI_DECLSPEC opal_list_item_t* opal_list_item_t_type_force_inclusion = NULL;
 OMPI_DECLSPEC opal_list_t* opal_list_t_type_force_inclusion = NULL;
-OMPI_DECLSPEC ompi_free_list_item_t* ompi_free_list_item_t_type_force_inclusion = NULL;
-OMPI_DECLSPEC ompi_free_list_t* ompi_free_list_t_type_force_inclusion = NULL;
+OMPI_DECLSPEC opal_free_list_item_t* opal_free_list_item_t_type_force_inclusion = NULL;
+OMPI_DECLSPEC opal_free_list_t* opal_free_list_t_type_force_inclusion = NULL;
 OMPI_DECLSPEC ompi_request_t* ompi_request_t_type_force_inclusion = NULL;
 OMPI_DECLSPEC mca_pml_base_request_t* mca_pml_base_request_t_type_force_inclusion = NULL;
 OMPI_DECLSPEC mca_pml_base_send_request_t* mca_pml_base_send_request_t_type_force_inclusion = NULL;

View file

@@ -1,9 +1,12 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
 /*
 * Copyright (c) 2007-2008 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2004-2010 The University of Tennessee and The University
 * of Tennessee Research Foundation. All rights
 * reserved.
 * Copyright (c) 2008-2009 Sun Microsystems, Inc. All rights reserved.
+* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
+* reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@@ -860,9 +863,9 @@ static int next_item_opal_list_t( mqs_process *proc, mpi_process_info *p_info,
 #if defined(CODE_NOT_USED)
 /**
-* Parsing the ompi_free_list lists.
+* Parsing the opal_free_list lists.
 */
-static void ompi_free_list_t_dump_position( mqs_ompi_free_list_t_pos* position )
+static void opal_free_list_t_dump_position( mqs_opal_free_list_t_pos* position )
 {
 printf( "position->opal_list_t_pos.current_item = 0x%llx\n", (long long)position->opal_list_t_pos.current_item );
 printf( "position->opal_list_t_pos.list = 0x%llx\n", (long long)position->opal_list_t_pos.list );
@@ -881,8 +884,8 @@ static void ompi_free_list_t_dump_position( mqs_ompi_free_list_t_pos* position )
 }
 #endif /* CODE_NOT_USED */
-static int ompi_free_list_t_init_parser( mqs_process *proc, mpi_process_info *p_info,
-mqs_ompi_free_list_t_pos* position, mqs_taddr_t free_list )
+static int opal_free_list_t_init_parser( mqs_process *proc, mpi_process_info *p_info,
+mqs_opal_free_list_t_pos* position, mqs_taddr_t free_list )
 {
 mqs_image * image = mqs_get_image (proc);
 mpi_image_info *i_info = (mpi_image_info *)mqs_get_image_info (image);
@@ -891,22 +894,22 @@ static int ompi_free_list_t_init_parser( mqs_process *proc, mpi_process_info *p_
 position->free_list = free_list;
 position->fl_frag_size =
-ompi_fetch_size_t( proc, position->free_list + i_info->ompi_free_list_t.offset.fl_frag_size,
+ompi_fetch_size_t( proc, position->free_list + i_info->opal_free_list_t.offset.fl_frag_size,
 p_info );
 position->fl_frag_alignment =
-ompi_fetch_size_t( proc, position->free_list + i_info->ompi_free_list_t.offset.fl_frag_alignment,
+ompi_fetch_size_t( proc, position->free_list + i_info->opal_free_list_t.offset.fl_frag_alignment,
 p_info );
 position->fl_frag_class =
-ompi_fetch_pointer( proc, position->free_list + i_info->ompi_free_list_t.offset.fl_frag_class,
+ompi_fetch_pointer( proc, position->free_list + i_info->opal_free_list_t.offset.fl_frag_class,
 p_info );
 position->fl_mpool =
-ompi_fetch_pointer( proc, position->free_list + i_info->ompi_free_list_t.offset.fl_mpool,
+ompi_fetch_pointer( proc, position->free_list + i_info->opal_free_list_t.offset.fl_mpool,
 p_info );
 position->fl_num_per_alloc =
-ompi_fetch_size_t( proc, position->free_list + i_info->ompi_free_list_t.offset.fl_num_per_alloc,
+ompi_fetch_size_t( proc, position->free_list + i_info->opal_free_list_t.offset.fl_num_per_alloc,
 p_info );
 position->fl_num_allocated =
-ompi_fetch_size_t( proc, position->free_list + i_info->ompi_free_list_t.offset.fl_num_allocated,
+ompi_fetch_size_t( proc, position->free_list + i_info->opal_free_list_t.offset.fl_num_allocated,
 p_info );
 if( 0 == position->fl_mpool ) {
@@ -919,7 +922,7 @@ static int ompi_free_list_t_init_parser( mqs_process *proc, mpi_process_info *p_
 position->fl_frag_alignment, mqs_taddr_t );
 /**
-* Work around the strange ompi_free_list_t way to allocate elements. The first chunk is
+* Work around the strange opal_free_list_t way to allocate elements. The first chunk is
 * not required to have the same size as the others.
 * A similar work around should be set for the last chunk of allocations too !!! But how
 * can we solve ONE equation with 2 unknowns ?
@@ -931,7 +934,7 @@ static int ompi_free_list_t_init_parser( mqs_process *proc, mpi_process_info *p_
 if( 0 == position->fl_num_initial_alloc )
 position->fl_num_initial_alloc = position->fl_num_per_alloc;
 }
-DEBUG(VERBOSE_LISTS,("ompi_free_list_t fl_frag_size = %lld fl_header_space = %lld\n"
+DEBUG(VERBOSE_LISTS,("opal_free_list_t fl_frag_size = %lld fl_header_space = %lld\n"
 " fl_frag_alignment = %lld fl_num_per_alloc = %lld\n"
 " fl_num_allocated = %lld fl_num_initial_alloc = %lld\n"
 " header_space = %lld\n",
@@ -944,7 +947,7 @@ static int ompi_free_list_t_init_parser( mqs_process *proc, mpi_process_info *p_
 * Initialize the pointer to the opal_list_t.
 */
 opal_list_t_init_parser( proc, p_info, &position->opal_list_t_pos,
-position->free_list + i_info->ompi_free_list_t.offset.fl_allocations );
+position->free_list + i_info->opal_free_list_t.offset.fl_allocations );
 next_item_opal_list_t( proc, p_info, &position->opal_list_t_pos, &active_allocation );
 DEBUG(VERBOSE_LISTS,("active_allocation 0x%llx header_space %d\n",
 (long long)active_allocation, (int)position->header_space));
@@ -954,7 +957,7 @@ static int ompi_free_list_t_init_parser( mqs_process *proc, mpi_process_info *p_
 /**
 * Handle alignment issues...
 */
-active_allocation += i_info->ompi_free_list_item_t.size;
+active_allocation += i_info->opal_free_list_item_t.size;
 active_allocation = OPAL_ALIGN( active_allocation,
 position->fl_frag_alignment, mqs_taddr_t );
 /**
@@ -968,15 +971,15 @@ static int ompi_free_list_t_init_parser( mqs_process *proc, mpi_process_info *p_
 }
 position->current_item = active_allocation;
-/*ompi_free_list_t_dump_position( position );*/
+/*opal_free_list_t_dump_position( position );*/
 return mqs_ok;
 }
 /**
 * Return the current position and move the internal counter to the next element.
 */
-static int ompi_free_list_t_next_item( mqs_process *proc, mpi_process_info *p_info,
-mqs_ompi_free_list_t_pos* position, mqs_taddr_t* active_item )
+static int opal_free_list_t_next_item( mqs_process *proc, mpi_process_info *p_info,
+mqs_opal_free_list_t_pos* position, mqs_taddr_t* active_item )
 {
 mqs_image * image = mqs_get_image (proc);
 mpi_image_info *i_info = (mpi_image_info *)mqs_get_image_info (image);
@@ -988,7 +991,7 @@ static int ompi_free_list_t_next_item( mqs_process *proc, mpi_process_info *p_in
 position->current_item += position->header_space;
 if( position->current_item >= position->upper_bound ) {
-DEBUG(VERBOSE_LISTS,("Reach the end of one of the ompi_free_list_t "
+DEBUG(VERBOSE_LISTS,("Reach the end of one of the opal_free_list_t "
 "allocations. Go to the next one\n"));
 /* we should go to the next allocation */
 next_item_opal_list_t( proc, p_info,
@@ -1000,7 +1003,7 @@ static int ompi_free_list_t_next_item( mqs_process *proc, mpi_process_info *p_in
 /**
 * Handle alignment issues...
 */
-active_allocation += i_info->ompi_free_list_item_t.size;
+active_allocation += i_info->opal_free_list_item_t.size;
 active_allocation = OPAL_ALIGN( active_allocation,
 position->fl_frag_alignment, mqs_taddr_t );
 /**
@@ -1012,7 +1015,7 @@ static int ompi_free_list_t_next_item( mqs_process *proc, mpi_process_info *p_in
 DEBUG(VERBOSE_LISTS,("there are more elements in the list "
 "active_allocation = %llx upper_bound = %llx\n",
 (long long)active_allocation, (long long)position->upper_bound));
-/*ompi_free_list_t_dump_position( position );*/
+/*opal_free_list_t_dump_position( position );*/
 }
 DEBUG(VERBOSE_LISTS,("Free list actual position 0x%llx next element at 0x%llx\n",
 (long long)*active_item, (long long)position->current_item));
@@ -1079,7 +1082,7 @@ static int fetch_request( mqs_process *proc, mpi_process_info *p_info,
 /* If we get a PML request with an internal tag we will jump back here */
 rescan_requests:
 while( 1 ) {
-ompi_free_list_t_next_item( proc, p_info,
+opal_free_list_t_next_item( proc, p_info,
 &extra->next_msg, &current_item );
 if( 0 == current_item ) {
 DEBUG(VERBOSE_REQ,("no more items in the %s request queue\n",
@@ -1239,12 +1242,12 @@ int mqs_setup_operation_iterator (mqs_process *proc, int op)
 switch (op) {
 case mqs_pending_sends:
 DEBUG(VERBOSE_REQ,("setup the send queue iterator\n"));
-ompi_free_list_t_init_parser( proc, p_info, &extra->next_msg, extra->send_queue_base );
+opal_free_list_t_init_parser( proc, p_info, &extra->next_msg, extra->send_queue_base );
 return mqs_ok;
 case mqs_pending_receives:
 DEBUG(VERBOSE_REQ,("setup the receive queue iterator\n"));
-ompi_free_list_t_init_parser( proc, p_info, &extra->next_msg, extra->recv_queue_base );
+opal_free_list_t_init_parser( proc, p_info, &extra->next_msg, extra->recv_queue_base );
 return mqs_ok;
 case mqs_unexpected_messages: /* TODO */

View file

@@ -1,8 +1,11 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
 /*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2004-2007 The University of Tennessee and The University
 * of Tennessee Research Foundation. All rights
 * reserved.
+* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
+* reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@@ -86,7 +89,7 @@ typedef struct {
 mqs_tword_t fl_num_per_alloc; /* size_t */
 mqs_tword_t fl_num_allocated; /* size_t */
 mqs_tword_t fl_num_initial_alloc; /* size_t */
-} mqs_ompi_free_list_t_pos;
+} mqs_opal_free_list_t_pos;
 /* Information for a single process, a list of communicators, some
@@ -112,7 +115,7 @@ typedef struct
 int world_proc_array_entries;
 mqs_taddr_t* world_proc_array;
-mqs_ompi_free_list_t_pos next_msg; /* And state for the message iterator */
+mqs_opal_free_list_t_pos next_msg; /* And state for the message iterator */
 mqs_op_class what; /* What queue are we looking on */
 } mpi_process_info_extra;

7
ompi/mca/bcol/base/owner.txt Normal file
View file

@@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: ORNL?
status: unmaintained

View file

@@ -4,6 +4,7 @@
 * Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2013-2014 Los Alamos National Security, LLC. All rights
 * reserved.
+* Copyright (c) 2015 Cisco Systems, Inc. All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@@ -196,7 +197,7 @@ static int allreduce_fanout (mca_bcol_basesmuma_module_t *bcol_module, volatile
 static int bcol_basesmuma_allreduce_intra_fanin_fanout_progress (bcol_function_args_t *input_args, mca_bcol_base_function_t *c_input_args)
 {
 mca_bcol_basesmuma_module_t *bcol_module = (mca_bcol_basesmuma_module_t *) c_input_args->bcol_module;
-int buff_idx = buff_idx = input_args->src_desc->buffer_index;
+int buff_idx = input_args->src_desc->buffer_index;
 int *iteration = &bcol_module->ml_mem.nb_coll_desc[buff_idx].iteration;
 void *data_addr = (void *) input_args->src_desc->data_addr;
 int my_node_index, my_rank, group_size, leading_dim, idx;
@@ -268,7 +269,7 @@ int bcol_basesmuma_allreduce_intra_fanin_fanout(bcol_function_args_t *input_args
 {
 /* local variables */
 mca_bcol_basesmuma_module_t *bcol_module = (mca_bcol_basesmuma_module_t *) c_input_args->bcol_module;
-int buff_idx = buff_idx = input_args->src_desc->buffer_index;
+int buff_idx = input_args->src_desc->buffer_index;
 int *iteration = &bcol_module->ml_mem.nb_coll_desc[buff_idx].iteration;
 void *data_addr = (void *) input_args->src_desc->data_addr;
 volatile mca_bcol_basesmuma_header_t *my_ctl_pointer;

View file

@@ -2,7 +2,7 @@
 /*
 * Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
 * Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
-* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
+* Copyright (c) 2013-2015 Los Alamos National Security, LLC. All rights
 * reserved.
 * $COPYRIGHT$
 *
@@ -767,7 +767,7 @@ typedef struct mca_bcol_base_function_t mca_bcol_base_function_t;
 struct mca_bcol_base_descriptor_t {
-ompi_free_list_item_t super;
+opal_free_list_item_t super;
 /* Vasily: will be described in the future */
 };
 typedef struct mca_bcol_base_descriptor_t mca_bcol_base_descriptor_t;

7
ompi/mca/bcol/iboffload/owner.txt Normal file
View file

@@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: ORNL?
status: unmaintained

View file

@@ -1,6 +1,9 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
 /*
 * Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
 * Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
+* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
+* reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@@ -74,7 +77,7 @@ struct mca_bcol_ptpcoll_component_t {
 };
 struct mca_bcol_ptpcoll_collreq_t {
-ompi_free_list_item_t super;
+opal_free_list_item_t super;
 int tag;
 int num_reqs;
@@ -342,7 +345,7 @@ struct mca_bcol_ptpcoll_module_t {
 int **allgather_offsets;
 /* Free lists of outstanding collective operations */
-ompi_free_list_t collreqs_free;
+opal_free_list_t collreqs_free;
 int log_group_size;
 struct iovec *alltoall_iovec;

View file

@@ -1,9 +1,12 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
 /*
 * Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
 * Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2013 The University of Tennessee and The University
 * of Tennessee Research Foundation. All rights
 * reserved.
+* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
+* reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@@ -51,11 +54,11 @@ static int bcol_ptpcoll_barrier_recurs_knomial_new(
 **rank_exchanges = my_exchange_node->rank_exchanges;
 ompi_request_t **requests;
-ompi_free_list_item_t *item;
+opal_free_list_item_t *item;
 mca_bcol_ptpcoll_collreq_t *collreq;
-OMPI_FREE_LIST_WAIT_MT(&ptpcoll_module->collreqs_free, item);
+item = opal_free_list_wait (&ptpcoll_module->collreqs_free);
 if (OPAL_UNLIKELY(NULL == item)) {
 PTPCOLL_ERROR(("Free list waiting failed."));
 return OMPI_ERR_OUT_OF_RESOURCE;
@@ -213,7 +216,7 @@ static int bcol_ptpcoll_barrier_recurs_knomial_new(
 }
 }
-OMPI_FREE_LIST_RETURN_MT(&ptpcoll_module->collreqs_free, (ompi_free_list_item_t *) collreq);
+opal_free_list_return (&ptpcoll_module->collreqs_free, (opal_free_list_item_t *) collreq);
 return BCOL_FN_COMPLETE;
 }
@@ -382,11 +385,11 @@ static int bcol_ptpcoll_barrier_recurs_knomial_extra_new(
 int *extra_sources_array = my_exchange_node->rank_extra_sources_array;
 ompi_request_t **requests;
-ompi_free_list_item_t *item;
+opal_free_list_item_t *item;
 mca_bcol_ptpcoll_collreq_t *collreq;
-OMPI_FREE_LIST_WAIT_MT(&ptpcoll_module->collreqs_free, item);
+item = opal_free_list_wait (&ptpcoll_module->collreqs_free);
 if (OPAL_UNLIKELY(NULL == item)) {
 PTPCOLL_ERROR(("Free list waiting failed."));
 return OMPI_ERR_OUT_OF_RESOURCE;
@@ -440,7 +443,7 @@ static int bcol_ptpcoll_barrier_recurs_knomial_extra_new(
 return BCOL_FN_STARTED;
 }
-OMPI_FREE_LIST_RETURN_MT(&ptpcoll_module->collreqs_free, (ompi_free_list_item_t *) collreq);
+opal_free_list_return (&ptpcoll_module->collreqs_free, (opal_free_list_item_t *) collreq);
 return BCOL_FN_COMPLETE;
 }
@@ -464,11 +467,11 @@ static int bcol_ptpcoll_barrier_recurs_dbl_new(
 n_exchange = ptp_module->super.sbgp_partner_module->n_levels_pow2;
 ompi_request_t **requests;
-ompi_free_list_item_t *item;
+opal_free_list_item_t *item;
 mca_bcol_ptpcoll_collreq_t *collreq;
-OMPI_FREE_LIST_WAIT_MT(&ptp_module->collreqs_free, item);
+item = opal_free_list_wait (&ptp_module->collreqs_free);
 if (OPAL_UNLIKELY(NULL == item)) {
 PTPCOLL_ERROR(("Free list waiting failed."));
 return OMPI_ERR_OUT_OF_RESOURCE;
@@ -618,7 +621,7 @@ static int bcol_ptpcoll_barrier_recurs_dbl_new(
 }
 }
-OMPI_FREE_LIST_RETURN_MT(&ptp_module->collreqs_free, (ompi_free_list_item_t *) collreq);
+opal_free_list_return (&ptp_module->collreqs_free, (opal_free_list_item_t *) collreq);
 return BCOL_FN_COMPLETE;
 }
@@ -765,7 +768,7 @@ static int bcol_ptpcoll_barrier_recurs_dbl_extra_new(
 tag, my_extra_partner_comm_rank;
 ompi_request_t **requests;
-ompi_free_list_item_t *item;
+opal_free_list_item_t *item;
 mca_bcol_ptpcoll_collreq_t *collreq;
@@ -773,7 +776,7 @@ static int bcol_ptpcoll_barrier_recurs_dbl_extra_new(
 (mca_bcol_ptpcoll_module_t *) const_args->bcol_module;
 ompi_communicator_t *comm = ptp_module->super.sbgp_partner_module->group_comm;
-OMPI_FREE_LIST_WAIT_MT(&ptp_module->collreqs_free, item);
+item = opal_free_list_wait (&ptp_module->collreqs_free);
 if (OPAL_UNLIKELY(NULL == item)) {
 PTPCOLL_ERROR(("Free list waiting failed."));
 return OMPI_ERR_OUT_OF_RESOURCE;
@@ -829,7 +832,7 @@ static int bcol_ptpcoll_barrier_recurs_dbl_extra_new(
 return BCOL_FN_STARTED;
 }
-OMPI_FREE_LIST_RETURN_MT(&ptp_module->collreqs_free, (ompi_free_list_item_t *) collreq);
+opal_free_list_return (&ptp_module->collreqs_free, (opal_free_list_item_t *) collreq);
 return BCOL_FN_COMPLETE;
 }

View file

@@ -1,6 +1,9 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
 /*
 * Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
 * Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
+* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
+* reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@@ -101,7 +104,7 @@ collreq_destruct(mca_bcol_ptpcoll_collreq_t *collreq)
 }
 OBJ_CLASS_INSTANCE(mca_bcol_ptpcoll_collreq_t,
-ompi_free_list_item_t,
+opal_free_list_item_t,
 collreq_construct,
 collreq_destruct);

View file

@@ -2,7 +2,7 @@
 /*
 * Copyright (c) 2009-2013 Oak Ridge National Laboratory. All rights reserved.
 * Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
-* Copyright (c) 2012-2014 Los Alamos National Security, LLC. All rights
+* Copyright (c) 2012-2015 Los Alamos National Security, LLC. All rights
 * reserved.
 * Copyright (c) 2014 Research Organization for Information Science
 * and Technology (RIST). All rights reserved.
@@ -612,7 +612,7 @@ static int load_recursive_knomial_info(mca_bcol_ptpcoll_module_t *ptpcoll_module
 return rc;
 }
-static void bcol_ptpcoll_collreq_init(ompi_free_list_item_t *item, void* ctx)
+static int bcol_ptpcoll_collreq_init(opal_free_list_item_t *item, void* ctx)
 {
 mca_bcol_ptpcoll_module_t *ptpcoll_module= (mca_bcol_ptpcoll_module_t *) ctx;
 mca_bcol_ptpcoll_collreq_t *collreq = (mca_bcol_ptpcoll_collreq_t *) item;
@@ -627,6 +627,12 @@ static void bcol_ptpcoll_collreq_init(ompi_free_list_item_t *item, void* ctx)
 calloc(2 * ptpcoll_module->k_nomial_radix, sizeof(ompi_request_t *));
 break;
 }
+if (NULL == collreq->requests) {
+return OPAL_ERR_OUT_OF_RESOURCE;
+}
+return OPAL_SUCCESS;
 }
 /* query to see if the module is available for use on the given
@@ -705,18 +711,18 @@ mca_bcol_base_module_t **mca_bcol_ptpcoll_comm_query(mca_sbgp_base_module_t *sbg
 }
 /* creating collfrag free list */
-OBJ_CONSTRUCT(&ptpcoll_module->collreqs_free, ompi_free_list_t);
-rc = ompi_free_list_init_ex_new(&ptpcoll_module->collreqs_free,
+OBJ_CONSTRUCT(&ptpcoll_module->collreqs_free, opal_free_list_t);
+rc = opal_free_list_init (&ptpcoll_module->collreqs_free,
 sizeof(mca_bcol_ptpcoll_collreq_t),
 BCOL_PTP_CACHE_LINE_SIZE,
 OBJ_CLASS(mca_bcol_ptpcoll_collreq_t),
 0, BCOL_PTP_CACHE_LINE_SIZE,
 256 /* free_list_num */,
 -1 /* free_list_max, -1 = infinite */,
 32 /* free_list_inc */,
-NULL,
+NULL, 0, NULL,
 bcol_ptpcoll_collreq_init,
 ptpcoll_module);
 if (OMPI_SUCCESS != rc) {
 goto CLEANUP;
 }
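
The hunk above also shows the other half of the API change: the per-item init callback passed to opal_free_list_init now returns an int status instead of void, so an allocation failure inside the callback can be reported. A hedged sketch of such a callback, modeled on bcol_ptpcoll_collreq_init above; my_collreq_t, my_module_t, and my_collreq_init are invented names, not part of the commit:

    #include <stdlib.h>
    #include "opal/constants.h"
    #include "opal/class/opal_free_list.h"

    typedef struct my_collreq_t {
        opal_free_list_item_t super;
        void **requests;
    } my_collreq_t;

    typedef struct my_module_t {
        int nrequests;
    } my_module_t;

    /* item-init callback: invoked once for every freshly allocated list
     * item; returning an error aborts growing the free list */
    static int my_collreq_init (opal_free_list_item_t *item, void *ctx)
    {
        my_collreq_t *collreq = (my_collreq_t *) item;
        my_module_t *module = (my_module_t *) ctx;  /* ctx = last argument of opal_free_list_init */

        collreq->requests = calloc (module->nrequests, sizeof (void *));
        if (NULL == collreq->requests) {
            return OPAL_ERR_OUT_OF_RESOURCE;
        }
        return OPAL_SUCCESS;
    }

Under that assumption, the callback and its context pointer are passed as the last two arguments of opal_free_list_init, matching the call reconstructed in the hunk above.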

7
ompi/mca/bcol/ptpcoll/owner.txt Normal file
View file

@@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: ORNL?
status: unmaintained

7
ompi/mca/bml/base/owner.txt Normal file
View file

@@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: project
status: maintenance

7
ompi/mca/bml/r2/owner.txt Normal file
View file

@@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: SNL
status: maintenance

7
ompi/mca/coll/base/owner.txt Normal file
View file

@@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: project
status: maintenance

View file

@@ -10,6 +10,7 @@
 * Copyright (c) 2004-2005 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 2012 Oak Ridge National Labs. All rights reserved.
+* Copyright (c) 2015 Cisco Systems, Inc. All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@@ -120,6 +121,9 @@ mca_coll_basic_barrier_intra_log(struct ompi_communicator_t *comm,
 dim = comm->c_cube_dim;
 hibit = opal_hibit(rank, dim);
+if (hibit < 0) {
+return MPI_ERR_OTHER;
+}
 --dim;
 /* Receive from children. */

View file

@@ -9,6 +9,7 @@
 * University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 * All rights reserved.
+* Copyright (c) 2015 Cisco Systems, Inc. All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@@ -134,6 +135,9 @@ mca_coll_basic_bcast_log_intra(void *buff, int count,
 dim = comm->c_cube_dim;
 hibit = opal_hibit(vrank, dim);
+if (hibit < 0) {
+return MPI_ERR_OTHER;
+}
 --dim;
 /* Receive data from parent in the tree. */

7
ompi/mca/coll/basic/owner.txt Normal file
View file

@@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: UH
status: maintenance

7
ompi/mca/coll/cuda/owner.txt Normal file
View file

@@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: NVIDIA
status: maintenance

7
ompi/mca/coll/demo/owner.txt Normal file
View file

@@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: project
status: maintenance

7
ompi/mca/coll/fca/owner.txt Normal file
View file

@@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: MELLANOX
status: active

View file

@@ -81,7 +81,7 @@ struct mca_coll_hcoll_component_t {
 /* FCA global stuff */
 mca_coll_hcoll_ops_t hcoll_ops;
-ompi_free_list_t requests;
+opal_free_list_t requests;
 };
 typedef struct mca_coll_hcoll_component_t mca_coll_hcoll_component_t;

Просмотреть файл

@ -1,9 +1,12 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/* /*
* Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved. * Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved. * Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
* Copyright (c) 2013 The University of Tennessee and The University * Copyright (c) 2013 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights * of Tennessee Research Foundation. All rights
* reserved. * reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -119,8 +122,8 @@ static void init_module_fns(void){
void hcoll_rte_fns_setup(void) void hcoll_rte_fns_setup(void)
{ {
init_module_fns(); init_module_fns();
OBJ_CONSTRUCT(&mca_coll_hcoll_component.requests, ompi_free_list_t); OBJ_CONSTRUCT(&mca_coll_hcoll_component.requests, opal_free_list_t);
ompi_free_list_init_ex_new( opal_free_list_init(
&(mca_coll_hcoll_component.requests), &(mca_coll_hcoll_component.requests),
sizeof(ompi_request_t), sizeof(ompi_request_t),
/* no special alignment needed */ /* no special alignment needed */
@ -132,7 +135,9 @@ void hcoll_rte_fns_setup(void)
10, 10,
-1, -1,
10, 10,
/* No Mpool */ /* No Mpool or init function */
NULL,
0,
NULL, NULL,
NULL, NULL,
NULL NULL
@ -175,7 +180,7 @@ static int recv_nb(struct dte_data_representation_t data,
int rc; int rc;
size_t size; size_t size;
ompi_request_t *ompi_req; ompi_request_t *ompi_req;
ompi_free_list_item_t *item; opal_free_list_item_t *item;
if (!buffer && !HCOL_DTE_IS_ZERO(data)) { if (!buffer && !HCOL_DTE_IS_ZERO(data)) {
fprintf(stderr, "***Error in hcolrte_rml_recv_nb: buffer pointer is NULL" fprintf(stderr, "***Error in hcolrte_rml_recv_nb: buffer pointer is NULL"
@ -246,7 +251,6 @@ static int send_nb( dte_data_representation_t data,
int rc; int rc;
size_t size; size_t size;
ompi_request_t *ompi_req; ompi_request_t *ompi_req;
ompi_free_list_item_t *item;
if (!buffer && !HCOL_DTE_IS_ZERO(data)) { if (!buffer && !HCOL_DTE_IS_ZERO(data)) {
fprintf(stderr, "***Error in hcolrte_rml_send_nb: buffer pointer is NULL" fprintf(stderr, "***Error in hcolrte_rml_send_nb: buffer pointer is NULL"
" for non DTE_ZERO INLINE data representation\n"); " for non DTE_ZERO INLINE data representation\n");
@ -390,8 +394,8 @@ request_free(struct ompi_request_t **ompi_req)
static void* get_coll_handle(void) static void* get_coll_handle(void)
{ {
ompi_request_t *ompi_req; ompi_request_t *ompi_req;
ompi_free_list_item_t *item; opal_free_list_item_t *item;
OMPI_FREE_LIST_WAIT_MT(&(mca_coll_hcoll_component.requests),item); item = opal_free_list_wait (&(mca_coll_hcoll_component.requests));
if (OPAL_UNLIKELY(NULL == item)) { if (OPAL_UNLIKELY(NULL == item)) {
HCOL_ERROR("Wait for free list failed.\n"); HCOL_ERROR("Wait for free list failed.\n");
return NULL; return NULL;
@ -412,8 +416,8 @@ static int coll_handle_test(void* handle)
static void coll_handle_free(void *handle){ static void coll_handle_free(void *handle){
ompi_request_t *ompi_req = (ompi_request_t *)handle; ompi_request_t *ompi_req = (ompi_request_t *)handle;
OMPI_FREE_LIST_RETURN_MT(&mca_coll_hcoll_component.requests, opal_free_list_return (&mca_coll_hcoll_component.requests,
(ompi_free_list_item_t *)ompi_req); (opal_free_list_item_t *)ompi_req);
} }
static void coll_handle_complete(void *handle) static void coll_handle_complete(void *handle)
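For readers following the ompi_free_list_t to opal_free_list_t conversion in this file (and repeated throughout this commit): opal_free_list_init() folds the old ompi_free_list_init_ex_new() arguments together with the mpool/rcache triple and the item-init pair. The sketch below is a hypothetical restatement of the converted call above; the parameter comments are inferred from the call sites in this diff, so treat opal/class/opal_free_list.h as the authoritative prototype.

#include "opal/class/opal_free_list.h"
#include "ompi/request/request.h"

/* Hypothetical component-style list, mirroring mca_coll_hcoll_component.requests. */
static opal_free_list_t example_requests;

static int example_requests_setup(void)
{
    OBJ_CONSTRUCT(&example_requests, opal_free_list_t);

    return opal_free_list_init(&example_requests,
                               sizeof(ompi_request_t),     /* item size                  */
                               opal_cache_line_size,       /* item alignment             */
                               OBJ_CLASS(ompi_request_t),  /* item class                 */
                               0, 0,                       /* payload size / alignment   */
                               10,                         /* items allocated up front   */
                               -1,                         /* max items (-1 = unlimited) */
                               10,                         /* items added per growth     */
                               NULL, 0, NULL,              /* no mpool / rcache          */
                               NULL, NULL);                /* no item-init fn / context  */
}

The allocation side changes the same way: OMPI_FREE_LIST_WAIT_MT(&list, item) becomes item = opal_free_list_wait(&list), and OMPI_FREE_LIST_RETURN_MT() becomes opal_free_list_return(), as the later hunks in this file show.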

ompi/mca/coll/hcoll/owner.txt (new file, +7)

@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: MELLANOX
status: active

ompi/mca/coll/hierarch/owner.txt (new file, +7)

@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: UH
status: unmaintained

ompi/mca/coll/inter/owner.txt (new file, +7)

@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: UH
status: maintenance


@ -11,7 +11,7 @@
* Copyright (c) 2004-2005 The Regents of the University of California. * Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved. * All rights reserved.
* Copyright (c) 2008 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2008 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights * Copyright (c) 2013-2015 Los Alamos National Security, LLC. All rights
* reserved. * reserved.
* Copyright (c) 2014 Research Organization for Information Science * Copyright (c) 2014 Research Organization for Information Science
* and Technology (RIST). All rights reserved. * and Technology (RIST). All rights reserved.
@ -69,7 +69,7 @@ BEGIN_C_DECLS
struct ompi_coll_libnbc_component_t { struct ompi_coll_libnbc_component_t {
mca_coll_base_component_2_0_0_t super; mca_coll_base_component_2_0_0_t super;
ompi_free_list_t requests; opal_free_list_t requests;
opal_list_t active_requests; opal_list_t active_requests;
int32_t active_comms; int32_t active_comms;
opal_atomic_lock_t progress_lock; opal_atomic_lock_t progress_lock;
@ -123,8 +123,8 @@ typedef ompi_coll_libnbc_request_t NBC_Handle;
#define OMPI_COLL_LIBNBC_REQUEST_ALLOC(comm, req) \ #define OMPI_COLL_LIBNBC_REQUEST_ALLOC(comm, req) \
do { \ do { \
ompi_free_list_item_t *item; \ opal_free_list_item_t *item; \
OMPI_FREE_LIST_WAIT_MT(&mca_coll_libnbc_component.requests, item); \ item = opal_free_list_wait (&mca_coll_libnbc_component.requests); \
req = (ompi_coll_libnbc_request_t*) item; \ req = (ompi_coll_libnbc_request_t*) item; \
OMPI_REQUEST_INIT(&req->super, false); \ OMPI_REQUEST_INIT(&req->super, false); \
req->super.req_mpi_object.comm = comm; \ req->super.req_mpi_object.comm = comm; \
@ -135,8 +135,8 @@ typedef ompi_coll_libnbc_request_t NBC_Handle;
#define OMPI_COLL_LIBNBC_REQUEST_RETURN(req) \ #define OMPI_COLL_LIBNBC_REQUEST_RETURN(req) \
do { \ do { \
OMPI_REQUEST_FINI(&request->super); \ OMPI_REQUEST_FINI(&request->super); \
OMPI_FREE_LIST_RETURN_MT(&mca_coll_libnbc_component.requests, \ opal_free_list_return (&mca_coll_libnbc_component.requests, \
(ompi_free_list_item_t*) req); \ (opal_free_list_item_t*) req); \
} while (0) } while (0)
int ompi_coll_libnbc_progress(void); int ompi_coll_libnbc_progress(void);
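Written out as plain functions, the request life cycle that the two macros above implement looks like the following (the example_* names are hypothetical; the calls and fields are the ones visible in the macro bodies):

/* Hypothetical function form of OMPI_COLL_LIBNBC_REQUEST_ALLOC/RETURN. */
static ompi_coll_libnbc_request_t *
example_libnbc_request_alloc(struct ompi_communicator_t *comm)
{
    /* opal_free_list_wait() blocks (progressing) until an item is free and
     * returns it directly instead of filling an output argument. */
    opal_free_list_item_t *item =
        opal_free_list_wait(&mca_coll_libnbc_component.requests);
    ompi_coll_libnbc_request_t *req = (ompi_coll_libnbc_request_t *) item;

    OMPI_REQUEST_INIT(&req->super, false);
    req->super.req_mpi_object.comm = comm;
    return req;
}

static void example_libnbc_request_return(ompi_coll_libnbc_request_t *req)
{
    OMPI_REQUEST_FINI(&req->super);
    opal_free_list_return(&mca_coll_libnbc_component.requests,
                          (opal_free_list_item_t *) req);
}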


@ -11,7 +11,7 @@
* Copyright (c) 2004-2005 The Regents of the University of California. * Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved. * All rights reserved.
* Copyright (c) 2008 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2008 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights * Copyright (c) 2013-2015 Los Alamos National Security, LLC. All rights
* reserved. * reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
@ -87,17 +87,14 @@ libnbc_open(void)
{ {
int ret; int ret;
OBJ_CONSTRUCT(&mca_coll_libnbc_component.requests, ompi_free_list_t); OBJ_CONSTRUCT(&mca_coll_libnbc_component.requests, opal_free_list_t);
ret = ompi_free_list_init(&mca_coll_libnbc_component.requests, OBJ_CONSTRUCT(&mca_coll_libnbc_component.active_requests, opal_list_t);
sizeof(ompi_coll_libnbc_request_t), ret = opal_free_list_init (&mca_coll_libnbc_component.requests,
OBJ_CLASS(ompi_coll_libnbc_request_t), sizeof(ompi_coll_libnbc_request_t), 8,
0, OBJ_CLASS(ompi_coll_libnbc_request_t),
-1, 0, 0, 0, -1, 8, NULL, 0, NULL, NULL, NULL);
8,
NULL);
if (OMPI_SUCCESS != ret) return ret; if (OMPI_SUCCESS != ret) return ret;
OBJ_CONSTRUCT(&mca_coll_libnbc_component.active_requests, opal_list_t);
/* note: active comms is the number of communicators who have had /* note: active comms is the number of communicators who have had
a non-blocking collective started */ a non-blocking collective started */
mca_coll_libnbc_component.active_comms = 0; mca_coll_libnbc_component.active_comms = 0;


@ -52,7 +52,7 @@ int ompi_coll_libnbc_ireduce_scatter_block(void* sendbuf, void* recvbuf, int rec
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_rank() (%i)\n", res); return res; } if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_rank() (%i)\n", res); return res; }
res = MPI_Comm_size(comm, &p); res = MPI_Comm_size(comm, &p);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_size() (%i)\n", res); return res; } if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_size() (%i)\n", res); return res; }
MPI_Type_extent(datatype, &ext); res = MPI_Type_extent(datatype, &ext);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; } if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; }
schedule = (NBC_Schedule*)malloc(sizeof(NBC_Schedule)); schedule = (NBC_Schedule*)malloc(sizeof(NBC_Schedule));
@ -168,7 +168,7 @@ int ompi_coll_libnbc_ireduce_scatter_block_inter(void *sbuf, void *rbuf, int rco
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_rank() (%i)\n", res); return res; } if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_rank() (%i)\n", res); return res; }
res = MPI_Comm_remote_size(comm, &rsize); res = MPI_Comm_remote_size(comm, &rsize);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_remote_size() (%i)\n", res); return res; } if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_remote_size() (%i)\n", res); return res; }
MPI_Type_extent(dtype, &ext); res = MPI_Type_extent(dtype, &ext);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; } if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; }
schedule = (NBC_Schedule*)malloc(sizeof(NBC_Schedule)); schedule = (NBC_Schedule*)malloc(sizeof(NBC_Schedule));


@ -70,7 +70,7 @@ int ompi_coll_libnbc_ireduce_scatter(void* sendbuf, void* recvbuf, int *recvcoun
res = MPI_Comm_rank(comm, &rank); res = MPI_Comm_rank(comm, &rank);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_rank() (%i)\n", res); return res; } if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_rank() (%i)\n", res); return res; }
MPI_Type_extent(datatype, &ext); res = MPI_Type_extent(datatype, &ext);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; } if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; }
schedule = (NBC_Schedule*)malloc(sizeof(NBC_Schedule)); schedule = (NBC_Schedule*)malloc(sizeof(NBC_Schedule));
@ -180,7 +180,7 @@ int ompi_coll_libnbc_ireduce_scatter_inter(void* sendbuf, void* recvbuf, int *re
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_rank() (%i)\n", res); return res; } if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_rank() (%i)\n", res); return res; }
res = MPI_Comm_remote_size(comm, &rsize); res = MPI_Comm_remote_size(comm, &rsize);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_remote_size() (%i)\n", res); return res; } if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Comm_remote_size() (%i)\n", res); return res; }
MPI_Type_extent(datatype, &ext); res = MPI_Type_extent(datatype, &ext);
if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; } if (MPI_SUCCESS != res) { printf("MPI Error in MPI_Type_extent() (%i)\n", res); return res; }
schedule = (NBC_Schedule*)malloc(sizeof(NBC_Schedule)); schedule = (NBC_Schedule*)malloc(sizeof(NBC_Schedule));
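The two hunks above fix a dropped return value: MPI_Type_extent() was called without assigning res, so the error check that followed tested the result of the previous call. The corrected pattern, reduced to a hypothetical helper:

#include <mpi.h>
#include <stdio.h>

/* Hypothetical helper restating the fix: capture and test the return code
 * of MPI_Type_extent() instead of discarding it. */
static int example_checked_extent(MPI_Datatype datatype, MPI_Aint *ext)
{
    int res = MPI_Type_extent(datatype, ext);
    if (MPI_SUCCESS != res) {
        printf("MPI Error in MPI_Type_extent() (%i)\n", res);
        return res;
    }
    /* Aside: MPI_Type_extent is deprecated; MPI_Type_get_extent(datatype,
     * &lb, ext) is the MPI-2 replacement. */
    return MPI_SUCCESS;
}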

ompi/mca/coll/libnbc/owner.txt (new file, +7)

@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: project
status: active


@ -2,7 +2,7 @@
/* /*
* Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved. * Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved. * Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights * Copyright (c) 2013-2015 Los Alamos National Security, LLC. All rights
* reserved. * reserved.
* Copyright (c) 2014 Research Organization for Information Science * Copyright (c) 2014 Research Organization for Information Science
* and Technology (RIST). All rights reserved. * and Technology (RIST). All rights reserved.
@ -29,7 +29,7 @@
#include "ompi/mca/bcol/bcol.h" #include "ompi/mca/bcol/bcol.h"
#include "ompi/mca/sbgp/sbgp.h" #include "ompi/mca/sbgp/sbgp.h"
#include "ompi/op/op.h" #include "ompi/op/op.h"
#include "opal/class/ompi_free_list.h" #include "opal/class/opal_free_list.h"
#include "coll_ml_lmngr.h" #include "coll_ml_lmngr.h"
#include "coll_ml_functions.h" #include "coll_ml_functions.h"
@ -652,10 +652,10 @@ struct mca_coll_ml_module_t {
int32_t collective_sequence_num; int32_t collective_sequence_num;
/** ompi free list of full message descriptors **/ /** ompi free list of full message descriptors **/
ompi_free_list_t message_descriptors; opal_free_list_t message_descriptors;
/** ompi free list of message fragment descriptors **/ /** ompi free list of message fragment descriptors **/
ompi_free_list_t fragment_descriptors; opal_free_list_t fragment_descriptors;
/** pointer to the payload memory block **/ /** pointer to the payload memory block **/
struct mca_bcol_base_memory_block_desc_t *payload_block; struct mca_bcol_base_memory_block_desc_t *payload_block;
@ -672,7 +672,7 @@ struct mca_coll_ml_module_t {
/** collective operation descriptor free list - used to manage a single /** collective operation descriptor free list - used to manage a single
* collective operation. */ * collective operation. */
ompi_free_list_t coll_ml_collective_descriptors; opal_free_list_t coll_ml_collective_descriptors;
/** multiple function collective operation support */ /** multiple function collective operation support */
/** broadcast */ /** broadcast */


@ -1,9 +1,12 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/* /*
* Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved. * Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved. * Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
* Copyright (c) 2013 The University of Tennessee and The University * Copyright (c) 2013 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights * of Tennessee Research Foundation. All rights
* reserved. * reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -35,7 +38,7 @@ static void mca_coll_ml_barrier_task_setup(
static int mca_coll_ml_barrier_launch(mca_coll_ml_module_t *ml_module, static int mca_coll_ml_barrier_launch(mca_coll_ml_module_t *ml_module,
ompi_request_t **req) ompi_request_t **req)
{ {
ompi_free_list_item_t *item; opal_free_list_item_t *item;
mca_coll_ml_collective_operation_progress_t *coll_op; mca_coll_ml_collective_operation_progress_t *coll_op;
mca_bcol_base_payload_buffer_desc_t *src_buffer_desc = NULL; mca_bcol_base_payload_buffer_desc_t *src_buffer_desc = NULL;
@ -49,8 +52,7 @@ static int mca_coll_ml_barrier_launch(mca_coll_ml_module_t *ml_module,
/* Blocking call on fragment allocation (Maybe we want to make it non blocking ?) */ /* Blocking call on fragment allocation (Maybe we want to make it non blocking ?) */
OMPI_FREE_LIST_WAIT_MT(&(ml_module->coll_ml_collective_descriptors), item = opal_free_list_wait (&(ml_module->coll_ml_collective_descriptors));
item);
coll_op = (mca_coll_ml_collective_operation_progress_t *) item; coll_op = (mca_coll_ml_collective_operation_progress_t *) item;
assert(NULL != coll_op); assert(NULL != coll_op);


@ -2,7 +2,7 @@
/* /*
* Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved. * Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved. * Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
* Copyright (c) 2014 Los Alamos National Security, LLC. All rights * Copyright (c) 2014-2015 Los Alamos National Security, LLC. All rights
* reserved. * reserved.
* Copyright (c) 2014 Research Organization for Information Science * Copyright (c) 2014 Research Organization for Information Science
* and Technology (RIST). All rights reserved. * and Technology (RIST). All rights reserved.
@ -417,10 +417,10 @@ do {
struct ompi_communicator_t *comm = GET_COMM(op); \ struct ompi_communicator_t *comm = GET_COMM(op); \
bool is_coll_sync = IS_COLL_SYNCMEM(op); \ bool is_coll_sync = IS_COLL_SYNCMEM(op); \
ML_VERBOSE(10, ("Releasing %p", op)); \ ML_VERBOSE(10, ("Releasing %p", op)); \
OMPI_REQUEST_FINI(&(op)->full_message.super); \ OMPI_REQUEST_FINI(&(op)->full_message.super); \
OMPI_FREE_LIST_RETURN_MT(&(((mca_coll_ml_module_t *)(op)->coll_module)-> \ opal_free_list_return (&(((mca_coll_ml_module_t *)(op)->coll_module)-> \
coll_ml_collective_descriptors), \ coll_ml_collective_descriptors), \
(ompi_free_list_item_t *)op); \ (opal_free_list_item_t *)op); \
/* Special check for memory synchronization completion */ \ /* Special check for memory synchronization completion */ \
/* We have to return it first to free list, since the communicator */ \ /* We have to return it first to free list, since the communicator */ \
/* release potentially may trigger ML module distraction and having */ \ /* release potentially may trigger ML module distraction and having */ \


@ -2,7 +2,7 @@
/* /*
* Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved. * Copyright (c) 2009-2012 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved. * Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
* Copyright (c) 2014 Los Alamos National Security, LLC. All rights * Copyright (c) 2014-2015 Los Alamos National Security, LLC. All rights
* reserved. * reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
@ -17,10 +17,10 @@
#include "ompi/mca/coll/ml/coll_ml_allocation.h" #include "ompi/mca/coll/ml/coll_ml_allocation.h"
/* collective managment descriptor initialization - called right after /* collective managment descriptor initialization - called right after
* the constructor by ompi_free_list code * the constructor by opal_free_list code
*/ */
static void mca_coll_ml_collective_operation_progress_init static void mca_coll_ml_collective_operation_progress_init
(ompi_free_list_item_t* item, void* ctx) (opal_free_list_item_t* item, void* ctx)
{ {
int i; int i;
int max_dag_size = ((struct coll_desc_init *)ctx)->max_dag_size; int max_dag_size = ((struct coll_desc_init *)ctx)->max_dag_size;
@ -161,7 +161,7 @@ int ml_coll_schedule_setup(mca_coll_ml_module_t *ml_module)
ml_module->coll_desc_init_data.bcol_base_module=(mca_coll_base_module_t *) ml_module->coll_desc_init_data.bcol_base_module=(mca_coll_base_module_t *)
ml_module; ml_module;
ret = ompi_free_list_init_ex_new( ret = opal_free_list_init (
&(ml_module->coll_ml_collective_descriptors), &(ml_module->coll_ml_collective_descriptors),
sizeof(mca_coll_ml_collective_operation_progress_t), sizeof(mca_coll_ml_collective_operation_progress_t),
/* no special alignment needed */ /* no special alignment needed */
@ -174,7 +174,7 @@ int ml_coll_schedule_setup(mca_coll_ml_module_t *ml_module)
cm->free_list_max_size, cm->free_list_max_size,
cm->free_list_grow_size, cm->free_list_grow_size,
/* No Mpool */ /* No Mpool */
NULL, NULL, 0, NULL,
mca_coll_ml_collective_operation_progress_init, mca_coll_ml_collective_operation_progress_init,
(void *)&(ml_module->coll_desc_init_data) (void *)&(ml_module->coll_desc_init_data)
); );
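The call above also shows the constructor-callback form of the new API: the final two arguments of opal_free_list_init() are a per-item initializer (here mca_coll_ml_collective_operation_progress_init) and an opaque context pointer, and the initializer now takes an opal_free_list_item_t rather than an ompi_free_list_item_t. A minimal hypothetical pairing (all example_* names are invented for illustration):

#include "opal/class/opal_free_list.h"

struct example_item_t {
    opal_free_list_item_t super;   /* free-list items must derive from this */
    int max_dag_size;
};
typedef struct example_item_t example_item_t;
OBJ_CLASS_INSTANCE(example_item_t, opal_free_list_item_t, NULL, NULL);

/* Per-item initializer, invoked by the free-list code right after the class
 * constructor; ctx is the last argument passed to opal_free_list_init(). */
static void example_item_init(opal_free_list_item_t *item, void *ctx)
{
    example_item_t *ei = (example_item_t *) item;
    ei->max_dag_size = *(int *) ctx;
}

static int example_list_setup(opal_free_list_t *list, int *max_dag_size)
{
    OBJ_CONSTRUCT(list, opal_free_list_t);
    return opal_free_list_init(list, sizeof(example_item_t),
                               opal_cache_line_size,
                               OBJ_CLASS(example_item_t),
                               0, 0,            /* no payload buffer      */
                               8, -1, 8,        /* initial / max / growth */
                               NULL, 0, NULL,   /* no mpool / rcache      */
                               example_item_init, max_dag_size);
}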


@ -5,6 +5,8 @@
* Copyright (c) 2013 The University of Tennessee and The University * Copyright (c) 2013 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights * of Tennessee Research Foundation. All rights
* reserved. * reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -468,13 +470,12 @@ mca_coll_ml_alloc_op_prog_single_frag_dag(
size_t offset_into_user_buffer size_t offset_into_user_buffer
) )
{ {
ompi_free_list_item_t *item; opal_free_list_item_t *item;
mca_coll_ml_collective_operation_progress_t *coll_op = NULL; mca_coll_ml_collective_operation_progress_t *coll_op = NULL;
ompi_request_t *req; ompi_request_t *req;
/* Blocking call on fragment allocation (Maybe we want to make it non blocking ?) */ /* Blocking call on fragment allocation (Maybe we want to make it non blocking ?) */
OMPI_FREE_LIST_WAIT_MT(&(ml_module->coll_ml_collective_descriptors), item = opal_free_list_wait (&(ml_module->coll_ml_collective_descriptors));
item);
coll_op = (mca_coll_ml_collective_operation_progress_t *) item; coll_op = (mca_coll_ml_collective_operation_progress_t *) item;
ML_VERBOSE(10, (">>> Allocating coll op %p", coll_op)); ML_VERBOSE(10, (">>> Allocating coll op %p", coll_op));
@ -529,12 +530,11 @@ static inline __opal_attribute_always_inline__ mca_coll_ml_collective_operation_
size_t offset_into_user_buffer size_t offset_into_user_buffer
) )
{ {
ompi_free_list_item_t *item; opal_free_list_item_t *item;
mca_coll_ml_collective_operation_progress_t *coll_op = NULL; mca_coll_ml_collective_operation_progress_t *coll_op = NULL;
/* Blocking call on fragment allocation (Maybe we want to make it non blocking ?) */ /* Blocking call on fragment allocation (Maybe we want to make it non blocking ?) */
OMPI_FREE_LIST_WAIT_MT(&(ml_module->coll_ml_collective_descriptors), item = opal_free_list_wait (&(ml_module->coll_ml_collective_descriptors));
item);
coll_op = (mca_coll_ml_collective_operation_progress_t *) item; coll_op = (mca_coll_ml_collective_operation_progress_t *) item;


@ -2,7 +2,7 @@
/* /*
* Copyright (c) 2009-2013 Oak Ridge National Laboratory. All rights reserved. * Copyright (c) 2009-2013 Oak Ridge National Laboratory. All rights reserved.
* Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved. * Copyright (c) 2009-2012 Mellanox Technologies. All rights reserved.
* Copyright (c) 2012-2014 Los Alamos National Security, LLC. All rights * Copyright (c) 2012-2015 Los Alamos National Security, LLC. All rights
* reserved. * reserved.
* Copyright (c) 2013-2014 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2013-2014 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2014 Research Organization for Information Science * Copyright (c) 2014 Research Organization for Information Science
@ -120,9 +120,9 @@ mca_coll_ml_module_construct(mca_coll_ml_module_t *module)
OBJ_CONSTRUCT(&module->active_bcols_list, opal_list_t); OBJ_CONSTRUCT(&module->active_bcols_list, opal_list_t);
OBJ_CONSTRUCT(&module->waiting_for_memory_list, opal_list_t); OBJ_CONSTRUCT(&module->waiting_for_memory_list, opal_list_t);
OBJ_CONSTRUCT(&module->fragment_descriptors, ompi_free_list_t); OBJ_CONSTRUCT(&module->fragment_descriptors, opal_free_list_t);
OBJ_CONSTRUCT(&module->message_descriptors, ompi_free_list_t); OBJ_CONSTRUCT(&module->message_descriptors, opal_free_list_t);
OBJ_CONSTRUCT(&module->coll_ml_collective_descriptors, ompi_free_list_t); OBJ_CONSTRUCT(&module->coll_ml_collective_descriptors, opal_free_list_t);
memset (&module->fallback, 0, sizeof (module->fallback)); memset (&module->fallback, 0, sizeof (module->fallback));
} }
@ -260,8 +260,8 @@ static int mca_coll_ml_request_free(ompi_request_t** request)
ML_VERBOSE(10, ("Releasing Master %p", ml_request)); ML_VERBOSE(10, ("Releasing Master %p", ml_request));
/* Mark the request as invalid */ /* Mark the request as invalid */
OMPI_REQUEST_FINI(&ml_request->full_message.super); OMPI_REQUEST_FINI(&ml_request->full_message.super);
OMPI_FREE_LIST_RETURN_MT(&(ml_module->coll_ml_collective_descriptors), opal_free_list_return (&(ml_module->coll_ml_collective_descriptors),
(ompi_free_list_item_t *)ml_request); (opal_free_list_item_t *)ml_request);
/* MPI needs to return with the request object set to MPI_REQUEST_NULL /* MPI needs to return with the request object set to MPI_REQUEST_NULL
*/ */
@ -326,20 +326,20 @@ static void mca_coll_ml_collective_operation_progress_destruct
/* initialize the full message descriptor - can pass in module specific /* initialize the full message descriptor - can pass in module specific
* initialization data * initialization data
*/ */
static void init_ml_fragment_desc(ompi_free_list_item_t *desc , void* ctx); static void init_ml_fragment_desc(opal_free_list_item_t *desc , void* ctx);
static void init_ml_message_desc(ompi_free_list_item_t *desc , void* ctx) static void init_ml_message_desc(opal_free_list_item_t *desc , void* ctx)
{ {
mca_coll_ml_module_t *module= (mca_coll_ml_module_t *) ctx; mca_coll_ml_module_t *module= (mca_coll_ml_module_t *) ctx;
mca_coll_ml_descriptor_t *msg_desc = (mca_coll_ml_descriptor_t *) desc; mca_coll_ml_descriptor_t *msg_desc = (mca_coll_ml_descriptor_t *) desc;
/* finish setting up the fragment descriptor */ /* finish setting up the fragment descriptor */
init_ml_fragment_desc((ompi_free_list_item_t*)&(msg_desc->fragment),module); init_ml_fragment_desc((opal_free_list_item_t*)&(msg_desc->fragment),module);
} }
/* initialize the fragment descriptor - can pass in module specific /* initialize the fragment descriptor - can pass in module specific
* initialization data * initialization data
*/ */
static void init_ml_fragment_desc(ompi_free_list_item_t *desc , void* ctx) static void init_ml_fragment_desc(opal_free_list_item_t *desc , void* ctx)
{ {
mca_coll_ml_module_t *module= (mca_coll_ml_module_t *) ctx; mca_coll_ml_module_t *module= (mca_coll_ml_module_t *) ctx;
mca_coll_ml_fragment_t *frag_desc = (mca_coll_ml_fragment_t *) desc; mca_coll_ml_fragment_t *frag_desc = (mca_coll_ml_fragment_t *) desc;
@ -2614,14 +2614,14 @@ static int init_lists(mca_coll_ml_module_t *ml_module)
/* no data associated with the message descriptor */ /* no data associated with the message descriptor */
length = sizeof(mca_coll_ml_descriptor_t); length = sizeof(mca_coll_ml_descriptor_t);
ret = ompi_free_list_init_ex_new(&(ml_module->message_descriptors), length, ret = opal_free_list_init(&(ml_module->message_descriptors), length,
opal_cache_line_size, OBJ_CLASS(mca_coll_ml_descriptor_t), opal_cache_line_size, OBJ_CLASS(mca_coll_ml_descriptor_t),
length_payload, 0, length_payload, 0,
num_elements, max_elements, elements_per_alloc, num_elements, max_elements, elements_per_alloc,
NULL, NULL, 0, NULL,
init_ml_message_desc, ml_module); init_ml_message_desc, ml_module);
if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) { if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
ML_ERROR(("ompi_free_list_init_ex_new exit with error")); ML_ERROR(("opal_free_list_init exit with error"));
return ret; return ret;
} }
@ -2632,14 +2632,14 @@ static int init_lists(mca_coll_ml_module_t *ml_module)
/* create a free list of fragment descriptors */ /* create a free list of fragment descriptors */
/*length_payload=sizeof(something);*/ /*length_payload=sizeof(something);*/
length = sizeof(mca_coll_ml_fragment_t); length = sizeof(mca_coll_ml_fragment_t);
ret = ompi_free_list_init_ex_new(&(ml_module->fragment_descriptors), length, ret = opal_free_list_init (&(ml_module->fragment_descriptors), length,
opal_cache_line_size, OBJ_CLASS(mca_coll_ml_fragment_t), opal_cache_line_size, OBJ_CLASS(mca_coll_ml_fragment_t),
length_payload, 0, length_payload, 0,
num_elements, max_elements, elements_per_alloc, num_elements, max_elements, elements_per_alloc,
NULL, NULL, 0, NULL,
init_ml_fragment_desc, ml_module); init_ml_fragment_desc, ml_module);
if (OMPI_SUCCESS != ret) { if (OMPI_SUCCESS != ret) {
ML_ERROR(("ompi_free_list_init_ex_new exit with error")); ML_ERROR(("opal_free_list_init exit with error"));
return ret; return ret;
} }

ompi/mca/coll/ml/owner.txt (new file, +7)

@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: ORNL?
status: unmaintained


@ -1,5 +1,8 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/* /*
* Copyright (c) 2013 Sandia National Laboratories. All rights reserved. * Copyright (c) 2013 Sandia National Laboratories. All rights reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -42,7 +45,7 @@ struct mca_coll_portals4_component_t {
ptl_handle_md_t md_h; ptl_handle_md_t md_h;
#endif #endif
ompi_free_list_t requests; /* request free list for the i collectives */ opal_free_list_t requests; /* request free list for the i collectives */
}; };
typedef struct mca_coll_portals4_component_t mca_coll_portals4_component_t; typedef struct mca_coll_portals4_component_t mca_coll_portals4_component_t;
OMPI_MODULE_DECLSPEC extern mca_coll_portals4_component_t mca_coll_portals4_component; OMPI_MODULE_DECLSPEC extern mca_coll_portals4_component_t mca_coll_portals4_component;


@ -1,3 +1,4 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/* /*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology * University Research and Technology
@ -11,6 +12,8 @@
* All rights reserved. * All rights reserved.
* Copyright (c) 2008 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2008 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Sandia National Laboratories. All rights reserved. * Copyright (c) 2013 Sandia National Laboratories. All rights reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -110,17 +113,14 @@ portals4_open(void)
mca_coll_portals4_component.md_h = PTL_INVALID_HANDLE; mca_coll_portals4_component.md_h = PTL_INVALID_HANDLE;
#endif #endif
OBJ_CONSTRUCT(&mca_coll_portals4_component.requests, ompi_free_list_t); OBJ_CONSTRUCT(&mca_coll_portals4_component.requests, opal_free_list_t);
ret = ompi_free_list_init(&mca_coll_portals4_component.requests, ret = opal_free_list_init (&mca_coll_portals4_component.requests,
sizeof(ompi_coll_portals4_request_t), sizeof(ompi_coll_portals4_request_t),
OBJ_CLASS(ompi_coll_portals4_request_t), OBJ_CLASS(ompi_coll_portals4_request_t),
8, 0, 0, 8, 0, 8, NULL, 0, NULL, NULL, NULL);
0,
8,
NULL);
if (OMPI_SUCCESS != ret) { if (OMPI_SUCCESS != ret) {
opal_output_verbose(1, ompi_coll_base_framework.framework_output, opal_output_verbose(1, ompi_coll_base_framework.framework_output,
"%s:%d: ompi_free_list_init failed: %d\n", "%s:%d: opal_free_list_init failed: %d\n",
__FILE__, __LINE__, ret); __FILE__, __LINE__, ret);
return ret; return ret;
} }


@ -1,5 +1,8 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/* /*
* Copyright (c) 2013 Sandia National Laboratories. All rights reserved. * Copyright (c) 2013 Sandia National Laboratories. All rights reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -29,9 +32,8 @@ OBJ_CLASS_DECLARATION(ompi_coll_portals4_request_t);
#define OMPI_COLL_PORTALS4_REQUEST_ALLOC(comm, req) \ #define OMPI_COLL_PORTALS4_REQUEST_ALLOC(comm, req) \
do { \ do { \
ompi_free_list_item_t *item; \ opal_free_list_item_t *item; \
OMPI_FREE_LIST_GET_MT(&mca_coll_portals4_component.requests, \ item = opal_free_list_get (&mca_coll_portals4_component.requests); \
item); \
req = (ompi_coll_portals4_request_t*) item; \ req = (ompi_coll_portals4_request_t*) item; \
OMPI_REQUEST_INIT(&req->super, false); \ OMPI_REQUEST_INIT(&req->super, false); \
req->super.req_mpi_object.comm = comm; \ req->super.req_mpi_object.comm = comm; \
@ -42,8 +44,8 @@ OBJ_CLASS_DECLARATION(ompi_coll_portals4_request_t);
#define OMPI_COLL_PORTALS4_REQUEST_RETURN(req) \ #define OMPI_COLL_PORTALS4_REQUEST_RETURN(req) \
do { \ do { \
OMPI_REQUEST_FINI(&request->super); \ OMPI_REQUEST_FINI(&request->super); \
OMPI_FREE_LIST_RETURN_MT(&mca_coll_portals4_component.requests, \ opal_free_list_return (&mca_coll_portals4_component.requests, \
(ompi_free_list_item_t*) req); \ (opal_free_list_item_t*) req); \
} while (0) } while (0)
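Note the contrast with the libnbc macros earlier in the commit: portals4 allocates with opal_free_list_get(), which returns immediately and may yield NULL, whereas opal_free_list_wait() blocks (progressing) until an item is available. A hypothetical guard for the non-blocking form:

/* Hypothetical non-blocking allocation with the converted API. */
static int example_portals4_request_alloc(ompi_coll_portals4_request_t **req_out)
{
    opal_free_list_item_t *item =
        opal_free_list_get(&mca_coll_portals4_component.requests);

    if (NULL == item) {
        /* list exhausted and unable to grow */
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    *req_out = (ompi_coll_portals4_request_t *) item;
    return OMPI_SUCCESS;
}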

ompi/mca/coll/portals4/owner.txt (new file, +7)

@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: SNL
status: active

ompi/mca/coll/self/owner.txt (new file, +7)

@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: CISCO
status: maintenance

ompi/mca/coll/sm/owner.txt (new file, +7)

@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: LANL?
status: maintenance


@ -69,8 +69,6 @@ ompi_coll_tuned_sendrecv_zero(int dest, int stag,
/* post new irecv */ /* post new irecv */
err = MCA_PML_CALL(irecv( NULL, 0, MPI_BYTE, source, rtag, err = MCA_PML_CALL(irecv( NULL, 0, MPI_BYTE, source, rtag,
comm, &reqs[0])); comm, &reqs[0]));
/* try to silence CID 1269934 */
assert( MPI_ERR_IN_STATUS != err );
if (err != MPI_SUCCESS) { line = __LINE__; goto error_handler; } if (err != MPI_SUCCESS) { line = __LINE__; goto error_handler; }
/* send data to children */ /* send data to children */
@ -79,15 +77,6 @@ ompi_coll_tuned_sendrecv_zero(int dest, int stag,
if (err != MPI_SUCCESS) { line = __LINE__; goto error_handler; } if (err != MPI_SUCCESS) { line = __LINE__; goto error_handler; }
err = ompi_request_wait_all( 2, reqs, statuses ); err = ompi_request_wait_all( 2, reqs, statuses );
if (err != MPI_SUCCESS) { line = __LINE__; goto error_handler; }
return (MPI_SUCCESS);
error_handler:
/* As we use wait_all we will get MPI_ERR_IN_STATUS which is not an error
* code that we can propagate up the stack. Instead, look for the real
* error code from the MPI_ERROR in the status.
*/
if( MPI_ERR_IN_STATUS == err ) { if( MPI_ERR_IN_STATUS == err ) {
/* At least we know the error was detected during the wait_all */ /* At least we know the error was detected during the wait_all */
int err_index = 1; int err_index = 1;
@ -98,13 +87,18 @@ ompi_coll_tuned_sendrecv_zero(int dest, int stag,
OPAL_OUTPUT ((ompi_coll_tuned_stream, "%s:%d: Error %d occurred in the %s" OPAL_OUTPUT ((ompi_coll_tuned_stream, "%s:%d: Error %d occurred in the %s"
" stage of ompi_coll_tuned_sendrecv_zero\n", " stage of ompi_coll_tuned_sendrecv_zero\n",
__FILE__, line, err, (0 == err_index ? "receive" : "send"))); __FILE__, line, err, (0 == err_index ? "receive" : "send")));
} else { return MPI_ERR_IN_STATUS;
/* Error discovered during the posting of the irecv or isend,
* and no status is available.
*/
OPAL_OUTPUT ((ompi_coll_tuned_stream, "%s:%d: Error %d occurred\n",
__FILE__, line, err));
} }
if (err != MPI_SUCCESS) { line = __LINE__; goto error_handler; }
return (MPI_SUCCESS);
error_handler:
/* Error discovered during the posting of the irecv or isend,
* and no status is available.
*/
OPAL_OUTPUT ((ompi_coll_tuned_stream, "%s:%d: Error %d occurred\n",
__FILE__, line, err));
return err; return err;
} }
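The restructured error path above leans on the wait-all contract: when any request in the set fails, the call reports MPI_ERR_IN_STATUS and the per-request error has to be read from the status array; an error detected while posting the isend/irecv, by contrast, comes back directly with no status to inspect. A small hypothetical illustration of that rule at the MPI level (assuming an error handler that returns, i.e. MPI_ERRORS_RETURN):

#include <mpi.h>
#include <stdio.h>

/* Hypothetical wrapper: wait on a recv/send pair and report which one
 * failed, mirroring the err_index logic in the code above. */
static int example_wait_pair(MPI_Request reqs[2])
{
    MPI_Status statuses[2];
    int err = MPI_Waitall(2, reqs, statuses);

    if (MPI_ERR_IN_STATUS == err) {
        /* The aggregate code only says "something failed"; the real error
         * is carried in statuses[i].MPI_ERROR for the failed request. */
        int idx = (MPI_SUCCESS != statuses[0].MPI_ERROR) ? 0 : 1;
        fprintf(stderr, "error %d in the %s request\n",
                statuses[idx].MPI_ERROR, (0 == idx) ? "receive" : "send");
    }
    return err;
}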

ompi/mca/coll/tuned/owner.txt (new file, +7)

@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: UTK
status: maintenance


@ -1,3 +1,4 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/* /*
* Copyright (c) 2004-2010 The Trustees of Indiana University. * Copyright (c) 2004-2010 The Trustees of Indiana University.
* All rights reserved. * All rights reserved.
@ -7,6 +8,8 @@
* University of Stuttgart. All rights reserved. * University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California. * Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved. * All rights reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -50,13 +53,13 @@
* Object stuff * Object stuff
******************/ ******************/
OBJ_CLASS_INSTANCE(ompi_crcp_base_pml_state_t, OBJ_CLASS_INSTANCE(ompi_crcp_base_pml_state_t,
ompi_free_list_item_t, opal_free_list_item_t,
NULL, NULL,
NULL NULL
); );
OBJ_CLASS_INSTANCE(ompi_crcp_base_btl_state_t, OBJ_CLASS_INSTANCE(ompi_crcp_base_btl_state_t,
ompi_free_list_item_t, opal_free_list_item_t,
NULL, NULL,
NULL NULL
); );

ompi/mca/crcp/base/owner.txt (new file, +7)

@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: IU?
status: unmaintained


@ -1,3 +1,4 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/* /*
* Copyright (c) 2004-2011 The Trustees of Indiana University. * Copyright (c) 2004-2011 The Trustees of Indiana University.
* All rights reserved. * All rights reserved.
@ -5,8 +6,8 @@
* of Tennessee Research Foundation. All rights * of Tennessee Research Foundation. All rights
* reserved. * reserved.
* Copyright (c) 2010-2012 Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2010-2012 Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC. * Copyright (c) 2012-2015 Los Alamos National Security, LLC. All rights
* All rights reserved. * reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -41,7 +42,7 @@
#include "ompi/mca/crcp/crcp.h" #include "ompi/mca/crcp/crcp.h"
#include "ompi/mca/crcp/base/base.h" #include "ompi/mca/crcp/base/base.h"
#include "opal/class/ompi_free_list.h" #include "opal/class/opal_free_list.h"
#include "ompi/runtime/ompi_cr.h" #include "ompi/runtime/ompi_cr.h"
#include "orte/runtime/orte_wait.h" #include "orte/runtime/orte_wait.h"
@ -106,12 +107,12 @@ opal_list_t drained_msg_ack_list;
/* /*
* Free lists * Free lists
*/ */
ompi_free_list_t coord_state_free_list; opal_free_list_t coord_state_free_list;
ompi_free_list_t content_ref_free_list; opal_free_list_t content_ref_free_list;
ompi_free_list_t peer_ref_free_list; opal_free_list_t peer_ref_free_list;
ompi_free_list_t traffic_msg_ref_free_list; opal_free_list_t traffic_msg_ref_free_list;
ompi_free_list_t drain_msg_ref_free_list; opal_free_list_t drain_msg_ref_free_list;
ompi_free_list_t drain_ack_msg_ref_free_list; opal_free_list_t drain_ack_msg_ref_free_list;
/* /*
* Quiescence requests to wait on * Quiescence requests to wait on
@ -618,74 +619,69 @@ static void traffic_message_dump_drain_msg_indv(ompi_crcp_bkmrk_pml_drain_messag
*/ */
#define HOKE_PEER_REF_ALLOC(peer_ref) \ #define HOKE_PEER_REF_ALLOC(peer_ref) \
do { \ do { \
ompi_free_list_item_t* item; \ peer_ref = (ompi_crcp_bkmrk_pml_peer_ref_t *) \
OMPI_FREE_LIST_WAIT_MT(&peer_ref_free_list, item); \ opal_free_list_wait (&peer_ref_free_list); \
peer_ref = (ompi_crcp_bkmrk_pml_peer_ref_t*)item; \ } while(0)
} while(0);
#define HOKE_PEER_REF_RETURN(peer_ref) \ #define HOKE_PEER_REF_RETURN(peer_ref) \
do { \ do { \
OMPI_FREE_LIST_RETURN_MT(&peer_ref_free_list, \ opal_free_list_return (&peer_ref_free_list, \
(ompi_free_list_item_t*)peer_ref); \ (opal_free_list_item_t*)peer_ref); \
} while(0); } while(0)
#define HOKE_CONTENT_REF_ALLOC(content_ref) \ #define HOKE_CONTENT_REF_ALLOC(content_ref) \
do { \ do { \
ompi_free_list_item_t* item; \ content_ref = (ompi_crcp_bkmrk_pml_message_content_ref_t*) \
OMPI_FREE_LIST_WAIT_MT(&content_ref_free_list, item); \ opal_free_list_wait (&content_ref_free_list); \
content_ref = (ompi_crcp_bkmrk_pml_message_content_ref_t*)item; \ content_ref->msg_id = content_ref_seq_num; \
content_ref->msg_id = content_ref_seq_num; \ content_ref_seq_num++; \
content_ref_seq_num++;\ } while(0)
} while(0);
#define HOKE_CONTENT_REF_RETURN(content_ref) \ #define HOKE_CONTENT_REF_RETURN(content_ref) \
do { \ do { \
OMPI_FREE_LIST_RETURN_MT(&content_ref_free_list, \ opal_free_list_return (&content_ref_free_list, \
(ompi_free_list_item_t*)content_ref); \ (opal_free_list_item_t*)content_ref); \
} while(0); } while(0)
#define HOKE_TRAFFIC_MSG_REF_ALLOC(msg_ref) \ #define HOKE_TRAFFIC_MSG_REF_ALLOC(msg_ref) \
do { \ do { \
ompi_free_list_item_t* item; \ msg_ref = (ompi_crcp_bkmrk_pml_traffic_message_ref_t*) \
OMPI_FREE_LIST_WAIT_MT(&traffic_msg_ref_free_list, item); \ opal_free_list_wait (&traffic_msg_ref_free_list); \
msg_ref = (ompi_crcp_bkmrk_pml_traffic_message_ref_t*)item; \ } while(0)
} while(0);
#define HOKE_TRAFFIC_MSG_REF_RETURN(msg_ref) \ #define HOKE_TRAFFIC_MSG_REF_RETURN(msg_ref) \
do { \ do { \
OMPI_FREE_LIST_RETURN_MT(&traffic_msg_ref_free_list, \ opal_free_list_return (&traffic_msg_ref_free_list, \
(ompi_free_list_item_t*)msg_ref); \ (opal_free_list_item_t*)msg_ref); \
} while(0); } while(0)
#define HOKE_DRAIN_MSG_REF_ALLOC(msg_ref) \ #define HOKE_DRAIN_MSG_REF_ALLOC(msg_ref) \
do { \ do { \
ompi_free_list_item_t* item; \ msg_ref = (ompi_crcp_bkmrk_pml_drain_message_ref_t *) \
OMPI_FREE_LIST_WAIT_MT(&drain_msg_ref_free_list, item); \ opal_free_list_wait (&drain_msg_ref_free_list); \
msg_ref = (ompi_crcp_bkmrk_pml_drain_message_ref_t*)item; \ } while(0)
} while(0);
#define HOKE_DRAIN_MSG_REF_RETURN(msg_ref) \ #define HOKE_DRAIN_MSG_REF_RETURN(msg_ref) \
do { \ do { \
OMPI_FREE_LIST_RETURN_MT(&drain_msg_ref_free_list, \ opal_free_list_return (&drain_msg_ref_free_list, \
(ompi_free_list_item_t*)msg_ref); \ (opal_free_list_item_t*)msg_ref); \
} while(0); } while(0)
#define HOKE_DRAIN_ACK_MSG_REF_ALLOC(msg_ref) \ #define HOKE_DRAIN_ACK_MSG_REF_ALLOC(msg_ref) \
do { \ do { \
ompi_free_list_item_t* item; \ msg_ref = (ompi_crcp_bkmrk_pml_drain_message_ack_ref_t *) \
OMPI_FREE_LIST_WAIT_MT(&drain_ack_msg_ref_free_list, item); \ opal_free_list_wait (&drain_ack_msg_ref_free_list); \
msg_ref = (ompi_crcp_bkmrk_pml_drain_message_ack_ref_t*)item; \ } while(0)
} while(0);
#define HOKE_DRAIN_ACK_MSG_REF_RETURN(msg_ref) \ #define HOKE_DRAIN_ACK_MSG_REF_RETURN(msg_ref) \
do { \ do { \
OMPI_FREE_LIST_RETURN_MT(&drain_ack_msg_ref_free_list, \ opal_free_list_return (&drain_ack_msg_ref_free_list, \
(ompi_free_list_item_t*)msg_ref); \ (opal_free_list_item_t*)msg_ref); \
} while(0); } while(0)
/* /*
@ -967,18 +963,17 @@ OBJ_CLASS_INSTANCE(ompi_crcp_bkmrk_pml_state_t,
/************************************ /************************************
* Some Macro shortcuts * Some Macro shortcuts
************************************/ ************************************/
#define CRCP_COORD_STATE_ALLOC(state_ref) \ #define CRCP_COORD_STATE_ALLOC(state_ref) \
do { \ do { \
ompi_free_list_item_t* item; \ state_ref = (ompi_crcp_bkmrk_pml_state_t *) \
OMPI_FREE_LIST_WAIT_MT(&coord_state_free_list, item); \ opal_free_list_wait (&coord_state_free_list); \
state_ref = (ompi_crcp_bkmrk_pml_state_t*)item; \ } while(0)
} while(0);
#define CRCP_COORD_STATE_RETURN(state_ref) \ #define CRCP_COORD_STATE_RETURN(state_ref) \
do { \ do { \
OMPI_FREE_LIST_RETURN_MT(&coord_state_free_list, \ opal_free_list_return (&coord_state_free_list, \
(ompi_free_list_item_t*)state_ref); \ (opal_free_list_item_t *)state_ref); \
} while(0); } while(0)
#define CREATE_COORD_STATE(coord_state, pml_state, v_peer_ref, v_msg_ref) \ #define CREATE_COORD_STATE(coord_state, pml_state, v_peer_ref, v_msg_ref) \
{ \ { \
@ -1100,71 +1095,71 @@ int ompi_crcp_bkmrk_pml_init(void) {
* - Drain ACK Messsage Refs * - Drain ACK Messsage Refs
* - Message Contents? * - Message Contents?
*/ */
OBJ_CONSTRUCT(&coord_state_free_list, ompi_free_list_t); OBJ_CONSTRUCT(&coord_state_free_list, opal_free_list_t);
ompi_free_list_init_new( &coord_state_free_list, opal_free_list_init (&coord_state_free_list,
sizeof(ompi_crcp_bkmrk_pml_state_t), sizeof(ompi_crcp_bkmrk_pml_state_t),
opal_cache_line_size, opal_cache_line_size,
OBJ_CLASS(ompi_crcp_bkmrk_pml_state_t), OBJ_CLASS(ompi_crcp_bkmrk_pml_state_t),
0,opal_cache_line_size, 0,opal_cache_line_size,
4, /* Initial number */ 4, /* Initial number */
-1, /* Max = Unlimited */ -1, /* Max = Unlimited */
4, /* Increment by */ 4, /* Increment by */
NULL); NULL, 0, NULL, NULL, NULL);
OBJ_CONSTRUCT(&content_ref_free_list, ompi_free_list_t); OBJ_CONSTRUCT(&content_ref_free_list, opal_free_list_t);
ompi_free_list_init_new( &content_ref_free_list, opal_free_list_init (&content_ref_free_list,
sizeof(ompi_crcp_bkmrk_pml_message_content_ref_t), sizeof(ompi_crcp_bkmrk_pml_message_content_ref_t),
opal_cache_line_size, opal_cache_line_size,
OBJ_CLASS(ompi_crcp_bkmrk_pml_message_content_ref_t), OBJ_CLASS(ompi_crcp_bkmrk_pml_message_content_ref_t),
0,opal_cache_line_size, 0,opal_cache_line_size,
80, /* Initial number */ 80, /* Initial number */
-1, /* Max = Unlimited */ -1, /* Max = Unlimited */
32, /* Increment by */ 32, /* Increment by */
NULL); NULL, 0, NULL, NULL, NULL);
OBJ_CONSTRUCT(&peer_ref_free_list, ompi_free_list_t); OBJ_CONSTRUCT(&peer_ref_free_list, opal_free_list_t);
ompi_free_list_init_new( &peer_ref_free_list, opal_free_list_init (&peer_ref_free_list,
sizeof(ompi_crcp_bkmrk_pml_peer_ref_t), sizeof(ompi_crcp_bkmrk_pml_peer_ref_t),
opal_cache_line_size, opal_cache_line_size,
OBJ_CLASS(ompi_crcp_bkmrk_pml_peer_ref_t), OBJ_CLASS(ompi_crcp_bkmrk_pml_peer_ref_t),
0,opal_cache_line_size, 0,opal_cache_line_size,
16, /* Initial number */ 16, /* Initial number */
-1, /* Max = Unlimited */ -1, /* Max = Unlimited */
16, /* Increment by */ 16, /* Increment by */
NULL); NULL, 0, NULL, NULL, NULL);
OBJ_CONSTRUCT(&traffic_msg_ref_free_list, ompi_free_list_t); OBJ_CONSTRUCT(&traffic_msg_ref_free_list, opal_free_list_t);
ompi_free_list_init_new( &traffic_msg_ref_free_list, opal_free_list_init (&traffic_msg_ref_free_list,
sizeof(ompi_crcp_bkmrk_pml_traffic_message_ref_t), sizeof(ompi_crcp_bkmrk_pml_traffic_message_ref_t),
opal_cache_line_size, opal_cache_line_size,
OBJ_CLASS(ompi_crcp_bkmrk_pml_traffic_message_ref_t), OBJ_CLASS(ompi_crcp_bkmrk_pml_traffic_message_ref_t),
0,opal_cache_line_size, 0,opal_cache_line_size,
32, /* Initial number */ 32, /* Initial number */
-1, /* Max = Unlimited */ -1, /* Max = Unlimited */
64, /* Increment by */ 64, /* Increment by */
NULL); NULL, 0, NULL, NULL, NULL);
OBJ_CONSTRUCT(&drain_msg_ref_free_list, ompi_free_list_t); OBJ_CONSTRUCT(&drain_msg_ref_free_list, opal_free_list_t);
ompi_free_list_init_new( &drain_msg_ref_free_list, opal_free_list_init (&drain_msg_ref_free_list,
sizeof(ompi_crcp_bkmrk_pml_drain_message_ref_t), sizeof(ompi_crcp_bkmrk_pml_drain_message_ref_t),
opal_cache_line_size, opal_cache_line_size,
OBJ_CLASS(ompi_crcp_bkmrk_pml_drain_message_ref_t), OBJ_CLASS(ompi_crcp_bkmrk_pml_drain_message_ref_t),
0,opal_cache_line_size, 0,opal_cache_line_size,
32, /* Initial number */ 32, /* Initial number */
-1, /* Max = Unlimited */ -1, /* Max = Unlimited */
64, /* Increment by */ 64, /* Increment by */
NULL); NULL, 0, NULL, NULL, NULL);
OBJ_CONSTRUCT(&drain_ack_msg_ref_free_list, ompi_free_list_t); OBJ_CONSTRUCT(&drain_ack_msg_ref_free_list, opal_free_list_t);
ompi_free_list_init_new( &drain_ack_msg_ref_free_list, opal_free_list_init (&drain_ack_msg_ref_free_list,
sizeof(ompi_crcp_bkmrk_pml_drain_message_ack_ref_t), sizeof(ompi_crcp_bkmrk_pml_drain_message_ack_ref_t),
opal_cache_line_size, opal_cache_line_size,
OBJ_CLASS(ompi_crcp_bkmrk_pml_drain_message_ack_ref_t), OBJ_CLASS(ompi_crcp_bkmrk_pml_drain_message_ack_ref_t),
0,opal_cache_line_size, 0,opal_cache_line_size,
16, /* Initial number */ 16, /* Initial number */
-1, /* Max = Unlimited */ -1, /* Max = Unlimited */
16, /* Increment by */ 16, /* Increment by */
NULL); NULL, 0, NULL, NULL, NULL);
clear_timers(); clear_timers();
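A side note on the macro hunks earlier in this file: besides switching to opal_free_list_wait()/opal_free_list_return(), the rewritten HOKE_* and CRCP_COORD_STATE_* macros drop the semicolon that used to follow while(0). That is the usual statement-macro hygiene: with do { ... } while(0) and no trailing semicolon, the caller's own semicolon completes the statement, so the macro nests cleanly inside if/else. A small hypothetical example of why the extra semicolon is harmful:

/* With a semicolon baked into the macro body, the "else" below would not
 * compile, because the stray ";" terminates the if statement early. */
#define EXAMPLE_RESET(p)    \
    do {                    \
        (p)->count = 0;     \
        (p)->head  = NULL;  \
    } while(0)

struct example_list { int count; void *head; };

static void example_use(struct example_list *l, int cond)
{
    if (cond)
        EXAMPLE_RESET(l);   /* the caller's ';' completes the statement */
    else
        l->count = -1;
}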

ompi/mca/crcp/bkmrk/owner.txt (new file, +7)

@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: IU?
status: unmaintained


@ -1,3 +1,4 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/* /*
* Copyright (c) 2004-2010 The Trustees of Indiana University and Indiana * Copyright (c) 2004-2010 The Trustees of Indiana University and Indiana
* University Research and Technology * University Research and Technology
@ -9,6 +10,8 @@
* University of Stuttgart. All rights reserved. * University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California. * Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved. * All rights reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -34,7 +37,7 @@
#include "opal/mca/crs/base/base.h" #include "opal/mca/crs/base/base.h"
#include "opal/mca/btl/btl.h" #include "opal/mca/btl/btl.h"
#include "opal/mca/btl/base/base.h" #include "opal/mca/btl/base/base.h"
#include "opal/class/ompi_free_list.h" #include "opal/class/opal_free_list.h"
#include "ompi/datatype/ompi_datatype.h" #include "ompi/datatype/ompi_datatype.h"
#include "ompi/request/request.h" #include "ompi/request/request.h"
@ -91,7 +94,7 @@ enum ompi_crcp_base_pml_states_t {
typedef enum ompi_crcp_base_pml_states_t ompi_crcp_base_pml_states_t; typedef enum ompi_crcp_base_pml_states_t ompi_crcp_base_pml_states_t;
struct ompi_crcp_base_pml_state_t { struct ompi_crcp_base_pml_state_t {
ompi_free_list_item_t super; opal_free_list_item_t super;
ompi_crcp_base_pml_states_t state; ompi_crcp_base_pml_states_t state;
int error_code; int error_code;
mca_pml_base_component_t *wrapped_pml_component; mca_pml_base_component_t *wrapped_pml_component;
@ -183,7 +186,7 @@ enum ompi_crcp_base_btl_states_t {
typedef enum ompi_crcp_base_btl_states_t ompi_crcp_base_btl_states_t; typedef enum ompi_crcp_base_btl_states_t ompi_crcp_base_btl_states_t;
struct ompi_crcp_base_btl_state_t { struct ompi_crcp_base_btl_state_t {
ompi_free_list_item_t super; opal_free_list_item_t super;
ompi_crcp_base_btl_states_t state; ompi_crcp_base_btl_states_t state;
int error_code; int error_code;
mca_btl_base_descriptor_t* des; mca_btl_base_descriptor_t* des;

ompi/mca/dpm/base/owner.txt (new file, +7)

@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: INTEL
status: maintenance

ompi/mca/dpm/orte/owner.txt (new file, +7)

@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: INTEL
status: maintenance


@ -1,3 +1,4 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/* /*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology * University Research and Technology
@ -10,6 +11,8 @@
* Copyright (c) 2004-2005 The Regents of the University of California. * Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved. * All rights reserved.
* Copyright (c) 2008-2011 University of Houston. All rights reserved. * Copyright (c) 2008-2011 University of Houston. All rights reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -21,7 +24,6 @@
#include "ompi_config.h" #include "ompi_config.h"
#include <stdio.h> #include <stdio.h>
#include "opal/class/ompi_free_list.h"
#include "opal/mca/mca.h" #include "opal/mca/mca.h"
#include "opal/mca/base/base.h" #include "opal/mca/base/base.h"

ompi/mca/fbtl/base/owner.txt (new file, +7)

@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: UH
status: active

ompi/mca/fbtl/plfs/owner.txt (new file, +7)

@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: UH
status: active

ompi/mca/fbtl/posix/owner.txt (new file, +7)

@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: UH
status: active

ompi/mca/fbtl/pvfs2/owner.txt (new file, +7)

@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: UH
status: active


@ -1,3 +1,4 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/* /*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology * University Research and Technology
@ -10,6 +11,8 @@
* Copyright (c) 2004-2005 The Regents of the University of California. * Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved. * All rights reserved.
* Copyright (c) 2008-2011 University of Houston. All rights reserved. * Copyright (c) 2008-2011 University of Houston. All rights reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -21,7 +24,6 @@
#include "ompi_config.h" #include "ompi_config.h"
#include <stdio.h> #include <stdio.h>
#include "opal/class/ompi_free_list.h"
#include "opal/mca/mca.h" #include "opal/mca/mca.h"
#include "opal/mca/base/base.h" #include "opal/mca/base/base.h"

ompi/mca/fcoll/base/owner.txt (new file, +7)

@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: UH
status: active

ompi/mca/fcoll/dynamic/owner.txt (new file, +7)

@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: UH
status: active

ompi/mca/fcoll/individual/owner.txt (new file, +7)

@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: UH
status: active

ompi/mca/fcoll/static/owner.txt (new file, +7)

@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: UH
status: active


@ -10,6 +10,7 @@
* Copyright (c) 2004-2005 The Regents of the University of California. * Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved. * All rights reserved.
* Copyright (c) 2008-2014 University of Houston. All rights reserved. * Copyright (c) 2008-2014 University of Houston. All rights reserved.
* Copyright (c) 2015 Cisco Systems, Inc. All rights reserved.
* $COPYRIGHT$ * $COPYRIGHT$
* *
* Additional copyrights may follow * Additional copyrights may follow
@ -124,7 +125,7 @@ mca_fcoll_two_phase_file_read_all (mca_io_ompio_file_t *fh,
MPI_Aint recv_buf_addr = 0; MPI_Aint recv_buf_addr = 0;
uint32_t iov_count = 0, ti = 0; uint32_t iov_count = 0, ti = 0;
struct iovec *decoded_iov = NULL, *temp_iov = NULL, *iov = NULL; struct iovec *decoded_iov = NULL, *temp_iov = NULL, *iov = NULL;
size_t max_data = 0, total_bytes = 0; size_t max_data = 0;
long long_max_data = 0, long_total_bytes = 0; long long_max_data = 0, long_total_bytes = 0;
int domain_size=0, *count_my_req_per_proc=NULL, count_my_req_procs = 0; int domain_size=0, *count_my_req_per_proc=NULL, count_my_req_procs = 0;
int count_other_req_procs; int count_other_req_procs;
@ -229,7 +230,6 @@ mca_fcoll_two_phase_file_read_all (mca_io_ompio_file_t *fh,
if ( OMPI_SUCCESS != ret ) { if ( OMPI_SUCCESS != ret ) {
goto exit; goto exit;
} }
total_bytes = (size_t) long_total_bytes;
if (!(fh->f_flags & OMPIO_CONTIGUOUS_MEMORY)) { if (!(fh->f_flags & OMPIO_CONTIGUOUS_MEMORY)) {
@ -299,7 +299,7 @@ mca_fcoll_two_phase_file_read_all (mca_io_ompio_file_t *fh,
#if DEBUG #if DEBUG
printf("%d: total_bytes:%ld, local_count: %d\n", printf("%d: total_bytes:%ld, local_count: %d\n",
fh->f_rank,total_bytes, local_count); fh->f_rank, long_total_bytes, local_count);
for (i=0 ; i<local_count ; i++) { for (i=0 ; i<local_count ; i++) {
printf("%d: fcoll:two_phase:read_all:OFFSET:%ld,LENGTH:%ld\n", printf("%d: fcoll:two_phase:read_all:OFFSET:%ld,LENGTH:%ld\n",
fh->f_rank, fh->f_rank,

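Reviewer note on the hunk above: the size_t copy total_bytes was written once and never read, so it is removed, and the DEBUG printf now prints long_total_bytes directly. That also keeps the %ld conversion matched to a long argument; pushing a size_t through %ld only happens to work where the two types share a width. A minimal, standalone illustration of the matching specifiers (plain C written for this note, not OMPI code):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    long   long_total_bytes = 1048576L;  /* the variable the hunk now prints */
    size_t max_data         = 4096;

    printf("long value:   %ld\n", long_total_bytes);  /* %ld matches long   */
    printf("size_t value: %zu\n", max_data);          /* %zu matches size_t */
    return 0;
}
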
ompi/mca/fcoll/two_phase/owner.txt (new file, 7 lines)

#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: UH
status: active

ompi/mca/fs/base/owner.txt (new file, 7 lines)

#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: UH
status: active

ompi/mca/fs/lustre/owner.txt (new file, 7 lines)

#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: UH
status: active

ompi/mca/fs/plfs/owner.txt (new file, 7 lines)

#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: UH
status: active

ompi/mca/fs/pvfs2/owner.txt (new file, 7 lines)

#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: UH
status: active

ompi/mca/fs/ufs/owner.txt (new file, 7 lines)

#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: UH
status: active

ompi/mca/io/base/owner.txt (new file, 7 lines)

#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: project
status: maintenance

(modified file)

@@ -11,7 +11,7 @@
 * Copyright (c) 2004-2005 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 2008-2014 University of Houston. All rights reserved.
-* Copyright (c) 2011 Cisco Systems, Inc. All rights reserved.
+* Copyright (c) 2011-2015 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2012-2013 Inria. All rights reserved.
 * $COPYRIGHT$
 *
@@ -338,7 +338,7 @@ int ompi_io_ompio_generate_current_file_view (struct mca_io_ompio_file_t *fh,
 sorted = (int *) malloc
 (tot_entries * sizeof(int));
-if (NULL == all_process){
+if (NULL == sorted){
 opal_output(1,"Error while allocating per process!\n");
 return OMPI_ERR_OUT_OF_RESOURCE;
 }

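Reviewer note: the hunk above fixes a copy/paste slip in ompi_io_ompio_generate_current_file_view(): the allocation of sorted was followed by a NULL test of all_process, so a failed allocation of sorted went undetected. A standalone sketch of the check-what-you-allocated pattern the fix establishes (the helper below is illustrative, not the OMPI function; the names mirror the hunk):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative helper: each malloc result is checked through the pointer it
 * actually initialized, and earlier buffers are released on failure. */
static int allocate_view_arrays(int tot_entries, int **all_process_out, int **sorted_out)
{
    int *all_process = malloc(tot_entries * sizeof(int));
    if (NULL == all_process) {
        return -1;
    }

    int *sorted = malloc(tot_entries * sizeof(int));
    if (NULL == sorted) {      /* the pre-fix code re-tested all_process here */
        free(all_process);     /* do not leak the first buffer                */
        return -1;
    }

    *all_process_out = all_process;
    *sorted_out = sorted;
    return 0;
}

int main(void)
{
    int *all_process = NULL, *sorted = NULL;

    if (0 != allocate_view_arrays(16, &all_process, &sorted)) {
        fprintf(stderr, "allocation failed\n");
        return 1;
    }
    free(all_process);
    free(sorted);
    return 0;
}
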
ompi/mca/io/ompio/owner.txt (new file, 7 lines)

#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: UH
status: active

ompi/mca/io/romio/owner.txt (new file, 7 lines)

#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: LANL/RIST
status: active

ompi/mca/mtl/base/owner.txt (new file, 7 lines)

#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: project
status: active

(modified file)

@@ -1,8 +1,11 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
 /*
 * Copyright (C) 2001-2011 Mellanox Technologies Ltd. ALL RIGHTS RESERVED.
 * Copyright (c) 2013-2014 Intel, Inc. All rights reserved
 * Copyright (c) 2014 Research Organization for Information Science
 * and Technology (RIST). All rights reserved.
+* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
+* reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@@ -255,6 +258,9 @@ static int ompi_mtl_mxm_recv_ep_address(ompi_proc_t *source_proc, void **address
 &modex_cur_size);
 if (OMPI_SUCCESS != rc) {
 MXM_ERROR("Open MPI couldn't distribute EP connection details");
+free(*address_p);
+*address_p = NULL;
+*address_len_p = 0;
 goto bail;
 }
@@ -267,9 +273,6 @@ static int ompi_mtl_mxm_recv_ep_address(ompi_proc_t *source_proc, void **address
 bail:
 free(modex_component_name);
 free(modex_name);
-if (*address_p) {
-free(*address_p);
-}
 return rc;
 }
@@ -444,11 +447,13 @@ int ompi_mtl_mxm_add_procs(struct mca_mtl_base_module_t *mtl, size_t nprocs,
 #if MXM_API < MXM_VERSION(2,0)
 if (ep_address_len != sizeof(ep_info[i])) {
 MXM_ERROR("Invalid endpoint address length");
+free(ep_address);
 rc = OMPI_ERROR;
 goto bail;
 }
 memcpy(&ep_info[i], ep_address, ep_address_len);
+free(ep_address);
 conn_reqs[ep_index].ptl_addr[MXM_PTL_SELF] = (struct sockaddr *)&(ep_info[i].ptl_addr[MXM_PTL_SELF]);
 conn_reqs[ep_index].ptl_addr[MXM_PTL_SHM] = (struct sockaddr *)&(ep_info[i].ptl_addr[MXM_PTL_SHM]);
 conn_reqs[ep_index].ptl_addr[MXM_PTL_RDMA] = (struct sockaddr *)&(ep_info[i].ptl_addr[MXM_PTL_RDMA]);
@@ -458,6 +463,7 @@ int ompi_mtl_mxm_add_procs(struct mca_mtl_base_module_t *mtl, size_t nprocs,
 endpoint = OBJ_NEW(mca_mtl_mxm_endpoint_t);
 endpoint->mtl_mxm_module = &ompi_mtl_mxm;
 err = mxm_ep_connect(ompi_mtl_mxm.ep, ep_address, &endpoint->mxm_conn);
+free(ep_address);
 if (err != MXM_OK) {
 MXM_ERROR("MXM returned connect error: %s\n", mxm_error_string(err));
 rc = OMPI_ERROR;
@@ -465,7 +471,6 @@ int ompi_mtl_mxm_add_procs(struct mca_mtl_base_module_t *mtl, size_t nprocs,
 }
 procs[i]->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_MTL] = endpoint;
 #endif
-free(ep_address);
 }
 #if MXM_API < MXM_VERSION(2,0)
@@ -508,9 +513,6 @@ bail:
 free(conn_reqs);
 free(ep_info);
 #endif
-if (ep_address) {
-free(ep_address);
-}
 return rc;
 }
@@ -539,10 +541,12 @@ int ompi_mtl_add_single_proc(struct mca_mtl_base_module_t *mtl,
 if (ep_address_len != sizeof(ep_info)) {
 MXM_ERROR("Invalid endpoint address length");
+free(ep_address);
 return OMPI_ERROR;
 }
 memcpy(&ep_info, ep_address, ep_address_len);
+free(ep_address);
 conn_req.ptl_addr[MXM_PTL_SELF] = (struct sockaddr *)&(ep_info.ptl_addr[MXM_PTL_SELF]);
 conn_req.ptl_addr[MXM_PTL_SHM] = (struct sockaddr *)&(ep_info.ptl_addr[MXM_PTL_SHM]);
 conn_req.ptl_addr[MXM_PTL_RDMA] = (struct sockaddr *)&(ep_info.ptl_addr[MXM_PTL_RDMA]);
@@ -557,7 +561,6 @@ int ompi_mtl_add_single_proc(struct mca_mtl_base_module_t *mtl,
 "unknown" : procs->proc_hostname,
 mxm_error_string(conn_reqs.error));
 }
-free(ep_address);
 return OMPI_ERROR;
 }
@@ -570,9 +573,9 @@ int ompi_mtl_add_single_proc(struct mca_mtl_base_module_t *mtl,
 endpoint = OBJ_NEW(mca_mtl_mxm_endpoint_t);
 endpoint->mtl_mxm_module = &ompi_mtl_mxm;
 err = mxm_ep_connect(ompi_mtl_mxm.ep, ep_address, &endpoint->mxm_conn);
+free(ep_address);
 if (err != MXM_OK) {
 MXM_ERROR("MXM returned connect error: %s\n", mxm_error_string(err));
-free(ep_address);
 return OMPI_ERROR;
 }
 procs->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_MTL] = endpoint;
@@ -668,6 +671,6 @@ static void ompi_mtl_mxm_mem_release_cb(void *buf, size_t length,
 OBJ_CLASS_INSTANCE(
 ompi_mtl_mxm_message_t,
-ompi_free_list_item_t,
+opal_free_list_item_t,
 NULL,
 NULL);

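Reviewer note: the mtl_mxm.c hunks above are leak fixes around the endpoint-address blob: ep_address (and *address_p in ompi_mtl_mxm_recv_ep_address()) is now freed as soon as its contents have been consumed, or immediately on each early-return path, rather than once near the end where several exits never reached the free(). A standalone sketch of that ownership pattern; fetch_blob() and endpoint_info_t are invented for the illustration and are not OMPI APIs:

#include <stdlib.h>
#include <string.h>

typedef struct { char addr[64]; } endpoint_info_t;   /* stand-in for ep_info */

/* Stand-in for the modex lookup: hands back a heap buffer the caller owns. */
static int fetch_blob(void **buf, size_t *len)
{
    *len = sizeof(endpoint_info_t);
    *buf = calloc(1, *len);
    return (NULL == *buf) ? -1 : 0;
}

static int connect_one(endpoint_info_t *info)
{
    void  *blob = NULL;
    size_t len  = 0;

    if (0 != fetch_blob(&blob, &len)) {
        return -1;                 /* nothing was allocated, nothing to free */
    }
    if (len != sizeof(*info)) {
        free(blob);                /* early-return paths release the buffer  */
        return -1;
    }
    memcpy(info, blob, len);
    free(blob);                    /* released as soon as it has been copied */
    return 0;
}

int main(void)
{
    endpoint_info_t info;
    return (0 == connect_one(&info)) ? 0 : 1;
}
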
(modified file)

@@ -1,5 +1,8 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
 /*
 * Copyright (C) Mellanox Technologies Ltd. 2001-2011. ALL RIGHTS RESERVED.
+* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
+* reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@@ -30,7 +33,7 @@
 #include "ompi/mca/pml/pml.h"
 #include "ompi/mca/mtl/mtl.h"
 #include "ompi/mca/mtl/base/base.h"
-#include "opal/class/ompi_free_list.h"
+#include "opal/class/opal_free_list.h"
 #include "opal/util/output.h"
 #include "opal/util/show_help.h"
@@ -96,7 +99,7 @@ extern int ompi_mtl_mxm_finalize(struct mca_mtl_base_module_t* mtl);
 int ompi_mtl_mxm_module_init(void);
 struct ompi_mtl_mxm_message_t {
-ompi_free_list_item_t super;
+opal_free_list_item_t super;
 mxm_mq_h mq;
 mxm_conn_h conn;

(modified file)

@@ -1,3 +1,4 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
 /*
 * Copyright (C) Mellanox Technologies Ltd. 2001-2011. ALL RIGHTS RESERVED.
 * Copyright (c) 2015 Los Alamos National Security, LLC. All rights
@@ -240,16 +241,16 @@ static int ompi_mtl_mxm_component_open(void)
 return OPAL_ERR_NOT_AVAILABLE;
 }
-OBJ_CONSTRUCT(&mca_mtl_mxm_component.mxm_messages, ompi_free_list_t);
-rc = ompi_free_list_init_new(&mca_mtl_mxm_component.mxm_messages,
+OBJ_CONSTRUCT(&mca_mtl_mxm_component.mxm_messages, opal_free_list_t);
+rc = opal_free_list_init (&mca_mtl_mxm_component.mxm_messages,
 sizeof(ompi_mtl_mxm_message_t),
 opal_cache_line_size,
 OBJ_CLASS(ompi_mtl_mxm_message_t),
 0, opal_cache_line_size,
 32 /* free list num */,
 -1 /* free list max */,
 32 /* free list inc */,
-NULL);
+NULL, 0, NULL, NULL, NULL);
 if (OMPI_SUCCESS != rc) {
 opal_show_help("help-mtl-mxm.txt", "mxm init", true,
 mxm_error_string(err));

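Reviewer note: this component hunk is the clearest instance of the free-list migration that runs through the rest of the merge: ompi_free_list_t / ompi_free_list_init_new become opal_free_list_t / opal_free_list_init, and the OMPI_FREE_LIST_*_MT and OPAL_FREE_LIST_* macros become the opal_free_list_get / opal_free_list_wait / opal_free_list_return functions used in the later hunks. The sketch below strings those calls together the way this diff uses them; it assumes the OPAL headers from this tree, and the per-argument comments are inferred from the values passed here and in the ofi/portals4 hunks rather than quoted from opal_free_list.h, so treat them as an interpretation:

#include "opal/class/opal_free_list.h"

/* Throwaway item type for the sketch; the real code uses ompi_mtl_mxm_message_t. */
typedef struct sketch_item_t {
    opal_free_list_item_t super;
    int payload;
} sketch_item_t;
OBJ_CLASS_INSTANCE(sketch_item_t, opal_free_list_item_t, NULL, NULL);

static opal_free_list_t sketch_pool;

int sketch_pool_roundtrip(void)
{
    opal_free_list_item_t *item;
    int rc;

    OBJ_CONSTRUCT(&sketch_pool, opal_free_list_t);
    rc = opal_free_list_init(&sketch_pool,
                             sizeof(sketch_item_t),      /* element size (inferred role)          */
                             8,                          /* element alignment (inferred role)     */
                             OBJ_CLASS(sketch_item_t),
                             0, 0,                       /* no separate payload buffer            */
                             1, -1, 1,                   /* initial / max (-1 = unbounded) / grow */
                             NULL, 0, NULL, NULL, NULL); /* mpool, flags, and init hooks unused   */
    if (0 != rc) {
        return rc;
    }

    item = opal_free_list_get(&sketch_pool);             /* non-blocking; NULL when exhausted     */
    if (NULL != item) {
        opal_free_list_return(&sketch_pool, item);
    }

    OBJ_DESTRUCT(&sketch_pool);
    return 0;
}

opal_free_list_wait() is the blocking variant used in the improbe hunk below; the non-blocking get() can return NULL, which is why those callers keep their NULL checks.
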
(modified file)

@@ -1,8 +1,11 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
 /*
 * Copyright (C) Mellanox Technologies Ltd. 2001-2011. ALL RIGHTS RESERVED.
 * Copyright (c) 2013 The University of Tennessee and The University
 * of Tennessee Research Foundation. All rights
 * reserved.
+* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
+* reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@@ -57,10 +60,10 @@ int ompi_mtl_mxm_improbe(struct mca_mtl_base_module_t *mtl,
 mxm_error_t err;
 mxm_recv_req_t req;
-ompi_free_list_item_t *item;
+opal_free_list_item_t *item;
 ompi_mtl_mxm_message_t *msgp;
-OMPI_FREE_LIST_WAIT_MT(&mca_mtl_mxm_component.mxm_messages, item);
+item = opal_free_list_wait (&mca_mtl_mxm_component.mxm_messages);
 if (OPAL_UNLIKELY(NULL == item)) {
 return OMPI_ERR_OUT_OF_RESOURCE;
 }

(modified file)

@@ -1,5 +1,8 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
 /*
 * Copyright (C) Mellanox Technologies Ltd. 2001-2011. ALL RIGHTS RESERVED.
+* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
+* reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@@ -185,8 +188,7 @@ int ompi_mtl_mxm_imrecv(struct mca_mtl_base_module_t* mtl,
 return OMPI_ERROR;
 }
-OMPI_FREE_LIST_RETURN_MT(&mca_mtl_mxm_component.mxm_messages,
-(ompi_free_list_item_t *) msgp);
+opal_free_list_return (&mca_mtl_mxm_component.mxm_messages, (opal_free_list_item_t *) msgp);
 ompi_message_return(*message);
 (*message) = MPI_MESSAGE_NULL;

(modified file)

@@ -1,5 +1,8 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
 /*
 * Copyright (C) Mellanox Technologies Ltd. 2001-2011. ALL RIGHTS RESERVED.
+* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
+* reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@@ -54,7 +57,7 @@ extern mca_mtl_mxm_module_t ompi_mtl_mxm;
 typedef struct mca_mtl_mxm_component_t {
 mca_mtl_base_component_2_0_0_t super; /**< base MTL component */
-ompi_free_list_t mxm_messages; /* will be used for MPI_Mprobe and MPI_Mrecv calls */
+opal_free_list_t mxm_messages; /* will be used for MPI_Mprobe and MPI_Mrecv calls */
 } mca_mtl_mxm_component_t;

ompi/mca/mtl/mxm/owner.txt (new file, 7 lines)

#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: MELLANOX
status: active

(modified file)

@@ -92,9 +92,9 @@ ompi_mtl_ofi_component_open(void)
 OBJ_CONSTRUCT(&ompi_mtl_ofi.free_messages, opal_free_list_t);
 opal_free_list_init(&ompi_mtl_ofi.free_messages,
-sizeof(ompi_mtl_ofi_message_t),
-OBJ_CLASS(ompi_mtl_ofi_message_t),
-1, -1, 1);
+sizeof(ompi_mtl_ofi_message_t), 8,
+OBJ_CLASS(ompi_mtl_ofi_message_t), 0, 0,
+1, -1, 1, NULL, 0, NULL, NULL, NULL);
 ompi_mtl_ofi.domain = NULL;
 ompi_mtl_ofi.av = NULL;

(modified file)

@@ -1,6 +1,9 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
 /*
 * Copyright (c) 2013-2014 Intel, Inc. All rights reserved
 * Copyright (c) 2014 Cisco Systems, Inc. All rights reserved
+* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
+* reserved.
 *
 * $COPYRIGHT$
 *
@@ -26,13 +29,10 @@ OBJ_CLASS_DECLARATION(ompi_mtl_ofi_message_t);
 static inline ompi_mtl_ofi_message_t*
 ompi_mtl_ofi_message_alloc(const struct fi_cq_tagged_entry *wc)
 {
-int rc __opal_attribute_unused__;
 opal_free_list_item_t *tmp;
 ompi_mtl_ofi_message_t *message;
-OPAL_FREE_LIST_GET(&ompi_mtl_ofi.free_messages,
-tmp,
-rc);
+tmp = opal_free_list_get (&ompi_mtl_ofi.free_messages);
 if (NULL == tmp) return NULL;
 message = (ompi_mtl_ofi_message_t*) tmp;
@@ -45,8 +45,8 @@ ompi_mtl_ofi_message_alloc(const struct fi_cq_tagged_entry *wc)
 static inline void
 ompi_mtl_ofi_message_free(ompi_mtl_ofi_message_t *message)
 {
-OPAL_FREE_LIST_RETURN(&ompi_mtl_ofi.free_messages,
+opal_free_list_return (&ompi_mtl_ofi.free_messages,
 &message->super);
 }
 #endif

ompi/mca/mtl/ofi/owner.txt (new file, 7 lines)

#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: INTEL
status: active

(modified file)

@@ -1,3 +1,4 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
 /*
 * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
 * University Research and Technology
@@ -204,7 +205,7 @@ ompi_mtl_portals4_component_open(void)
 sizeof(ompi_mtl_portals4_message_t) +
 ompi_mtl_portals4.eager_limit,
 OBJ_CLASS(ompi_mtl_portals4_message_t),
-1, -1, 1);
+0, 0, 1, -1, 1, NULL, 0, NULL, NULL, NULL);
 ompi_mtl_portals4.ni_h = PTL_INVALID_HANDLE;
 ompi_mtl_portals4.send_eq_h = PTL_INVALID_HANDLE;

(modified file)

@@ -1,5 +1,8 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
 /*
 * Copyright (c) 2012 Sandia National Laboratories. All rights reserved.
+* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
+* reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@@ -41,7 +44,7 @@ ompi_mtl_portals4_flowctl_init(void)
 opal_free_list_init(&ompi_mtl_portals4.flowctl.pending_fl,
 sizeof(ompi_mtl_portals4_pending_request_t),
 OBJ_CLASS(ompi_mtl_portals4_pending_request_t),
-1, -1, 1);
+0, 0, 1, -1, 1, NULL, 0, NULL, NULL, NULL);
 ompi_mtl_portals4.flowctl.max_send_slots = (ompi_mtl_portals4.send_queue_size - 3) / 3;
 ompi_mtl_portals4.flowctl.send_slots = ompi_mtl_portals4.flowctl.max_send_slots;

(modified file)

@@ -1,5 +1,8 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
 /*
 * Copyright (c) 2012 Sandia National Laboratories. All rights reserved.
+* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
+* reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@@ -22,13 +25,10 @@ OBJ_CLASS_DECLARATION(ompi_mtl_portals4_message_t);
 static inline ompi_mtl_portals4_message_t*
 ompi_mtl_portals4_message_alloc(const ptl_event_t *ev)
 {
-int rc __opal_attribute_unused__;
 opal_free_list_item_t *tmp;
 ompi_mtl_portals4_message_t* message;
-OPAL_FREE_LIST_GET(&ompi_mtl_portals4.fl_message,
-tmp,
-rc);
+tmp = opal_free_list_get (&ompi_mtl_portals4.fl_message);
 if (NULL == tmp) return NULL;
 message = (ompi_mtl_portals4_message_t*) tmp;
@@ -51,8 +51,8 @@ ompi_mtl_portals4_message_alloc(const ptl_event_t *ev)
 static inline void
 ompi_mtl_portals4_message_free(ompi_mtl_portals4_message_t *message)
 {
-OPAL_FREE_LIST_RETURN(&ompi_mtl_portals4.fl_message,
+opal_free_list_return (&ompi_mtl_portals4.fl_message,
 &message->super);
 }
 #endif

(modified file)

@@ -1,3 +1,4 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
 /*
 * Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
 * University Research and Technology
@@ -10,6 +11,8 @@
 * Copyright (c) 2004-2005 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 2010 Sandia National Laboratories. All rights reserved.
+* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
+* reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
@@ -118,8 +121,8 @@ ompi_mtl_portals4_callback(ptl_event_t *ev,
 *complete = true;
 #if OMPI_MTL_PORTALS4_FLOW_CONTROL
 OPAL_THREAD_ADD32(&ompi_mtl_portals4.flowctl.send_slots, 1);
-OPAL_FREE_LIST_RETURN(&ompi_mtl_portals4.flowctl.pending_fl,
+opal_free_list_return (&ompi_mtl_portals4.flowctl.pending_fl,
 &ptl_request->pending->super);
 if (OPAL_UNLIKELY(0 != opal_list_get_size(&ompi_mtl_portals4.flowctl.pending_sends))) {
 ompi_mtl_portals4_pending_list_progress();
@@ -423,7 +426,7 @@ ompi_mtl_portals4_send_start(struct mca_mtl_base_module_t* mtl,
 (int)length));
 #if OMPI_MTL_PORTALS4_FLOW_CONTROL
-OPAL_FREE_LIST_GET(&ompi_mtl_portals4.flowctl.pending_fl, item, ret);
+item = opal_free_list_get (&ompi_mtl_portals4.flowctl.pending_fl);
 if (NULL == item) return OMPI_ERR_OUT_OF_RESOURCE;
 pending = (ompi_mtl_portals4_pending_request_t*) item;

ompi/mca/mtl/portals4/owner.txt (new file, 7 lines)

#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: SNL
status: active

ompi/mca/mtl/psm/owner.txt (new file, 7 lines)

#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: INTEL
status: active

ompi/mca/op/base/owner.txt (new file, 7 lines)

#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: project
status: active

ompi/mca/op/example/owner.txt (new file, 7 lines)

#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: CISCO
status: maintenance

ompi/mca/op/x86/owner.txt (new file, 7 lines)

#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: CISCO
status: maintenance

Some files were not shown because too many files changed in this diff.