/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
 * Copyright (c) 2004-2010 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2011-2015 Los Alamos National Security, LLC. All rights
 *                         reserved.
 * Copyright (c) 2014-2016 Intel, Inc. All rights reserved.
 * Copyright (c) 2015      Research Organization for Information Science
 *                         and Technology (RIST). All rights reserved.
 *
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

/**
 * @file
 *
 * Runtime Messaging Layer (RML) Communication Interface
 *
 * The Runtime Messaging Layer (RML) provides basic point-to-point
 * communication between ORTE processes. The system is available for
 * most architectures, with some exceptions (the Cray XT3/XT4, for example).
 */

#ifndef ORTE_MCA_RML_RML_H_
#define ORTE_MCA_RML_RML_H_

#include "orte_config.h"
#include "orte/types.h"

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#include "orte/mca/mca.h"
#include "opal/mca/crs/crs.h"
#include "opal/mca/crs/base/base.h"
#include "orte/mca/routed/routed.h"

#include "orte/mca/rml/rml_types.h"

BEGIN_C_DECLS


/* ******************************************************************** */

/* forward declare */
struct orte_rml_base_module_t;
struct orte_rml_component_t;

typedef struct {
    opal_object_t super;
    orte_process_name_t name;
    opal_buffer_t data;
    bool active;
} orte_rml_recv_cb_t;
OBJ_CLASS_DECLARATION(orte_rml_recv_cb_t);
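
/*
 * Illustrative sketch (not part of this interface): orte_rml_recv_cb_t is
 * commonly paired with orte_rml_recv_callback() to wait for a single reply
 * in an otherwise non-blocking code path.  The recv_buffer_nb() entry
 * point, the ORTE_WAIT_FOR_COMPLETION macro, and the MY_TAG value used
 * below are assumptions drawn from the surrounding ORTE code base.
 *
 *   orte_rml_recv_cb_t xfer;
 *   OBJ_CONSTRUCT(&xfer, orte_rml_recv_cb_t);
 *   xfer.active = true;
 *   orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD, MY_TAG,
 *                           ORTE_RML_NON_PERSISTENT,
 *                           orte_rml_recv_callback, &xfer);
 *   ORTE_WAIT_FOR_COMPLETION(xfer.active);
 *   (xfer.data now holds the received buffer; xfer.name names the sender)
 *   OBJ_DESTRUCT(&xfer);
 */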

/* Provide a generic callback function that releases the buffer
 * following a non-blocking send, since that pattern recurs
 * throughout the code base
 */
ORTE_DECLSPEC void orte_rml_send_callback(int status, orte_process_name_t* sender,
                                          opal_buffer_t* buffer, orte_rml_tag_t tag,
                                          void* cbdata);
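
/*
 * Illustrative sketch: typical use is to pack a buffer, hand it to a
 * non-blocking send, and let orte_rml_send_callback release it on
 * completion.  The orte_rml.send_buffer_nb() entry point, the MY_TAG
 * value, and the already-known destination name "peer" are assumptions
 * for the purpose of the example.
 *
 *   int32_t value = 42;
 *   opal_buffer_t *buf = OBJ_NEW(opal_buffer_t);
 *   opal_dss.pack(buf, &value, 1, OPAL_INT32);
 *   orte_rml.send_buffer_nb(&peer, buf, MY_TAG,
 *                           orte_rml_send_callback, NULL);
 */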

/* Companion callback for non-blocking receives: stores the incoming
 * buffer and sender name in the orte_rml_recv_cb_t passed as cbdata
 * and marks it complete
 */
ORTE_DECLSPEC void orte_rml_recv_callback(int status, orte_process_name_t* sender,
                                          opal_buffer_t *buffer,
                                          orte_rml_tag_t tag, void *cbdata);


/* ******************************************************************** */
/* RML CALLBACK FUNCTION DEFINITIONS */

/**
 * Function prototype for callback from non-blocking iovec send and recv
 *
 * Function prototype for callback from non-blocking iovec send and recv.
 * On send, the iovec pointer will be the same pointer passed to
 * send_nb and count will equal the count given to send.
 *
 * On recv, the iovec pointer will be the address of a single iovec
 * allocated and owned by the RML, not the process receiving the
 * callback. Ownership of the data block can be transferred by setting
 * a user variable to point to the data block, and setting the
 * iovec->iov_base pointer to NULL.
 *
 * @note The in/out designations on the parameters are relative to the
 * user's callback function.
 *
 * @param[in] status  Completion status
 * @param[in] peer    Opaque name of peer process
 * @param[in] msg     Pointer to the array of iovec that was sent,
 *                    or to a single iovec that has been received
 * @param[in] count   Number of iovecs in the array
 * @param[in] tag     User defined tag for matching send/recv
 * @param[in] cbdata  User data passed to send_nb()
 */
typedef void (*orte_rml_callback_fn_t)(int status,
                                       orte_process_name_t* peer,
                                       struct iovec* msg,
                                       int count,
                                       orte_rml_tag_t tag,
                                       void* cbdata);
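
/*
 * Illustrative sketch: a minimal completion handler matching
 * orte_rml_callback_fn_t for the send side.  It assumes the caller
 * allocated the iovec array and its data blocks and wants them released
 * once the send completes; the name my_iovec_sent is hypothetical.
 *
 *   static void my_iovec_sent(int status, orte_process_name_t* peer,
 *                             struct iovec* msg, int count,
 *                             orte_rml_tag_t tag, void* cbdata)
 *   {
 *       int i;
 *       if (ORTE_SUCCESS != status) {
 *           ORTE_ERROR_LOG(status);
 *       }
 *       for (i = 0; i < count; i++) {
 *           free(msg[i].iov_base);
 *       }
 *       free(msg);
 *   }
 */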
|
2005-03-14 23:57:21 +03:00
|
|
|
|
|
|
|
|
|
|
|
/**
|
2007-07-20 05:34:02 +04:00
|
|
|
* Funtion prototype for callback from non-blocking buffer send and receive
|
|
|
|
*
|
As per the RFC, bring in the ORTE async progress code and the rewrite of OOB:
*** THIS RFC INCLUDES A MINOR CHANGE TO THE MPI-RTE INTERFACE ***
Note: during the course of this work, it was necessary to completely separate the MPI and RTE progress engines. There were multiple places in the MPI layer where ORTE_WAIT_FOR_COMPLETION was being used. A new OMPI_WAIT_FOR_COMPLETION macro was created (defined in ompi/mca/rte/rte.h) that simply cycles across opal_progress until the provided flag becomes false. Places where the MPI layer blocked waiting for RTE to complete an event have been modified to use this macro.
***************************************************************************************
I am reissuing this RFC because of the time that has passed since its original release. Since its initial release and review, I have debugged it further to ensure it fully supports tests like loop_spawn. It therefore seems ready for merge back to the trunk. Given its prior review, I have set the timeout for one week.
The code is in https://bitbucket.org/rhc/ompi-oob2
WHAT: Rewrite of ORTE OOB
WHY: Support asynchronous progress and a host of other features
WHEN: Wed, August 21
SYNOPSIS:
The current OOB has served us well, but a number of limitations have been identified over the years. Specifically:
* it is only progressed when called via opal_progress, which can lead to hangs or recursive calls into libevent (which is not supported by that code)
* we've had issues when multiple NICs are available as the code doesn't "shift" messages between transports - thus, all nodes had to be available via the same TCP interface.
* the OOB "unloads" incoming opal_buffer_t objects during the transmission, thus preventing use of OBJ_RETAIN in the code when repeatedly sending the same message to multiple recipients
* there is no failover mechanism across NICs - if the selected NIC (or its attached switch) fails, we are forced to abort
* only one transport (i.e., component) can be "active"
The revised OOB resolves these problems:
* async progress is used for all application processes, with the progress thread blocking in the event library
* each available TCP NIC is supported by its own TCP module. The ability to asynchronously progress each module independently is provided, but not enabled by default (a runtime MCA parameter turns it "on")
* multi-address TCP NICs (e.g., a NIC with both an IPv4 and IPv6 address, or with virtual interfaces) are supported - reachability is determined by comparing the contact info for a peer against all addresses within the range covered by the address/mask pairs for the NIC.
* a message that arrives on one TCP NIC is automatically shifted to whatever NIC that is connected to the next "hop" if that peer cannot be reached by the incoming NIC. If no TCP module will reach the peer, then the OOB attempts to send the message via all other available components - if none can reach the peer, then an "error" is reported back to the RML, which then calls the errmgr for instructions.
* opal_buffer_t now conforms to standard object rules re OBJ_RETAIN as we no longer "unload" the incoming object
* NIC failure is reported to the TCP component, which then tries to resend the message across any other available TCP NIC. If that doesn't work, then the message is given back to the OOB base to try using other components. If all that fails, then the error is reported to the RML, which reports to the errmgr for instructions
* obviously from the above, multiple OOB components (e.g., TCP and UD) can be active in parallel
* the matching code has been moved to the RML (and out of the OOB/TCP component) so it is independent of transport
* routing is done by the individual OOB modules (as opposed to the RML). Thus, both routed and non-routed transports can simultaneously be active
* all blocking send/recv APIs have been removed. Everything operates asynchronously.
KNOWN LIMITATIONS:
* although provision is made for component failover as described above, the code for doing so has not been fully implemented yet. At the moment, if all connections for a given peer fail, the errmgr is notified of a "lost connection", which by default results in termination of the job if it was a lifeline
* the IPv6 code is present and compiles, but is not complete. Since the current IPv6 support in the OOB doesn't work anyway, I don't consider this a blocker
* routing is performed at the individual module level, yet the active routed component is selected on a global basis. We probably should update that to reflect that different transports may need/choose to route in different ways
* obviously, not every error path has been tested nor necessarily covered
* determining abnormal termination is more challenging than in the old code as we now potentially have multiple ways of connecting to a process. Ideally, we would declare "connection failed" when *all* transports can no longer reach the process, but that requires some additional (possibly complex) code. For now, the code replicates the old behavior only somewhat modified - i.e., if a module sees its connection fail, it checks to see if it is a lifeline. If so, it notifies the errmgr that the lifeline is lost - otherwise, it notifies the errmgr that a non-lifeline connection was lost.
* reachability is determined solely on the basis of a shared subnet address/mask - more sophisticated algorithms (e.g., the one used in the tcp btl) are required to handle routing via gateways
* the RML needs to assign sequence numbers to each message on a per-peer basis. The receiving RML will then deliver messages in order, thus preventing out-of-order messaging in the case where messages travel across different transports or a message needs to be redirected/resent due to failure of a NIC
This commit was SVN r29058.
2013-08-22 20:37:40 +04:00
|
|
|
* Function prototype for callback from non-blocking buffer send and
|
|
|
|
* receive. On send, the buffer will be the same pointer passed to
|
|
|
|
* send_buffer_nb. On receive, the buffer will be allocated and owned
|
|
|
|
* by the RML, not the process receiving the callback.
|
2007-07-20 05:34:02 +04:00
|
|
|
*
|
|
|
|
* @note The parameter in/out parameters are relative to the user's callback
|
|
|
|
* function.
|
|
|
|
*
|
As per the RFC, bring in the ORTE async progress code and the rewrite of OOB:
*** THIS RFC INCLUDES A MINOR CHANGE TO THE MPI-RTE INTERFACE ***
 * @param[in] status Completion status
 * @param[in] peer Name of peer process
 * @param[in] buffer Message buffer
 * @param[in] tag User defined tag for matching send/recv
 * @param[in] cbdata User data passed to send_buffer_nb() or recv_buffer_nb()
 */
typedef void (*orte_rml_buffer_callback_fn_t)(int status,
                                              orte_process_name_t* peer,
                                              struct opal_buffer_t* buffer,
                                              orte_rml_tag_t tag,
                                              void* cbdata);
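
/*
 * Illustrative example (not part of this interface): a completion callback
 * matching orte_rml_buffer_callback_fn_t that logs a failed send and then
 * drops its reference to the buffer.  It assumes the caller allocated the
 * buffer with OBJ_NEW(opal_buffer_t) and passed no cbdata.
 *
 *   static void my_send_complete(int status, orte_process_name_t *peer,
 *                                struct opal_buffer_t *buffer,
 *                                orte_rml_tag_t tag, void *cbdata)
 *   {
 *       if (ORTE_SUCCESS != status) {
 *           opal_output(0, "send to %s failed: %d",
 *                       ORTE_NAME_PRINT(peer), status);
 *       }
 *       OBJ_RELEASE(buffer);   // the RML is done with the buffer
 *   }
 */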

/**
 * Function prototype for exception callback
 *
 * Function prototype for the callback triggered when a communication error is detected.
 *
 * @note The in/out parameter designations are relative to the user's
 * callback function.
 *
 * @param[in] peer Name of peer process
 * @param[in] exception Description of the error causing the exception
 */
typedef void (*orte_rml_exception_callback_t)(orte_process_name_t* peer,
                                              orte_rml_exception_t exception);

/* ******************************************************************** */

/* RML INTERNAL MODULE API DEFINITION */

/**
 * "Ping" another process to determine availability
 *
 * Ping another process to determine if it is available. This
 * function only verifies that the process is alive and will allow a
 * connection to the local process. It does *not* qualify as
 * establishing communication with the remote process, as required by
 * the note for set_contact_info().
 *
 * @param[in] contact_info The contact info string for the remote process
 * @param[in] tv Timeout after which the ping should be failed
 *
 * @retval ORTE_SUCCESS The process is available and will allow connections
 *                      from the local process
 * @retval ORTE_ERROR An unspecified error occurred during the attempt
 */
typedef int (*orte_rml_module_ping_fn_t)(struct orte_rml_base_module_t *mod,
                                         const char* contact_info,
                                         const struct timeval* tv);

/**
 * Send an iovec non-blocking message
 *
 * Send an array of iovecs to the specified peer. The call
 * will return immediately, although the iovecs may not be modified
 * until the completion callback is triggered. The iovecs *may* be
 * passed to another call to send_nb before the completion callback is
 * triggered. The callback being triggered does not give any
 * indication of remote completion.
 *
 * @param[in] peer Name of receiving process
 * @param[in] msg Pointer to an array of iovecs to be sent
 * @param[in] count Number of iovecs in array
 * @param[in] tag User defined tag for matching send/recv
 * @param[in] cbfunc Callback function on message completion
 * @param[in] cbdata User data to provide during completion callback
 *
 * @retval ORTE_SUCCESS The message was successfully started
 * @retval ORTE_ERR_BAD_PARAM One of the parameters was invalid
 * @retval ORTE_ERR_ADDRESSEE_UNKNOWN Contact information for the
 *                                    receiving process is not available
 * @retval ORTE_ERROR An unspecified error occurred
 */
typedef int (*orte_rml_module_send_nb_fn_t)(struct orte_rml_base_module_t *mod,
                                            orte_process_name_t* peer,
                                            struct iovec* msg,
                                            int count,
                                            orte_rml_tag_t tag,
                                            orte_rml_callback_fn_t cbfunc,
                                            void* cbdata);
/**
 * Send a buffer non-blocking message
 *
 * Send a buffer to the specified peer. The call
 * will return immediately, although the buffer may not be modified
 * until the completion callback is triggered. The buffer *may* be
 * passed to another call to send_nb before the completion callback is
 * triggered. The callback being triggered does not give any
 * indication of remote completion.
 *
 * @param[in] peer Name of receiving process
 * @param[in] buffer Pointer to buffer to be sent
 * @param[in] tag User defined tag for matching send/recv
 * @param[in] cbfunc Callback function on message completion
 * @param[in] cbdata User data to provide during completion callback
 *
 * @retval ORTE_SUCCESS The message was successfully started
 * @retval ORTE_ERR_BAD_PARAM One of the parameters was invalid
 * @retval ORTE_ERR_ADDRESSEE_UNKNOWN Contact information for the
 *                                    receiving process is not available
 * @retval ORTE_ERROR An unspecified error occurred
 */
typedef int (*orte_rml_module_send_buffer_nb_fn_t)(struct orte_rml_base_module_t *mod,
                                                   orte_process_name_t* peer,
                                                   struct opal_buffer_t* buffer,
                                                   orte_rml_tag_t tag,
                                                   orte_rml_buffer_callback_fn_t cbfunc,
                                                   void* cbdata);
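
/*
 * Illustrative sketch (hypothetical names): pack an int32 into an
 * opal_buffer_t and hand it to a module's non-blocking buffer send via the
 * typedef above.  Ownership of the buffer passes to the send; it should be
 * released in the completion callback supplied by the caller.
 *
 *   static int send_status(struct orte_rml_base_module_t *mod,
 *                          orte_rml_module_send_buffer_nb_fn_t send_buffer_nb,
 *                          orte_process_name_t *peer, orte_rml_tag_t tag,
 *                          orte_rml_buffer_callback_fn_t cbfunc)
 *   {
 *       int rc;
 *       int32_t code = 0;                            // arbitrary payload
 *       opal_buffer_t *buf = OBJ_NEW(opal_buffer_t);
 *
 *       if (OPAL_SUCCESS != (rc = opal_dss.pack(buf, &code, 1, OPAL_INT32))) {
 *           OBJ_RELEASE(buf);
 *           return rc;
 *       }
 *       return send_buffer_nb(mod, peer, buf, tag, cbfunc, NULL);
 *   }
 */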

/**
 * Purge the RML/OOB of contact info and pending messages
 * to/from a specified process. Used when a process aborts
 * and is to be restarted.
 */
typedef void (*orte_rml_module_purge_fn_t)(orte_process_name_t *peer);

/**
 * RML internal module interface - these will be implemented by all RML components
 */
typedef struct orte_rml_base_module_t {
    /* pointer to the parent component for this module */
    struct orte_rml_component_t *component;
    /* the routed module to be used */
    char *routed;
    /** Ping process for connectivity check */
    orte_rml_module_ping_fn_t           ping;
    /** Send non-blocking iovec message */
    orte_rml_module_send_nb_fn_t        send_nb;
    /** Send non-blocking buffer message */
    orte_rml_module_send_buffer_nb_fn_t send_buffer_nb;
    /** Purge information */
    orte_rml_module_purge_fn_t          purge;
} orte_rml_base_module_t;
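
/*
 * Hypothetical illustration of how a component might instantiate this
 * module; all "example_*" symbols are placeholders (real components live
 * under orte/mca/rml/<component>).
 *
 *   static orte_rml_base_module_t example_module = {
 *       .component      = &example_rml_component,
 *       .routed         = NULL,               // set when the conduit is opened
 *       .ping           = example_ping,
 *       .send_nb        = example_send_nb,
 *       .send_buffer_nb = example_send_buffer_nb,
 *       .purge          = example_purge
 *   };
 */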

/* ******************************************************************** */

/* RML PUBLIC MODULE API DEFINITION */

/** Open conduit - call each component and see if it can provide a
 * conduit that can satisfy all these attributes - return the conduit id
 * (a negative value indicates error)
 */
typedef orte_rml_conduit_t (*orte_rml_API_open_conduit_fn_t)(opal_list_t *attributes);

/**
 * Close a conduit - allow the component to cleanup.
 */
typedef void (*orte_rml_API_close_conduit_fn_t)(orte_rml_conduit_t id);
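
/*
 * Usage sketch (assumes the public API is reachable through the usual
 * orte_rml global; attribute keys omitted): open a conduit with default
 * attributes, check for failure per the note above, and close it when done.
 *
 *   static int use_default_conduit(void)
 *   {
 *       opal_list_t attrs;
 *       orte_rml_conduit_t conduit;
 *
 *       OBJ_CONSTRUCT(&attrs, opal_list_t);
 *       // a caller could append orte_attribute_t entries here to request
 *       // a specific transport
 *       conduit = orte_rml.open_conduit(&attrs);
 *       OPAL_LIST_DESTRUCT(&attrs);
 *       if (0 > conduit) {
 *           return ORTE_ERROR;                // no component could comply
 *       }
 *       // ... issue send_nb/send_buffer_nb calls on this conduit ...
 *       orte_rml.close_conduit(conduit);
 *       return ORTE_SUCCESS;
 *   }
 */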

/**
 * Query the library to provide all the supported interfaces/transport
 * providers in the current node/system.
 *
 * @param[out] transports List of providers and their attributes
 */
typedef int (*orte_rml_API_query_transports_fn_t)(opal_list_t *transports);

/* query the routed module for a given conduit */
typedef char* (*orte_rml_API_query_routed_fn_t)(orte_rml_conduit_t id);

/**
 * Get a "contact info" string for the local process
 *
 * Get a "contact info" string that can be used by other processes to
 * share the contact information for the given process. The "contact
 * info" string includes the process identifier for the given process
 * and uses only basic ascii characters. It should be quoted when
 * evaluated by a shell, although no special escaping is necessary.
 *
 * @note The function may return a contact info string which contains
 * multiple addresses.
 *
 * @retval non-NULL The contact information for this process
 * @retval NULL An error occurred when trying to get the current
 *              process contact info
 */
typedef char* (*orte_rml_API_get_contact_info_fn_t)(void);

/**
 * Update the RML with a remote process's contact info
 *
 * Update the RML with a remote process's contact information, as
 * returned from the get_contact_info() function on the remote
 * process. Before a send can be initiated to a remote process,
 * either this function must be called for that process or that
 * process must have already established a connection to the local
 * process.
 *
 * @note The user may not always explicitly call this function
 * directly, but may instead cause it to be called through one of the
 * contact setup functions available in
 * orte/mca/rml/base/rml_contact.h.
 *
 * @param[in] contact_info The contact information string of a peer
 */
typedef void (*orte_rml_API_set_contact_info_fn_t)(const char *contact_info);
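
/*
 * Illustrative sketch (not part of this interface): exchanging contact info
 * between two processes. How the URI string travels between them (file,
 * environment, PMIx, etc.) is up to the caller; the string returned by
 * get_contact_info() is assumed to be owned, and eventually freed, by the
 * caller.
 *
 *    char *my_uri, *peer_uri;
 *
 *    my_uri = orte_rml.get_contact_info();
 *    if (NULL == my_uri) {
 *        ORTE_ERROR_LOG(ORTE_ERROR);
 *    }
 *    ... publish my_uri and obtain peer_uri by some out-of-band means ...
 *    orte_rml.set_contact_info(peer_uri);
 *    free(my_uri);
 */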

/**
 * "Ping" another process to determine availability
 *
 * Ping another process to determine if it is available. This
 * function only verifies that the process is alive and will allow a
 * connection to the local process. It does *not* qualify as
 * establishing communication with the remote process, as required by
 * the note for set_contact_info().
 *
 * @param[in] conduit_id Conduit to be used to reach the remote process
 * @param[in] contact_info The contact info string for the remote process
 * @param[in] tv Timeout after which the ping is declared to have failed
 *
 * @retval ORTE_SUCCESS The process is available and will allow connections
 *                      from the local process
 * @retval ORTE_ERROR An unspecified error occurred during the ping
 */
typedef int (*orte_rml_API_ping_fn_t)(orte_rml_conduit_t conduit_id,
                                      const char* contact_info,
                                      const struct timeval* tv);
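
/*
 * Illustrative sketch (not part of this interface): checking that a peer is
 * reachable before attempting a send, using a two-second timeout on a
 * previously opened conduit ("id" and "peer_uri" as in the sketches above).
 *
 *    struct timeval tv = { 2, 0 };
 *
 *    if (ORTE_SUCCESS != orte_rml.ping(id, peer_uri, &tv)) {
 *        ... peer is not reachable - handle the error ...
 *    }
 */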

/**
 * Send an iovec non-blocking message
 *
 * Send an array of iovecs to the specified peer. The call
 * will return immediately, although the iovecs may not be modified
 * until the completion callback is triggered. The iovecs *may* be
 * passed to another call to send_nb before the completion callback is
 * triggered. The callback being triggered does not give any
 * indication of remote completion.
 *
 * @param[in] conduit_id Conduit to be used for the send
 * @param[in] peer Name of receiving process
 * @param[in] msg Pointer to an array of iovecs to be sent
 * @param[in] count Number of iovecs in array
 * @param[in] tag User defined tag for matching send/recv
 * @param[in] cbfunc Callback function on message completion
 * @param[in] cbdata User data to provide during completion callback
 *
 * @retval ORTE_SUCCESS The message was successfully started
 * @retval ORTE_ERR_BAD_PARAM One of the parameters was invalid
 * @retval ORTE_ERR_ADDRESSEE_UNKNOWN Contact information for the
 *                                    receiving process is not available
 * @retval ORTE_ERROR An unspecified error occurred
 */
typedef int (*orte_rml_API_send_nb_fn_t)(orte_rml_conduit_t conduit_id,
                                         orte_process_name_t* peer,
                                         struct iovec* msg,
                                         int count,
                                         orte_rml_tag_t tag,
                                         orte_rml_callback_fn_t cbfunc,
                                         void* cbdata);
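
/*
 * Illustrative sketch (not part of this interface): sending a single iovec
 * to the HNP over a previously opened conduit. The completion callback
 * follows the orte_rml_callback_fn_t signature from rml_types.h; MY_TAG is
 * a stand-in for an application-defined tag. Note that the iovec array
 * must remain valid until the callback fires, so it is allocated statically
 * here.
 *
 *    static struct iovec iov;
 *
 *    static void send_done(int status, orte_process_name_t *peer,
 *                          struct iovec *msg, int count,
 *                          orte_rml_tag_t tag, void *cbdata)
 *    {
 *        free(msg[0].iov_base);   // data may be released now
 *    }
 *
 *    iov.iov_base = strdup("hello");
 *    iov.iov_len  = strlen("hello") + 1;
 *    if (ORTE_SUCCESS != orte_rml.send_nb(id, ORTE_PROC_MY_HNP, &iov, 1,
 *                                         MY_TAG, send_done, NULL)) {
 *        ORTE_ERROR_LOG(ORTE_ERROR);
 *    }
 */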

/**
 * Send a buffer non-blocking message
 *
 * Send a buffer to the specified peer. The call
 * will return immediately, although the buffer may not be modified
 * until the completion callback is triggered. The buffer *may* be
 * passed to another call to send_buffer_nb before the completion callback
 * is triggered. The callback being triggered does not give any
 * indication of remote completion.
 *
 * @param[in] conduit_id Conduit to be used for the send
 * @param[in] peer Name of receiving process
 * @param[in] buffer Pointer to buffer to be sent
 * @param[in] tag User defined tag for matching send/recv
 * @param[in] cbfunc Callback function on message completion
 * @param[in] cbdata User data to provide during completion callback
 *
 * @retval ORTE_SUCCESS The message was successfully started
 * @retval ORTE_ERR_BAD_PARAM One of the parameters was invalid
 * @retval ORTE_ERR_ADDRESSEE_UNKNOWN Contact information for the
 *                                    receiving process is not available
 * @retval ORTE_ERROR An unspecified error occurred
 */
typedef int (*orte_rml_API_send_buffer_nb_fn_t)(orte_rml_conduit_t conduit_id,
                                                orte_process_name_t* peer,
                                                struct opal_buffer_t* buffer,
                                                orte_rml_tag_t tag,
                                                orte_rml_buffer_callback_fn_t cbfunc,
                                                void* cbdata);
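
/*
 * Illustrative sketch (not part of this interface): packing a value into an
 * opal_buffer_t and sending it to the HNP over a previously opened conduit.
 * The completion callback follows the orte_rml_buffer_callback_fn_t
 * signature from rml_types.h and is used here only to release the buffer
 * once the RML is finished with it.
 *
 *    static void buf_done(int status, orte_process_name_t *peer,
 *                         opal_buffer_t *buffer, orte_rml_tag_t tag,
 *                         void *cbdata)
 *    {
 *        OBJ_RELEASE(buffer);
 *    }
 *
 *    opal_buffer_t *buf = OBJ_NEW(opal_buffer_t);
 *    int32_t value = 42;
 *
 *    opal_dss.pack(buf, &value, 1, OPAL_INT32);
 *    if (ORTE_SUCCESS != orte_rml.send_buffer_nb(id, ORTE_PROC_MY_HNP, buf,
 *                                                MY_TAG, buf_done, NULL)) {
 *        ORTE_ERROR_LOG(ORTE_ERROR);
 *        OBJ_RELEASE(buf);
 *    }
 */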

/**
 * Purge the RML/OOB of contact info and pending messages
 * to/from a specified process. Used when a process aborts
 * and is to be restarted.
 */
typedef void (*orte_rml_API_purge_fn_t)(orte_process_name_t *peer);

/**
 * Receive an iovec non-blocking message
 *
 * @param[in] peer Peer process or ORTE_NAME_WILDCARD for wildcard receive
 * @param[in] tag User defined tag for matching send/recv
 * @param[in] persistent Boolean flag - if true, the receive remains posted
 *                       after a message arrives; if false, it is a one-time recv
 * @param[in] cbfunc Callback function on message completion
 * @param[in] cbdata User data to provide during completion callback
 */
typedef void (*orte_rml_API_recv_nb_fn_t)(orte_process_name_t* peer,
                                          orte_rml_tag_t tag,
                                          bool persistent,
                                          orte_rml_callback_fn_t cbfunc,
                                          void* cbdata);

/**
 * Receive a buffer non-blocking message
 *
 * @param[in] peer Peer process or ORTE_NAME_WILDCARD for wildcard receive
 * @param[in] tag User defined tag for matching send/recv
 * @param[in] persistent Boolean flag - if true, the receive remains posted
 *                       after a message arrives; if false, it is a one-time recv
 * @param[in] cbfunc Callback function on message completion
 * @param[in] cbdata User data to provide during completion callback
 */
typedef void (*orte_rml_API_recv_buffer_nb_fn_t)(orte_process_name_t* peer,
                                                 orte_rml_tag_t tag,
                                                 bool persistent,
                                                 orte_rml_buffer_callback_fn_t cbfunc,
                                                 void* cbdata);
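
/*
 * Illustrative sketch (not part of this interface): posting a persistent
 * buffer receive and unpacking the payload in the callback. The callback
 * follows the orte_rml_buffer_callback_fn_t signature from rml_types.h and
 * only unpacks the payload - it does not release the buffer, which remains
 * under RML control.
 *
 *    static void msg_recvd(int status, orte_process_name_t *sender,
 *                          opal_buffer_t *buffer, orte_rml_tag_t tag,
 *                          void *cbdata)
 *    {
 *        int32_t value, n = 1;
 *        if (ORTE_SUCCESS == opal_dss.unpack(buffer, &value, &n, OPAL_INT32)) {
 *            ... act on the received value ...
 *        }
 *    }
 *
 *    orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD, MY_TAG,
 *                            true, msg_recvd, NULL);
 */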

/**
 * Cancel a posted non-blocking receive
 *
 * Attempt to cancel a posted non-blocking receive.
 *
 * @param[in] peer Peer process or ORTE_NAME_WILDCARD, exactly as passed
 *                 to the non-blocking receive call
 * @param[in] tag Posted receive tag
 */
typedef void (*orte_rml_API_recv_cancel_fn_t)(orte_process_name_t* peer,
                                              orte_rml_tag_t tag);
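
/*
 * Illustrative sketch (not part of this interface): a persistent receive
 * posted as in the sketch above is normally cancelled at shutdown, using
 * the same peer and tag with which it was posted.
 *
 *    orte_rml.recv_cancel(ORTE_NAME_WILDCARD, MY_TAG);
 */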

/**
 * RML API interface
 */
typedef struct {
    /** Open a conduit */
    orte_rml_API_open_conduit_fn_t open_conduit;

    /** Shutdown the conduit and clean up resources */
    orte_rml_API_close_conduit_fn_t close_conduit;

    /** Get contact information for local process */
    orte_rml_API_get_contact_info_fn_t get_contact_info;

    /** Set contact information for remote process */
    orte_rml_API_set_contact_info_fn_t set_contact_info;

    /** Ping process for connectivity check */
    orte_rml_API_ping_fn_t ping;

    /** Send non-blocking iovec message */
    orte_rml_API_send_nb_fn_t send_nb;

    /** Send non-blocking buffer message */
    orte_rml_API_send_buffer_nb_fn_t send_buffer_nb;

    /** Receive non-blocking iovec message */
    orte_rml_API_recv_nb_fn_t recv_nb;

    /** Receive non-blocking buffer message */
    orte_rml_API_recv_buffer_nb_fn_t recv_buffer_nb;

    /** Cancel posted non-blocking receive */
    orte_rml_API_recv_cancel_fn_t recv_cancel;

    /** Purge contact info and pending messages for a peer process */
    orte_rml_API_purge_fn_t purge;

    /** Query the transports supported on this system */
    orte_rml_API_query_transports_fn_t query_transports;

    /* Get the routed module for a given conduit */
    orte_rml_API_query_routed_fn_t get_routed;
} orte_rml_base_API_t;

/** Interface for RML communication */
ORTE_DECLSPEC extern orte_rml_base_API_t orte_rml;


/* ******************************************************************** */

/* RML COMPONENT DEFINITION */

/**
 * RML open_conduit
 *
 * Create an instance (module) of the given RML component. Upon
 * return, the module data structure should be fully populated, and
 * all of its functions should be usable with the conduit information.
 *
 * @param[in] attributes opal_list_t of all attributes requested for the
 *            conduit. Each attribute is a key-value pair.
 *            [TODO] put in examples of the key-value here.
 *
 * @return Exactly one module, created by the component's initialization
 * function. The module structure should be fully populated, and the
 * priority should be set to a reasonable value.
 *
 * @retval NULL An error occurred and initialization did not occur
 * @retval non-NULL The module was successfully initialized
 */
typedef orte_rml_base_module_t* (*orte_rml_component_open_conduit_fn_t)(opal_list_t *attributes);

/**
 * Query the library to provide all the supported interfaces/transport
 * providers in the current node/system.
 */
typedef orte_rml_pathway_t* (*orte_rml_component_query_transports_fn_t)(void);

/* Get the contact info for this component */
typedef char* (*orte_rml_component_get_contact_info_fn_t)(void);

/* Set contact info */
typedef void (*orte_rml_component_set_contact_info_fn_t)(const char *uri);

/** Close conduit - allow the specific component to
 * clean up the module for this conduit
 */
typedef void (*orte_rml_module_close_conduit_fn_t)(orte_rml_base_module_t *mod);

/**
 * RML component interface
 *
 * Component interface for the RML framework. A public instance of
 * this structure, called mca_rml_[component name]_component, must
 * exist in any RML component.
 */
typedef struct orte_rml_component_t {
    /* Base component description */
    mca_base_component_t base;
    /* Base component data block */
    mca_base_component_data_t data;
    /* Component priority */
    int priority;
    /* Component interface functions */
    orte_rml_component_open_conduit_fn_t open_conduit;
    orte_rml_component_query_transports_fn_t query_transports;
    orte_rml_component_get_contact_info_fn_t get_contact_info;
    orte_rml_component_set_contact_info_fn_t set_contact_info;
    orte_rml_module_close_conduit_fn_t close_conduit;
} orte_rml_component_t;


/* ******************************************************************** */


/** Macro for use in components that are of type rml */
#define ORTE_RML_BASE_VERSION_3_0_0 \
    ORTE_MCA_BASE_VERSION_2_1_0("rml", 3, 0, 0)
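
/*
 * Illustrative sketch (not part of this interface): the general shape of a
 * public component instance for a hypothetical "example" RML component.
 * Field names follow the orte_rml_component_t definition above; a real
 * component's base descriptor also carries version information and the
 * usual MCA open/close/register hooks.
 *
 *    orte_rml_component_t mca_rml_example_component = {
 *        .base = {
 *            ORTE_RML_BASE_VERSION_3_0_0,
 *            .mca_component_name = "example",
 *        },
 *        .priority = 5,
 *        .open_conduit = example_open_conduit,
 *        .query_transports = example_query_transports,
 *        .get_contact_info = example_get_contact_info,
 *        .set_contact_info = example_set_contact_info,
 *        .close_conduit = example_close_conduit,
 *    };
 */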
/* ******************************************************************** */
END_C_DECLS
#endif