/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2006 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2006      Sandia National Laboratories.  All rights
 *                         reserved.
 * Copyright (c) 2013-2017 Cisco Systems, Inc.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#ifndef OPAL_BTL_USNIC_ENDPOINT_H
#define OPAL_BTL_USNIC_ENDPOINT_H

#include <rdma/fabric.h>

#include "opal/class/opal_list.h"
#include "opal/class/opal_hotel.h"
#include "opal/mca/event/event.h"

#include "btl_usnic.h"

BEGIN_C_DECLS

/*
 * Forward declarations to avoid include loops
 */
struct opal_btl_usnic_module_t;
struct opal_btl_usnic_send_segment_t;

/*
 * Have the window size as a compile-time constant that is a power of
 * two so that we can take advantage of fast bit operations.
 */
#define WINDOW_SIZE 4096
#define WINDOW_SIZE_MOD(a) (((a) & (WINDOW_SIZE - 1)))
#define WINDOW_OPEN(E) (SEQ_LT((E)->endpoint_next_seq_to_send, \
                               ((E)->endpoint_ack_seq_rcvd + WINDOW_SIZE)))
#define WINDOW_EMPTY(E) ((E)->endpoint_ack_seq_rcvd == \
                         ((E)->endpoint_next_seq_to_send-1))
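
/*
 * Because WINDOW_SIZE is a power of two, the mask in WINDOW_SIZE_MOD()
 * behaves like an unsigned modulo but costs a single AND instead of a
 * division.  For example:
 *
 *   WINDOW_SIZE_MOD(4095) == 4095
 *   WINDOW_SIZE_MOD(4096) == 0
 *   WINDOW_SIZE_MOD(4097) == 1
 */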

/*
 * Returns true when an endpoint has nothing left to send
 */
#define ENDPOINT_DRAINED(E) (WINDOW_EMPTY(E) && \
                             opal_list_is_empty(&(E)->endpoint_frag_send_queue))

/*
 * Channel IDs
 */
typedef enum opal_btl_usnic_channel_id_t {
    USNIC_PRIORITY_CHANNEL,
    USNIC_DATA_CHANNEL,
    USNIC_NUM_CHANNELS
} opal_btl_usnic_channel_id_t;
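
/*
 * As the names suggest, the priority channel appears to carry short,
 * latency-sensitive traffic (e.g., ACKs and small messages) while the data
 * channel carries bulk fragment traffic; each endpoint keeps a separate
 * remote address per channel (see endpoint_remote_addrs below).
 */
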
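/*
 * Connectivity information that each process publishes at startup (via the
 * modex) so that peers can address it: the device's IPv4 address and
 * netmask, the UDP port for each channel, sizing/speed parameters, and the
 * initial sequence number (isn) used by the sliding window protocol.
 */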
typedef struct opal_btl_usnic_modex_t {
    /* Stored in network order */
    uint32_t ipv4_addr;
    /* Stored in host order */
    uint32_t ports[USNIC_NUM_CHANNELS];
    /* Stored in network order */
    uint32_t netmask;
    /* Stored in host order */
    uint32_t connectivity_udp_port;
    uint32_t link_speed_mbps;
    uint16_t max_msg_size;
    opal_btl_usnic_seq_t isn;
    uint32_t protocol;
} opal_btl_usnic_modex_t;

struct opal_btl_usnic_send_segment_t;
struct opal_btl_usnic_proc_t;

/*
 * This is a descriptor for an incoming fragment that is broken
 * into chunks.  When the first reference to this frag_id is seen,
 * memory is allocated for it.  When the last byte arrives, the assembled
 * fragment is passed to the PML.
 *
 * The endpoint structure has space for WINDOW_SIZE/2 simultaneous fragments.
 * This is the largest number of fragments that can possibly be in-flight
 * to us from a particular endpoint because each chunked fragment will occupy
 * at least two segments, and only WINDOW_SIZE segments can be in flight.
 *
 * OK, so there is an extremely pathological case where we could see
 * (WINDOW_SIZE/2)+1 "in flight" at once, but just dropping that last one
 * and waiting for retrans is just fine in this hypothetical hyper-pathological
 * case, which is what we'll do.
 */
#define MAX_ACTIVE_FRAGS (WINDOW_SIZE/2)

typedef struct opal_btl_usnic_rx_frag_info_t {
    uint32_t rfi_frag_id;       /* ID for this fragment */
    uint32_t rfi_frag_size;     /* bytes in this fragment */
    uint32_t rfi_bytes_left;    /* bytes remaining to RX in fragment */
    bool rfi_data_in_pool;      /* data in data_pool if true, else malloced */
    int rfi_data_pool;          /* if <0, data malloced, else rx buf pool */
    char *rfi_data;             /* pointer to assembly area */
    opal_free_list_item_t *rfi_fl_elt;  /* free list element from buf pool
                                           when rfi_data_pool is nonzero */
} opal_btl_usnic_rx_frag_info_t;

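/*
 * With the current WINDOW_SIZE of 4096, MAX_ACTIVE_FRAGS works out to 2048:
 * at most 2048 chunked fragments can be in the middle of reassembly from
 * any single peer at once, which bounds the per-endpoint rx_frag_info
 * state (see endpoint_rx_frag_info below).
 */
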
/**
 * An abstraction that represents a connection to a remote process.
 * An instance of mca_btl_base_endpoint_t is associated with each
 * (btl_usnic_proc_t, btl_usnic_module_t) tuple and address
 * information is exchanged at startup.  The usnic BTL is
 * connectionless, so no connection is ever established.
 */
typedef struct mca_btl_base_endpoint_t {
    opal_list_item_t super;

    /** BTL module that created this connection */
    struct opal_btl_usnic_module_t *endpoint_module;

    /** proc that owns this endpoint */
    struct opal_btl_usnic_proc_t *endpoint_proc;
    int endpoint_proc_index;    /* index in owning proc's endpoint array */

    /** True when proc has been deleted, but still have sends that need ACKs */
    bool endpoint_exiting;

    /** List item for linking into module "all_endpoints" */
    opal_list_item_t endpoint_endpoint_li;

    /** List item for linking into "need ack" */
    opal_list_item_t endpoint_ack_li;

    /** Remote address information */
    opal_btl_usnic_modex_t endpoint_remote_modex;

    /** Remote address handle.  Need one for each
        channel because each remote channel has different dest port */
    fi_addr_t endpoint_remote_addrs[USNIC_NUM_CHANNELS];

    /** Send-related data */
    bool endpoint_ready_to_send;
    opal_list_t endpoint_frag_send_queue;
    int32_t endpoint_send_credits;
    uint32_t endpoint_next_frag_id;

    /** Receive-related data */
    struct opal_btl_usnic_rx_frag_info_t *endpoint_rx_frag_info;

    /** OPAL hotel to track outstanding sends */
    opal_hotel_t endpoint_hotel;

    /** Sliding window parameters for this peer */
    /* Values for the current proc to send to this endpoint on the
       peer proc */
    opal_btl_usnic_seq_t endpoint_next_seq_to_send;      /* n_t */
    opal_btl_usnic_seq_t endpoint_ack_seq_rcvd;          /* n_a */

    /* Table where sent segments sit while waiting for their ACKs.
       When a segment is ACKed, it is removed from this table. */
    struct opal_btl_usnic_send_segment_t *endpoint_sent_segs[WINDOW_SIZE];

    /* Values for the current proc to receive from this endpoint on
       the peer proc */
    bool endpoint_ack_needed;

    /* When we receive a packet that needs an ACK, set this
     * to delay the ACK to allow for piggybacking
     */
    uint64_t endpoint_acktime;

    opal_btl_usnic_seq_t endpoint_next_contig_seq_to_recv;  /* n_r */
    opal_btl_usnic_seq_t endpoint_highest_seq_rcvd;         /* n_s */

    bool endpoint_rcvd_segs[WINDOW_SIZE];
    uint32_t endpoint_rfstart;

    bool endpoint_connectivity_checked;
    bool endpoint_on_all_endpoints;
} mca_btl_base_endpoint_t;

typedef mca_btl_base_endpoint_t opal_btl_usnic_endpoint_t;
OBJ_CLASS_DECLARATION(opal_btl_usnic_endpoint_t);

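/*
 * Rough sketch of how the sliding-window fields fit together, inferred from
 * the per-field comments above: before transmitting, the sender checks
 * WINDOW_OPEN(endpoint); each segment is stamped with
 * endpoint_next_seq_to_send, checked in to endpoint_hotel for
 * retransmission timing, and presumably remembered in
 * endpoint_sent_segs[WINDOW_SIZE_MOD(seq)] until the corresponding ACK
 * advances endpoint_ack_seq_rcvd.  On the receive side, endpoint_rcvd_segs[]
 * and endpoint_rfstart track which sequence numbers in the current window
 * have arrived so duplicates can be dropped, and ACKs may be delayed until
 * endpoint_acktime so they can piggyback on outgoing traffic.
 */
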
/*
 * Helper struct for the asynchronous creation of the fi_addr array
 */
typedef struct {
    opal_btl_usnic_endpoint_t *endpoint;
    opal_btl_usnic_channel_id_t channel_id;
} opal_btl_usnic_addr_context_t;

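/*
 * Presumably one of these contexts accompanies each asynchronous address
 * insertion so that, when the address-vector completion for a given channel
 * arrives, the resulting fi_addr_t can be stored back into
 * endpoint->endpoint_remote_addrs[channel_id].
 */
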
/*
 * Flush all pending sends and resends from an endpoint
 */
void
opal_btl_usnic_flush_endpoint(
    opal_btl_usnic_endpoint_t *endpoint);

END_C_DECLS

#endif