openmpi/ompi/mca/osc/sm/osc_sm.h
Ralph Castain 552c9ca5a0 George did the work and deserves all the credit for it. Ralph did the merge, and deserves whatever blame results from errors in it :-)
WHAT:    Open our low-level communication infrastructure by moving all necessary components (btl/rcache/allocator/mpool) down into OPAL

All the components required for inter-process communication are currently deeply integrated into the OMPI layer. Several groups/institutions have expressed interest in a more generic communication infrastructure without all the OMPI layer dependencies. This communication layer should be made available at a lower software level, accessible to all layers in the Open MPI software stack. As an example, our ORTE layer could replace the current OOB and instead use the BTL directly, gaining access to more reactive network interfaces than TCP. Similarly, external software libraries could take advantage of our highly optimized AM (active message) communication layer for their own purposes.

UTK, with support from Sandia, developed a version of Open MPI where the entire communication infrastructure has been moved down to OPAL (btl/rcache/allocator/mpool). Most of the moved components have been updated to match the new scheme, with a few exceptions (mainly BTLs that I have no way of compiling/testing). Thus, the completion of this RFC is tied to being able to complete this move for all BTLs, and for this we need help from the rest of the Open MPI community, especially those supporting some of the BTLs. A non-exhaustive list of BTLs that qualify here is: mx, portals4, scif, udapl, ugni, usnic.

This commit was SVN r32317.
2014-07-26 00:47:28 +00:00


/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
 * Copyright (c) 2012      Sandia National Laboratories. All rights reserved.
 * Copyright (c) 2014      Los Alamos National Security, LLC. All rights
 *                         reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#ifndef OSC_SM_SM_H
#define OSC_SM_SM_H

#include "opal/class/ompi_free_list.h"
#include "opal/mca/shmem/base/base.h"

/* the following two includes are an assumption: the ompi_osc_base_*
   and ompi_group_t types used below need declarations from somewhere */
#include "ompi/mca/osc/osc.h"
#include "ompi/group/group.h"
/* data shared across all peers; lives in the shared-memory segment */
struct ompi_osc_sm_global_state_t {
    int use_barrier_for_fence;

#if OPAL_HAVE_POSIX_THREADS
    /* process-shared, sense-reversing barrier state, used when
       use_barrier_for_fence is set (reading inferred from field names) */
    pthread_mutex_t mtx;
    pthread_cond_t cond;
    int sense;
    int32_t count;
#endif
};
typedef struct ompi_osc_sm_global_state_t ompi_osc_sm_global_state_t;
/* this is data exposed to remote peers (other processes on the node) */
struct ompi_osc_sm_lock_t {
    uint32_t counter;
    uint32_t write;
    uint32_t read;
};
typedef struct ompi_osc_sm_lock_t ompi_osc_sm_lock_t;
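/* One plausible reading of ompi_osc_sm_lock_t (inferred from the field
 * names, not stated in this header): 'counter' is a fetch-and-add ticket
 * dispenser, while 'write' and 'read' track the tickets already admitted,
 * yielding a ticket-based reader/writer lock in shared memory. See the
 * component's passive-target source for the authoritative implementation. */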
struct ompi_osc_sm_node_state_t {
    int32_t post_count;
    int32_t complete_count;
    ompi_osc_sm_lock_t lock;
    opal_atomic_lock_t accumulate_lock;
};
typedef struct ompi_osc_sm_node_state_t ompi_osc_sm_node_state_t;
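/* Reading of the per-process state above (inferred from the field names):
 * post_count and complete_count are updated by peers during post/start/
 * complete/wait (PSCW) synchronization, and accumulate_lock serializes
 * accumulate-style operations that target this process. */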
struct ompi_osc_sm_component_t {
    ompi_osc_base_component_t super;
};
typedef struct ompi_osc_sm_component_t ompi_osc_sm_component_t;
OMPI_DECLSPEC extern ompi_osc_sm_component_t mca_osc_sm_component;
/* lock types a process may hold on a peer; these mirror the MPI_LOCK_*
   modes plus the MPI_MODE_NOCHECK assertion */
enum ompi_osc_sm_locktype_t {
    lock_none = 0,
    lock_nocheck,
    lock_exclusive,
    lock_shared
};
struct ompi_osc_sm_module_t {
    ompi_osc_base_module_t super;
    struct ompi_communicator_t *comm;
    int flavor;                 /* window flavor (MPI_WIN_FLAVOR_*) */
    opal_shmem_ds_t seg_ds;     /* descriptor for the shared-memory segment */
    void *segment_base;
    bool noncontig;             /* alloc_shared_noncontig info key was set */

    size_t *sizes;              /* per-peer window sizes */
    void **bases;               /* per-peer base pointers */
    int *disp_units;            /* per-peer displacement units */

    ompi_group_t *start_group;
    ompi_group_t *post_group;

#if OPAL_HAVE_POSIX_THREADS
    int my_sense;               /* local sense for the fence barrier */
#endif

    enum ompi_osc_sm_locktype_t *outstanding_locks;  /* per-peer lock state */

    /* exposed data */
    ompi_osc_sm_global_state_t *global_state;
    ompi_osc_sm_node_state_t *my_node_state;
    ompi_osc_sm_node_state_t *node_states;
};
typedef struct ompi_osc_sm_module_t ompi_osc_sm_module_t;
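/*
 * Component entry points, grouped below by the MPI functionality they
 * implement.
 *
 * Illustrative sketch (an assumption for orientation, not part of this
 * header; 'node_comm' and 'buf' are placeholders): a window obtained via
 * MPI_Win_allocate_shared() on a single-node communicator can be backed
 * by this component, after which the MPI RMA calls resolve to the
 * functions declared below:
 *
 *     MPI_Win win;
 *     double *base;
 *     MPI_Win_allocate_shared(100 * sizeof(double), sizeof(double),
 *                             MPI_INFO_NULL, node_comm, &base, &win);
 *     MPI_Win_lock(MPI_LOCK_EXCLUSIVE, 1, 0, win);  // -> ompi_osc_sm_lock
 *     MPI_Put(buf, 10, MPI_DOUBLE, 1, 0, 10, MPI_DOUBLE, win);
 *     MPI_Win_unlock(1, win);                       // -> ompi_osc_sm_unlock
 *     MPI_Win_free(&win);
 */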
int ompi_osc_sm_shared_query(struct ompi_win_t *win, int rank, size_t *size, int *disp_unit, void *baseptr);
int ompi_osc_sm_attach(struct ompi_win_t *win, void *base, size_t len);
int ompi_osc_sm_detach(struct ompi_win_t *win, void *base);
int ompi_osc_sm_free(struct ompi_win_t *win);
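/* communication operations: MPI_Put / MPI_Get / MPI_Accumulate,
   MPI_Compare_and_swap, MPI_Fetch_and_op, MPI_Get_accumulate */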
int ompi_osc_sm_put(void *origin_addr,
                    int origin_count,
                    struct ompi_datatype_t *origin_dt,
                    int target,
                    OPAL_PTRDIFF_TYPE target_disp,
                    int target_count,
                    struct ompi_datatype_t *target_dt,
                    struct ompi_win_t *win);
int ompi_osc_sm_get(void *origin_addr,
                    int origin_count,
                    struct ompi_datatype_t *origin_dt,
                    int target,
                    OPAL_PTRDIFF_TYPE target_disp,
                    int target_count,
                    struct ompi_datatype_t *target_dt,
                    struct ompi_win_t *win);
int ompi_osc_sm_accumulate(void *origin_addr,
                           int origin_count,
                           struct ompi_datatype_t *origin_dt,
                           int target,
                           OPAL_PTRDIFF_TYPE target_disp,
                           int target_count,
                           struct ompi_datatype_t *target_dt,
                           struct ompi_op_t *op,
                           struct ompi_win_t *win);
int ompi_osc_sm_compare_and_swap(void *origin_addr,
                                 void *compare_addr,
                                 void *result_addr,
                                 struct ompi_datatype_t *dt,
                                 int target,
                                 OPAL_PTRDIFF_TYPE target_disp,
                                 struct ompi_win_t *win);
int ompi_osc_sm_fetch_and_op(void *origin_addr,
                             void *result_addr,
                             struct ompi_datatype_t *dt,
                             int target,
                             OPAL_PTRDIFF_TYPE target_disp,
                             struct ompi_op_t *op,
                             struct ompi_win_t *win);
int ompi_osc_sm_get_accumulate(void *origin_addr,
                               int origin_count,
                               struct ompi_datatype_t *origin_datatype,
                               void *result_addr,
                               int result_count,
                               struct ompi_datatype_t *result_datatype,
                               int target_rank,
                               MPI_Aint target_disp,
                               int target_count,
                               struct ompi_datatype_t *target_datatype,
                               struct ompi_op_t *op,
                               struct ompi_win_t *win);
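/* request-based variants (MPI_Rput / MPI_Rget / MPI_Raccumulate /
   MPI_Rget_accumulate); each returns a request the caller waits on */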
int ompi_osc_sm_rput(void *origin_addr,
                     int origin_count,
                     struct ompi_datatype_t *origin_dt,
                     int target,
                     OPAL_PTRDIFF_TYPE target_disp,
                     int target_count,
                     struct ompi_datatype_t *target_dt,
                     struct ompi_win_t *win,
                     struct ompi_request_t **request);
int ompi_osc_sm_rget(void *origin_addr,
                     int origin_count,
                     struct ompi_datatype_t *origin_dt,
                     int target,
                     OPAL_PTRDIFF_TYPE target_disp,
                     int target_count,
                     struct ompi_datatype_t *target_dt,
                     struct ompi_win_t *win,
                     struct ompi_request_t **request);
int ompi_osc_sm_raccumulate(void *origin_addr,
                            int origin_count,
                            struct ompi_datatype_t *origin_dt,
                            int target,
                            OPAL_PTRDIFF_TYPE target_disp,
                            int target_count,
                            struct ompi_datatype_t *target_dt,
                            struct ompi_op_t *op,
                            struct ompi_win_t *win,
                            struct ompi_request_t **request);
int ompi_osc_sm_rget_accumulate(void *origin_addr,
                                int origin_count,
                                struct ompi_datatype_t *origin_datatype,
                                void *result_addr,
                                int result_count,
                                struct ompi_datatype_t *result_datatype,
                                int target_rank,
                                MPI_Aint target_disp,
                                int target_count,
                                struct ompi_datatype_t *target_datatype,
                                struct ompi_op_t *op,
                                struct ompi_win_t *win,
                                struct ompi_request_t **request);
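/* active-target synchronization: MPI_Win_fence and the
   post/start/complete/wait (PSCW) family */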
int ompi_osc_sm_fence(int assert, struct ompi_win_t *win);
int ompi_osc_sm_start(struct ompi_group_t *group,
                      int assert,
                      struct ompi_win_t *win);
int ompi_osc_sm_complete(struct ompi_win_t *win);
int ompi_osc_sm_post(struct ompi_group_t *group,
                     int assert,
                     struct ompi_win_t *win);
int ompi_osc_sm_wait(struct ompi_win_t *win);
int ompi_osc_sm_test(struct ompi_win_t *win,
                     int *flag);
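/* passive-target synchronization: the MPI_Win_lock / MPI_Win_unlock family */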
int ompi_osc_sm_lock(int lock_type,
                     int target,
                     int assert,
                     struct ompi_win_t *win);
int ompi_osc_sm_unlock(int target,
                       struct ompi_win_t *win);
int ompi_osc_sm_lock_all(int assert,
                         struct ompi_win_t *win);
int ompi_osc_sm_unlock_all(struct ompi_win_t *win);
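/* sync and flush operations for passive-target epochs */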
int ompi_osc_sm_sync(struct ompi_win_t *win);
int ompi_osc_sm_flush(int target,
                      struct ompi_win_t *win);
int ompi_osc_sm_flush_all(struct ompi_win_t *win);
int ompi_osc_sm_flush_local(int target,
                            struct ompi_win_t *win);
int ompi_osc_sm_flush_local_all(struct ompi_win_t *win);
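/* window info hooks */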
int ompi_osc_sm_set_info(struct ompi_win_t *win, struct ompi_info_t *info);
int ompi_osc_sm_get_info(struct ompi_win_t *win, struct ompi_info_t **info_used);
#endif /* OSC_SM_SM_H */