1
1
module open, close and init
basic data structures

This commit was SVN r1785.
Этот коммит содержится в:
Gopal Santhanaraman 2004-07-19 19:45:06 +00:00
родитель 8979824ae6
Коммит f41c116909
13 изменённых файлов: 1457 добавлений и 2 удалений

Просмотреть файл

@ -5,8 +5,25 @@
include $(top_ompi_srcdir)/config/Makefile.options
AM_CPPFLAGS = -I$(top_ompi_builddir)/src/include \
-I$(top_ompi_srcdir)/src -I$(top_ompi_srcdir)/src/include \
-I/home/devel/gm-2.0.1_Linux/include \
-L/home/devel/gm-2.0.1_Linux/lib
noinst_LTLIBRARIES = libmca_ptl_gm.la
libmca_ptl_gm_la_SOURCES = \
ptl_gm.c
ptl_gm.c \
ptl_gm.h \
ptl_gm_proc.c \
ptl_gm_proc.h \
ptl_gm_module.c \
ptl_gm_req.c \
ptl_gm_req.h \
ptl_address.h \
ptl_gm_priv.h \
ptl_gm_sendfrag.c \
ptl_gm_sendfrag.h \
ptl_gm_peer.h

Просмотреть файл

@ -1 +1,280 @@
/* This is the gm component */
/*
* $HEADER$
*/
#include <string.h>
#include "util/output.h"
#include "util/if.h"
#include "mca/pml/pml.h"
#include "mca/ptl/ptl.h"
#include "mca/ptl/base/ptl_base_header.h"
#include "mca/pml/base/pml_base_sendreq.h"
#include "mca/ptl/base/ptl_base_sendfrag.h"
#include "mca/pml/base/pml_base_recvreq.h"
#include "mca/ptl/base/ptl_base_recvfrag.h"
#include "mca/base/mca_base_module_exchange.h"
#include "ptl_gm.h"
#include "ptl_gm_addr.h"
#include "ptl_gm_proc.h"
#include "ptl_gm_req.h"
#include "ptl_gm_req.c"
#include "ptl_gm_peer.h"
/* Template GM PTL instance: mca_ptl_gm_create() copies this struct for
 * each opened port; the frag-size fields are overwritten from MCA
 * parameters in mca_ptl_gm_module_open(). */
mca_ptl_gm_t mca_ptl_gm = {
{
&mca_ptl_gm_module.super,
0, /* ptl_exclusivity */
0, /* ptl_latency */
0, /* ptl_bandwidth */
0, /* ptl_frag_first_size */
0, /* ptl_frag_min_size */
0, /* ptl_frag_max_size */
MCA_PTL_PUT, /* ptl flags */
/* collection of interfaces */
mca_ptl_gm_add_procs,
mca_ptl_gm_del_procs,
mca_ptl_gm_finalize,
mca_ptl_gm_put,
mca_ptl_gm_get,
mca_ptl_gm_matched,
mca_ptl_gm_request_alloc,
mca_ptl_gm_request_return}
};
OBJ_CLASS_INSTANCE (mca_ptl_gm_recv_frag_t,
mca_ptl_base_recv_frag_t, NULL, NULL);
OBJ_CLASS_INSTANCE (mca_ptl_gm_send_request_t,
mca_pml_base_send_request_t, NULL, NULL);
OBJ_CLASS_INSTANCE (mca_ptl_gm_peer_t, ompi_list_item_t, NULL, NULL);
/*
*
*/
/*
 * PML->PTL notification of a change in the process list.  For each
 * remote proc, create the GM proc bookkeeping object, attach a new
 * peer descriptor and mark the proc reachable.
 *
 * @param ptl        (IN)  PTL instance
 * @param nprocs     (IN)  number of entries in ompi_procs/peers
 * @param ompi_procs (IN)  processes being added
 * @param peers      (OUT) one peer descriptor per reachable process
 * @param reachable  (OUT) bitmap of procs reachable via this PTL
 * @return OMPI_SUCCESS or an OMPI error code
 */
int
mca_ptl_gm_add_procs (struct mca_ptl_t *ptl,
                      size_t nprocs,
                      struct ompi_proc_t **ompi_procs,
                      struct mca_ptl_base_peer_t **peers,
                      ompi_bitmap_t * reachable)
{
    size_t i;                  /* was int: signed/unsigned mismatch vs nprocs */
    struct ompi_proc_t *ompi_proc;
    mca_ptl_gm_proc_t *ptl_proc;
    mca_ptl_gm_peer_t *ptl_peer;

    for (i = 0; i < nprocs; i++) {
        ompi_proc = ompi_procs[i];
        ptl_proc =
            mca_ptl_gm_proc_create ((mca_ptl_gm_t *) ptl, ompi_proc);
        if (NULL == ptl_proc) {
            return OMPI_ERR_OUT_OF_RESOURCE;
        }

        OMPI_THREAD_LOCK (&ptl_proc->proc_lock);
        /* every published address already has a peer attached */
        if (ptl_proc->proc_addr_count == ptl_proc->proc_peer_count) {
            OMPI_THREAD_UNLOCK (&ptl_proc->proc_lock);
            return OMPI_ERR_UNREACH;
        }
        ptl_peer = OBJ_NEW (mca_ptl_gm_peer_t);
        if (NULL == ptl_peer) {
            OMPI_THREAD_UNLOCK (&ptl_proc->proc_lock);
            return OMPI_ERR_OUT_OF_RESOURCE;
        }
        ptl_peer->peer_ptl = (mca_ptl_gm_t *) ptl;
        ptl_peer->peer_proc = ptl_proc;
        ptl_proc->peer_arr[ptl_proc->proc_peer_count] = ptl_peer;
        ptl_proc->proc_peer_count++;
        /* NOTE(review): indexes the peer's address array with the global
         * proc index i rather than a per-proc address index -- confirm
         * this is intended when a proc exports multiple addresses. */
        ptl_peer->peer_addr = ptl_proc->proc_addrs + i;
        ompi_bitmap_set_bit (reachable, i);
        OMPI_THREAD_UNLOCK (&ptl_proc->proc_lock);
        peers[i] = (struct mca_ptl_base_peer_t *) ptl_peer;
    }
    return OMPI_SUCCESS;
}
/*
*
*/
/*
 * PML->PTL notification that processes are being removed: release the
 * reference held on each peer descriptor handed out by add_procs.
 */
int
mca_ptl_gm_del_procs (struct mca_ptl_t *ptl,
                      size_t nprocs,
                      struct ompi_proc_t **procs,
                      struct mca_ptl_base_peer_t **peers)
{
    size_t idx = 0;

    while (idx < nprocs) {
        OBJ_RELEASE (peers[idx]);
        idx++;
    }
    return OMPI_SUCCESS;
}
/*
*
*/
/*
 * Release a PTL instance.  The instance was obtained with malloc() in
 * mca_ptl_gm_create(), so a plain free() is sufficient.
 */
int
mca_ptl_gm_finalize (struct mca_ptl_t *ptl)
{
    free (ptl);
    return OMPI_SUCCESS;
}
/*
*
*/
int
mca_ptl_gm_request_alloc (struct mca_ptl_t *ptl,
struct mca_pml_base_send_request_t **request)
{
int rc;
mca_pml_base_send_request_t *sendreq;
ompi_list_item_t *item;
#if 0
OMPI_FREE_LIST_GET (&mca_ptl_gm_module.gm_send_req, item, rc);
if (NULL != (sendreq = (mca_pml_base_send_request_t *) item))
sendreq->req_owner = ptl;
*request = sendreq; /* the allocated memory must be registered */
#endif
return rc;
}
/*
*
*/
/*
 * PML->PTL return of a send request.  The free-list based
 * implementation is still commented out, so this is currently a no-op.
 */
void
mca_ptl_gm_request_return (struct mca_ptl_t *ptl,
                           struct mca_pml_base_send_request_t *request)
{
    /* OMPI_FREE_LIST_RETURN(&mca_ptl_gm_module.gm_send_req,
       (ompi_list_item_t*)request); */
}
/*
* Initiate a put
*/
/*
 * Initiate a put (send/RDMA-write of one fragment).  The real
 * implementation is still disabled (#if 0); the active code path
 * simply reports success without transferring anything.
 */
int
mca_ptl_gm_put (struct mca_ptl_t *ptl,
struct mca_ptl_base_peer_t *ptl_peer,
struct mca_pml_base_send_request_t *sendreq,
size_t offset, size_t size, int flags)
{
#if 0
/* fragment at offset 0 is embedded in the request; later fragments
 * come from the module's send-fragment free list */
mca_ptl_gm_send_frag_t *sendfrag;
int rc;
if (offset == 0) {
sendfrag = &((mca_ptl_gm_send_request_t *) sendreq)->req_frag;
} else {
ompi_list_item_t *item;
OMPI_FREE_LIST_GET (&mca_ptl_gm_module.gm_send_frags, item, rc);
if (NULL == (sendfrag = (mca_ptl_gm_send_frag_t *) item))
return rc;
}
rc = mca_ptl_gm_send_frag_init (sendfrag, ptl_peer, sendreq, offset,
&size, flags);
if (rc != OMPI_SUCCESS)
return rc;
/* size is updated by frag_init to the amount actually packed */
sendreq->req_offset += size;
return mca_ptl_gm_peer_send (ptl_peer, sendfrag);
#endif
return OMPI_SUCCESS;
}
/*
* initiate a get.
*/
/*
 * Initiate a get (RDMA read).  Not implemented yet for the GM PTL;
 * reports success without transferring anything.
 */
int
mca_ptl_gm_get (struct mca_ptl_t *ptl,
                struct mca_ptl_base_peer_t *ptl_base_peer,
                struct mca_pml_base_recv_request_t *request,
                size_t offset, size_t size, int flags)
{
    return OMPI_SUCCESS;
}
/* A posted receive has been matched - if required send an
* ack back to the peer and process the fragment.
*/
/* A posted receive has been matched - if required send an
 * ack back to the peer and process the fragment.
 * NOTE: the whole implementation is still disabled (#if 0); the
 * function currently does nothing.
 */
void
mca_ptl_gm_matched (mca_ptl_t * ptl, mca_ptl_base_recv_frag_t * frag)
{
/* might need to send an ack back */
#if 0
mca_ptl_base_header_t *header = &frag->super.frag_header;
if (header->hdr_common.hdr_flags & MCA_PTL_FLAGS_ACK_MATCHED) {
int rc;
mca_ptl_gm_send_frag_t *ack;
mca_ptl_gm_recv_frag_t *recv_frag =
(mca_ptl_gm_recv_frag_t *) frag;
ompi_list_item_t *item;
MCA_PTL_GM_SEND_FRAG_ALLOC (item, rc);
ack = (mca_ptl_gm_send_frag_t *) item;
if (NULL == ack) {
/* no send fragment available: queue the ack for later */
OMPI_THREAD_LOCK (&mca_ptl_gm_module.gm_lock);
recv_frag->frag_ack_pending = true;
ompi_list_append (&mca_ptl_gm_module.gm_pending_acks,
(ompi_list_item_t *) frag);
OMPI_THREAD_UNLOCK (&mca_ptl_gm_module.gm_lock);
} else {
mca_ptl_gm_send_frag_init_ack (ack, ptl,
recv_frag->super.super.
frag_peer, recv_frag);
mca_ptl_gm_peer_send (ack->super.super.frag_peer, ack);
}
}
/* process fragment if complete */
#endif
}

208
src/mca/ptl/gm/src/ptl_gm.h Обычный файл
Просмотреть файл

@ -0,0 +1,208 @@
/*
* $HEADER$
*/
/**
* @file
*/
#ifndef MCA_PTL_GM_H
#define MCA_PTL_GM_H
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include "class/ompi_free_list.h"
#include "event/event.h"
#include "mca/pml/pml.h"
#include "mca/ptl/ptl.h"
#include "gm.h"
#define MCA_PTL_GM_STATISTICS 0
#define SIZE 30
#define THRESHOLD 16384
#define MAX_GM_PORTS 16
#define MAX_RECV_TOKENS 256
/**
* GM PTL module.
*/
struct mca_ptl_gm_module_1_0_0_t {
mca_ptl_base_module_1_0_0_t super; /**< base PTL module */
struct mca_ptl_gm_t **gm_ptls; /**< array of available PTLs */
size_t gm_num_ptls; /**< number of ptls actually used */
size_t gm_max_ptls; /**< maximum number of ptls - available */
int gm_free_list_num; /**< initial size of free lists */
int gm_free_list_max; /**< maximum size of free lists */
int gm_free_list_inc; /**< number of elements to alloc when growing free lists */
ompi_list_t gm_procs; /**< list of mca_ptl_gm_proc_t instances */
ompi_list_t gm_send_req; /**< send request container (free-list use still disabled) */
ompi_mutex_t gm_lock; /**< lock for accessing module state */
};
typedef struct mca_ptl_gm_module_1_0_0_t mca_ptl_gm_module_1_0_0_t;
typedef struct mca_ptl_gm_module_1_0_0_t mca_ptl_gm_module_t;
extern mca_ptl_gm_module_1_0_0_t mca_ptl_gm_module;
/**
* GM PTL Interface
*/
struct mca_ptl_gm_t {
mca_ptl_t super; /**< base PTL interface */
struct gm_port *my_port; /**< GM port opened for this PTL (gm_open) */
unsigned int my_lid; /**< GM node id (gm_get_node_id) */
unsigned int my_gid; /**< GM global id (gm_node_id_to_global_id) */
unsigned int my_port_id; /**< port number actually opened */
unsigned int num_send_tokens; /**< send tokens reported by GM */
unsigned int num_recv_tokens; /**< receive tokens reported by GM */
unsigned int max_send_tokens;
unsigned int max_recv_tokens;
struct mca_ptl_gm_addr_t *proc_id_table;
ompi_free_list_t gm_send_frags; /**< free list of send fragments */
ompi_list_t gm_pending_acks; /**< acks waiting for a send fragment */
#if MCA_PTL_GM_STATISTICS
size_t ptl_bytes_sent;
size_t ptl_bytes_recv;
#endif
};
typedef struct mca_ptl_gm_t mca_ptl_gm_t;
extern mca_ptl_gm_t mca_ptl_gm;
/**
* Register GM module parameters with the MCA framework
*/
extern int mca_ptl_gm_module_open (void);
/**
* Any final cleanup before being unloaded.
*/
extern int mca_ptl_gm_module_close (void);
/**
* GM module initialization.
*
* @param num_ptls (OUT) Number of PTLs returned in PTL array.
* @param allow_multi_user_threads (OUT) Flag indicating wether PTL supports user threads (TRUE)
* @param have_hidden_threads (OUT) Flag indicating wether PTL uses threads (TRUE)
*/
extern mca_ptl_t **mca_ptl_gm_module_init (int *num_ptls,
bool * allow_multi_user_threads,
bool * have_hidden_threads);
/**
* GM module control.
*/
extern int mca_ptl_gm_module_control (int param,
void *value, size_t size);
/**
* GM module progress.
*/
extern int mca_ptl_gm_module_progress (mca_ptl_tstamp_t tstamp);
/**
* GM put
*/
extern int mca_ptl_gm_put (struct mca_ptl_t *ptl,
struct mca_ptl_base_peer_t *ptl_peer,
struct mca_pml_base_send_request_t *sendreq,
size_t offset, size_t size, int flags);
/**
* GM get
*/
extern int mca_ptl_gm_get (struct mca_ptl_t *ptl,
struct mca_ptl_base_peer_t *ptl_peer,
struct mca_pml_base_recv_request_t *sendreq,
size_t offset, size_t size, int flags);
/**
* PML->PTL notification of change in the process list.
*
* @param ptl (IN)
* @param nprocs (IN) Number of processes
* @param procs (IN) Set of processes
* @param peers (OUT) Set of (optional) peer addressing info.
* @param peers (IN/OUT) Set of processes that are reachable via this PTL.
* @return OMPI_SUCCESS or error status on failure.
*
*/
extern int mca_ptl_gm_add_procs (struct mca_ptl_t *ptl,
size_t nprocs,
struct ompi_proc_t **procs,
struct mca_ptl_base_peer_t **peers,
ompi_bitmap_t * reachable);
/**
* PML->PTL notification of change in the process list.
*
* @param ptl (IN) PTL instance
* @param nproc (IN) Number of processes.
* @param procs (IN) Set of processes.
* @param peers (IN) Set of peer data structures.
* @return Status indicating if cleanup was successful
*
*/
extern int mca_ptl_gm_del_procs (struct mca_ptl_t *ptl,
size_t nprocs,
struct ompi_proc_t **procs,
struct mca_ptl_base_peer_t **peers);
/**
* PML->PTL Allocate a send request from the PTL modules free list.
*
* @param ptl (IN) PTL instance
* @param request (OUT) Pointer to allocated request.
* @return Status indicating if allocation was successful.
*
*/
extern int mca_ptl_gm_request_alloc (struct mca_ptl_t *ptl,
struct mca_pml_base_send_request_t
**);
/**
*
*/
extern void mca_ptl_gm_request_return (struct mca_ptl_t *ptl,
struct mca_pml_base_send_request_t
*);
/**
* PML->PTL Notification that a receive fragment has been matched.
*
* @param ptl (IN) PTL instance
* @param recv_frag (IN) Receive fragment
*
*/
extern void mca_ptl_gm_matched (struct mca_ptl_t *ptl,
struct mca_ptl_base_recv_frag_t *frag);
/**
*
*/
extern int mca_ptl_gm_finalize (struct mca_ptl_t *ptl);
#endif

26
src/mca/ptl/gm/src/ptl_gm_addr.h Обычный файл
Просмотреть файл

@ -0,0 +1,26 @@
/*
* $HEADER$
*/
/**
* @file
*/
#ifndef MCA_PTL_GM_ADDR_H
#define MCA_PTL_GM_ADDR_H
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
/**
* Structure used to publish GM id information to peers.
*/
struct mca_ptl_gm_addr_t {
unsigned int global_id;
unsigned int local_id;
unsigned int port_id;
};
typedef struct mca_ptl_gm_addr_t mca_ptl_gm_addr_t;
#endif

469
src/mca/ptl/gm/src/ptl_gm_module.c Обычный файл
Просмотреть файл

@ -0,0 +1,469 @@
/*
* $HEADER$
*/
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "include/constants.h"
#include "event/event.h"
#include "util/if.h"
#include "util/argv.h"
#include "util/output.h"
#include "mca/pml/pml.h"
#include "mca/ptl/ptl.h"
#include "mca/pml/base/pml_base_sendreq.h"
#include "mca/base/mca_base_param.h"
#include "mca/base/mca_base_module_exchange.h"
#include "ptl_gm.h"
#include "ptl_gm_addr.h"
#include "ptl_gm_proc.h"
#include "ptl_gm_req.h"
/* Module descriptor registered with the MCA framework: version meta
 * data plus the open/close and init/control/progress entry points. */
mca_ptl_gm_module_1_0_0_t mca_ptl_gm_module = {
{
/* First, the mca_base_module_t struct containing meta information
about the module itself */
{
/* Indicate that we are a pml v1.0.0 module (which also implies a
specific MCA version) */
MCA_PTL_BASE_VERSION_1_0_0,
"gm", /* MCA module name */
1, /* MCA module major version */
0, /* MCA module minor version */
0, /* MCA module release version */
mca_ptl_gm_module_open, /* module open */
mca_ptl_gm_module_close /* module close */
}
,
/* Next the MCA v1.0.0 module meta data */
{
/* Whether the module is checkpointable or not */
false}
,
mca_ptl_gm_module_init,
mca_ptl_gm_module_control,
mca_ptl_gm_module_progress}
};
static int mca_ptl_gm_module_initialized = 0;
/*
* utility routines for parameter registration
*/
/*
 * Register a string MCA parameter under "ptl_gm_<param_name>" and
 * return its current (possibly user-overridden) value.
 */
static inline char *
mca_ptl_gm_param_register_string (const char *param_name,
                                  const char *default_value)
{
    char *value = NULL;
    int id = mca_base_param_register_string ("ptl", "gm", param_name,
                                             NULL, default_value);
    mca_base_param_lookup_string (id, &value);
    return value;
}
/*
 * Register an integer MCA parameter under "ptl_gm_<param_name>" and
 * return its current (possibly user-overridden) value.
 */
static inline int
mca_ptl_gm_param_register_int (const char *param_name, int default_value)
{
    int value = default_value;
    int id = mca_base_param_register_int ("ptl", "gm", param_name,
                                          NULL, default_value);
    mca_base_param_lookup_int (id, &value);
    return value;
}
/*
*
*/
/*
 * Tear down module-level GM state.  Still a stub: no cleanup is
 * performed yet.
 */
static int
ompi_mca_ptl_gm_finalize (mca_ptl_gm_module_1_0_0_t * gm)
{
/* add code */
return OMPI_SUCCESS;
}
/*
* Called by MCA framework to open the module, registers
* module parameters.
*/
/*
 * Called by the MCA framework to open the module: initialize module
 * state and register the GM MCA parameters.
 *
 * BUG FIX: the "free_list_num" and "free_list_inc" values were
 * previously stored into mca_ptl_gm.super.ptl_min_frag_size,
 * clobbering the min_frag_size registered just above and leaving the
 * free-list fields uninitialized.
 */
int
mca_ptl_gm_module_open (void)
{
    /* initialize state */
    mca_ptl_gm_module.gm_ptls = NULL;
    mca_ptl_gm_module.gm_num_ptls = 0;

    /* initialize objects */
    OBJ_CONSTRUCT (&mca_ptl_gm_module.gm_lock, ompi_mutex_t);
    OBJ_CONSTRUCT (&mca_ptl_gm_module.gm_procs, ompi_list_t);
    OBJ_CONSTRUCT (&mca_ptl_gm_module.gm_send_req, ompi_list_t);

    /* register GM module parameters */
    mca_ptl_gm.super.ptl_first_frag_size =
        mca_ptl_gm_param_register_int ("first_frag_size", 16 * 1024);
    mca_ptl_gm.super.ptl_min_frag_size =
        mca_ptl_gm_param_register_int ("min_frag_size", 0);
    mca_ptl_gm_module.gm_free_list_num =
        mca_ptl_gm_param_register_int ("free_list_num", 32);
    mca_ptl_gm_module.gm_free_list_inc =
        mca_ptl_gm_param_register_int ("free_list_inc", 32);
    /* NOTE(review): gm_free_list_max is never set anywhere -- confirm
     * whether a "free_list_max" parameter should be registered too. */
    return OMPI_SUCCESS;
}
/*
* module close
*/
/*
 * Module close: release the PTL array and the module-level containers,
 * then shut down the event library.
 */
int
mca_ptl_gm_module_close (void)
{
    if (NULL != mca_ptl_gm_module.gm_ptls) {
        free (mca_ptl_gm_module.gm_ptls);
    }
    OBJ_DESTRUCT (&mca_ptl_gm_module.gm_procs);
    OBJ_DESTRUCT (&mca_ptl_gm_module.gm_send_req);
    return ompi_event_fini ();
}
/*
* Create a ptl instance and add to modules list.
*/
/*
 * Create one PTL instance (a copy of the mca_ptl_gm template) and
 * append it to the module's instance array.
 *
 * Fixes: removed the unused local `param[256]`, and added a bounds
 * check so gm_num_ptls can never run past gm_max_ptls.
 */
static int
mca_ptl_gm_create (void)
{
    mca_ptl_gm_t *ptl;

    if (mca_ptl_gm_module.gm_num_ptls >= mca_ptl_gm_module.gm_max_ptls) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    ptl = malloc (sizeof (mca_ptl_gm_t));
    if (NULL == ptl) {
        ompi_output (0,
                     " ran out of resource to allocate ptl_instance \n");
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    memcpy (ptl, &mca_ptl_gm, sizeof (mca_ptl_gm));
    mca_ptl_gm_module.gm_ptls[mca_ptl_gm_module.gm_num_ptls++] = ptl;
    return OMPI_SUCCESS;
}
/*
* Create a GM PTL instance
*/
/*
 * Allocate the PTL instance array and create the instances.
 *
 * BUG FIX: the return value of mca_ptl_gm_create() was previously
 * ignored, so a failed creation went unnoticed.
 */
static int
mca_ptl_gm_module_create_instances (void)
{
    int i;
    int rc;
    const int maxptls = 1;      /* currently hard-wired to one PTL */

    /* allocate memory for ptls */
    mca_ptl_gm_module.gm_max_ptls = maxptls;
    mca_ptl_gm_module.gm_ptls = malloc (maxptls * sizeof (mca_ptl_gm_t *));
    if (NULL == mca_ptl_gm_module.gm_ptls) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    for (i = 0; i < maxptls; i++) {
        rc = mca_ptl_gm_create ();
        if (OMPI_SUCCESS != rc) {
            return rc;
        }
    }
    return OMPI_SUCCESS;
}
/*
* Register GM module addressing information. The MCA framework
* will make this available to all peers.
*/
/*
 * Publish this process' GM addressing information (global id, local
 * id, port) through the modex so every peer can retrieve it.
 */
static int
mca_ptl_gm_module_store_data_toexchange (void)
{
    int rc;
    size_t idx;
    mca_ptl_gm_addr_t *addrs;
    const size_t nbytes =
        mca_ptl_gm_module.gm_num_ptls * sizeof (mca_ptl_gm_addr_t);

    addrs = malloc (nbytes);
    if (NULL == addrs) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    /* one address record per opened PTL */
    for (idx = 0; idx < mca_ptl_gm_module.gm_num_ptls; idx++) {
        mca_ptl_gm_t *ptl = mca_ptl_gm_module.gm_ptls[idx];
        addrs[idx].local_id = ptl->my_lid;
        addrs[idx].global_id = ptl->my_gid;
        addrs[idx].port_id = ptl->my_port_id;
    }
    rc = mca_base_modex_send (&mca_ptl_gm_module.super.ptlm_version,
                              addrs, nbytes);
    free (addrs);
    return rc;
}
/*
* initialize a ptl interface
*
*/
/*
 * Initialize the PTL interfaces: create the instances, open a GM port
 * for each one, resolve local/global ids and publish them.
 *
 * BUG FIX: all error paths previously returned 0, which equals
 * OMPI_SUCCESS, so callers comparing against OMPI_SUCCESS never saw
 * the failure.  They now return OMPI_ERROR.  Also: verify that a port
 * was actually opened, and remove the stray debug printf and unused
 * locals (buffer_ptr, buf_len).
 */
static int
ompi_mca_ptl_gm_init (mca_ptl_gm_module_1_0_0_t * gm)
{
    mca_ptl_gm_t *ptl;
    unsigned int board_no, port_no;
    gm_status_t status;
    int i;

    if (OMPI_SUCCESS != mca_ptl_gm_module_create_instances ())
        return OMPI_ERROR;

    /* hack : we have set the gm_max_ptls to 1 */
    for (i = 0; i < mca_ptl_gm_module.gm_max_ptls; i++) {
        ptl = mca_ptl_gm_module.gm_ptls[i];

        /* open the first available gm port for this board;
         * ports 0, 1 and 3 are reserved */
        board_no = i;
        ptl->my_port = NULL;
        for (port_no = 2; port_no < MAX_GM_PORTS; port_no++) {
            if (3 == port_no) {
                continue;
            }
            status = gm_open (&(ptl->my_port), board_no, port_no,
                              "OMPI-GM", GM_API_VERSION_2_0);
            if (GM_SUCCESS == status) {
                ptl->my_port_id = port_no;
                break;
            }
        }
        if (MAX_GM_PORTS == port_no) {
            ompi_output (0, " unable to open a GM port \n");
            return OMPI_ERROR;
        }

        /* Get node local Id */
        if (GM_SUCCESS != gm_get_node_id (ptl->my_port, &(ptl->my_lid))) {
            ompi_output (0, " failure to get local_id \n");
            return OMPI_ERROR;
        }

        /* Convert local id to global id */
        if (GM_SUCCESS !=
            gm_node_id_to_global_id (ptl->my_port, ptl->my_lid,
                                     &(ptl->my_gid))) {
            ompi_output (0, " Error: Unable to get my GM global id \n");
            return OMPI_ERROR;
        }
    }

    /* publish GM parameters with the MCA framework */
    if (OMPI_SUCCESS != mca_ptl_gm_module_store_data_toexchange ())
        return OMPI_ERROR;
    return OMPI_SUCCESS;
}
/*
 * Per-PTL send/receive setup: enable remote memory access, query the
 * send/receive token counts and post one DMA receive buffer per
 * available receive token.  (The free-list portions are still
 * disabled with #if 0.)
 *
 * BUG FIX: the inner receive-token loop previously reused the outer
 * per-PTL loop counter `i`, clobbering it; with more than one PTL the
 * outer loop would have terminated incorrectly.
 */
static int
ompi_mca_ptl_gm_init_sendrecv (mca_ptl_gm_module_1_0_0_t * gm)
{
    int i;
    unsigned int token;         /* receive-token loop index */
    mca_ptl_gm_t *ptl;
    gm_status_t status;
    int buf_len;
    void *buffer_ptr;

    for (i = 0; i < mca_ptl_gm_module.gm_max_ptls; i++) {
        ptl = mca_ptl_gm_module.gm_ptls[i];
#if 0
        /* initialise the free lists */
        ompi_free_list_init (&(mca_ptl_gm_module.gm_send_req),
                             sizeof (mca_ptl_gm_send_request_t),
                             OBJ_CLASS (mca_ptl_gm_send_request_t),
                             mca_ptl_gm_module.gm_free_list_num,
                             mca_ptl_gm_module.gm_free_list_max,
                             mca_ptl_gm_module.gm_free_list_inc, NULL);
#endif
        /** Receive part **/
        /* allow remote memory access */
        status = gm_allow_remote_memory_access (ptl->my_port);
        if (GM_SUCCESS != status) {
            ompi_output (0, "unable to allow remote memory access\n");
        }
        ptl->num_send_tokens = gm_num_send_tokens (ptl->my_port);
        ptl->num_recv_tokens = gm_num_receive_tokens (ptl->my_port);
        /* set acceptable sizes */
        /*status = gm_set_acceptable_sizes(ptl->my_port, GM_LOW_PRIORITY,
         * MASK);*/

        /* post receive buffers for each available token */
        buf_len = THRESHOLD;
        /* TODO need to provide buffers with two different sizes to
         * distinguish between header and data */
        for (token = 0; token < ptl->num_recv_tokens; token++) {
            buffer_ptr = gm_dma_malloc (ptl->my_port, buf_len);
            /* NOTE(review): the buffer is buf_len (THRESHOLD) bytes but
             * is posted with size argument SIZE (30) -- confirm the
             * intended gm_provide_receive_buffer() size. */
            gm_provide_receive_buffer (ptl->my_port, buffer_ptr,
                                       SIZE, GM_LOW_PRIORITY);
        }
#if 0
        /** Send Part **/
        OBJ_CONSTRUCT (&mca_ptl_gm.gm_send_frag, ompi_free_list_t);
        ompi_free_list_init (&(mca_ptl_gm_module.gm_send_frag),
                             sizeof (mca_ptl_gm_send_frag_t),
                             OBJ_CLASS (mca_ptl_gm_send_frag_t));
        /* allocate send buffers */
        total_registered_memory = max_send_buf * SIZE;
        ptl->send_req->req_frag->head =
            (struct send_buf *) gm_dma_malloc (ptl->my_port,
                                               total_registered_memory);
#endif
    }
    return OMPI_SUCCESS;
}
/*
* Initialize the GM module,
* check how many boards are available and open ports on them.
*/
/*
 * Initialize the GM module: set up the PTL instances and their
 * send/receive resources, then hand back an array of PTL pointers
 * owned by the caller.
 *
 * @param num_ptls                 (OUT) number of PTLs in the array
 * @param allow_multi_user_threads (OUT) always false for now
 * @param have_hidden_threads      (OUT) always false for now
 * @return malloc'ed array of PTL pointers, or NULL on failure
 *
 * Cleanup: removed the unused locals (rc, board_id, port_id) and the
 * dead commented-out free-list block.
 */
mca_ptl_t **
mca_ptl_gm_module_init (int *num_ptls,
                        bool * allow_multi_user_threads,
                        bool * have_hidden_threads)
{
    mca_ptl_t **ptls;

    *num_ptls = 0;
    *allow_multi_user_threads = false;
    *have_hidden_threads = false;

    if (OMPI_SUCCESS != ompi_mca_ptl_gm_init (&mca_ptl_gm_module)) {
        ompi_output (0,
                     "[%s:%d] error in initializing gm state and PTL's.\n",
                     __FILE__, __LINE__);
        return NULL;
    }
    if (OMPI_SUCCESS != ompi_mca_ptl_gm_init_sendrecv (&mca_ptl_gm_module)) {
        ompi_output (0,
                     "[%s:%d] error in initializing buffer resources .\n",
                     __FILE__, __LINE__);
        return NULL;
    }

    /* return array of PTLs */
    ptls = malloc (mca_ptl_gm_module.gm_num_ptls * sizeof (mca_ptl_t *));
    if (NULL == ptls) {
        return NULL;
    }
    memcpy (ptls, mca_ptl_gm_module.gm_ptls,
            mca_ptl_gm_module.gm_num_ptls * sizeof (mca_ptl_gm_t *));
    *num_ptls = mca_ptl_gm_module.gm_num_ptls;
    return ptls;
}
/*
* GM module control
*/
/*
 * GM module control hook.  No control parameters are handled yet.
 */
int
mca_ptl_gm_module_control (int param, void *value, size_t size)
{
    return OMPI_SUCCESS;
}
/*
* GM module progress.
*/
/*
 * GM module progress hook.  Still a stub: no progression is done yet.
 */
int
mca_ptl_gm_module_progress (mca_ptl_tstamp_t tstamp)
{
/* check the send queue to see if any pending send can proceed */
/* check for recieve and , call ptl_match to send it to the upper
level */
/* in case matched, do the appropriate queuing. */
return OMPI_SUCCESS;
}

39
src/mca/ptl/gm/src/ptl_gm_peer.h Обычный файл
Просмотреть файл

@ -0,0 +1,39 @@
/*
* $HEADER$
*/
/**
* @file
*/
#ifndef MCA_PTL_GM_PEER_H
#define MCA_PTL_GM_PEER_H
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include "class/ompi_list.h"
#include "event/event.h"
#include "mca/pml/pml.h"
#include "mca/ptl/ptl.h"
/**
* An abstraction that represents a connection to a peer process.
*/
struct mca_ptl_gm_peer_t {
ompi_list_item_t super; /**< allow peer to live on a list */
struct mca_ptl_gm_t *peer_ptl; /**< owning PTL instance */
struct mca_ptl_gm_proc_t *peer_proc; /**< proc this peer belongs to */
struct mca_ptl_gm_addr_t *peer_addr; /**< address of peer */
int num_credits; /* flow-control state -- not yet used in this commit */
int max_credits;
int resending;
int num_resend;
};
typedef struct mca_ptl_gm_peer_t mca_ptl_gm_peer_t;
OBJ_CLASS_DECLARATION (mca_ptl_gm_peer_t);
#endif
#endif

15
src/mca/ptl/gm/src/ptl_gm_priv.h Обычный файл
Просмотреть файл

@ -0,0 +1,15 @@
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include "class/ompi_free_list.h"
#include "event/event.h"
#include "mca/pml/pml.h"
#include "mca/ptl/ptl.h"
#include "gm.h"
/* maintain list of registered buffers for send and receive */
struct reg_buf {
void *start; /* pointer to registered memory */
int length;
};

162
src/mca/ptl/gm/src/ptl_gm_proc.c Обычный файл
Просмотреть файл

@ -0,0 +1,162 @@
/*
* $HEADER$
*/
#include <string.h>
#include "atomic.h"
#include "class/ompi_hash_table.h"
#include "mca/base/mca_base_module_exchange.h"
#include "ptl_gm.h"
#include "ptl_gm_addr.h"
#include "ptl_gm_peer.h"
#include "ptl_gm_proc.h"
#include "ptl_gm_priv.h"
static void mca_ptl_gm_proc_construct (mca_ptl_gm_proc_t * proc);
static void mca_ptl_gm_proc_destruct (mca_ptl_gm_proc_t * proc);
static mca_ptl_gm_proc_t *mca_ptl_gm_proc_lookup_ompi (ompi_proc_t *
ompi_proc);
/* Class descriptor: mca_ptl_gm_proc_t derives from ompi_list_item_t
 * with the construct/destruct hooks defined below. */
ompi_class_t mca_ptl_gm_proc_t_class = {
"mca_ptl_gm_proc_t",
OBJ_CLASS (ompi_list_item_t),
(ompi_construct_t) mca_ptl_gm_proc_construct,
(ompi_destruct_t) mca_ptl_gm_proc_destruct
};
/**
* Initialize gm proc instance
*/
/*
 * Initialize a gm proc instance: zero the bookkeeping fields, build
 * the lock and publish the instance on the module-wide proc list.
 */
void
mca_ptl_gm_proc_construct (mca_ptl_gm_proc_t * proc)
{
    proc->proc_ompi = NULL;
    proc->proc_addrs = NULL;
    proc->proc_addr_count = 0;
    proc->peer_arr = NULL;
    proc->proc_peer_count = 0;
    OBJ_CONSTRUCT (&proc->proc_lock, ompi_mutex_t);

    /* add to the list of all proc instances */
    OMPI_THREAD_LOCK (&mca_ptl_gm_module.gm_lock);
    ompi_list_append (&mca_ptl_gm_module.gm_procs, &proc->super);
    OMPI_THREAD_UNLOCK (&mca_ptl_gm_module.gm_lock);
}
/*
* Cleanup gm proc instance
*/
/*
 * Cleanup a gm proc instance: unlink it from the module-wide proc
 * list and release its resources.
 *
 * Fixes: dropped the redundant NULL guard around free() (free(NULL)
 * is a no-op), and destruct the proc_lock that the constructor built
 * (it was previously leaked).
 */
void
mca_ptl_gm_proc_destruct (mca_ptl_gm_proc_t * proc)
{
    /* remove from list of all proc instances */
    OMPI_THREAD_LOCK (&mca_ptl_gm_module.gm_lock);
    ompi_list_remove_item (&mca_ptl_gm_module.gm_procs, &proc->super);
    OMPI_THREAD_UNLOCK (&mca_ptl_gm_module.gm_lock);

    /* release resources */
    free (proc->peer_arr);
    OBJ_DESTRUCT (&proc->proc_lock);
}
/*
* Create a GM process structure. There is a one-to-one correspondence
* between a ompi_proc_t and a mca_ptl_gm_proc_t instance.
* We cache additional data (specifically the list
* of mca_ptl_gm_peer_t instances, and publiched
* addresses) associated w/ a given destination on this datastructure.
*/
/*
 * Create a GM process structure. There is a one-to-one correspondence
 * between a ompi_proc_t and a mca_ptl_gm_proc_t instance.  We cache
 * additional data (specifically the list of mca_ptl_gm_peer_t
 * instances, and published addresses) associated w/ a given
 * destination on this datastructure.
 *
 * Fixes in this revision:
 *  - missing comma after the "unable to allocate peer procs" format
 *    string: __FILE__ was string-concatenated onto the format and
 *    __LINE__ was consumed by %s (undefined behavior);
 *  - gm_global_id_to_node_id()'s output argument must be passed by
 *    address, not by value;
 *  - %d was used for a size_t argument;
 *  - ptl_proc was leaked on the size-mismatch error path;
 *  - the OBJ_NEW result is now checked.
 */
mca_ptl_gm_proc_t *
mca_ptl_gm_proc_create (mca_ptl_gm_t * ptl, ompi_proc_t * ompi_proc)
{
    int rc;
    size_t size;
    size_t i;
    mca_ptl_gm_proc_t *ptl_proc;

    /* only gm ptl opened */
    ptl_proc = OBJ_NEW (mca_ptl_gm_proc_t);
    if (NULL == ptl_proc) {
        return NULL;
    }
    ptl_proc->proc_ompi = ompi_proc;

    /* Extract exposed addresses from remote proc */
    rc = mca_base_modex_recv (&mca_ptl_gm_module.super.ptlm_version,
                              ompi_proc, (void **) &ptl_proc->proc_addrs,
                              &size);
    if (rc != OMPI_SUCCESS) {
        ompi_output (0,
                     "[%s:%d] mca_base_modex_recv failed to recv data \n",
                     __FILE__, __LINE__);
        OBJ_RELEASE (ptl_proc);
        return NULL;
    }
    if (0 != (size % sizeof (mca_ptl_gm_addr_t))) {
        ompi_output (0, "[%s:%d] invalid received data size %lu\n",
                     __FILE__, __LINE__, (unsigned long) size);
        OBJ_RELEASE (ptl_proc);
        return NULL;
    }
    ptl_proc->proc_addr_count = size / sizeof (mca_ptl_gm_addr_t);

    for (i = 0; i < ptl_proc->proc_addr_count; i++) {
        /* convert from global id to local id */
        if (GM_SUCCESS !=
            gm_global_id_to_node_id (ptl->my_port,
                                     ptl_proc->proc_addrs[i].global_id,
                                     &ptl_proc->proc_addrs[i].local_id)) {
            ompi_output (0,
                         "[%s:%d] error in converting global to local id \n",
                         __FILE__, __LINE__);
        }
    }

    /* allocate space for peer array - one for each exported address */
    ptl_proc->peer_arr = (mca_ptl_gm_peer_t **)
        malloc (ptl_proc->proc_addr_count * sizeof (mca_ptl_gm_peer_t *));
    if (NULL == ptl_proc->peer_arr) {
        OBJ_RELEASE (ptl_proc);
        ompi_output (0, "[%s:%d] unable to allocate peer procs \n",
                     __FILE__, __LINE__);
        return NULL;
    }
    return ptl_proc;
}
/*
* Look for an existing GM process instances based on the associated
* ompi_proc_t instance.
*/
/*
 * Look for an existing GM proc instance matching the given
 * ompi_proc_t.  Returns NULL when no instance is found.
 */
static mca_ptl_gm_proc_t *
mca_ptl_gm_proc_lookup_ompi (ompi_proc_t * ompi_proc)
{
    mca_ptl_gm_proc_t *cur;
    mca_ptl_gm_proc_t *found = NULL;

    OMPI_THREAD_LOCK (&mca_ptl_gm_module.gm_lock);
    for (cur = (mca_ptl_gm_proc_t *)
             ompi_list_get_first (&mca_ptl_gm_module.gm_procs);
         cur != (mca_ptl_gm_proc_t *)
             ompi_list_get_end (&mca_ptl_gm_module.gm_procs);
         cur = (mca_ptl_gm_proc_t *) ompi_list_get_next (cur)) {
        if (cur->proc_ompi == ompi_proc) {
            found = cur;
            break;
        }
    }
    OMPI_THREAD_UNLOCK (&mca_ptl_gm_module.gm_lock);
    return found;
}

35
src/mca/ptl/gm/src/ptl_gm_proc.h Обычный файл
Просмотреть файл

@ -0,0 +1,35 @@
/*
* $HEADER$
*/
/**
* @file
*/
#ifndef MCA_PTL_GM_PROC_H
#define MCA_PTL_GM_PROC_H
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include "class/ompi_object.h"
#include "proc/proc.h"
#include "ptl_gm.h"
extern ompi_class_t mca_ptl_gm_proc_t_class;
struct mca_ptl_gm_proc_t {
ompi_list_item_t super; /**< allow proc to be placed on a list */
ompi_proc_t *proc_ompi; /**< pointer to corresponding ompi_proc_t */
struct mca_ptl_gm_addr_t *proc_addrs; /**< array of addresses published by peer */
ompi_mutex_t proc_lock; /**< lock to protect against concurrent access to proc state */
size_t proc_peer_count; /**< number of peers created so far */
size_t proc_addr_count; /**< entries in proc_addrs */
struct mca_ptl_gm_peer_t **peer_arr; /**< one peer slot per exported address */
};
typedef struct mca_ptl_gm_proc_t mca_ptl_gm_proc_t;
mca_ptl_gm_proc_t *mca_ptl_gm_proc_create (mca_ptl_gm_t * ptl,
ompi_proc_t * ompi_proc);
mca_ptl_gm_proc_t *mca_ptl_gm_proc_lookup (void *guid, size_t size);
#endif

41
src/mca/ptl/gm/src/ptl_gm_req.c Обычный файл
Просмотреть файл

@ -0,0 +1,41 @@
/*
 * $HEADER$
 */
#include <unistd.h>
#include <sys/types.h>
#include <sys/errno.h>
#include "include/types.h"
#include "mca/pml/base/pml_base_sendreq.h"
#include "ptl_gm.h"
#include "ptl_gm_req.h"
/*
static void mca_ptl_gm_send_request_construct (
mca_ptl_gm_send_request_t *);
static void mca_ptl_gm_send_request_destruct (
mca_ptl_gm_send_request_t *);
ompi_class_t mca_ptl_gm_send_request_t_class = {
"mca_ptl_gm_send_request_t",
OBJ_CLASS (mca_pml_base_send_request_t),
(ompi_construct_t) mca_ptl_gm_send_request_construct,
(ompi_destruct_t) mca_ptl_gm_send_request_destruct
};
void
mca_ptl_gm_send_request_construct (
mca_ptl_gm_send_request_t * request)
{
OBJ_CONSTRUCT (&request->req_frag, mca_ptl_gm_send_frag_t);
}
void
mca_ptl_gm_send_request_destruct (
mca_ptl_gm_send_request_t * request)
{
OBJ_DESTRUCT (&request->req_frag);
}
*/

29
src/mca/ptl/gm/src/ptl_gm_req.h Обычный файл
Просмотреть файл

@ -0,0 +1,29 @@
/*
 * $HEADER$
 */
/**
* @file
*/
#ifndef MCA_PTL_GM_SEND_REQUEST_H
#define MCA_PTL_GM_SEND_REQUEST_H
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include "ompi_config.h"
#include "mca/pml/base/pml_base_sendreq.h"
#include "ptl_gm_sendfrag.h"
OBJ_CLASS_DECLARATION (mca_ptl_gm_send_request_t);
/* GM send request: base PML send request plus the embedded first
 * fragment (used for offset-0 sends in mca_ptl_gm_put). */
struct mca_ptl_gm_send_request_t {
mca_pml_base_send_request_t super;
/* add stuff here */
mca_ptl_gm_send_frag_t req_frag; /* first fragment, avoids a free-list trip */
};
typedef struct mca_ptl_gm_send_request_t mca_ptl_gm_send_request_t;
#endif
#endif

97
src/mca/ptl/gm/src/ptl_gm_sendfrag.c Обычный файл
Просмотреть файл

@ -0,0 +1,97 @@
/*
 * $HEADER$
 */
#include <unistd.h>
#include <sys/types.h>
#include <sys/errno.h>
#include "include/types.h"
#include "datatype/datatype.h"
#include "mca/pml/base/pml_base_sendreq.h"
#include "ptl_gm.h"
#include "ptl_gm_peer.h"
#include "ptl_gm_proc.h"
#include "ptl_gm_sendfrag.h"
/* shorthand accessors into the nested base-fragment structure */
#define frag_header super.super.frag_header
#define frag_owner super.super.frag_owner
#define frag_peer super.super.frag_peer
#define frag_convertor super.super.frag_convertor
static void mca_ptl_gm_send_frag_construct (mca_ptl_gm_send_frag_t * frag);
static void mca_ptl_gm_send_frag_destruct (mca_ptl_gm_send_frag_t * frag);
/* Class descriptor: mca_ptl_gm_send_frag_t derives from
 * mca_ptl_base_send_frag_t. */
ompi_class_t mca_ptl_gm_send_frag_t_class = {
"mca_ptl_gm_send_frag_t",
OBJ_CLASS (mca_ptl_base_send_frag_t),
(ompi_construct_t) mca_ptl_gm_send_frag_construct,
(ompi_destruct_t) mca_ptl_gm_send_frag_destruct
};
/*
 * send fragment constructor/destructors (no per-fragment state to set
 * up yet).
 */
static void
mca_ptl_gm_send_frag_construct (mca_ptl_gm_send_frag_t * frag)
{
}
static void
mca_ptl_gm_send_frag_destruct (mca_ptl_gm_send_frag_t * frag)
{
}
/*
static void send_callback(struct gm_port *, gm_status)
{
gm_status_t status;
switch (status)
{
case GM_SUCCESS:
break;
case GM_SEND_TIMED_OUT:
break;
case GM_SEND_DROPPED:
break;
default:
break;
}
}
static void put_callback(struct gm_port *, gm_status)
{
gm_status_t status;
switch (status)
{
case GM_SUCCESS:
break;
case GM_SEND_TIMED_OUT:
break;
case GM_SEND_DROPPED:
break;
default:
break;
}
}
*/

38
src/mca/ptl/gm/src/ptl_gm_sendfrag.h Обычный файл
Просмотреть файл

@ -0,0 +1,38 @@
/*
 * $HEADER$
 */
/**
* @file
*/
#ifndef MCA_PTL_GM_SEND_FRAG_H
#define MCA_PTL_GM_SEND_FRAG_H
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include "os/atomic.h"
#include "ompi_config.h"
#include "mca/pml/base/pml_base_sendreq.h"
#include "mca/ptl/base/ptl_base_sendfrag.h"
#include "ptl_gm.h"
#include "ptl_gm_priv.h"
OBJ_CLASS_DECLARATION (mca_ptl_gm_send_frag_t);
struct mca_ptl_base_peer_t;
/**
* GM send fragment derived type.
*/
struct mca_ptl_gm_send_frag_t {
mca_ptl_base_send_frag_t super; /**< base send fragment descriptor */
struct reg_buf *sbuf; /**< registered send buffer backing this fragment */
};
typedef struct mca_ptl_gm_send_frag_t mca_ptl_gm_send_frag_t;
/* Grab a send fragment from the module free list; rc receives the
 * free-list status. */
#define MCA_PTL_GM_SEND_FRAG_ALLOC(item, rc) \
OMPI_FREE_LIST_GET(&mca_ptl_gm_module.gm_send_frags, item, rc);
#endif