1
1
This commit was SVN r301.
Этот коммит содержится в:
Tim Woodall 2004-01-12 18:17:29 +00:00
родитель f55da44561
Коммит a7d3a9752d
10 изменённых файлов: 670 добавлений и 622 удалений

Просмотреть файл

@ -11,7 +11,7 @@ noinst_LTLIBRARIES = libmem.la
headers = \
allocator.h \
free_list.h \
free_lists.h \
mem_globals.h \
mem_pool.h \
seg_list.h \
@ -21,6 +21,7 @@ libmem_la_SOURCES = \
$(headers) \
allocator.c \
free_list.c \
free_lists.c \
mem_globals.c \
mem_pool.c \
seg_list.c \

Просмотреть файл

@ -4,526 +4,4 @@
#include "lam_config.h"
#include "lam/mem/free_list.h"
#include "lam/util/lam_log.h"
#include "lam/os/numa.h"
#include "lam/os/lam_system.h"
#include "lam/mem/mem_globals.h"
/* private list functions */
static lam_flist_elt_t *lam_flr_request_elt(lam_free_list_t *flist,
int pool_idx);
static void lam_frl_append(lam_free_list_t *flist, void *chunk, int pool_idx);
static int lam_frl_create_more_elts(lam_free_list_t *flist, int pool_idx);
static void *lam_frl_get_mem_chunk(lam_free_list_t *flist, int index, size_t *len, int *err);
static int lam_frl_mem_pool_init(lam_free_list_t *flist, int nlists, long pages_per_list, ssize_t chunk_size,
size_t page_size, long min_pages_per_list,
long default_min_pages_per_list, long default_pages_per_list,
long max_pages_per_list, ssize_t max_mem_in_pool);
/* Class descriptor for lam_free_list_t: class name, parent class, and the
 * constructor/destructor pair invoked by the object system (SUPER_INIT /
 * OBJ_RELEASE and friends). */
lam_class_info_t free_list_cls = {"lam_free_list_t", &lam_object_cls,
    (class_init_t)lam_frl_init, (class_destroy_t)lam_frl_destroy};
/*
 * Default constructor for a lam_free_list_t.
 *
 * Chains up to the parent class, initializes the list lock, and zeroes
 * every field so that the object is in a well-defined state before
 * lam_frl_init_with() configures it (and so lam_frl_destroy() is safe
 * on a list that was never fully set up).
 *
 * @param flist  list object to construct (must be non-NULL)
 */
void lam_frl_init(lam_free_list_t *flist)
{
    SUPER_INIT(flist, free_list_cls.cls_parent);
    lam_mtx_init(&flist->fl_lock);
    flist->fl_pool = NULL;
    flist->fl_elt_cls = NULL;
    flist->fl_description = NULL;
    flist->fl_free_lists = NULL;
    flist->fl_is_shared = 0;
    flist->fl_nlists = 0;
    flist->fl_elt_per_chunk = 0;
    flist->fl_elt_size = 0;
    flist->fl_retry_more_resources = 0;
    flist->fl_enforce_affinity = 0;
    flist->fl_affinity = NULL;
    flist->fl_threshold_grow = 0;
#if LAM_ENABLE_MEM_PROFILE
    /* profiling counter arrays are allocated lazily elsewhere */
    flist->fl_elt_out = NULL;
    flist->fl_elt_max = NULL;
    flist->fl_elt_sum = NULL;
    flist->fl_nevents = NULL;
    flist->fl_chunks_req = NULL;
    flist->fl_chunks_returned = NULL;
#endif
}
/*
 * Destructor for a lam_free_list_t.
 *
 * Releases the memory pool reference and every per-pool segment list,
 * frees the affinity array and (when profiling is compiled in) the
 * profiling counter arrays, then chains up to the parent destructor.
 *
 * Fixes vs. original:
 *  - fl_pool / fl_free_lists are only touched if they were ever set
 *    (lam_frl_init leaves both NULL), so destroying a list that never
 *    went through lam_frl_init_with() no longer dereferences NULL.
 *  - the fl_free_lists pointer array itself (always malloc'ed in
 *    lam_frl_init_with) is now freed -- previously leaked.
 */
void lam_frl_destroy(lam_free_list_t *flist)
{
    int i;

    if ( flist->fl_pool )
        OBJ_RELEASE(flist->fl_pool);
    if ( flist->fl_free_lists )
    {
        for ( i = 0; i < flist->fl_nlists; i++ )
            OBJ_RELEASE(flist->fl_free_lists[i]);
        /* the array of list pointers is plain malloc'ed memory */
        free(flist->fl_free_lists);
        flist->fl_free_lists = NULL;
    }
    if ( flist->fl_affinity )
        free(flist->fl_affinity);
#if LAM_ENABLE_MEM_PROFILE
    if ( flist->fl_elt_out )
        free(flist->fl_elt_out);
    if ( flist->fl_elt_max )
        free(flist->fl_elt_max);
    if ( flist->fl_elt_sum )
        free(flist->fl_elt_sum);
    if ( flist->fl_nevents )
        free(flist->fl_nevents);
    if ( flist->fl_chunks_req )
        free(flist->fl_chunks_req);
    if ( flist->fl_chunks_returned )
        free(flist->fl_chunks_returned);
#endif
    SUPER_DESTROY(flist, free_list_cls.cls_parent);
}
/*
 * Configure a (previously constructed) free list.
 *
 * Sets up the backing memory pool (either adopting a caller-supplied,
 * reference-counted pool or creating a private one), allocates one
 * segment list per pool index, pins min/max growth bounds on each, and
 * pre-populates each list up to its minimum byte count.
 *
 * @param flist     list object; lam_frl_init() must already have run
 * @param nlists    number of per-pool segment lists to manage
 * @param mem_pool  optional existing pool to share; NULL to create one
 * @return LAM_SUCCESS or an error code from pool initialization.
 *         Out-of-memory during list setup calls lam_exit() (fatal).
 */
int lam_frl_init_with(
    lam_free_list_t *flist,
    int nlists,
    int pages_per_list,
    size_t chunk_size,
    size_t page_size,
    size_t elt_size,
    int min_pages_per_list,
    int max_pages_per_list,
    int max_consec_req_fail,
    const char *description,
    bool retry_for_more_resources,
    lam_affinity_t *affinity,
    bool enforce_affinity,
    lam_mem_pool_t *mem_pool)
{
    /* lam_frl_init must have been called prior to calling this function */
    size_t max_mem_in_pool;
    size_t initial_mem_per_list;
    long max_mem_per_list;
    int list, pool;
    int err = LAM_SUCCESS;

    flist->fl_description = description;
    flist->fl_nlists = nlists;

    /* set up the memory pool */
    if ( mem_pool )
    {
        /* adopt the caller's pool; reference-counted sharing */
        flist->fl_pool = mem_pool;
        OBJ_RETAIN(flist->fl_pool);
    }
    else
    {
        /* instantiate memory pool */
        /* NOTE(review): when max_pages_per_list == -1 ("unbounded") this
           multiplication wraps to a huge size_t -- confirm intended */
        max_mem_in_pool = max_pages_per_list * page_size;
        err = lam_frl_mem_pool_init(
            flist,
            nlists,
            pages_per_list,
            chunk_size,
            page_size,
            min_pages_per_list,
            min_pages_per_list,
            pages_per_list,
            max_pages_per_list,
            max_mem_in_pool);
        if (err != LAM_SUCCESS)
        {
            return err;
        }
    }
    /* reset pool chunk size: the pool may have rounded it up */
    chunk_size = lam_mp_get_chunk_size(flist->fl_pool);
    /* Number of elements per chunk (integer division; remainder wasted) */
    flist->fl_elt_per_chunk = chunk_size / elt_size;
    initial_mem_per_list = min_pages_per_list * page_size;
    /* adjust initial_mem_per_list to increments of chunk_size */
    if ( initial_mem_per_list < chunk_size )
    {
        min_pages_per_list = (((chunk_size - 1) / page_size) + 1);
        initial_mem_per_list = min_pages_per_list * page_size;
    }
    /* determine upper limit on number of pages in a given list
       (-1 means "no upper bound") */
    if ( (max_pages_per_list != -1) && (max_pages_per_list < min_pages_per_list) )
        max_pages_per_list = min_pages_per_list;
    if (max_pages_per_list == -1)
        max_mem_per_list = -1;
    else
        max_mem_per_list = max_pages_per_list * page_size;
    /* initialize empty lists of available descriptors */
    flist->fl_free_lists = (lam_seg_list_t **)
        malloc(sizeof(lam_seg_list_t *) *
               flist->fl_nlists);
    if ( !flist->fl_free_lists )
    {
        lam_exit((-1, "Error: Out of memory\n"));
    }
    /* run constructors */
    for (list = 0; list < flist->fl_nlists; list++)
    {
        if ( flist->fl_is_shared )
        {
            /* process shared memory allocation */
            flist->fl_free_lists[list] =
                (lam_seg_list_t *)
                lam_fmp_get_mem_segment(&lam_per_proc_shmem_pools,
                                        sizeof(lam_seg_list_t), CACHE_ALIGNMENT, list);
        }
        else
        {
            /* process private memory allocation */
            flist->fl_free_lists[list] =
                (lam_seg_list_t *)malloc(sizeof(lam_seg_list_t));
        }
        if (!flist->fl_free_lists[list])
            lam_exit((-1, "Error: Out of memory\n"));
        STATIC_INIT(flist->fl_free_lists[list], &lam_seg_list_cls);
        lam_sgl_set_min_bytes_pushed(flist->fl_free_lists[list],
                                     initial_mem_per_list);
        lam_sgl_set_max_bytes_pushed(flist->fl_free_lists[list],
                                     max_mem_per_list);
        lam_sgl_set_max_consec_fail(flist->fl_free_lists[list],
                                    max_consec_req_fail);
    } /* end list loop */
    flist->fl_retry_more_resources = retry_for_more_resources;
    flist->fl_enforce_affinity = enforce_affinity;
    if ( enforce_affinity )
    {
        /* NOTE(review): cast/sizeof use `affinity_t` while fl_affinity is
           declared as `lam_affinity_t *` -- confirm `affinity_t` exists
           and matches, otherwise this should be lam_affinity_t */
        flist->fl_affinity = (affinity_t *)malloc(sizeof(affinity_t) *
                                                  flist->fl_nlists);
        if ( !flist->fl_affinity )
            lam_exit((-1, "Error: Out of memory\n"));
        /* copy policies in */
        for ( pool = 0; pool < flist->fl_nlists; pool++ )
        {
            flist->fl_affinity[pool] = affinity[pool];
        }
    }
    /* initialize locks for memory pool and individual list and link locks */
    for ( pool = 0; pool < flist->fl_nlists; pool++ ) {
        /* gain exclusive use of list */
        if ( 1 == lam_sgl_lock_list(flist->fl_free_lists[pool]) ) {
            /* pre-grow the list until it holds its minimum byte count */
            while ( lam_sgl_get_bytes_pushed(flist->fl_free_lists[pool])
                    < lam_sgl_get_min_bytes_pushed(flist->fl_free_lists[pool]) )
            {
                if (lam_frl_create_more_elts(flist, pool) != LAM_SUCCESS)
                {
                    lam_exit((-1, "Error: Setting up initial private "
                              "free list for %s.\n", flist->fl_description));
                }
            }
            lam_sgl_unlock_list(flist->fl_free_lists[pool]);
        }
        else
        {
            /* only 1 process should be initializing the list */
            lam_exit((-1, "Error: Setting up initial private free "
                      "list %d for %s.\n", pool, flist->fl_description));
        }
    }
    return err;
}
/*
 * Create and initialize the backing memory pool for a free list.
 *
 * Rounds chunk_size up to a page-size multiple, sizes the initial pool
 * from pages_per_list (or default_pages_per_list when -1) times the
 * number of lists, and allocates the pool object either from shared
 * memory (fl_is_shared) or the process heap.
 *
 * @return result of lam_mp_init_with(); the pool object is stored in
 *         flist->fl_pool.
 *
 * NOTE(review): default_min_pages_per_list and min_pages_per_list are
 * accepted but never used here -- confirm that is intentional.
 */
static int lam_frl_mem_pool_init(lam_free_list_t *flist,
    int nlists, long pages_per_list, ssize_t chunk_size,
    size_t page_size, long min_pages_per_list,
    long default_min_pages_per_list, long default_pages_per_list,
    long max_pages_per_list, ssize_t max_mem_in_pool)
{
    int err = LAM_SUCCESS;
    long total_pgs_to_alloc;
    ssize_t mem_in_pool;
    size_t to_alloc;

    /* set chunksize - round up to a multiple of page size */
    chunk_size =
        ((((chunk_size - 1) / page_size) + 1) * page_size);
    /* determine how much memory to allocate */
    if ( pages_per_list == -1 ) {
        /* minimum size is default_pages_per_list * number of lists */
        total_pgs_to_alloc = default_pages_per_list * nlists;
    } else {
        total_pgs_to_alloc = pages_per_list * nlists;
    }
    mem_in_pool = total_pgs_to_alloc * page_size;
    /* Initialize memory pool */
    if ( flist->fl_is_shared ) {
        /* shared memory allocation */
        to_alloc = sizeof(lam_mem_pool_t);
        flist->fl_pool =
            (lam_mem_pool_t *)lam_fmp_get_mem_segment(&lam_shmem_pools,
                                                      to_alloc,
                                                      CACHE_ALIGNMENT, 0);
        if ( flist->fl_pool )
            STATIC_INIT(flist->fl_pool, &shmem_pool_cls);
    } else {
        /* process private memory allocation */
        CREATE_OBJECT(flist->fl_pool, lam_mem_pool_t, &mem_pool_cls);
    }
    /* NOTE(review): if the shared-memory branch failed, fl_pool may be
       NULL here and lam_mp_init_with is still called -- confirm it
       tolerates NULL */
    err = lam_mp_init_with(
        flist->fl_pool,
        mem_in_pool,
        max_mem_in_pool,
        chunk_size,
        page_size);
    return err;
}
/*
 * Request one chunk of memory from the pool on behalf of list `index`.
 *
 * Enforces the per-list max-bytes limit and tracks consecutive request
 * failures; when failures reach the configured threshold the error is
 * promoted from "temporarily out of resources" to "out of resources".
 *
 * @param len  out: size of the returned chunk (set only on success)
 * @param err  out: LAM_SUCCESS, LAM_ERR_TEMP_OUT_OF_RESOURCE,
 *             LAM_ERR_OUT_OF_RESOURCE, or LAM_ERROR (bad index)
 * @return chunk pointer, or NULL on any failure
 *
 * Fixes vs. original:
 *  - the index bounds check now runs BEFORE the profiling counter
 *    increment (previously fl_chunks_req[index]++ could write out of
 *    bounds) and before any per-index array access;
 *  - *err is set on the bounds-failure path (previously left untouched,
 *    so callers could see stale LAM_SUCCESS with a NULL chunk);
 *  - *err is explicitly set to LAM_SUCCESS on the success path.
 */
static void *lam_frl_get_mem_chunk(lam_free_list_t *flist, int index, size_t *len, int *err)
{
    void *chunk = 0;
    uint64_t sz_to_add;

    /* validate index before touching any per-index array */
    if (index >= flist->fl_nlists)
    {
        *err = LAM_ERROR;
        lam_err(("Error: Array out of bounds\n"));
        return chunk;
    }
    /* check to make sure that the amount to add to the list does not
       exceed the amount allowed */
    sz_to_add = lam_mp_get_chunk_size(flist->fl_pool);
#if LAM_ENABLE_MEM_PROFILE
    flist->fl_chunks_req[index]++;
#endif
    if ( lam_sgl_get_max_bytes_pushed(flist->fl_free_lists[index]) != -1 )
    {
        if (sz_to_add +
            lam_sgl_get_bytes_pushed(flist->fl_free_lists[index]) >
            lam_sgl_get_max_bytes_pushed(flist->fl_free_lists[index]) )
        {
            lam_sgl_inc_consec_fail(flist->fl_free_lists[index]);
            if ( lam_sgl_get_consec_fail(flist->fl_free_lists[index]) >=
                 lam_sgl_get_max_consec_fail(flist->fl_free_lists[index]) )
            {
                *err = LAM_ERR_OUT_OF_RESOURCE;
                lam_err(("Error: List out of memory in pool for %s\n",
                         flist->fl_description));
            }
            else
            {
                *err = LAM_ERR_TEMP_OUT_OF_RESOURCE;
            }
            return chunk;
        }
    }
    /* set len */
    *len = sz_to_add;
    /* get chunk of memory */
    chunk = lam_mp_request_chunk(flist->fl_pool, index);
    if ( 0 == chunk )
    {
        /* increment failure count */
        lam_sgl_inc_consec_fail(flist->fl_free_lists[index]);
        if ( lam_sgl_get_consec_fail(flist->fl_free_lists[index]) >=
             lam_sgl_get_max_consec_fail(flist->fl_free_lists[index]) )
        {
            *err = LAM_ERR_OUT_OF_RESOURCE;
            lam_err(("Error: List out of memory in pool for %s\n",
                     flist->fl_description));
        }
        else
        {
            *err = LAM_ERR_TEMP_OUT_OF_RESOURCE;
        }
        return chunk;
    }
    /* set consecutive failure count to 0 - if we fail, we don't get
       this far in the code. */
    lam_sgl_set_consec_fail(flist->fl_free_lists[index], 0);
#if LAM_ENABLE_MEM_PROFILE
    flist->fl_chunks_returned[index]++;
#endif
    *err = LAM_SUCCESS;
    return chunk;
}
/*
 * Pop the last element off the segment list for pool `pool_idx` and
 * reset that list's consecutive-failure counter on success.
 *
 * NOTE(review): ROB_HASNT_FINISHED_THIS_YET is not defined in this
 * file, so the #if arm is compiled out and this is currently a stub
 * that always returns NULL.
 */
static lam_flist_elt_t *lam_flr_request_elt(lam_free_list_t *flist, int pool_idx)
{
#if ROB_HASNT_FINISHED_THIS_YET
    lam_dbl_list_t *seg_list = &(flist->fl_free_lists[pool_idx]->sgl_list);
    volatile lam_flist_elt_t *elt = lam_dbl_get_last(seg_list);
    if ( elt )
        lam_sgl_set_consec_fail(seg_list, 0);
    return elt;
#else
    return NULL;
#endif
}
/*
 * Carve a freshly obtained pool chunk into fl_elt_per_chunk elements of
 * fl_elt_size bytes and push them all onto the segment list for
 * `pool_idx`. The chunk must have come from flist->fl_pool.
 */
static void lam_frl_append(lam_free_list_t *flist, void *chunk, int pool_idx)
{
    /* ASSERT: mp_chunk_sz >= fl_elt_per_chunk * fl_elt_size */
    /* push items onto list */
    lam_sgl_append_elt_chunk(flist->fl_free_lists[pool_idx],
                             chunk, lam_mp_get_chunk_size(flist->fl_pool),
                             flist->fl_elt_per_chunk, flist->fl_elt_size);
}
/*
 * Grow the free list for pool `pool_idx` by one pool chunk: obtain the
 * chunk, optionally bind its memory affinity, run the element-class
 * constructor over each element slot, and push the elements onto the
 * segment list.
 *
 * @return LAM_SUCCESS on success; an error code otherwise.
 *
 * Fix vs. original: when lam_frl_get_mem_chunk returns NULL on a path
 * that did not set err, the original returned the stale LAM_SUCCESS.
 * A NULL chunk is now always reported as a failure.
 */
static int lam_frl_create_more_elts(lam_free_list_t *flist, int pool_idx)
{
    int err = LAM_SUCCESS, desc;
    size_t len_added;
    char *current_loc;
    void *ptr = lam_frl_get_mem_chunk(flist, pool_idx, &len_added, &err);

    if (0 == ptr ) {
        lam_err(("Error: Can't get new elements for %s\n",
                 flist->fl_description));
        /* never report a failed chunk request as success */
        return (LAM_SUCCESS == err) ? LAM_ERROR : err;
    }
    /* attach memory affinity */
    if ( flist->fl_enforce_affinity )
    {
        if (!lam_set_affinity(ptr, len_added,
                              flist->fl_affinity[pool_idx]))
        {
            err = LAM_ERROR;
#ifdef _DEBUGQUEUES
            /* in debug builds an affinity failure is fatal for this call;
               otherwise we fall through and still use the memory */
            lam_err(("Error: Can't set memory policy (pool_idx=%d)\n",
                     pool_idx));
            return err;
#endif /* _DEBUGQUEUES */
        }
    }
    /* Construct new descriptors using placement new */
    current_loc = (char *) ptr;
    for (desc = 0; desc < flist->fl_elt_per_chunk; desc++)
    {
        STATIC_INIT(*(lam_flist_elt_t *)current_loc, flist->fl_elt_cls);
        lam_fle_set_idx((lam_flist_elt_t *)current_loc, pool_idx);
        current_loc += flist->fl_elt_size;
    }
    /* push chunk of memory onto the list */
    lam_frl_append(flist, ptr, pool_idx);
    return err;
}
/*
 * Fetch a free-list element for pool `index`, growing the list when it
 * is empty and growth is permitted.
 *
 * @param error  out: LAM_SUCCESS, LAM_ERR_TEMP_OUT_OF_RESOURCE, or a
 *               failure code from list growth
 * @return element pointer, or NULL on failure
 *
 * Fixes vs. original (inside the #if arm):
 *  - removed the local `int error;` which redeclared the `int *error`
 *    parameter (a compile error once this arm is enabled); the status
 *    is now written through the parameter as callers expect;
 *  - the retry loop condition compared nothing: `(LAM_SUCCESS)` is a
 *    constant -- it now tests `LAM_SUCCESS == *error`;
 *  - the volatile qualifier is cast away explicitly on return;
 *  - the stub arm now sets *error instead of leaving it uninitialized.
 */
lam_flist_elt_t *lam_frl_get_elt(lam_free_list_t *flist, int index, int *error)
{
#if ROB_HASNT_FINISHED_THIS_YET
    volatile lam_flist_elt_t *elem = lam_flr_request_elt(flist, index);

    if ( elem )
    {
        *error = LAM_SUCCESS;
    }
    else if ( lam_sgl_get_consec_fail(&(flist->fl_free_lists[index]->sgl_list))
              < flist->fl_threshold_grow )
    {
        /* below the growth threshold: report a transient shortage */
        *error = LAM_ERR_TEMP_OUT_OF_RESOURCE;
    }
    else
    {
        *error = LAM_SUCCESS;
        while ( (LAM_SUCCESS == *error) && (0 == elem) &&
                (flist->fl_retry_more_resources) )
        {
            *error = lam_frl_create_more_elts(flist, index);
            /* get element if managed to add resources to the list */
            if ( LAM_SUCCESS == *error )
            {
                elem = lam_flr_request_elt(flist, index);
            }
        }
        if ( (LAM_ERR_OUT_OF_RESOURCE == *error)
             || (LAM_ERR_FATAL == *error) )
        {
            return 0;
        }
    }
#if LAM_ENABLE_MEM_PROFILE
    flist->fl_elt_out[index]++;
    flist->fl_elt_sum[index] += flist->fl_elt_out[index];
    flist->fl_nevents[index]++;
    if (flist->fl_elt_max[index] < flist->fl_elt_out[index])
    {
        flist->fl_elt_max[index] = flist->fl_elt_out[index];
    }
#endif
    return (lam_flist_elt_t *)elem;
#else
    /* not implemented yet */
    *error = LAM_ERROR;
    return NULL;
#endif
}
/*
 * Return an element to the free list for pool `index`.
 *
 * Appends the item to the segment list with memory barriers on either
 * side of the append, and decrements the outstanding-element profiling
 * counter when profiling is compiled in.
 *
 * NOTE(review): ROB_HASNT_FINISHED_THIS_YET is not defined in this
 * file, so this currently always returns LAM_ERROR (stub).
 */
int lam_frl_return_elt(lam_free_list_t *flist, int index, lam_flist_elt_t *item)
{
#if ROB_HASNT_FINISHED_THIS_YET
    mb();
    lam_dbl_append(&(flist->fl_free_lists[index]->sgl_list), item);
    mb();
#if LAM_ENABLE_MEM_PROFILE
    flist->fl_elt_out[index]--;
#endif
    return LAM_SUCCESS;
#else
    return LAM_ERROR;
#endif
}

Просмотреть файл

@ -2,8 +2,8 @@
* $HEADER$
*/
#ifndef LAM_FREE_LIST
#define LAM_FREE_LIST
#ifndef LAM_FREE_LIST_H
#define LAM_FREE_LIST_H
#include "lam_config.h"
#include "lam/lfc/list.h"
@ -11,48 +11,23 @@
#include "lam/mem/seg_list.h"
#include "lam/mem/mem_pool.h"
/*
* Free list element interface
* Any item that goes into the free list must
* inherit from this class.
*/
extern lam_class_info_t lam_free_lists_cls;
typedef struct lam_flist_elt
{
lam_list_item_t super;
int fle_pool_idx;
} lam_flist_elt_t;
void lam_flist_elt_init(lam_flist_elt_t*);
void lam_flist_elt_destroy(lam_flist_elt_t*);
#define lam_fle_get_idx(elt) (elt)->fle_pool_idx
#define lam_fle_set_idx(elt, idx) ((elt)->fle_pool_idx = idx)
/*
* Memory affinity is almost certainly an int everywhere, but let's
* make it a typedef in case we need to make it OS dependenent
* sometime...
*/
typedef int lam_affinity_t;
typedef struct lam_free_list
struct lam_free_list_t
{
lam_object_t super;
int fl_is_shared;
lam_mem_pool_t *fl_pool;
const char *fl_description;
int fl_nlists;
int fl_elt_per_chunk;
size_t fl_elt_size;
lam_seg_list_t **fl_free_lists;
lam_seg_list_t *fl_free_list;
int fl_retry_more_resources;
int fl_enforce_affinity;
lam_affinity_t *fl_affinity; /* array of lam_affinity_t */
int fl_threshold_grow;
lam_class_info_t *fl_elt_cls; /* this will be used to create new free list elements. */
lam_mutex_t fl_lock;
lam_class_info_t *fl_elt_cls; /* this will be used to create new free list elements. */
lam_mutex_t fl_lock;
/* for mem profiling */
int *fl_elt_out;
@ -63,41 +38,25 @@ typedef struct lam_free_list
int *fl_chunks_req;
int *fl_chunks_returned;
#endif
} lam_free_list_t;
extern lam_class_info_t free_list_cls;
void lam_frl_init(lam_free_list_t *flist);
void lam_frl_destroy(lam_free_list_t *flist);
/* lam_frl_init must have been called prior to calling this function */
int lam_frl_init_with(lam_free_list_t *flist,
int nlists,
int pages_per_list,
size_t chunk_size,
size_t page_size,
size_t element_size,
int min_pages_per_list,
int max_pages_per_list,
int max_consec_req_fail,
const char *description,
bool retry_for_more_resources,
lam_affinity_t *affinity,
bool enforce_affinity,
lam_mem_pool_t *pool);
};
typedef struct lam_free_list_t lam_free_list_t;
lam_flist_elt_t *lam_frl_get_elt(lam_free_list_t *flist, int index, int *error);
int lam_frl_return_elt(lam_free_list_t *flist, int index, lam_flist_elt_t *item);
void lam_free_list_init(lam_free_list_t *flist);
void lam_free_list_destroy(lam_free_list_t *flist);
/*
* Accessor functions
*/
/* lam_free_list_init() must have been called prior to calling this function */
int lam_free_list_init_with(
lam_free_list_t *flist,
size_t element_size,
int min_pages,
int max_pages,
int num_pages_per_alloc,
lam_mem_pool_t *pool);
int lam_frl_get_thresh_grow(lam_free_list_t *flist);
void lam_frl_set_thresh_grow(lam_free_list_t *flist, int to_grow);
lam_list_item_t *lam_free_list_get(lam_free_list_t *, int *);
int lam_free_list_return(lam_free_list_t *, lam_list_item_t *);
#endif

532
src/lam/mem/free_lists.c Обычный файл
Просмотреть файл

@ -0,0 +1,532 @@
/*
* $HEADER$
*/
#include "lam_config.h"
#include "lam/mem/free_lists.h"
#include "lam/util/lam_log.h"
#include "lam/os/numa.h"
#include "lam/os/lam_system.h"
#include "lam/mem/mem_globals.h"
#ifndef ROB_HASNT_FINISHED_THIS_YET
#define ROB_HASNT_FINISHED_THIS_YET 0
#endif
/* private list functions */
static lam_list_item_t *lam_free_lists_request_elt(lam_free_lists_t *flist,
int pool_idx);
static void lam_free_lists_append(lam_free_lists_t *flist, void *chunk, int pool_idx);
static int lam_free_lists_create_more_elts(lam_free_lists_t *flist, int pool_idx);
static void *lam_free_lists_get_mem_chunk(lam_free_lists_t *flist, int index, size_t *len, int *err);
static int lam_free_lists_mem_pool_init(lam_free_lists_t *flist, int nlists, long pages_per_list, ssize_t chunk_size,
size_t page_size, long min_pages_per_list,
long default_min_pages_per_list, long default_pages_per_list,
long max_pages_per_list, ssize_t max_mem_in_pool);
/* Class descriptor for lam_free_lists_t: class name, parent class, and
 * the constructor/destructor pair used by the object-system macros. */
lam_class_info_t lam_free_lists_cls = {"lam_free_lists_t", &lam_object_cls,
    (class_init_t)lam_free_lists_init, (class_destroy_t)lam_free_lists_destroy};
/*
 * Default constructor for a lam_free_lists_t.
 *
 * Chains up to the parent class, initializes the lock, and zeroes all
 * fields so the object is well-defined before lam_free_lists_init_with()
 * configures it (and so lam_free_lists_destroy() is safe on a
 * never-configured object).
 */
void lam_free_lists_init(lam_free_lists_t *flist)
{
    SUPER_INIT(flist, lam_free_lists_cls.cls_parent);
    lam_mtx_init(&flist->fl_lock);
    flist->fl_pool = NULL;
    flist->fl_elt_cls = NULL;
    flist->fl_description = NULL;
    flist->fl_free_lists = NULL;
    flist->fl_is_shared = 0;
    flist->fl_nlists = 0;
    flist->fl_elt_per_chunk = 0;
    flist->fl_elt_size = 0;
    flist->fl_retry_more_resources = 0;
    flist->fl_enforce_affinity = 0;
    flist->fl_affinity = NULL;
    flist->fl_threshold_grow = 0;
#if LAM_ENABLE_MEM_PROFILE
    /* profiling counter arrays are allocated lazily elsewhere */
    flist->fl_elt_out = NULL;
    flist->fl_elt_max = NULL;
    flist->fl_elt_sum = NULL;
    flist->fl_nevents = NULL;
    flist->fl_chunks_req = NULL;
    flist->fl_chunks_returned = NULL;
#endif
}
/*
 * Destructor for a lam_free_lists_t.
 *
 * Releases the pool reference and each per-pool segment list, frees the
 * affinity array and profiling counters, then chains up to the parent.
 *
 * Fixes vs. original:
 *  - fl_pool / fl_free_lists are only touched when set (the default
 *    constructor leaves both NULL);
 *  - the malloc'ed fl_free_lists pointer array is now freed --
 *    previously leaked.
 */
void lam_free_lists_destroy(lam_free_lists_t *flist)
{
    int i;

    if ( flist->fl_pool )
        OBJ_RELEASE(flist->fl_pool);
    if ( flist->fl_free_lists )
    {
        for ( i = 0; i < flist->fl_nlists; i++ )
            OBJ_RELEASE(flist->fl_free_lists[i]);
        /* the array of list pointers is plain malloc'ed memory */
        free(flist->fl_free_lists);
        flist->fl_free_lists = NULL;
    }
    if ( flist->fl_affinity )
        free(flist->fl_affinity);
#if LAM_ENABLE_MEM_PROFILE
    if ( flist->fl_elt_out )
        free(flist->fl_elt_out);
    if ( flist->fl_elt_max )
        free(flist->fl_elt_max);
    if ( flist->fl_elt_sum )
        free(flist->fl_elt_sum);
    if ( flist->fl_nevents )
        free(flist->fl_nevents);
    if ( flist->fl_chunks_req )
        free(flist->fl_chunks_req);
    if ( flist->fl_chunks_returned )
        free(flist->fl_chunks_returned);
#endif
    SUPER_DESTROY(flist, lam_free_lists_cls.cls_parent);
}
/*
 * Configure a (previously constructed) lam_free_lists_t.
 *
 * Sets up the backing memory pool (adopting a caller-supplied,
 * reference-counted pool or creating a private one), allocates one
 * segment list per pool index, pins min/max growth bounds on each, and
 * pre-populates each list up to its minimum byte count.
 *
 * @param flist     list object; lam_free_lists_init() must already have run
 * @param nlists    number of per-pool segment lists to manage
 * @param mem_pool  optional existing pool to share; NULL to create one
 * @return LAM_SUCCESS or an error code from pool initialization.
 *         Out-of-memory during list setup calls lam_exit() (fatal).
 */
int lam_free_lists_init_with(
    lam_free_lists_t *flist,
    int nlists,
    int pages_per_list,
    size_t chunk_size,
    size_t page_size,
    size_t elt_size,
    int min_pages_per_list,
    int max_pages_per_list,
    int max_consec_req_fail,
    const char *description,
    bool retry_for_more_resources,
    lam_affinity_t *affinity,
    bool enforce_affinity,
    lam_mem_pool_t *mem_pool)
{
    /* lam_free_lists_init must have been called prior to calling this function */
    size_t max_mem_in_pool;
    size_t initial_mem_per_list;
    long max_mem_per_list;
    int list, pool;
    int err = LAM_SUCCESS;

    flist->fl_description = description;
    flist->fl_nlists = nlists;

    /* set up the memory pool */
    if ( mem_pool )
    {
        /* adopt the caller's pool; reference-counted sharing */
        flist->fl_pool = mem_pool;
        OBJ_RETAIN(flist->fl_pool);
    }
    else
    {
        /* instantiate memory pool */
        /* NOTE(review): when max_pages_per_list == -1 ("unbounded") this
           multiplication wraps to a huge size_t -- confirm intended */
        max_mem_in_pool = max_pages_per_list * page_size;
        err = lam_free_lists_mem_pool_init(
            flist,
            nlists,
            pages_per_list,
            chunk_size,
            page_size,
            min_pages_per_list,
            min_pages_per_list,
            pages_per_list,
            max_pages_per_list,
            max_mem_in_pool);
        if (err != LAM_SUCCESS)
        {
            return err;
        }
    }
    /* reset pool chunk size: the pool may have rounded it up */
    chunk_size = lam_mp_get_chunk_size(flist->fl_pool);
    /* Number of elements per chunk (integer division; remainder wasted) */
    flist->fl_elt_per_chunk = chunk_size / elt_size;
    initial_mem_per_list = min_pages_per_list * page_size;
    /* adjust initial_mem_per_list to increments of chunk_size */
    if ( initial_mem_per_list < chunk_size )
    {
        min_pages_per_list = (((chunk_size - 1) / page_size) + 1);
        initial_mem_per_list = min_pages_per_list * page_size;
    }
    /* determine upper limit on number of pages in a given list
       (-1 means "no upper bound") */
    if ( (max_pages_per_list != -1) && (max_pages_per_list < min_pages_per_list) )
        max_pages_per_list = min_pages_per_list;
    if (max_pages_per_list == -1)
        max_mem_per_list = -1;
    else
        max_mem_per_list = max_pages_per_list * page_size;
    /* initialize empty lists of available descriptors */
    flist->fl_free_lists = (lam_seg_list_t **)
        malloc(sizeof(lam_seg_list_t *) *
               flist->fl_nlists);
    if ( !flist->fl_free_lists )
    {
        lam_exit((-1, "Error: Out of memory\n"));
    }
    /* run constructors */
    for (list = 0; list < flist->fl_nlists; list++)
    {
        if ( flist->fl_is_shared )
        {
            /* process shared memory allocation */
            flist->fl_free_lists[list] =
                (lam_seg_list_t *)
                lam_fmp_get_mem_segment(&lam_per_proc_shmem_pools,
                                        sizeof(lam_seg_list_t), CACHE_ALIGNMENT, list);
        }
        else
        {
            /* process private memory allocation */
            flist->fl_free_lists[list] =
                (lam_seg_list_t *)malloc(sizeof(lam_seg_list_t));
        }
        if (!flist->fl_free_lists[list])
            lam_exit((-1, "Error: Out of memory\n"));
        STATIC_INIT(flist->fl_free_lists[list], &lam_seg_list_cls);
        lam_sgl_set_min_bytes_pushed(flist->fl_free_lists[list],
                                     initial_mem_per_list);
        lam_sgl_set_max_bytes_pushed(flist->fl_free_lists[list],
                                     max_mem_per_list);
        lam_sgl_set_max_consec_fail(flist->fl_free_lists[list],
                                    max_consec_req_fail);
    } /* end list loop */
    flist->fl_retry_more_resources = retry_for_more_resources;
    flist->fl_enforce_affinity = enforce_affinity;
    if ( enforce_affinity )
    {
        /* NOTE(review): cast/sizeof use `affinity_t` while fl_affinity is
           declared as `lam_affinity_t *` -- confirm `affinity_t` exists
           and matches, otherwise this should be lam_affinity_t */
        flist->fl_affinity = (affinity_t *)malloc(sizeof(affinity_t) *
                                                  flist->fl_nlists);
        if ( !flist->fl_affinity )
            lam_exit((-1, "Error: Out of memory\n"));
        /* copy policies in */
        for ( pool = 0; pool < flist->fl_nlists; pool++ )
        {
            flist->fl_affinity[pool] = affinity[pool];
        }
    }
    /* initialize locks for memory pool and individual list and link locks */
    for ( pool = 0; pool < flist->fl_nlists; pool++ ) {
        /* gain exclusive use of list */
        if ( 1 == lam_sgl_lock_list(flist->fl_free_lists[pool]) ) {
            /* pre-grow the list until it holds its minimum byte count */
            while ( lam_sgl_get_bytes_pushed(flist->fl_free_lists[pool])
                    < lam_sgl_get_min_bytes_pushed(flist->fl_free_lists[pool]) )
            {
                if (lam_free_lists_create_more_elts(flist, pool) != LAM_SUCCESS)
                {
                    lam_exit((-1, "Error: Setting up initial private "
                              "free list for %s.\n", flist->fl_description));
                }
            }
            lam_sgl_unlock_list(flist->fl_free_lists[pool]);
        }
        else
        {
            /* only 1 process should be initializing the list */
            lam_exit((-1, "Error: Setting up initial private free "
                      "list %d for %s.\n", pool, flist->fl_description));
        }
    }
    return err;
}
/*
 * Create and initialize the backing memory pool for a free-lists object.
 *
 * Rounds chunk_size up to a page-size multiple, sizes the initial pool
 * from pages_per_list (or default_pages_per_list when -1) times the
 * number of lists, and allocates the pool object either from shared
 * memory (fl_is_shared) or the process heap.
 *
 * @return result of lam_mp_init_with(); the pool object is stored in
 *         flist->fl_pool.
 *
 * NOTE(review): default_min_pages_per_list and min_pages_per_list are
 * accepted but never used here -- confirm that is intentional.
 */
static int lam_free_lists_mem_pool_init(lam_free_lists_t *flist,
    int nlists, long pages_per_list, ssize_t chunk_size,
    size_t page_size, long min_pages_per_list,
    long default_min_pages_per_list, long default_pages_per_list,
    long max_pages_per_list, ssize_t max_mem_in_pool)
{
    int err = LAM_SUCCESS;
    long total_pgs_to_alloc;
    ssize_t mem_in_pool;
    size_t to_alloc;

    /* set chunksize - round up to a multiple of page size */
    chunk_size =
        ((((chunk_size - 1) / page_size) + 1) * page_size);
    /* determine how much memory to allocate */
    if ( pages_per_list == -1 ) {
        /* minimum size is default_pages_per_list * number of lists */
        total_pgs_to_alloc = default_pages_per_list * nlists;
    } else {
        total_pgs_to_alloc = pages_per_list * nlists;
    }
    mem_in_pool = total_pgs_to_alloc * page_size;
    /* Initialize memory pool */
    if ( flist->fl_is_shared ) {
        /* shared memory allocation */
        to_alloc = sizeof(lam_mem_pool_t);
        flist->fl_pool =
            (lam_mem_pool_t *)lam_fmp_get_mem_segment(&lam_shmem_pools,
                                                      to_alloc,
                                                      CACHE_ALIGNMENT, 0);
        if ( flist->fl_pool )
            STATIC_INIT(flist->fl_pool, &shmem_pool_cls);
    } else {
        /* process private memory allocation */
        CREATE_OBJECT(flist->fl_pool, lam_mem_pool_t, &mem_pool_cls);
    }
    /* NOTE(review): if the shared-memory branch failed, fl_pool may be
       NULL here and lam_mp_init_with is still called -- confirm it
       tolerates NULL */
    err = lam_mp_init_with(
        flist->fl_pool,
        mem_in_pool,
        max_mem_in_pool,
        chunk_size,
        page_size);
    return err;
}
/*
 * Request one chunk of memory from the pool on behalf of list `index`.
 *
 * Enforces the per-list max-bytes limit and tracks consecutive request
 * failures; when failures reach the configured threshold the error is
 * promoted from "temporarily out of resources" to "out of resources".
 *
 * @param len  out: size of the returned chunk (set only on success)
 * @param err  out: LAM_SUCCESS, LAM_ERR_TEMP_OUT_OF_RESOURCE,
 *             LAM_ERR_OUT_OF_RESOURCE, or LAM_ERROR (bad index)
 * @return chunk pointer, or NULL on any failure
 *
 * Fixes vs. original:
 *  - the index bounds check now runs BEFORE the profiling counter
 *    increment (previously fl_chunks_req[index]++ could write out of
 *    bounds) and before any per-index array access;
 *  - *err is set on the bounds-failure path (previously left untouched,
 *    so callers could see stale LAM_SUCCESS with a NULL chunk);
 *  - *err is explicitly set to LAM_SUCCESS on the success path.
 */
static void *lam_free_lists_get_mem_chunk(lam_free_lists_t *flist, int index, size_t *len, int *err)
{
    void *chunk = 0;
    uint64_t sz_to_add;

    /* validate index before touching any per-index array */
    if (index >= flist->fl_nlists)
    {
        *err = LAM_ERROR;
        lam_err(("Error: Array out of bounds\n"));
        return chunk;
    }
    /* check to make sure that the amount to add to the list does not
       exceed the amount allowed */
    sz_to_add = lam_mp_get_chunk_size(flist->fl_pool);
#if LAM_ENABLE_MEM_PROFILE
    flist->fl_chunks_req[index]++;
#endif
    if ( lam_sgl_get_max_bytes_pushed(flist->fl_free_lists[index]) != -1 )
    {
        if (sz_to_add +
            lam_sgl_get_bytes_pushed(flist->fl_free_lists[index]) >
            lam_sgl_get_max_bytes_pushed(flist->fl_free_lists[index]) )
        {
            lam_sgl_inc_consec_fail(flist->fl_free_lists[index]);
            if ( lam_sgl_get_consec_fail(flist->fl_free_lists[index]) >=
                 lam_sgl_get_max_consec_fail(flist->fl_free_lists[index]) )
            {
                *err = LAM_ERR_OUT_OF_RESOURCE;
                lam_err(("Error: List out of memory in pool for %s\n",
                         flist->fl_description));
            }
            else
            {
                *err = LAM_ERR_TEMP_OUT_OF_RESOURCE;
            }
            return chunk;
        }
    }
    /* set len */
    *len = sz_to_add;
    /* get chunk of memory */
    chunk = lam_mp_request_chunk(flist->fl_pool, index);
    if ( 0 == chunk )
    {
        /* increment failure count */
        lam_sgl_inc_consec_fail(flist->fl_free_lists[index]);
        if ( lam_sgl_get_consec_fail(flist->fl_free_lists[index]) >=
             lam_sgl_get_max_consec_fail(flist->fl_free_lists[index]) )
        {
            *err = LAM_ERR_OUT_OF_RESOURCE;
            lam_err(("Error: List out of memory in pool for %s\n",
                     flist->fl_description));
        }
        else
        {
            *err = LAM_ERR_TEMP_OUT_OF_RESOURCE;
        }
        return chunk;
    }
    /* set consecutive failure count to 0 - if we fail, we don't get
       this far in the code. */
    lam_sgl_set_consec_fail(flist->fl_free_lists[index], 0);
#if LAM_ENABLE_MEM_PROFILE
    flist->fl_chunks_returned[index]++;
#endif
    *err = LAM_SUCCESS;
    return chunk;
}
/*
 * Pop the last item off the segment list for pool `pool_idx` and reset
 * that list's consecutive-failure counter on success.
 *
 * This file defines ROB_HASNT_FINISHED_THIS_YET to 0 when unset, so the
 * #if arm is currently compiled out and this is a stub returning NULL.
 */
static lam_list_item_t *lam_free_lists_request_elt(lam_free_lists_t *flist, int pool_idx)
{
#if ROB_HASNT_FINISHED_THIS_YET
    lam_dbl_list_t *seg_list = &(flist->fl_free_lists[pool_idx]->sgl_list);
    volatile lam_list_item_t *elt = lam_dbl_get_last(seg_list);
    if ( elt )
        lam_sgl_set_consec_fail(seg_list, 0);
    return elt;
#else
    return NULL;
#endif
}
/*
 * Carve a freshly obtained pool chunk into fl_elt_per_chunk elements of
 * fl_elt_size bytes and push them all onto the segment list for
 * `pool_idx`. The chunk must have come from flist->fl_pool.
 */
static void lam_free_lists_append(lam_free_lists_t *flist, void *chunk, int pool_idx)
{
    /* ASSERT: mp_chunk_sz >= fl_elt_per_chunk * fl_elt_size */
    /* push items onto list */
    lam_sgl_append_elt_chunk(flist->fl_free_lists[pool_idx],
                             chunk, lam_mp_get_chunk_size(flist->fl_pool),
                             flist->fl_elt_per_chunk, flist->fl_elt_size);
}
/*
 * Grow the free list for pool `pool_idx` by one pool chunk: obtain the
 * chunk, optionally bind its memory affinity, run the element-class
 * constructor over each element slot, and push the elements onto the
 * segment list.
 *
 * @return LAM_SUCCESS on success; an error code otherwise.
 *
 * Fix vs. original: when lam_free_lists_get_mem_chunk returns NULL on a
 * path that did not set err, the original returned the stale
 * LAM_SUCCESS. A NULL chunk is now always reported as a failure.
 */
static int lam_free_lists_create_more_elts(lam_free_lists_t *flist, int pool_idx)
{
    int err = LAM_SUCCESS, desc;
    size_t len_added;
    char *current_loc;
    void *ptr = lam_free_lists_get_mem_chunk(flist, pool_idx, &len_added, &err);

    if (0 == ptr ) {
        lam_err(("Error: Can't get new elements for %s\n",
                 flist->fl_description));
        /* never report a failed chunk request as success */
        return (LAM_SUCCESS == err) ? LAM_ERROR : err;
    }
    /* attach memory affinity */
    if ( flist->fl_enforce_affinity )
    {
        if (!lam_set_affinity(ptr, len_added,
                              flist->fl_affinity[pool_idx]))
        {
            err = LAM_ERROR;
#ifdef _DEBUGQUEUES
            /* in debug builds an affinity failure is fatal for this call;
               otherwise we fall through and still use the memory */
            lam_err(("Error: Can't set memory policy (pool_idx=%d)\n",
                     pool_idx));
            return err;
#endif /* _DEBUGQUEUES */
        }
    }
    /* Construct new descriptors using placement new */
    current_loc = (char *) ptr;
    for (desc = 0; desc < flist->fl_elt_per_chunk; desc++)
    {
        STATIC_INIT(*(lam_list_item_t *)current_loc, flist->fl_elt_cls);
        current_loc += flist->fl_elt_size;
    }
    /* push chunk of memory onto the list */
    lam_free_lists_append(flist, ptr, pool_idx);
    return err;
}
/*
 * Fetch a free-list item for pool `index`, growing the list when it is
 * empty and growth is permitted.
 *
 * @param error  out: LAM_SUCCESS, LAM_ERR_TEMP_OUT_OF_RESOURCE, or a
 *               failure code from list growth
 * @return item pointer, or NULL on failure
 *
 * Fixes vs. original (inside the #if arm):
 *  - removed the local `int error;` which redeclared the `int *error`
 *    parameter (a compile error once this arm is enabled); the status
 *    is now written through the parameter as callers expect;
 *  - the retry loop condition compared nothing: `(LAM_SUCCESS)` is a
 *    constant -- it now tests `LAM_SUCCESS == *error`;
 *  - the volatile qualifier is cast away explicitly on return;
 *  - the stub arm now sets *error instead of leaving it uninitialized.
 */
lam_list_item_t *lam_free_lists_get_elt(lam_free_lists_t *flist, int index, int *error)
{
#if ROB_HASNT_FINISHED_THIS_YET
    volatile lam_list_item_t *elem = lam_free_lists_request_elt(flist, index);

    if ( elem )
    {
        *error = LAM_SUCCESS;
    }
    else if ( lam_sgl_get_consec_fail(&(flist->fl_free_lists[index]->sgl_list))
              < flist->fl_threshold_grow )
    {
        /* below the growth threshold: report a transient shortage */
        *error = LAM_ERR_TEMP_OUT_OF_RESOURCE;
    }
    else
    {
        *error = LAM_SUCCESS;
        while ( (LAM_SUCCESS == *error) && (0 == elem) &&
                (flist->fl_retry_more_resources) )
        {
            *error = lam_free_lists_create_more_elts(flist, index);
            /* get element if managed to add resources to the list */
            if ( LAM_SUCCESS == *error )
            {
                elem = lam_free_lists_request_elt(flist, index);
            }
        }
        if ( (LAM_ERR_OUT_OF_RESOURCE == *error)
             || (LAM_ERR_FATAL == *error) )
        {
            return 0;
        }
    }
#if LAM_ENABLE_MEM_PROFILE
    flist->fl_elt_out[index]++;
    flist->fl_elt_sum[index] += flist->fl_elt_out[index];
    flist->fl_nevents[index]++;
    if (flist->fl_elt_max[index] < flist->fl_elt_out[index])
    {
        flist->fl_elt_max[index] = flist->fl_elt_out[index];
    }
#endif
    return (lam_list_item_t *)elem;
#else
    /* not implemented yet */
    *error = LAM_ERROR;
    return NULL;
#endif
}
/*
 * Return an item to the free list for pool `index`.
 *
 * Appends the item to the segment list with memory barriers on either
 * side of the append, and decrements the outstanding-element profiling
 * counter when profiling is compiled in.
 *
 * ROB_HASNT_FINISHED_THIS_YET defaults to 0 in this file, so this is
 * currently a stub that always returns LAM_ERROR.
 */
int lam_free_lists_return_elt(lam_free_lists_t *flist, int index, lam_list_item_t *item)
{
#if ROB_HASNT_FINISHED_THIS_YET
    mb();
    lam_dbl_append(&(flist->fl_free_lists[index]->sgl_list), item);
    mb();
#if LAM_ENABLE_MEM_PROFILE
    flist->fl_elt_out[index]--;
#endif
    return LAM_SUCCESS;
#else
    return LAM_ERROR;
#endif
}

87
src/lam/mem/free_lists.h Обычный файл
Просмотреть файл

@ -0,0 +1,87 @@
/*
* $HEADER$
*/
#ifndef LAM_FREE_LISTS_H
#define LAM_FREE_LISTS_H
#include "lam_config.h"
#include "lam/lfc/list.h"
#include "lam/threads/mutex.h"
#include "lam/mem/seg_list.h"
#include "lam/mem/mem_pool.h"
/*
* Memory affinity is almost certainly an int everywhere, but let's
* make it a typedef in case we need to make it OS dependenent
* sometime...
*/
typedef int lam_affinity_t;
/*
 * A set of free lists of reusable elements, one segment list per pool
 * index, all carved from a shared lam_mem_pool_t.
 *
 * Fix vs. original: the trailing profiling counters were guarded by
 * LAM_ENABLE_DEBUG here but accessed under LAM_ENABLE_MEM_PROFILE in
 * free_lists.c, which breaks the build whenever the two macros differ.
 * The header now uses the same guard as the implementation.
 */
struct lam_free_lists_t
{
    lam_object_t super;             /* base object (must be first) */
    int fl_is_shared;               /* lists/pool live in shared memory? */
    lam_mem_pool_t *fl_pool;        /* source of chunk allocations */
    const char *fl_description;     /* label used in error messages */
    int fl_nlists;                  /* number of per-pool segment lists */
    int fl_elt_per_chunk;           /* elements carved from each pool chunk */
    size_t fl_elt_size;             /* size in bytes of one element */
    lam_seg_list_t **fl_free_lists; /* one segment list per pool index */
    int fl_retry_more_resources;    /* retry growth on transient failure? */
    int fl_enforce_affinity;        /* apply memory affinity to new chunks? */
    lam_affinity_t *fl_affinity;    /* array of lam_affinity_t */
    int fl_threshold_grow;          /* consec-failure count that triggers growth */
    lam_class_info_t *fl_elt_cls;   /* this will be used to create new free list elements. */
    lam_mutex_t fl_lock;            /* protects list configuration */

    /* for mem profiling -- NOTE(review): the first four counters are
       unconditional here but only ever touched under
       LAM_ENABLE_MEM_PROFILE in free_lists.c; confirm whether they
       should be guarded as well */
    int *fl_elt_out;
    int *fl_elt_max;
    int *fl_elt_sum;
    int *fl_nevents;
#if LAM_ENABLE_MEM_PROFILE
    int *fl_chunks_req;
    int *fl_chunks_returned;
#endif
};
typedef struct lam_free_lists_t lam_free_lists_t;
extern lam_class_info_t lam_free_lists_cls;
void lam_free_lists_init(lam_free_lists_t *flist);
void lam_free_lists_destroy(lam_free_lists_t *flist);
/* lam_frl_init must have been called prior to calling this function */
int lam_free_lists_init_with(lam_free_lists_t *flist,
int nlists,
int pages_per_list,
size_t chunk_size,
size_t page_size,
size_t element_size,
int min_pages_per_list,
int max_pages_per_list,
int max_consec_req_fail,
const char *description,
bool retry_for_more_resources,
lam_affinity_t *affinity,
bool enforce_affinity,
lam_mem_pool_t *pool);
lam_list_item_t *lam_free_lists_get_elt(lam_free_lists_t *flist, int index, int *error);
int lam_free_lists_return_elt(lam_free_lists_t *flist, int index, lam_list_item_t *item);
/*
* Accessor functions
*/
int lam_free_lists_get_thresh_grow(lam_free_lists_t *flist);
void lam_free_lists_set_thresh_grow(lam_free_lists_t *flist, int to_grow);
#endif

Просмотреть файл

@ -125,7 +125,7 @@ extern lam_class_info_t fixed_mem_pool_cls;
void lam_fmp_init(lam_fixed_mpool_t *pool);
void lam_fmp_destroy(lam_fixed_mpool_t *pool);
int lam_fmp_init_with(lam_fixed_mpool_t *pool, ssize_t initial_allocation,
int lam_fmp_init_with(lam_fixed_mpool_t *pool, ssize_t initial_allocation,
ssize_t min_allocation_size,
int n_pools, int n_array_elements_to_add, int apply_mem_affinity);
void *lam_fmp_get_mem_segment(lam_fixed_mpool_t *pool,

Просмотреть файл

@ -13,9 +13,11 @@
#include "lam/threads/mutex_spinlock.h"
#endif
extern bool lam_uses_threads;
static inline bool lam_use_threads(void) { return lam_uses_threads; }
static inline bool lam_use_threads(void)
{
extern bool lam_uses_threads;
return lam_uses_threads;
}
/*
* Lock macros

Просмотреть файл

@ -6,28 +6,14 @@
include $(top_lam_srcdir)/config/Makefile.options
SUBDIRS = src
DIST_SUBDIRS = config $(SUBDIRS)
EXTRA_DIST = VERSION
AM_CPPFLAGS = \
-I$(top_lam_builddir)/src/include \
-I$(top_lam_srcdir)/src \
-I$(top_lam_srcdir)/src/include
# Source code files
sources = \
comm.h \
proc.h \
ptl_array.h \
teg.h \
pml_teg_proc.c \
pml_teg_ptl_array.c \
pml_teg.c
# For static MCA modules, we have to make the output library here in
# the top-level directory, and it has to be named
# libmca_{library}_{type}_{name}.la. For dynamic modules, we build
# mca_{type}_{name}.la and install them into $(libdir)/lam.
# According to the MCA spec, we have to make the output library here
# in the top-level directory, and it has to be named
# liblam_ssi_coll_lam_basic.la
if LAM_BUILD_LOADABLE_MODULE
module_noinst =
@ -45,4 +31,3 @@ mca_pml_teg_la_LDFLAGS = -module -avoid-version
noinst_LTLIBRARIES = $(module_noinst)
libmca_mpi_pml_teg_la_SOURCES = $(sources)
libmca_mpi_pml_teg_la_LDFLAGS = -module -avoid-version

Просмотреть файл

@ -5,6 +5,6 @@
# Specific to this module
PARAM_INIT_FILE=pml_teg.c
PARAM_CONFIG_HEADER_FILE="teg_config.h"
PARAM_CONFIG_FILES="Makefile"
PARAM_INIT_FILE=src/pml_teg.c
PARAM_CONFIG_HEADER_FILE="src/teg_config.h"
PARAM_CONFIG_FILES="Makefile src/Makefile"

Просмотреть файл

@ -6,9 +6,11 @@
#define LAM_MCA_PTL_H
#include "mca/mca.h"
#include "proc.h"
#include "lam.h"
#include "lam/lam.h"
#include "lam/lfc/list.h"
#include "mpi/proc/proc.h"
#include "mca/mpi/pml/pml.h"
#include "mca/mpi/pml/base/pml_base_sendreq.h"
/*
@ -22,8 +24,8 @@ typedef struct mca_ptl_1_0_0* (*mca_ptl_init_1_0_0_fn_t)();
* PTL action functions.
*/
typedef int (*mca_ptl_fragment_fn_t)(mca_ptl_send_request_t*, size_t);
typedef int (*mca_ptl_progress_fn_t)(mca_time_t);
typedef int (*mca_ptl_fragment_fn_t)(mca_pml_base_send_request_t*, size_t);
typedef int (*mca_ptl_progress_fn_t)(mca_pml_base_tstamp_t);
/*
* Struct used to pass PTL module information from the each PTL
@ -31,7 +33,9 @@ typedef int (*mca_ptl_progress_fn_t)(mca_time_t);
*/
typedef struct mca_ptl_module_1_0_0 {
mca_1_0_0_t super;
mca_base_module_t ptlm_version;
mca_base_module_data_1_0_0_t ptlm_data;
mca_ptl_query_fn_t ptlm_query;
mca_ptl_init_1_0_0_fn_t ptlm_init;
} mca_ptl_module_1_0_0_t;
@ -59,11 +63,11 @@ typedef struct mca_ptl_1_0_0 {
/*
* Set the default type to use version 1.1.0 of the PTL
* Set the default type to use version 1.0.0 of the PTL
*/
typedef mca_ptl_module_1_1_0_t mca_ptl_module_t;
typedef mca_ptl_1_1_0_t mca_ptl_t;
typedef mca_ptl_module_1_0_0_t mca_ptl_module_t;
typedef mca_ptl_1_0_0_t mca_ptl_t;
/*
@ -93,6 +97,6 @@ extern lam_list_t *mca_ptl_base_available;
* effectively be filled in by configure.
*/
extern const mca_t **mca_ptl_modules;
extern const mca_base_module_t **mca_ptl_base_modules;
#endif /* LAM_MCA_PTL_H */