- moved mem/free_list.h to class/ompi_free_list.h
- moved mem/malloc.h to util/malloc.h
- added src/mca/mpool component type

This commit was SVN r1274.
This commit is contained in:
Tim Woodall 2004-06-15 19:07:45 +00:00
parent 31ccd700cc
commit abba2b038c
50 changed files with 410 additions and 2575 deletions


@ -43,7 +43,7 @@ unset msg
# The list of MCA types (it's fixed)
AC_MSG_CHECKING([for MCA types])
-found_types="oob pcm registry coll io allocator one pml ptl topo"
+found_types="allocator coll io mpool oob one pcm pml ptl registry topo"
AC_MSG_RESULT([$found_types])
# Get the list of all the non-configure MCA modules that were found by
@ -262,6 +262,11 @@ AC_SUBST(MCA_io_STATIC_SUBDIRS)
AC_SUBST(MCA_io_DYNAMIC_SUBDIRS)
AC_SUBST(MCA_io_STATIC_LTLIBS)
+AC_SUBST(MCA_mpool_ALL_SUBDIRS)
+AC_SUBST(MCA_mpool_STATIC_SUBDIRS)
+AC_SUBST(MCA_mpool_DYNAMIC_SUBDIRS)
+AC_SUBST(MCA_mpool_STATIC_LTLIBS)
AC_SUBST(MCA_one_ALL_SUBDIRS)
AC_SUBST(MCA_one_STATIC_SUBDIRS)
AC_SUBST(MCA_one_DYNAMIC_SUBDIRS)


@ -780,7 +780,6 @@ AC_CONFIG_FILES([
src/ctnetwork/Makefile
src/class/Makefile
-src/mem/Makefile
src/os/Makefile
src/os/cygwin/Makefile
src/os/darwin/Makefile
@ -830,16 +829,18 @@ AC_CONFIG_FILES([
src/mca/allocator/base/Makefile
src/mca/coll/Makefile
src/mca/coll/base/Makefile
-src/mca/topo/Makefile
-src/mca/topo/base/Makefile
src/mca/io/Makefile
src/mca/io/base/Makefile
+src/mca/mpool/Makefile
+src/mca/mpool/base/Makefile
src/mca/one/Makefile
src/mca/one/base/Makefile
src/mca/pml/Makefile
src/mca/pml/base/Makefile
src/mca/ptl/Makefile
src/mca/ptl/base/Makefile
+src/mca/topo/Makefile
+src/mca/topo/base/Makefile
src/mca/gpr/Makefile
src/mpi/Makefile


@ -56,12 +56,12 @@ extern bool ompi_mpi_param_check;
*/
#if OMPI_ENABLE_MEM_DEBUG && defined(OMPI_BUILDING) && OMPI_BUILDING
-/* It is safe to include mem/malloc.h here because a) it will only
+/* It is safe to include util/malloc.h here because a) it will only
happen when we are building OMPI and therefore have a full OMPI
source tree [including headers] available, and b) we guaranteed to
*not* to include anything else via mem/malloc.h, so we won't
have Cascading Includes Of Death. */
-#include "mem/malloc.h"
+#include "util/malloc.h"
#define malloc(size) ompi_malloc((size), __FILE__, __LINE__)
#define realloc(ptr, size) ompi_realloc((ptr), (size), __FILE__, __LINE__)
#define free(ptr) ompi_free((ptr), __FILE__, __LINE__)


@ -19,7 +19,6 @@ SUBDIRS = \
info \
class \
mca \
-mem \
mpi \
op \
os \
@ -46,7 +45,6 @@ libmpi_la_LIBADD = \
info/libinfo.la \
class/liblfc.la \
mca/libmca.la \
-mem/libmem.la \
mpi/libmpi_bindings.la \
op/libop.la \
proc/libproc.la \


@ -11,6 +11,7 @@ noinst_LTLIBRARIES = liblfc.la
headers = \
ompi_bitmap.h \
+ompi_free_list.h \
ompi_hash_table.h \
ompi_list.h \
ompi_object.h \
@ -19,6 +20,7 @@ headers = \
liblfc_la_SOURCES = \
$(headers) \
+ompi_free_list.c \
ompi_hash_table.c \
ompi_list.c \
ompi_object.c \



@ -10,6 +10,7 @@ SUBDIRS = \
coll \
common \
io \
+mpool \
one \
oob \
pcm \
@ -31,6 +32,7 @@ libmca_la_LIBADD = \
allocator/libmca_allocator.la \
base/libmca_base.la \
coll/libmca_coll.la \
+mpool/libmca_mpool.la \
oob/libmca_oob.la \
pcm/libmca_pcm.la \
pml/libmca_pml.la \


@ -1,28 +0,0 @@
#
# $HEADER$
#
include $(top_srcdir)/config/Makefile.options
SUBDIRS = base $(MCA_mem_STATIC_SUBDIRS)
DIST_SUBDIRS = base $(MCA_mem_ALL_SUBDIRS)
# Source code files
headers = mem.h
noinst_LTLIBRARIES = libmca_mem.la
libmca_mem_la_SOURCES =
libmca_mem_la_LIBADD = \
base/libmca_mem_base.la \
$(MCA_mem_STATIC_LTLIBS)
libmca_mem_la_DEPENDENCIES = $(libmca_mem_la_LIBADD)
# Conditionally install the header files
if WANT_INSTALL_HEADERS
ompidir = $(includedir)/ompi/mca/mem
ompi_HEADERS = $(headers)
else
ompidir = $(includedir)
endif


@ -1,48 +0,0 @@
/*
* $HEADER$
*/
/**
* @file
*/
#ifndef MCA_MEM_BASE_H
#define MCA_MEM_BASE_H
#include "ompi_config.h"
#include "class/ompi_list.h"
#include "mca/mca.h"
#include "mca/mem/mem.h"
struct mca_mem_base_selected_module_t {
ompi_list_item_t super;
mca_mem_base_module_t *pbsm_module;
mca_mem_t *pbsm_actions;
};
typedef struct mca_mem_base_selected_module_t mca_mem_base_selected_module_t;
/*
* Global functions for MCA: overall PTL open and close
*/
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
int mca_mem_base_open(void);
int mca_mem_base_select(bool *allow_multi_user_threads,
bool *have_hidden_threads);
int mca_mem_base_close(void);
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
/*
* Globals
*/
extern int mca_mem_base_output;
extern ompi_list_t mca_mem_base_modules_available;
extern ompi_list_t mca_mem_base_modules_initialized;
#endif /* MCA_MEM_BASE_H */


@ -1,211 +0,0 @@
/*
* $HEADER$
*/
#include "mca/mem/base/mem_base_allocator.h"
/**
* The define controls the size in bytes of the 1st bucket and hence every one
* afterwards.
*/
#define MCA_MEM_BUCKET_1_SIZE 8
/**
* This is the number of left bit shifts from 1 needed to get to the number of
* bytes in the initial memory buckets
*/
#define MCA_MEM_BUCKET_1_BITSHIFTS 3
/*
* Initializes the mca_mem_options_t data structure for the passed
* parameters.
*/
mca_mem_options_t * mca_mem_init(int num_buckets,
mca_mem_get_mem_fn_t get_mem_funct,
mca_mem_free_mem_fn_t free_mem_funct)
{
int i;
/* Create a new mca_mem_options struct */
size_t size = sizeof(mca_mem_options_t);
mca_mem_options_t * mem_options =
(mca_mem_options_t *)get_mem_funct(&size);
if(NULL == mem_options) {
return(NULL);
}
/* if a bad value is used for the number of buckets, default to 30 */
if(i <= 0) {
num_buckets = 30;
}
/* initialize the array of buckets */
size = sizeof(mca_mem_bucket_t) * num_buckets;
mem_options->buckets = (mca_mem_bucket_t*) get_mem_funct(&size);
if(NULL == mem_options->buckets) {
free_mem_funct(mem_options);
return(NULL);
}
for(i = 0; i < num_buckets; i++) {
mem_options->buckets[i].free_chunk = NULL;
mem_options->buckets[i].segment_head = NULL;
OBJ_CONSTRUCT(&(mem_options->buckets[i].lock), ompi_mutex_t);
}
mem_options->num_buckets = num_buckets;
mem_options->get_mem_fn = get_mem_funct;
mem_options->free_mem_fn = free_mem_funct;
return(mem_options);
}
/*
* Accepts a request for memory in a specific region defined by the
* mca_mem_options_t struct and returns a pointer to memory in that
* region or NULL if there was an error
*
*/
void * mca_mem_alloc(mca_mem_options_t * mem_options, size_t size)
{
int bucket_num = 0;
/* initialize for the later bit shifts */
size_t bucket_size = 1;
size_t allocated_size;
mca_mem_chunk_header_t * chunk;
mca_mem_chunk_header_t * first_chunk;
mca_mem_segment_head_t * segment_header;
/* add the size of the header into the amount we need to request */
size += sizeof(mca_mem_chunk_header_t);
/* figure out which bucket it will come from. */
while(size > MCA_MEM_BUCKET_1_SIZE) {
size >>= 1;
bucket_num++;
}
/* now that we know what bucket it will come from, we must get the lock */
THREAD_LOCK(&(mem_options->buckets[bucket_num].lock));
/* see if there is already a free chunk */
if(NULL != mem_options->buckets[bucket_num].free_chunk) {
chunk = mem_options->buckets[bucket_num].free_chunk;
mem_options->buckets[bucket_num].free_chunk = chunk->u.next_free;
chunk->u.bucket = bucket_num;
/* go past the header */
chunk += 1;
/*release the lock */
THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
return((void *) chunk);
}
/* figure out the size of bucket we need */
bucket_size <<= (bucket_num + MCA_MEM_BUCKET_1_BITSHIFTS);
allocated_size = bucket_size;
/* we have to add in the size of the segment header into the
* amount we need to request */
allocated_size += sizeof(mca_mem_segment_head_t);
/* attempt to get the memory */
segment_header = (mca_mem_segment_head_t *)
mem_options->get_mem_fn(&allocated_size);
if(NULL == segment_header) {
/* release the lock */
THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
return(NULL);
}
/* if were allocated more memory then we actually need, then we will try to
* break it up into multiple chunks in the current bucket */
allocated_size -= (sizeof(mca_mem_segment_head_t) + bucket_size);
chunk = first_chunk = segment_header->first_chunk =
(mca_mem_chunk_header_t *) (segment_header + 1);
/* add the segment into the segment list */
segment_header->next_segment = mem_options->buckets[bucket_num].segment_head;
mem_options->buckets[bucket_num].segment_head = segment_header;
if(allocated_size >= bucket_size) {
mem_options->buckets[bucket_num].free_chunk =
(mca_mem_chunk_header_t *) ((char *) chunk + bucket_size);
chunk->next_in_segment = (mca_mem_chunk_header_t *)
((char *)chunk + bucket_size);
while(allocated_size >= bucket_size) {
chunk = (mca_mem_chunk_header_t *) ((char *) chunk + bucket_size);
chunk->u.next_free = (mca_mem_chunk_header_t *)
((char *) chunk + bucket_size);
chunk->next_in_segment = chunk->u.next_free;
allocated_size -= bucket_size;
}
chunk->next_in_segment = first_chunk;
chunk->u.next_free = NULL;
} else {
first_chunk->next_in_segment = first_chunk;
}
first_chunk->u.bucket = bucket_num;
THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
/* return the memory moved past the header */
return((void *) (first_chunk + 1));
}
/*
* Frees the passed region of memory
*
*/
void mca_mem_free(mca_mem_options_t * mem_options, void * ptr)
{
mca_mem_chunk_header_t * chunk = (mca_mem_chunk_header_t *) ptr - 1;
int bucket_num = chunk->u.bucket;
THREAD_LOCK(&(mem_options->buckets[bucket_num].lock));
chunk->u.next_free = mem_options->buckets[bucket_num].free_chunk;
mem_options->buckets[bucket_num].free_chunk = chunk;
THREAD_UNLOCK(&(mem_options->buckets[bucket_num].lock));
}
/*
* Frees all the memory from all the buckets back to the system. Note that
* this function only frees memory that was previously freed with
* mca_mem_free().
*
*/
void mca_mem_cleanup(mca_mem_options_t * mem_options)
{
int i;
mca_mem_chunk_header_t * next_chunk;
mca_mem_chunk_header_t * chunk;
mca_mem_chunk_header_t * first_chunk;
mca_mem_segment_head_t ** segment_header;
mca_mem_segment_head_t * segment;
bool empty = true;
for(i = 0; i < mem_options->num_buckets; i++) {
THREAD_LOCK(&(mem_options->buckets[i].lock));
segment_header = &(mem_options->buckets[i].segment_head);
/* traverse the list of segment headers until we hit NULL */
while(NULL != *segment_header) {
first_chunk = (*segment_header)->first_chunk;
chunk = first_chunk;
/* determine if the segment is free */
do
{
if(chunk->u.bucket == i) {
empty = false;
}
chunk = chunk->next_in_segment;
} while(empty && (chunk != first_chunk));
if(empty) {
chunk = first_chunk;
/* remove the chunks from the free list */
do
{
if(mem_options->buckets[i].free_chunk == chunk) {
mem_options->buckets[i].free_chunk = chunk->u.next_free;
} else {
next_chunk = mem_options->buckets[i].free_chunk;
while(next_chunk->u.next_free != chunk) {
next_chunk = next_chunk->u.next_free;
}
next_chunk->u.next_free = chunk->u.next_free;
}
} while((chunk = chunk->next_in_segment) != first_chunk);
/* set the segment list to point to the next segment */
segment = *segment_header;
*segment_header = segment->next_segment;
/* free the memory */
mem_options->free_mem_fn(segment);
} else {
/* go to next segment */
segment_header = &((*segment_header)->next_segment);
}
empty = true;
}
/* relese the lock on the bucket */
THREAD_UNLOCK(&(mem_options->buckets[i].lock));
}
}


@ -1,208 +0,0 @@
/**
* $HEADER$
*/
/** @file
* A generic memory allocator.
*
*
**/
#ifndef MEM_BASE_ALLOCATOR_H
#define MEM_BASE_ALLOCATOR_H
#include <stdlib.h>
#include <stdbool.h>
#include "threads/mutex.h"
#include "class/ompi_object.h"
/**
* Typedef for a pointer to a function to get more memory. This function
* accepts a pointer to the minimum size that is needed. This function is free
* to allocate more memory than is necessary, but it must return the amount
* allocated in the size_t variable. If it cannot allocate at least the
* amount requested, it MUST return NULL.
*/
typedef void*(*mca_mem_get_mem_fn_t)(size_t *);
/**
* Typedef for a pointer to a function to free memory
*/
typedef void(*mca_mem_free_mem_fn_t)(void *);
/**
* Typedef so we can add a pointer to mca_mem_chunk_header_t in
* mca_mem_chunk_header_t
*/
typedef struct mca_mem_chunk_header_t * mca_mem_chunk_header_ptr_t;
/**
* Structure for the header of each memory chunk
*/
struct mca_mem_chunk_header_t {
mca_mem_chunk_header_ptr_t next_in_segment; /**< The next chunk in the
memory segment */
/**
* Union which holds either a pointer to the next free chunk
* or the bucket number
*/
union u {
mca_mem_chunk_header_ptr_t next_free; /**< if the chunk is free this
will point to the next free
chunk in the bucket */
int bucket; /**< the bucket number it belongs to */
} u; /**< the union */
};
/**
* Typedef so we don't have to use struct
*/
typedef struct mca_mem_chunk_header_t mca_mem_chunk_header_t;
/**
* Typedef so we can reference a pointer to mca_mem_segment_head_t from itself
*/
typedef struct mca_mem_segment_head_t * mca_mem_segment_head_ptr;
/**
* Structure that heads each segment
*/
struct mca_mem_segment_head_t {
mca_mem_chunk_header_t * first_chunk; /**< the first chunk of the header */
mca_mem_segment_head_ptr next_segment; /**< the next segment in the
bucket */
};
/**
* Typedef so we don't have to use struct
*/
typedef struct mca_mem_segment_head_t mca_mem_segment_head_t;
/**
* Structure for each bucket
*/
struct mca_mem_bucket_t {
mca_mem_chunk_header_t * free_chunk; /**< the first free chunk of memory */
ompi_mutex_t lock; /**< the lock on the bucket */
mca_mem_segment_head_t * segment_head; /**< the list of segment headers */
};
/**
* Typedef so we don't have to use struct
*/
typedef struct mca_mem_bucket_t mca_mem_bucket_t;
/**
* Structure that holds the necessary information for each area of memory
*/
struct mca_mem_options_t {
mca_mem_bucket_t * buckets; /**< the array of buckets */
int num_buckets; /**< the number of buckets */
mca_mem_get_mem_fn_t get_mem_fn; /**< pointer to the function to get
more memory */
mca_mem_free_mem_fn_t free_mem_fn; /**< pointer to the function to free
memory */
};
/**
* Typedef so we don't have to use struct
*/
typedef struct mca_mem_options_t mca_mem_options_t;
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
/**
* Initializes the mca_mem_options_t data structure for the passed
* parameters.
* @param numBuckets The number of buckets the allocator will use
* @param get_mem_funct A pointer to the function that the allocator
* will use to get more memory
* @param free_mem_funct A pointer to the function that the allocator
* will use to free memory
*
* @retval Pointer to the initialized mca_mem_options_t structure
* @retval NULL if there was an error
*/
mca_mem_options_t * mca_mem_init(int num_buckets,
mca_mem_get_mem_fn_t get_mem_funct,
mca_mem_free_mem_fn_t free_mem_funct);
/**
* Accepts a request for memory in a specific region defined by the
* mca_mem_options_t struct and returns a pointer to memory in that
* region or NULL if there was an error
*
* @param mem_options A pointer to the appropriate struct for the area of
* memory.
* @param size The size of the requested area of memory
*
* @retval Pointer to the area of memory if the allocation was successful
* @retval NULL if the allocation was unsuccessful
*
*/
void * mca_mem_alloc(mca_mem_options_t * mem_options, size_t size);
/**
* NOT YET IMPLEMENTED.
* Accepts a request for memory in a specific region defined by the
* mca_mem_options_t struct and aligned by the specified amount and returns a
* pointer to memory in that region or NULL if there was an error
*
* @param mem_options A pointer to the appropriate struct for the area of
* memory.
* @param size The size of the requested area of memory
* @param alignment The requested alignment of the new area of memory. This
* MUST be a power of 2. If it is 0 then the memory is aligned on a page
* boundry
*
* @retval Pointer to the area of memory if the allocation was successful
* @retval NULL if the allocation was unsuccessful
*
*/
void * mca_mem_alloc_align(mca_mem_options_t * mem_options, size_t size,
int alignment);
/**
* NOT YET IMPLEMENTED.
* Attempts to resize the passed region of memory into a larger or a smaller
* region.
*
* @param mem_options A pointer to the appropriate struct for the area of
* memory.
* @param size The size of the requested area of memory
* @param ptr A pointer to the region of memory to be resized
*
* @retval Pointer to the area of memory if the reallocation was successful
* @retval NULL if the allocation was unsuccessful
*
*/
void * mca_mem_realloc(mca_mem_options_t * mem_options, size_t size,
void * ptr);
/**
* Frees the passed region of memory
*
* @param mem_options A pointer to the appropriate struct for the area of
* memory.
* @param ptr A pointer to the region of memory to be freed
*
* @retval None
*
*/
void mca_mem_free(mca_mem_options_t * mem_options, void * ptr);
/**
* Frees all the memory from all the buckets back to the system. Note that
* this function only frees memory that was previously freed with
* mca_mem_free().
*
* @param mem_options A pointer to the appropriate struct for the area of
* memory.
*
* @retval None
*
*/
void mca_mem_cleanup(mca_mem_options_t * mem_options);
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
#endif /* MEM_BASE_ALLOCATOR */
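Aside (illustrative only, not part of this commit): the removed bucket allocator above is driven through mca_mem_init()/mca_mem_alloc()/mca_mem_free() with two caller-supplied callbacks. A minimal malloc-backed sketch might look like the following; example_get_mem, example_free_mem, and example_usage are made-up names for the example.

#include <stdlib.h>
#include "mca/mem/base/mem_base_allocator.h"

/* hypothetical callback: hand out exactly the minimum requested amount;
   *size already holds the number of bytes obtained */
static void *example_get_mem(size_t *size)
{
    return malloc(*size);
}

/* hypothetical callback: give a segment back to the system */
static void example_free_mem(void *ptr)
{
    free(ptr);
}

static void example_usage(void)
{
    /* 30 power-of-two buckets, the first one MCA_MEM_BUCKET_1_SIZE (8) bytes */
    mca_mem_options_t *pool = mca_mem_init(30, example_get_mem, example_free_mem);
    if (NULL != pool) {
        void *buf = mca_mem_alloc(pool, 128);  /* rounded up to a bucket size */
        mca_mem_free(pool, buf);               /* back onto the bucket free list */
        mca_mem_cleanup(pool);                 /* return fully-freed segments */
    }
}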


@ -1,45 +0,0 @@
/*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "include/constants.h"
#include "mca/mca.h"
#include "mca/base/base.h"
#include "mca/mem/mem.h"
#include "mca/mem/base/base.h"
int mca_mem_base_close(void)
{
ompi_list_item_t *item;
mca_mem_base_selected_module_t *sm;
/* Finalize all the mem modules and free their list items */
for (item = ompi_list_remove_first(&mca_mem_base_modules_initialized);
NULL != item;
item = ompi_list_remove_first(&mca_mem_base_modules_initialized)) {
sm = (mca_mem_base_selected_module_t *) item;
/* Blatently ignore the return code (what would we do to recover,
anyway? This module is going away, so errors don't matter
anymore) */
sm->pbsm_actions->mem_finalize(sm->pbsm_actions);
free(sm);
}
/* Close all remaining available modules (may be one if this is a
OMPI RTE program, or [possibly] multiple if this is ompi_info) */
mca_base_modules_close(mca_mem_base_output,
&mca_mem_base_modules_available, NULL);
/* All done */
return OMPI_SUCCESS;
}

src/mca/mpool/Makefile.am (new file)

@ -0,0 +1,28 @@
#
# $HEADER$
#
include $(top_srcdir)/config/Makefile.options
SUBDIRS = base $(MCA_mpool_STATIC_SUBDIRS)
DIST_SUBDIRS = base $(MCA_mpool_ALL_SUBDIRS)
# Source code files
headers = mpool.h
noinst_LTLIBRARIES = libmca_mpool.la
libmca_mpool_la_SOURCES =
libmca_mpool_la_LIBADD = \
base/libmca_mpool_base.la \
$(MCA_mpool_STATIC_LTLIBS)
libmca_mpool_la_DEPENDENCIES = $(libmca_mpool_la_LIBADD)
# Conditionally install the header files
if WANT_INSTALL_HEADERS
ompidir = $(includedir)/ompi/mca/mpool
ompi_HEADERS = $(headers)
else
ompidir = $(includedir)
endif


@ -4,7 +4,7 @@
include $(top_srcdir)/config/Makefile.options
-noinst_LTLIBRARIES = libmca_mem_base.la
+noinst_LTLIBRARIES = libmca_mpool_base.la
# For VPATH builds, have to specify where static-modules.h will be found
@ -13,20 +13,18 @@ AM_CPPFLAGS = -I$(top_builddir)/src
# Source code files
headers = \
-base.h \
-mem_base_allocator.h
+base.h
-libmca_mem_base_la_SOURCES = \
+libmca_mpool_base_la_SOURCES = \
$(headers) \
-mem_base_open.c \
+mpool_base_open.c \
-mem_base_close.c \
+mpool_base_close.c \
-mem_base_allocator.c \
-mem_base_select.c
+mpool_base_select.c
# Conditionally install the header files
if WANT_INSTALL_HEADERS
-ompidir = $(includedir)/mca/mem/base
+ompidir = $(includedir)/mca/mpool/base
ompi_HEADERS = $(headers)
else
ompidir = $(includedir)

src/mca/mpool/base/base.h (new file)

@ -0,0 +1,48 @@
/*
* $HEADER$
*/
/**
* @file
*/
#ifndef MCA_MEM_BASE_H
#define MCA_MEM_BASE_H
#include "ompi_config.h"
#include "class/ompi_list.h"
#include "mca/mca.h"
#include "mca/mpool/mpool.h"
struct mca_mpool_base_selected_module_t {
ompi_list_item_t super;
mca_mpool_base_module_t *pbsm_module;
mca_mpool_t *pbsm_actions;
};
typedef struct mca_mpool_base_selected_module_t mca_mpool_base_selected_module_t;
/*
* Global functions for MCA: overall PTL open and close
*/
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
int mca_mpool_base_open(void);
int mca_mpool_base_select(bool *allow_multi_user_threads);
int mca_mpool_base_close(void);
void* mca_mpool_base_is_registered(void* addr, size_t size);
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
/*
* Globals
*/
extern int mca_mpool_base_output;
extern ompi_list_t mca_mpool_base_modules_available;
extern ompi_list_t mca_mpool_base_modules_initialized;
#endif /* MCA_MEM_BASE_H */

src/mca/mpool/base/mpool_base_close.c (new file)

@ -0,0 +1,45 @@
/*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "include/constants.h"
#include "mca/mca.h"
#include "mca/base/base.h"
#include "mca/mpool/mpool.h"
#include "mca/mpool/base/base.h"
int mca_mpool_base_close(void)
{
ompi_list_item_t *item;
mca_mpool_base_selected_module_t *sm;
/* Finalize all the mpool modules and free their list items */
for (item = ompi_list_remove_first(&mca_mpool_base_modules_initialized);
NULL != item;
item = ompi_list_remove_first(&mca_mpool_base_modules_initialized)) {
sm = (mca_mpool_base_selected_module_t *) item;
/* Blatently ignore the return code (what would we do to recover,
anyway? This module is going away, so errors don't matter
anymore) */
sm->pbsm_actions->mpool_finalize(sm->pbsm_actions);
free(sm);
}
/* Close all remaining available modules (may be one if this is a
OMPI RTE program, or [possibly] multiple if this is ompi_info) */
mca_base_modules_close(mca_mpool_base_output,
&mca_mpool_base_modules_available, NULL);
/* All done */
return OMPI_SUCCESS;
}


@ -8,8 +8,8 @@
#include "mca/mca.h" #include "mca/mca.h"
#include "mca/base/base.h" #include "mca/base/base.h"
#include "mca/mem/mem.h" #include "mca/mpool/mpool.h"
#include "mca/mem/base/base.h" #include "mca/mpool/base/base.h"
/* /*
@ -18,36 +18,36 @@
* module's public mca_base_module_t struct.
*/
-#include "mca/mem/base/static-modules.h"
+#include "mca/mpool/base/static-modules.h"
/*
* Global variables
*/
-int mca_mem_base_output = -1;
+int mca_mpool_base_output = -1;
-ompi_list_t mca_mem_base_modules_available;
+ompi_list_t mca_mpool_base_modules_available;
-ompi_list_t mca_mem_base_modules_initialized;
+ompi_list_t mca_mpool_base_modules_initialized;
/**
* Function for finding and opening either all MCA modules, or the one
* that was specifically requested via a MCA parameter.
*/
-int mca_mem_base_open(void)
+int mca_mpool_base_open(void)
{
/* Open up all available modules */
if (OMPI_SUCCESS !=
-mca_base_modules_open("mem", 0, mca_mem_base_static_modules,
+mca_base_modules_open("mpool", 0, mca_mpool_base_static_modules,
-&mca_mem_base_modules_available)) {
+&mca_mpool_base_modules_available)) {
return OMPI_ERROR;
}
-/* Initialize the list so that in mca_mem_base_close(), we can
+/* Initialize the list so that in mca_mpool_base_close(), we can
iterate over it (even if it's empty, as in the case of
ompi_info) */
-OBJ_CONSTRUCT(&mca_mem_base_modules_initialized, ompi_list_t);
+OBJ_CONSTRUCT(&mca_mpool_base_modules_initialized, ompi_list_t);
/* All done */


@ -7,59 +7,59 @@
#include "runtime/runtime.h" #include "runtime/runtime.h"
#include "mca/mca.h" #include "mca/mca.h"
#include "mca/base/base.h" #include "mca/base/base.h"
#include "mca/ptl/ptl.h" #include "mca/mpool/mpool.h"
#include "mca/ptl/base/base.h" #include "mca/mpool/base/base.h"
/** /**
* Function for weeding out ptl modules that don't want to run. * Function for weeding out mpool modules that don't want to run.
* *
* Call the init function on all available modules to find out if they * Call the init function on all available modules to find out if they
* want to run. Select all modules that don't fail. Failing modules * want to run. Select all modules that don't fail. Failing modules
* will be closed and unloaded. The selected modules will be returned * will be closed and unloaded. The selected modules will be returned
* to the caller in a ompi_list_t. * to the caller in a ompi_list_t.
*/ */
int mca_ptl_base_select(bool *allow_multi_user_threads, int mca_mpool_base_select(bool *allow_multi_user_threads)
bool *have_hidden_threads)
{ {
int i, num_ptls; #if 0
int i, num_mpools;
bool user_threads, hidden_threads; bool user_threads, hidden_threads;
ompi_list_item_t *item; ompi_list_item_t *item;
mca_base_module_list_item_t *mli; mca_base_module_list_item_t *mli;
mca_ptl_base_module_t *module; mca_mpool_base_module_t *module;
mca_ptl_t **actions; mca_mpool_t **actions;
mca_ptl_base_selected_module_t *sm; mca_mpool_base_selected_module_t *sm;
/* Traverse the list of available modules; call their init /* Traverse the list of available modules; call their init
functions. */ functions. */
for (item = ompi_list_get_first(&mca_ptl_base_modules_available); for (item = ompi_list_get_first(&mca_mpool_base_modules_available);
ompi_list_get_end(&mca_ptl_base_modules_available) != item; ompi_list_get_end(&mca_mpool_base_modules_available) != item;
item = ompi_list_get_next(item)) { item = ompi_list_get_next(item)) {
mli = (mca_base_module_list_item_t *) item; mli = (mca_base_module_list_item_t *) item;
module = (mca_ptl_base_module_t *) mli->mli_module; module = (mca_mpool_base_module_t *) mli->mli_module;
ompi_output_verbose(10, mca_ptl_base_output, ompi_output_verbose(10, mca_mpool_base_output,
"select: initializing %s module %s", "select: initializing %s module %s",
module->ptlm_version.mca_type_name, module->mpoolm_version.mca_type_name,
module->ptlm_version.mca_module_name); module->mpoolm_version.mca_module_name);
if (NULL == module->ptlm_init) { if (NULL == module->mpoolm_init) {
ompi_output_verbose(10, mca_ptl_base_output, ompi_output_verbose(10, mca_mpool_base_output,
"select: no init function; ignoring module"); "select: no init function; ignoring module");
} else { } else {
actions = module->ptlm_init(&num_ptls, &user_threads, actions = module->mpoolm_init(&num_mpools, &user_threads,
&hidden_threads); &hidden_threads);
/* If the module didn't initialize, unload it */ /* If the module didn't initialize, unload it */
if (NULL == actions) { if (NULL == actions) {
ompi_output_verbose(10, mca_ptl_base_output, ompi_output_verbose(10, mca_mpool_base_output,
"select: init returned failure"); "select: init returned failure");
mca_base_module_repository_release((mca_base_module_t *) module); mca_base_module_repository_release((mca_base_module_t *) module);
ompi_output_verbose(10, mca_ptl_base_output, ompi_output_verbose(10, mca_mpool_base_output,
"select: module %s unloaded", "select: module %s unloaded",
module->ptlm_version.mca_module_name); module->mpoolm_version.mca_module_name);
} }
/* Otherwise, it initialized properly. Save it. */ /* Otherwise, it initialized properly. Save it. */
@ -68,18 +68,18 @@ int mca_ptl_base_select(bool *allow_multi_user_threads,
*allow_multi_user_threads |= user_threads;
*have_hidden_threads |= hidden_threads;
-ompi_output_verbose(10, mca_ptl_base_output,
+ompi_output_verbose(10, mca_mpool_base_output,
"select: init returned success");
-for (i = 0; i < num_ptls; ++i) {
+for (i = 0; i < num_mpools; ++i) {
-sm = malloc(sizeof(mca_ptl_base_selected_module_t));
+sm = malloc(sizeof(mca_mpool_base_selected_module_t));
if (NULL == sm) {
return OMPI_ERR_OUT_OF_RESOURCE;
}
OBJ_CONSTRUCT(sm, ompi_list_item_t);
sm->pbsm_module = module;
sm->pbsm_actions = actions[i];
-ompi_list_append(&mca_ptl_base_modules_initialized,
+ompi_list_append(&mca_mpool_base_modules_initialized,
(ompi_list_item_t*) sm);
}
free(actions);
@ -89,12 +89,12 @@ int mca_ptl_base_select(bool *allow_multi_user_threads,
/* Finished querying all modules. Check for the bozo case. */
-if (0 == ompi_list_get_size(&mca_ptl_base_modules_initialized)) {
+if (0 == ompi_list_get_size(&mca_mpool_base_modules_initialized)) {
/* JMS Replace with show_help */
-ompi_abort(1, "No ptl module available. This shouldn't happen.");
+ompi_abort(1, "No mpool module available. This shouldn't happen.");
}
/* All done */
+#endif
return OMPI_SUCCESS;
}


src/mca/mpool/mpool.h (new file)

@ -0,0 +1,70 @@
/**
* $HEADER$
*/
/**
* @file
*/
#ifndef MCA_MPOOL_H
#define MCA_MPOOL_H
#include "mca/mca.h"
struct mca_mpool_t;
/**
* allocate function typedef
*/
typedef void* (*mca_mpool_alloc_fn_t)(struct mca_mpool_t*, size_t size, size_t align);
/**
* realloc function typedef
*/
typedef void* (*mca_mpool_realloc_fn_t)(struct mca_mpool_t*, void* addr, size_t size);
/**
* free function typedef
*/
typedef void (*mca_mpool_free_fn_t)(struct mca_mpool_t*, void *);
/**
* register memory
*/
typedef void (*mca_mpool_register_fn_t)(struct mca_mpool_t*, void * addr, size_t size, void* user);
/**
* deregister memory
*/
typedef void (*mca_mpool_deregister_fn_t)(struct mca_mpool_t*, void * addr);
typedef int (*mca_mpool_base_finalize_fn_t)(
struct mca_mpool_t* mpool
);
struct mca_mpool_t {
/* interface functions */
mca_mpool_alloc_fn_t mpool_alloc;
mca_mpool_alloc_fn_t mpool_realloc;
mca_mpool_free_fn_t mpool_free;
mca_mpool_register_fn_t mpool_register;
mca_mpool_deregister_fn_t mpool_deregister;
mca_mpool_base_finalize_fn_t mpool_finalize;
};
typedef struct mca_mpool_t mca_mpool_t;
/**
* module initialization function
*/
typedef struct mca_mpool_t* (*mca_mpool_base_module_init_fn_t)(
bool *allow_multi_user_threads
);
struct mca_mpool_base_module_1_0_0_t {
mca_base_module_t mpool_version;
mca_base_module_data_1_0_0_t mpool_data;
mca_mpool_base_module_init_fn_t mpool_init;
};
typedef struct mca_mpool_base_module_1_0_0_t mca_mpool_base_module_t;
#endif /* MCA_MPOOL_H */
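Aside (illustrative only, not part of this commit): a component implementing the new mca_mpool_t interface supplies the function pointers declared above. The sketch below uses made-up names (my_mpool_alloc, my_mpool_free, my_mpool_finalize) and leaves the registration hooks NULL since it does not pin memory.

#include <stdlib.h>
#include "include/constants.h"
#include "mca/mpool/mpool.h"

/* hypothetical allocate: plain malloc, ignoring the alignment hint */
static void *my_mpool_alloc(struct mca_mpool_t *mpool, size_t size, size_t align)
{
    return malloc(size);
}

/* hypothetical free */
static void my_mpool_free(struct mca_mpool_t *mpool, void *addr)
{
    free(addr);
}

/* hypothetical finalize: nothing to tear down in this sketch */
static int my_mpool_finalize(struct mca_mpool_t *mpool)
{
    return OMPI_SUCCESS;
}

/* fill the interface in declaration order */
static mca_mpool_t my_mpool = {
    my_mpool_alloc,     /* mpool_alloc */
    NULL,               /* mpool_realloc */
    my_mpool_free,      /* mpool_free */
    NULL,               /* mpool_register */
    NULL,               /* mpool_deregister */
    my_mpool_finalize   /* mpool_finalize */
};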


@ -8,7 +8,6 @@
#include "mca/pcm/pcm.h" #include "mca/pcm/pcm.h"
#include "mca/pcm/rsh/src/pcm_rsh.h" #include "mca/pcm/rsh/src/pcm_rsh.h"
#include "mem/malloc.h"
#include "types.h" #include "types.h"
#include <stdio.h> #include <stdio.h>


@ -8,7 +8,7 @@
#include "constants.h" #include "constants.h"
#include "types.h" #include "types.h"
#include "mem/malloc.h" #include "util/malloc.h"
#include "class/ompi_list.h" #include "class/ompi_list.h"
#include "mca/mca.h" #include "mca/mca.h"
#include "mca/base/mca_base_param.h" #include "mca/base/mca_base_param.h"


@ -7,7 +7,7 @@
#ifndef MCA_PML_BASE_REQUEST_H
#define MCA_PML_BASE_REQUEST_H
-#include "mem/free_list.h"
+#include "class/ompi_free_list.h"
#include "request/request.h"
#include "datatype/datatype.h"
#include "communicator/communicator.h"


@ -10,7 +10,7 @@
#include "threads/thread.h" #include "threads/thread.h"
#include "threads/condition.h" #include "threads/condition.h"
#include "mem/free_list.h" #include "class/ompi_free_list.h"
#include "util/cmd_line.h" #include "util/cmd_line.h"
#include "request/request.h" #include "request/request.h"
#include "mca/pml/pml.h" #include "mca/pml/pml.h"


@ -10,7 +10,7 @@
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
-#include "mem/free_list.h"
+#include "class/ompi_free_list.h"
#include "event/event.h"
#include "mca/pml/pml.h"
#include "mca/ptl/ptl.h"


@ -8,7 +8,7 @@
#ifndef PTL_SELF_H_HAS_BEEN_INCLUDED
#define PTL_SELF_H_HAS_BEEN_INCLUDED
-#include "mem/free_list.h"
+#include "class/ompi_free_list.h"
#include "event/event.h"
#include "mca/pml/pml.h"
#include "mca/ptl/ptl.h"


@ -11,7 +11,7 @@
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
-#include "mem/free_list.h"
+#include "class/ompi_free_list.h"
#include "mca/pml/pml.h"
#include "mca/ptl/ptl.h"
#include "ptl_sm_mmap.h"


@ -10,7 +10,7 @@
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
-#include "mem/free_list.h"
+#include "class/ompi_free_list.h"
#include "event/event.h"
#include "mca/pml/pml.h"
#include "mca/ptl/ptl.h"


@ -165,11 +165,14 @@ int mca_ptl_tcp_peer_send(mca_ptl_base_peer_t* ptl_peer, mca_ptl_tcp_send_frag_t
if (NULL != ptl_peer->peer_send_frag) {
ompi_list_append(&ptl_peer->peer_frags, (ompi_list_item_t*)frag);
} else {
+#if 0
if(mca_ptl_tcp_send_frag_handler(frag, ptl_peer->peer_sd)) {
THREAD_UNLOCK(&ptl_peer->peer_send_lock);
mca_ptl_tcp_send_frag_progress(frag);
return rc;
-} else {
+} else
+#endif
+{
ptl_peer->peer_send_frag = frag;
ompi_event_add(&ptl_peer->peer_send_event, 0);
}


@ -10,7 +10,7 @@
#define MCA_TOPO_UNTIY_H
#include "threads/condition.h"
-#include "mem/free_list.h"
+#include "class/ompi_free_list.h"
#include "util/cmd_line.h"
#include "request/request.h"
#include "mca/topo/topo.h"


@ -1,40 +0,0 @@
# -*- makefile -*-
#
# $HEADER$
#
include $(top_srcdir)/config/Makefile.options
noinst_LTLIBRARIES = libmem.la
# Source code files
headers = \
allocator.h \
free_list.h \
free_lists.h \
malloc.h \
mem_globals.h \
mem_pool.h \
seg_list.h \
sharedmem_util.h
libmem_la_SOURCES = \
$(headers) \
allocator.c \
free_list.c \
free_lists.c \
malloc.c \
mem_globals.c \
mem_pool.c \
seg_list.c \
sharedmem_util.c
# Conditionally install the header files
if WANT_INSTALL_HEADERS
ompidir = $(includedir)/ompi/mem
ompi_HEADERS = $(headers)
else
ompidir = $(includedir)
endif


@ -1,67 +0,0 @@
/*
* $HEADER$
*/
#include "mem/allocator.h"
#include "mem/sharedmem_util.h"
void *ompi_allocator_malloc(ompi_allocator_t *allocator, size_t chunk_size);
void ompi_allocator_default_free(ompi_allocator_t *allocator, void *base_ptr);
static void ompi_allocator_construct(ompi_allocator_t *allocator)
{
allocator->alc_alloc_fn = ompi_allocator_malloc;
allocator->alc_free_fn = ompi_allocator_free;
allocator->alc_is_shared = 0;
allocator->alc_mem_prot = 0;
allocator->alc_should_pin = 0;
allocator->alc_pinned_offset = 0;
allocator->alc_pinned_sz = 0;
}
static void ompi_allocator_destruct(ompi_allocator_t *allocator)
{
}
ompi_class_t ompi_allocator_t_class = {
"ompi_allocator_t",
OBJ_CLASS(ompi_object_t),
(ompi_construct_t) ompi_allocator_construct,
(ompi_destruct_t) ompi_allocator_destruct
};
void *ompi_alg_get_chunk(size_t chunk_size, int is_shared,
int mem_protect)
{
if ( !is_shared )
return malloc(chunk_size);
else
{
return ompi_zero_alloc(chunk_size, mem_protect, MMAP_SHARED_FLAGS);
}
}
void *ompi_allocator_alloc(ompi_allocator_t *allocator, size_t chunk_size)
{
return allocator->alc_alloc_fn(allocator, chunk_size);
}
void ompi_allocator_free(ompi_allocator_t *allocator, void *chunk_ptr)
{
if ( chunk_ptr )
allocator->alc_free_fn(allocator, chunk_ptr);
}
void *ompi_allocator_malloc(ompi_allocator_t *allocator, size_t chunk_size)
{
return malloc(chunk_size);
}
void ompi_allocator_default_free(ompi_allocator_t *allocator, void *chunk_ptr)
{
if ( chunk_ptr )
free(chunk_ptr);
}


@ -1,98 +0,0 @@
/*
* $HEADER$
*/
#ifndef OMPI_ALLOCATOR_H
#define OMPI_ALLOCATOR_H
#include "class/ompi_object.h"
/*
* This class is used to provide a generic and flexible way for the
* mem pool to allocate memory. It's meant to be derived for
* device-dependent logic, e.g. GM.
*
* You should be able to share allocators, but then you will need to
* protect with a lock.
*/
/*
* Base allocator is a wrapper for malloc
*/
typedef struct ompi_allocator {
ompi_object_t super;
int alc_is_shared; /* indicates whether to get shared memory */
int alc_mem_prot; /* memory protection for shared mem */
int alc_should_pin; /* should pin memory when allocating */
uint64_t alc_pinned_offset; /* pinned memory offset */
uint64_t alc_pinned_sz; /* pinned mem size (may be different from alloc size. */
void *(*alc_alloc_fn) (struct ompi_allocator *, size_t);
void (*alc_free_fn) (struct ompi_allocator *, void *);
} ompi_allocator_t;
extern ompi_class_t ompi_allocator_t_class;
void *ompi_alg_get_chunk(size_t chunk_size, int is_shared, int mem_protect);
void *ompi_allocator_alloc(ompi_allocator_t *allocator, size_t chunk_size);
void ompi_allocator_free(ompi_allocator_t *allocator, void *chunk_ptr);
static inline int ompi_allocator_get_is_shared(ompi_allocator_t *allocator)
{
return allocator->alc_is_shared;
}
static inline void ompi_allocator_set_is_shared(ompi_allocator_t *allocator,
int is_shared)
{
allocator->alc_is_shared = is_shared;
}
static inline int ompi_allocator_get_mem_prot(ompi_allocator_t *allocator)
{
return allocator->alc_mem_prot;
}
static inline void ompi_allocator_set_mem_prot(ompi_allocator_t *allocator,
int mem_prot)
{
allocator->alc_mem_prot = mem_prot;
}
static inline int ompi_allocator_get_should_pin(ompi_allocator_t *allocator)
{
return allocator->alc_should_pin;
}
static inline void ompi_allocator_set_should_pin(ompi_allocator_t *allocator,
int pin)
{
allocator->alc_should_pin = pin;
}
static inline uint64_t ompi_allocator_get_pin_offset(ompi_allocator_t
*allocator)
{
return allocator->alc_pinned_offset;
}
static inline void ompi_allocator_set_pin_offset(ompi_allocator_t *allocator,
uint64_t pin_offset)
{
allocator->alc_pinned_offset = pin_offset;
}
static inline uint64_t ompi_allocator_get_pin_size(ompi_allocator_t
*allocator)
{
return allocator->alc_pinned_sz;
}
static inline void ompi_allocator_set_pin_size(ompi_allocator_t *allocator,
uint64_t pin_sz)
{
allocator->alc_pinned_sz = pin_sz;
}
#endif /* OMPI_ALLOCATOR_H */


@ -1,82 +0,0 @@
/*
* $HEADER$
*/
#include "ompi_config.h"
#include "mem/free_list.h"
static void ompi_free_list_construct(ompi_free_list_t* fl);
static void ompi_free_list_destruct(ompi_free_list_t* fl);
ompi_class_t ompi_free_list_t_class = {
"ompi_free_list_t",
OBJ_CLASS(ompi_list_t),
(ompi_construct_t)ompi_free_list_construct,
(ompi_destruct_t)ompi_free_list_destruct
};
static void ompi_free_list_construct(ompi_free_list_t* fl)
{
OBJ_CONSTRUCT(&fl->fl_lock, ompi_mutex_t);
fl->fl_max_to_alloc = 0;
fl->fl_num_allocated = 0;
fl->fl_num_per_alloc = 0;
fl->fl_elem_size = 0;
fl->fl_elem_class = 0;
fl->fl_allocator = 0;
}
static void ompi_free_list_destruct(ompi_free_list_t* fl)
{
OBJ_DESTRUCT(&fl->fl_lock);
}
int ompi_free_list_init(
ompi_free_list_t *flist,
size_t elem_size,
ompi_class_t* elem_class,
int num_elements_to_alloc,
int max_elements_to_alloc,
int num_elements_per_alloc,
ompi_allocator_t* allocator)
{
flist->fl_elem_size = elem_size;
flist->fl_elem_class = elem_class;
flist->fl_max_to_alloc = max_elements_to_alloc;
flist->fl_num_allocated = 0;
flist->fl_num_per_alloc = num_elements_per_alloc;
flist->fl_allocator = allocator;
return ompi_free_list_grow(flist, num_elements_to_alloc);
}
int ompi_free_list_grow(ompi_free_list_t* flist, size_t num_elements)
{
unsigned char* ptr;
size_t i;
if (flist->fl_max_to_alloc > 0 && flist->fl_num_allocated + num_elements > flist->fl_max_to_alloc)
return OMPI_ERR_TEMP_OUT_OF_RESOURCE;
if (NULL != flist->fl_allocator)
ptr = (unsigned char*)ompi_allocator_alloc(flist->fl_allocator, num_elements * flist->fl_elem_size);
else
ptr = malloc(num_elements * flist->fl_elem_size);
if(NULL == ptr)
return OMPI_ERR_TEMP_OUT_OF_RESOURCE;
for(i=0; i<num_elements; i++) {
ompi_list_item_t* item = (ompi_list_item_t*)ptr;
if (NULL != flist->fl_elem_class) {
OBJ_CONSTRUCT_INTERNAL(item, flist->fl_elem_class);
}
ompi_list_append(&flist->super, item);
ptr += flist->fl_elem_size;
}
flist->fl_num_allocated += num_elements;
return OMPI_SUCCESS;
}


@ -1,68 +0,0 @@
/*
* $HEADER$
*/
#ifndef OMPI_FREE_LIST_H
#define OMPI_FREE_LIST_H
#include "ompi_config.h"
#include "class/ompi_list.h"
#include "include/constants.h"
#include "mem/seg_list.h"
#include "mem/mem_pool.h"
extern ompi_class_t ompi_free_list_t_class;
struct ompi_free_list_t
{
ompi_list_t super;
int fl_max_to_alloc;
int fl_num_allocated;
int fl_num_per_alloc;
size_t fl_elem_size;
ompi_class_t* fl_elem_class;
ompi_allocator_t* fl_allocator;
ompi_mutex_t fl_lock;
};
typedef struct ompi_free_list_t ompi_free_list_t;
int ompi_free_list_init(
ompi_free_list_t *flist,
size_t element_size,
ompi_class_t* element_class,
int num_elements_to_alloc,
int max_elements_to_alloc,
int num_elements_per_alloc,
ompi_allocator_t*);
int ompi_free_list_grow(ompi_free_list_t* flist, size_t num_elements);
#define OMPI_FREE_LIST_GET(fl, item, rc) \
{ \
if(ompi_using_threads()) { \
ompi_mutex_lock(&((fl)->fl_lock)); \
item = ompi_list_remove_first(&((fl)->super)); \
if(NULL == item) { \
ompi_free_list_grow((fl), (fl)->fl_num_per_alloc); \
item = ompi_list_remove_first(&((fl)->super)); \
} \
ompi_mutex_unlock(&((fl)->fl_lock)); \
} else { \
item = ompi_list_remove_first(&((fl)->super)); \
if(NULL == item) { \
ompi_free_list_grow((fl), (fl)->fl_num_per_alloc); \
item = ompi_list_remove_first(&((fl)->super)); \
} \
} \
rc = (NULL == item) ? OMPI_ERR_TEMP_OUT_OF_RESOURCE : OMPI_SUCCESS; \
}
#define OMPI_FREE_LIST_RETURN(fl, item) \
THREAD_SCOPED_LOCK(&((fl)->fl_lock), ompi_list_append(&((fl)->super), (item)));
#endif
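Aside (illustrative only, not part of this commit): typical use of the free list API above, which this commit relocates to class/ompi_free_list.h. The element class my_frag_t is hypothetical; any class derived from ompi_list_item_t would do.

/* assumes a class my_frag_t (derived from ompi_list_item_t) is defined elsewhere */
static ompi_free_list_t frag_list;

static void example_usage(void)
{
    ompi_list_item_t *item;
    int rc;

    OBJ_CONSTRUCT(&frag_list, ompi_free_list_t);
    ompi_free_list_init(&frag_list,
                        sizeof(my_frag_t),     /* element size */
                        OBJ_CLASS(my_frag_t),  /* constructor run per element */
                        32,                    /* elements allocated up front */
                        0,                     /* 0 or negative: no upper bound */
                        32,                    /* grow by this many at a time */
                        NULL);                 /* no allocator: plain malloc */

    OMPI_FREE_LIST_GET(&frag_list, item, rc);  /* grows the list if it is empty */
    if (OMPI_SUCCESS == rc) {
        /* ... use the element ... */
        OMPI_FREE_LIST_RETURN(&frag_list, item);  /* put it back on the list */
    }
}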


@ -1,537 +0,0 @@
/*
* $HEADER$
*/
#include "ompi_config.h"
#include "mem/free_lists.h"
#include "runtime/runtime.h"
#include "util/output.h"
#include "os/numa.h"
#include "os/ompi_system.h"
#include "mem/mem_globals.h"
#ifndef ROB_HASNT_FINISHED_THIS_YET
#define ROB_HASNT_FINISHED_THIS_YET 0
#endif
/* private list functions */
#if ROB_HASNT_FINISHED_THIS_YET
static ompi_list_item_t *ompi_free_lists_request_elt(ompi_free_lists_t *flist,
int pool_idx);
#endif
static void ompi_free_lists_append(ompi_free_lists_t *flist, void *chunk, int pool_idx);
static int ompi_free_lists_create_more_elts(ompi_free_lists_t *flist, int pool_idx);
static void *ompi_free_lists_get_mem_chunk(ompi_free_lists_t *flist, int index, size_t *len, int *err);
static int ompi_free_lists_mem_pool_construct(ompi_free_lists_t *flist, int nlists, long pages_per_list, ssize_t chunk_size,
size_t page_size, long min_pages_per_list,
long default_min_pages_per_list, long default_pages_per_list,
long max_pages_per_list, ssize_t max_mem_in_pool);
ompi_class_t ompi_free_lists_t_class = {
"ompi_free_lists_t",
OBJ_CLASS(ompi_object_t),
(ompi_construct_t) ompi_free_lists_construct,
(ompi_destruct_t) ompi_free_lists_destruct
};
void ompi_free_lists_construct(ompi_free_lists_t *flist)
{
OBJ_CONSTRUCT(&flist->fl_lock, ompi_mutex_t);
flist->fl_pool = NULL;
flist->fl_elt_cls = NULL;
flist->fl_description = NULL;
flist->fl_free_lists = NULL;
flist->fl_is_shared = 0;
flist->fl_nlists = 0;
flist->fl_elt_per_chunk = 0;
flist->fl_elt_size = 0;
flist->fl_retry_more_resources = 0;
flist->fl_enforce_affinity = 0;
flist->fl_affinity = NULL;
flist->fl_threshold_grow = 0;
#if OMPI_ENABLE_MEM_PROFILE
flist->fl_elt_out = NULL;
flist->fl_elt_max = NULL;
flist->fl_elt_sum = NULL;
flist->fl_nevents = NULL;
flist->fl_chunks_req = NULL;
flist->fl_chunks_returned = NULL;
#endif /* OMPI_ENABLE_MEM_PROFILE */
}
void ompi_free_lists_destruct(ompi_free_lists_t *flist)
{
int i;
OBJ_RELEASE(flist->fl_pool);
for ( i = 0; i < flist->fl_nlists; i++ )
OBJ_RELEASE(flist->fl_free_lists[i]);
if ( flist->fl_affinity )
free(flist->fl_affinity);
#if OMPI_ENABLE_MEM_PROFILE
if ( flist->fl_elt_out )
free(flist->fl_elt_out);
if ( flist->fl_elt_max )
free(flist->fl_elt_max);
if ( flist->fl_elt_sum )
free(flist->fl_elt_sum);
if ( flist->fl_nevents )
free(flist->fl_nevents);
if ( flist->fl_chunks_req )
free(flist->fl_chunks_req);
if ( flist->fl_chunks_returned )
free(flist->fl_chunks_returned);
#endif /* OMPI_ENABLE_MEM_PROFILE */
}
int ompi_free_lists_construct_with(
ompi_free_lists_t *flist,
int nlists,
int pages_per_list,
size_t chunk_size,
size_t page_size,
size_t elt_size,
int min_pages_per_list,
int max_pages_per_list,
int max_consec_req_fail,
const char *description,
bool retry_for_more_resources,
ompi_affinity_t *affinity,
bool enforce_affinity,
ompi_mem_pool_t *mem_pool)
{
/* ompi_free_lists_construct must have been called prior to calling this function */
size_t max_mem_in_pool;
size_t initial_mem_per_list;
long max_mem_per_list;
int list, pool;
int err = OMPI_SUCCESS;
flist->fl_description = description;
flist->fl_nlists = nlists;
/* set up the memory pool */
if ( mem_pool )
{
flist->fl_pool = mem_pool;
OBJ_RETAIN(flist->fl_pool);
}
else
{
/* instantiate memory pool */
max_mem_in_pool = max_pages_per_list * page_size;
err = ompi_free_lists_mem_pool_construct(
flist,
nlists,
pages_per_list,
chunk_size,
page_size,
min_pages_per_list,
min_pages_per_list,
pages_per_list,
max_pages_per_list,
max_mem_in_pool);
if (err != OMPI_SUCCESS)
{
return err;
}
}
/* reset pool chunk size */
chunk_size = ompi_mp_get_chunk_size(flist->fl_pool);
/* Number of elements per chunk */
flist->fl_elt_per_chunk = chunk_size / elt_size;
initial_mem_per_list = min_pages_per_list * page_size;
/* adjust initial_mem_per_list to increments of chunk_size */
if ( initial_mem_per_list < chunk_size )
{
min_pages_per_list = (((chunk_size - 1) / page_size) + 1);
initial_mem_per_list = min_pages_per_list * page_size;
}
/* determine upper limit on number of pages in a given list */
if ( (max_pages_per_list != -1) && (max_pages_per_list < min_pages_per_list) )
max_pages_per_list = min_pages_per_list;
if (max_pages_per_list == -1)
max_mem_per_list = -1;
else
max_mem_per_list = max_pages_per_list * page_size;
/* initialize empty lists of available descriptors */
flist->fl_free_lists = (ompi_seg_list_t **)
malloc(sizeof(ompi_seg_list_t *) *
flist->fl_nlists);
if ( !flist->fl_free_lists )
{
ompi_abort(1, "Error: Out of memory");
}
/* run constructors */
for (list = 0; list < flist->fl_nlists; list++)
{
if ( flist->fl_is_shared )
{
/* process shared memory allocation */
flist->fl_free_lists[list] =
(ompi_seg_list_t *)
ompi_fmp_get_mem_segment(&ompi_per_proc_shmem_pools,
sizeof(ompi_seg_list_t), CACHE_ALIGNMENT, list);
}
else
{
/* process private memory allocation */
flist->fl_free_lists[list] =
(ompi_seg_list_t *)malloc(sizeof(ompi_seg_list_t));
}
if (!flist->fl_free_lists[list]) {
ompi_abort(1, "Error: Out of memory");
}
OBJ_CONSTRUCT(&flist->fl_free_lists[list], ompi_seg_list_t);
ompi_sgl_set_min_bytes_pushed(flist->fl_free_lists[list],
initial_mem_per_list);
ompi_sgl_set_max_bytes_pushed(flist->fl_free_lists[list],
max_mem_per_list);
ompi_sgl_set_max_consec_fail(flist->fl_free_lists[list],
max_consec_req_fail);
} /* end list loop */
flist->fl_retry_more_resources = retry_for_more_resources;
flist->fl_enforce_affinity = enforce_affinity;
if ( enforce_affinity )
{
flist->fl_affinity = (affinity_t *)malloc(sizeof(affinity_t) *
flist->fl_nlists);
if ( !flist->fl_affinity ) {
ompi_abort(1, "Error: Out of memory");
}
/* copy policies in */
for ( pool = 0; pool < flist->fl_nlists; pool++ )
{
flist->fl_affinity[pool] = affinity[pool];
}
}
/* initialize locks for memory pool and individual list and link locks */
for ( pool = 0; pool < flist->fl_nlists; pool++ ) {
/* gain exclusive use of list */
if ( 1 == ompi_sgl_lock_list(flist->fl_free_lists[pool]) ) {
while ( ompi_sgl_get_bytes_pushed(flist->fl_free_lists[pool])
< ompi_sgl_get_min_bytes_pushed(flist->fl_free_lists[pool]) )
{
if (ompi_free_lists_create_more_elts(flist, pool) != OMPI_SUCCESS)
{
ompi_abort(1, "Error: Setting up initial private "
"free list for %s.\n", flist->fl_description);
}
}
ompi_sgl_unlock_list(flist->fl_free_lists[pool]);
}
else
{
/* only 1 process should be initializing the list */
ompi_abort(1, "Error: Setting up initial private free "
"list %d for %s.\n", pool, flist->fl_description);
}
}
return err;
}
static int ompi_free_lists_mem_pool_construct(ompi_free_lists_t *flist,
int nlists, long pages_per_list, ssize_t chunk_size,
size_t page_size, long min_pages_per_list,
long default_min_pages_per_list, long default_pages_per_list,
long max_pages_per_list, ssize_t max_mem_in_pool)
{
int err = OMPI_SUCCESS;
long total_pgs_to_alloc;
ssize_t mem_in_pool;
size_t to_alloc;
/* set chunksize - multiple of page size */
chunk_size =
((((chunk_size - 1) / page_size) + 1) * page_size);
/* determine number how much memory to allocate */
if ( pages_per_list == -1 ) {
/* minimum size is defaultNPagesPerList*number of local procs */
total_pgs_to_alloc = default_pages_per_list * nlists;
} else {
total_pgs_to_alloc = pages_per_list * nlists;
}
mem_in_pool = total_pgs_to_alloc * page_size;
/* Initialize memory pool */
if ( flist->fl_is_shared ) {
/* shared memory allocation */
to_alloc = sizeof(ompi_mem_pool_t);
flist->fl_pool =
(ompi_mem_pool_t *)ompi_fmp_get_mem_segment(&ompi_shmem_pools,
to_alloc,
CACHE_ALIGNMENT, 0);
if ( flist->fl_pool ) {
OBJ_CONSTRUCT(&flist->fl_pool, shmem_pool_t);
}
} else {
/* process private memory allocation */
flist->fl_pool = OBJ_NEW(ompi_mem_pool_t);
}
err = ompi_mp_construct_with(
flist->fl_pool,
mem_in_pool,
max_mem_in_pool,
chunk_size,
page_size);
return err;
}
static void *ompi_free_lists_get_mem_chunk(ompi_free_lists_t *flist, int index, size_t *len, int *err)
{
void *chunk = 0;
uint64_t sz_to_add;
/* check to make sure that the amount to add to the list does not
exceed the amount allowed */
sz_to_add = ompi_mp_get_chunk_size(flist->fl_pool);
#if OMPI_ENABLE_MEM_PROFILE
flist->fl_chunks_req[index]++;
#endif
if (index >= flist->fl_nlists)
{
ompi_output(0, "Error: Array out of bounds");
return chunk;
}
if ( ompi_sgl_get_max_bytes_pushed(flist->fl_free_lists[index]) != -1 )
{
if (sz_to_add +
ompi_sgl_get_bytes_pushed(flist->fl_free_lists[index]) >
ompi_sgl_get_max_bytes_pushed(flist->fl_free_lists[index]) )
{
ompi_sgl_inc_consec_fail(flist->fl_free_lists[index]);
if ( ompi_sgl_get_consec_fail(flist->fl_free_lists[index]) >=
ompi_sgl_get_max_consec_fail(flist->fl_free_lists[index]) )
{
*err = OMPI_ERR_OUT_OF_RESOURCE;
ompi_output(0, "Error: List out of memory in pool for %s",
flist->fl_description);
return chunk;
} else
*err = OMPI_ERR_TEMP_OUT_OF_RESOURCE;
return chunk;
}
}
/* set len */
*len = sz_to_add;
/* get chunk of memory */
chunk = ompi_mp_request_chunk(flist->fl_pool, index);
if ( 0 == chunk )
{
/* increment failure count */
ompi_sgl_inc_consec_fail(flist->fl_free_lists[index]);
if ( ompi_sgl_get_consec_fail(flist->fl_free_lists[index]) >=
ompi_sgl_get_max_consec_fail(flist->fl_free_lists[index]) )
{
*err = OMPI_ERR_OUT_OF_RESOURCE;
ompi_output(0, "Error: List out of memory in pool for %s\n",
flist->fl_description);
return chunk;
} else
*err = OMPI_ERR_TEMP_OUT_OF_RESOURCE;
return chunk;
}
/* set consecutive failure count to 0 - if we fail, we don't get
this far in the code. */
ompi_sgl_set_consec_fail(flist->fl_free_lists[index], 0);
#if OMPI_ENABLE_MEM_PROFILE
flist->fl_chunks_returned[index]++;
#endif
return chunk;
}
#if ROB_HASNT_FINISHED_THIS_YET
static ompi_list_item_t *ompi_free_lists_request_elt(ompi_free_lists_t *flist, int pool_idx)
{
ompi_dbl_list_t *seg_list = &(flist->fl_free_lists[pool_idx]->sgl_list);
volatile ompi_list_item_t *elt = ompi_dbl_get_last(seg_list);
if ( elt )
ompi_sgl_set_consec_fail(seg_list, 0);
return elt;
}
#endif
static void ompi_free_lists_append(ompi_free_lists_t *flist, void *chunk, int pool_idx)
{
/* ASSERT: mp_chunk_sz >= fl_elt_per_chunk * fl_elt_size */
/* push items onto list */
ompi_sgl_append_elt_chunk(flist->fl_free_lists[pool_idx],
chunk, ompi_mp_get_chunk_size(flist->fl_pool),
flist->fl_elt_per_chunk, flist->fl_elt_size);
}
static int ompi_free_lists_create_more_elts(ompi_free_lists_t *flist, int pool_idx)
{
int err = OMPI_SUCCESS, desc;
size_t len_added;
char *current_loc;
void *ptr = ompi_free_lists_get_mem_chunk(flist, pool_idx, &len_added, &err);
if (0 == ptr ) {
ompi_output(0, "Error: Can't get new elements for %s\n",
flist->fl_description);
return err;
}
/* attach memory affinity */
if ( flist->fl_enforce_affinity )
{
if (!ompi_set_affinity(ptr, len_added,
flist->fl_affinity[pool_idx]))
{
err = OMPI_ERROR;
#ifdef _DEBUGQUEUES
ompi_err(("Error: Can't set memory policy (pool_idx=%d)\n",
pool_idx));
return err;
#endif /* _DEBUGQUEUES */
}
}
/* Construct new descriptors using placement new */
current_loc = (char *) ptr;
for (desc = 0; desc < flist->fl_elt_per_chunk; desc++)
{
OBJ_CONSTRUCT_INTERNAL(current_loc, flist->fl_elt_cls);
current_loc += flist->fl_elt_size;
}
/* push chunk of memory onto the list */
ompi_free_lists_append(flist, ptr, pool_idx);
return err;
}
ompi_list_item_t *ompi_free_lists_get_elt(ompi_free_lists_t *flist, int index, int *error)
{
#if ROB_HASNT_FINISHED_THIS_YET
int error;
volatile ompi_list_item_t *elem = NULL;
elem = ompi_free_lists_request_elt(flist, index);
if ( elem )
{
error = OMPI_SUCCESS;
}
else if ( ompi_sgl_get_consec_fail(&(flist->fl_free_lists[index]->sgl_list))
< flist->fl_threshold_grow )
{
error = OMPI_ERR_TEMP_OUT_OF_RESOURCE;
}
else
{
error = OMPI_SUCCESS;
while ( (OMPI_SUCCESS) && (0 == elem) &&
(flist->fl_retry_more_resources) )
{
error = ompi_free_lists_create_more_elts(flist, index);
/* get element if managed to add resources to the list */
if ( OMPI_SUCCESS == error )
{
elem = ompi_free_lists_request_elt(flist, index);
}
}
if ( (OMPI_ERR_OUT_OF_RESOURCE == error)
|| (OMPI_ERR_FATAL == error) )
{
return 0;
}
}
#if OMPI_ENABLE_MEM_PROFILE
flist->fl_elt_out[index]++;
flist->fl_elt_sum[index] += flist->fl_elt_out[index];
flist->fl_nevents[index]++;
if (flist->fl_elt_max[index] < flist->fl_elt_out[index])
{
flist->fl_elt_max[index] = flist->fl_elt_out[index];
}
#endif
return elem;
#else
return NULL;
#endif
}
int ompi_free_lists_return_elt(ompi_free_lists_t *flist, int index, ompi_list_item_t *item)
{
#if ROB_HASNT_FINISHED_THIS_YET
mb();
ompi_dbl_append(&(flist->fl_free_lists[index]->sgl_list), item);
mb();
#if OMPI_ENABLE_MEM_PROFILE
flist->fl_elt_out[index]--;
#endif
return OMPI_SUCCESS;
#else
return OMPI_ERROR;
#endif
}


@@ -1,87 +0,0 @@
/*
* $HEADER$
*/
#ifndef OMPI_FREE_LISTS_H
#define OMPI_FREE_LISTS_H
#include "ompi_config.h"
#include "class/ompi_list.h"
#include "threads/mutex.h"
#include "mem/seg_list.h"
#include "mem/mem_pool.h"
/*
* Memory affinity is almost certainly an int everywhere, but let's
* make it a typedef in case we need to make it OS dependent
* sometime...
*/
typedef int ompi_affinity_t;
struct ompi_free_lists_t
{
ompi_object_t super;
int fl_is_shared;
ompi_mem_pool_t *fl_pool;
const char *fl_description;
int fl_nlists;
int fl_elt_per_chunk;
size_t fl_elt_size;
ompi_seg_list_t **fl_free_lists;
int fl_retry_more_resources;
int fl_enforce_affinity;
ompi_affinity_t *fl_affinity; /* array of ompi_affinity_t */
int fl_threshold_grow;
ompi_class_t *fl_elt_cls; /* this will be used to create new free list elements. */
ompi_mutex_t fl_lock;
#if OMPI_ENABLE_MEM_PROFILE
/* for mem profiling */
int *fl_elt_out;
int *fl_elt_max;
int *fl_elt_sum;
int *fl_nevents;
int *fl_chunks_req;
int *fl_chunks_returned;
#endif /* OMPI_ENABLE_MEM_PROFILE */
};
typedef struct ompi_free_lists_t ompi_free_lists_t;
extern ompi_class_t ompi_free_lists_t_class;
void ompi_free_lists_construct(ompi_free_lists_t *flist);
void ompi_free_lists_destruct(ompi_free_lists_t *flist);
/* ompi_frl_construct must have been called prior to calling this function */
int ompi_free_lists_construct_with(ompi_free_lists_t *flist,
int nlists,
int pages_per_list,
size_t chunk_size,
size_t page_size,
size_t element_size,
int min_pages_per_list,
int max_pages_per_list,
int max_consec_req_fail,
const char *description,
bool retry_for_more_resources,
ompi_affinity_t *affinity,
bool enforce_affinity,
ompi_mem_pool_t *pool);
ompi_list_item_t *ompi_free_lists_get_elt(ompi_free_lists_t *flist, int index, int *error);
int ompi_free_lists_return_elt(ompi_free_lists_t *flist, int index, ompi_list_item_t *item);
/*
* Accessor functions
*/
int ompi_free_lists_get_thresh_grow(ompi_free_lists_t *flist);
void ompi_free_lists_set_thresh_grow(ompi_free_lists_t *flist, int to_grow);
#endif
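To make the calling convention above concrete, here is a minimal usage sketch. It is illustrative only: the sizes, the list index 0, and the backing pool variable are assumptions, not values taken from this commit, and error handling is reduced to a single check.

    /* Hypothetical sketch: sizes, pool, and index are assumptions. */
    ompi_free_lists_t flist;
    ompi_mem_pool_t *pool = OBJ_NEW(ompi_mem_pool_t);  /* backing pool, assumed configured */
    ompi_list_item_t *item;
    int err;

    OBJ_CONSTRUCT(&flist, ompi_free_lists_t);          /* runs ompi_free_lists_construct() */
    ompi_free_lists_construct_with(&flist,
                                   1,                  /* nlists */
                                   4,                  /* pages_per_list */
                                   64 * 1024,          /* chunk_size */
                                   4096,               /* page_size */
                                   256,                /* element_size */
                                   1,                  /* min_pages_per_list */
                                   16,                 /* max_pages_per_list */
                                   4,                  /* max_consec_req_fail */
                                   "example free list",
                                   true,               /* retry_for_more_resources */
                                   NULL,               /* affinity */
                                   false,              /* enforce_affinity */
                                   pool);

    item = ompi_free_lists_get_elt(&flist, 0, &err);   /* list index 0 */
    if (NULL != item) {
        /* ... use the element ... */
        ompi_free_lists_return_elt(&flist, 0, item);
    }

Per the comment above, ompi_free_lists_construct() (run here via OBJ_CONSTRUCT) must have been called before ompi_free_lists_construct_with().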


@@ -1,29 +0,0 @@
/*
* $HEADER$
*/
#include <unistd.h>
#include "include/constants.h"
#include "class/ompi_object.h"
#include "mem/mem_globals.h"
ompi_fixed_mpool_t ompi_shmem_pools;
ompi_fixed_mpool_t ompi_per_proc_shmem_pools;
int ompi_setup_per_proc_shmem_pools(int npools)
{
int ret = OMPI_SUCCESS;
ssize_t initial_alloc = 0;
ssize_t min_alloc_size = 4 * getpagesize();
int n_array_elts_add = 10;
OBJ_CONSTRUCT(&ompi_per_proc_shmem_pools, ompi_fixed_mpool_t);
ompi_fmp_construct_with(&ompi_per_proc_shmem_pools,
initial_alloc, min_alloc_size,
npools, n_array_elts_add, 1);
return ret;
}


@@ -1,21 +0,0 @@
/*
* $HEADER$
*/
#ifndef MEM_GLOBALS_H
#define MEM_GLOBALS_H
#include "mem/mem_pool.h"
/* shared memory pool for use before fork.
should be initialized during prefork init.
*/
extern ompi_fixed_mpool_t ompi_shmem_pools;
extern ompi_fixed_mpool_t ompi_per_proc_shmem_pools;
int ompi_setup_per_proc_shmem_pools(int npools);
#endif
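A minimal sketch of the intended prefork flow, under the assumption that one pool per local process is wanted; the process count, segment size, and alignment below are illustrative, not values from this commit.

    /* Hypothetical sketch: nprocs and the segment size are assumptions. */
    int nprocs = 4;
    void *seg;

    ompi_setup_per_proc_shmem_pools(nprocs);           /* before fork() */

    /* carve one page out of process 0's pool */
    seg = ompi_fmp_get_mem_segment(&ompi_per_proc_shmem_pools,
                                   4096,               /* length */
                                   getpagesize(),      /* alignment (power of two) */
                                   0);                 /* which_pool */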


@@ -1,540 +0,0 @@
/*
* $HEADER$
*/
#include "ompi_config.h"
#include <string.h>
#include <sys/errno.h>
#include <unistd.h>
#include "include/constants.h"
#include "runtime/runtime.h"
#include "mem/mem_pool.h"
#include "mem/sharedmem_util.h"
#include "util/output.h"
#include "os/numa.h"
ompi_class_t ompi_mem_pool_t_class = {
"ompi_mem_pool_t",
OBJ_CLASS(ompi_object_t),
(ompi_construct_t) ompi_mp_construct,
(ompi_destruct_t) ompi_mp_destruct
};
/* process-shared mem pool class */
ompi_class_t shmem_pool_t_class = {
"shmem_pool_t",
OBJ_CLASS(ompi_object_t),
(ompi_construct_t) ompi_mp_shared_construct,
(ompi_destruct_t) ompi_mp_destruct
};
void ompi_mp_construct(ompi_mem_pool_t *pool)
{
pool->mp_private_alloc = OBJ_NEW(ompi_allocator_t);
OBJ_CONSTRUCT(&pool->mp_lock, ompi_mutex_t);
pool->mp_dev_alloc = NULL;
}
void ompi_mp_shared_construct(ompi_mem_pool_t *pool)
{
pool->mp_private_alloc = OBJ_NEW(ompi_allocator_t);
OBJ_CONSTRUCT(&pool->mp_lock, ompi_mutex_t);
ompi_allocator_set_is_shared(pool->mp_private_alloc, 1);
ompi_allocator_set_mem_prot(pool->mp_private_alloc, MMAP_SHARED_PROT);
pool->mp_dev_alloc = NULL;
}
void ompi_mp_destruct(ompi_mem_pool_t *pool)
{
if ( pool->mp_dev_alloc )
OBJ_RELEASE(pool->mp_dev_alloc);
OBJ_RELEASE(pool->mp_private_alloc);
OBJ_DESTRUCT(&pool->mp_lock);
}
int ompi_mp_construct_with(ompi_mem_pool_t *pool, uint64_t pool_size,
uint64_t max_len,
uint64_t chunk_size, size_t page_size)
{
char *ptr = 0;
ssize_t wrk_size = pool_size;
void *base = 0;
ssize_t to_alloc;
int retval, chunk;
pool->mp_page_sz = page_size;
if (((pool->mp_page_sz / getpagesize()) * getpagesize()) != pool->mp_page_sz)
{
return OMPI_ERR_BAD_PARAM;
}
pool->mp_chunk_sz = chunk_size;
if ( !chunk_size )
{
return OMPI_ERR_BAD_PARAM;
}
/* set upper limit on pool */
if (max_len < 0)
{
/* no upper limit on size */
pool->mp_max_chunks = -1;
}
else
{
pool->mp_max_chunks = ((max_len - 1) / page_size) + 1;
if (pool->mp_max_chunks == 0)
{
return OMPI_ERR_BAD_PARAM;
}
}
/* round up pool size to multiple of page size */
pool_size = ((((pool_size - 1) / chunk_size) + 1) * chunk_size);
if (0 == pool_size) {
ompi_output(0, "Error: pool_size == 0");
return OMPI_ERR_BAD_PARAM;
}
if (pool_size < chunk_size) {
ompi_output(0, "Error: pool_size < chunk_size");
return OMPI_ERR_BAD_PARAM;
}
/* add red-zone pages */
/* set up dev allocator to use pinned memory */
ompi_allocator_set_should_pin(pool->mp_dev_alloc, 1);
ompi_allocator_set_pin_offset(pool->mp_dev_alloc, page_size);
while (!ptr && wrk_size) {
to_alloc = wrk_size + 2 * page_size;
/* allocate memory. Reset pinned memory size. */
ompi_allocator_set_pin_size(pool->mp_dev_alloc, wrk_size);
ptr = ompi_allocator_alloc(pool->mp_dev_alloc, to_alloc);
if (ptr == 0)
wrk_size /= 2;
else
{
base = ptr + page_size;
}
}
/* reset pool size */
pool_size = wrk_size;
pool->mp_num_chunks = ((pool_size - 1) / chunk_size) + 1;
if ((pool->mp_num_chunks > pool->mp_max_chunks) && (pool->mp_max_chunks > 0))
{
ompi_output(0, "Error: NPoolChunks (%ld) > maxNPoolChunks (%ld)",
pool->mp_num_chunks, pool->mp_max_chunks);
return OMPI_ERR_BAD_PARAM;
}
/* change memory protection for red zones */
retval = mprotect(ptr, page_size, PROT_NONE);
if (retval != 0)
{
ompi_abort(1, "Error in red zone 1 mprotect");
}
/* end red zone */
retval =
mprotect(ptr + page_size + wrk_size, page_size, PROT_NONE);
if (retval != 0)
{
ompi_abort(1, "Error in red zone 2 mprotect");
}
/* initialize chunk descriptors */
to_alloc = sizeof(ompi_chunk_desc_t) * pool->mp_num_chunks;
pool->mp_chunks = ompi_allocator_alloc(pool->mp_private_alloc, to_alloc);
if ( !pool->mp_chunks )
{
ompi_output(0, "Error: Out of memory");
return OMPI_ERROR;
}
ptr = (char *) base;
for ( chunk = 0; chunk < pool->mp_num_chunks; chunk++ )
{
pool->mp_chunks[chunk].chd_flags = ALLOCELEMENT_FLAG_AVAILABLE;
pool->mp_chunks[chunk].chd_index = -1;
pool->mp_chunks[chunk].chd_base_ptr = ptr;
ptr += chunk_size;
}
/* set next available chunk */
pool->mp_next_avail_chunk = 0;
return 1;
}
void *ompi_mp_request_chunk(ompi_mem_pool_t *pool, int pool_index)
{
void *chunk = 0;
int chunk_found;
int next_chunk_to_use;
ompi_chunk_desc_t *chunk_desc;
size_t to_alloc;
int desc;
/* grab lock on pool */
ompi_mutex_lock(&(pool->mp_lock));
/* Have we used all the allocated memory? */
if ( pool->mp_next_avail_chunk == pool->mp_num_chunks )
{
/* can we increase the pool size ? We currently won't grow a shared
memory region. */
if ( ompi_mp_uses_shared_mem(pool) ||
((pool->mp_max_chunks > 0) && (pool->mp_num_chunks == pool->mp_max_chunks)) )
{
ompi_mutex_unlock(&(pool->mp_lock));
return chunk;
}
/* allocate larger array of chunk descriptors and
copy old array into new array */
to_alloc = sizeof(ompi_chunk_desc_t) * (pool->mp_num_chunks + 1);
chunk_desc = ompi_allocator_alloc(pool->mp_private_alloc, to_alloc);
if ( !chunk_desc )
{
ompi_output(0, "Error! Out of memory!");
ompi_mutex_unlock(&(pool->mp_lock));
return 0;
}
for ( desc = 0; desc < pool->mp_num_chunks; desc++ ) {
chunk_desc[desc] = pool->mp_chunks[desc];
}
/* free old array and set old array pointer to point to new array */
ompi_allocator_free(pool->mp_private_alloc, pool->mp_chunks);
pool->mp_chunks = chunk_desc;
/* allocate new memory chunk using device allocator. */
ompi_allocator_set_should_pin(pool->mp_dev_alloc, 1);
ompi_allocator_set_pin_offset(pool->mp_dev_alloc, 0);
ompi_allocator_set_pin_size(pool->mp_dev_alloc, 0);
pool->mp_chunks[pool->mp_num_chunks].chd_base_ptr =
ompi_allocator_alloc(pool->mp_dev_alloc, pool->mp_chunk_sz);
if ( !pool->mp_chunks[pool->mp_num_chunks].chd_base_ptr )
{
ompi_output(0, "Error: Out of memory");
ompi_mutex_unlock(&(pool->mp_lock));
return chunk;
}
/* reset pool chunk counter */
pool->mp_num_chunks++;
}
/* grab chunk */
chunk = pool->mp_chunks[pool->mp_next_avail_chunk].chd_base_ptr;
pool->mp_chunks[pool->mp_next_avail_chunk].chd_flags = ALLOCELEMENT_FLAG_INUSE;
pool->mp_chunks[pool->mp_next_avail_chunk].chd_index = pool_index;
/* find next available chunk */
chunk_found = 0;
next_chunk_to_use = pool->mp_next_avail_chunk + 1;
while ( next_chunk_to_use < pool->mp_num_chunks )
{
if ( pool->mp_chunks[next_chunk_to_use].chd_flags ==
ALLOCELEMENT_FLAG_AVAILABLE )
{
pool->mp_next_avail_chunk = next_chunk_to_use;
chunk_found = 1;
break;
}
next_chunk_to_use++;
}
/* if no chunks available set next chunk past end of list so that next
time around more memory will be allocated */
if ( !chunk_found ) {
pool->mp_next_avail_chunk = pool->mp_num_chunks;
}
ompi_mutex_unlock(&(pool->mp_lock));
return chunk;
}
/*
*
* Fixed shared mem pool interface
*
*/
ompi_class_t ompi_fixed_mpool_t_class = {
"ompi_fixed_mpool_t",
OBJ_CLASS(ompi_object_t),
(ompi_construct_t) ompi_fmp_construct,
(ompi_destruct_t) ompi_fmp_destruct
};
void ompi_fmp_construct(ompi_fixed_mpool_t *pool)
{
pool->fmp_private_alloc = OBJ_NEW(ompi_allocator_t);
ompi_allocator_set_is_shared(pool->fmp_private_alloc, 1);
ompi_allocator_set_mem_prot(pool->fmp_private_alloc, MMAP_SHARED_PROT);
pool->fmp_segments = NULL;
pool->fmp_n_segments = NULL;
pool->fmp_n_segs_in_array = NULL;
pool->fmp_min_alloc_size = 0;
pool->fmp_n_elts_to_add = 0;
pool->fmp_n_pools = 0;
pool->fmp_pool_ok_to_use = 0;
pool->fmp_apply_affinity = 0;
}
void ompi_fmp_destruct(ompi_fixed_mpool_t *pool)
{
int i;
if ( pool->fmp_segments )
{
for ( i = 0; i < pool->fmp_n_pools; i++ )
OBJ_RELEASE(pool->fmp_segments[i]);
free(pool->fmp_segments);
}
if ( pool->fmp_n_segments )
free(pool->fmp_n_segments);
if ( pool->fmp_n_segs_in_array )
free(pool->fmp_n_segs_in_array);
}
int ompi_fmp_construct_with(ompi_fixed_mpool_t *pool, ssize_t initial_allocation,
ssize_t min_allocation_size,
int n_pools, int n_array_elements_to_add, int apply_mem_affinity)
{
int pool_idx;
void *ptr;
pool->fmp_pool_ok_to_use = 1;
pool->fmp_apply_affinity = apply_mem_affinity;
pool->fmp_min_alloc_size = min_allocation_size;
if (pool->fmp_min_alloc_size < (ssize_t)getpagesize())
pool->fmp_min_alloc_size = getpagesize();
pool->fmp_n_elts_to_add = n_array_elements_to_add;
pool->fmp_n_pools = n_pools;
pool->fmp_segments = (ompi_memseg_t **)
malloc(sizeof(ompi_memseg_t *)*n_pools);
if ( !pool->fmp_segments )
{
ompi_abort(1, "Unable to allocate memory for "
"pool->fmp_segments, requested %ld bytes, errno %d",
sizeof(int) * n_pools, errno);
}
memset(pool->fmp_segments, 0, sizeof(ompi_memseg_t *)*n_pools);
pool->fmp_n_segs_in_array = malloc(sizeof(int) * n_pools);
if ( !pool->fmp_n_segs_in_array ) {
ompi_abort(1, "Unable to allocate memory for "
"pool->fmp_n_segs_in_array, requested %ld bytes, errno %d",
sizeof(int) * n_pools, errno);
}
bzero(pool->fmp_n_segs_in_array, sizeof(int) * n_pools);
for ( pool_idx = 0; pool_idx < n_pools; pool_idx++ )
{
ptr = ompi_zero_alloc(initial_allocation, MMAP_SHARED_PROT,
MMAP_SHARED_FLAGS);
if ( !ptr ) {
ompi_abort(1, "Unable to allocate "
"memory pool , requested %ld, errno %d",
initial_allocation, errno);
}
if ( apply_mem_affinity )
{
if (!ompi_set_affinity(ptr, initial_allocation, pool_idx))
{
ompi_abort(1, "Error: setting memory affinity");
}
}
/* set ompi_memseg_t data */
pool->fmp_segments[pool_idx][0].ms_base_ptr = ptr;
pool->fmp_segments[pool_idx][0].ms_current_ptr = ptr;
pool->fmp_segments[pool_idx][0].ms_length = initial_allocation;
pool->fmp_segments[pool_idx][0].ms_mem_available = initial_allocation;
/* update the number of elements in use */
pool->fmp_n_segments[pool_idx] = 1;
} /* end pool loop */
return OMPI_SUCCESS;
}
void *ompi_fmp_get_mem_segment(ompi_fixed_mpool_t *pool,
size_t length, size_t alignment, int which_pool)
{
void *segment = NULL;
size_t mask;
int idx, seg_idx;
ssize_t len_to_alloc;
char *ptr;
ompi_memseg_t *tmp_seg;
void *tmp_ptr;
/* return if pool can't be used */
if ( !pool->fmp_pool_ok_to_use )
return NULL;
/* get the appropriate mask for the alignment */
mask = ~(alignment - 1);
/* loop over pool->fmp_segments elements to see if available memory
exists */
idx = -1;
len_to_alloc = length;
for ( seg_idx = 0; seg_idx < pool->fmp_n_segments[which_pool];
seg_idx++ )
{
ptr =
(char *) pool->fmp_segments[which_pool][seg_idx].ms_current_ptr;
/* check to see if pointer is aligned correctly */
if ( (((size_t) ptr) & mask) == ((size_t) ptr) )
{
segment = ptr;
len_to_alloc = length;
}
else
{
/* align the pointer */
ptr = (char *) ((size_t) ptr + alignment);
ptr = (char *) ((size_t) ptr & mask);
len_to_alloc = length +
(ptr - (char *) pool->fmp_segments[which_pool][seg_idx].ms_current_ptr);
/* continue if not enough memory in this segment */
if (len_to_alloc >
pool->fmp_segments[which_pool][seg_idx].ms_mem_available) {
continue;
}
segment = ptr;
}
if (pool->fmp_segments[which_pool][seg_idx].ms_mem_available >=
len_to_alloc)
{
idx = seg_idx;
break;
}
}
/* if no available memory exists - get more memory */
if ( idx < 0 )
{
/* if need be, increase the size of pool->fmp_segments[] */
if (pool->fmp_n_segments[which_pool] ==
pool->fmp_n_segs_in_array[which_pool])
{
/* create a temp version of pool->fmp_segments[] */
tmp_seg = malloc(sizeof(ompi_memseg_t) *
(pool->fmp_n_segments[which_pool] +
pool->fmp_n_elts_to_add));
if ( !tmp_seg ) {
ompi_abort(1, "Unable to allocate memory for tmp_seg, errno %d",
errno);
}
/* copy old version of pool->fmp_segments to tmp copy */
for (seg_idx = 0; seg_idx < pool->fmp_n_segments[which_pool]; seg_idx++)
{
tmp_seg[seg_idx].ms_base_ptr =
pool->fmp_segments[which_pool][seg_idx].ms_base_ptr;
tmp_seg[seg_idx].ms_current_ptr =
pool->fmp_segments[which_pool][seg_idx].ms_current_ptr;
tmp_seg[seg_idx].ms_length =
pool->fmp_segments[which_pool][seg_idx].ms_length;
tmp_seg[seg_idx].ms_mem_available =
pool->fmp_segments[which_pool][seg_idx].ms_mem_available;
}
free(pool->fmp_segments[which_pool]);
pool->fmp_segments[which_pool] = tmp_seg;
/* set the element of pool->fmp_segments to grab */
pool->fmp_n_segs_in_array[which_pool] += pool->fmp_n_elts_to_add;
} /* end increase size of pool->fmp_segments[] */
idx = pool->fmp_n_segments[which_pool];
/* allocate more memory */
len_to_alloc = 4 * (length + alignment);
if (len_to_alloc < pool->fmp_min_alloc_size)
len_to_alloc = 2 * pool->fmp_min_alloc_size;
tmp_ptr =
ompi_zero_alloc(len_to_alloc, MMAP_SHARED_PROT, MMAP_SHARED_FLAGS);
if ( !tmp_ptr )
{
ompi_abort(1, "Unable to allocate memory pool");
}
if ( pool->fmp_apply_affinity )
{
if ( !ompi_set_affinity(tmp_ptr, len_to_alloc, which_pool) ) {
ompi_abort(1, "Error: setting memory affinity");
}
}
/* fill in pool->fmp_segments */
pool->fmp_segments[which_pool][idx].ms_base_ptr = tmp_ptr;
pool->fmp_segments[which_pool][idx].ms_current_ptr = tmp_ptr;
pool->fmp_segments[which_pool][idx].ms_length =
len_to_alloc;
pool->fmp_segments[which_pool][idx].ms_mem_available =
len_to_alloc;
pool->fmp_n_segments[which_pool]++;
/* set pointer and length */
ptr =
(char *) pool->fmp_segments[which_pool][idx].ms_current_ptr;
/* check to see if pointer is aligned correctly */
if ((((size_t) ptr) & mask) == ((size_t) ptr)) {
segment = ptr;
len_to_alloc = length;
} else {
/* align the pointer */
ptr = (char *) ((size_t) ptr + alignment);
ptr = (char *) ((size_t) ptr & mask);
segment = ptr;
len_to_alloc = length +
(ptr -
(char *) pool->fmp_segments[which_pool][idx].
ms_current_ptr);
}
} /* end " idx < 0 " */
/* update pool->fmp_segments */
pool->fmp_segments[which_pool][idx].ms_current_ptr = (void *)
((char *) (pool->fmp_segments[which_pool][idx].ms_current_ptr) +
len_to_alloc);
pool->fmp_segments[which_pool][idx].ms_mem_available -=
len_to_alloc;
return segment;
}
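The alignment handling in ompi_fmp_get_mem_segment() relies on the alignment being a power of two: mask = ~(alignment - 1) keeps only the aligned part of an address, and adding alignment before masking rounds an unaligned pointer up to the next boundary. A small worked example with purely illustrative values:

    /* Illustrative values only. */
    size_t alignment = 64;
    size_t mask = ~(alignment - 1);              /* ...FFC0: clears the low 6 bits */
    char *ptr = (char *) 0x12345;                /* not aligned: (0x12345 & mask) == 0x12340 */

    ptr = (char *) ((size_t) ptr + alignment);   /* 0x12385 */
    ptr = (char *) ((size_t) ptr & mask);        /* 0x12380, the next 64-byte boundary */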


@@ -1,134 +0,0 @@
/*
* $HEADER$
*/
#ifndef OMPI_MEMORY_POOL_H
#define OMPI_MEMORY_POOL_H
#include "include/types.h"
#include "class/ompi_object.h"
#include "mem/allocator.h"
#include "threads/mutex.h"
#define ALLOCELEMENT_FLAG_UNUSABLE (0)
#define ALLOCELEMENT_FLAG_AVAILABLE (1)
#define ALLOCELEMENT_FLAG_INUSE (2)
#define ALLOCELEMENT_FLAG_NEVERFREE (4)
#define ALLOCELEMENT_FLAG_LOANED (8)
/*
To create a process-private pool, use
pool = OBJ_NEW(ompi_mem_pool_t);
To create a process-shared pool, use
pool = OBJ_NEW(ompi_shmem_pool_t);
*/
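A minimal sketch of that creation path, assuming a device allocator is available for registering memory; the allocator variable, the pool index 0, and the sizes are illustrative assumptions, not values taken from this commit.

    /* Hypothetical sketch: dev_alloc and the sizes are assumptions. */
    ompi_mem_pool_t *pool = OBJ_NEW(ompi_mem_pool_t);        /* process-private pool */
    ompi_allocator_t *dev_alloc = OBJ_NEW(ompi_allocator_t);
    void *chunk;

    ompi_mp_set_dev_allocator(pool, dev_alloc);              /* pool retains the allocator */
    ompi_mp_construct_with(pool,
                           1024 * 1024,                      /* pool_size */
                           16 * 1024 * 1024,                 /* max_len */
                           64 * 1024,                        /* chunk_size */
                           getpagesize());                   /* page_size */
    chunk = ompi_mp_request_chunk(pool, 0);                  /* pool_index 0 */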
typedef struct ompi_chunk_desc
{
uint16_t chd_flags;
uint32_t chd_index;
void *chd_base_ptr;
} ompi_chunk_desc_t;
typedef struct ompi_mem_pool
{
ompi_object_t super;
ompi_allocator_t *mp_dev_alloc; /* possibly device-dependent allocator for registering memory */
ompi_allocator_t *mp_private_alloc; /* for use by pool only; do not set! */
ompi_mutex_t mp_lock;
uint64_t mp_page_sz;
uint64_t mp_chunk_sz;
uint32_t mp_num_chunks;
uint32_t mp_max_chunks;
uint32_t mp_next_avail_chunk;
ompi_chunk_desc_t *mp_chunks;
} ompi_mem_pool_t;
/* process-private mem pool class */
extern ompi_class_t ompi_mem_pool_t_class;
/* process-shared mem pool class */
extern ompi_class_t shmem_pool_t_class;
void ompi_mp_construct(ompi_mem_pool_t *pool);
void ompi_mp_shared_construct(ompi_mem_pool_t *pool);
void ompi_mp_destruct(ompi_mem_pool_t *pool);
int ompi_mp_construct_with(ompi_mem_pool_t *pool, uint64_t pool_size,
uint64_t max_len,
uint64_t chunk_size, size_t pg_size);
void *ompi_mp_request_chunk(ompi_mem_pool_t *pool, int pool_index);
/*
*
* Memory Pool accessor functions
*
*/
/* returns 1 if pool uses shared memory, 0 otherwise. */
#define ompi_mp_uses_shared_mem(pool) \
ompi_allocator_get_is_shared(pool->mp_private_alloc)
#define ompi_mp_get_dev_allocator(pool) \
((pool)->mp_dev_alloc)
static inline void ompi_mp_set_dev_allocator(ompi_mem_pool_t *pool, ompi_allocator_t *allocator)
{
/* releases old allocator and retains new one. */
if ( pool->mp_dev_alloc )
OBJ_RELEASE(pool->mp_dev_alloc);
pool->mp_dev_alloc = allocator;
OBJ_RETAIN(pool->mp_dev_alloc);
}
#define ompi_mp_get_chunk_size(pool) \
((pool)->mp_chunk_sz)
/*
*
* Fixed shared mem pool interface
*
*/
/*
Class used to satisfy shared memory requests. Assumes that requests
are made before the child processes are forked, and that this memory
will not be recycled or freed until the app exits.
*/
typedef struct ompi_mem_segment
{
void *ms_base_ptr;
void *ms_current_ptr;
size_t ms_length;
size_t ms_mem_available;
} ompi_memseg_t;
typedef struct ompi_fixed_mpool
{
ompi_object_t super;
ompi_allocator_t *fmp_private_alloc;
ompi_memseg_t **fmp_segments;
int *fmp_n_segments;
int *fmp_n_segs_in_array;
size_t fmp_min_alloc_size;
int fmp_n_elts_to_add;
int fmp_n_pools;
int fmp_pool_ok_to_use;
int fmp_apply_affinity;
} ompi_fixed_mpool_t;
extern ompi_class_t ompi_fixed_mpool_t_class;
void ompi_fmp_construct(ompi_fixed_mpool_t *pool);
void ompi_fmp_destruct(ompi_fixed_mpool_t *pool);
int ompi_fmp_construct_with(ompi_fixed_mpool_t *pool, ssize_t initial_allocation,
ssize_t min_allocation_size,
int n_pools, int n_array_elements_to_add, int apply_mem_affinity);
void *ompi_fmp_get_mem_segment(ompi_fixed_mpool_t *pool,
size_t length, size_t align, int which_pool);
#endif /* OMPI_MEMORY_POOL_H */


@@ -1,57 +0,0 @@
/*
* $HEADER$
*/
#include "mem/seg_list.h"
#include "class/ompi_list.h"
/*
* Public variable
*/
ompi_class_t ompi_seg_list_t_class = {
"ompi_seg_list_t",
OBJ_CLASS(ompi_object_t),
(ompi_construct_t) ompi_sgl_construct,
(ompi_destruct_t) ompi_sgl_destruct
};
void ompi_sgl_construct(ompi_seg_list_t *slist)
{
OBJ_CONSTRUCT(&slist->sgl_list, ompi_list_t);
OBJ_CONSTRUCT(&slist->sgl_lock, ompi_mutex_t);
slist->sgl_min_bytes_pushed = 0;
slist->sgl_max_bytes_pushed = 0;
slist->sgl_bytes_pushed = 0;
slist->sgl_max_consec_fail = 0;
slist->sgl_consec_fail = 0;
}
void ompi_sgl_destruct(ompi_seg_list_t *slist)
{
OBJ_DESTRUCT(&slist->sgl_list);
OBJ_DESTRUCT(&slist->sgl_lock);
}
void ompi_sgl_append_elt_chunk(
ompi_seg_list_t *slist,
void *chunk,
size_t chunk_size,
int n_elts,
size_t elt_size)
{
/* Since this function could be called frequently, we do not
want to compute the number of elements each time, so we
require it to be passed as an arg.
*/
int i;
char *ptr;
ptr = (char *)chunk;
slist->sgl_bytes_pushed += chunk_size;
for ( i = 0; i < n_elts; i++ )
{
ompi_list_append(&(slist->sgl_list), (ompi_list_item_t *)ptr);
ptr += elt_size;
}
}
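For illustration, this is how a chunk would typically be handed to the function; 'slist' and 'chunk' are assumed to exist already, and the numbers are arbitrary.

    /* Illustrative only: a 4096-byte chunk carved into 16 elements of 256 bytes.
       Each 256-byte slot must already begin with a constructed ompi_list_item_t,
       as ompi_free_lists_create_more_elts() arranges before calling this. */
    ompi_sgl_append_elt_chunk(slist, chunk, 4096, 16, 256);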


@@ -1,83 +0,0 @@
/*
* $HEADER$
*/
#ifndef SEG_LIST_H
#define SEG_LIST_H
#include "ompi_config.h"
#include "class/ompi_list.h"
#include "threads/mutex.h"
struct ompi_seg_list_t
{
ompi_object_t super;
size_t sgl_min_bytes_pushed;
size_t sgl_max_bytes_pushed;
size_t sgl_bytes_pushed;
int sgl_max_consec_fail;
int sgl_consec_fail;
ompi_mutex_t sgl_lock;
ompi_list_t sgl_list;
};
typedef struct ompi_seg_list_t ompi_seg_list_t;
extern ompi_class_t ompi_seg_list_t_class;
void ompi_sgl_construct(ompi_seg_list_t *slist);
void ompi_sgl_destruct(ompi_seg_list_t *slist);
void ompi_sgl_append_elt_chunk(
ompi_seg_list_t *slist,
void *chunk,
size_t chunk_size,
int n_elts,
size_t elt_size);
/*
*
* Segment list accessor functions
*
*/
#define ompi_sgl_lock_list(slist) ompi_mutex_trylock(&slist->sgl_lock)
#define ompi_sgl_unlock_list(slist) ompi_mutex_unlock(&slist->sgl_lock)
static inline bool ompi_sgl_is_locked(ompi_seg_list_t *slist);
static inline bool ompi_sgl_is_locked(ompi_seg_list_t *slist)
{
/* returns 1 if list is currently locked, otherwise 0. */
int ret;
ret = ompi_mutex_trylock(&slist->sgl_lock);
if ( !ret )
ompi_mutex_unlock(&slist->sgl_lock);
return !ret;
}
#define ompi_sgl_get_min_bytes_pushed(slist) ((slist)->sgl_min_bytes_pushed)
#define ompi_sgl_set_min_bytes_pushed(slist, min_bytes) \
((slist)->sgl_min_bytes_pushed = min_bytes)
#define ompi_sgl_get_max_bytes_pushed(slist) ((slist)->sgl_max_bytes_pushed)
#define ompi_sgl_set_max_bytes_pushed(slist, min_bytes) \
((slist)->sgl_max_bytes_pushed = min_bytes)
#define ompi_sgl_get_bytes_pushed(slist) ((slist)->sgl_bytes_pushed)
#define ompi_sgl_set_bytes_pushed(slist, min_bytes) \
((slist)->sgl_bytes_pushed = min_bytes)
#define ompi_sgl_get_max_consec_fail(slist) ((slist)->sgl_max_consec_fail)
#define ompi_sgl_set_max_consec_fail(slist, max_fail) \
((slist)->sgl_max_consec_fail = max_fail)
#define ompi_sgl_get_consec_fail(slist) ((slist)->sgl_consec_fail)
#define ompi_sgl_set_consec_fail(slist, fail) \
((slist)->sgl_consec_fail = fail)
#define ompi_sgl_inc_consec_fail(slist) \
((slist)->sgl_consec_fail++)
#endif /* SEG_LIST_H */


@@ -1,99 +0,0 @@
/*
* $HEADER$
*/
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "mem/sharedmem_util.h"
#include "util/output.h"
void *ompi_zero_alloc(size_t len, int mem_prot, int mem_flags)
{
void *ptr;
int fd, flags = mem_flags;
#ifndef __osf__
fd = -1;
#ifndef __APPLE__
fd = open("/dev/zero", O_RDWR);
if (fd < 0)
{
perror("/dev/zero");
close(fd);
return 0;
}
#else
flags = flags | MAP_ANON;
#endif /* __APPLE__ */
ptr = mmap(NULL, len, mem_prot, flags, fd, 0);
if ( ptr == MAP_FAILED )
{
ompi_output(0, "Error: mmap failed (%s)", strerror(errno));
close(fd);
return (void *)0;
}
close(fd);
#else /* this is __osf__ */
if( mem_flags & MAP_PRIVATE ) {
/*
* private memory allocation
*/
fd = open("/dev/zero", O_RDWR);
if (fd < 0)
{
perror("/dev/zero");
close(fd);
return 0;
}
ptr = mmap(NULL, len, mem_prot, mem_flags, fd, 0);
if ( ptr == MAP_FAILED )
{
fprintf(stderr,
" ZeroAlloc :: error in mmap(\"/dev/zero\") call. Bytes Allocated :: %ld\n", len);
fprintf(stderr, " errno :: %d\n", errno);
perror(" mmap failed");
close(fd);
return (void *)0;
}
close(fd);
} else {
long pageSize = sysconf(_SC_PAGESIZE);
long long paddedLen = len + (2 * pageSize);
ptr = ulm_malloc(paddedLen * sizeof(char));
if (!ptr) {
ulm_warn(("ZeroAlloc :: padded ulm_malloc() failed for "
"%lld bytes\n", paddedLen));
return (void *)0;
}
memset(ptr, 0, paddedLen * sizeof(char));
ptr = (char *)ptr + (pageSize - 1);
ptr = (void *)((long)ptr & ~(pageSize - 1));
/*
* shared memory allocation
*/
fd = -1;
ptr = mmap(ptr, len, mem_prot, MAP_FIXED | mem_flags, fd, 0);
if ( ptr == MAP_FAILED )
{
ulm_warn(("ZeroAlloc shared mmap error :: %d fd %d\n", errno, fd));
perror(" mmap failed");
return (void *)0;
}
} /* end memory allocation */
#endif /* __osf__ */
return ptr;
}


@@ -1,24 +0,0 @@
/*
* $HEADER$
*/
#ifndef SHAREDMEM_UTIL_H
#define SHAREDMEM_UTIL_H
#include <sys/types.h>
#include <sys/mman.h>
#define MMAP_SHARED_PROT PROT_READ|PROT_WRITE
#define MMAP_PRIVATE_PROT PROT_READ|PROT_WRITE
#define MMAP_PRIVATE_FLAGS MAP_PRIVATE
#ifndef __osf__
# define MMAP_SHARED_FLAGS MAP_SHARED
#else
# define MMAP_SHARED_FLAGS MAP_SHARED|MAP_ANONYMOUS
#endif
void *ompi_zero_alloc(size_t len, int mem_prot, int mem_flags);
#endif /* SHAREDMEM_UTIL_H */
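A short sketch of how these macros and ompi_zero_alloc() are meant to be combined, mirroring what the memory pool code above does; the length is an illustrative assumption.

    /* Hypothetical sketch: allocate one zero-filled, process-shared region. */
    size_t len = 4 * getpagesize();
    void *shared = ompi_zero_alloc(len, MMAP_SHARED_PROT, MMAP_SHARED_FLAGS);
    if (NULL == shared) {
        /* mmap failed; ompi_zero_alloc() already reported the error */
    }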


@@ -7,7 +7,6 @@
 #include "include/constants.h"
 #include "runtime/runtime.h"
 #include "util/output.h"
-#include "mem/malloc.h"
 int ompi_finalize(void)


@@ -11,7 +11,6 @@
 #include "util/output.h"
 #include "threads/mutex.h"
 #include "event/event.h"
-#include "mem/malloc.h"
 /**


@@ -15,6 +15,7 @@ headers = \
 few.h \
 hibit.h \
 if.h \
+malloc.h \
 output.h \
 path.h \
 sys_info.h \
@@ -28,6 +29,7 @@ libutil_la_SOURCES = \
 cmd_line.c \
 few.c \
 if.c \
+malloc.c \
 output.c \
 path.c \
 sys_info.c \


@@ -6,7 +6,7 @@
 #include <stdlib.h>
-#include "mem/malloc.h"
+#include "util/malloc.h"
 #include "util/output.h"

src/util/malloc.h: 142 lines added (normal file)

@@ -0,0 +1,142 @@
/*
* $HEADER$
*/
/** @file */
#ifndef OMPI_MALLOC_H
#define OMPI_MALLOC_H
#include <stdlib.h>
/*
* THIS FILE CANNOT INCLUDE ANY OTHER OMPI HEADER FILES!!!
*
* It is included via <ompi_config_bottom.h>. Hence, it should not
* include ANY other files, nor should it include "ompi_config.h".
*
*/
/*
* Set OMPI_MALLOC_DEBUG_LEVEL to
* 0 for no checking
* 1 for basic error checking
* 2 for more error checking
*/
#ifndef OMPI_MALLOC_DEBUG_LEVEL
#define OMPI_MALLOC_DEBUG_LEVEL 2
#endif
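Since the default is level 2, a build that wants a different amount of checking can pre-define the symbol before this header is reached; the flag shown below is only an illustration of that, not something this commit adds.

    /* Illustrative only: e.g. CPPFLAGS="-DOMPI_MALLOC_DEBUG_LEVEL=0", which is
       equivalent to the following being seen before this header is included. */
    #define OMPI_MALLOC_DEBUG_LEVEL 0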
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
/**
* Initialize malloc debug output.
*
* This function is invoked to set up a dedicated output stream for
* malloc debug functions. It does \em not (currently) do anything
* other than that (i.e., no internal accounting for tracking
* malloc/free statements, etc.).
*
* It is invoked as part of ompi_init(). Although this function is
* not \em necessary for OMPI_MALLOC() and OMPI_FREE(), it is strongly
* recommended because no output messages -- regardless of the
* malloc debug level set by ompi_malloc_debug() -- will be displayed
* unless this function is invoked first.
*/
void ompi_malloc_init(void);
/**
* Shut down malloc debug output.
*
* This function is invoked as part of ompi_finalize() to shut down
* the output stream for malloc debug messages.
*/
void ompi_malloc_finalize(void);
/**
* \internal
*
* Back-end error-checking malloc function for OMPI (you should use
* the normal malloc() instead of this function).
*
* @param size The number of bytes to allocate
* @param file Typically the __FILE__ macro
* @param line Typically the __LINE__ macro
*
* This function is only used when --enable-mem-debug was specified to
* configure (or by default if you're building in a SVN checkout).
*/
void *ompi_malloc(size_t size, char *file, int line);
/**
* \internal
*
* Back-end error-checking calloc function for OMPI (you should use
* the normal calloc() instead of this function).
*
* @param nmembers Number of elements to malloc
* @param size Size of each element
* @param file Typically the __FILE__ macro
* @param line Typically the __LINE__ macro
*
* This function is only used when --enable-mem-debug was specified to
* configure (or by default if you're building in a SVN checkout).
*/
void *ompi_calloc(size_t nmembers, size_t size, char *file, int line);
/**
* \internal
*
* Back-end error-checking realloc function for OMPI (you should use
* the normal realloc() instead of this function).
*
* @param ptr Pointer to reallocate
* @param size The number of bytes to allocate
* @param file Typically the __FILE__ macro
* @param line Typically the __LINE__ macro
*
* This function is only used when --enable-mem-debug was specified to
* configure (or by default if you're building in a SVN checkout).
*/
void *ompi_realloc(void *ptr, size_t size, char *file, int line);
/**
* \internal
*
* Back-end error-checking free function for OMPI (you should use
* free() instead of this function).
*
* @param addr Address on the heap to free()
* @param file Typically the __FILE__ macro
* @param line Typically the __LINE__ macro
*
* This function is only used when --enable-mem-debug was specified
* to configure (or by default if you're building in a SVN
* checkout).
*/
void ompi_free(void *addr, char *file, int line);
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
extern int ompi_malloc_debug_level;
extern int ompi_malloc_output;
static inline void ompi_malloc_debug(int level);
/**
* Used to set the debug level for malloc debug.
*
* @param level The level of debugging (0 = none, 1 = some, 2 = more)
*
* This value defaults to OMPI_MALLOC_DEBUG_LEVEL.
*/
static inline void ompi_malloc_debug(int level)
{
ompi_malloc_debug_level = level;
}
#endif /* OMPI_MALLOC_H */
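To round this out, a sketch of the back-end calls in isolation; in the tree these are normally reached through the regular malloc()/free() calls when memory debugging is enabled (see the notes in the function documentation above), so the direct calls below are purely illustrative.

    /* Hypothetical sketch: direct use of the debug back ends. */
    ompi_malloc_init();                        /* open the debug output stream */
    ompi_malloc_debug(1);                      /* basic error checking only */

    char *buf = ompi_malloc(128, __FILE__, __LINE__);
    if (NULL != buf) {
        /* ... use the buffer ... */
        ompi_free(buf, __FILE__, __LINE__);
    }
    ompi_malloc_finalize();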