
copy ptmalloc2 release into the main branch

This commit was SVN r6787.
Brian Barrett 2005-08-09 19:39:39 +00:00
parent bd4a0c4ee0
commit 2541f15880
24 changed files with 9451 additions and 0 deletions

opal/memory/ptmalloc2/COPYRIGHT (new file, 19 additions)

@@ -0,0 +1,19 @@
Copyright (c) 2001-2004 Wolfram Gloger
Permission to use, copy, modify, distribute, and sell this software
and its documentation for any purpose is hereby granted without fee,
provided that (i) the above copyright notices and this permission
notice appear in all copies of the software and related documentation,
and (ii) the name of Wolfram Gloger may not be used in any advertising
or publicity relating to the software.
THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
IN NO EVENT SHALL WOLFRAM GLOGER BE LIABLE FOR ANY SPECIAL,
INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, OR ANY
DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY
OF LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.

opal/memory/ptmalloc2/ChangeLog (new file, 181 additions)

@@ -0,0 +1,181 @@
2004-11-05 Wolfram Gloger <wg@malloc.de>
* malloc/hooks.c (malloc_starter, memalign_starter): Call
ptmalloc_init_minimal().
2004-11-04 Wolfram Gloger <wg@malloc.de>
* malloc/malloc.c (USE_STARTER): New macro.
* malloc/hooks.c: Use USE_STARTER.
* malloc/arena.c: Use USE_STARTER.
2004-08-13 Ulrich Drepper <drepper@redhat.com>
* malloc/malloc.c: Use strong_alias instead of weak_alias wherever
possible.
2002-12-06 Roland McGrath <roland@redhat.com>
* malloc/arena.c (ptmalloc_init_minimal): New function, broken out
of ptmalloc_init.
2002-08-23 Roland McGrath <roland@redhat.com>
* malloc/hooks.c (__malloc_initialize_hook, __free_hook,
__malloc_hook, __realloc_hook, __memalign_hook,
__after_morecore_hook): Variable definitions moved to ...
* malloc/malloc.c: ... here, so as to be before all references.
2004-10-19 Wolfram Gloger <wg@malloc.de>
* malloc/hooks.c (mem2chunk_check, top_check): Handle
non-contiguous arena. Reported by Michael Dalton
<mwdalton@stanford.edu> [BZ #457]. Add further checks for top
chunk.
2004-08-08 Wolfram Gloger <wg@malloc.de>
* include/malloc.h (mstate): Move type declaration from here...
* malloc/malloc.h: ...to here.
(struct malloc_arena_info, struct malloc_global_info): New types.
(_int_get_arena, _int_get_arena_info, _int_get_global_info): New
functions.
* malloc/malloc.c (mSTATS, public_mSTATs, mALLINFo): Remove.
(_int_get_arena_info, _int_get_global_info): New functions.
* malloc/arena.c (_int_get_arena): New function.
* malloc/malloc-stats.c: New file.
* malloc/tst-mstats.c: New file.
* malloc/Makefile (tests): Add tst-mstats.
(distribute): Remove no-longer existing thread-m.h.
(dist-routines): Add malloc-stats.
* malloc/Versions: Add _int_get_arena, _int_get_arena_info,
_int_get_global_info.
2004-07-25 Wolfram Gloger <wg@malloc.de>
* sysdeps/generic/thread-st.h: New file.
* sysdeps/pthread/thread-st.h: New file.
* sysdeps/sproc/thread-st.h: New file.
* sysdeps/solaris/thread-st.h: New file.
* thread-st.h: Removed.
2004-03-18 Ulrich Drepper <drepper@redhat.com>
* malloc/malloc.c (__posix_memalign): Correct alignment check.
Reported by Don Heller <dheller@cse.psu.edu>.
2003-12-17 Jakub Jelinek <jakub@redhat.com>
* malloc/malloc.c (__posix_memalign): If __memalign_hook != NULL,
call it directly instead of memalign_internal.
2003-09-27 Wolfram Gloger <wg@malloc.de>
* malloc/malloc.c: Include <malloc-machine.h> earlier instead of
"thread-m.h", so that default parameters can be overridden in a
system-specific malloc-machine.h. Remove extra ; from extern "C"
closing brace.
* sysdeps/generic/malloc-machine.h: New file.
* malloc/thread-m.h: Removed.
2003-09-08 Wolfram Gloger <wg@malloc.de>
* malloc/malloc.c (sYSMALLOc): Move foreign sbrk accounting into
contiguous case. Bug report from Prem Gopalan
<prem@mazunetworks.com>.
2003-08-18 Art Haas <ahaas@airmail.net>
* malloc/malloc.h: Remove unneeded ';' where closing the C++
extern block.
2003-06-18 Ulrich Drepper <drepper@redhat.com>
* malloc/malloc.c (public_mALLINFo): Initialize malloc if it
hasn't happened yet.
2003-05-28 Roland McGrath <roland@redhat.com>
* malloc/malloc.h [! __GNUC__] (__const): Define if undefined.
2003-05-04 H.J. Lu <hongjiu.lu@intel.com>
* malloc/arena.c (arena_get2): Add atomic_write_barrier.
* malloc/thread-m.h: Include <atomic.h>.
(atomic_full_barrier): Provide default.
(atomic_read_barrier): Likewise.
(atomic_write_barrier): Likewise.
2003-05-01 Ulrich Drepper <drepper@redhat.com>
* malloc/malloc.c (mSTATs): Call ptmalloc_init if necessary.
2003-01-27 Wolfram Gloger <wg@malloc.de>
* malloc/hooks.c (mem2chunk_check): Check alignment of mem
pointer, not of the computed chunk. Bug report from Carlos
O'Donell <carlos@baldric.uwo.ca>.
2002-12-27 Jakub Jelinek <jakub@redhat.com>
* malloc/arena.c (ptmalloc_init): Don't call next_env_entry if
_environ is NULL.
2002-12-17 Ulrich Drepper <drepper@redhat.com>
* malloc/malloc.c (mALLOPt): Make sure malloc is initialized.
2002-12-06 Roland McGrath <roland@redhat.com>
* malloc/hooks.c [_LIBC && (USE___THREAD || (USE_TLS && !SHARED))]
(malloc_starter, memalign_starter, free_starter): Don't define these.
* malloc/hooks.c (memalign_starter): New function.
* malloc/malloc.c: Declare it.
* malloc/arena.c (save_memalign_hook): New variable.
(ptmalloc_init): Set __memalign_hook to memalign_starter.
2002-11-18 Wolfram Gloger <wg@malloc.de>
* malloc/arena.c
(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2): Do
nothing if not initialized. Bug report from Marcus Brinkmann
<Marcus.Brinkmann@ruhr-uni-bochum.de>.
2002-10-07 Wolfram Gloger <wg@malloc.de>
* malloc/malloc.c (sYSMALLOc): Only check for breakage due
to foreign sbrk()'s if arena is contiguous. Bug report from
Bruno Haible <bruno@clisp.org>.
2002-07-11 Wolfram Gloger <wmglo@dent.med.uni-muenchen.de>
* malloc/hooks.c: typo fix in NO_THREADS case, realloc_check
fix in HAVE_MREMAP case.
2002-06-11 Wolfram Gloger <wg@malloc.de>
* malloc/malloc.c: Fix error path when new_heap() returns NULL.
Reported by Michael Meissner <meissner@redhat.com>.
2002-03-29 Wolfram Gloger <wg@malloc.de>
* malloc/malloc.c: Add short description and prototypes for
malloc_get_state, malloc_set_state and posix_memalign, for
consistency and to avoid warnings with -Wstrict-prototypes.
Reported by Andreas Jaeger <aj@suse.de>.
2002-03-13 Wolfram Gloger <wg@malloc.de>
* malloc/malloc.c (sYSMALLOc): Don't change brk if mmap
failed.
2002-01-18 Wolfram Gloger <wg@malloc.de>
* malloc/malloc.c: Rewrite, adapted from Doug Lea's malloc-2.7.0.c.
* malloc/malloc.h: Likewise.
* malloc/arena.c: New file.
* malloc/hooks.c: New file.
* malloc/tst-mallocstate.c: New file.
* malloc/Makefile: Add new testcase tst-mallocstate.
Add arena.c and hooks.c to distribute. Fix commented CPPFLAGS.

opal/memory/ptmalloc2/Makefile (new file, 218 additions)

@@ -0,0 +1,218 @@
# Makefile for ptmalloc, version 2
# by Wolfram Gloger 1996-1999, 2001, 2002, 2003, 2004
DIST_FILES0 = ChangeLog malloc.h malloc.c arena.c hooks.c \
malloc-stats.c \
sysdeps \
tst-mallocstate.c tst-mstats.c
DIST_FILES1 = COPYRIGHT README Makefile \
$(DIST_FILES0) \
lran2.h t-test.h t-test1.c t-test2.c \
#debian
DIST_FILES2 = $(DIST_FILES1) \
Makefile.glibc glibc-include RCS/*,v
# malloc-int.h
TAR_FLAGS = --numeric-owner --exclude "*~" --exclude "debian/tmp*"
#CC = /pkg/gcc-2.95.2-wg/bin/gcc
CC = cc
SYS_FLAGS =
OPT_FLAGS = -g -O # -O2
WARN_FLAGS = #-Wall -Wstrict-prototypes
SH_FLAGS = -shared -fpic
INC_FLAGS = -Isysdeps/generic
# Flags for the test programs
T_FLAGS = -DUSE_MALLOC=1 -DTEST=1
# Flags for the compilation of malloc.c
M_FLAGS = -DTHREAD_STATS=1 #-DMALLOC_DEBUG=1
# Thread flags.
# See the platform-specific targets below.
THR_FLAGS = -DUSE_TSD_DATA_HACK -D_REENTRANT
THR_LIBS = -lpthread
RM = rm -f
AR = ar
RANLIB = ranlib
MALLOC_OBJ = malloc.o malloc-stats.o
LIB_MALLOC = libmalloc.a
T_SUF =
TESTS = t-test1$(T_SUF) t-test2$(T_SUF) \
tst-mallocstate$(T_SUF) tst-mstats$(T_SUF)
CFLAGS = $(SYS_FLAGS) $(OPT_FLAGS) $(WARN_FLAGS) $(THR_FLAGS) $(INC_FLAGS)
.c.o:
$(CC) -c $(CFLAGS) $<
all: $(LIB_MALLOC) $(TESTS)
malloc.o: malloc.c malloc.h
$(CC) -c $(CFLAGS) $(M_FLAGS) $<
malloc-stats.o: malloc-stats.c malloc.h
$(CC) -c $(CFLAGS) $(M_FLAGS) $<
libmalloc.a: $(MALLOC_OBJ)
$(AR) cr $@ $(MALLOC_OBJ)
$(RANLIB) $@
shared: malloc.so
malloc.so: malloc.c malloc-stats.c malloc.h
$(CC) $(SH_FLAGS) $(CFLAGS) $(M_FLAGS) malloc.c malloc-stats.c -o $@
again:
$(RM) $(TESTS)
$(MAKE) $(TESTS)
clean:
$(RM) $(MALLOC_OBJ) libmalloc.a malloc.so $(TESTS) core core.[0-9]*
t-test1$(T_SUF): t-test1.c t-test.h $(LIB_MALLOC)
$(CC) $(CFLAGS) $(T_FLAGS) t-test1.c $(LIB_MALLOC) $(THR_LIBS) -o $@
t-test2$(T_SUF): t-test2.c t-test.h $(LIB_MALLOC)
$(CC) $(CFLAGS) $(T_FLAGS) t-test2.c $(LIB_MALLOC) $(THR_LIBS) -o $@
tst-mallocstate$(T_SUF): tst-mallocstate.c $(LIB_MALLOC)
$(CC) $(CFLAGS) $(T_FLAGS) tst-mallocstate.c $(LIB_MALLOC) \
$(THR_LIBS) -o $@
tst-mstats$(T_SUF): tst-mstats.c $(LIB_MALLOC)
$(CC) $(CFLAGS) $(T_FLAGS) tst-mstats.c $(LIB_MALLOC) \
$(THR_LIBS) -o $@
############################################################################
# Platform-specific targets. The ones ending in `-libc' are provided
# to enable comparison with the standard malloc implementation from
# the system's native C library. The option USE_TSD_DATA_HACK is now
# the default for pthreads systems, as most (Irix 6, Solaris 2) seem
# to need it. Try with USE_TSD_DATA_HACK undefined only if you're
# confident that your system's thread specific data functions do _not_
# use malloc themselves.
# posix threads with TSD data hack
posix:
$(MAKE) THR_FLAGS='-DUSE_TSD_DATA_HACK -D_REENTRANT' \
OPT_FLAGS='$(OPT_FLAGS)' SYS_FLAGS='$(SYS_FLAGS)' CC='$(CC)' \
INC_FLAGS='-Isysdeps/pthread -Isysdeps/generic -I.' \
THR_LIBS=-lpthread
# posix threads with explicit initialization. Known to be needed on HPUX.
posix-explicit:
$(MAKE) THR_FLAGS='-D_REENTRANT -DUSE_TSD_DATA_HACK -DUSE_STARTER=2' \
THR_LIBS=-lpthread \
OPT_FLAGS='$(OPT_FLAGS)' SYS_FLAGS='$(SYS_FLAGS)' CC='$(CC)' \
INC_FLAGS='-Isysdeps/pthread -Isysdeps/generic -I.' \
M_FLAGS='$(M_FLAGS)'
# posix threads without TSD data hack -- not known to work
posix-with-tsd:
$(MAKE) THR_FLAGS='-D_REENTRANT' THR_LIBS=-lpthread \
INC_FLAGS='-Isysdeps/pthread -Isysdeps/generic -I.' \
M_FLAGS='$(M_FLAGS)'
posix-libc:
$(MAKE) THR_FLAGS='-D_REENTRANT' THR_LIBS=-lpthread \
INC_FLAGS='-Isysdeps/pthread -Isysdeps/generic -I.' \
M_FLAGS='$(M_FLAGS)' LIB_MALLOC= T_FLAGS= T_SUF=-libc
linux-pthread:
$(MAKE) SYS_FLAGS='-D_GNU_SOURCE=1' \
WARN_FLAGS='-Wall -Wstrict-prototypes' \
OPT_FLAGS='$(OPT_FLAGS)' THR_FLAGS='-DUSE_TSD_DATA_HACK' \
INC_FLAGS='-Isysdeps/pthread -Isysdeps/generic -I.' M_FLAGS='$(M_FLAGS)' \
TESTS='$(TESTS)'
linux-malloc.so:
$(MAKE) SYS_FLAGS='-D_GNU_SOURCE=1' \
WARN_FLAGS='-Wall -Wstrict-prototypes' \
OPT_FLAGS='$(OPT_FLAGS)' THR_FLAGS='-DUSE_TSD_DATA_HACK' \
INC_FLAGS='-Isysdeps/pthread -Isysdeps/generic -I.' M_FLAGS='$(M_FLAGS)' \
malloc.so
sproc:
$(MAKE) THR_FLAGS='' THR_LIBS='' OPT_FLAGS='$(OPT_FLAGS)' CC='$(CC)' \
INC_FLAGS='-Isysdeps/sproc -Isysdeps/generic -I.' \
M_FLAGS='$(M_FLAGS)'
sproc-shared:
$(MAKE) THR_FLAGS='' THR_LIBS= \
SH_FLAGS='-shared -check_registry /usr/lib/so_locations' \
INC_FLAGS='-Isysdeps/sproc -Isysdeps/generic -I.' \
LIB_MALLOC=malloc.so M_FLAGS='$(M_FLAGS)'
sproc-libc:
$(MAKE) THR_FLAGS='1' THR_LIBS= LIB_MALLOC= T_FLAGS= \
INC_FLAGS='-Isysdeps/sproc -Isysdeps/generic -I.' \
T_SUF=-libc M_FLAGS='$(M_FLAGS)'
solaris:
$(MAKE) THR_FLAGS='-D_REENTRANT' OPT_FLAGS='$(OPT_FLAGS)' \
INC_FLAGS='-Isysdeps/solaris -Isysdeps/generic -I.' \
THR_LIBS=-lthread M_FLAGS='$(M_FLAGS)'
solaris-libc:
$(MAKE) THR_FLAGS='-D_REENTRANT' OPT_FLAGS='$(OPT_FLAGS)' \
INC_FLAGS='-Isysdeps/solaris -Isysdeps/generic -I.' \
THR_LIBS=-lthread LIB_MALLOC= T_FLAGS= T_SUF=-libc M_FLAGS='$(M_FLAGS)'
nothreads:
$(MAKE) OPT_FLAGS='$(OPT_FLAGS)' SYS_FLAGS='$(SYS_FLAGS)' \
INC_FLAGS='-Isysdeps/generic -I.' \
THR_FLAGS='' THR_LIBS='' M_FLAGS='$(M_FLAGS)'
gcc-nothreads:
$(MAKE) CC='gcc' WARN_FLAGS='-Wall' OPT_FLAGS='$(OPT_FLAGS)' \
INC_FLAGS='-Isysdeps/generic -I.' \
SYS_FLAGS='$(SYS_FLAGS)' THR_FLAGS='' THR_LIBS='' M_FLAGS='$(M_FLAGS)'
linux-nothreads:
$(MAKE) CC='gcc' WARN_FLAGS='-Wall' OPT_FLAGS='$(OPT_FLAGS)' \
INC_FLAGS='-Isysdeps/generic -I.' \
SYS_FLAGS='-D_GNU_SOURCE' THR_FLAGS='' THR_LIBS='' M_FLAGS='$(M_FLAGS)'
# note: non-ANSI compilers are no longer considered important
traditional:
$(MAKE) THR_FLAGS='' THR_LIBS='' CC='gcc -traditional'
#glibc-test:
# $(MAKE) THR_FLAGS='-DUSE_PTHREADS=1 -D_LIBC' \
# SYS_FLAGS='-D_GNU_SOURCE=1 ' \
# WARN_FLAGS='-Wall -Wstrict-prototypes -Wbad-function-cast -Wmissing-noreturn -Wmissing-prototypes -Wmissing-declarations -Wcomment -Wcomments -Wtrigraphs -Wmultichar -Wstrict-prototypes -Winline' \
# INC_FLAGS='-Iglibc-include -include glibc-include/libc-symbols.h' \
# malloc.o && mv malloc.o malloc-glibc.o
############################################################################
check: $(TESTS)
./t-test1
./t-test2
./tst-mallocstate || echo "Test mallocstate failed!"
./tst-mstats || echo "Test mstats failed!"
snap:
cd ..; tar $(TAR_FLAGS) -c -f - $(DIST_FILES1:%=ptmalloc2/%) | \
gzip -9 >ptmalloc2-current.tar.gz
dist:
cd ..; tar $(TAR_FLAGS) -c -f - $(DIST_FILES2:%=ptmalloc2/%) | \
gzip -9 >ptmalloc2.tar.gz
Makefile.glibc.diff: Makefile.glibc
-diff -u /mount/public/export/glibc/cvs/libc/malloc/Makefile \
Makefile.glibc >$@
dist-glibc: Makefile.glibc.diff
tar cf - $(DIST_FILES0) Makefile.glibc.diff | \
gzip -9 >../libc.malloc.tar.gz
# dependencies
malloc.o: arena.c hooks.c

opal/memory/ptmalloc2/README (new file, 192 additions)

@@ -0,0 +1,192 @@
ptmalloc2 - a multi-thread malloc implementation
================================================
Wolfram Gloger (wg@malloc.de)
Nov 2004
Introduction
============
This package is a modified version of Doug Lea's malloc-2.7.1pre
implementation (available separately from ftp://g.oswego.edu/pub/misc)
that I adapted for multiple threads, while trying to avoid lock
contention as much as possible. Many thanks should go to Doug Lea
(dl@cs.oswego.edu) for the great original malloc implementation.
As part of the GNU C library, the source files are available under the
GNU Library General Public License (see the comments in the files).
But as part of this stand-alone package, the code is also available
under the (probably less restrictive) conditions described in the file
'COPYRIGHT'. In any case, there is no warranty whatsoever for this
package.
The current distribution should be available from:
http://www.malloc.de/malloc/ptmalloc2.tar.gz
Compilation
===========
It should be possible to build ptmalloc2 on any UN*X-like system that
implements the sbrk(), mmap(), munmap() and mprotect() calls. If
mmap() is not available, it is only possible to produce a
non-threadsafe implementation. Since there are now several source
files, a library (libmalloc.a) is generated. See the Makefile for
examples of the compile-time options.
Note that support for non-ANSI compilers is no longer a significant
goal.
Several example targets are provided in the Makefile:
o Posix threads (pthreads), compile with "make posix"
o Posix threads with explicit initialization, compile with
"make posix-explicit" (known to be required on HPUX)
o Posix threads without "tsd data hack" (see below), compile with
"make posix-with-tsd"
o Solaris threads, compile with "make solaris"
o SGI sproc() threads, compile with "make sproc"
o no threads, compile with "make nothreads"
For Linux:
o make "linux-pthread" (almost the same as "make posix")
Note that some compilers need special flags for multi-threaded code,
e.g. with Solaris cc and Posix threads, one should use:
% make posix SYS_FLAGS='-mt'
Some additional targets, ending in `-libc', are also provided in the
Makefile, to compare performance of the test programs to the case when
linking with the standard malloc implementation in libc.
A potential problem remains: If any of the system-specific functions
for getting/setting thread-specific data or for locking a mutex call
one of the malloc-related functions internally, the implementation
cannot work at all due to infinite recursion. One example seems to be
Solaris 2.4. I would like to hear if this problem occurs on other
systems, and whether similar workarounds could be applied.
For Posix threads, too, an optional hack like that has been integrated
(activated when defining USE_TSD_DATA_HACK) which depends on
`pthread_t' being convertible to an integral type (which is of course
not generally guaranteed). USE_TSD_DATA_HACK is now the default
because I haven't yet found a non-glibc pthreads system where this
hack is _not_ needed.
*NEW* and _important_: In (currently) one place in the ptmalloc2
source, a write memory barrier is needed, named
atomic_write_barrier(). This macro needs to be defined at the end of
malloc-machine.h. For gcc, a fallback in the form of a full memory
barrier is already defined, but you may need to add another definition
if you don't use gcc.
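As an illustration only, such a definition at the end of malloc-machine.h
might look like the sketch below (the Solaris membar_producer() call is an
assumption about that platform's <atomic.h>, not something shipped with
ptmalloc2; substitute whatever store barrier your compiler provides):

#ifndef atomic_write_barrier
# if defined(__sun)
#  include <atomic.h>                     /* Solaris memory barrier routines */
#  define atomic_write_barrier() membar_producer()
# else
#  error "define atomic_write_barrier() for this compiler/platform"
# endif
#endif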
Usage
=====
Just link libmalloc.a into your application.
Some wicked systems (e.g. HPUX apparently) won't let malloc call _any_
thread-related functions before main(). On these systems,
USE_STARTER=2 must be defined during compilation (see "make
posix-explicit" above) and the global initialization function
ptmalloc_init() must be called explicitly, preferably at the start of
main().
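For illustration, a minimal sketch of such an explicit initialization
(assuming a build with USE_STARTER=2, in which case ptmalloc_init() is
exported by this library) might look like:

extern void ptmalloc_init(void);  /* global init, exported with USE_STARTER=2 */

int main(int argc, char **argv)
{
  ptmalloc_init();                /* run before any other malloc/thread use */
  /* ... rest of the application, using malloc()/free() as usual ... */
  return 0;
}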
Otherwise, when using ptmalloc2, no special precautions are necessary.
Link order is important
=======================
On some systems, when overriding malloc and linking against shared
libraries, the link order becomes very important. E.g., when linking
C++ programs on Solaris, don't rely on libC being included by default,
but instead put `-lthread' behind `-lC' on the command line:
CC ... libmalloc.a -lC -lthread
This is because there are global constructors in libC that need
malloc/ptmalloc, which in turn needs to have the thread library to be
already initialized.
Debugging hooks
===============
All calls to malloc(), realloc(), free() and memalign() are routed
through the global function pointers __malloc_hook, __realloc_hook,
__free_hook and __memalign_hook if they are not NULL (see the malloc.h
header file for declarations of these pointers). Therefore the malloc
implementation can be changed at runtime, if care is taken not to call
free() or realloc() on pointers obtained with a different
implementation than the one currently in effect. (The easiest way to
guarantee this is to set up the hooks before any malloc call, e.g.
with a function pointed to by the global variable
__malloc_initialize_hook).
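As a hedged illustration of that idiom (the tracing behaviour here is just
an example, not part of ptmalloc2), a hook could be installed like this:

#include <stdio.h>
#include <stdlib.h>
#include "malloc.h"

static void *(*old_malloc_hook) (size_t, const void *);

static void *
my_malloc_hook (size_t size, const void *caller)
{
  void *result;
  __malloc_hook = old_malloc_hook;   /* restore old hook to avoid recursion */
  result = malloc (size);
  fprintf (stderr, "malloc(%lu) = %p\n", (unsigned long) size, result);
  old_malloc_hook = __malloc_hook;   /* the hook may have changed meanwhile */
  __malloc_hook = my_malloc_hook;    /* re-install this tracing hook */
  return result;
}

static void
my_init_hook (void)
{
  old_malloc_hook = __malloc_hook;
  __malloc_hook = my_malloc_hook;
}

/* ptmalloc2 calls this pointer, if non-NULL, once during initialization. */
void (*__malloc_initialize_hook) (void) = my_init_hook;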
A useful application of the hooks is built into ptmalloc2: The
implementation is usually very unforgiving with respect to misuse,
such as free()ing a pointer twice or free()ing a pointer not obtained
with malloc() (these will typically crash the application
immediately). To debug in such situations, you can set the
environment variable `MALLOC_CHECK_' (note the trailing underscore).
Performance will suffer somewhat, but you will get more controlled
behaviour in the case of misuse. If MALLOC_CHECK_=0, wrong free()s
will be silently ignored, if MALLOC_CHECK_=1, diagnostics will be
printed on stderr, and if MALLOC_CHECK_=2, abort() will be called on
any error.
You can now also tune other malloc parameters (normally adjusted via
mallopt() calls from the application) with environment variables:
MALLOC_TRIM_THRESHOLD_ for deciding to shrink the heap (in bytes)
MALLOC_TOP_PAD_ how much extra memory to allocate on
each system call (in bytes)
MALLOC_MMAP_THRESHOLD_ min. size for chunks allocated via
mmap() (in bytes)
MALLOC_MMAP_MAX_ max. number of mmapped regions to use
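Equivalently, a program can set these parameters itself through mallopt();
a small hedged sketch (the numeric values are arbitrary examples, not
recommended settings):

#include "malloc.h"

static void tune_malloc(void)
{
  mallopt(M_TRIM_THRESHOLD, 256*1024);   /* shrink heap once 256k is unused  */
  mallopt(M_TOP_PAD,         64*1024);   /* extra padding per sbrk() request */
  mallopt(M_MMAP_THRESHOLD, 512*1024);   /* chunks >= 512k go through mmap() */
  mallopt(M_MMAP_MAX,            256);   /* at most 256 mmap()ed regions     */
}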
Tests
=====
Two testing applications, t-test1 and t-test2, are included in this
source distribution. Both perform pseudo-random sequences of
allocations/frees, and can be given numeric arguments (all arguments
are optional):
% t-test[12] <n-total> <n-parallel> <n-allocs> <size-max> <bins>
n-total = total number of threads executed (default 10)
n-parallel = number of threads running in parallel (2)
n-allocs = number of malloc()'s / free()'s per thread (10000)
size-max = max. size requested with malloc() in bytes (10000)
bins = number of bins to maintain
The first test `t-test1' maintains a completely separate pool of
allocated bins for each thread, and should therefore show full
parallelism. On the other hand, `t-test2' creates only a single pool
of bins, and each thread randomly allocates/frees any bin. Some lock
contention is to be expected in this case, as the threads frequently
cross each other's arenas.
Performance results from t-test1 should be quite repeatable, while the
behaviour of t-test2 depends on scheduling variations.
Conclusion
==========
I'm always interested in performance data and feedback, just send mail
to ptmalloc@malloc.de.
Good luck!

opal/memory/ptmalloc2/arena.c (new file, 800 additions)

@@ -0,0 +1,800 @@
/* Malloc implementation for multiple threads without lock contention.
Copyright (C) 2001 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
/* $Id: arena.c,v 1.9 2004/11/05 14:42:23 wg Exp $ */
/* Compile-time constants. */
#define HEAP_MIN_SIZE (32*1024)
#ifndef HEAP_MAX_SIZE
#define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
#endif
/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
that are dynamically created for multi-threaded programs. The
maximum size must be a power of two, for fast determination of
which heap belongs to a chunk. It should be much larger than the
mmap threshold, so that requests with a size just below that
threshold can be fulfilled without creating too many heaps. */
#ifndef THREAD_STATS
#define THREAD_STATS 0
#endif
/* If THREAD_STATS is non-zero, some statistics on mutex locking are
computed. */
/***************************************************************************/
#define top(ar_ptr) ((ar_ptr)->top)
/* A heap is a single contiguous memory region holding (coalesceable)
malloc_chunks. It is allocated with mmap() and always starts at an
address aligned to HEAP_MAX_SIZE. Not used unless compiling with
USE_ARENAS. */
typedef struct _heap_info {
mstate ar_ptr; /* Arena for this heap. */
struct _heap_info *prev; /* Previous heap. */
size_t size; /* Current size in bytes. */
size_t pad; /* Make sure the following data is properly aligned. */
} heap_info;
/* Thread specific data */
static tsd_key_t arena_key;
static mutex_t list_lock;
#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif
/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;
/* Already initialized? */
int __malloc_initialized = -1;
/**************************************************************************/
#if USE_ARENAS
/* arena_get() acquires an arena and locks the corresponding mutex.
First, try the one last locked successfully by this thread. (This
is the common case and handled with a macro for speed.) Then, loop
once over the circularly linked list of arenas. If no arena is
readily available, create a new one. In this latter case, `size'
is just a hint as to how much memory will be required immediately
in the new arena. */
#define arena_get(ptr, size) do { \
Void_t *vptr = NULL; \
ptr = (mstate)tsd_getspecific(arena_key, vptr); \
if(ptr && !mutex_trylock(&ptr->mutex)) { \
THREAD_STAT(++(ptr->stat_lock_direct)); \
} else \
ptr = arena_get2(ptr, (size)); \
} while(0)
/* find the heap and corresponding arena for a given ptr */
#define heap_for_ptr(ptr) \
((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_chunk(ptr) \
(chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
#else /* !USE_ARENAS */
/* There is only one arena, main_arena. */
#if THREAD_STATS
#define arena_get(ar_ptr, sz) do { \
ar_ptr = &main_arena; \
if(!mutex_trylock(&ar_ptr->mutex)) \
++(ar_ptr->stat_lock_direct); \
else { \
(void)mutex_lock(&ar_ptr->mutex); \
++(ar_ptr->stat_lock_wait); \
} \
} while(0)
#else
#define arena_get(ar_ptr, sz) do { \
ar_ptr = &main_arena; \
(void)mutex_lock(&ar_ptr->mutex); \
} while(0)
#endif
#define arena_for_chunk(ptr) (&main_arena)
#endif /* USE_ARENAS */
/**************************************************************************/
#ifndef NO_THREADS
/* atfork support. */
static __malloc_ptr_t (*save_malloc_hook) __MALLOC_P ((size_t __size,
__const __malloc_ptr_t));
# if !defined _LIBC || !defined USE_TLS || (defined SHARED && !USE___THREAD)
static __malloc_ptr_t (*save_memalign_hook) __MALLOC_P ((size_t __align,
size_t __size,
__const __malloc_ptr_t));
# endif
static void (*save_free_hook) __MALLOC_P ((__malloc_ptr_t __ptr,
__const __malloc_ptr_t));
static Void_t* save_arena;
/* Magic value for the thread-specific arena pointer when
malloc_atfork() is in use. */
#define ATFORK_ARENA_PTR ((Void_t*)-1)
/* The following hooks are used while the `atfork' handling mechanism
is active. */
static Void_t*
malloc_atfork(size_t sz, const Void_t *caller)
{
Void_t *vptr = NULL;
Void_t *victim;
tsd_getspecific(arena_key, vptr);
if(vptr == ATFORK_ARENA_PTR) {
/* We are the only thread that may allocate at all. */
if(save_malloc_hook != malloc_check) {
return _int_malloc(&main_arena, sz);
} else {
if(top_check()<0)
return 0;
victim = _int_malloc(&main_arena, sz+1);
return mem2mem_check(victim, sz);
}
} else {
/* Suspend the thread until the `atfork' handlers have completed.
By that time, the hooks will have been reset as well, so that
mALLOc() can be used again. */
(void)mutex_lock(&list_lock);
(void)mutex_unlock(&list_lock);
return public_mALLOc(sz);
}
}
static void
free_atfork(Void_t* mem, const Void_t *caller)
{
Void_t *vptr = NULL;
mstate ar_ptr;
mchunkptr p; /* chunk corresponding to mem */
if (mem == 0) /* free(0) has no effect */
return;
p = mem2chunk(mem); /* do not bother to replicate free_check here */
#if HAVE_MMAP
if (chunk_is_mmapped(p)) /* release mmapped memory. */
{
munmap_chunk(p);
return;
}
#endif
ar_ptr = arena_for_chunk(p);
tsd_getspecific(arena_key, vptr);
if(vptr != ATFORK_ARENA_PTR)
(void)mutex_lock(&ar_ptr->mutex);
_int_free(ar_ptr, mem);
if(vptr != ATFORK_ARENA_PTR)
(void)mutex_unlock(&ar_ptr->mutex);
}
/* The following two functions are registered via thread_atfork() to
make sure that the mutexes remain in a consistent state in the
fork()ed version of a thread. Also adapt the malloc and free hooks
temporarily, because the `atfork' handler mechanism may use
malloc/free internally (e.g. in LinuxThreads). */
static void
ptmalloc_lock_all __MALLOC_P((void))
{
mstate ar_ptr;
if(__malloc_initialized < 1)
return;
(void)mutex_lock(&list_lock);
for(ar_ptr = &main_arena;;) {
(void)mutex_lock(&ar_ptr->mutex);
ar_ptr = ar_ptr->next;
if(ar_ptr == &main_arena) break;
}
save_malloc_hook = __malloc_hook;
save_free_hook = __free_hook;
__malloc_hook = malloc_atfork;
__free_hook = free_atfork;
/* Only the current thread may perform malloc/free calls now. */
tsd_getspecific(arena_key, save_arena);
tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
}
static void
ptmalloc_unlock_all __MALLOC_P((void))
{
mstate ar_ptr;
if(__malloc_initialized < 1)
return;
tsd_setspecific(arena_key, save_arena);
__malloc_hook = save_malloc_hook;
__free_hook = save_free_hook;
for(ar_ptr = &main_arena;;) {
(void)mutex_unlock(&ar_ptr->mutex);
ar_ptr = ar_ptr->next;
if(ar_ptr == &main_arena) break;
}
(void)mutex_unlock(&list_lock);
}
#ifdef __linux__
/* In LinuxThreads, unlocking a mutex in the child process after a
fork() is currently unsafe, whereas re-initializing it is safe and
does not leak resources. Therefore, a special atfork handler is
installed for the child. */
static void
ptmalloc_unlock_all2 __MALLOC_P((void))
{
mstate ar_ptr;
if(__malloc_initialized < 1)
return;
#if defined _LIBC || defined MALLOC_HOOKS
tsd_setspecific(arena_key, save_arena);
__malloc_hook = save_malloc_hook;
__free_hook = save_free_hook;
#endif
for(ar_ptr = &main_arena;;) {
(void)mutex_init(&ar_ptr->mutex);
ar_ptr = ar_ptr->next;
if(ar_ptr == &main_arena) break;
}
(void)mutex_init(&list_lock);
}
#else
#define ptmalloc_unlock_all2 ptmalloc_unlock_all
#endif
#endif /* !defined NO_THREADS */
/* Initialization routine. */
#ifdef _LIBC
#include <string.h>
extern char **_environ;
static char *
internal_function
next_env_entry (char ***position)
{
char **current = *position;
char *result = NULL;
while (*current != NULL)
{
if (__builtin_expect ((*current)[0] == 'M', 0)
&& (*current)[1] == 'A'
&& (*current)[2] == 'L'
&& (*current)[3] == 'L'
&& (*current)[4] == 'O'
&& (*current)[5] == 'C'
&& (*current)[6] == '_')
{
result = &(*current)[7];
/* Save current position for next visit. */
*position = ++current;
break;
}
++current;
}
return result;
}
#endif /* _LIBC */
/* Set up basic state so that _int_malloc et al can work. */
static void
ptmalloc_init_minimal __MALLOC_P((void))
{
#if DEFAULT_TOP_PAD != 0
mp_.top_pad = DEFAULT_TOP_PAD;
#endif
mp_.n_mmaps_max = DEFAULT_MMAP_MAX;
mp_.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
mp_.trim_threshold = DEFAULT_TRIM_THRESHOLD;
mp_.pagesize = malloc_getpagesize;
}
#if !(USE_STARTER & 2)
static
#endif
void
ptmalloc_init __MALLOC_P((void))
{
#if __STD_C
const char* s;
#else
char* s;
#endif
int secure = 0;
if(__malloc_initialized >= 0) return;
__malloc_initialized = 0;
if (mp_.pagesize == 0)
ptmalloc_init_minimal();
#ifndef NO_THREADS
# if USE_STARTER & 1
/* With some threads implementations, creating thread-specific data
or initializing a mutex may call malloc() itself. Provide a
simple starter version (realloc() won't work). */
save_malloc_hook = __malloc_hook;
save_memalign_hook = __memalign_hook;
save_free_hook = __free_hook;
__malloc_hook = malloc_starter;
__memalign_hook = memalign_starter;
__free_hook = free_starter;
# ifdef _LIBC
/* Initialize the pthreads interface. */
if (__pthread_initialize != NULL)
__pthread_initialize();
# endif /* !defined _LIBC */
# endif /* USE_STARTER & 1 */
#endif /* !defined NO_THREADS */
mutex_init(&main_arena.mutex);
main_arena.next = &main_arena;
mutex_init(&list_lock);
tsd_key_create(&arena_key, NULL);
tsd_setspecific(arena_key, (Void_t *)&main_arena);
thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
#ifndef NO_THREADS
# if USE_STARTER & 1
__malloc_hook = save_malloc_hook;
__memalign_hook = save_memalign_hook;
__free_hook = save_free_hook;
# endif
# if USE_STARTER & 2
__malloc_hook = 0;
__memalign_hook = 0;
__free_hook = 0;
# endif
#endif
#ifdef _LIBC
secure = __libc_enable_secure;
s = NULL;
if (__builtin_expect (_environ != NULL, 1))
{
char **runp = _environ;
char *envline;
while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
0))
{
size_t len = strcspn (envline, "=");
if (envline[len] != '=')
/* This is a "MALLOC_" variable at the end of the string
without a '=' character. Ignore it since otherwise we
will access invalid memory below. */
continue;
switch (len)
{
case 6:
if (memcmp (envline, "CHECK_", 6) == 0)
s = &envline[7];
break;
case 8:
if (! secure && memcmp (envline, "TOP_PAD_", 8) == 0)
mALLOPt(M_TOP_PAD, atoi(&envline[9]));
break;
case 9:
if (! secure && memcmp (envline, "MMAP_MAX_", 9) == 0)
mALLOPt(M_MMAP_MAX, atoi(&envline[10]));
break;
case 15:
if (! secure)
{
if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
mALLOPt(M_TRIM_THRESHOLD, atoi(&envline[16]));
else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
mALLOPt(M_MMAP_THRESHOLD, atoi(&envline[16]));
}
break;
default:
break;
}
}
}
#else
if (! secure)
{
if((s = getenv("MALLOC_TRIM_THRESHOLD_")))
mALLOPt(M_TRIM_THRESHOLD, atoi(s));
if((s = getenv("MALLOC_TOP_PAD_")))
mALLOPt(M_TOP_PAD, atoi(s));
if((s = getenv("MALLOC_MMAP_THRESHOLD_")))
mALLOPt(M_MMAP_THRESHOLD, atoi(s));
if((s = getenv("MALLOC_MMAP_MAX_")))
mALLOPt(M_MMAP_MAX, atoi(s));
}
s = getenv("MALLOC_CHECK_");
#endif
if(s) {
if(s[0]) mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
__malloc_check_init();
}
if(__malloc_initialize_hook != NULL)
(*__malloc_initialize_hook)();
__malloc_initialized = 1;
}
/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
ptmalloc_unlock_all2)
#endif
/* Managing heaps and arenas (for concurrent threads) */
#if USE_ARENAS
#if MALLOC_DEBUG > 1
/* Print the complete contents of a single heap to stderr. */
static void
#if __STD_C
dump_heap(heap_info *heap)
#else
dump_heap(heap) heap_info *heap;
#endif
{
char *ptr;
mchunkptr p;
fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
(char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
~MALLOC_ALIGN_MASK);
for(;;) {
fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
if(p == top(heap->ar_ptr)) {
fprintf(stderr, " (top)\n");
break;
} else if(p->size == (0|PREV_INUSE)) {
fprintf(stderr, " (fence)\n");
break;
}
fprintf(stderr, "\n");
p = next_chunk(p);
}
}
#endif /* MALLOC_DEBUG > 1 */
/* Create a new heap. size is automatically rounded up to a multiple
of the page size. */
static heap_info *
internal_function
#if __STD_C
new_heap(size_t size, size_t top_pad)
#else
new_heap(size, top_pad) size_t size, top_pad;
#endif
{
size_t page_mask = malloc_getpagesize - 1;
char *p1, *p2;
unsigned long ul;
heap_info *h;
if(size+top_pad < HEAP_MIN_SIZE)
size = HEAP_MIN_SIZE;
else if(size+top_pad <= HEAP_MAX_SIZE)
size += top_pad;
else if(size > HEAP_MAX_SIZE)
return 0;
else
size = HEAP_MAX_SIZE;
size = (size + page_mask) & ~page_mask;
/* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
No swap space needs to be reserved for the following large
mapping (on Linux, this is the case for all non-writable mappings
anyway). */
p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
if(p1 != MAP_FAILED) {
p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1)) & ~(HEAP_MAX_SIZE-1));
ul = p2 - p1;
munmap(p1, ul);
munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
} else {
/* Try to take the chance that an allocation of only HEAP_MAX_SIZE
is already aligned. */
p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
if(p2 == MAP_FAILED)
return 0;
if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
munmap(p2, HEAP_MAX_SIZE);
return 0;
}
}
if(mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
munmap(p2, HEAP_MAX_SIZE);
return 0;
}
h = (heap_info *)p2;
h->size = size;
THREAD_STAT(stat_n_heaps++);
return h;
}
/* Grow or shrink a heap. size is automatically rounded up to a
multiple of the page size if it is positive. */
static int
#if __STD_C
grow_heap(heap_info *h, long diff)
#else
grow_heap(h, diff) heap_info *h; long diff;
#endif
{
size_t page_mask = malloc_getpagesize - 1;
long new_size;
if(diff >= 0) {
diff = (diff + page_mask) & ~page_mask;
new_size = (long)h->size + diff;
if(new_size > HEAP_MAX_SIZE)
return -1;
if(mprotect((char *)h + h->size, diff, PROT_READ|PROT_WRITE) != 0)
return -2;
} else {
new_size = (long)h->size + diff;
if(new_size < (long)sizeof(*h))
return -1;
/* Try to re-map the extra heap space freshly to save memory, and
make it inaccessible. */
if((char *)MMAP((char *)h + new_size, -diff, PROT_NONE,
MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED)
return -2;
/*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
}
h->size = new_size;
return 0;
}
/* Delete a heap. */
#define delete_heap(heap) munmap((char*)(heap), HEAP_MAX_SIZE)
static int
internal_function
#if __STD_C
heap_trim(heap_info *heap, size_t pad)
#else
heap_trim(heap, pad) heap_info *heap; size_t pad;
#endif
{
mstate ar_ptr = heap->ar_ptr;
unsigned long pagesz = mp_.pagesize;
mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
heap_info *prev_heap;
long new_size, top_size, extra;
/* Can this heap go away completely? */
while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
prev_heap = heap->prev;
p = chunk_at_offset(prev_heap, prev_heap->size - (MINSIZE-2*SIZE_SZ));
assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
p = prev_chunk(p);
new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ);
assert(new_size>0 && new_size<(long)(2*MINSIZE));
if(!prev_inuse(p))
new_size += p->prev_size;
assert(new_size>0 && new_size<HEAP_MAX_SIZE);
if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
break;
ar_ptr->system_mem -= heap->size;
arena_mem -= heap->size;
delete_heap(heap);
heap = prev_heap;
if(!prev_inuse(p)) { /* consolidate backward */
p = prev_chunk(p);
unlink(p, bck, fwd);
}
assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
top(ar_ptr) = top_chunk = p;
set_head(top_chunk, new_size | PREV_INUSE);
/*check_chunk(ar_ptr, top_chunk);*/
}
top_size = chunksize(top_chunk);
extra = ((top_size - pad - MINSIZE + (pagesz-1))/pagesz - 1) * pagesz;
if(extra < (long)pagesz)
return 0;
/* Try to shrink. */
if(grow_heap(heap, -extra) != 0)
return 0;
ar_ptr->system_mem -= extra;
arena_mem -= extra;
/* Success. Adjust top accordingly. */
set_head(top_chunk, (top_size - extra) | PREV_INUSE);
/*check_chunk(ar_ptr, top_chunk);*/
return 1;
}
static mstate
internal_function
#if __STD_C
arena_get2(mstate a_tsd, size_t size)
#else
arena_get2(a_tsd, size) mstate a_tsd; size_t size;
#endif
{
mstate a;
int err;
if(!a_tsd)
a = a_tsd = &main_arena;
else {
a = a_tsd->next;
if(!a) {
/* This can only happen while initializing the new arena. */
(void)mutex_lock(&main_arena.mutex);
THREAD_STAT(++(main_arena.stat_lock_wait));
return &main_arena;
}
}
/* Check the global, circularly linked list for available arenas. */
repeat:
do {
if(!mutex_trylock(&a->mutex)) {
THREAD_STAT(++(a->stat_lock_loop));
tsd_setspecific(arena_key, (Void_t *)a);
return a;
}
a = a->next;
} while(a != a_tsd);
/* If not even the list_lock can be obtained, try again. This can
happen during `atfork', or for example on systems where thread
creation makes it temporarily impossible to obtain _any_
locks. */
if(mutex_trylock(&list_lock)) {
a = a_tsd;
goto repeat;
}
(void)mutex_unlock(&list_lock);
/* Nothing immediately available, so generate a new arena. */
a = _int_new_arena(size);
if(!a)
return 0;
tsd_setspecific(arena_key, (Void_t *)a);
mutex_init(&a->mutex);
err = mutex_lock(&a->mutex); /* remember result */
/* Add the new arena to the global list. */
(void)mutex_lock(&list_lock);
a->next = main_arena.next;
atomic_write_barrier ();
main_arena.next = a;
(void)mutex_unlock(&list_lock);
if(err) /* locking failed; keep arena for further attempts later */
return 0;
THREAD_STAT(++(a->stat_lock_loop));
return a;
}
/* Create a new arena with initial size "size". */
mstate
_int_new_arena(size_t size)
{
mstate a;
heap_info *h;
char *ptr;
unsigned long misalign;
h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
mp_.top_pad);
if(!h) {
/* Maybe size is too large to fit in a single heap. So, just try
to create a minimally-sized arena and let _int_malloc() attempt
to deal with the large request via mmap_chunk(). */
h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
if(!h)
return 0;
}
a = h->ar_ptr = (mstate)(h+1);
malloc_init_state(a);
/*a->next = NULL;*/
a->system_mem = a->max_system_mem = h->size;
arena_mem += h->size;
#ifdef NO_THREADS
if((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
mp_.max_total_mem)
mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
#endif
/* Set up the top chunk, with proper alignment. */
ptr = (char *)(a + 1);
misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
if (misalign > 0)
ptr += MALLOC_ALIGNMENT - misalign;
top(a) = (mchunkptr)ptr;
set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);
return a;
}
/* Obtain the arena number n. Needed in malloc_stats. */
mstate
_int_get_arena (int n)
{
mstate a = &main_arena;
while (n-- != 0) {
a = a->next;
if (a == &main_arena)
return 0;
}
return a;
}
#endif /* USE_ARENAS */
/*
* Local variables:
* c-basic-offset: 2
* End:
*/

opal/memory/ptmalloc2/hooks.c (new file, 640 additions)

@@ -0,0 +1,640 @@
/* Malloc implementation for multiple threads without lock contention.
Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
/* $Id: hooks.c,v 1.12 2004/11/05 14:42:32 wg Exp $ */
#ifndef DEFAULT_CHECK_ACTION
#define DEFAULT_CHECK_ACTION 1
#endif
/* What to do if the standard debugging hooks are in place and a
corrupt pointer is detected: do nothing (0), print an error message
(1), or call abort() (2). */
/* Hooks for debugging versions. The initial hooks just call the
initialization routine, then do the normal work. */
#if !(USE_STARTER & 2)
static Void_t*
#if __STD_C
malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
#else
malloc_hook_ini(sz, caller)
size_t sz; const __malloc_ptr_t caller;
#endif
{
__malloc_hook = NULL;
ptmalloc_init();
return public_mALLOc(sz);
}
static Void_t*
#if __STD_C
realloc_hook_ini(Void_t* ptr, size_t sz, const __malloc_ptr_t caller)
#else
realloc_hook_ini(ptr, sz, caller)
Void_t* ptr; size_t sz; const __malloc_ptr_t caller;
#endif
{
__malloc_hook = NULL;
__realloc_hook = NULL;
ptmalloc_init();
return public_rEALLOc(ptr, sz);
}
static Void_t*
#if __STD_C
memalign_hook_ini(size_t alignment, size_t sz, const __malloc_ptr_t caller)
#else
memalign_hook_ini(alignment, sz, caller)
size_t alignment; size_t sz; const __malloc_ptr_t caller;
#endif
{
__memalign_hook = NULL;
ptmalloc_init();
return public_mEMALIGn(alignment, sz);
}
#endif /* !(USE_STARTER & 2) */
static int check_action = DEFAULT_CHECK_ACTION;
/* Whether we are using malloc checking. */
static int using_malloc_checking;
/* A flag that is set by malloc_set_state, to signal that malloc checking
must not be enabled on the request from the user (via the MALLOC_CHECK_
environment variable). It is reset by __malloc_check_init to tell
malloc_set_state that the user has requested malloc checking.
The purpose of this flag is to make sure that malloc checking is not
enabled when the heap to be restored was constructed without malloc
checking, and thus does not contain the required magic bytes.
Otherwise the heap would be corrupted by calls to free and realloc. If
it turns out that the heap was created with malloc checking and the
user has requested it malloc_set_state just calls __malloc_check_init
again to enable it. On the other hand, reusing such a heap without
further malloc checking is safe. */
static int disallow_malloc_check;
/* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
if (disallow_malloc_check) {
disallow_malloc_check = 0;
return;
}
using_malloc_checking = 1;
__malloc_hook = malloc_check;
__free_hook = free_check;
__realloc_hook = realloc_check;
__memalign_hook = memalign_check;
if(check_action & 1)
fprintf(stderr, "malloc: using debugging hooks\n");
}
/* A simple, standard set of debugging hooks. Overhead is `only' one
byte per chunk; still this will catch most cases of double frees or
overruns. The goal here is to avoid obscure crashes due to invalid
usage, unlike in the MALLOC_DEBUG code. */
#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
/* Instrument a chunk with overrun detector byte(s) and convert it
into a user pointer with requested size sz. */
static Void_t*
internal_function
#if __STD_C
mem2mem_check(Void_t *ptr, size_t sz)
#else
mem2mem_check(ptr, sz) Void_t *ptr; size_t sz;
#endif
{
mchunkptr p;
unsigned char* m_ptr = (unsigned char*)BOUNDED_N(ptr, sz);
size_t i;
if (!ptr)
return ptr;
p = mem2chunk(ptr);
for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
i > sz;
i -= 0xFF) {
if(i-sz < 0x100) {
m_ptr[i] = (unsigned char)(i-sz);
break;
}
m_ptr[i] = 0xFF;
}
m_ptr[sz] = MAGICBYTE(p);
return (Void_t*)m_ptr;
}
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
pointer. If the provided pointer is not valid, return NULL. */
static mchunkptr
internal_function
#if __STD_C
mem2chunk_check(Void_t* mem)
#else
mem2chunk_check(mem) Void_t* mem;
#endif
{
mchunkptr p;
INTERNAL_SIZE_T sz, c;
unsigned char magic;
if(!aligned_OK(mem)) return NULL;
p = mem2chunk(mem);
if (!chunk_is_mmapped(p)) {
/* Must be a chunk in conventional heap memory. */
int contig = contiguous(&main_arena);
sz = chunksize(p);
if((contig &&
((char*)p<mp_.sbrk_base ||
((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
(contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
next_chunk(prev_chunk(p))!=p) ))
return NULL;
magic = MAGICBYTE(p);
for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
}
((unsigned char*)p)[sz] ^= 0xFF;
} else {
unsigned long offset, page_mask = malloc_getpagesize-1;
/* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
alignment relative to the beginning of a page. Check this
first. */
offset = (unsigned long)mem & page_mask;
if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
offset<0x2000) ||
!chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
return NULL;
magic = MAGICBYTE(p);
for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
}
((unsigned char*)p)[sz] ^= 0xFF;
}
return p;
}
/* Check for corruption of the top chunk, and try to recover if
necessary. */
static int
internal_function
#if __STD_C
top_check(void)
#else
top_check()
#endif
{
mchunkptr t = top(&main_arena);
char* brk, * new_brk;
INTERNAL_SIZE_T front_misalign, sbrk_size;
unsigned long pagesz = malloc_getpagesize;
if (t == initial_top(&main_arena) ||
(!chunk_is_mmapped(t) &&
chunksize(t)>=MINSIZE &&
prev_inuse(t) &&
(!contiguous(&main_arena) ||
(char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem)))
return 0;
if(check_action & 1)
fprintf(stderr, "malloc: top chunk is corrupt\n");
if(check_action & 2)
abort();
/* Try to set up a new top chunk. */
brk = MORECORE(0);
front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
if (front_misalign > 0)
front_misalign = MALLOC_ALIGNMENT - front_misalign;
sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
new_brk = (char*)(MORECORE (sbrk_size));
if (new_brk == (char*)(MORECORE_FAILURE)) return -1;
/* Call the `morecore' hook if necessary. */
if (__after_morecore_hook)
(*__after_morecore_hook) ();
main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;
top(&main_arena) = (mchunkptr)(brk + front_misalign);
set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);
return 0;
}
static Void_t*
#if __STD_C
malloc_check(size_t sz, const Void_t *caller)
#else
malloc_check(sz, caller) size_t sz; const Void_t *caller;
#endif
{
Void_t *victim;
(void)mutex_lock(&main_arena.mutex);
victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
(void)mutex_unlock(&main_arena.mutex);
return mem2mem_check(victim, sz);
}
static void
#if __STD_C
free_check(Void_t* mem, const Void_t *caller)
#else
free_check(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
mchunkptr p;
if(!mem) return;
(void)mutex_lock(&main_arena.mutex);
p = mem2chunk_check(mem);
if(!p) {
(void)mutex_unlock(&main_arena.mutex);
if(check_action & 1)
fprintf(stderr, "free(): invalid pointer %p!\n", mem);
if(check_action & 2)
abort();
return;
}
#if HAVE_MMAP
if (chunk_is_mmapped(p)) {
(void)mutex_unlock(&main_arena.mutex);
munmap_chunk(p);
return;
}
#endif
#if 0 /* Erase freed memory. */
memset(mem, 0, chunksize(p) - (SIZE_SZ+1));
#endif
_int_free(&main_arena, mem);
(void)mutex_unlock(&main_arena.mutex);
}
static Void_t*
#if __STD_C
realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller)
#else
realloc_check(oldmem, bytes, caller)
Void_t* oldmem; size_t bytes; const Void_t *caller;
#endif
{
mchunkptr oldp;
INTERNAL_SIZE_T nb, oldsize;
Void_t* newmem = 0;
if (oldmem == 0) return malloc_check(bytes, NULL);
(void)mutex_lock(&main_arena.mutex);
oldp = mem2chunk_check(oldmem);
(void)mutex_unlock(&main_arena.mutex);
if(!oldp) {
if(check_action & 1)
fprintf(stderr, "realloc(): invalid pointer %p!\n", oldmem);
if(check_action & 2)
abort();
return malloc_check(bytes, NULL);
}
oldsize = chunksize(oldp);
checked_request2size(bytes+1, nb);
(void)mutex_lock(&main_arena.mutex);
#if HAVE_MMAP
if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
mchunkptr newp = mremap_chunk(oldp, nb);
if(newp)
newmem = chunk2mem(newp);
else
#endif
{
/* Note the extra SIZE_SZ overhead. */
if(oldsize - SIZE_SZ >= nb)
newmem = oldmem; /* do nothing */
else {
/* Must alloc, copy, free. */
if (top_check() >= 0)
newmem = _int_malloc(&main_arena, bytes+1);
if (newmem) {
MALLOC_COPY(BOUNDED_N(newmem, bytes+1), oldmem, oldsize - 2*SIZE_SZ);
munmap_chunk(oldp);
}
}
}
} else {
#endif /* HAVE_MMAP */
if (top_check() >= 0)
newmem = _int_realloc(&main_arena, oldmem, bytes+1);
#if 0 /* Erase freed memory. */
if(newmem)
newp = mem2chunk(newmem);
nb = chunksize(newp);
if(oldp<newp || oldp>=chunk_at_offset(newp, nb)) {
memset((char*)oldmem + 2*sizeof(mbinptr), 0,
oldsize - (2*sizeof(mbinptr)+2*SIZE_SZ+1));
} else if(nb > oldsize+SIZE_SZ) {
memset((char*)BOUNDED_N(chunk2mem(newp), bytes) + oldsize,
0, nb - (oldsize+SIZE_SZ));
}
#endif
#if HAVE_MMAP
}
#endif
(void)mutex_unlock(&main_arena.mutex);
return mem2mem_check(newmem, bytes);
}
static Void_t*
#if __STD_C
memalign_check(size_t alignment, size_t bytes, const Void_t *caller)
#else
memalign_check(alignment, bytes, caller)
size_t alignment; size_t bytes; const Void_t *caller;
#endif
{
INTERNAL_SIZE_T nb;
Void_t* mem;
if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
if (alignment < MINSIZE) alignment = MINSIZE;
checked_request2size(bytes+1, nb);
(void)mutex_lock(&main_arena.mutex);
mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
NULL;
(void)mutex_unlock(&main_arena.mutex);
return mem2mem_check(mem, bytes);
}
#if !defined NO_THREADS && USE_STARTER
/* The following hooks are used when the global initialization in
ptmalloc_init() hasn't completed yet. */
static Void_t*
#if __STD_C
malloc_starter(size_t sz, const Void_t *caller)
#else
malloc_starter(sz, caller) size_t sz; const Void_t *caller;
#endif
{
Void_t* victim;
ptmalloc_init_minimal();
victim = _int_malloc(&main_arena, sz);
return victim ? BOUNDED_N(victim, sz) : 0;
}
static Void_t*
#if __STD_C
memalign_starter(size_t align, size_t sz, const Void_t *caller)
#else
memalign_starter(align, sz, caller) size_t align, sz; const Void_t *caller;
#endif
{
Void_t* victim;
ptmalloc_init_minimal();
victim = _int_memalign(&main_arena, align, sz);
return victim ? BOUNDED_N(victim, sz) : 0;
}
static void
#if __STD_C
free_starter(Void_t* mem, const Void_t *caller)
#else
free_starter(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
mchunkptr p;
if(!mem) return;
p = mem2chunk(mem);
#if HAVE_MMAP
if (chunk_is_mmapped(p)) {
munmap_chunk(p);
return;
}
#endif
_int_free(&main_arena, mem);
}
#endif /* !defined NO_THREADS && USE_STARTER */
/* Get/set state: malloc_get_state() records the current state of all
malloc variables (_except_ for the actual heap contents and `hook'
function pointers) in a system dependent, opaque data structure.
This data structure is dynamically allocated and can be free()d
after use. malloc_set_state() restores the state of all malloc
variables to the previously obtained state. This is especially
useful when using this malloc as part of a shared library, and when
the heap contents are saved/restored via some other method. The
primary example for this is GNU Emacs with its `dumping' procedure.
`Hook' function pointers are never saved or restored by these
functions, with two exceptions: If malloc checking was in use when
malloc_get_state() was called, then malloc_set_state() calls
__malloc_check_init() if possible; if malloc checking was not in
use in the recorded state but the user requested malloc checking,
then the hooks are reset to 0. */
#define MALLOC_STATE_MAGIC 0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 2l) /* major*0x100 + minor */
struct malloc_save_state {
long magic;
long version;
mbinptr av[NBINS * 2 + 2];
char* sbrk_base;
int sbrked_mem_bytes;
unsigned long trim_threshold;
unsigned long top_pad;
unsigned int n_mmaps_max;
unsigned long mmap_threshold;
int check_action;
unsigned long max_sbrked_mem;
unsigned long max_total_mem;
unsigned int n_mmaps;
unsigned int max_n_mmaps;
unsigned long mmapped_mem;
unsigned long max_mmapped_mem;
int using_malloc_checking;
};
Void_t*
public_gET_STATe(void)
{
struct malloc_save_state* ms;
int i;
mbinptr b;
ms = (struct malloc_save_state*)public_mALLOc(sizeof(*ms));
if (!ms)
return 0;
(void)mutex_lock(&main_arena.mutex);
malloc_consolidate(&main_arena);
ms->magic = MALLOC_STATE_MAGIC;
ms->version = MALLOC_STATE_VERSION;
ms->av[0] = 0;
ms->av[1] = 0; /* used to be binblocks, now no longer used */
ms->av[2] = top(&main_arena);
ms->av[3] = 0; /* used to be undefined */
for(i=1; i<NBINS; i++) {
b = bin_at(&main_arena, i);
if(first(b) == b)
ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
else {
ms->av[2*i+2] = first(b);
ms->av[2*i+3] = last(b);
}
}
ms->sbrk_base = mp_.sbrk_base;
ms->sbrked_mem_bytes = main_arena.system_mem;
ms->trim_threshold = mp_.trim_threshold;
ms->top_pad = mp_.top_pad;
ms->n_mmaps_max = mp_.n_mmaps_max;
ms->mmap_threshold = mp_.mmap_threshold;
ms->check_action = check_action;
ms->max_sbrked_mem = main_arena.max_system_mem;
#ifdef NO_THREADS
ms->max_total_mem = mp_.max_total_mem;
#else
ms->max_total_mem = 0;
#endif
ms->n_mmaps = mp_.n_mmaps;
ms->max_n_mmaps = mp_.max_n_mmaps;
ms->mmapped_mem = mp_.mmapped_mem;
ms->max_mmapped_mem = mp_.max_mmapped_mem;
ms->using_malloc_checking = using_malloc_checking;
(void)mutex_unlock(&main_arena.mutex);
return (Void_t*)ms;
}
int
public_sET_STATe(Void_t* msptr)
{
struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
int i;
mbinptr b;
disallow_malloc_check = 1;
ptmalloc_init();
if(ms->magic != MALLOC_STATE_MAGIC) return -1;
/* Must fail if the major version is too high. */
if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
(void)mutex_lock(&main_arena.mutex);
/* There are no fastchunks. */
clear_fastchunks(&main_arena);
set_max_fast(&main_arena, DEFAULT_MXFAST);
for (i=0; i<NFASTBINS; ++i)
main_arena.fastbins[i] = 0;
for (i=0; i<BINMAPSIZE; ++i)
main_arena.binmap[i] = 0;
top(&main_arena) = ms->av[2];
main_arena.last_remainder = 0;
for(i=1; i<NBINS; i++) {
b = bin_at(&main_arena, i);
if(ms->av[2*i+2] == 0) {
assert(ms->av[2*i+3] == 0);
first(b) = last(b) = b;
} else {
if(i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
largebin_index(chunksize(ms->av[2*i+3]))==i)) {
first(b) = ms->av[2*i+2];
last(b) = ms->av[2*i+3];
/* Make sure the links to the bins within the heap are correct. */
first(b)->bk = b;
last(b)->fd = b;
/* Set bit in binblocks. */
mark_bin(&main_arena, i);
} else {
/* Oops, index computation from chunksize must have changed.
Link the whole list into unsorted_chunks. */
first(b) = last(b) = b;
b = unsorted_chunks(&main_arena);
ms->av[2*i+2]->bk = b;
ms->av[2*i+3]->fd = b->fd;
b->fd->bk = ms->av[2*i+3];
b->fd = ms->av[2*i+2];
}
}
}
mp_.sbrk_base = ms->sbrk_base;
main_arena.system_mem = ms->sbrked_mem_bytes;
mp_.trim_threshold = ms->trim_threshold;
mp_.top_pad = ms->top_pad;
mp_.n_mmaps_max = ms->n_mmaps_max;
mp_.mmap_threshold = ms->mmap_threshold;
check_action = ms->check_action;
main_arena.max_system_mem = ms->max_sbrked_mem;
#ifdef NO_THREADS
mp_.max_total_mem = ms->max_total_mem;
#endif
mp_.n_mmaps = ms->n_mmaps;
mp_.max_n_mmaps = ms->max_n_mmaps;
mp_.mmapped_mem = ms->mmapped_mem;
mp_.max_mmapped_mem = ms->max_mmapped_mem;
/* add version-dependent code here */
if (ms->version >= 1) {
/* Check whether it is safe to enable malloc checking, or whether
it is necessary to disable it. */
if (ms->using_malloc_checking && !using_malloc_checking &&
!disallow_malloc_check)
__malloc_check_init ();
else if (!ms->using_malloc_checking && using_malloc_checking) {
__malloc_hook = 0;
__free_hook = 0;
__realloc_hook = 0;
__memalign_hook = 0;
using_malloc_checking = 0;
}
}
check_malloc_state(&main_arena);
(void)mutex_unlock(&main_arena.mutex);
return 0;
}
/*
* Local variables:
* c-basic-offset: 2
* End:
*/
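The save/restore interface above is easiest to see end to end in a tiny caller. Below is a minimal sketch, assuming the ptmalloc2 malloc.h from this commit is on the include path and the default entry-point names malloc_get_state()/malloc_set_state() declared there apply (no USE_DL_PREFIX renaming); it mirrors the pattern of tst-mallocstate.c later in this commit: take a snapshot, restore it, then free the malloc()ed snapshot.

#include <stdlib.h>
#include "malloc.h"   /* ptmalloc2 prototypes for malloc_get_state()/malloc_set_state() */

int main(void)
{
  void *p = malloc(32);              /* force allocator initialization */
  void *state = malloc_get_state();  /* opaque, heap-allocated snapshot */
  if (p == NULL || state == NULL)
    return 1;
  /* ... heap contents would be saved/restored by some other means here ... */
  if (malloc_set_state(state) != 0)  /* roll the malloc bookkeeping back */
    return 1;
  free(state);                       /* the snapshot can be free()d after use */
  free(p);
  return 0;
}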

51
opal/memory/ptmalloc2/lran2.h Normal file

@ -0,0 +1,51 @@
/* lran2.h
* by Wolfram Gloger 1996.
*
* A small, portable pseudo-random number generator.
*/
#ifndef _LRAN2_H
#define _LRAN2_H
#define LRAN2_MAX 714025l /* constants for portable */
#define IA 1366l /* random number generator */
#define IC 150889l /* (see e.g. `Numerical Recipes') */
struct lran2_st {
long x, y, v[97];
};
static void
lran2_init(struct lran2_st* d, long seed)
{
long x;
int j;
x = (IC - seed) % LRAN2_MAX;
if(x < 0) x = -x;
for(j=0; j<97; j++) {
x = (IA*x + IC) % LRAN2_MAX;
d->v[j] = x;
}
d->x = (IA*x + IC) % LRAN2_MAX;
d->y = d->x;
}
#ifdef __GNUC__
__inline__
#endif
static long
lran2(struct lran2_st* d)
{
int j = (d->y % 97);
d->y = d->v[j];
d->x = (IA*d->x + IC) % LRAN2_MAX;
d->v[j] = d->x;
return d->y;
}
#undef IA
#undef IC
#endif
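To make the generator's intended use concrete, here is a minimal sketch (the driver and its names are mine, not part of the header) that seeds lran2 and draws a few values; the raw output lies in [0, LRAN2_MAX), so the test drivers later in this commit reduce it with a modulus in their RANDOM() macro.

#include <stdio.h>
#include "lran2.h"

int main(void)
{
  struct lran2_st rng;
  long i;

  lran2_init(&rng, 12345L);               /* seed the generator */
  for (i = 0; i < 5; i++)
    printf("%ld\n", lran2(&rng) % 1000);  /* raw values are in [0, LRAN2_MAX) */
  return 0;
}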

161
opal/memory/ptmalloc2/malloc-stats.c Normal file

@ -0,0 +1,161 @@
/* Malloc implementation for multiple threads; statistics printing.
Copyright (C) 2004 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Wolfram Gloger <wg@malloc.de>, 2004.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
/* $Id: $ */
#include <stdio.h> /* needed for malloc_stats */
#include <malloc-machine.h>
#include "malloc.h"
/*
Define HAVE_MMAP as true to optionally make malloc() use mmap() to
allocate very large blocks. These will be returned to the
operating system immediately after a free(). Also, if mmap
is available, it is used as a backup strategy in cases where
MORECORE fails to provide space from system.
This malloc is best tuned to work with mmap for large requests.
If you do not have mmap, operations involving very large chunks (1MB
or so) may be slower than you'd like.
*/
#ifndef HAVE_MMAP
#define HAVE_MMAP 1
#endif
#ifdef USE_DL_PREFIX
#define public_mSTATs dlmalloc_stats
#else /* USE_DL_PREFIX */
#ifdef _LIBC
#define public_mSTATs __malloc_stats
#else /* !_LIBC */
#define public_mSTATs malloc_stats
#endif /* _LIBC */
#endif /* USE_DL_PREFIX */
/*
malloc_stats();
Prints on stderr the amount of space obtained from the system (both
via sbrk and mmap), the maximum amount (which may be more than
current if malloc_trim and/or munmap got called), and the current
number of bytes allocated via malloc (or realloc, etc) but not yet
freed. Note that this is the number of bytes allocated, not the
number requested. It will be larger than the number requested
because of alignment and bookkeeping overhead. Because it includes
alignment wastage as being in use, this figure may be greater than
zero even when no user-level chunks are allocated.
The reported current and maximum system memory can be inaccurate if
a program makes other calls to system memory allocation functions
(normally sbrk) outside of malloc.
malloc_stats prints only the most commonly interesting statistics.
More information can be obtained by calling mallinfo.
*/
void public_mSTATs __MALLOC_P((void));
/*
------------------------------ malloc_stats ------------------------------
*/
void public_mSTATs()
{
int i;
mstate ar_ptr;
struct malloc_global_info mgi;
struct malloc_arena_info mai;
unsigned long in_use_b, system_b, avail_b;
#if THREAD_STATS
long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0;
#endif
#if 0
if(__malloc_initialized < 0)
ptmalloc_init ();
#endif
_int_get_global_info(&mgi);
system_b = in_use_b = mgi.mmapped_mem;
#ifdef _LIBC
_IO_flockfile (stderr);
int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
#endif
for (i=0; (ar_ptr = _int_get_arena(i)); i++) {
_int_get_arena_info(ar_ptr, &mai);
avail_b = mai.fastavail + mai.binavail + mai.top_size;
fprintf(stderr, "Arena %d:\n", i);
fprintf(stderr, "system bytes = %10lu\n",
(unsigned long)mai.system_mem);
fprintf(stderr, "in use bytes = %10lu\n",
(unsigned long)(mai.system_mem - avail_b));
#if MALLOC_DEBUG > 1
if (i > 0)
dump_heap(heap_for_ptr(top(ar_ptr)));
#endif
system_b += mai.system_mem;
in_use_b += mai.system_mem - avail_b;
#if THREAD_STATS
stat_lock_direct += mai.stat_lock_direct;
stat_lock_loop += mai.stat_lock_loop;
stat_lock_wait += mai.stat_lock_wait;
#endif
}
#if HAVE_MMAP
fprintf(stderr, "Total (incl. mmap):\n");
#else
fprintf(stderr, "Total:\n");
#endif
fprintf(stderr, "system bytes = %10lu\n", system_b);
fprintf(stderr, "in use bytes = %10lu\n", in_use_b);
#ifdef NO_THREADS
fprintf(stderr, "max system bytes = %10lu\n",
(unsigned long)mgi.max_total_mem);
#endif
#if HAVE_MMAP
fprintf(stderr, "max mmap regions = %10u\n", (unsigned int)mgi.max_n_mmaps);
fprintf(stderr, "max mmap bytes = %10lu\n",
(unsigned long)mgi.max_mmapped_mem);
#endif
#if THREAD_STATS
fprintf(stderr, "heaps created = %10d\n", mgi.stat_n_heaps);
fprintf(stderr, "locked directly = %10ld\n", stat_lock_direct);
fprintf(stderr, "locked in loop = %10ld\n", stat_lock_loop);
fprintf(stderr, "locked waiting = %10ld\n", stat_lock_wait);
fprintf(stderr, "locked total = %10ld\n",
stat_lock_direct + stat_lock_loop + stat_lock_wait);
#endif
#ifdef _LIBC
((_IO_FILE *) stderr)->_flags2 |= old_flags2;
_IO_funlockfile (stderr);
#endif
}
#ifdef _LIBC
weak_alias (__malloc_stats, malloc_stats)
#endif
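A minimal sketch of calling this statistics entry point from application code, assuming it is linked under its plain name malloc_stats() (i.e. neither the USE_DL_PREFIX nor the _LIBC renaming above applies); the report is written to stderr.

#include <stdlib.h>
#include "malloc.h"   /* declares malloc_stats() */

int main(void)
{
  int i;
  for (i = 0; i < 1000; i++)
    free(malloc(64 + i));   /* create a little allocator history */
  malloc_stats();           /* per-arena and total system/in-use bytes on stderr */
  return 0;
}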

5439
opal/memory/ptmalloc2/malloc.c Normal file

File diff not shown because of its large size

291
opal/memory/ptmalloc2/malloc.h Normal file

@ -0,0 +1,291 @@
/* Prototypes and definition for malloc implementation.
Copyright (C) 1996,97,99,2000,2002,2003,2004 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
#ifndef _MALLOC_H
#define _MALLOC_H 1
#ifdef _LIBC
#include <features.h>
#endif
/*
$Id: malloc.h,v 1.7 2004/08/08 12:34:57 wg Exp $
`ptmalloc2', a malloc implementation for multiple threads without
lock contention, by Wolfram Gloger <wg@malloc.de>.
VERSION 2.7.0
This work is mainly derived from malloc-2.7.0 by Doug Lea
<dl@cs.oswego.edu>, which is available from:
ftp://gee.cs.oswego.edu/pub/misc/malloc.c
This trimmed-down header file only provides function prototypes and
the exported data structures. For more detailed function
descriptions and compile-time options, see the source file
`malloc.c'.
*/
#if defined(__STDC__) || defined (__cplusplus)
# include <stddef.h>
# define __malloc_ptr_t void *
#else
# undef size_t
# define size_t unsigned int
# undef ptrdiff_t
# define ptrdiff_t int
# define __malloc_ptr_t char *
#endif
#ifdef _LIBC
/* Used by GNU libc internals. */
# define __malloc_size_t size_t
# define __malloc_ptrdiff_t ptrdiff_t
#elif !defined __attribute_malloc__
# define __attribute_malloc__
#endif
#ifdef __GNUC__
/* GCC can always grok prototypes. For C++ programs we add throw()
to help it optimize the function calls. But this works only with
gcc 2.8.x and egcs. */
# if defined __cplusplus && (__GNUC__ >= 3 || __GNUC_MINOR__ >= 8)
# define __THROW throw ()
# else
# define __THROW
# endif
# define __MALLOC_P(args) args __THROW
/* This macro will be used for functions which might take C++ callback
functions. */
# define __MALLOC_PMT(args) args
#else /* Not GCC. */
# define __THROW
# if (defined __STDC__ && __STDC__) || defined __cplusplus
# define __MALLOC_P(args) args
# define __MALLOC_PMT(args) args
# ifndef __const
# define __const const
# endif
# else /* Not ANSI C or C++. */
# define __MALLOC_P(args) () /* No prototypes. */
# define __MALLOC_PMT(args) ()
# ifndef __const
# define __const
# endif
# endif /* ANSI C or C++. */
#endif /* GCC. */
#ifndef NULL
# ifdef __cplusplus
# define NULL 0
# else
# define NULL ((__malloc_ptr_t) 0)
# endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* Nonzero if the malloc is already initialized. */
#ifdef _LIBC
/* In the GNU libc we rename the global variable
`__malloc_initialized' to `__libc_malloc_initialized'. */
# define __malloc_initialized __libc_malloc_initialized
#endif
extern int __malloc_initialized;
/* Allocate SIZE bytes of memory. */
extern __malloc_ptr_t malloc __MALLOC_P ((size_t __size)) __attribute_malloc__;
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
extern __malloc_ptr_t calloc __MALLOC_P ((size_t __nmemb, size_t __size))
__attribute_malloc__;
/* Re-allocate the previously allocated block in __ptr, making the new
block SIZE bytes long. */
extern __malloc_ptr_t realloc __MALLOC_P ((__malloc_ptr_t __ptr,
size_t __size))
__attribute_malloc__;
/* Free a block allocated by `malloc', `realloc' or `calloc'. */
extern void free __MALLOC_P ((__malloc_ptr_t __ptr));
/* Free a block allocated by `calloc'. */
extern void cfree __MALLOC_P ((__malloc_ptr_t __ptr));
/* Allocate SIZE bytes allocated to ALIGNMENT bytes. */
extern __malloc_ptr_t memalign __MALLOC_P ((size_t __alignment, size_t __size));
/* Allocate SIZE bytes on a page boundary. */
extern __malloc_ptr_t valloc __MALLOC_P ((size_t __size)) __attribute_malloc__;
/* Equivalent to valloc(minimum-page-that-holds(n)), that is, round up
__size to nearest pagesize. */
extern __malloc_ptr_t pvalloc __MALLOC_P ((size_t __size))
__attribute_malloc__;
/* Underlying allocation function; successive calls should return
contiguous pieces of memory. */
extern __malloc_ptr_t (*__morecore) __MALLOC_PMT ((ptrdiff_t __size));
/* Default value of `__morecore'. */
extern __malloc_ptr_t __default_morecore __MALLOC_P ((ptrdiff_t __size))
__attribute_malloc__;
/* SVID2/XPG mallinfo structure */
struct mallinfo {
int arena; /* non-mmapped space allocated from system */
int ordblks; /* number of free chunks */
int smblks; /* number of fastbin blocks */
int hblks; /* number of mmapped regions */
int hblkhd; /* space in mmapped regions */
int usmblks; /* maximum total allocated space */
int fsmblks; /* space available in freed fastbin blocks */
int uordblks; /* total allocated space */
int fordblks; /* total free space */
int keepcost; /* top-most, releasable (via malloc_trim) space */
};
/* Returns a copy of the updated current mallinfo. */
extern struct mallinfo mallinfo __MALLOC_P ((void));
/* SVID2/XPG mallopt options */
#ifndef M_MXFAST
# define M_MXFAST 1 /* maximum request size for "fastbins" */
#endif
#ifndef M_NLBLKS
# define M_NLBLKS 2 /* UNUSED in this malloc */
#endif
#ifndef M_GRAIN
# define M_GRAIN 3 /* UNUSED in this malloc */
#endif
#ifndef M_KEEP
# define M_KEEP 4 /* UNUSED in this malloc */
#endif
/* mallopt options that actually do something */
#define M_TRIM_THRESHOLD -1
#define M_TOP_PAD -2
#define M_MMAP_THRESHOLD -3
#define M_MMAP_MAX -4
#define M_CHECK_ACTION -5
/* General SVID/XPG interface to tunable parameters. */
extern int mallopt __MALLOC_P ((int __param, int __val));
/* Release all but __pad bytes of freed top-most memory back to the
system. Return 1 if successful, else 0. */
extern int malloc_trim __MALLOC_P ((size_t __pad));
/* Report the number of usable allocated bytes associated with allocated
chunk __ptr. */
extern size_t malloc_usable_size __MALLOC_P ((__malloc_ptr_t __ptr));
/* Prints brief summary statistics on stderr. */
extern void malloc_stats __MALLOC_P ((void));
/* Record the state of all malloc variables in an opaque data structure. */
extern __malloc_ptr_t malloc_get_state __MALLOC_P ((void));
/* Restore the state of all malloc variables from data obtained with
malloc_get_state(). */
extern int malloc_set_state __MALLOC_P ((__malloc_ptr_t __ptr));
/* Called once when malloc is initialized; redefining this variable in
the application provides the preferred way to set up the hook
pointers. */
extern void (*__malloc_initialize_hook) __MALLOC_PMT ((void));
/* Hooks for debugging and user-defined versions. */
extern void (*__free_hook) __MALLOC_PMT ((__malloc_ptr_t __ptr,
__const __malloc_ptr_t));
extern __malloc_ptr_t (*__malloc_hook) __MALLOC_PMT ((size_t __size,
__const __malloc_ptr_t));
extern __malloc_ptr_t (*__realloc_hook) __MALLOC_PMT ((__malloc_ptr_t __ptr,
size_t __size,
__const __malloc_ptr_t));
extern __malloc_ptr_t (*__memalign_hook) __MALLOC_PMT ((size_t __alignment,
size_t __size,
__const __malloc_ptr_t));
extern void (*__after_morecore_hook) __MALLOC_PMT ((void));
/* Activate a standard set of debugging hooks. */
extern void __malloc_check_init __MALLOC_P ((void));
/* Internal routines, operating on "arenas". */
struct malloc_state;
typedef struct malloc_state *mstate;
extern mstate _int_new_arena __MALLOC_P ((size_t __ini_size));
extern __malloc_ptr_t _int_malloc __MALLOC_P ((mstate __m, size_t __size));
extern void _int_free __MALLOC_P ((mstate __m, __malloc_ptr_t __ptr));
extern __malloc_ptr_t _int_realloc __MALLOC_P ((mstate __m,
__malloc_ptr_t __ptr,
size_t __size));
extern __malloc_ptr_t _int_memalign __MALLOC_P ((mstate __m, size_t __alignment,
size_t __size));
/* Return arena number __n, or 0 if out of bounds. Arena 0 is the
main arena. */
extern mstate _int_get_arena __MALLOC_P ((int __n));
/* Implementation-specific mallinfo. More detailed than mallinfo, and
also works for size_t wider than int. */
struct malloc_arena_info {
int nfastblocks; /* number of freed "fastchunks" */
int nbinblocks; /* number of available chunks in bins */
size_t fastavail; /* total space in freed "fastchunks" */
size_t binavail; /* total space in binned chunks */
size_t top_size; /* size of top chunk */
size_t system_mem; /* bytes allocated from system in this arena */
size_t max_system_mem; /* max. bytes allocated from system */
/* Statistics for locking. Only kept if THREAD_STATS is defined
at compile time. */
long stat_lock_direct, stat_lock_loop, stat_lock_wait;
};
struct malloc_global_info {
int n_mmaps; /* number of mmap'ed chunks */
int max_n_mmaps; /* max. number of mmap'ed chunks reached */
size_t mmapped_mem; /* total bytes allocated in mmap'ed chunks */
size_t max_mmapped_mem; /* max. bytes allocated in mmap'ed chunks */
size_t max_total_mem; /* only kept for NO_THREADS */
int stat_n_heaps; /* only kept if THREAD_STATS is defined */
};
extern void _int_get_arena_info __MALLOC_P ((mstate __m,
struct malloc_arena_info *__ma));
extern void _int_get_global_info __MALLOC_P ((struct malloc_global_info *__m));
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* malloc.h */
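As a quick illustration of the tunable-parameter and statistics interfaces declared above, here is a hedged sketch that sets two of the mallopt() parameters that actually do something in this malloc and then inspects the heap with mallinfo(); the specific threshold values are arbitrary and only for demonstration.

#include <stdio.h>
#include <stdlib.h>
#include "malloc.h"

int main(void)
{
  struct mallinfo mi;
  void *p;

  /* Only M_TRIM_THRESHOLD, M_TOP_PAD, M_MMAP_THRESHOLD, M_MMAP_MAX and
     M_CHECK_ACTION have an effect in this malloc. */
  mallopt(M_TRIM_THRESHOLD, 256 * 1024);
  mallopt(M_MMAP_THRESHOLD, 512 * 1024);

  p = malloc(100000);
  mi = mallinfo();
  printf("arena (sbrk) bytes = %d\n", mi.arena);
  printf("mmapped regions    = %d\n", mi.hblks);
  printf("bytes in use       = %d\n", mi.uordblks);
  printf("bytes free         = %d\n", mi.fordblks);
  free(p);
  return 0;
}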


@ -0,0 +1 @@
/* Empty placeholder */


@ -0,0 +1,68 @@
/* Basic platform-independent macro definitions for mutexes,
thread-specific data and parameters for malloc.
Copyright (C) 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
#ifndef _GENERIC_MALLOC_MACHINE_H
#define _GENERIC_MALLOC_MACHINE_H
#include <atomic.h>
#ifndef mutex_init /* No threads, provide dummy macros */
# define NO_THREADS
/* The mutex functions used to do absolutely nothing, i.e. lock,
trylock and unlock would always just return 0. However, even
without any concurrently active threads, a mutex can be used
legitimately as an `in use' flag. To make the code that is
protected by a mutex async-signal safe, these macros would have to
be based on atomic test-and-set operations, for example. */
typedef int mutex_t;
# define mutex_init(m) (*(m) = 0)
# define mutex_lock(m) ((*(m) = 1), 0)
# define mutex_trylock(m) (*(m) ? 1 : ((*(m) = 1), 0))
# define mutex_unlock(m) (*(m) = 0)
typedef void *tsd_key_t;
# define tsd_key_create(key, destr) do {} while(0)
# define tsd_setspecific(key, data) ((key) = (data))
# define tsd_getspecific(key, vptr) (vptr = (key))
# define thread_atfork(prepare, parent, child) do {} while(0)
#endif /* !defined mutex_init */
#ifndef atomic_full_barrier
# define atomic_full_barrier() __asm ("" ::: "memory")
#endif
#ifndef atomic_read_barrier
# define atomic_read_barrier() atomic_full_barrier ()
#endif
#ifndef atomic_write_barrier
# define atomic_write_barrier() atomic_full_barrier ()
#endif
#ifndef DEFAULT_TOP_PAD
# define DEFAULT_TOP_PAD 131072
#endif
#endif /* !defined(_GENERIC_MALLOC_MACHINE_H) */


@ -0,0 +1,48 @@
/*
* $Id:$
* Generic version: no threads.
* by Wolfram Gloger 2004
*/
#include <stdio.h>
struct thread_st {
char *sp; /* stack pointer, can be 0 */
void (*func)(struct thread_st* st); /* must be set by user */
int id;
int flags;
struct user_data u;
};
static void
thread_init(void)
{
printf("No threads.\n");
}
/* Create a thread. */
static int
thread_create(struct thread_st *st)
{
st->flags = 0;
st->id = 1;
st->func(st);
return 0;
}
/* Wait for one of several subthreads to finish. */
static void
wait_for_thread(struct thread_st st[], int n_thr,
int (*end_thr)(struct thread_st*))
{
int i;
for(i=0; i<n_thr; i++)
if(end_thr)
end_thr(&st[i]);
}
/*
* Local variables:
* tab-width: 4
* End:
*/


@ -0,0 +1,132 @@
/* Basic platform-independent macro definitions for mutexes,
thread-specific data and parameters for malloc.
Posix threads (pthreads) version.
Copyright (C) 2004 Wolfram Gloger <wg@malloc.de>.
Permission to use, copy, modify, distribute, and sell this software
and its documentation for any purpose is hereby granted without fee,
provided that (i) the above copyright notices and this permission
notice appear in all copies of the software and related documentation,
and (ii) the name of Wolfram Gloger may not be used in any advertising
or publicity relating to the software.
THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
IN NO EVENT SHALL WOLFRAM GLOGER BE LIABLE FOR ANY SPECIAL,
INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, OR ANY
DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY
OF LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _PTHREAD_MALLOC_MACHINE_H
#define _PTHREAD_MALLOC_MACHINE_H
#include <pthread.h>
#undef thread_atfork_static
/* Use fast inline spinlocks with gcc. */
#if (defined __i386__ || defined __x86_64__) && defined __GNUC__ && \
!defined USE_NO_SPINLOCKS
#include <time.h>
#include <sched.h>
typedef struct {
volatile unsigned int lock;
int pad0_;
} mutex_t;
#define MUTEX_INITIALIZER { 0 }
#define mutex_init(m) ((m)->lock = 0)
static inline int mutex_lock(mutex_t *m) {
int cnt = 0, r;
struct timespec tm;
for(;;) {
__asm__ __volatile__
("xchgl %0, %1"
: "=r"(r), "=m"(m->lock)
: "0"(1), "m"(m->lock)
: "memory");
if(!r)
return 0;
if(cnt < 50) {
sched_yield();
cnt++;
} else {
tm.tv_sec = 0;
tm.tv_nsec = 2000001;
nanosleep(&tm, NULL);
cnt = 0;
}
}
}
static inline int mutex_trylock(mutex_t *m) {
int r;
__asm__ __volatile__
("xchgl %0, %1"
: "=r"(r), "=m"(m->lock)
: "0"(1), "m"(m->lock)
: "memory");
return r;
}
static inline int mutex_unlock(mutex_t *m) {
m->lock = 0;
__asm __volatile ("" : "=m" (m->lock) : "0" (m->lock));
return 0;
}
#else
/* Normal pthread mutex. */
typedef pthread_mutex_t mutex_t;
#define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
#define mutex_init(m) pthread_mutex_init(m, NULL)
#define mutex_lock(m) pthread_mutex_lock(m)
#define mutex_trylock(m) pthread_mutex_trylock(m)
#define mutex_unlock(m) pthread_mutex_unlock(m)
#endif /* (__i386__ || __x86_64__) && __GNUC__ && !USE_NO_SPINLOCKS */
/* thread specific data */
#if defined(__sgi) || defined(USE_TSD_DATA_HACK)
/* Hack for thread-specific data, e.g. on Irix 6.x. We can't use
pthread_setspecific because that function calls malloc() itself.
The hack only works when pthread_t can be converted to an integral
type. */
typedef void *tsd_key_t[256];
#define tsd_key_create(key, destr) do { \
int i; \
for(i=0; i<256; i++) (*key)[i] = 0; \
} while(0)
#define tsd_setspecific(key, data) \
(key[(unsigned)pthread_self() % 256] = (data))
#define tsd_getspecific(key, vptr) \
(vptr = key[(unsigned)pthread_self() % 256])
#else
typedef pthread_key_t tsd_key_t;
#define tsd_key_create(key, destr) pthread_key_create(key, destr)
#define tsd_setspecific(key, data) pthread_setspecific(key, data)
#define tsd_getspecific(key, vptr) (vptr = pthread_getspecific(key))
#endif
/* at fork */
#define thread_atfork(prepare, parent, child) \
pthread_atfork(prepare, parent, child)
#include <sysdeps/generic/malloc-machine.h>
#endif /* !defined(_MALLOC_MACHINE_H) */
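The inline-assembly fast path above is easiest to follow in isolation. The following is a self-contained sketch of the same xchg-based spinlock technique (my own names, not the header's macros), compilable with gcc on i386/x86_64; the real header additionally falls back to nanosleep() after 50 failed sched_yield() attempts.

#include <sched.h>

typedef struct { volatile unsigned int lock; } spin_t;

static int spin_trylock(spin_t *m)
{
  int r;
  /* Atomically swap 1 into the lock word; the old value tells us
     whether it was already held (non-zero) or free (zero). */
  __asm__ __volatile__("xchgl %0, %1"
                       : "=r"(r), "=m"(m->lock)
                       : "0"(1), "m"(m->lock)
                       : "memory");
  return r;                     /* 0 = acquired, non-zero = busy */
}

static void spin_lock(spin_t *m)
{
  while (spin_trylock(m))
    sched_yield();              /* back off instead of spinning hot */
}

static void spin_unlock(spin_t *m)
{
  __asm__ __volatile__("" ::: "memory");  /* keep the critical section before the store */
  m->lock = 0;                            /* a plain aligned store is a release on x86 */
}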


@ -0,0 +1,111 @@
/*
* $Id: thread-st.h$
* pthread version
* by Wolfram Gloger 2004
*/
#include <pthread.h>
#include <stdio.h>
pthread_cond_t finish_cond = PTHREAD_COND_INITIALIZER;
pthread_mutex_t finish_mutex = PTHREAD_MUTEX_INITIALIZER;
#ifndef USE_PTHREADS_STACKS
#define USE_PTHREADS_STACKS 0
#endif
#ifndef STACKSIZE
#define STACKSIZE 32768
#endif
struct thread_st {
char *sp; /* stack pointer, can be 0 */
void (*func)(struct thread_st* st); /* must be set by user */
pthread_t id;
int flags;
struct user_data u;
};
static void
thread_init(void)
{
printf("Using posix threads.\n");
pthread_cond_init(&finish_cond, NULL);
pthread_mutex_init(&finish_mutex, NULL);
}
static void *
thread_wrapper(void *ptr)
{
struct thread_st *st = (struct thread_st*)ptr;
/*printf("begin %p\n", st->sp);*/
st->func(st);
pthread_mutex_lock(&finish_mutex);
st->flags = 1;
pthread_mutex_unlock(&finish_mutex);
pthread_cond_signal(&finish_cond);
/*printf("end %p\n", st->sp);*/
return NULL;
}
/* Create a thread. */
static int
thread_create(struct thread_st *st)
{
st->flags = 0;
{
pthread_attr_t* attr_p = 0;
#if USE_PTHREADS_STACKS
pthread_attr_t attr;
pthread_attr_init (&attr);
if(!st->sp)
st->sp = malloc(STACKSIZE+16);
if(!st->sp)
return -1;
if(pthread_attr_setstacksize(&attr, STACKSIZE))
fprintf(stderr, "error setting stacksize");
else
pthread_attr_setstackaddr(&attr, st->sp + STACKSIZE);
/*printf("create %p\n", st->sp);*/
attr_p = &attr;
#endif
return pthread_create(&st->id, attr_p, thread_wrapper, st);
}
return 0;
}
/* Wait for one of several subthreads to finish. */
static void
wait_for_thread(struct thread_st st[], int n_thr,
int (*end_thr)(struct thread_st*))
{
int i;
pthread_mutex_lock(&finish_mutex);
for(;;) {
int term = 0;
for(i=0; i<n_thr; i++)
if(st[i].flags) {
/*printf("joining %p\n", st[i].sp);*/
if(pthread_join(st[i].id, NULL) == 0) {
st[i].flags = 0;
if(end_thr)
end_thr(&st[i]);
} else
fprintf(stderr, "can't join\n");
++term;
}
if(term > 0)
break;
pthread_cond_wait(&finish_cond, &finish_mutex);
}
pthread_mutex_unlock(&finish_mutex);
}
/*
* Local variables:
* tab-width: 4
* End:
*/
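The harness above expects the including translation unit to define struct user_data before the include and to fill in func before calling thread_create(); wait_for_thread() then joins finished threads and hands each to an optional callback. A minimal, hypothetical driver (work, done and main are my names, not part of the harness) might look like this, assuming the pthread thread-st.h above is included as "thread-st.h" and the program is built with -pthread; t-test1.c below does the same thing at full scale.

#include <stdio.h>

struct user_data { int iterations; };   /* must be defined before the include */
#include "thread-st.h"

static void work(struct thread_st *st)
{
  printf("worker: %d iterations requested\n", st->u.iterations);
}

static int done(struct thread_st *st)   /* per-thread completion callback */
{
  printf("worker for %d iterations finished\n", st->u.iterations);
  return 0;
}

int main(void)
{
  struct thread_st st;

  thread_init();
  st.sp = 0;                 /* let pthreads allocate the stack */
  st.u.iterations = 10;
  st.func = work;
  if (thread_create(&st))
    return 1;
  wait_for_thread(&st, 1, done);
  return 0;
}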


@ -0,0 +1,51 @@
/* Basic platform-independent macro definitions for mutexes,
thread-specific data and parameters for malloc.
Solaris threads version.
Copyright (C) 2004 Wolfram Gloger <wg@malloc.de>.
Permission to use, copy, modify, distribute, and sell this software
and its documentation for any purpose is hereby granted without fee,
provided that (i) the above copyright notices and this permission
notice appear in all copies of the software and related documentation,
and (ii) the name of Wolfram Gloger may not be used in any advertising
or publicity relating to the software.
THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
IN NO EVENT SHALL WOLFRAM GLOGER BE LIABLE FOR ANY SPECIAL,
INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, OR ANY
DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY
OF LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _SOLARIS_MALLOC_MACHINE_H
#define _SOLARIS_MALLOC_MACHINE_H
#include <thread.h>
typedef thread_t thread_id;
#define MUTEX_INITIALIZER { 0 }
#define mutex_init(m) mutex_init(m, USYNC_THREAD, NULL)
/*
* Hack for thread-specific data on Solaris. We can't use thr_setspecific
* because that function calls malloc() itself.
*/
typedef void *tsd_key_t[256];
#define tsd_key_create(key, destr) do { \
int i; \
for(i=0; i<256; i++) (*key)[i] = 0; \
} while(0)
#define tsd_setspecific(key, data) (key[(unsigned)thr_self() % 256] = (data))
#define tsd_getspecific(key, vptr) (vptr = key[(unsigned)thr_self() % 256])
#define thread_atfork(prepare, parent, child) do {} while(0)
#include <sysdeps/generic/malloc-machine.h>
#endif /* !defined(_SOLARIS_MALLOC_MACHINE_H) */


@ -0,0 +1,72 @@
/*
* $Id:$
* Solaris version
* by Wolfram Gloger 2004
*/
#include <thread.h>
#include <stdio.h>
#ifndef STACKSIZE
#define STACKSIZE 32768
#endif
struct thread_st {
char *sp; /* stack pointer, can be 0 */
void (*func)(struct thread_st* st); /* must be set by user */
thread_id id;
int flags;
struct user_data u;
};
static void
thread_init(void)
{
printf("Using Solaris threads.\n");
}
static void *
thread_wrapper(void *ptr)
{
struct thread_st *st = (struct thread_st*)ptr;
/*printf("begin %p\n", st->sp);*/
st->func(st);
/*printf("end %p\n", st->sp);*/
return NULL;
}
/* Create a thread. */
static int
thread_create(struct thread_st *st)
{
st->flags = 0;
if(!st->sp)
st->sp = malloc(STACKSIZE);
if(!st->sp) return -1;
thr_create(st->sp, STACKSIZE, thread_wrapper, st, THR_NEW_LWP, &st->id);
return 0;
}
/* Wait for one of several subthreads to finish. */
static void
wait_for_thread(struct thread_st st[], int n_thr,
int (*end_thr)(struct thread_st*))
{
int i;
thread_t id;
thr_join(0, &id, NULL);
for(i=0; i<n_thr; i++)
if(id == st[i].id) {
if(end_thr)
end_thr(&st[i]);
break;
}
}
/*
* Local variables:
* tab-width: 4
* End:
*/


@ -0,0 +1,51 @@
/* Basic platform-independent macro definitions for mutexes,
thread-specific data and parameters for malloc.
SGI threads (sprocs) version.
Copyright (C) 2004 Wolfram Gloger <wg@malloc.de>.
Permission to use, copy, modify, distribute, and sell this software
and its documentation for any purpose is hereby granted without fee,
provided that (i) the above copyright notices and this permission
notice appear in all copies of the software and related documentation,
and (ii) the name of Wolfram Gloger may not be used in any advertising
or publicity relating to the software.
THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
IN NO EVENT SHALL WOLFRAM GLOGER BE LIABLE FOR ANY SPECIAL,
INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, OR ANY
DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY
OF LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _SPROC_MALLOC_MACHINE_H
#define _SPROC_MALLOC_MACHINE_H
#include <sys/wait.h>
#include <sys/types.h>
#include <sys/prctl.h>
#include <abi_mutex.h>
typedef abilock_t mutex_t;
#define MUTEX_INITIALIZER { 0 }
#define mutex_init(m) init_lock(m)
#define mutex_lock(m) (spin_lock(m), 0)
#define mutex_trylock(m) acquire_lock(m)
#define mutex_unlock(m) release_lock(m)
typedef int tsd_key_t;
int tsd_key_next;
#define tsd_key_create(key, destr) ((*key) = tsd_key_next++)
#define tsd_setspecific(key, data) (((void **)(&PRDA->usr_prda))[key] = data)
#define tsd_getspecific(key, vptr) (vptr = ((void **)(&PRDA->usr_prda))[key])
#define thread_atfork(prepare, parent, child) do {} while(0)
#include <sysdeps/generic/malloc-machine.h>
#endif /* !defined(_SPROC_MALLOC_MACHINE_H) */


@ -0,0 +1,84 @@
/*
* $Id:$
* sproc version
* by Wolfram Gloger 2001, 2004
*/
#include <stdio.h>
#include <sys/wait.h>
#include <sys/types.h>
#ifndef STACKSIZE
#define STACKSIZE 32768
#endif
struct thread_st {
char *sp; /* stack pointer, can be 0 */
void (*func)(struct thread_st* st); /* must be set by user */
thread_id id;
int flags;
struct user_data u;
};
static void
thread_init(void)
{
printf("Using sproc() threads.\n");
}
static void
thread_wrapper(void *ptr, size_t stack_len)
{
struct thread_st *st = (struct thread_st*)ptr;
/*printf("begin %p\n", st->sp);*/
st->func(st);
/*printf("end %p\n", st->sp);*/
}
/* Create a thread. */
static int
thread_create(struct thread_st *st)
{
st->flags = 0;
if(!st->sp)
st->sp = malloc(STACKSIZE);
if(!st->sp) return -1;
st->id = sprocsp(thread_wrapper, PR_SALL, st, st->sp+STACKSIZE, STACKSIZE);
if(st->id < 0) {
return -1;
}
return 0;
}
/* Wait for one of several subthreads to finish. */
static void
wait_for_thread(struct thread_st st[], int n_thr,
int (*end_thr)(struct thread_st*))
{
int i;
int id;
int status = 0;
id = wait(&status);
if(status != 0) {
if(WIFSIGNALED(status))
printf("thread %id terminated by signal %d\n",
id, WTERMSIG(status));
else
printf("thread %id exited with status %d\n",
id, WEXITSTATUS(status));
}
for(i=0; i<n_thr; i++)
if(id == st[i].id) {
if(end_thr)
end_thr(&st[i]);
break;
}
}
/*
* Local variables:
* tab-width: 4
* End:
*/

143
opal/memory/ptmalloc2/t-test.h Normal file

@ -0,0 +1,143 @@
/*
* $Id: t-test.h,v 1.1 2004/11/04 14:32:21 wg Exp $
* by Wolfram Gloger 1996.
* Common data structures and functions for testing malloc performance.
*/
/* Testing level */
#ifndef TEST
#define TEST 0
#endif
/* For large allocation sizes, the time required by copying in
realloc() can dwarf all other execution times. Avoid this with a
size threshold. */
#ifndef REALLOC_MAX
#define REALLOC_MAX 2000
#endif
struct bin {
unsigned char *ptr;
unsigned long size;
};
#if TEST > 0
static void
mem_init(unsigned char *ptr, unsigned long size)
{
unsigned long i, j;
if(size == 0) return;
for(i=0; i<size; i+=2047) {
j = (unsigned long)ptr ^ i;
ptr[i] = ((j ^ (j>>8)) & 0xFF);
}
j = (unsigned long)ptr ^ (size-1);
ptr[size-1] = ((j ^ (j>>8)) & 0xFF);
}
static int
mem_check(unsigned char *ptr, unsigned long size)
{
unsigned long i, j;
if(size == 0) return 0;
for(i=0; i<size; i+=2047) {
j = (unsigned long)ptr ^ i;
if(ptr[i] != ((j ^ (j>>8)) & 0xFF)) return 1;
}
j = (unsigned long)ptr ^ (size-1);
if(ptr[size-1] != ((j ^ (j>>8)) & 0xFF)) return 2;
return 0;
}
static int
zero_check(unsigned* ptr, unsigned long size)
{
unsigned char* ptr2;
while(size >= sizeof(*ptr)) {
if(*ptr++ != 0)
return -1;
size -= sizeof(*ptr);
}
ptr2 = (unsigned char*)ptr;
while(size > 0) {
if(*ptr2++ != 0)
return -1;
--size;
}
return 0;
}
#endif /* TEST > 0 */
/* Allocate a bin with malloc(), realloc() or memalign(). r must be a
random number >= 1024. */
static void
bin_alloc(struct bin *m, unsigned long size, int r)
{
#if TEST > 0
if(mem_check(m->ptr, m->size)) {
printf("memory corrupt!\n");
exit(1);
}
#endif
r %= 1024;
/*printf("%d ", r);*/
if(r < 4) { /* memalign */
if(m->size > 0) free(m->ptr);
m->ptr = (unsigned char *)memalign(sizeof(int) << r, size);
} else if(r < 20) { /* calloc */
if(m->size > 0) free(m->ptr);
m->ptr = (unsigned char *)calloc(size, 1);
#if TEST > 0
if(zero_check((unsigned*)m->ptr, size)) {
long i;
for(i=0; i<size; i++)
if(m->ptr[i] != 0)
break;
printf("calloc'ed memory non-zero (ptr=%p, i=%ld)!\n", m->ptr, i);
exit(1);
}
#endif
} else if(r < 100 && m->size < REALLOC_MAX) { /* realloc */
if(m->size == 0) m->ptr = NULL;
m->ptr = realloc(m->ptr, size);
} else { /* plain malloc */
if(m->size > 0) free(m->ptr);
m->ptr = (unsigned char *)malloc(size);
}
if(!m->ptr) {
printf("out of memory (r=%d, size=%ld)!\n", r, (long)size);
exit(1);
}
m->size = size;
#if TEST > 0
mem_init(m->ptr, m->size);
#endif
}
/* Free a bin. */
static void
bin_free(struct bin *m)
{
if(m->size == 0) return;
#if TEST > 0
if(mem_check(m->ptr, m->size)) {
printf("memory corrupt!\n");
exit(1);
}
#endif
free(m->ptr);
m->size = 0;
}
/*
* Local variables:
* tab-width: 4
* End:
*/

285
opal/memory/ptmalloc2/t-test1.c Normal file

@ -0,0 +1,285 @@
/*
* $Id: t-test1.c,v 1.2 2004/11/04 14:58:45 wg Exp $
* by Wolfram Gloger 1996-1999, 2001, 2004
* A multi-thread test for malloc performance, maintaining one pool of
* allocated bins per thread.
*/
#if (defined __STDC__ && __STDC__) || defined __cplusplus
# include <stdlib.h>
#endif
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include <sys/mman.h>
#if !USE_MALLOC
#include <malloc.h>
#else
#include "malloc.h"
#endif
#include "lran2.h"
#include "t-test.h"
struct user_data {
int bins, max;
unsigned long size;
long seed;
};
#include "thread-st.h"
#define N_TOTAL 10
#ifndef N_THREADS
#define N_THREADS 2
#endif
#ifndef N_TOTAL_PRINT
#define N_TOTAL_PRINT 50
#endif
#ifndef MEMORY
#define MEMORY 8000000l
#endif
#define SIZE 10000
#define I_MAX 10000
#define ACTIONS_MAX 30
#ifndef TEST_FORK
#define TEST_FORK 0
#endif
#define RANDOM(d,s) (lran2(d) % (s))
struct bin_info {
struct bin *m;
unsigned long size, bins;
};
#if TEST > 0
void
bin_test(struct bin_info *p)
{
int b;
for(b=0; b<p->bins; b++) {
if(mem_check(p->m[b].ptr, p->m[b].size)) {
printf("memory corrupt!\n");
abort();
}
}
}
#endif
void
malloc_test(struct thread_st *st)
{
int b, i, j, actions, pid = 1;
struct bin_info p;
struct lran2_st ld; /* data for random number generator */
lran2_init(&ld, st->u.seed);
#if TEST_FORK>0
if(RANDOM(&ld, TEST_FORK) == 0) {
int status;
#if !USE_THR
pid = fork();
#else
pid = fork1();
#endif
if(pid > 0) {
/*printf("forked, waiting for %d...\n", pid);*/
waitpid(pid, &status, 0);
printf("done with %d...\n", pid);
if(!WIFEXITED(status)) {
printf("child term with signal %d\n", WTERMSIG(status));
exit(1);
}
return;
}
exit(0);
}
#endif
p.m = (struct bin *)malloc(st->u.bins*sizeof(*p.m));
p.bins = st->u.bins;
p.size = st->u.size;
for(b=0; b<p.bins; b++) {
p.m[b].size = 0;
p.m[b].ptr = NULL;
if(RANDOM(&ld, 2) == 0)
bin_alloc(&p.m[b], RANDOM(&ld, p.size) + 1, lran2(&ld));
}
for(i=0; i<=st->u.max;) {
#if TEST > 1
bin_test(&p);
#endif
actions = RANDOM(&ld, ACTIONS_MAX);
#if USE_MALLOC && MALLOC_DEBUG
if(actions < 2) { mallinfo(); }
#endif
for(j=0; j<actions; j++) {
b = RANDOM(&ld, p.bins);
bin_free(&p.m[b]);
}
i += actions;
actions = RANDOM(&ld, ACTIONS_MAX);
for(j=0; j<actions; j++) {
b = RANDOM(&ld, p.bins);
bin_alloc(&p.m[b], RANDOM(&ld, p.size) + 1, lran2(&ld));
#if TEST > 2
bin_test(&p);
#endif
}
#if 0 /* Test illegal free()s while setting MALLOC_CHECK_ */
for(j=0; j<8; j++) {
b = RANDOM(&ld, p.bins);
if(p.m[b].ptr) {
int offset = (RANDOM(&ld, 11) - 5)*8;
char *rogue = (char*)(p.m[b].ptr) + offset;
/*printf("p=%p rogue=%p\n", p.m[b].ptr, rogue);*/
free(rogue);
}
}
#endif
i += actions;
}
for(b=0; b<p.bins; b++)
bin_free(&p.m[b]);
free(p.m);
if(pid == 0)
exit(0);
}
int n_total=0, n_total_max=N_TOTAL, n_running;
int
my_end_thread(struct thread_st *st)
{
/* Thread st has finished. Start a new one. */
#if 0
printf("Thread %lx terminated.\n", (long)st->id);
#endif
if(n_total >= n_total_max) {
n_running--;
} else if(st->u.seed++, thread_create(st)) {
printf("Creating thread #%d failed.\n", n_total);
} else {
n_total++;
if(n_total%N_TOTAL_PRINT == 0)
printf("n_total = %d\n", n_total);
}
return 0;
}
#if 0
/* Protect address space for allocation of n threads by LinuxThreads. */
static void
protect_stack(int n)
{
char buf[2048*1024];
char* guard;
size_t guard_size = 2*2048*1024UL*(n+2);
buf[0] = '\0';
guard = (char*)(((unsigned long)buf - 4096)& ~4095UL) - guard_size;
printf("Setting up stack guard at %p\n", guard);
if(mmap(guard, guard_size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
-1, 0)
!= guard)
printf("failed!\n");
}
#endif
int
main(int argc, char *argv[])
{
int i, bins;
int n_thr=N_THREADS;
int i_max=I_MAX;
unsigned long size=SIZE;
struct thread_st *st;
#if USE_MALLOC && USE_STARTER==2
ptmalloc_init();
printf("ptmalloc_init\n");
#endif
if(argc > 1) n_total_max = atoi(argv[1]);
if(n_total_max < 1) n_thr = 1;
if(argc > 2) n_thr = atoi(argv[2]);
if(n_thr < 1) n_thr = 1;
if(n_thr > 100) n_thr = 100;
if(argc > 3) i_max = atoi(argv[3]);
if(argc > 4) size = atol(argv[4]);
if(size < 2) size = 2;
bins = MEMORY/(size*n_thr);
if(argc > 5) bins = atoi(argv[5]);
if(bins < 4) bins = 4;
/*protect_stack(n_thr);*/
thread_init();
printf("total=%d threads=%d i_max=%d size=%ld bins=%d\n",
n_total_max, n_thr, i_max, size, bins);
st = (struct thread_st *)malloc(n_thr*sizeof(*st));
if(!st) exit(-1);
#if !defined NO_THREADS && (defined __sun__ || defined sun)
/* I know of no other way to achieve proper concurrency with Solaris. */
thr_setconcurrency(n_thr);
#endif
/* Start all n_thr threads. */
for(i=0; i<n_thr; i++) {
st[i].u.bins = bins;
st[i].u.max = i_max;
st[i].u.size = size;
st[i].u.seed = ((long)i_max*size + i) ^ bins;
st[i].sp = 0;
st[i].func = malloc_test;
if(thread_create(&st[i])) {
printf("Creating thread #%d failed.\n", i);
n_thr = i;
break;
}
printf("Created thread %lx.\n", (long)st[i].id);
}
/* Start an extra thread so we don't run out of stacks. */
if(0) {
struct thread_st lst;
lst.u.bins = 10; lst.u.max = 20; lst.u.size = 8000; lst.u.seed = 8999;
lst.sp = 0;
lst.func = malloc_test;
if(thread_create(&lst)) {
printf("Creating thread #%d failed.\n", i);
} else {
wait_for_thread(&lst, 1, NULL);
}
}
for(n_running=n_total=n_thr; n_running>0;) {
wait_for_thread(st, n_thr, my_end_thread);
}
for(i=0; i<n_thr; i++) {
free(st[i].sp);
}
free(st);
#if USE_MALLOC
malloc_stats();
#endif
printf("Done.\n");
return 0;
}
/*
* Local variables:
* tab-width: 4
* End:
*/

231
opal/memory/ptmalloc2/t-test2.c Normal file

@ -0,0 +1,231 @@
/*
* $Id: t-test2.c,v 1.3 2004/11/04 15:01:05 wg Exp $
* by Wolfram Gloger 1996-1999, 2001, 2004
* A multi-thread test for malloc performance, maintaining a single
* global pool of allocated bins.
*/
#if (defined __STDC__ && __STDC__) || defined __cplusplus
# include <stdlib.h>
#endif
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/wait.h>
#if !USE_MALLOC
#include <malloc.h>
#else
#include "malloc.h"
#endif
#include "lran2.h"
#include "t-test.h"
struct user_data {
int max;
unsigned long size;
long seed;
};
#include "thread-st.h"
#include "malloc-machine.h" /* for mutex */
#define N_TOTAL 10
#ifndef N_THREADS
#define N_THREADS 2
#endif
#ifndef N_TOTAL_PRINT
#define N_TOTAL_PRINT 50
#endif
#define STACKSIZE 32768
#ifndef MEMORY
#define MEMORY 8000000l
#endif
#define SIZE 10000
#define I_MAX 10000
#define BINS_PER_BLOCK 20
#define RANDOM(d,s) (lran2(d) % (s))
struct block {
struct bin b[BINS_PER_BLOCK];
mutex_t mutex;
} *blocks;
int n_blocks;
#if TEST > 0
void
bin_test(void)
{
int b, i;
for(b=0; b<n_blocks; b++) {
mutex_lock(&blocks[b].mutex);
for(i=0; i<BINS_PER_BLOCK; i++) {
if(mem_check(blocks[b].b[i].ptr, blocks[b].b[i].size)) {
printf("memory corrupt!\n");
exit(1);
}
}
mutex_unlock(&blocks[b].mutex);
}
}
#endif
void
malloc_test(struct thread_st *st)
{
struct block *bl;
int i, b, r;
struct lran2_st ld; /* data for random number generator */
unsigned long rsize[BINS_PER_BLOCK];
int rnum[BINS_PER_BLOCK];
lran2_init(&ld, st->u.seed);
for(i=0; i<=st->u.max;) {
#if TEST > 1
bin_test();
#endif
bl = &blocks[RANDOM(&ld, n_blocks)];
r = RANDOM(&ld, 1024);
if(r < 200) { /* free only */
mutex_lock(&bl->mutex);
for(b=0; b<BINS_PER_BLOCK; b++)
bin_free(&bl->b[b]);
mutex_unlock(&bl->mutex);
i += BINS_PER_BLOCK;
} else { /* alloc/realloc */
/* Generate random numbers in advance. */
for(b=0; b<BINS_PER_BLOCK; b++) {
rsize[b] = RANDOM(&ld, st->u.size) + 1;
rnum[b] = lran2(&ld);
}
mutex_lock(&bl->mutex);
for(b=0; b<BINS_PER_BLOCK; b++)
bin_alloc(&bl->b[b], rsize[b], rnum[b]);
mutex_unlock(&bl->mutex);
i += BINS_PER_BLOCK;
}
#if TEST > 2
bin_test();
#endif
}
}
int n_total=0, n_total_max=N_TOTAL, n_running;
int
my_end_thread(struct thread_st *st)
{
/* Thread st has finished. Start a new one. */
#if 0
printf("Thread %lx terminated.\n", (long)st->id);
#endif
if(n_total >= n_total_max) {
n_running--;
} else if(st->u.seed++, thread_create(st)) {
printf("Creating thread #%d failed.\n", n_total);
} else {
n_total++;
if(n_total%N_TOTAL_PRINT == 0)
printf("n_total = %d\n", n_total);
}
return 0;
}
int
main(int argc, char *argv[])
{
int i, j, bins;
int n_thr=N_THREADS;
int i_max=I_MAX;
unsigned long size=SIZE;
struct thread_st *st;
#if USE_MALLOC && USE_STARTER==2
ptmalloc_init();
printf("ptmalloc_init\n");
#endif
if(argc > 1) n_total_max = atoi(argv[1]);
if(n_total_max < 1) n_thr = 1;
if(argc > 2) n_thr = atoi(argv[2]);
if(n_thr < 1) n_thr = 1;
if(n_thr > 100) n_thr = 100;
if(argc > 3) i_max = atoi(argv[3]);
if(argc > 4) size = atol(argv[4]);
if(size < 2) size = 2;
bins = MEMORY/size;
if(argc > 5) bins = atoi(argv[5]);
if(bins < BINS_PER_BLOCK) bins = BINS_PER_BLOCK;
n_blocks = bins/BINS_PER_BLOCK;
blocks = (struct block *)malloc(n_blocks*sizeof(*blocks));
if(!blocks)
exit(1);
thread_init();
printf("total=%d threads=%d i_max=%d size=%ld bins=%d\n",
n_total_max, n_thr, i_max, size, n_blocks*BINS_PER_BLOCK);
for(i=0; i<n_blocks; i++) {
mutex_init(&blocks[i].mutex);
for(j=0; j<BINS_PER_BLOCK; j++) blocks[i].b[j].size = 0;
}
st = (struct thread_st *)malloc(n_thr*sizeof(*st));
if(!st) exit(-1);
#if !defined NO_THREADS && (defined __sun__ || defined sun)
/* I know of no other way to achieve proper concurrency with Solaris. */
thr_setconcurrency(n_thr);
#endif
/* Start all n_thr threads. */
for(i=0; i<n_thr; i++) {
st[i].u.max = i_max;
st[i].u.size = size;
st[i].u.seed = ((long)i_max*size + i) ^ n_blocks;
st[i].sp = 0;
st[i].func = malloc_test;
if(thread_create(&st[i])) {
printf("Creating thread #%d failed.\n", i);
n_thr = i;
break;
}
printf("Created thread %lx.\n", (long)st[i].id);
}
for(n_running=n_total=n_thr; n_running>0;) {
wait_for_thread(st, n_thr, my_end_thread);
}
for(i=0; i<n_blocks; i++) {
for(j=0; j<BINS_PER_BLOCK; j++)
bin_free(&blocks[i].b[j]);
}
for(i=0; i<n_thr; i++) {
free(st[i].sp);
}
free(st);
free(blocks);
#if USE_MALLOC
malloc_stats();
#endif
printf("Done.\n");
return 0;
}
/*
* Local variables:
* tab-width: 4
* End:
*/

82
opal/memory/ptmalloc2/tst-mallocstate.c Normal file

@ -0,0 +1,82 @@
/* Copyright (C) 2001 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
#include <errno.h>
#include <stdio.h>
#include "malloc.h"
static int errors = 0;
static void
merror (const char *msg)
{
++errors;
printf ("Error: %s\n", msg);
}
int
main (void)
{
void *p1, *p2;
void *save_state;
long i;
errno = 0;
p1 = malloc (10);
if (p1 == NULL)
merror ("malloc (10) failed.");
p2 = malloc (20);
if (p2 == NULL)
merror ("malloc (20) failed.");
free (malloc (10));
for (i=0; i<100; ++i)
{
save_state = malloc_get_state ();
if (save_state == NULL)
{
merror ("malloc_get_state () failed.");
break;
}
/*free (malloc (10)); This could change the top chunk! */
malloc_set_state (save_state);
p1 = realloc (p1, i*4 + 4);
if (p1 == NULL)
merror ("realloc (i*4) failed.");
free (save_state);
}
p1 = realloc (p1, 40);
free (p2);
p2 = malloc (10);
if (p2 == NULL)
merror ("malloc (10) failed.");
free (p1);
return errors != 0;
}
/*
* Local variables:
* c-basic-offset: 2
* End:
*/

100
opal/memory/ptmalloc2/tst-mstats.c Normal file

@ -0,0 +1,100 @@
/* Copyright (C) 2004 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Wolfram Gloger <wg@malloc.de>, 2004.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
#include <errno.h>
#include <stdio.h>
#include "malloc.h"
static int errors = 0;
static void
merror (const char *msg)
{
++errors;
printf ("Error: %s\n", msg);
}
int
main (void)
{
void *p1, *p2;
long i;
mstate a;
struct malloc_arena_info mai;
int nfree;
unsigned long navail;
errno = 0;
malloc_stats(); /* check that it works even without initialization */
a = _int_get_arena(0);
if (!a) {
merror ("Can't get main arena.");
return 1;
}
free (malloc (10));
_int_get_arena_info(a, &mai);
printf("nfree = %d\navail = %lu\nfastavail = %lu\ntop_size = %lu\n",
mai.nbinblocks + mai.nfastblocks,
(unsigned long)mai.binavail,
(unsigned long)mai.fastavail,
(unsigned long)mai.top_size);
if (mai.nfastblocks+mai.nbinblocks < 1)
merror ("initial _int_get_arena_info() failed.");
nfree = mai.nbinblocks + mai.nfastblocks;
navail = mai.binavail + mai.fastavail;
p1 = malloc (10);
if (p1 == NULL)
merror ("malloc (10) failed.");
p2 = malloc (30);
if (p2 == NULL)
merror ("malloc (30) failed.");
free (malloc (10));
for (i=0; i<100; ++i)
{
p1 = realloc (p1, i*7 + 3);
if (p1 == NULL)
merror ("realloc (i*7 + 3) failed.");
}
free (p2);
_int_get_arena_info(a, &mai);
printf("nfree = %d\navail = %lu\nfastavail = %lu\ntop_size = %lu\n",
mai.nbinblocks + mai.nfastblocks,
(unsigned long)mai.binavail,
(unsigned long)mai.fastavail,
(unsigned long)mai.top_size);
/* Assume that no memory is returned to the system from these small
chunks. */
if (mai.nbinblocks+mai.nfastblocks < nfree ||
mai.binavail+mai.fastavail < navail)
merror ("final _int_get_arena_info() failed.");
malloc_stats();
return errors != 0;
}
/*
* Local variables:
* c-basic-offset: 2
* End:
*/