Removing unused and little-used macros from lam_object.h
This commit was SVN r725.
This commit is contained in:
parent 31aaaa2a34
commit a8636277af
@@ -6,7 +6,8 @@
 #include "lam/ctnetwork/ctchannel.h"

-#define CHANNEL_CLS(chnl) ((lam_ctchannel_class_t *)(OBJECT(chnl)->obj_class))
+#define CHANNEL_CLS(chnl) \
+    ((lam_ctchannel_class_t *)(((lam_object_t *) chnl)->obj_class))

 lam_ctchannel_class_t lam_ct_channel_t_class_info = {
     {
@@ -8,13 +8,13 @@
 #include "lam/lfc/lam_object.h"

-void lam_object_construct(lam_object_t * obj)
+static void lam_object_construct(lam_object_t * obj)
 {
     obj->obj_reference_count = 1;
 }


-void lam_object_destruct(lam_object_t * obj)
+static void lam_object_destruct(lam_object_t * obj)
 {
     /* Move along, nothing to see here! */
 }
@@ -138,9 +138,6 @@ struct lam_object_t {
     int obj_reference_count;  /**< reference count for the class */
 };

-extern void lam_object_construct(lam_object_t *obj);
-extern void lam_object_destruct(lam_object_t *obj);
-

 /* Inline functions and prototypes *******************************/

@@ -173,16 +170,6 @@ static inline lam_object_t *lam_new(size_t size,
  */
 static inline int fetchNadd(volatile int *addr, int inc);

-/**
- * Test if object inherits from class
- *
- * @param obj Pointer to the object
- * @param class Class to query
- * @return 1 if the object is of, or derived from, typ
- *
- */
-#define OBJ_IS_KIND_OF(obj, class) lam_obj_is_kind_of(obj, class ## _class_info)
-

 /**
  * Test if object inherits from class.
@@ -247,15 +234,6 @@ static inline void lam_obj_release(lam_object_t *obj)
  * Macros
  */

-/**
- * Return a pointer to the object cast to the base object type
- *
- * @param obj Pointer to the object
- * @return Cast pointer to the object
- */
-#define OBJECT(obj) ((lam_object_t *)(obj))
-
-
 /**
  * Return a pointer to the class info descriptor associated with a
  * class type.
@@ -284,7 +262,7 @@ static inline void lam_obj_release(lam_object_t *obj)
  */
 #define OBJ_RETAIN(obj) \
     do { \
-        if (obj) lam_obj_retain(OBJECT(obj)); \
+        if (obj) lam_obj_retain((lam_object_t *) obj); \
     } while (0)


@@ -297,7 +275,7 @@ static inline void lam_obj_release(lam_object_t *obj)
  */
 #define OBJ_RELEASE(obj) \
     do { \
-        if (obj) lam_obj_release(OBJECT(obj)); \
+        if (obj) lam_obj_release((lam_object_t *) obj); \
     } while (0)


@@ -309,8 +287,9 @@ static inline void lam_obj_release(lam_object_t *obj)
  */
 #define OBJ_CONSTRUCT(obj, type) \
     do { \
-        OBJECT(obj)->obj_class_info = CLASS_INFO(type); \
-        OBJECT(obj)->obj_class_info->cls_construct(OBJECT(obj)); \
+        ((lam_object_t *) obj)->obj_class_info = CLASS_INFO(type); \
+        ((lam_object_t *) obj) \
+            ->obj_class_info->cls_construct((lam_object_t *) obj); \
     } while (0)


@@ -321,7 +300,8 @@ static inline void lam_obj_release(lam_object_t *obj)
  */
 #define OBJ_DESTRUCT(obj) \
     do { \
-        OBJECT(obj)->obj_class_info->cls_destruct(OBJECT(obj)); \
+        ((lam_object_t *) obj) \
+            ->obj_class_info->cls_destruct((lam_object_t *) obj); \
     } while (0)


@@ -333,7 +313,7 @@ static inline void lam_obj_release(lam_object_t *obj)
  */
 #define OBJ_CONSTRUCT_SUPER(obj, super_type) \
     do { \
-        CLASS_INFO(super_type)->cls_construct(OBJECT(obj)); \
+        CLASS_INFO(super_type)->cls_construct((lam_object_t *) obj); \
     } while (0)


@@ -345,7 +325,7 @@ static inline void lam_obj_release(lam_object_t *obj)
  */
 #define OBJ_DESTRUCT_SUPER(obj, super_type) \
     do { \
-        CLASS_INFO(super_type)->cls_destruct(OBJECT(obj)); \
+        CLASS_INFO(super_type)->cls_destruct((lam_object_t *) obj); \
     } while (0)

 #endif /* LAM_OBJECT_H */
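With OBJECT() and OBJ_IS_KIND_OF() removed, callers now cast to lam_object_t * explicitly, while the lifecycle macros above keep their meaning. A minimal sketch of how a derived class plugs into them, assuming only what the header shows; the type my_counter_t and its fields are hypothetical:

#include "lam/lfc/lam_object.h"

/* Hypothetical derived class: the lam_object_t member comes first so a
 * cast to (lam_object_t *) is valid. */
typedef struct my_counter {
    lam_object_t super;
    int value;
} my_counter_t;

static void my_counter_construct(my_counter_t *c)
{
    OBJ_CONSTRUCT_SUPER(c, lam_object_t);   /* construct the base first */
    c->value = 0;
}

static void my_counter_destruct(my_counter_t *c)
{
    OBJ_DESTRUCT_SUPER(c, lam_object_t);    /* tear the base down last */
}

lam_class_info_t my_counter_t_class_info = {
    "my_counter_t",
    CLASS_INFO(lam_object_t),
    (lam_construct_t) my_counter_construct,
    (lam_destruct_t) my_counter_destruct
};

void my_counter_example(void)
{
    my_counter_t c;
    OBJ_CONSTRUCT(&c, my_counter_t);   /* sets obj_class_info, runs the constructor */
    c.value++;
    OBJ_DESTRUCT(&c);                  /* runs the destructor via obj_class_info */
}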
@@ -8,15 +8,7 @@
 void *lam_allocator_malloc(lam_allocator_t *allocator, size_t chunk_size);
 void lam_allocator_default_free(lam_allocator_t *allocator, void *base_ptr);

-lam_class_info_t lam_allocator_t_class_info = {
-    "lam_allocator_t",
-    CLASS_INFO(lam_object_t),
-    (lam_construct_t) lam_allocator_construct,
-    (lam_destruct_t) lam_object_destruct
-};
-
-
-void lam_allocator_construct(lam_allocator_t *allocator)
+static void lam_allocator_construct(lam_allocator_t *allocator)
 {
     OBJ_CONSTRUCT_SUPER(allocator, lam_object_t);
     allocator->alc_alloc_fn = lam_allocator_malloc;
@@ -28,6 +20,19 @@ void lam_allocator_construct(lam_allocator_t *allocator)
     allocator->alc_pinned_sz = 0;
 }

+static void lam_allocator_destruct(lam_allocator_t *allocator)
+{
+    OBJ_DESTRUCT_SUPER(allocator, lam_object_t);
+}
+
+lam_class_info_t lam_allocator_t_class_info = {
+    "lam_allocator_t",
+    CLASS_INFO(lam_object_t),
+    (lam_construct_t) lam_allocator_construct,
+    (lam_destruct_t) lam_allocator_destruct
+};
+
+
 void *lam_alg_get_chunk(size_t chunk_size, int is_shared,
                         int mem_protect)
 {
@@ -7,56 +7,92 @@

 #include "lam/lfc/lam_object.h"

-/* This class is used to provide a generic and flexible
-   way for the mem pool to allocate memory.  It's meant
-   to be derived for device-dependent logic, e.g. GM.
-
-   You should be able to share allocators, but then
-   you will need to protect with a lock.
- */
-
 /*
- * Base allocator is a wrapper for malloc
+ * This class is used to provide a generic and flexible way for the
+ * mem pool to allocate memory.  It's meant to be derived for
+ * device-dependent logic, e.g. GM.
+ *
+ * You should be able to share allocators, but then you will need to
+ * protect with a lock.
  */

-typedef struct lam_allocator
-{
-    lam_object_t super;
-    int alc_is_shared;            /* indicates whether to get shared memory */
-    int alc_mem_prot;             /* memory protection for shared mem */
-    int alc_should_pin;           /* should pin memory when allocating */
-    uint64_t alc_pinned_offset;   /* pinned memory offset */
-    uint64_t alc_pinned_sz;       /* pinned mem size (may be different from alloc size. */
-    void *(*alc_alloc_fn)(struct lam_allocator *, size_t);
-    void (*alc_free_fn)(struct lam_allocator *, void *);
+/*
+ * Base allocator is a wrapper for malloc
+ */
+
+typedef struct lam_allocator {
+    lam_object_t super;
+    int alc_is_shared;            /* indicates whether to get shared memory */
+    int alc_mem_prot;             /* memory protection for shared mem */
+    int alc_should_pin;           /* should pin memory when allocating */
+    uint64_t alc_pinned_offset;   /* pinned memory offset */
+    uint64_t alc_pinned_sz;       /* pinned mem size (may be different from alloc size. */
+    void *(*alc_alloc_fn) (struct lam_allocator *, size_t);
+    void (*alc_free_fn) (struct lam_allocator *, void *);
 } lam_allocator_t;

 extern lam_class_info_t lam_allocator_t_class_info;

-void lam_allocator_construct(lam_allocator_t *allocator);
-
-void *lam_alg_get_chunk(size_t chunk_size, int is_shared,
-                        int mem_protect);
+void *lam_alg_get_chunk(size_t chunk_size, int is_shared, int mem_protect);

 void *lam_allocator_alloc(lam_allocator_t *allocator, size_t chunk_size);
 void lam_allocator_free(lam_allocator_t *allocator, void *chunk_ptr);

-static inline int lam_allocator_get_is_shared(lam_allocator_t *allocator) {return allocator->alc_is_shared;}
-static inline void lam_allocator_set_is_shared(lam_allocator_t *allocator, int is_shared) {allocator->alc_is_shared = is_shared;}
+static inline int lam_allocator_get_is_shared(lam_allocator_t *allocator)
+{
+    return allocator->alc_is_shared;
+}

-static inline int lam_allocator_get_mem_prot(lam_allocator_t *allocator) {return allocator->alc_mem_prot;}
-static inline void lam_allocator_set_mem_prot(lam_allocator_t *allocator, int mem_prot) {allocator->alc_mem_prot = mem_prot;}
+static inline void lam_allocator_set_is_shared(lam_allocator_t *allocator,
+                                               int is_shared)
+{
+    allocator->alc_is_shared = is_shared;
+}

-static inline int lam_allocator_get_should_pin(lam_allocator_t *allocator) {return allocator->alc_should_pin;}
-static inline void lam_allocator_set_should_pin(lam_allocator_t *allocator, int pin) {allocator->alc_should_pin = pin;}
+static inline int lam_allocator_get_mem_prot(lam_allocator_t *allocator)
+{
+    return allocator->alc_mem_prot;
+}

-static inline uint64_t lam_allocator_get_pin_offset(lam_allocator_t *allocator) {return allocator->alc_pinned_offset;}
-static inline void lam_allocator_set_pin_offset(lam_allocator_t *allocator, uint64_t pin_offset)
-{allocator->alc_pinned_offset = pin_offset;}
+static inline void lam_allocator_set_mem_prot(lam_allocator_t *allocator,
+                                              int mem_prot)
+{
+    allocator->alc_mem_prot = mem_prot;
+}

-static inline uint64_t lam_allocator_get_pin_size(lam_allocator_t *allocator) {return allocator->alc_pinned_sz;}
-static inline void lam_allocator_set_pin_size(lam_allocator_t *allocator, uint64_t pin_sz)
-{allocator->alc_pinned_sz = pin_sz;}
+static inline int lam_allocator_get_should_pin(lam_allocator_t *allocator)
+{
+    return allocator->alc_should_pin;
+}

-#endif /* LAM_ALLOCATOR_H */
+static inline void lam_allocator_set_should_pin(lam_allocator_t *allocator,
+                                                int pin)
+{
+    allocator->alc_should_pin = pin;
+}
+
+static inline uint64_t lam_allocator_get_pin_offset(lam_allocator_t
+                                                    *allocator)
+{
+    return allocator->alc_pinned_offset;
+}
+
+static inline void lam_allocator_set_pin_offset(lam_allocator_t *allocator,
+                                                uint64_t pin_offset)
+{
+    allocator->alc_pinned_offset = pin_offset;
+}
+
+static inline uint64_t lam_allocator_get_pin_size(lam_allocator_t
+                                                  *allocator)
+{
+    return allocator->alc_pinned_sz;
+}
+
+static inline void lam_allocator_set_pin_size(lam_allocator_t *allocator,
+                                              uint64_t pin_sz)
+{
+    allocator->alc_pinned_sz = pin_sz;
+}

 #endif /* LAM_ALLOCATOR_H */
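Together with the default constructor shown in the allocator source above (which points alc_alloc_fn at the malloc-based implementation), this header can be exercised roughly as below. This is a sketch only; the include path for the allocator header is assumed, not taken from the tree:

#include "lam/lfc/lam_object.h"
#include "lam_allocator.h"   /* assumed include path */

void allocator_example(void)
{
    lam_allocator_t a;

    /* Default construction selects the malloc-backed alloc/free functions. */
    OBJ_CONSTRUCT(&a, lam_allocator_t);
    lam_allocator_set_is_shared(&a, 0);
    lam_allocator_set_should_pin(&a, 0);

    void *chunk = lam_allocator_alloc(&a, 4096);
    if (NULL != chunk) {
        lam_allocator_free(&a, chunk);
    }

    OBJ_DESTRUCT(&a);
}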
@@ -63,9 +63,10 @@ int lam_free_list_grow(lam_free_list_t* flist, size_t num_elements)
     for(i=0; i<num_elements; i++) {
         lam_list_item_t* item = (lam_list_item_t*)ptr;
         if (NULL != flist->fl_elem_class) {
-            /* by-pass OBJ_CONSTRUCT() in this case */
-            OBJECT(item)->obj_class_info = flist->fl_elem_class;
-            OBJECT(item)->obj_class_info->cls_construct(OBJECT(item));
+            /* bypass OBJ_CONSTRUCT() in this case (generic types) */
+            ((lam_object_t *) item)->obj_class_info = flist->fl_elem_class;
+            ((lam_object_t *) item)
+                ->obj_class_info->cls_construct((lam_object_t *) item);
         }
         lam_list_append(&flist->super, item);
         ptr += flist->fl_elem_size;
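The bypass is needed because OBJ_CONSTRUCT() requires a compile-time type name to form CLASS_INFO(type), while the free list only holds the class descriptor as a run-time pointer (fl_elem_class). A hypothetical helper, not part of this change, that captures the same run-time pattern in one place:

/* Run-time counterpart of OBJ_CONSTRUCT(): the class is supplied as a
 * pointer rather than a type name. */
static inline void lam_obj_construct_dynamic(lam_object_t *obj,
                                             lam_class_info_t *cls)
{
    obj->obj_class_info = cls;
    obj->obj_class_info->cls_construct(obj);
}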
@@ -454,9 +454,10 @@ static int lam_free_lists_create_more_elts(lam_free_lists_t *flist, int pool_idx
     current_loc = (char *) ptr;
     for (desc = 0; desc < flist->fl_elt_per_chunk; desc++)
     {
-        /* bypass OBJ_CONSTRUCT() */
-        OBJECT(current_loc)->obj_class_info = flist->fl_elt_cls;
-        OBJECT(current_loc)->obj_class_info->cls_construct(OBJECT(current_loc));
+        /* bypass OBJ_CONSTRUCT() in this case (generic types) */
+        ((lam_object_t *) current_loc)->obj_class_info = flist->fl_elt_cls;
+        ((lam_object_t *) current_loc)
+            ->obj_class_info->cls_construct((lam_object_t *) current_loc);
         current_loc += flist->fl_elt_size;
     }

@@ -60,9 +60,8 @@ typedef struct lam_memcpy_state_t lam_memcpy_state_t;

 /* Function prototype for a generalized memcpy() */
 typedef void *(lam_memcpy_fn_t) (void *restrict dst,
-                                 const void *restrict src,
-                                 size_t size,
-                                 lam_memcpy_state_t *check);
+                                 const void *restrict src,
+                                 size_t size, lam_memcpy_state_t *check);

 /* enums **************************************************************/

@@ -107,20 +106,8 @@ enum lam_datatype_kind_t {
 };


-/**
- * lam_checksum_kind_t - checksum types
- */
-enum lam_checksum_kind_t {
-    LAM_CHECKSUM_KIND_NONE = 0,
-    LAM_CHECKSUM_KIND_CRC32,
-    LAM_CHECKSUM_KIND_SUM32,
-    LAM_CHECKSUM_KIND_SUM64
-};
-
-
 typedef enum lam_datatype_state_t lam_datatype_state_t;
 typedef enum lam_datatype_kind_t lam_datatype_kind_t;
-typedef enum lam_checksum_kind_t lam_checksum_kind_t;

 /* structs ************************************************************/

@@ -128,53 +115,47 @@ typedef enum lam_checksum_kind_t lam_checksum_kind_t;
  * State of incremental memcpy with checksum or CRC
  */
 struct lam_memcpy_state_t {
-    size_t size;           /**< total size in bytes of the object
-                            * being checksummed / CRCed */
-    size_t partial_size;   /**< size of non- uint32_t to be carried
-                            * over to next call */
-    uint32_t partial_int;  /**< value of non- uint32_t to be carried
-                            * over to next call */
-    uint32_t sum;          /**< current value of the CRC or
-                            * checksum */
-    bool first_call;       /**< is this the first call for this
-                            * checksum/CRC? */
+    size_t size;           /**< total size in bytes of the object being checksummed / CRCed */
+    size_t partial_size;   /**< size of non- uint32_t to be carried over to next call */
+    uint32_t partial_int;  /**< value of non- uint32_t to be carried over to next call */
+    uint32_t sum;          /**< current value of the CRC or checksum */
+    bool first_call;       /**< is this the first call for this checksum/CRC? */
 };


 /**
  * Internal representation of MPI datatype
  */
 struct lam_datatype_t {

     lam_object_t d_super;               /**< object super class */

     char d_name[MPI_MAX_OBJECT_NAME];   /**< object name */
     int d_flags;                        /**< bit flags */

     /* cached information */

     ssize_t d_lower_bound;
     size_t d_extent;
     size_t d_packed_size;       /**< size in bytes, ignoring gaps */
     int d_nbasic;               /**< number of basic elements */

     /* optimized representation */

     size_t d_datavec_size;      /**< size of optimized representation */
     lam_datavec_t *d_datavec;   /**< optimized representation (may be null) */

     /* XDR representation */

     size_t d_dataxdr_size;      /**< size of XDR representation */
     lam_dataxdr_t *d_dataxdr;   /**< XDR representation (may be null) */

     /* full representation (c.f. MPI_Type_create_struct) */

     struct {
         lam_datatype_kind_t c_kind;   /**< creation function */
         int c_count;                  /**< number of blocks */
         int *c_blocklengths;          /**< number of elements in each block */
         MPI_Aint *c_offset;           /**< stride/displacement as appropriate */
         lam_datatype_t **c_types;     /**< array of types (array) */
     } d_creator;
 };

@@ -195,9 +176,9 @@ struct lam_datavec_t {
  * An element of a data type in optimized form
  */
 struct lam_datavec_element_t {
     size_t dve_size;          /**< size in bytes of element */
     ssize_t dve_offset;       /**< offset from start of data type */
     ssize_t dve_seq_offset;   /**< offset from start of packed data type */
 };


@@ -206,7 +187,7 @@ struct lam_datavec_element_t {
  */
 struct lam_dataxdr_element_t {
     /* to be done */
     void *x_xdrs;             /**< XDR stream */
 };


@@ -222,9 +203,9 @@ struct lam_dataxdr_element_t {
  * @return 0 on success, -1 on error
  */
 int lam_datatype_checksum(const void *addr,
                           size_t count,
                           lam_datatype_t *datatype,
                           lam_checksum_t *checksum);

 /**
  * Copy (the contents of) an array of data types

@@ -237,11 +218,11 @@ int lam_datatype_checksum(const void *addr,
  * @return 0 on success, -1 on error
  */
 int lam_datatype_copy(void *dst,
                       const void *src,
                       size_t count,
                       lam_datatype_t *datatype,
                       lam_memcpy_fn_t *memcpy_fn,
                       lam_memcpy_state_t *check);

 /**
  * Copy (the contents of) an array of data types, and convert to
@@ -257,13 +238,13 @@ int lam_datatype_copy(void *dst,
  * @return 0 on success, -1 on error
  */
 int lam_datatype_convert(void *dst,
                          lam_datatype_t *dst_datatype,
                          size_t dst_count,
                          const void *src,
                          lam_datatype_t *src_datatype,
                          size_t src_count,
                          lam_memcpy_fn_t *memcpy_fn,
                          lam_memcpy_state_t *check);


 /**
@@ -274,9 +255,9 @@ int lam_datatype_convert(void *dst,
  */
 struct lam_pack_state_t {
     size_t current_offset_packed;    /**< current offset into packed buffer */
     size_t current_type;             /**< current index of datatype */
     size_t current_repeat;           /**< current index of datavec repeat */
     size_t current_element;          /**< current index of datavec element */
     size_t current_offset_datavec;   /**< current offset into datavec element */
 };

@@ -302,13 +283,13 @@ struct lam_pack_state_t {
  * call.
  */
 int lam_datatype_pack(lam_pack_state_t *state,
                       void *buf,
                       size_t bufsize,
                       const void *typebuf,
                       size_t ntype,
                       lam_datatype_t *datatype,
                       lam_memcpy_fn_t *memcpy_fn,
                       lam_memcpy_state_t *check);


 /**
@@ -332,13 +313,13 @@ int lam_datatype_pack(lam_pack_state_t *state,
  * call.
  */
 int lam_datatype_unpack(lam_pack_state_t *state,
                         void *typebuf,
                         size_t ntype,
                         const void *buf,
                         size_t bufsize,
                         lam_datatype_t *datatype,
                         lam_memcpy_fn_t *memcpy_fn,
                         lam_memcpy_state_t *check);

 /**
  * Incrementally generate an iovec for gathering from an array of
@@ -364,14 +345,14 @@ int lam_datatype_unpack(lam_pack_state_t *state,
  * call.
  */
 int lam_datatype_gather_iovec(lam_pack_state_t *state,
                               void *base_addr,
                               struct iovec *vec,
                               size_t vec_count,
                               size_t max_bytes,
                               const void *typebuf,
                               size_t ntype,
                               lam_datatype_t *datatype,
                               lam_checksum_t *checksum);

 /**
  * Incrementally generate an iovec for scattering from a packed array
@@ -402,15 +383,15 @@ int lam_datatype_gather_iovec(lam_pack_state_t *state,
  * call.
  */
 int lam_datatype_scatter_iovec(lam_pack_state_t *state,
                                void *base_addr,
                                struct iovec *vec,
                                size_t vec_count,
                                size_t max_bytes,
                                const void *buf,
                                size_t bufsize,
                                lam_datatype_t *datatype,
                                lam_memcpy_fn_t *memcpy_fn,
                                lam_memcpy_state_t *check);


 /*
@@ -442,7 +423,7 @@ lam_memcpy_init(lam_memcpy_state_t *state, size_t sum_size)
  * @return the original value of dst
  */
 static inline void *lam_memcpy(void *dst, const void *src, size_t size,
                                void *check)
 {
     return memcpy(dst, src, size);
 }
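Since lam_datatype_copy() routes every contiguous block through the supplied lam_memcpy_fn_t, a caller can swap in an instrumented copy without touching the pack/unpack logic. A sketch under that assumption; my_counting_memcpy and copy_with_counting are hypothetical, and only the prototypes above come from this header:

#include <string.h>

/* Hypothetical instrumented memcpy matching lam_memcpy_fn_t: tallies the
 * bytes copied in the caller-supplied state, then defers to memcpy(). */
static void *my_counting_memcpy(void *restrict dst, const void *restrict src,
                                size_t size, lam_memcpy_state_t *check)
{
    if (NULL != check) {
        check->size += size;   /* reuse the size field as a running total */
    }
    return memcpy(dst, src, size);
}

/* Hypothetical call site: copy count elements of dtype through the
 * instrumented function. */
int copy_with_counting(void *dst, const void *src, size_t count,
                       lam_datatype_t *dtype)
{
    lam_memcpy_state_t state = { 0 };
    return lam_datatype_copy(dst, src, count, dtype,
                             my_counting_memcpy, &state);
}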
@@ -473,7 +454,7 @@ void *lam_memcpy_alt(void *dst, const void *src, size_t size,
  * value.
  */
 uint32_t lam_crc32(const void *restrict buffer, size_t size,
                    uint32_t initial_crc);


 /**
@@ -505,8 +486,7 @@ uint32_t lam_sum32(const void *restrict buffer, size_t size);
  */
 void *lam_memcpy_crc32(void *restrict dst,
                        const void *restrict src,
-                       size_t size,
-                       lam_memcpy_state_t *check);
+                       size_t size, lam_memcpy_state_t *check);


 /**
@@ -527,8 +507,7 @@ void *lam_memcpy_crc32(void *restrict dst,
  */
 void *lam_memcpy_sum32(void *restrict dst,
                        const void *restrict src,
-                       size_t size,
-                       lam_memcpy_state_t *check);
+                       size_t size, lam_memcpy_state_t *check);


 /**
@@ -542,8 +521,7 @@ void *lam_memcpy_sum32(void *restrict dst,
  */
 void *lam_memcpy_sum64(void *restrict dst,
                        const void *restrict src,
-                       size_t size,
-                       lam_memcpy_state_t *check);
+                       size_t size, lam_memcpy_state_t *check);


 /**
@@ -569,8 +547,7 @@ int lam_datatype_create(int combiner,
                         int naddresses,
                         ssize_t addresses[],
                         int ntypes,
-                        lam_datatype_t *types[],
-                        lam_datatype_t **newtype);
+                        lam_datatype_t *types[], lam_datatype_t **newtype);


 /**
@@ -587,4 +564,4 @@ int lam_datatype_delete(lam_datatype_t *type);



 #endif /* LAM_DATATYPE_H_INCLUDED */