/* -*- Mode: C; c-basic-offset:4 ; -*- */
/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2009 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2006-2007 Mellanox Technologies. All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "ompi_config.h"

#include "ompi/class/ompi_free_list.h"
#include "opal/include/opal/align.h"
#include "opal/util/output.h"
#include "opal/sys/cache.h"
#include "ompi/mca/mpool/mpool.h"

static void ompi_free_list_construct(ompi_free_list_t* fl);
static void ompi_free_list_destruct(ompi_free_list_t* fl);

OBJ_CLASS_INSTANCE(ompi_free_list_t, opal_atomic_lifo_t,
                   ompi_free_list_construct, ompi_free_list_destruct);

typedef struct ompi_free_list_item_t ompi_free_list_memory_t;

OBJ_CLASS_INSTANCE(ompi_free_list_item_t,
                   opal_list_item_t,
                   NULL, NULL);

static void ompi_free_list_construct(ompi_free_list_t* fl)
{
    OBJ_CONSTRUCT(&fl->fl_lock, opal_mutex_t);
    OBJ_CONSTRUCT(&fl->fl_condition, opal_condition_t);
    fl->fl_max_to_alloc = 0;
    fl->fl_num_allocated = 0;
    fl->fl_num_per_alloc = 0;
    fl->fl_num_waiting = 0;
    fl->fl_frag_size = sizeof(ompi_free_list_item_t);
    fl->fl_frag_alignment = 0;
    fl->fl_payload_buffer_size = 0;
    fl->fl_payload_buffer_alignment = 0;
    fl->fl_frag_class = OBJ_CLASS(ompi_free_list_item_t);
    fl->fl_mpool = NULL;
    fl->ctx = NULL;
    OBJ_CONSTRUCT(&(fl->fl_allocations), opal_list_t);
}

static void ompi_free_list_destruct(ompi_free_list_t* fl)
{
    opal_list_item_t *item;
    ompi_free_list_memory_t *fl_mem;

#if 0 && OMPI_ENABLE_DEBUG
    if(opal_list_get_size(&fl->super) != fl->fl_num_allocated) {
        opal_output(0, "ompi_free_list: %d allocated %d returned: %s:%d\n",
                    fl->fl_num_allocated, opal_list_get_size(&fl->super),
                    fl->super.super.cls_init_file_name, fl->super.super.cls_init_lineno);
    }
#endif

    if( NULL != fl->fl_mpool ) {
        while(NULL != (item = opal_list_remove_first(&(fl->fl_allocations)))) {
            fl_mem = (ompi_free_list_memory_t*)item;

            fl->fl_mpool->mpool_free(fl->fl_mpool, fl_mem->ptr,
                                     fl_mem->registration);

            /* destruct the item (we constructed it), then free the memory chunk */
            OBJ_DESTRUCT(item);
            free(item);
        }
    } else {
        while(NULL != (item = opal_list_remove_first(&(fl->fl_allocations)))) {
            /* destruct the item (we constructed it), then free the memory chunk */
            OBJ_DESTRUCT(item);
            free(item);
        }
    }

    OBJ_DESTRUCT(&fl->fl_allocations);
    OBJ_DESTRUCT(&fl->fl_condition);
    OBJ_DESTRUCT(&fl->fl_lock);
}

int ompi_free_list_init_ex(
    ompi_free_list_t *flist,
    size_t elem_size,
    size_t alignment,
    opal_class_t* elem_class,
    int num_elements_to_alloc,
    int max_elements_to_alloc,
    int num_elements_per_alloc,
    mca_mpool_base_module_t* mpool,
    ompi_free_list_item_init_fn_t item_init,
    void* ctx)
{
    /* alignment must be a power of two greater than one */
    if(alignment <= 1 || (alignment & (alignment - 1)))
        return OMPI_ERROR;

    if(elem_size > flist->fl_frag_size)
        flist->fl_frag_size = elem_size;
    flist->fl_frag_alignment = alignment;
    if(elem_class)
        flist->fl_frag_class = elem_class;
    flist->fl_payload_buffer_size = flist->fl_frag_size -
        flist->fl_frag_class->cls_sizeof;
    flist->fl_payload_buffer_alignment = alignment;
    flist->fl_max_to_alloc = max_elements_to_alloc;
    flist->fl_num_allocated = 0;
    flist->fl_num_per_alloc = num_elements_per_alloc;
    flist->fl_mpool = mpool;
    flist->item_init = item_init;
    flist->ctx = ctx;
    if(num_elements_to_alloc)
        return ompi_free_list_grow(flist, num_elements_to_alloc);
    return OMPI_SUCCESS;
}
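
/*
 * Illustrative usage sketch (not part of this file; the fragment type, the
 * mpool handle and the numeric parameters are hypothetical).  A component
 * that wants a pool of pre-constructed descriptors typically constructs the
 * list object and then initializes it:
 *
 *     ompi_free_list_t frags;
 *     int rc;
 *
 *     OBJ_CONSTRUCT(&frags, ompi_free_list_t);
 *     rc = ompi_free_list_init_ex(&frags,
 *                                 sizeof(my_frag_t),    -- elem_size
 *                                 64,                   -- alignment, power of two > 1
 *                                 OBJ_CLASS(my_frag_t), -- class used to construct items
 *                                 32,                   -- elements allocated up front
 *                                 1024,                 -- hard cap (<= 0 means unbounded)
 *                                 32,                   -- growth step per allocation
 *                                 my_mpool,             -- NULL if no registered memory is needed
 *                                 my_frag_init, NULL);
 *
 * When mpool is non-NULL, each item also receives a registered payload
 * buffer of fl_frag_size - cls_sizeof bytes (see ompi_free_list_grow below).
 */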

/* this will replace ompi_free_list_init_ex */
int ompi_free_list_init_ex_new(
    ompi_free_list_t *flist,
    size_t frag_size,
    size_t frag_alignment,
    opal_class_t* frag_class,
    size_t payload_buffer_size,
    size_t payload_buffer_alignment,
    int num_elements_to_alloc,
    int max_elements_to_alloc,
    int num_elements_per_alloc,
    mca_mpool_base_module_t* mpool,
    ompi_free_list_item_init_fn_t item_init,
    void* ctx)
{
    /* alignments must be powers of two greater than one */
    if (frag_alignment <= 1 || (frag_alignment & (frag_alignment - 1)))
        return OMPI_ERROR;
    if (0 < payload_buffer_size) {
        if (payload_buffer_alignment <= 1 ||
            (payload_buffer_alignment & (payload_buffer_alignment - 1)))
            return OMPI_ERROR;
    }

    if (frag_size > flist->fl_frag_size)
        flist->fl_frag_size = frag_size;
    if (frag_class)
        flist->fl_frag_class = frag_class;
    flist->fl_payload_buffer_size = payload_buffer_size;
    flist->fl_max_to_alloc = max_elements_to_alloc;
    flist->fl_num_allocated = 0;
    flist->fl_num_per_alloc = num_elements_per_alloc;
    flist->fl_mpool = mpool;
    flist->fl_frag_alignment = frag_alignment;
    flist->fl_payload_buffer_alignment = payload_buffer_alignment;
    flist->item_init = item_init;
    flist->ctx = ctx;
    if (num_elements_to_alloc)
        return ompi_free_list_grow(flist, num_elements_to_alloc);
    return OMPI_SUCCESS;
}
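
/*
 * Illustrative sketch of the newer interface (hypothetical names and sizes).
 * Unlike ompi_free_list_init_ex(), the payload buffer carved out of the
 * mpool is sized and aligned explicitly instead of being derived from the
 * fragment size:
 *
 *     rc = ompi_free_list_init_ex_new(&frags,
 *                                     sizeof(my_frag_t), 64, OBJ_CLASS(my_frag_t),
 *                                     4096, 4096,    -- payload size / alignment
 *                                     32, 1024, 32,  -- initial / max / per-alloc counts
 *                                     my_mpool, my_frag_init, NULL);
 */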

int ompi_free_list_grow(ompi_free_list_t* flist, size_t num_elements)
{
    unsigned char *ptr, *mpool_alloc_ptr = NULL;
    ompi_free_list_memory_t *alloc_ptr;
    size_t i, alloc_size, head_size, elem_size = 0;
    mca_mpool_base_registration_t *reg = NULL;

    if(flist->fl_max_to_alloc > 0)
        if(flist->fl_num_allocated + num_elements > flist->fl_max_to_alloc)
            num_elements = flist->fl_max_to_alloc - flist->fl_num_allocated;

    if(num_elements == 0)
        return OMPI_ERR_TEMP_OUT_OF_RESOURCE;

    head_size = (NULL == flist->fl_mpool) ? flist->fl_frag_size :
        flist->fl_frag_class->cls_sizeof;
    head_size = OPAL_ALIGN(head_size, flist->fl_frag_alignment, size_t);

    /* calculate head allocation size */
    alloc_size = num_elements * head_size + sizeof(ompi_free_list_memory_t) +
        flist->fl_frag_alignment;

    alloc_ptr = (ompi_free_list_memory_t*)malloc(alloc_size);

    if(NULL == alloc_ptr)
        return OMPI_ERR_TEMP_OUT_OF_RESOURCE;

    /* allocate the rest from the mpool */
    if(flist->fl_mpool != NULL) {
        elem_size = OPAL_ALIGN(flist->fl_payload_buffer_size,
                               flist->fl_payload_buffer_alignment, size_t);
        if(elem_size != 0) {
            mpool_alloc_ptr = (unsigned char *) flist->fl_mpool->mpool_alloc(flist->fl_mpool,
                num_elements * elem_size, flist->fl_payload_buffer_alignment,
                MCA_MPOOL_FLAGS_CACHE_BYPASS, &reg);
            if(NULL == mpool_alloc_ptr) {
                free(alloc_ptr);
                return OMPI_ERR_TEMP_OUT_OF_RESOURCE;
            }
        }
    }

    /* make the alloc_ptr a list item, save the chunk in the allocations list,
     * and have ptr point to memory right after the list item structure */
    OBJ_CONSTRUCT(alloc_ptr, ompi_free_list_item_t);
    opal_list_append(&(flist->fl_allocations), (opal_list_item_t*)alloc_ptr);

    alloc_ptr->registration = reg;
    alloc_ptr->ptr = mpool_alloc_ptr;

    ptr = (unsigned char*)alloc_ptr + sizeof(ompi_free_list_memory_t);
    ptr = OPAL_ALIGN_PTR(ptr, flist->fl_frag_alignment, unsigned char*);

    for(i = 0; i < num_elements; i++) {
        ompi_free_list_item_t* item = (ompi_free_list_item_t*)ptr;
        item->registration = reg;
        item->ptr = mpool_alloc_ptr;

        OBJ_CONSTRUCT_INTERNAL(item, flist->fl_frag_class);

        /* run the initialize function if present */
        if(flist->item_init) {
            flist->item_init(item, flist->ctx);
        }

        opal_atomic_lifo_push(&(flist->super), &(item->super));
        ptr += head_size;
        mpool_alloc_ptr += elem_size;
    }
    flist->fl_num_allocated += num_elements;
    return OMPI_SUCCESS;
}
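
/*
 * Sketch of what one call to ompi_free_list_grow() produces (a summary of
 * the code above, not a normative layout):
 *
 *     malloc'd chunk:  [ompi_free_list_memory_t][pad][item 0][item 1]...[item N-1]
 *                      each item slot is head_size bytes, aligned to
 *                      fl_frag_alignment
 *     mpool chunk:     [payload 0][payload 1]...[payload N-1]
 *                      only present when fl_mpool is set; each payload is
 *                      elem_size bytes and item i's ptr field points at
 *                      payload i
 *
 * The whole malloc'd chunk stays on fl_allocations so that the destructor
 * can return the mpool chunk and free the header chunk in one pass.
 */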
|
2006-04-20 23:53:45 +04:00
|
|
|
|
2006-10-10 18:47:51 +04:00
|
|
|
/**
|
|
|
|
* This function resize the free_list to contain at least the specified
|
|
|
|
* number of elements. We do not create all of them in the same memory
|
|
|
|
* segment. Instead we will several time the fl_num_per_alloc elements
|
|
|
|
* until we reach the required number of the maximum allowed by the
|
|
|
|
* initialization.
|
|
|
|
*/
|
2006-10-07 01:13:49 +04:00
|
|
|
int
|
|
|
|
ompi_free_list_resize(ompi_free_list_t* flist, size_t size)
|
|
|
|
{
|
2006-10-10 18:47:51 +04:00
|
|
|
ssize_t inc_num;
|
2006-10-07 01:13:49 +04:00
|
|
|
int ret = OMPI_SUCCESS;
|
|
|
|
|
|
|
|
if (flist->fl_num_allocated > size) {
|
|
|
|
return OMPI_SUCCESS;
|
|
|
|
}
|
2006-10-10 18:47:51 +04:00
|
|
|
OPAL_THREAD_LOCK(&((flist)->fl_lock));
|
2006-10-20 23:33:55 +04:00
|
|
|
inc_num = (ssize_t)size - (ssize_t)flist->fl_num_allocated;
|
2006-10-10 18:47:51 +04:00
|
|
|
while( inc_num > 0 ) {
|
|
|
|
ret = ompi_free_list_grow(flist, flist->fl_num_per_alloc);
|
|
|
|
if( OMPI_SUCCESS != ret ) break;
|
2006-10-20 23:33:55 +04:00
|
|
|
inc_num = (ssize_t)size - (ssize_t)flist->fl_num_allocated;
|
2006-10-10 18:47:51 +04:00
|
|
|
}
|
|
|
|
OPAL_THREAD_UNLOCK(&((flist)->fl_lock));
|
2006-10-07 01:13:49 +04:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
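
/*
 * Illustrative use (hypothetical count): make sure at least 256 descriptors
 * exist before a burst of traffic is expected:
 *
 *     if (OMPI_SUCCESS != ompi_free_list_resize(&frags, 256)) {
 *         -- the list could not grow that far, e.g. fl_max_to_alloc was hit
 *     }
 */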