1
1

Merge pull request #4938 from edgargabriel/topic/vulcan

fcoll/vulcan: add new fcoll component
Этот коммит содержится в:
Edgar Gabriel 2018-06-07 18:39:34 -05:00 коммит произвёл GitHub
родитель caaf008179 deaeaa60de
Коммит b31a0bd471
Не найден ключ, соответствующий данной подписи
Идентификатор ключа GPG: 4AEE18F83AFDEB23
31 изменённых файлов: 3609 добавлений и 300 удалений

Просмотреть файл

@ -23,10 +23,12 @@
headers = \
common_ompio_print_queue.h \
common_ompio_request.h \
common_ompio.h
sources = \
common_ompio_print_queue.c \
common_ompio_request.c \
common_ompio_file_open.c \
common_ompio_file_view.c \
common_ompio_file_read.c \

Просмотреть файл

@ -163,14 +163,13 @@ int mca_common_ompio_file_open (ompi_communicator_t *comm,
#endif
goto fn_fail;
}
if (OMPI_SUCCESS != (ret = mca_fcoll_base_file_select (ompio_fh,
NULL))) {
opal_output(1, "mca_fcoll_base_file_select() failed\n");
goto fn_fail;
}
if ( true == use_sharedfp ) {
/* open the file once more for the shared file pointer if required.
** Can be disabled by the user if no shared file pointer operations
@ -305,6 +304,10 @@ int mca_common_ompio_file_close (mca_io_ompio_file_t *ompio_fh)
free (ompio_fh->f_init_aggr_list);
ompio_fh->f_init_aggr_list = NULL;
}
if (NULL != ompio_fh->f_aggr_list) {
free (ompio_fh->f_aggr_list);
ompio_fh->f_aggr_list = NULL;
}
if (NULL != ompio_fh->f_init_procs_in_group) {
free (ompio_fh->f_init_procs_in_group);
ompio_fh->f_init_procs_in_group = NULL;
@ -422,6 +425,8 @@ int mca_common_ompio_set_file_defaults (mca_io_ompio_file_t *fh)
fh->f_init_num_aggrs = -1;
fh->f_init_aggr_list = NULL;
fh->f_num_aggrs = -1;
fh->f_aggr_list = NULL;
/* Default file View */
fh->f_iov_type = MPI_DATATYPE_NULL;

Просмотреть файл

@ -9,7 +9,7 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008-2016 University of Houston. All rights reserved.
* Copyright (c) 2008-2018 University of Houston. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -30,8 +30,8 @@
#include "ompi/mca/fbtl/base/base.h"
#include "common_ompio.h"
#include "common_ompio_request.h"
#include "ompi/mca/io/ompio/io_ompio.h"
#include "ompi/mca/io/ompio/io_ompio_request.h"
#include "math.h"
#include <unistd.h>
@ -185,9 +185,7 @@ int mca_common_ompio_file_iread (mca_io_ompio_file_t *fh,
mca_ompio_request_t *ompio_req=NULL;
size_t spc=0;
ompio_req = OBJ_NEW(mca_ompio_request_t);
ompio_req->req_type = MCA_OMPIO_REQUEST_READ;
ompio_req->req_ompi.req_state = OMPI_REQUEST_ACTIVE;
mca_common_ompio_request_alloc ( &ompio_req, MCA_OMPIO_REQUEST_READ);
if ( 0 == count ) {
ompio_req->req_ompi.req_status.MPI_ERROR = OMPI_SUCCESS;
@ -236,12 +234,7 @@ int mca_common_ompio_file_iread (mca_io_ompio_file_t *fh,
fh->f_fbtl->fbtl_ipreadv (fh, (ompi_request_t *) ompio_req);
}
if ( false == mca_io_ompio_progress_is_registered ) {
// Lazy initialization of progress function to minimize impact
// on other ompi functionality in case its not used.
opal_progress_register (mca_io_ompio_component_progress);
mca_io_ompio_progress_is_registered=true;
}
mca_common_ompio_register_progress ();
fh->f_num_of_io_entries = 0;
if (NULL != fh->f_io_array) {

Просмотреть файл

@ -62,8 +62,9 @@ int mca_common_ompio_set_view (mca_io_ompio_file_t *fh,
{
int ret=OMPI_SUCCESS;
size_t max_data = 0;
int i;
int i, flag;
int num_groups = 0;
int num_cb_nodes=-1;
mca_io_ompio_contg *contg_groups=NULL;
size_t ftype_size;
@ -172,48 +173,76 @@ int mca_common_ompio_set_view (mca_io_ompio_file_t *fh,
}
}
if ( SIMPLE != mca_io_ompio_grouping_option && SIMPLE_PLUS != mca_io_ompio_grouping_option ) {
ret = mca_io_ompio_fview_based_grouping(fh,
&num_groups,
contg_groups);
if ( OMPI_SUCCESS != ret ) {
opal_output(1, "mca_common_ompio_set_view: mca_io_ompio_fview_based_grouping failed\n");
goto exit;
}
char char_stripe[MPI_MAX_INFO_KEY];
/* Check the info object set during File_open */
opal_info_get (fh->f_info, "cb_nodes", MPI_MAX_INFO_VAL, char_stripe, &flag);
if ( flag ) {
sscanf ( char_stripe, "%d", &num_cb_nodes );
}
else {
int done=0;
int ndims;
/* Check the info object set during file_set_view */
opal_info_get (info, "cb_nodes", MPI_MAX_INFO_VAL, char_stripe, &flag);
if ( flag ) {
sscanf ( char_stripe, "%d", &num_cb_nodes );
}
}
if ( fh->f_comm->c_flags & OMPI_COMM_CART ){
ret = fh->f_comm->c_topo->topo.cart.cartdim_get( fh->f_comm, &ndims);
if ( OMPI_SUCCESS != ret ){
if ( -1 != mca_io_ompio_num_aggregators || -1 != num_cb_nodes) {
/* The user requested a particular number of aggregators */
num_groups = mca_io_ompio_num_aggregators;
if ( -1 != num_cb_nodes ) {
/* A hint through an MPI Info object trumps an mca parameter value */
num_groups = num_cb_nodes;
}
if ( num_groups > fh->f_size ) {
num_groups = fh->f_size;
}
mca_io_ompio_forced_grouping ( fh, num_groups, contg_groups);
}
else {
if ( SIMPLE != mca_io_ompio_grouping_option &&
SIMPLE_PLUS != mca_io_ompio_grouping_option ) {
ret = mca_io_ompio_fview_based_grouping(fh,
&num_groups,
contg_groups);
if ( OMPI_SUCCESS != ret ) {
opal_output(1, "mca_common_ompio_set_view: mca_io_ompio_fview_based_grouping failed\n");
goto exit;
}
if ( ndims > 1 ) {
ret = mca_io_ompio_cart_based_grouping( fh,
&num_groups,
contg_groups);
if (OMPI_SUCCESS != ret ) {
opal_output(1, "mca_common_ompio_set_view: mca_io_ompio_cart_based_grouping failed\n");
goto exit;
}
done=1;
}
}
if ( !done ) {
ret = mca_io_ompio_simple_grouping(fh,
&num_groups,
contg_groups);
if ( OMPI_SUCCESS != ret ){
opal_output(1, "mca_common_ompio_set_view: mca_io_ompio_simple_grouping failed\n");
goto exit;
else {
int done=0;
int ndims;
if ( fh->f_comm->c_flags & OMPI_COMM_CART ){
ret = fh->f_comm->c_topo->topo.cart.cartdim_get( fh->f_comm, &ndims);
if ( OMPI_SUCCESS != ret ){
goto exit;
}
if ( ndims > 1 ) {
ret = mca_io_ompio_cart_based_grouping( fh,
&num_groups,
contg_groups);
if (OMPI_SUCCESS != ret ) {
opal_output(1, "mca_common_ompio_set_view: mca_io_ompio_cart_based_grouping failed\n");
goto exit;
}
done=1;
}
}
if ( !done ) {
ret = mca_io_ompio_simple_grouping(fh,
&num_groups,
contg_groups);
if ( OMPI_SUCCESS != ret ){
opal_output(1, "mca_common_ompio_set_view: mca_io_ompio_simple_grouping failed\n");
goto exit;
}
}
}
}
#ifdef DEBUG_OMPIO
if ( fh->f_rank == 0) {
int ii, jj;

Просмотреть файл

@ -9,7 +9,7 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008-2016 University of Houston. All rights reserved.
* Copyright (c) 2008-2018 University of Houston. All rights reserved.
* Copyright (c) 2015-2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
@ -30,8 +30,8 @@
#include "ompi/mca/fbtl/base/base.h"
#include "common_ompio.h"
#include "common_ompio_request.h"
#include "ompi/mca/io/ompio/io_ompio.h"
#include "ompi/mca/io/ompio/io_ompio_request.h"
#include "math.h"
#include <unistd.h>
@ -156,11 +156,9 @@ int mca_common_ompio_file_iwrite (mca_io_ompio_file_t *fh,
mca_ompio_request_t *ompio_req=NULL;
size_t spc=0;
ompio_req = OBJ_NEW(mca_ompio_request_t);
ompio_req->req_type = MCA_OMPIO_REQUEST_WRITE;
ompio_req->req_ompi.req_state = OMPI_REQUEST_ACTIVE;
mca_common_ompio_request_alloc ( &ompio_req, MCA_OMPIO_REQUEST_WRITE);
if ( 0 == count ) {
if ( 0 == count ) {
ompio_req->req_ompi.req_status.MPI_ERROR = OMPI_SUCCESS;
ompio_req->req_ompi.req_status._ucount = 0;
ompi_request_complete (&ompio_req->req_ompi, false);
@ -205,12 +203,7 @@ int mca_common_ompio_file_iwrite (mca_io_ompio_file_t *fh,
fh->f_fbtl->fbtl_ipwritev (fh, (ompi_request_t *) ompio_req);
}
if ( false == mca_io_ompio_progress_is_registered ) {
// Lazy initialization of progress function to minimize impact
// on other ompi functionality in case its not used.
opal_progress_register (mca_io_ompio_component_progress);
mca_io_ompio_progress_is_registered=true;
}
mca_common_ompio_register_progress ();
fh->f_num_of_io_entries = 0;
if (NULL != fh->f_io_array) {

143
ompi/mca/common/ompio/common_ompio_request.c Обычный файл
Просмотреть файл

@ -0,0 +1,143 @@
/* -*- Mode: C; c-basic-offset:4 ; -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2016 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008-2018 University of Houston. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "common_ompio_request.h"
static void mca_common_ompio_request_construct(mca_ompio_request_t* req);
static void mca_common_ompio_request_destruct(mca_ompio_request_t *req);
bool mca_common_ompio_progress_is_registered=false;
/*
* Global list of requests for this component
*/
opal_list_t mca_common_ompio_pending_requests = {{0}};
/*
 * Free hook installed on every ompio request (see the constructor below).
 * Runs the component-specific cleanup callback (if any), unlinks the
 * request from the global pending list, and releases the object.
 */
static int mca_common_ompio_request_free ( struct ompi_request_t **req)
{
    mca_ompio_request_t *ompio_req = ( mca_ompio_request_t *)*req;
    if ( NULL != ompio_req->req_free_fn ) {
        /* Component-provided cleanup (set by whoever issued the request). */
        ompio_req->req_free_fn (ompio_req );
    }
    /* Remove from the pending list BEFORE releasing: after OBJ_RELEASE the
       list item embedded in the request may no longer be valid. */
    opal_list_remove_item (&mca_common_ompio_pending_requests, &ompio_req->req_item);
    OBJ_RELEASE (*req);
    *req = MPI_REQUEST_NULL;
    return OMPI_SUCCESS;
}
/* Cancel hook: ompio requests are not cancellable, so this is a no-op
   that always reports success. */
static int mca_common_ompio_request_cancel ( struct ompi_request_t *req, int flag)
{
    return OMPI_SUCCESS;
}
OBJ_CLASS_INSTANCE(mca_ompio_request_t, ompi_request_t,
mca_common_ompio_request_construct,
mca_common_ompio_request_destruct);
/*
 * OBJ constructor for mca_ompio_request_t: initializes the embedded
 * ompi_request_t, installs the free/cancel hooks, clears the per-request
 * fields, and appends the request to the global pending list so that the
 * progress engine can find it.
 */
void mca_common_ompio_request_construct(mca_ompio_request_t* req)
{
    OMPI_REQUEST_INIT (&(req->req_ompi), false );
    req->req_ompi.req_free = mca_common_ompio_request_free;
    req->req_ompi.req_cancel = mca_common_ompio_request_cancel;
    req->req_ompi.req_type = OMPI_REQUEST_IO;
    req->req_data = NULL;
    req->req_progress_fn = NULL;
    req->req_free_fn = NULL;
    /* The list item must be constructed before it can be appended. */
    OBJ_CONSTRUCT(&req->req_item, opal_list_item_t);
    opal_list_append (&mca_common_ompio_pending_requests, &req->req_item);
    return;
}
/*
 * OBJ destructor for mca_ompio_request_t: tears down the embedded
 * ompi_request_t, destructs the list item, and releases any per-request
 * data buffer still attached.
 *
 * NOTE(review): the request is unlinked from the pending list in
 * mca_common_ompio_request_free(), not here — presumably destruction only
 * happens after the free hook ran; confirm against callers.
 */
void mca_common_ompio_request_destruct(mca_ompio_request_t* req)
{
    OMPI_REQUEST_FINI ( &(req->req_ompi));
    OBJ_DESTRUCT (&req->req_item);
    /* free(NULL) is a no-op, so no guard is needed. */
    free (req->req_data);
    return;
}
/* One-time setup for this component: construct the global list that
   tracks all outstanding ompio requests. */
void mca_common_ompio_request_init ( void )
{
    OBJ_CONSTRUCT(&mca_common_ompio_pending_requests, opal_list_t);
}
/* One-time teardown: destroy the global pending-request list. */
void mca_common_ompio_request_fini ( void )
{
    /* Destroy the list of pending requests */
    /* JMS: Good opportunity here to list out all the IO requests that
       were not destroyed / completed upon MPI_FINALIZE */
    OBJ_DESTRUCT(&mca_common_ompio_pending_requests);
    return;
}
/*
 * Allocate a new ompio request of the given type and mark it active.
 * The caller owns the returned request; it is released through the
 * standard ompi_request free path (mca_common_ompio_request_free).
 */
void mca_common_ompio_request_alloc ( mca_ompio_request_t **req, mca_ompio_request_type_t type )
{
    mca_ompio_request_t *newreq = OBJ_NEW(mca_ompio_request_t);
    newreq->req_type = type;
    newreq->req_ompi.req_state = OMPI_REQUEST_ACTIVE;
    *req = newreq;
}
/*
 * Lazily register the ompio progress function with the opal progress
 * engine. Registration happens at most once; subsequent calls return
 * immediately. Keeping this lazy avoids any progress-loop overhead for
 * applications that never use ompio.
 */
void mca_common_ompio_register_progress ( void )
{
    if ( mca_common_ompio_progress_is_registered ) {
        return;
    }
    opal_progress_register (mca_common_ompio_progress);
    mca_common_ompio_progress_is_registered = true;
}
/*
 * Progress callback driven by the opal progress engine: walk the global
 * list of pending ompio requests, invoke each request's progress
 * function, and complete any request whose progress function reports it
 * finished.
 *
 * Returns the number of requests completed during this pass.
 */
int mca_common_ompio_progress ( void )
{
    mca_ompio_request_t *req=NULL;
    opal_list_item_t *litem=NULL;
    int completed=0;

    OPAL_LIST_FOREACH(litem, &mca_common_ompio_pending_requests, opal_list_item_t) {
        req = GET_OMPIO_REQ_FROM_ITEM(litem);
        /* Already-complete requests stay on the list until freed; skip them. */
        if( REQUEST_COMPLETE(&req->req_ompi) ) {
            continue;
        }
        if ( NULL != req->req_progress_fn ) {
            /* Non-zero return from the progress fn means the request finished. */
            if ( req->req_progress_fn(req) ) {
                completed++;
                ompi_request_complete (&req->req_ompi, true);
                /* The fbtl progress function is expected to set the
                 * status elements
                 */
            }
        }
    }
    return completed;
}

Просмотреть файл

@ -10,7 +10,7 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008-2014 University of Houston. All rights reserved.
* Copyright (c) 2008-2018 University of Houston. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -18,18 +18,18 @@
* $HEADER$
*/
#ifndef MCA_IO_OMPIO_REQUEST_H
#define MCA_IO_OMPIO_REQUEST_H
#ifndef MCA_COMMON_OMPIO_REQUEST_H
#define MCA_COMMON_OMPIO_REQUEST_H
#include "ompi_config.h"
#include "ompi/request/request.h"
#include "ompi/mca/fbtl/fbtl.h"
#include "io_ompio.h"
#include "ompi/mca/io/ompio/io_ompio.h"
BEGIN_C_DECLS
extern opal_list_t mca_io_ompio_pending_requests;
extern bool mca_io_ompio_progress_is_registered;
extern opal_list_t mca_common_ompio_pending_requests;
extern bool mca_common_ompio_progress_is_registered;
/**
* Type of request.
@ -59,8 +59,12 @@ OBJ_CLASS_DECLARATION(mca_ompio_request_t);
#define GET_OMPIO_REQ_FROM_ITEM(ITEM) ((mca_ompio_request_t *)((char *)ITEM - offsetof(struct mca_ompio_request_t,req_item)))
OMPI_DECLSPEC int mca_io_ompio_component_progress ( void);
OMPI_DECLSPEC void mca_common_ompio_request_init ( void);
OMPI_DECLSPEC void mca_common_ompio_request_fini ( void );
OMPI_DECLSPEC void mca_common_ompio_request_alloc ( mca_ompio_request_t **req, mca_ompio_request_type_t type);
OMPI_DECLSPEC int mca_common_ompio_progress ( void);
OMPI_DECLSPEC void mca_common_ompio_register_progress ( void );
END_C_DECLS
#endif /* MCA_IO_OMPIO_REQUEST_H */
#endif /* MCA_COMMON_OMPIO_REQUEST_H */

Просмотреть файл

@ -9,7 +9,7 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008-2016 University of Houston. All rights reserved.
* Copyright (c) 2008-2018 University of Houston. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -24,7 +24,7 @@
#include "ompi/mca/mca.h"
#include "ompi/mca/fbtl/fbtl.h"
#include "ompi/mca/common/ompio/common_ompio.h"
#include "ompi/mca/io/ompio/io_ompio_request.h"
#include "ompi/mca/common/ompio/common_ompio_request.h"
extern int mca_fbtl_posix_priority;

Просмотреть файл

@ -10,7 +10,7 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008-2011 University of Houston. All rights reserved.
* Copyright (c) 2008-2018 University of Houston. All rights reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$

Просмотреть файл

@ -155,7 +155,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
if (OMPI_SUCCESS != ret){
goto exit;
}
my_aggregator = fh->f_procs_in_group[fh->f_aggregator_index];
my_aggregator = fh->f_procs_in_group[0];
/**************************************************************************
** 2. Determine the total amount of data to be written
@ -175,7 +175,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
total_bytes_per_process,
1,
MPI_LONG,
fh->f_aggregator_index,
0,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);
@ -227,7 +227,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
fview_count,
1,
MPI_INT,
fh->f_aggregator_index,
0,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);
@ -286,7 +286,7 @@ mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
fview_count,
displs,
fh->f_iov_type,
fh->f_aggregator_index,
0,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);

Просмотреть файл

@ -158,7 +158,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
if (OMPI_SUCCESS != ret){
goto exit;
}
my_aggregator = fh->f_procs_in_group[fh->f_aggregator_index];
my_aggregator = fh->f_procs_in_group[0];
/**************************************************************************
** 2. Determine the total amount of data to be written
**************************************************************************/
@ -179,7 +179,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
total_bytes_per_process,
1,
MPI_LONG,
fh->f_aggregator_index,
0,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);
@ -242,7 +242,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
fview_count,
1,
MPI_INT,
fh->f_aggregator_index,
0,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);
@ -305,7 +305,7 @@ mca_fcoll_dynamic_file_write_all (mca_io_ompio_file_t *fh,
fview_count,
displs,
fh->f_iov_type,
fh->f_aggregator_index,
0,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);

Просмотреть файл

@ -155,7 +155,7 @@ mca_fcoll_dynamic_gen2_file_read_all (mca_io_ompio_file_t *fh,
if (OMPI_SUCCESS != ret){
goto exit;
}
my_aggregator = fh->f_procs_in_group[fh->f_aggregator_index];
my_aggregator = fh->f_procs_in_group[0];
/**************************************************************************
** 2. Determine the total amount of data to be written
@ -175,7 +175,7 @@ mca_fcoll_dynamic_gen2_file_read_all (mca_io_ompio_file_t *fh,
total_bytes_per_process,
1,
MPI_LONG,
fh->f_aggregator_index,
0,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);
@ -227,7 +227,7 @@ mca_fcoll_dynamic_gen2_file_read_all (mca_io_ompio_file_t *fh,
fview_count,
1,
MPI_INT,
fh->f_aggregator_index,
0,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);
@ -286,7 +286,7 @@ mca_fcoll_dynamic_gen2_file_read_all (mca_io_ompio_file_t *fh,
fview_count,
displs,
fh->f_iov_type,
fh->f_aggregator_index,
0,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);

Просмотреть файл

@ -152,7 +152,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
fh->f_set_aggregator_props ((struct mca_io_ompio_file_t *) fh,
static_num_io_procs,
max_data);
my_aggregator = fh->f_procs_in_group[fh->f_aggregator_index];
my_aggregator = fh->f_procs_in_group[0];
/* printf("max_data %ld\n", max_data); */
ret = fh->f_generate_current_file_view((struct mca_io_ompio_file_t *)fh,
@ -310,7 +310,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
iovec_count_per_process,
1,
MPI_INT,
fh->f_aggregator_index,
0,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);
@ -354,7 +354,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
iovec_count_per_process,
displs,
io_array_type,
fh->f_aggregator_index,
0,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);
@ -512,7 +512,7 @@ mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
bytes_per_process,
1,
MPI_INT,
fh->f_aggregator_index,
0,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);

Просмотреть файл

@ -155,7 +155,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
static_num_io_procs,
max_data);
my_aggregator = fh->f_procs_in_group[fh->f_aggregator_index];
my_aggregator = fh->f_procs_in_group[0];
/* io_array datatype for using in communication*/
types[0] = &ompi_mpi_long.dt;
@ -312,7 +312,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
iovec_count_per_process,
1,
MPI_INT,
fh->f_aggregator_index,
0,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);
@ -357,7 +357,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
iovec_count_per_process,
displs,
io_array_type,
fh->f_aggregator_index,
0,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);
@ -517,7 +517,7 @@ mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
bytes_per_process,
1,
MPI_INT,
fh->f_aggregator_index,
0,
fh->f_procs_in_group,
fh->f_procs_per_group,
fh->f_comm);

Просмотреть файл

@ -199,7 +199,7 @@ mca_fcoll_two_phase_file_read_all (mca_io_ompio_file_t *fh,
goto exit;
}
two_phase_num_io_procs = fh->f_final_num_aggrs;
two_phase_num_io_procs = fh->f_num_aggrs;
}

Просмотреть файл

@ -235,7 +235,7 @@ mca_fcoll_two_phase_file_write_all (mca_io_ompio_file_t *fh,
goto exit;
}
two_phase_num_io_procs = fh->f_final_num_aggrs;
two_phase_num_io_procs = fh->f_num_aggrs;
}

47
ompi/mca/fcoll/vulcan/Makefile.am Обычный файл
Просмотреть файл

@ -0,0 +1,47 @@
#
# Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
# University Research and Technology
# Corporation. All rights reserved.
# Copyright (c) 2004-2005 The University of Tennessee and The University
# of Tennessee Research Foundation. All rights
# reserved.
# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# Copyright (c) 2004-2005 The Regents of the University of California.
# All rights reserved.
# Copyright (c) 2008-2018 University of Houston. All rights reserved.
# Copyright (c) 2012 Cisco Systems, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
sources = \
fcoll_vulcan.h \
fcoll_vulcan_module.c \
fcoll_vulcan_component.c \
fcoll_vulcan_file_read_all.c \
fcoll_vulcan_file_write_all.c
# Make the output library in this directory, and name it either
# mca_<type>_<name>.la (for DSO builds) or libmca_<type>_<name>.la
# (for static builds).
if MCA_BUILD_ompi_fcoll_vulcan_DSO
component_noinst =
component_install = mca_fcoll_vulcan.la
else
component_noinst = libmca_fcoll_vulcan.la
component_install =
endif
mcacomponentdir = $(ompilibdir)
mcacomponent_LTLIBRARIES = $(component_install)
mca_fcoll_vulcan_la_SOURCES = $(sources)
mca_fcoll_vulcan_la_LDFLAGS = -module -avoid-version
noinst_LTLIBRARIES = $(component_noinst)
libmca_fcoll_vulcan_la_SOURCES =$(sources)
libmca_fcoll_vulcan_la_LDFLAGS = -module -avoid-version

72
ompi/mca/fcoll/vulcan/fcoll_vulcan.h Обычный файл
Просмотреть файл

@ -0,0 +1,72 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2006 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008-2017 University of Houston. All rights reserved.
* Copyright (c) 2015 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef MCA_FCOLL_VULCAN_EXPORT_H
#define MCA_FCOLL_VULCAN_EXPORT_H

#include "ompi_config.h"
#include "mpi.h"
#include "ompi/mca/mca.h"
#include "ompi/mca/fcoll/fcoll.h"
#include "ompi/mca/fcoll/base/base.h"
#include "ompi/mca/common/ompio/common_ompio.h"

BEGIN_C_DECLS

/* Globally exported variables (MCA parameters registered by the component) */
extern int mca_fcoll_vulcan_priority;          /* selection priority of this component */
extern int mca_fcoll_vulcan_num_groups;        /* number of aggregator subgroups */
extern int mca_fcoll_vulcan_write_chunksize;   /* bytes written per chunk; -1 = fs stripe size */
extern int mca_fcoll_vulcan_async_io;          /* 0 auto, 1 async only, 2 sync only */

OMPI_MODULE_DECLSPEC extern mca_fcoll_base_component_2_0_0_t mca_fcoll_vulcan_component;

/* API functions */

/* Component-level query/selection entry points. */
int mca_fcoll_vulcan_component_init_query(bool enable_progress_threads,
                                          bool enable_mpi_threads);
struct mca_fcoll_base_module_1_0_0_t *
mca_fcoll_vulcan_component_file_query (mca_io_ompio_file_t *fh, int *priority);
int mca_fcoll_vulcan_component_file_unquery (mca_io_ompio_file_t *file);

/* Per-file module lifecycle. */
int mca_fcoll_vulcan_module_init (mca_io_ompio_file_t *file);
int mca_fcoll_vulcan_module_finalize (mca_io_ompio_file_t *file);

/* Collective I/O implementations provided by this component. */
int mca_fcoll_vulcan_file_read_all (mca_io_ompio_file_t *fh,
                                    void *buf,
                                    int count,
                                    struct ompi_datatype_t *datatype,
                                    ompi_status_public_t * status);

int mca_fcoll_vulcan_file_write_all (mca_io_ompio_file_t *fh,
                                     const void *buf,
                                     int count,
                                     struct ompi_datatype_t *datatype,
                                     ompi_status_public_t * status);

END_C_DECLS

#endif /* MCA_FCOLL_VULCAN_EXPORT_H */

Просмотреть файл

@ -0,0 +1,115 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2008-2017 University of Houston. All rights reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*
* These symbols are in a file by themselves to provide nice linker
* semantics. Since linkers generally pull in symbols by object
* files, keeping these symbols as the only symbols in this file
* prevents utility programs such as "ompi_info" from having to import
* entire components just to query their version and parameters.
*/
#include "ompi_config.h"
#include "fcoll_vulcan.h"
#include "mpi.h"
/*
* Public string showing the fcoll ompi_vulcan component version number
*/
const char *mca_fcoll_vulcan_component_version_string =
"Open MPI vulcan collective MCA component version " OMPI_VERSION;
/*
* Global variables
*/
int mca_fcoll_vulcan_priority = 10;
int mca_fcoll_vulcan_num_groups = 1;
int mca_fcoll_vulcan_write_chunksize = -1;
int mca_fcoll_vulcan_async_io = 0;
/*
* Local function
*/
static int vulcan_register(void);
/*
* Instantiate the public struct with all of our public information
* and pointers to our public functions in it
*/
/*
 * Public component descriptor consumed by the fcoll framework's
 * selection logic; wires up the query/register entry points.
 */
mca_fcoll_base_component_2_0_0_t mca_fcoll_vulcan_component = {

    /* First, the mca_component_t struct containing meta information
     * about the component itself */

    .fcollm_version = {
        MCA_FCOLL_BASE_VERSION_2_0_0,

        /* Component name and version */
        .mca_component_name = "vulcan",
        MCA_BASE_MAKE_VERSION(component, OMPI_MAJOR_VERSION, OMPI_MINOR_VERSION,
                              OMPI_RELEASE_VERSION),
        .mca_register_component_params = vulcan_register,
    },
    .fcollm_data = {
        /* The component is checkpoint ready */
        MCA_BASE_METADATA_PARAM_CHECKPOINT
    },

    /* Framework-specific query hooks. */
    .fcollm_init_query = mca_fcoll_vulcan_component_init_query,
    .fcollm_file_query = mca_fcoll_vulcan_component_file_query,
    .fcollm_file_unquery = mca_fcoll_vulcan_component_file_unquery,
};
/*
 * Register the component's MCA parameters (priority, num_groups,
 * write_chunksize, async_io) and reset each backing variable to its
 * default before registration.
 */
static int
vulcan_register(void)
{
    mca_fcoll_vulcan_priority = 10;
    (void) mca_base_component_var_register(&mca_fcoll_vulcan_component.fcollm_version,
                                           "priority", "Priority of the vulcan fcoll component",
                                           MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
                                           OPAL_INFO_LVL_9,
                                           MCA_BASE_VAR_SCOPE_READONLY, &mca_fcoll_vulcan_priority);

    mca_fcoll_vulcan_num_groups = 1;
    (void) mca_base_component_var_register(&mca_fcoll_vulcan_component.fcollm_version,
                                           "num_groups", "Number of subgroups created by the vulcan component",
                                           MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
                                           OPAL_INFO_LVL_9,
                                           MCA_BASE_VAR_SCOPE_READONLY, &mca_fcoll_vulcan_num_groups);

    /* -1 means: use the file system's stripe size. */
    mca_fcoll_vulcan_write_chunksize = -1;
    (void) mca_base_component_var_register(&mca_fcoll_vulcan_component.fcollm_version,
                                           "write_chunksize", "Chunk size written at once. Default: stripe_size of the file system",
                                           MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
                                           OPAL_INFO_LVL_9,
                                           MCA_BASE_VAR_SCOPE_READONLY, &mca_fcoll_vulcan_write_chunksize);

    mca_fcoll_vulcan_async_io = 0;
    (void) mca_base_component_var_register(&mca_fcoll_vulcan_component.fcollm_version,
                                           "async_io", "Asynchronous I/O support options. 0: Automatic choice (default) "
                                           "1: Asynchronous I/O only. 2: Synchronous I/O only.",
                                           MCA_BASE_VAR_TYPE_INT, NULL, 0, 0,
                                           OPAL_INFO_LVL_9,
                                           MCA_BASE_VAR_SCOPE_READONLY, &mca_fcoll_vulcan_async_io);

    return OMPI_SUCCESS;
}

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Разница между файлами не показана из-за своего большого размера Загрузить разницу

90
ompi/mca/fcoll/vulcan/fcoll_vulcan_module.c Обычный файл
Просмотреть файл

@ -0,0 +1,90 @@
/*
* Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2006 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008-2017 University of Houston. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "fcoll_vulcan.h"
#include <stdio.h>
#include "mpi.h"
#include "ompi/mca/fcoll/fcoll.h"
#include "ompi/mca/fcoll/base/base.h"
/*
* *******************************************************************
* ************************ actions structure ************************
* *******************************************************************
*/
/*
 * Module function table returned from file_query: vulcan implements the
 * blocking read_all/write_all collectives only; the non-blocking and
 * progress hooks are left NULL.
 */
static mca_fcoll_base_module_1_0_0_t vulcan =  {
    mca_fcoll_vulcan_module_init,
    mca_fcoll_vulcan_module_finalize,
    mca_fcoll_vulcan_file_read_all,
    NULL, /* iread_all */
    mca_fcoll_vulcan_file_write_all,
    NULL, /*iwrite_all */
    NULL, /* progress */
    NULL  /* request_free */
};
/* Component-level init query: vulcan has no thread-level restrictions
   or global setup, so this always succeeds. */
int
mca_fcoll_vulcan_component_init_query(bool enable_progress_threads,
                                      bool enable_mpi_threads)
{
    /* Nothing to do */
    return OMPI_SUCCESS;
}
/*
 * Per-file selection query: report this component's priority and return
 * its module table, or NULL when the component is disabled
 * (non-positive priority). If the query table says vulcan was
 * explicitly requested for this file, raise the priority to at least 50.
 */
mca_fcoll_base_module_1_0_0_t *
mca_fcoll_vulcan_component_file_query (mca_io_ompio_file_t *fh, int *priority)
{
    *priority = mca_fcoll_vulcan_priority;

    /* A non-positive priority disables the component entirely. */
    if (mca_fcoll_vulcan_priority <= 0) {
        return NULL;
    }

    /* Explicit request for vulcan trumps a low configured priority. */
    if (mca_fcoll_base_query_table (fh, "vulcan") && *priority < 50) {
        *priority = 50;
    }

    return &vulcan;
}
/* Undo hook for a file query when this module is not selected; vulcan
   allocates nothing during query, so there is nothing to undo. */
int mca_fcoll_vulcan_component_file_unquery (mca_io_ompio_file_t *file)
{
    /* This function might be needed for some purposes later. for now it
     * does not have anything to do since there are no steps which need
     * to be undone if this module is not selected */

    return OMPI_SUCCESS;
}
/* Per-file module initialization; vulcan keeps no per-file state. */
int mca_fcoll_vulcan_module_init (mca_io_ompio_file_t *file)
{
    return OMPI_SUCCESS;
}
/* Per-file module teardown; nothing to release. */
int mca_fcoll_vulcan_module_finalize (mca_io_ompio_file_t *file)
{
    return OMPI_SUCCESS;
}

7
ompi/mca/fcoll/vulcan/owner.txt Обычный файл
Просмотреть файл

@ -0,0 +1,7 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner: UH
status: active

Просмотреть файл

@ -46,7 +46,6 @@ libmca_io_ompio_la_LDFLAGS = -module -avoid-version
headers = \
io_ompio.h \
io_ompio_request.h \
io_ompio_aggregators.h
sources = \
@ -57,5 +56,4 @@ sources = \
io_ompio_file_set_view.c \
io_ompio_file_open.c \
io_ompio_file_write.c \
io_ompio_file_read.c \
io_ompio_request.c
io_ompio_file_read.c

Просмотреть файл

@ -10,7 +10,7 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008-2016 University of Houston. All rights reserved.
* Copyright (c) 2008-2018 University of Houston. All rights reserved.
* Copyright (c) 2015-2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2016-2017 IBM Corporation. All rights reserved.
@ -238,10 +238,6 @@ struct mca_io_ompio_file_t {
*/
void *f_sharedfp_data;
/* process grouping parameters */
int *f_procs_in_group;
int f_procs_per_group;
int f_aggregator_index;
/* File View parameters */
struct iovec *f_decoded_iov;
@ -250,7 +246,7 @@ struct mca_io_ompio_file_t {
size_t f_position_in_file_view; /* in bytes */
size_t f_total_bytes; /* total bytes read/written within 1 Fview*/
int f_index_in_file_view;
ptrdiff_t f_view_extent;
ptrdiff_t f_view_extent;
size_t f_view_size;
ompi_datatype_t *f_etype;
ompi_datatype_t *f_filetype;
@ -259,7 +255,7 @@ struct mca_io_ompio_file_t {
/* contains IO requests that needs to be read/written */
mca_io_ompio_io_array_t *f_io_array;
int f_num_of_io_entries;
int f_num_of_io_entries;
/* Hooks for modules to hang things */
mca_base_component_t *f_fs_component;
@ -280,11 +276,14 @@ struct mca_io_ompio_file_t {
/*initial list of aggregators and groups*/
int *f_init_aggr_list;
int f_init_num_aggrs;
int f_init_procs_per_group;
int f_init_procs_per_group;
int *f_init_procs_in_group;
int f_final_num_aggrs;
/* final list of aggregators and groups */
int *f_aggr_list;
int f_num_aggrs;
int *f_procs_in_group;
int f_procs_per_group;
/* internal ompio functions required by fbtl and fcoll */
mca_io_ompio_decode_datatype_fn_t f_decode_datatype;

Просмотреть файл

@ -62,9 +62,6 @@ int mca_io_ompio_simple_grouping(mca_io_ompio_file_t *fh,
int *num_groups_out,
mca_io_ompio_contg *contg_groups)
{
int group_size = 0;
int k=0, p=0, g=0;
int total_procs = 0;
int num_groups=1;
double time=0.0, time_prev=0.0, dtime=0.0, dtime_abs=0.0, dtime_diff=0.0, dtime_prev=0.0;
@ -185,23 +182,46 @@ int mca_io_ompio_simple_grouping(mca_io_ompio_file_t *fh,
if ( 1 >= num_groups ) {
num_groups = 1;
}
group_size = fh->f_size / num_groups;
*num_groups_out = num_groups;
return mca_io_ompio_forced_grouping ( fh, num_groups, contg_groups);
}
int mca_io_ompio_forced_grouping ( mca_io_ompio_file_t *fh,
int num_groups,
mca_io_ompio_contg *contg_groups)
{
int group_size = fh->f_size / num_groups;
int rest = fh->f_size % num_groups;
int flag = OMPI_COMM_IS_MAPBY_NODE (&ompi_mpi_comm_world.comm);
int k=0, p=0, g=0;
int total_procs = 0;
for ( k=0, p=0; p<num_groups; p++ ) {
if ( p == (num_groups - 1) ) {
contg_groups[p].procs_per_contg_group = fh->f_size - total_procs;
if ( p < rest ) {
contg_groups[p].procs_per_contg_group = group_size+1;
total_procs +=(group_size+1);
}
else {
contg_groups[p].procs_per_contg_group = group_size;
total_procs +=group_size;
}
for ( g=0; g<contg_groups[p].procs_per_contg_group; g++ ) {
contg_groups[p].procs_in_contg_group[g] = k;
k++;
}
}
*num_groups_out = num_groups;
if ( flag ) {
/* Map by node used for MPI_COMM_WORLD */
for ( g=0; g<contg_groups[p].procs_per_contg_group; g++ ) {
k = g*num_groups+p;
contg_groups[p].procs_in_contg_group[g] = k;
}
}
else {
for ( g=0; g<contg_groups[p].procs_per_contg_group; g++ ) {
contg_groups[p].procs_in_contg_group[g] = k;
k++;
}
}
}
return OMPI_SUCCESS;
}
@ -486,71 +506,41 @@ int mca_io_ompio_set_aggregator_props (struct mca_io_ompio_file_t *fh,
int num_aggregators,
size_t bytes_per_proc)
{
int j,procs_per_group = 0;
int j;
int ret=OMPI_SUCCESS;
/*If only one process used, no need to do aggregator selection!*/
if (fh->f_size == 1){
num_aggregators = 1;
}
fh->f_flags |= OMPIO_AGGREGATOR_IS_SET;
if (-1 == num_aggregators) {
if ( SIMPLE == mca_io_ompio_grouping_option ||
NO_REFINEMENT == mca_io_ompio_grouping_option ||
SIMPLE_PLUS == mca_io_ompio_grouping_option ) {
fh->f_aggregator_index = 0;
fh->f_final_num_aggrs = fh->f_init_num_aggrs;
fh->f_procs_per_group = fh->f_init_procs_per_group;
fh->f_procs_in_group = (int*)malloc (fh->f_procs_per_group * sizeof(int));
if (NULL == fh->f_procs_in_group) {
opal_output (1, "OUT OF MEMORY\n");
return OMPI_ERR_OUT_OF_RESOURCE;
}
for (j=0 ; j<fh->f_procs_per_group ; j++) {
fh->f_procs_in_group[j] = fh->f_init_procs_in_group[j];
}
}
else {
ret = mca_io_ompio_create_groups(fh,bytes_per_proc);
}
return ret;
}
/* Forced number of aggregators
** calculate the offset at which each group of processes will start
*/
if ( num_aggregators > fh->f_size ) {
num_aggregators = fh->f_size;
}
procs_per_group = ceil ((float)fh->f_size/num_aggregators);
/* calculate the number of processes in the local group */
if (fh->f_size/procs_per_group != fh->f_rank/procs_per_group) {
fh->f_procs_per_group = procs_per_group;
if ( (-1 == num_aggregators) &&
((SIMPLE != mca_io_ompio_grouping_option &&
NO_REFINEMENT != mca_io_ompio_grouping_option &&
SIMPLE_PLUS != mca_io_ompio_grouping_option ))) {
ret = mca_io_ompio_create_groups(fh,bytes_per_proc);
}
else {
fh->f_procs_per_group = fh->f_size%procs_per_group;
fh->f_procs_per_group = fh->f_init_procs_per_group;
fh->f_procs_in_group = (int*)malloc (fh->f_procs_per_group * sizeof(int));
if (NULL == fh->f_procs_in_group) {
opal_output (1, "OUT OF MEMORY\n");
return OMPI_ERR_OUT_OF_RESOURCE;
}
for (j=0 ; j<fh->f_procs_per_group ; j++) {
fh->f_procs_in_group[j] = fh->f_init_procs_in_group[j];
}
fh->f_num_aggrs = fh->f_init_num_aggrs;
fh->f_aggr_list = (int*) malloc ( fh->f_num_aggrs * sizeof(int));
if (NULL == fh->f_aggr_list ) {
opal_output (1, "OUT OF MEMORY\n");
return OMPI_ERR_OUT_OF_RESOURCE;
}
for (j=0 ; j<fh->f_num_aggrs; j++) {
fh->f_aggr_list[j] = fh->f_init_aggr_list[j];
}
}
fh->f_procs_in_group = (int*)malloc (fh->f_procs_per_group * sizeof(int));
if (NULL == fh->f_procs_in_group) {
opal_output (1, "OUT OF MEMORY\n");
return OMPI_ERR_OUT_OF_RESOURCE;
}
for (j=0 ; j<fh->f_procs_per_group ; j++) {
fh->f_procs_in_group[j] = (fh->f_rank/procs_per_group) * procs_per_group + j;
}
fh->f_aggregator_index = 0;
fh->f_final_num_aggrs = num_aggregators;
return OMPI_SUCCESS;
}
return ret;
}
@ -562,8 +552,9 @@ int mca_io_ompio_create_groups(mca_io_ompio_file_t *fh,
int final_aggr = 0;
int final_num_aggrs = 0;
int ret = OMPI_SUCCESS, ompio_grouping_flag = 0;
int *tmp_final_aggrs=NULL;
int *decision_list = NULL;
int i,j;
OMPI_MPI_OFFSET_TYPE *start_offsets_lens = NULL;
OMPI_MPI_OFFSET_TYPE *end_offsets = NULL;
@ -614,10 +605,9 @@ int mca_io_ompio_create_groups(mca_io_ompio_file_t *fh,
}
//Set aggregator index
fh->f_aggregator_index = 0;
//Calculate final number of aggregators
if(fh->f_rank == fh->f_procs_in_group[fh->f_aggregator_index]){
if(fh->f_rank == fh->f_procs_in_group[0]){
final_aggr = 1;
}
ret = fh->f_comm->c_coll->coll_allreduce (&final_aggr,
@ -629,10 +619,44 @@ int mca_io_ompio_create_groups(mca_io_ompio_file_t *fh,
fh->f_comm->c_coll->coll_allreduce_module);
if ( OMPI_SUCCESS != ret ) {
opal_output (1, "mca_io_ompio_create_groups: error in allreduce\n");
goto exit;
}
tmp_final_aggrs =(int*) malloc ( fh->f_size *sizeof(int));
if ( NULL == tmp_final_aggrs ) {
opal_output(1,"mca_io_ompio_create_groups: could not allocate memory\n");
goto exit;
}
ret = fh->f_comm->c_coll->coll_allgather (&final_aggr,
1,
MPI_INT,
tmp_final_aggrs,
1,
MPI_INT,
fh->f_comm,
fh->f_comm->c_coll->coll_allgather_module);
if ( OMPI_SUCCESS != ret ) {
opal_output (1, "mca_io_ompio_create_groups: error in allreduce\n");
goto exit;
}
//Set final number of aggregators in file handle
fh->f_final_num_aggrs = final_num_aggrs;
fh->f_num_aggrs = final_num_aggrs;
fh->f_aggr_list = (int*) malloc (fh->f_num_aggrs * sizeof(int));
if ( NULL == fh->f_aggr_list ) {
opal_output(1,"mca_io_ompio_create_groups: could not allocate memory\n");
goto exit;
}
for ( i=0, j=0; i<fh->f_num_aggrs; i++ ) {
for ( ; j<fh->f_size; j++ ) {
if ( 1 == tmp_final_aggrs[j] ) {
break;
}
fh->f_aggr_list[i] = tmp_final_aggrs[j];
}
}
exit:
@ -643,14 +667,16 @@ exit:
free (end_offsets);
}
if(NULL != aggr_bytes_per_group){
free(aggr_bytes_per_group);
free(aggr_bytes_per_group);
}
if( NULL != decision_list){
free(decision_list);
free(decision_list);
}
if ( NULL != tmp_final_aggrs){
free(tmp_final_aggrs);
}
return OMPI_SUCCESS;
return ret;
}
int mca_io_ompio_merge_initial_groups(mca_io_ompio_file_t *fh,
@ -1321,7 +1347,7 @@ int mca_io_ompio_prepare_to_group(mca_io_ompio_file_t *fh,
fh->f_init_num_aggrs,
fh->f_comm);
if ( OMPI_SUCCESS != ret ) {
opal_output (1, "mca_io_ompio_prepare_to_grou[: error in ompi_fcoll_base_coll_allgather_array 2\n");
opal_output (1, "mca_io_ompio_prepare_to_group: error in ompi_fcoll_base_coll_allgather_array 2\n");
free(decision_list_tmp);
goto exit;
}

Просмотреть файл

@ -51,6 +51,10 @@ OMPI_DECLSPEC int mca_io_ompio_set_aggregator_props (struct mca_io_ompio_file_t
int num_aggregators,
size_t bytes_per_proc);
int mca_io_ompio_forced_grouping ( mca_io_ompio_file_t *fh,
int num_groups,
mca_io_ompio_contg *contg_groups);
int mca_io_ompio_cart_based_grouping(mca_io_ompio_file_t *ompio_fh, int *num_groups,
mca_io_ompio_contg *contg_groups);

Просмотреть файл

@ -10,7 +10,7 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008-2017 University of Houston. All rights reserved.
* Copyright (c) 2008-2018 University of Houston. All rights reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2015 Research Organization for Information Science
@ -32,6 +32,7 @@
#include "ompi/mca/io/io.h"
#include "ompi/mca/fs/base/base.h"
#include "io_ompio.h"
#include "ompi/mca/common/ompio/common_ompio_request.h"
int mca_io_ompio_cycle_buffer_size = OMPIO_DEFAULT_CYCLE_BUF_SIZE;
int mca_io_ompio_bytes_per_agg = OMPIO_PREALLOC_MAX_BUF_SIZE;
@ -90,11 +91,6 @@ static int delete_priority_param = 30;
opal_mutex_t mca_io_ompio_mutex = {{0}};
/*
* Global list of requests for this component
*/
opal_list_t mca_io_ompio_pending_requests = {{0}};
/*
* Public string showing this component's version number
@ -262,9 +258,7 @@ static int open_component(void)
/* Create the mutex */
OBJ_CONSTRUCT(&mca_io_ompio_mutex, opal_mutex_t);
/* Create the list of pending requests */
OBJ_CONSTRUCT(&mca_io_ompio_pending_requests, opal_list_t);
mca_common_ompio_request_init ();
return OMPI_SUCCESS;
}
@ -272,11 +266,7 @@ static int open_component(void)
static int close_component(void)
{
/* Destroy the list of pending requests */
/* JMS: Good opportunity here to list out all the IO requests that
were not destroyed / completed upon MPI_FINALIZE */
OBJ_DESTRUCT(&mca_io_ompio_pending_requests);
mca_common_ompio_request_fini ();
OBJ_DESTRUCT(&mca_io_ompio_mutex);

Просмотреть файл

@ -9,7 +9,7 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008-2016 University of Houston. All rights reserved.
* Copyright (c) 2008-2018 University of Houston. All rights reserved.
* Copyright (c) 2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
@ -32,7 +32,7 @@
#include "ompi/mca/fbtl/base/base.h"
#include "io_ompio.h"
#include "io_ompio_request.h"
#include "ompi/mca/common/ompio/common_ompio_request.h"
#include "math.h"
#include <unistd.h>

Просмотреть файл

@ -9,7 +9,7 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008-2016 University of Houston. All rights reserved.
* Copyright (c) 2008-2018 University of Houston. All rights reserved.
* Copyright (c) 2015-2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
@ -34,7 +34,7 @@
#include "ompi/mca/sharedfp/base/base.h"
#include "io_ompio.h"
#include "io_ompio_request.h"
#include "ompi/mca/common/ompio/common_ompio_request.h"
#include "math.h"
#include <unistd.h>

Просмотреть файл

@ -1,99 +0,0 @@
/* -*- Mode: C; c-basic-offset:4 ; -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2016 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008-2014 University of Houston. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "io_ompio_request.h"
static void mca_io_ompio_request_construct(mca_ompio_request_t* req);
static void mca_io_ompio_request_destruct(mca_ompio_request_t *req);
bool mca_io_ompio_progress_is_registered=false;
/*
 * Free callback installed on every OMPIO request (req_ompi.req_free).
 * Runs the module-specific cleanup hook if one was attached, unlinks the
 * request from the component-wide pending list, drops the object
 * reference, and returns MPI_REQUEST_NULL to the caller.
 */
static int mca_io_ompio_request_free (struct ompi_request_t **req)
{
    mca_ompio_request_t *ompreq = (mca_ompio_request_t *) *req;

    /* Let the owning module tear down its private state first. */
    if (ompreq->req_free_fn != NULL) {
        ompreq->req_free_fn (ompreq);
    }

    /* Detach from the global progress list before the object can die. */
    opal_list_remove_item (&mca_io_ompio_pending_requests, &ompreq->req_item);

    OBJ_RELEASE (*req);
    *req = MPI_REQUEST_NULL;
    return OMPI_SUCCESS;
}
/* Cancel callback (req_ompi.req_cancel): OMPIO I/O requests are not
 * cancellable, so this is a deliberate no-op that reports success. */
static int mca_io_ompio_request_cancel (struct ompi_request_t *req, int flag)
{
    (void) req;   /* unused */
    (void) flag;  /* unused */
    return OMPI_SUCCESS;
}
/* Register mca_ompio_request_t with the OPAL object system: it inherits
 * from ompi_request_t, and the construct/destruct callbacks below run on
 * OBJ_NEW / final OBJ_RELEASE respectively. */
OBJ_CLASS_INSTANCE(mca_ompio_request_t, ompi_request_t,
mca_io_ompio_request_construct,
mca_io_ompio_request_destruct);
/*
 * Object-system constructor: initialize the embedded ompi_request_t,
 * install the free/cancel callbacks, clear the per-request hooks, and
 * append the request to the component-wide pending list so the progress
 * engine will see it.
 */
void mca_io_ompio_request_construct(mca_ompio_request_t* request)
{
    OMPI_REQUEST_INIT(&(request->req_ompi), false);

    request->req_ompi.req_type   = OMPI_REQUEST_IO;
    request->req_ompi.req_free   = mca_io_ompio_request_free;
    request->req_ompi.req_cancel = mca_io_ompio_request_cancel;

    /* Hooks and private data start out unset; a module may attach them
     * after construction. */
    request->req_data        = NULL;
    request->req_progress_fn = NULL;
    request->req_free_fn     = NULL;

    OBJ_CONSTRUCT(&request->req_item, opal_list_item_t);
    opal_list_append(&mca_io_ompio_pending_requests, &request->req_item);
}
/*
 * Object-system destructor: finalize the embedded ompi_request_t, destruct
 * the list-item member, and release any request-private data buffer.
 * (Removal from the pending list happens in mca_io_ompio_request_free,
 * not here.)
 */
void mca_io_ompio_request_destruct(mca_ompio_request_t* req)
{
    OMPI_REQUEST_FINI (&(req->req_ompi));
    OBJ_DESTRUCT (&req->req_item);
    /* free(NULL) is a well-defined no-op (C11 7.22.3.3), so the former
     * NULL guard was redundant. */
    free (req->req_data);
}
/*
 * Progress engine entry point for the ompio component: walk the global
 * list of pending requests, drive each incomplete one via its
 * module-supplied progress function, and mark finished requests complete.
 * Returns the number of requests completed during this pass.
 */
int mca_io_ompio_component_progress ( void )
{
mca_ompio_request_t *req=NULL;
opal_list_item_t *litem=NULL;
int completed=0;
OPAL_LIST_FOREACH(litem, &mca_io_ompio_pending_requests, opal_list_item_t) {
req = GET_OMPIO_REQ_FROM_ITEM(litem);
/* Skip requests that have already been completed; they stay on the
 * list until freed by mca_io_ompio_request_free. */
if( REQUEST_COMPLETE(&req->req_ompi) ) {
continue;
}
/* req_progress_fn may be unset if no module attached one. */
if ( NULL != req->req_progress_fn ) {
/* A true return means the operation finished this pass. */
if ( req->req_progress_fn(req) ) {
completed++;
ompi_request_complete (&req->req_ompi, true);
/* The fbtl progress function is expected to set the
 * status elements
 */
}
}
}
return completed;
}