
Merge pull request #4551 from ggouaillardet/topic/communicator_mutex_c_lock

Make usage of ompi_communicator_t, ompi_file_t and ompi_win_t mutex consistent
Gilles Gouaillardet 2017-12-04 09:20:52 +09:00 committed by GitHub
parent 710fb72afa b8e77ba759
commit 2f5b1e9fe0
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
11 changed files with 135 additions and 131 deletions
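The substance of the change is twofold: the per-object locks get one consistent name and lifecycle (comm->c_lock, file->f_lock, win->w_lock), and every acquisition goes through OPAL_THREAD_LOCK/OPAL_THREAD_UNLOCK instead of a mix with unconditional opal_mutex_lock/opal_mutex_unlock. The OPAL_THREAD_* macros skip the mutex entirely when the process runs single-threaded. As a rough, self-contained model of that behavior (hypothetical names built on pthreads, not the real OPAL macros):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for opal_using_threads() and OPAL_THREAD_LOCK/
 * OPAL_THREAD_UNLOCK: take the mutex only when the process actually uses
 * threads, so single-threaded runs pay no synchronization cost. */
static bool using_threads = false; /* set once at init time */

#define MODEL_THREAD_LOCK(m)   do { if (using_threads) pthread_mutex_lock(m);   } while (0)
#define MODEL_THREAD_UNLOCK(m) do { if (using_threads) pthread_mutex_unlock(m); } while (0)

/* Hypothetical handle mirroring ompi_file_t and its f_lock member. */
typedef struct {
    pthread_mutex_t f_lock;
    long position;
} model_file_t;

static void model_seek(model_file_t *fh, long off)
{
    MODEL_THREAD_LOCK(&fh->f_lock);   /* a no-op while using_threads is false */
    fh->position = off;
    MODEL_THREAD_UNLOCK(&fh->f_lock);
}

int main(void)
{
    model_file_t fh = { PTHREAD_MUTEX_INITIALIZER, 0 }; /* lock lives with the handle */
    model_seek(&fh, 42);
    printf("position = %ld\n", fh.position);
    pthread_mutex_destroy(&fh.f_lock); /* destroyed with the handle, like OBJ_DESTRUCT */
    return 0;
}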

View file

@@ -465,6 +465,8 @@ static void ompi_comm_destruct(ompi_communicator_t* comm)
opal_pointer_array_set_item ( &ompi_comm_f_to_c_table,
comm->c_f_to_c_index, NULL);
}
+OBJ_DESTRUCT(&comm->c_lock);
}
#define OMPI_COMM_SET_INFO_FN(name, flag) \

View file

@@ -12,7 +12,7 @@
* All rights reserved.
* Copyright (c) 2008-2009 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
-* Copyright (c) 2015 Research Organization for Information Science
+* Copyright (c) 2015-2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2016 University of Houston. All rights reserved.
* Copyright (c) 2016-2017 IBM Corporation. All rights reserved.
@@ -128,7 +128,7 @@ int ompi_file_open(struct ompi_communicator_t *comm, const char *filename,
}
/* Create the mutex */
-OBJ_CONSTRUCT(&file->f_mutex, opal_mutex_t);
+OBJ_CONSTRUCT(&file->f_lock, opal_mutex_t);
/* Select a module and actually open the file */
@@ -150,7 +150,7 @@ int ompi_file_open(struct ompi_communicator_t *comm, const char *filename,
int ompi_file_close(ompi_file_t **file)
{
-OBJ_DESTRUCT(&(*file)->f_mutex);
+OBJ_DESTRUCT(&(*file)->f_lock);
(*file)->f_flags |= OMPI_FILE_ISCLOSED;
OBJ_RELEASE(*file);
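The two hunks above (together with the comm.c hunk before them) establish the lifecycle rule this commit enforces: each lock is OBJ_CONSTRUCT'ed when its owning object is created and OBJ_DESTRUCT'ed exactly once when the object is torn down. A sketch of that pairing in plain C, reusing the hypothetical model_file_t from the sketch above (not OMPI's OBJ_* machinery):

#include <pthread.h>
#include <stdlib.h>

/* Pairing rule modeled in plain C: construct the lock with the object and
 * destroy it with the object, never separately. model_file_t is the
 * hypothetical handle from the previous sketch. */
model_file_t *model_open(void)
{
    model_file_t *fh = malloc(sizeof(*fh));
    if (NULL != fh) {
        pthread_mutex_init(&fh->f_lock, NULL); /* like OBJ_CONSTRUCT(&file->f_lock, opal_mutex_t) */
        fh->position = 0;
    }
    return fh;
}

void model_close(model_file_t **fh)
{
    pthread_mutex_destroy(&(*fh)->f_lock);     /* like OBJ_DESTRUCT(&(*file)->f_lock) */
    free(*fh);
    *fh = NULL;
}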

View file

@@ -12,7 +12,7 @@
* All rights reserved.
* Copyright (c) 2009 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2009-2017 Cisco Systems, Inc. All rights reserved
-* Copyright (c) 2015 Research Organization for Information Science
+* Copyright (c) 2015-2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2016 University of Houston. All rights reserved.
* Copyright (c) 2016-2017 IBM Corporation. All rights reserved.
@@ -79,7 +79,7 @@ struct ompi_file_t {
/** Mutex to be used to protect access to the selected component
on a per file-handle basis */
-opal_mutex_t f_mutex;
+opal_mutex_t f_lock;
/** The selected component (note that this is a union) -- we need
this to add and remove the component from the list of

View file

@@ -10,7 +10,7 @@
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008-2016 University of Houston. All rights reserved.
-* Copyright (c) 2015 Research Organization for Information Science
+* Copyright (c) 2015-2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2016 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2016-2017 IBM Corporation. All rights reserved.
@@ -137,7 +137,7 @@ int mca_io_ompio_file_preallocate (ompi_file_t *fh,
data = (mca_io_ompio_data_t *) fh->f_io_selected_data;
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
tmp = diskspace;
ret = data->ompio_fh.f_comm->c_coll->coll_bcast (&tmp,
@@ -147,23 +147,23 @@ int mca_io_ompio_file_preallocate (ompi_file_t *fh,
data->ompio_fh.f_comm,
data->ompio_fh.f_comm->c_coll->coll_bcast_module);
if ( OMPI_SUCCESS != ret ) {
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return OMPI_ERROR;
}
if (tmp != diskspace) {
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return OMPI_ERROR;
}
ret = data->ompio_fh.f_fs->fs_file_get_size (&data->ompio_fh,
&current_size);
if ( OMPI_SUCCESS != ret ) {
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return OMPI_ERROR;
}
if ( current_size > diskspace ) {
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return OMPI_SUCCESS;
}
@@ -240,7 +240,7 @@ exit:
if ( diskspace > current_size ) {
data->ompio_fh.f_fs->fs_file_set_size (&data->ompio_fh, diskspace);
}
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
@@ -255,7 +255,7 @@ int mca_io_ompio_file_set_size (ompi_file_t *fh,
data = (mca_io_ompio_data_t *) fh->f_io_selected_data;
tmp = size;
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
ret = data->ompio_fh.f_comm->c_coll->coll_bcast (&tmp,
1,
OMPI_OFFSET_DATATYPE,
@@ -264,20 +264,20 @@ int mca_io_ompio_file_set_size (ompi_file_t *fh,
data->ompio_fh.f_comm->c_coll->coll_bcast_module);
if ( OMPI_SUCCESS != ret ) {
opal_output(1, ",mca_io_ompio_file_set_size: error in bcast\n");
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
if (tmp != size) {
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return OMPI_ERROR;
}
ret = data->ompio_fh.f_fs->fs_file_set_size (&data->ompio_fh, size);
if ( OMPI_SUCCESS != ret ) {
opal_output(1, ",mca_io_ompio_file_set_size: error in fs->set_size\n");
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
@@ -285,10 +285,10 @@ int mca_io_ompio_file_set_size (ompi_file_t *fh,
data->ompio_fh.f_comm->c_coll->coll_barrier_module);
if ( OMPI_SUCCESS != ret ) {
opal_output(1, ",mca_io_ompio_file_set_size: error in barrier\n");
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
@@ -300,9 +300,9 @@ int mca_io_ompio_file_get_size (ompi_file_t *fh,
mca_io_ompio_data_t *data;
data = (mca_io_ompio_data_t *) fh->f_io_selected_data;
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
ret = mca_common_ompio_file_get_size(&data->ompio_fh,size);
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
@@ -339,7 +339,7 @@ int mca_io_ompio_file_set_atomicity (ompi_file_t *fh,
data = (mca_io_ompio_data_t *) fh->f_io_selected_data;
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
if (flag) {
flag = 1;
}
@@ -354,12 +354,12 @@ int mca_io_ompio_file_set_atomicity (ompi_file_t *fh,
data->ompio_fh.f_comm->c_coll->coll_bcast_module);
if (tmp != flag) {
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return OMPI_ERROR;
}
data->ompio_fh.f_atomicity = flag;
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return OMPI_SUCCESS;
}
@@ -371,9 +371,9 @@ int mca_io_ompio_file_get_atomicity (ompi_file_t *fh,
data = (mca_io_ompio_data_t *) fh->f_io_selected_data;
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
*flag = data->ompio_fh.f_atomicity;
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return OMPI_SUCCESS;
}
@@ -385,9 +385,9 @@ int mca_io_ompio_file_sync (ompi_file_t *fh)
data = (mca_io_ompio_data_t *) fh->f_io_selected_data;
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
ret = data->ompio_fh.f_fs->fs_file_sync (&data->ompio_fh);
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
@@ -403,13 +403,13 @@ int mca_io_ompio_file_seek (ompi_file_t *fh,
data = (mca_io_ompio_data_t *) fh->f_io_selected_data;
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
offset = off * data->ompio_fh.f_etype_size;
switch(whence) {
case MPI_SEEK_SET:
if (offset < 0) {
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return OMPI_ERROR;
}
break;
@@ -417,7 +417,7 @@ int mca_io_ompio_file_seek (ompi_file_t *fh,
offset += data->ompio_fh.f_position_in_file_view;
offset += data->ompio_fh.f_disp;
if (offset < 0) {
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return OMPI_ERROR;
}
break;
@@ -426,18 +426,18 @@ int mca_io_ompio_file_seek (ompi_file_t *fh,
&temp_offset);
offset += temp_offset;
if (offset < 0 || OMPI_SUCCESS != ret) {
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return OMPI_ERROR;
}
break;
default:
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return OMPI_ERROR;
}
ret = mca_common_ompio_set_explicit_offset (&data->ompio_fh,
offset/data->ompio_fh.f_etype_size);
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
@@ -452,9 +452,9 @@ int mca_io_ompio_file_get_position (ompi_file_t *fd,
data = (mca_io_ompio_data_t *) fd->f_io_selected_data;
fh = &data->ompio_fh;
-OPAL_THREAD_LOCK(&fd->f_mutex);
+OPAL_THREAD_LOCK(&fd->f_lock);
ret = mca_common_ompio_file_get_position (fh, offset);
-OPAL_THREAD_UNLOCK(&fd->f_mutex);
+OPAL_THREAD_UNLOCK(&fd->f_lock);
return ret;
}
@@ -470,7 +470,7 @@ int mca_io_ompio_file_get_byte_offset (ompi_file_t *fh,
data = (mca_io_ompio_data_t *) fh->f_io_selected_data;
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
temp_offset = data->ompio_fh.f_view_extent *
(offset*data->ompio_fh.f_etype_size / data->ompio_fh.f_view_size);
@@ -497,7 +497,7 @@ int mca_io_ompio_file_get_byte_offset (ompi_file_t *fh,
*disp = data->ompio_fh.f_disp + temp_offset +
(OMPI_MPI_OFFSET_TYPE)(intptr_t)data->ompio_fh.f_decoded_iov[index].iov_base + k;
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return OMPI_SUCCESS;
}
@@ -521,9 +521,9 @@ int mca_io_ompio_file_seek_shared (ompi_file_t *fp,
return OMPI_ERROR;
}
-OPAL_THREAD_LOCK(&fp->f_mutex);
+OPAL_THREAD_LOCK(&fp->f_lock);
ret = shared_fp_base_module->sharedfp_seek(fh,offset,whence);
-OPAL_THREAD_UNLOCK(&fp->f_mutex);
+OPAL_THREAD_UNLOCK(&fp->f_lock);
return ret;
}
@@ -546,10 +546,10 @@ int mca_io_ompio_file_get_position_shared (ompi_file_t *fp,
opal_output(0, "No shared file pointer component found for this communicator. Can not execute\n");
return OMPI_ERROR;
}
-OPAL_THREAD_LOCK(&fp->f_mutex);
+OPAL_THREAD_LOCK(&fp->f_lock);
ret = shared_fp_base_module->sharedfp_get_position(fh,offset);
*offset = *offset / fh->f_etype_size;
-OPAL_THREAD_UNLOCK(&fp->f_mutex);
+OPAL_THREAD_UNLOCK(&fp->f_lock);
return ret;
}

View file

@@ -1,20 +1,22 @@
/*
-* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
-* University Research and Technology
-* Corporation. All rights reserved.
-* Copyright (c) 2004-2016 The University of Tennessee and The University
-* of Tennessee Research Foundation. All rights
-* reserved.
-* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
-* University of Stuttgart. All rights reserved.
-* Copyright (c) 2004-2005 The Regents of the University of California.
-* All rights reserved.
-* Copyright (c) 2008-2016 University of Houston. All rights reserved.
-* $COPYRIGHT$
+* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
+* University Research and Technology
+* Corporation. All rights reserved.
+* Copyright (c) 2004-2016 The University of Tennessee and The University
+* of Tennessee Research Foundation. All rights
+* reserved.
+* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
+* University of Stuttgart. All rights reserved.
+* Copyright (c) 2004-2005 The Regents of the University of California.
+* All rights reserved.
+* Copyright (c) 2008-2016 University of Houston. All rights reserved.
+* Copyright (c) 2017 Research Organization for Information Science
+* and Technology (RIST). All rights reserved.
+* $COPYRIGHT$
*
-* Additional copyrights may follow
+* Additional copyrights may follow
*
-* $HEADER$
+* $HEADER$
*/
#include "ompi_config.h"
@@ -59,9 +61,9 @@ int mca_io_ompio_file_read (ompi_file_t *fp,
mca_io_ompio_data_t *data;
data = (mca_io_ompio_data_t *) fp->f_io_selected_data;
-OPAL_THREAD_LOCK(&fp->f_mutex);
+OPAL_THREAD_LOCK(&fp->f_lock);
ret = mca_common_ompio_file_read(&data->ompio_fh,buf,count,datatype,status);
-OPAL_THREAD_UNLOCK(&fp->f_mutex);
+OPAL_THREAD_UNLOCK(&fp->f_lock);
return ret;
}
@@ -77,9 +79,9 @@ int mca_io_ompio_file_read_at (ompi_file_t *fh,
mca_io_ompio_data_t *data;
data = (mca_io_ompio_data_t *) fh->f_io_selected_data;
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
ret = mca_common_ompio_file_read_at(&data->ompio_fh, offset,buf,count,datatype,status);
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
@@ -94,9 +96,9 @@ int mca_io_ompio_file_iread (ompi_file_t *fh,
mca_io_ompio_data_t *data;
data = (mca_io_ompio_data_t *) fh->f_io_selected_data;
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
ret = mca_common_ompio_file_iread(&data->ompio_fh,buf,count,datatype,request);
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
@@ -113,9 +115,9 @@ int mca_io_ompio_file_iread_at (ompi_file_t *fh,
mca_io_ompio_data_t *data;
data = (mca_io_ompio_data_t *) fh->f_io_selected_data;
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
ret = mca_common_ompio_file_iread_at(&data->ompio_fh,offset,buf,count,datatype,request);
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
@@ -134,14 +136,14 @@ int mca_io_ompio_file_read_all (ompi_file_t *fh,
data = (mca_io_ompio_data_t *) fh->f_io_selected_data;
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
ret = data->ompio_fh.
f_fcoll->fcoll_file_read_all (&data->ompio_fh,
buf,
count,
datatype,
status);
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
if ( MPI_STATUS_IGNORE != status ) {
size_t size;
@@ -165,7 +167,7 @@ int mca_io_ompio_file_iread_all (ompi_file_t *fh,
data = (mca_io_ompio_data_t *) fh->f_io_selected_data;
fp = &data->ompio_fh;
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
if ( NULL != fp->f_fcoll->fcoll_file_iread_all ) {
ret = fp->f_fcoll->fcoll_file_iread_all (&data->ompio_fh,
buf,
@@ -179,7 +181,7 @@ int mca_io_ompio_file_iread_all (ompi_file_t *fh,
individual non-blocking I/O operations. */
ret = mca_common_ompio_file_iread ( fp, buf, count, datatype, request );
}
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
@@ -196,9 +198,9 @@ int mca_io_ompio_file_read_at_all (ompi_file_t *fh,
mca_io_ompio_data_t *data;
data = (mca_io_ompio_data_t *) fh->f_io_selected_data;
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
ret = mca_common_ompio_file_read_at_all(&data->ompio_fh,offset,buf,count,datatype,status);
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
@@ -214,9 +216,9 @@ int mca_io_ompio_file_iread_at_all (ompi_file_t *fh,
mca_io_ompio_data_t *data;
data = (mca_io_ompio_data_t *) fh->f_io_selected_data;
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
ret = mca_common_ompio_file_iread_at_all ( &data->ompio_fh, offset, buf, count, datatype, request );
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
@@ -244,9 +246,9 @@ int mca_io_ompio_file_read_shared (ompi_file_t *fp,
opal_output(0, "No shared file pointer component found for the given communicator. Can not execute\n");
return OMPI_ERROR;
}
-OPAL_THREAD_LOCK(&fp->f_mutex);
+OPAL_THREAD_LOCK(&fp->f_lock);
ret = shared_fp_base_module->sharedfp_read(fh,buf,count,datatype,status);
-OPAL_THREAD_UNLOCK(&fp->f_mutex);
+OPAL_THREAD_UNLOCK(&fp->f_lock);
return ret;
}
@@ -271,9 +273,9 @@ int mca_io_ompio_file_iread_shared (ompi_file_t *fh,
opal_output(0, "No shared file pointer component found for the given communicator. Can not execute\n");
return OMPI_ERROR;
}
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
ret = shared_fp_base_module->sharedfp_iread(ompio_fh,buf,count,datatype,request);
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
@@ -298,9 +300,9 @@ int mca_io_ompio_file_read_ordered (ompi_file_t *fh,
opal_output(0, "No shared file pointer component found for the given communicator. Can not execute\n");
return OMPI_ERROR;
}
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
ret = shared_fp_base_module->sharedfp_read_ordered(ompio_fh,buf,count,datatype,status);
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
@@ -323,9 +325,9 @@ int mca_io_ompio_file_read_ordered_begin (ompi_file_t *fh,
opal_output(0, "No shared file pointer component found for the given communicator. Can not execute\n");
return OMPI_ERROR;
}
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
ret = shared_fp_base_module->sharedfp_read_ordered_begin(ompio_fh,buf,count,datatype);
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
@@ -348,9 +350,9 @@ int mca_io_ompio_file_read_ordered_end (ompi_file_t *fh,
opal_output(0, "No shared file pointer component found for the given communicator. Can not execute\n");
return OMPI_ERROR;
}
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
ret = shared_fp_base_module->sharedfp_read_ordered_end(ompio_fh,buf,status);
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
@@ -373,7 +375,7 @@ int mca_io_ompio_file_read_all_begin (ompi_file_t *fh,
printf("Only one split collective I/O operation allowed per file handle at any given point in time!\n");
return MPI_ERR_OTHER;
}
-/* No need for locking fh->f_mutex, that is done in file_iread_all */
+/* No need for locking fh->f_lock, that is done in file_iread_all */
ret = mca_io_ompio_file_iread_all ( fh, buf, count, datatype, &fp->f_split_coll_req );
fp->f_split_coll_in_use = true;
@@ -413,9 +415,9 @@ int mca_io_ompio_file_read_at_all_begin (ompi_file_t *fh,
printf("Only one split collective I/O operation allowed per file handle at any given point in time!\n");
return MPI_ERR_REQUEST;
}
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
ret = mca_common_ompio_file_iread_at_all ( fp, offset, buf, count, datatype, &fp->f_split_coll_req );
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
fp->f_split_coll_in_use = true;
return ret;
}

View file

@@ -10,7 +10,7 @@
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008-2016 University of Houston. All rights reserved.
-* Copyright (c) 2015 Research Organization for Information Science
+* Copyright (c) 2015-2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2016-2017 IBM Corporation. All rights reserved.
* $COPYRIGHT$
@@ -74,7 +74,7 @@ int mca_io_ompio_file_set_view (ompi_file_t *fp,
*/
fh = &data->ompio_fh;
-OPAL_THREAD_LOCK(&fp->f_mutex);
+OPAL_THREAD_LOCK(&fp->f_lock);
ret = mca_common_ompio_set_view(fh, disp, etype, filetype, datarep, info);
if ( NULL != fh->f_sharedfp_data) {
@@ -82,7 +82,7 @@ int mca_io_ompio_file_set_view (ompi_file_t *fp,
ret = mca_common_ompio_set_view(sh, disp, etype, filetype, datarep, info);
}
-OPAL_THREAD_UNLOCK(&fp->f_mutex);
+OPAL_THREAD_UNLOCK(&fp->f_lock);
return ret;
}
@@ -98,12 +98,12 @@ int mca_io_ompio_file_get_view (struct ompi_file_t *fp,
data = (mca_io_ompio_data_t *) fp->f_io_selected_data;
fh = &data->ompio_fh;
-OPAL_THREAD_LOCK(&fp->f_mutex);
+OPAL_THREAD_LOCK(&fp->f_lock);
*disp = fh->f_disp;
datatype_duplicate (fh->f_etype, etype);
datatype_duplicate (fh->f_orig_filetype, filetype);
strcpy (datarep, fh->f_datarep);
-OPAL_THREAD_UNLOCK(&fp->f_mutex);
+OPAL_THREAD_UNLOCK(&fp->f_lock);
return OMPI_SUCCESS;
}

View file

@@ -10,7 +10,7 @@
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008-2016 University of Houston. All rights reserved.
-* Copyright (c) 2015 Research Organization for Information Science
+* Copyright (c) 2015-2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
@@ -66,9 +66,9 @@ int mca_io_ompio_file_write (ompi_file_t *fp,
data = (mca_io_ompio_data_t *) fp->f_io_selected_data;
fh = &data->ompio_fh;
-OPAL_THREAD_LOCK(&fp->f_mutex);
+OPAL_THREAD_LOCK(&fp->f_lock);
ret = mca_common_ompio_file_write(fh,buf,count,datatype,status);
-OPAL_THREAD_UNLOCK(&fp->f_mutex);
+OPAL_THREAD_UNLOCK(&fp->f_lock);
return ret;
}
@@ -84,9 +84,9 @@ int mca_io_ompio_file_write_at (ompi_file_t *fh,
mca_io_ompio_data_t *data;
data = (mca_io_ompio_data_t *) fh->f_io_selected_data;
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
ret = mca_common_ompio_file_write_at (&data->ompio_fh, offset,buf,count,datatype,status);
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
@@ -101,9 +101,9 @@ int mca_io_ompio_file_iwrite (ompi_file_t *fp,
mca_io_ompio_data_t *data;
data = (mca_io_ompio_data_t *) fp->f_io_selected_data;
-OPAL_THREAD_LOCK(&fp->f_mutex);
+OPAL_THREAD_LOCK(&fp->f_lock);
ret = mca_common_ompio_file_iwrite(&data->ompio_fh,buf,count,datatype,request);
-OPAL_THREAD_UNLOCK(&fp->f_mutex);
+OPAL_THREAD_UNLOCK(&fp->f_lock);
return ret;
}
@@ -120,9 +120,9 @@ int mca_io_ompio_file_iwrite_at (ompi_file_t *fh,
mca_io_ompio_data_t *data;
data = (mca_io_ompio_data_t *) fh->f_io_selected_data;
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
ret = mca_common_ompio_file_iwrite_at(&data->ompio_fh,offset,buf,count,datatype,request);
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
@@ -142,14 +142,14 @@ int mca_io_ompio_file_write_all (ompi_file_t *fh,
data = (mca_io_ompio_data_t *) fh->f_io_selected_data;
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
ret = data->ompio_fh.
f_fcoll->fcoll_file_write_all (&data->ompio_fh,
buf,
count,
datatype,
status);
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
if ( MPI_STATUS_IGNORE != status ) {
size_t size;
@@ -171,9 +171,9 @@ int mca_io_ompio_file_write_at_all (ompi_file_t *fh,
mca_io_ompio_data_t *data;
data = (mca_io_ompio_data_t *) fh->f_io_selected_data;
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
ret = mca_common_ompio_file_write_at_all(&data->ompio_fh,offset,buf,count,datatype,status);
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
@@ -191,7 +191,7 @@ int mca_io_ompio_file_iwrite_all (ompi_file_t *fh,
data = (mca_io_ompio_data_t *) fh->f_io_selected_data;
fp = &data->ompio_fh;
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
if ( NULL != fp->f_fcoll->fcoll_file_iwrite_all ) {
ret = fp->f_fcoll->fcoll_file_iwrite_all (&data->ompio_fh,
buf,
@@ -205,7 +205,7 @@ int mca_io_ompio_file_iwrite_all (ompi_file_t *fh,
individual non-blocking I/O operations. */
ret = mca_common_ompio_file_iwrite ( fp, buf, count, datatype, request );
}
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
@@ -222,9 +222,9 @@ int mca_io_ompio_file_iwrite_at_all (ompi_file_t *fh,
mca_io_ompio_data_t *data;
data = (mca_io_ompio_data_t *) fh->f_io_selected_data;
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
ret = mca_common_ompio_file_iwrite_at_all ( &data->ompio_fh, offset, buf, count, datatype, request );
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
return ret;
}
@@ -253,9 +253,9 @@ int mca_io_ompio_file_write_shared (ompi_file_t *fp,
opal_output(0, "No shared file pointer component found for this communicator. Can not execute\n");
return OMPI_ERROR;
}
-OPAL_THREAD_LOCK(&fp->f_mutex);
+OPAL_THREAD_LOCK(&fp->f_lock);
ret = shared_fp_base_module->sharedfp_write(fh,buf,count,datatype,status);
-OPAL_THREAD_UNLOCK(&fp->f_mutex);
+OPAL_THREAD_UNLOCK(&fp->f_lock);
return ret;
}
@@ -280,9 +280,9 @@ int mca_io_ompio_file_iwrite_shared (ompi_file_t *fp,
opal_output(0, "No shared file pointer component found for this communicator. Can not execute\n");
return OMPI_ERROR;
}
-OPAL_THREAD_LOCK(&fp->f_mutex);
+OPAL_THREAD_LOCK(&fp->f_lock);
ret = shared_fp_base_module->sharedfp_iwrite(fh,buf,count,datatype,request);
-OPAL_THREAD_UNLOCK(&fp->f_mutex);
+OPAL_THREAD_UNLOCK(&fp->f_lock);
return ret;
}
@@ -307,9 +307,9 @@ int mca_io_ompio_file_write_ordered (ompi_file_t *fp,
opal_output(0,"No shared file pointer component found for this communicator. Can not execute\n");
return OMPI_ERROR;
}
-OPAL_THREAD_LOCK(&fp->f_mutex);
+OPAL_THREAD_LOCK(&fp->f_lock);
ret = shared_fp_base_module->sharedfp_write_ordered(fh,buf,count,datatype,status);
-OPAL_THREAD_UNLOCK(&fp->f_mutex);
+OPAL_THREAD_UNLOCK(&fp->f_lock);
return ret;
}
@@ -333,9 +333,9 @@ int mca_io_ompio_file_write_ordered_begin (ompi_file_t *fp,
opal_output(0, "No shared file pointer component found for this communicator. Can not execute\n");
return OMPI_ERROR;
}
-OPAL_THREAD_LOCK(&fp->f_mutex);
+OPAL_THREAD_LOCK(&fp->f_lock);
ret = shared_fp_base_module->sharedfp_write_ordered_begin(fh,buf,count,datatype);
-OPAL_THREAD_UNLOCK(&fp->f_mutex);
+OPAL_THREAD_UNLOCK(&fp->f_lock);
return ret;
}
@@ -358,9 +358,9 @@ int mca_io_ompio_file_write_ordered_end (ompi_file_t *fp,
opal_output(0, "No shared file pointer component found for this communicator. Can not execute\n");
return OMPI_ERROR;
}
-OPAL_THREAD_LOCK(&fp->f_mutex);
+OPAL_THREAD_LOCK(&fp->f_lock);
ret = shared_fp_base_module->sharedfp_write_ordered_end(fh,buf,status);
-OPAL_THREAD_UNLOCK(&fp->f_mutex);
+OPAL_THREAD_UNLOCK(&fp->f_lock);
return ret;
}
@@ -383,7 +383,7 @@ int mca_io_ompio_file_write_all_begin (ompi_file_t *fh,
printf("Only one split collective I/O operation allowed per file handle at any given point in time!\n");
return MPI_ERR_OTHER;
}
-/* No need for locking fh->f_mutex, that is done in file_iwrite_all */
+/* No need for locking fh->f_lock, that is done in file_iwrite_all */
ret = mca_io_ompio_file_iwrite_all ( fh, buf, count, datatype, &fp->f_split_coll_req );
fp->f_split_coll_in_use = true;
@@ -425,9 +425,9 @@ int mca_io_ompio_file_write_at_all_begin (ompi_file_t *fh,
printf("Only one split collective I/O operation allowed per file handle at any given point in time!\n");
return MPI_ERR_REQUEST;
}
-OPAL_THREAD_LOCK(&fh->f_mutex);
+OPAL_THREAD_LOCK(&fh->f_lock);
ret = mca_common_ompio_file_iwrite_at_all ( fp, offset, buf, count, datatype, &fp->f_split_coll_req );
-OPAL_THREAD_UNLOCK(&fh->f_mutex);
+OPAL_THREAD_UNLOCK(&fh->f_lock);
fp->f_split_coll_in_use = true;
return ret;

View file

@@ -11,7 +11,7 @@
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008 Cisco Systems, Inc. All rights reserved.
-* Copyright (c) 2015 Research Organization for Information Science
+* Copyright (c) 2015-2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2016-2017 Los Alamos National Security, LLC. All rights
* reserved.
@@ -62,12 +62,12 @@ int MPI_File_get_errhandler( MPI_File file, MPI_Errhandler *errhandler)
}
}
-opal_mutex_lock (&file->f_mutex);
+OPAL_THREAD_LOCK(&file->f_lock);
/* Retain the errhandler, corresponding to object refcount
decrease in errhandler_free.c. */
*errhandler = file->error_handler;
OBJ_RETAIN(file->error_handler);
-opal_mutex_unlock (&file->f_mutex);
+OPAL_THREAD_UNLOCK(&file->f_lock);
/* All done */
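Worth noting why these two statements sit under a lock at all: MPI_File_set_errhandler (next file) can swap file->error_handler concurrently, so the read and the OBJ_RETAIN must be atomic with respect to that swap, or a reader could retain a handler that has already been released. A compact model of the get/set pair (hypothetical refcount fields, not OMPI's OBJ_* macros):

#include <pthread.h>

typedef struct { int refcount; } model_errhandler_t;

typedef struct {
    pthread_mutex_t lock;              /* plays the role of file->f_lock / win->w_lock */
    model_errhandler_t *error_handler;
} model_object_t;

/* Reader: fetch and retain under the lock, so the handler cannot be
 * swapped out and released between the two steps. */
model_errhandler_t *model_get_errhandler(model_object_t *obj)
{
    pthread_mutex_lock(&obj->lock);
    model_errhandler_t *eh = obj->error_handler;
    eh->refcount++;                    /* like OBJ_RETAIN */
    pthread_mutex_unlock(&obj->lock);
    return eh;
}

/* Writer: retain the new handler first, then swap and release the old
 * one under the same lock, mirroring the set_errhandler hunks below. */
void model_set_errhandler(model_object_t *obj, model_errhandler_t *eh)
{
    eh->refcount++;                    /* like OBJ_RETAIN(errhandler) */
    pthread_mutex_lock(&obj->lock);
    model_errhandler_t *tmp = obj->error_handler;
    obj->error_handler = eh;
    tmp->refcount--;                   /* like OBJ_RELEASE(tmp) */
    pthread_mutex_unlock(&obj->lock);
}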

View file

@@ -11,7 +11,7 @@
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008 Cisco Systems, Inc. All rights reserved.
-* Copyright (c) 2015 Research Organization for Information Science
+* Copyright (c) 2015-2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2016-2017 Los Alamos National Security, LLC. All rights
* reserved.
@@ -68,12 +68,12 @@ int MPI_File_set_errhandler( MPI_File file, MPI_Errhandler errhandler)
/* Prepare the new error handler */
OBJ_RETAIN(errhandler);
-opal_mutex_lock (&file->f_mutex);
+OPAL_THREAD_LOCK(&file->f_lock);
/* Ditch the old errhandler, and decrement its refcount. */
tmp = file->error_handler;
file->error_handler = errhandler;
OBJ_RELEASE(tmp);
-opal_mutex_unlock (&file->f_mutex);
+OPAL_THREAD_UNLOCK(&file->f_lock);
/* All done */
return MPI_SUCCESS;

View file

@@ -11,7 +11,7 @@
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008-2009 Cisco Systems, Inc. All rights reserved.
-* Copyright (c) 2015 Research Organization for Information Science
+* Copyright (c) 2015-2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2016 Los Alamos National Security, LLC. All rights
* reserved.
@@ -55,12 +55,12 @@ int MPI_Win_get_errhandler(MPI_Win win, MPI_Errhandler *errhandler)
}
}
-opal_mutex_lock (&win->w_lock);
+OPAL_THREAD_LOCK(&win->w_lock);
/* Retain the errhandler, corresponding to object refcount
decrease in errhandler_free.c. */
OBJ_RETAIN(win->error_handler);
*errhandler = win->error_handler;
-opal_mutex_unlock (&win->w_lock);
+OPAL_THREAD_UNLOCK(&win->w_lock);
/* All done */
return MPI_SUCCESS;

View file

@@ -11,7 +11,7 @@
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2008-2009 Cisco Systems, Inc. All rights reserved.
-* Copyright (c) 2015 Research Organization for Information Science
+* Copyright (c) 2015-2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2016-2017 Los Alamos National Security, LLC. All rights
* reserved.
@@ -63,12 +63,12 @@ int MPI_Win_set_errhandler(MPI_Win win, MPI_Errhandler errhandler)
/* Prepare the new error handler */
OBJ_RETAIN(errhandler);
-opal_mutex_lock (&win->w_lock);
+OPAL_THREAD_LOCK(&win->w_lock);
/* Ditch the old errhandler, and decrement its refcount. */
tmp = win->error_handler;
win->error_handler = errhandler;
OBJ_RELEASE(tmp);
-opal_mutex_unlock (&win->w_lock);
+OPAL_THREAD_UNLOCK(&win->w_lock);
/* All done */
return MPI_SUCCESS;