Update ROMIO release to the one included with MPICH2-1.0.4p1, tagged in
vendor/romio as mpich2-1.0.4p1.

This commit was SVN r11715.
Этот коммит содержится в:
Brian Barrett 2006-09-19 16:13:46 +00:00
родитель 18c54f574f
Коммит d1402cf8f5
256 изменённых файлов: 5828 добавлений и 554 удалений
ompi/mca/io/romio/romio
.cvsignore
adio
configure.in
doc
include
localdefs.in
man/man3

17
ompi/mca/io/romio/romio/.cvsignore Обычный файл

@ -0,0 +1,17 @@
TAGS
Makefile
config.status
config.log
config.cache
config.system
autom4te.cache
configure2
configure
configure.lineno
lib
bin
misc
localdefs
.deps
cscope.files
cscope.out

@ -0,0 +1,2 @@
Makefile
.*-cache

@ -0,0 +1,40 @@
<dir>
<file name="ad_gridftp.h" info="1136395894"/>
<file name="ad_gridftp_delete.c" info="1141070937"/>
<file name="ad_gridftp_resize.c" info="1141070937"/>
<file name="ad_gridftp_flush.c" info="1118265383"/>
<file name="ad_gridftp_write.c" info="1141070937"/>
<file name="ad_gridftp.c" info="1118265383"/>
<file name="ad_gridftp_open.c" info="1141070937"/>
<file name="globus_routines.c" info="1123623158"/>
<file name="ad_gridftp_hints.c" info="1118265383"/>
<file name="ad_gridftp_fcntl.c" info="1141070937"/>
<file name="ad_gridftp_read.c" info="1141070937"/>
<file name="ad_gridftp_close.c" info="1141070937"/>
</dir>
<data>
<fileinfo name="ad_gridftp.h">
</fileinfo>
<fileinfo name="ad_gridftp_delete.c">
</fileinfo>
<fileinfo name="ad_gridftp_resize.c">
</fileinfo>
<fileinfo name="ad_gridftp_flush.c">
</fileinfo>
<fileinfo name="ad_gridftp_write.c">
</fileinfo>
<fileinfo name="ad_gridftp.c">
</fileinfo>
<fileinfo name="ad_gridftp_open.c">
</fileinfo>
<fileinfo name="globus_routines.c">
</fileinfo>
<fileinfo name="ad_gridftp_hints.c">
</fileinfo>
<fileinfo name="ad_gridftp_fcntl.c">
</fileinfo>
<fileinfo name="ad_gridftp_read.c">
</fileinfo>
<fileinfo name="ad_gridftp_close.c">
</fileinfo>
</data>

@ -78,8 +78,6 @@ void ADIOI_GRIDFTP_IwriteStrided(ADIO_File fd, void *buf, int count,
*error_code);
void ADIOI_GRIDFTP_Flush(ADIO_File fd, int *error_code);
void ADIOI_GRIDFTP_Resize(ADIO_File fd, ADIO_Offset size, int *error_code);
ADIO_Offset ADIOI_GRIDFTP_SeekIndividual(ADIO_File fd, ADIO_Offset offset,
int whence, int *error_code);
void ADIOI_GRIDFTP_SetInfo(ADIO_File fd, MPI_Info users_info, int *error_code);
void ADIOI_GRIDFTP_Get_shared_fp(ADIO_File fd, int size,
ADIO_Offset *shared_fp,

@ -23,10 +23,10 @@ void ADIOI_GRIDFTP_Close(ADIO_File fd, int *error_code)
{
globus_err_handler("globus_ftp_client_operationattr_destroy",
myname,result);
*error_code = MPIO_Err_create_code(MPI_SUCESS, MPIR_ERR_RECOVERABLE,
*error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
myname, __LINE__, MPI_ERR_IO,
"**io",
"**io %s",globus_object_printable_to_string(result));
"**io %s",globus_object_printable_to_string(globus_error_get(result)));
return;
}
result=globus_ftp_client_handle_destroy(&(gridftp_fh[fd->fd_sys]));
@ -34,10 +34,10 @@ void ADIOI_GRIDFTP_Close(ADIO_File fd, int *error_code)
{
globus_err_handler("globus_ftp_client_handle_destroy",
myname,result);
*error_code = MPIO_Err_create_code(MPI_SUCESS, MPIR_ERR_RECOVERABLE,
*error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
myname, __LINE__, MPI_ERR_IO,
"**io",
"**io %s", globus_object_printable_to_string(result));
"**io %s", globus_object_printable_to_string(globus_error_get(result)));
return;
}

@ -48,7 +48,7 @@ void ADIOI_GRIDFTP_Delete(char *filename, int *error_code)
myname, __LINE__,
MPI_ERR_IO,
"**io", "**io %s",
globus_object_printable_to_string(result));
globus_object_printable_to_string(globus_error_get(result)));
return;
}
@ -63,7 +63,7 @@ void ADIOI_GRIDFTP_Delete(char *filename, int *error_code)
myname, __LINE__,
MPI_ERR_IO,
"**io", "**io %s",
globus_object_printable_to_string(result));
globus_object_printable_to_string(globus_error_get(result)));
return;
}
globus_mutex_lock(&lock);
@ -79,7 +79,7 @@ void ADIOI_GRIDFTP_Delete(char *filename, int *error_code)
myname, __LINE__,
MPI_ERR_IO,
"**io", "**io %s",
globus_object_printable_to_string(result));
globus_object_printable_to_string(globus_error_get(result)));
return;
}
@ -90,6 +90,6 @@ void ADIOI_GRIDFTP_Delete(char *filename, int *error_code)
myname, __LINE__,
MPI_ERR_IO,
"**io", "**io %s",
globus_object_printable_to_string(result));
globus_object_printable_to_string(globus_error_get(result)));
}
}

@ -62,7 +62,7 @@ void ADIOI_GRIDFTP_Fcntl(ADIO_File fd, int flag, ADIO_Fcntl_t *fcntl_struct,
MPIR_ERR_RECOVERABLE,
myname, __LINE__, MPI_ERR_IO,
"**io", "**io %s",
globus_object_printable_to_string(result));
globus_object_printable_to_string(globus_error_get(result)));
return;
}
globus_mutex_lock(&fcntl_size_lock);

@ -74,7 +74,7 @@ void ADIOI_GRIDFTP_Open(ADIO_File fd, int *error_code)
have to check themselves if the file is being accessed rdonly, rdwr,
or wronly.
*/
result=globus_ftp_client_handleattr_init(&hattr)
result=globus_ftp_client_handleattr_init(&hattr);
if ( result != GLOBUS_SUCCESS )
{
@ -85,7 +85,7 @@ void ADIOI_GRIDFTP_Open(ADIO_File fd, int *error_code)
*error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
myname, __LINE__, MPI_ERR_IO,
"**io",
"**io %s", globus_object_printable_to_string(result));
"**io %s", globus_object_printable_to_string(globus_error_get(result)));
return;
}
result = globus_ftp_client_operationattr_init(&(oattr[fd->fd_sys]));
@ -97,18 +97,18 @@ void ADIOI_GRIDFTP_Open(ADIO_File fd, int *error_code)
*error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
myname, __LINE__, MPI_ERR_IO,
"**io",
"**io %s", globus_object_printable_to_string(result));
"**io %s", globus_object_printable_to_string(globus_error_get(result)));
return;
}
/* Always use connection caching unless told otherwise */
result=globus_ftp_client_handleattr_set_cache_all(&hattr,GLOBUS_TRUE)
result=globus_ftp_client_handleattr_set_cache_all(&hattr,GLOBUS_TRUE);
if ( result !=GLOBUS_SUCCESS )
globus_err_handler("globus_ftp_client_handleattr_set_cache_all",myname,result);
/* Assume that it's safe to cache a file if it's read-only */
if ( (fd->access_mode&MPI_MODE_RDONLY) &&
if ( (fd->access_mode&ADIO_RDONLY) &&
(result=globus_ftp_client_handleattr_add_cached_url(&hattr,fd->filename))!=GLOBUS_SUCCESS )
globus_err_handler("globus_ftp_client_handleattr_add_cached_url",myname,result);
@ -128,7 +128,7 @@ void ADIOI_GRIDFTP_Open(ADIO_File fd, int *error_code)
*/
/* Set append mode if necessary */
if ( (fd->access_mode&MPI_MODE_APPEND) &&
if ( (fd->access_mode&ADIO_APPEND) &&
((result=globus_ftp_client_operationattr_set_append(&(oattr[fd->fd_sys]),GLOBUS_TRUE))!=GLOBUS_SUCCESS) )
globus_err_handler("globus_ftp_client_operationattr_set_append",myname,result);
@ -235,7 +235,7 @@ void ADIOI_GRIDFTP_Open(ADIO_File fd, int *error_code)
FPRINTF(stderr,"no MPI_Info object associated with %s\n",fd->filename);
/* Create the ftp handle */
result=globus_ftp_client_handle_init(&(gridftp_fh[fd->fd_sys]),&hattr)
result=globus_ftp_client_handle_init(&(gridftp_fh[fd->fd_sys]),&hattr);
if ( result != GLOBUS_SUCCESS )
{
globus_err_handler("globus_ftp_client_handle_init",myname,result);
@ -243,7 +243,7 @@ void ADIOI_GRIDFTP_Open(ADIO_File fd, int *error_code)
*error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
myname, __LINE__, MPI_ERR_IO,
"**io",
"**io %s", globus_object_printable_to_string(result));
"**io %s", globus_object_printable_to_string(globus_error_get(result)));
return;
}
@ -265,7 +265,7 @@ void ADIOI_GRIDFTP_Open(ADIO_File fd, int *error_code)
*error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
myname, __LINE__, MPI_ERR_IO,
"**io", "**io %s",
globus_object_printable_to_string(result));
globus_object_printable_to_string(globus_error_get(result)));
return;
}
/* wait till the callback completes */
@ -278,8 +278,8 @@ void ADIOI_GRIDFTP_Open(ADIO_File fd, int *error_code)
MPI_Bcast(&file_exists,1,MPI_INT,0,fd->comm);
/* It turns out that this is handled by MPI_File_open() directly */
if ( (file_exists!=GLOBUS_TRUE) && (fd->access_mode&MPI_MODE_CREATE) &&
!(fd->access_mode&MPI_MODE_EXCL) && !(fd->access_mode&MPI_MODE_RDONLY) )
if ( (file_exists!=GLOBUS_TRUE) && (fd->access_mode&ADIO_CREATE) &&
!(fd->access_mode&ADIO_EXCL) && !(fd->access_mode&ADIO_RDONLY) )
{
if ( myrank==0 )
{
@ -299,7 +299,7 @@ void ADIOI_GRIDFTP_Open(ADIO_File fd, int *error_code)
MPIR_ERR_RECOVERABLE,
myname, __LINE__, MPI_ERR_IO,
"**io", "**io %s",
globus_object_printable_to_string(result));
globus_object_printable_to_string(globus_error_get(result)));
return;
}
result=globus_ftp_client_register_write(&(gridftp_fh[fd->fd_sys]),
@ -314,7 +314,7 @@ void ADIOI_GRIDFTP_Open(ADIO_File fd, int *error_code)
MPIR_ERR_RECOVERABLE,
myname, __LINE__, MPI_ERR_IO,
"**io", "**io %s",
globus_object_printable_to_string(result));
globus_object_printable_to_string(globus_error_get(result)));
return;
}
globus_mutex_lock(&lock);
@ -324,7 +324,7 @@ void ADIOI_GRIDFTP_Open(ADIO_File fd, int *error_code)
}
MPI_Barrier(fd->comm);
}
else if ( (fd->access_mode&MPI_MODE_EXCL) && (file_exists==GLOBUS_TRUE) )
else if ( (fd->access_mode&ADIO_EXCL) && (file_exists==GLOBUS_TRUE) )
{
fd->fd_sys = -1;
*error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
@ -332,7 +332,7 @@ void ADIOI_GRIDFTP_Open(ADIO_File fd, int *error_code)
"**io", 0);
return;
}
else if ( (fd->access_mode&MPI_MODE_RDONLY) && (file_exists!=GLOBUS_TRUE) )
else if ( (fd->access_mode&ADIO_RDONLY) && (file_exists!=GLOBUS_TRUE) )
{
if ( myrank==0 )
{

@ -115,7 +115,7 @@ void ADIOI_GRIDFTP_ReadContig(ADIO_File fd, void *buf, int count,
globus_off_t goff;
globus_result_t result;
if ( fd->access_mode&MPI_MODE_WRONLY )
if ( fd->access_mode&ADIO_WRONLY )
{
*error_code=MPIR_ERR_MODE_WRONLY;
return;
@ -162,7 +162,7 @@ void ADIOI_GRIDFTP_ReadContig(ADIO_File fd, void *buf, int count,
*error_code = MPIO_Err_create_code(MPI_SUCCESS,
MPIR_ERR_RECOVERABLE, myname, __LINE__,
MPI_ERR_IO, "**io", "**io %s",
globus_object_printable_to_string(result));
globus_object_printable_to_string(globus_error_get(result)));
return;
}
@ -211,7 +211,7 @@ void ADIOI_GRIDFTP_ReadDiscontig(ADIO_File fd, void *buf, int count,
globus_result_t result;
globus_byte_t *tmp;
if ( fd->access_mode&MPI_MODE_WRONLY )
if ( fd->access_mode&ADIO_WRONLY )
{
*error_code=MPIR_ERR_MODE_WRONLY;
return;
@ -283,7 +283,7 @@ void ADIOI_GRIDFTP_ReadDiscontig(ADIO_File fd, void *buf, int count,
myrank,nprocs,myname,extent,count*btype_size);
fflush(stderr);
*error_code = MPIO_Err_create_code(MPI_SUCCESS,
MPIR_ERR_RECOVERABLE, myanem, __LINE__,
MPIR_ERR_RECOVERABLE, myname, __LINE__,
MPI_ERR_IO, "**io", 0);
return;
}
@ -305,9 +305,9 @@ void ADIOI_GRIDFTP_ReadDiscontig(ADIO_File fd, void *buf, int count,
{
globus_err_handler("globus_ftp_client_partial_get",myname,result);
*error_code = MPIO_Err_create_code(MPI_SUCCESS,
MPIR_ERR_RECOVERABLE, myanem, __LINE__,
MPIR_ERR_RECOVERABLE, myname, __LINE__,
MPI_ERR_IO, "**io", "**io %s",
globus_object_printable_to_string(result));
globus_object_printable_to_string(globus_error_get(result)));
return;
}
@ -326,10 +326,10 @@ void ADIOI_GRIDFTP_ReadDiscontig(ADIO_File fd, void *buf, int count,
(void *)(&bytes_read)))!=GLOBUS_SUCCESS )
{
globus_err_handler("globus_ftp_client_register_read",myname,result);
*error_code = MPIO_Err_create_code(MPI_SUCESS, MPIR_ERR_RECOVERABLE,
*error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
myname, __LINE__, MPI_ERR_IO,
"**io",
"**io %s", globus_object_printable_to_string(result));
"**io %s", globus_object_printable_to_string(globus_error_get(result)));
return;
}
/* The ctl callback won't start till the data callbacks complete, so it's

@ -65,11 +65,11 @@ void ADIOI_GRIDFTP_Resize(ADIO_File fd, ADIO_Offset size, int *error_code)
MPI_Comm_rank(fd->comm, &myrank);
/* Sanity check */
if ( fd->access_mode&MPI_MODE_RDONLY )
if ( fd->access_mode&ADIO_RDONLY )
{
FPRINTF(stderr,"%s: attempt to resize read-only file %s!\n",
myname,fd->filename);
*error_code = MPIO_Err_create_code(MPI_SUCESS, MPIR_ERR_RECOVERABLE,
*error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
myname, __LINE__, MPI_ERR_IO,
"**io", 0);
return;
@ -88,10 +88,10 @@ void ADIOI_GRIDFTP_Resize(ADIO_File fd, ADIO_Offset size, int *error_code)
GLOBUS_NULL))!=GLOBUS_SUCCESS )
{
globus_err_handler("globus_ftp_client_size",myname,result);
*error_code = MPIO_Err_create_code(MPI_SUCESS, MPIR_ERR_RECOVERABLE,
*error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
myname, __LINE__, MPI_ERR_IO,
"**io",
"**io %s", globus_object_printable_to_string(result));
"**io %s", globus_object_printable_to_string(globus_error_get(result)));
return;
}
globus_mutex_lock(&resize_lock);
@ -113,10 +113,10 @@ void ADIOI_GRIDFTP_Resize(ADIO_File fd, ADIO_Offset size, int *error_code)
GLOBUS_NULL))!=GLOBUS_SUCCESS )
{
globus_err_handler("globus_ftp_client_partial_put",myname,result);
*error_code = MPIO_Err_create_code(MPI_SUCESS,
*error_code = MPIO_Err_create_code(MPI_SUCCESS,
MPIR_ERR_RECOVERABLE, myname, __LINE__,
MPI_ERR_IO, "**io", "**io %s",
globus_object_printable_to_string(result));
globus_object_printable_to_string(globus_error_get(result)));
return;
}
@ -129,10 +129,10 @@ void ADIOI_GRIDFTP_Resize(ADIO_File fd, ADIO_Offset size, int *error_code)
GLOBUS_NULL))!=GLOBUS_SUCCESS )
{
globus_err_handler("globus_ftp_client_register_write",myname,result);
*error_code = MPIO_Err_create_code(MPI_SUCESS,
*error_code = MPIO_Err_create_code(MPI_SUCCESS,
MPIR_ERR_RECOVERABLE, myname, __LINE__,
MPI_ERR_IO, "**io", "**io %s",
globus_object_printable_to_string(result));
globus_object_printable_to_string(globus_error_get(result)));
return;
}
globus_mutex_lock(&resize_lock);
@ -161,10 +161,10 @@ void ADIOI_GRIDFTP_Resize(ADIO_File fd, ADIO_Offset size, int *error_code)
GLOBUS_NULL))!=GLOBUS_SUCCESS )
{
globus_err_handler("globus_ftp_client_move",myname,result);
*error_code = MPIO_Err_create_code(MPI_SUCESS,
*error_code = MPIO_Err_create_code(MPI_SUCCESS,
MPIR_ERR_RECOVERABLE, myname, __LINE__,
MPI_ERR_IO, "**io", "**io %s",
globus_object_printable_to_string(result));
globus_object_printable_to_string(globus_error_get(result)));
return;
}
globus_mutex_lock(&resize_lock);
@ -189,10 +189,10 @@ void ADIOI_GRIDFTP_Resize(ADIO_File fd, ADIO_Offset size, int *error_code)
GLOBUS_NULL))!=GLOBUS_SUCCESS )
{
globus_err_handler("globus_ftp_client_partial_third_party_transfer",myname,result);
*error_code = MPIO_Err_create_code(MPI_SUCESS,
*error_code = MPIO_Err_create_code(MPI_SUCCESS,
MPIR_ERR_RECOVERABLE, myname, __LINE__,
MPI_ERR_IO, "**io", "**io %s",
globus_object_printable_to_string(result));
globus_object_printable_to_string(globus_error_get(result)));
return;
}
globus_mutex_lock(&resize_lock);
@ -213,10 +213,10 @@ void ADIOI_GRIDFTP_Resize(ADIO_File fd, ADIO_Offset size, int *error_code)
GLOBUS_NULL))!=GLOBUS_SUCCESS )
{
globus_err_handler("globus_ftp_client_delete",myname,result);
*error_code = MPIO_Err_create_code(MPI_SUCESS,
*error_code = MPIO_Err_create_code(MPI_SUCCESS,
MPIR_ERR_RECOVERABLE, myname, __LINE__,
MPI_ERR_IO, "**io", "**io %s",
globus_object_printable_to_string(result));
globus_object_printable_to_string(globus_error_get(result)));
return;
}
globus_mutex_lock(&resize_lock);

@ -117,7 +117,7 @@ void ADIOI_GRIDFTP_WriteContig(ADIO_File fd, void *buf, int count,
globus_off_t goff;
globus_result_t result;
if ( fd->access_mode&MPI_MODE_RDONLY )
if ( fd->access_mode&ADIO_RDONLY )
{
*error_code=MPI_ERR_AMODE;
return;
@ -151,10 +151,10 @@ void ADIOI_GRIDFTP_WriteContig(ADIO_File fd, void *buf, int count,
GLOBUS_NULL))!=GLOBUS_SUCCESS )
{
globus_err_handler("globus_ftp_client_partial_put",myname,result);
*error_code = MPIO_Err_create_code(MPI_SUCESS, MPIR_ERR_RECOVERABLE,
*error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
myname, __LINE__, MPI_ERR_IO,
"**io",
"**io %s", globus_object_printable_to_string(result));
"**io %s", globus_object_printable_to_string(globus_error_get(result)));
return;
}
if ( (result=globus_ftp_client_register_write(&(gridftp_fh[fd->fd_sys]),
@ -166,10 +166,10 @@ void ADIOI_GRIDFTP_WriteContig(ADIO_File fd, void *buf, int count,
(void *)(&bytes_written)))!=GLOBUS_SUCCESS )
{
globus_err_handler("globus_ftp_client_register_write",myname,result);
*error_code = MPIO_Err_create_code(MPI_SUCESS, MPIR_ERR_RECOVERABLE,
*error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
myname, __LINE__, MPI_ERR_IO,
"**io",
"**io %s", globus_object_printable_to_string(result));
"**io %s", globus_object_printable_to_string(globus_error_get(result)));
return;
}
@ -232,10 +232,10 @@ void ADIOI_GRIDFTP_WriteDiscontig(ADIO_File fd, void *buf, int count,
FPRINTF(stderr,"[%d/%d] %s called with discontigous memory buffer\n",
myrank,nprocs,myname);
fflush(stderr);
*error_code = MPIO_Err_create_code(MPI_SUCESS, MPIR_ERR_RECOVERABLE,
*error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
myname, __LINE__, MPI_ERR_IO,
"**io",
"**io %s", globus_object_printable_to_string(result));
"**io %s", globus_object_printable_to_string(globus_error_get(result)));
return;
}
/* from here we can assume btype_extent==btype_size */
@ -275,10 +275,10 @@ void ADIOI_GRIDFTP_WriteDiscontig(ADIO_File fd, void *buf, int count,
FPRINTF(stderr,"[%d/%d] %s error in computing extent -- extent %d is smaller than total bytes requested %d!\n",
myrank,nprocs,myname,extent,count*btype_size);
fflush(stderr);
*error_code = MPIO_Err_create_code(MPI_SUCESS, MPIR_ERR_RECOVERABLE,
*error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
myname, __LINE__, MPI_ERR_IO,
"**io",
"**io %s", globus_object_printable_to_string(result));
"**io %s", globus_object_printable_to_string(globus_error_get(result)));
return;
}
end=start+(globus_off_t)extent;
@ -300,10 +300,10 @@ void ADIOI_GRIDFTP_WriteDiscontig(ADIO_File fd, void *buf, int count,
GLOBUS_NULL))!=GLOBUS_SUCCESS )
{
globus_err_handler("globus_ftp_client_partial_get",myname,result);
*error_code = MPIO_Err_create_code(MPI_SUCESS, MPIR_ERR_RECOVERABLE,
*error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
myname, __LINE__, MPI_ERR_IO,
"**io",
"**io %s", globus_object_printable_to_string(result));
"**io %s", globus_object_printable_to_string(globus_error_get(result)));
return;
}
@ -384,7 +384,7 @@ void ADIOI_GRIDFTP_WriteStrided(ADIO_File fd, void *buf, int count,
#ifdef GRIDFTP_USE_GENERIC_STRIDED
int myrank, nprocs;
if ( fd->access_mode&MPI_MODE_RDONLY )
if ( fd->access_mode&ADIO_RDONLY )
{
*error_code=MPI_ERR_AMODE;
return;

@ -0,0 +1,6 @@
Makefile
.deps
*.bb
*.bbg
.*-cache
*.lo

@ -0,0 +1,25 @@
<dir>
<file name="ad_hfs.c" info="1118265384"/>
<file name="ad_hfs_fcntl.c" info="1118265384"/>
<file name="ad_hfs_open.c" info="1118265384"/>
<file name="ad_hfs_write.c" info="1118265384"/>
<file name="ad_hfs.h" info="1118265384"/>
<file name="ad_hfs_read.c" info="1118265384"/>
<file name="ad_hfs_resize.c" info="1118265384"/>
</dir>
<data>
<fileinfo name="ad_hfs.c">
</fileinfo>
<fileinfo name="ad_hfs_fcntl.c">
</fileinfo>
<fileinfo name="ad_hfs_open.c">
</fileinfo>
<fileinfo name="ad_hfs_write.c">
</fileinfo>
<fileinfo name="ad_hfs.h">
</fileinfo>
<fileinfo name="ad_hfs_read.c">
</fileinfo>
<fileinfo name="ad_hfs_resize.c">
</fileinfo>
</data>

@ -0,0 +1,8 @@
Makefile
.deps
*.bb
*.bbg
.libs
.libstamp*
*.lo
.*-cache

@ -0,0 +1,46 @@
<dir>
<file name="ad_nfs_iread.c" info="1116890860"/>
<file name="ad_nfs_read.c" info="1118265385"/>
<file name="ad_nfs_resize.c" info="1152727497"/>
<file name="ad_nfs.c" info="1136423789"/>
<file name="ad_nfs_done.c" info="1116890860"/>
<file name="ad_nfs_open.c" info="1118265385"/>
<file name="ad_nfs_fcntl.c" info="1118265384"/>
<file name="ad_nfs_getsh.c" info="1136502929"/>
<file name="ad_nfs_iwrite.c" info="1116890861"/>
<file name="ad_nfs_write.c" info="1118265385"/>
<file name="ad_nfs_hints.c" info="1118265385"/>
<file name="ad_nfs.h" info="1136476880"/>
<file name="ad_nfs_wait.c" info="1116965767"/>
<file name="ad_nfs_setsh.c" info="1136502929"/>
</dir>
<data>
<fileinfo name="ad_nfs_iread.c">
</fileinfo>
<fileinfo name="ad_nfs_read.c">
</fileinfo>
<fileinfo name="ad_nfs_resize.c">
</fileinfo>
<fileinfo name="ad_nfs.c">
</fileinfo>
<fileinfo name="ad_nfs_done.c">
</fileinfo>
<fileinfo name="ad_nfs_open.c">
</fileinfo>
<fileinfo name="ad_nfs_fcntl.c">
</fileinfo>
<fileinfo name="ad_nfs_getsh.c">
</fileinfo>
<fileinfo name="ad_nfs_iwrite.c">
</fileinfo>
<fileinfo name="ad_nfs_write.c">
</fileinfo>
<fileinfo name="ad_nfs_hints.c">
</fileinfo>
<fileinfo name="ad_nfs.h">
</fileinfo>
<fileinfo name="ad_nfs_wait.c">
</fileinfo>
<fileinfo name="ad_nfs_setsh.c">
</fileinfo>
</data>

@ -39,6 +39,7 @@ libadio_nfs_la_SOURCES = \
io_romio_ad_nfs_iwrite.c \
io_romio_ad_nfs_open.c \
io_romio_ad_nfs_read.c \
io_romio_ad_nfs_resize.c \
io_romio_ad_nfs_setsh.c \
io_romio_ad_nfs_wait.c \
io_romio_ad_nfs_write.c

@ -31,6 +31,6 @@ struct ADIOI_Fns_struct ADIO_NFS_operations = {
ADIOI_GEN_IreadStrided, /* IreadStrided */
ADIOI_GEN_IwriteStrided, /* IwriteStrided */
ADIOI_GEN_Flush, /* Flush */
ADIOI_GEN_Resize, /* Resize */
ADIOI_NFS_Resize, /* Resize */
ADIOI_GEN_Delete, /* Delete */
};

@ -74,5 +74,6 @@ void ADIOI_NFS_SetInfo(ADIO_File fd, MPI_Info users_info, int *error_code);
void ADIOI_NFS_Get_shared_fp(ADIO_File fd, int size, ADIO_Offset *shared_fp,
int *error_code);
void ADIOI_NFS_Set_shared_fp(ADIO_File fd, ADIO_Offset offset, int *error_code);
void ADIOI_NFS_Resize(ADIO_File fd, ADIO_Offset size, int *error_code);
#endif

@ -27,7 +27,7 @@ void ADIOI_NFS_Get_shared_fp(ADIO_File fd, int incr, ADIO_Offset *shared_fp,
fd->file_system,
fd->fns,
ADIO_CREATE | ADIO_RDWR | ADIO_DELETE_ON_CLOSE,
0, MPI_BYTE, MPI_BYTE, 0, MPI_INFO_NULL,
0, MPI_BYTE, MPI_BYTE, MPI_INFO_NULL,
ADIO_PERM_NULL, error_code);
if (*error_code != MPI_SUCCESS) return;
*shared_fp = 0;

@ -0,0 +1,37 @@
/* -*- Mode: C; c-basic-offset:4 ; -*- */
/*
*
* Copyright (C) 2004 University of Chicago.
* See COPYRIGHT notice in top-level directory.
*/
#include "ad_nfs.h"
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
/* NFS resize
*
* Note: we resize on all processors to guarantee that all processors
* will have updated cache values. This used to be the generic
* implementation used by the majority of the ADIO implementations.
*/
/* Resize (truncate or extend) the file backing `fd` to `size` bytes.
 *
 * Called on ALL processes (see the note above): each rank issues its own
 * ftruncate() so every client's NFS attribute cache observes the new size.
 *
 * fd         - open ADIO file handle; fd->fd_sys is the POSIX descriptor.
 * size       - new file size in bytes.
 * error_code - set to MPI_SUCCESS, or an MPI_ERR_IO-class code built from
 *              strerror(errno) when ftruncate() fails.
 *
 * NOTE(review): relies on errno/strerror being visible via ad_nfs.h's
 * transitive includes -- confirm <errno.h>/<string.h> are pulled in.
 */
void ADIOI_NFS_Resize(ADIO_File fd, ADIO_Offset size, int *error_code)
{
    int err;
    /* Was "ADIOI_GEN_RESIZE" -- copy-paste leftover from the generic
     * implementation; use the NFS-specific name so error messages report
     * the correct origin. */
    static char myname[] = "ADIOI_NFS_RESIZE";
    err = ftruncate(fd->fd_sys, size);
    /* --BEGIN ERROR HANDLING-- */
    if (err == -1) {
	*error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
					   myname, __LINE__, MPI_ERR_IO,
					   "**io", "**io %s", strerror(errno));
	return;
    }
    /* --END ERROR HANDLING-- */
    *error_code = MPI_SUCCESS;
}

@ -40,7 +40,7 @@ void ADIOI_NFS_Set_shared_fp(ADIO_File fd, ADIO_Offset offset, int *error_code)
fd->shared_fp_fname,
fd->file_system, fd->fns,
ADIO_CREATE | ADIO_RDWR | ADIO_DELETE_ON_CLOSE,
0, MPI_BYTE, MPI_BYTE, 0, MPI_INFO_NULL,
0, MPI_BYTE, MPI_BYTE, MPI_INFO_NULL,
ADIO_PERM_NULL, error_code);
}

@ -0,0 +1,6 @@
Makefile
.deps
*.bb
*.bbg
*.lo
.*-cache

@ -0,0 +1,43 @@
<dir>
<file name="ad_ntfs_open.c" info="1130941880"/>
<file name="ad_ntfs_done.c" info="1130941879"/>
<file name="ad_ntfs_flush.c" info="1120183852"/>
<file name="ad_ntfs_resize.c" info="1131402741"/>
<file name="ad_ntfs_iread.c" info="1130941879"/>
<file name="ad_ntfs.h" info="1120183851"/>
<file name="ad_ntfs_read.c" info="1131402741"/>
<file name="ad_ntfs.c" info="1116890861"/>
<file name="ad_ntfs_wait.c" info="1120183853"/>
<file name="ad_ntfs_iwrite.c" info="1131402741"/>
<file name="ad_ntfs_fcntl.c" info="1130941879"/>
<file name="ad_ntfs_write.c" info="1131402741"/>
<file name="ad_ntfs_close.c" info="1120183851"/>
</dir>
<data>
<fileinfo name="ad_ntfs_open.c">
</fileinfo>
<fileinfo name="ad_ntfs_done.c">
</fileinfo>
<fileinfo name="ad_ntfs_flush.c">
</fileinfo>
<fileinfo name="ad_ntfs_resize.c">
</fileinfo>
<fileinfo name="ad_ntfs_iread.c">
</fileinfo>
<fileinfo name="ad_ntfs.h">
</fileinfo>
<fileinfo name="ad_ntfs_read.c">
</fileinfo>
<fileinfo name="ad_ntfs.c">
</fileinfo>
<fileinfo name="ad_ntfs_wait.c">
</fileinfo>
<fileinfo name="ad_ntfs_iwrite.c">
</fileinfo>
<fileinfo name="ad_ntfs_fcntl.c">
</fileinfo>
<fileinfo name="ad_ntfs_write.c">
</fileinfo>
<fileinfo name="ad_ntfs_close.c">
</fileinfo>
</data>

@ -100,10 +100,20 @@ int ADIOI_NTFS_aio(ADIO_File fd, void *buf, int len, ADIO_Offset offset,
if (wr)
{
/*printf("WriteFile(%d bytes)\n", len);fflush(stdout);*/
ret_val = WriteFile(fd_sys, buf, len, &dwNumWritten, pOvl);
}
else
{
/*
{
ADIO_Fcntl_t fcntl_struct;
int error_code;
ADIO_Fcntl(fd, ADIO_FCNTL_GET_FSIZE, &fcntl_struct, &error_code);
printf("File size a: %d\n", fcntl_struct.fsize);
}
printf("ReadFile(%d bytes)\n", len);fflush(stdout);
*/
ret_val = ReadFile(fd_sys, buf, len, &dwNumRead, pOvl);
}

@ -60,6 +60,15 @@ void ADIOI_NTFS_ReadContig(ADIO_File fd, void *buf, int count,
}
}
}
/*
{
ADIO_Fcntl_t fcntl_struct;
int error_code;
ADIO_Fcntl(fd, ADIO_FCNTL_GET_FSIZE, &fcntl_struct, &error_code);
printf("File size b: %d\n", fcntl_struct.fsize);
}
printf("ReadFile(%d bytes)\n", len);fflush(stdout);
*/
err = ReadFile(fd->fd_sys, buf, len, &dwNumRead, pOvl);
/* --BEGIN ERROR HANDLING-- */
if (err == FALSE)
@ -70,6 +79,7 @@ void ADIOI_NTFS_ReadContig(ADIO_File fd, void *buf, int count,
case ERROR_IO_PENDING:
break;
case ERROR_HANDLE_EOF:
/*printf("EOF error\n");fflush(stdout);*/
SetEvent(pOvl->hEvent);
break;
default:
@ -88,13 +98,16 @@ void ADIOI_NTFS_ReadContig(ADIO_File fd, void *buf, int count,
if (err == FALSE)
{
err = GetLastError();
*error_code = MPIO_Err_create_code(MPI_SUCCESS,
MPIR_ERR_RECOVERABLE, myname,
__LINE__, MPI_ERR_IO, "**io",
"**io %s", ADIOI_NTFS_Strerror(err));
CloseHandle(pOvl->hEvent);
ADIOI_Free(pOvl);
return;
if (err != ERROR_HANDLE_EOF) /* Ignore EOF errors */
{
*error_code = MPIO_Err_create_code(MPI_SUCCESS,
MPIR_ERR_RECOVERABLE, myname,
__LINE__, MPI_ERR_IO, "**io",
"**io %s", ADIOI_NTFS_Strerror(err));
CloseHandle(pOvl->hEvent);
ADIOI_Free(pOvl);
return;
}
}
/* --END ERROR HANDLING-- */
if (!CloseHandle(pOvl->hEvent))
@ -132,6 +145,15 @@ void ADIOI_NTFS_ReadContig(ADIO_File fd, void *buf, int count,
}
}
}
/*
{
ADIO_Fcntl_t fcntl_struct;
int error_code;
ADIO_Fcntl(fd, ADIO_FCNTL_GET_FSIZE, &fcntl_struct, &error_code);
printf("File size c: %d\n", fcntl_struct.fsize);
}
printf("ReadFile(%d bytes)\n", len);fflush(stdout);
*/
err = ReadFile(fd->fd_sys, buf, len, &dwNumRead, pOvl);
/* --BEGIN ERROR HANDLING-- */
if (err == FALSE)
@ -142,6 +164,7 @@ void ADIOI_NTFS_ReadContig(ADIO_File fd, void *buf, int count,
case ERROR_IO_PENDING:
break;
case ERROR_HANDLE_EOF:
/*printf("EOF error\n");fflush(stdout);*/
SetEvent(pOvl->hEvent);
break;
default:
@ -160,13 +183,16 @@ void ADIOI_NTFS_ReadContig(ADIO_File fd, void *buf, int count,
if (err == FALSE)
{
err = GetLastError();
*error_code = MPIO_Err_create_code(MPI_SUCCESS,
MPIR_ERR_RECOVERABLE, myname,
__LINE__, MPI_ERR_IO, "**io",
"**io %s", ADIOI_NTFS_Strerror(err));
CloseHandle(pOvl->hEvent);
ADIOI_Free(pOvl);
return;
if (err != ERROR_HANDLE_EOF) /* Ignore EOF errors */
{
*error_code = MPIO_Err_create_code(MPI_SUCCESS,
MPIR_ERR_RECOVERABLE, myname,
__LINE__, MPI_ERR_IO, "**io",
"**io %s", ADIOI_NTFS_Strerror(err));
CloseHandle(pOvl->hEvent);
ADIOI_Free(pOvl);
return;
}
}
/* --END ERROR HANDLING-- */
if (!CloseHandle(pOvl->hEvent))

@ -29,6 +29,7 @@ void ADIOI_NTFS_Resize(ADIO_File fd, ADIO_Offset size, int *error_code)
return;
}
}
/*printf("setting file length to %d\n", size);fflush(stdout);*/
/* --END ERROR HANDLING-- */
result = SetEndOfFile(fd->fd_sys);
/* --BEGIN ERROR HANDLING-- */

@ -60,6 +60,7 @@ void ADIOI_NTFS_WriteContig(ADIO_File fd, void *buf, int count,
}
}
}
/*printf("WriteFile(%d bytes)\n", len);fflush(stdout);*/
err = WriteFile(fd->fd_sys, buf, len, &dwNumWritten, pOvl);
/* --BEGIN ERROR HANDLING-- */
if (err == FALSE)
@ -126,6 +127,7 @@ void ADIOI_NTFS_WriteContig(ADIO_File fd, void *buf, int count,
}
}
}
/*printf("WriteFile(%d bytes)\n", len);fflush(stdout);*/
err = WriteFile(fd->fd_sys, buf, len, &dwNumWritten, pOvl);
/* --BEGIN ERROR HANDLING-- */
if (err == FALSE)

@ -0,0 +1,3 @@
Makefile
.*-cache
*.lo

@ -0,0 +1,16 @@
<dir>
<file name="ad_panfs.c" info="1116890862"/>
<file name="ad_panfs_open.c" info="1123803225"/>
<file name="ad_panfs_hints.c" info="1116890862"/>
<file name="ad_panfs.h" info="1116890862"/>
</dir>
<data>
<fileinfo name="ad_panfs.c">
</fileinfo>
<fileinfo name="ad_panfs_open.c">
</fileinfo>
<fileinfo name="ad_panfs_hints.c">
</fileinfo>
<fileinfo name="ad_panfs.h">
</fileinfo>
</data>

@ -0,0 +1,6 @@
Makefile
.deps
*.bb
*.bbg
*.lo
.*-cache

@ -0,0 +1,40 @@
<dir>
<file name="ad_pfs.c" info="1118265386"/>
<file name="ad_pfs.h" info="1118265386"/>
<file name="ad_pfs_fcntl.c" info="1118265386"/>
<file name="ad_pfs_wait.c" info="1118265387"/>
<file name="ad_pfs_open.c" info="1123803225"/>
<file name="ad_pfs_done.c" info="1118265386"/>
<file name="ad_pfs_iread.c" info="1118265386"/>
<file name="ad_pfs_hints.c" info="1118265386"/>
<file name="ad_pfs_flush.c" info="1118265386"/>
<file name="ad_pfs_iwrite.c" info="1118265386"/>
<file name="ad_pfs_read.c" info="1118265387"/>
<file name="ad_pfs_write.c" info="1118265387"/>
</dir>
<data>
<fileinfo name="ad_pfs.c">
</fileinfo>
<fileinfo name="ad_pfs.h">
</fileinfo>
<fileinfo name="ad_pfs_fcntl.c">
</fileinfo>
<fileinfo name="ad_pfs_wait.c">
</fileinfo>
<fileinfo name="ad_pfs_open.c">
</fileinfo>
<fileinfo name="ad_pfs_done.c">
</fileinfo>
<fileinfo name="ad_pfs_iread.c">
</fileinfo>
<fileinfo name="ad_pfs_hints.c">
</fileinfo>
<fileinfo name="ad_pfs_flush.c">
</fileinfo>
<fileinfo name="ad_pfs_iwrite.c">
</fileinfo>
<fileinfo name="ad_pfs_read.c">
</fileinfo>
<fileinfo name="ad_pfs_write.c">
</fileinfo>
</data>

@ -0,0 +1,6 @@
Makefile
.deps
*.bb
*.bbg
*.lo
.*-cache

@ -0,0 +1,25 @@
<dir>
<file name="ad_piofs_hints.c" info="1123803225"/>
<file name="ad_piofs.h" info="1118265387"/>
<file name="ad_piofs_open.c" info="1123803225"/>
<file name="ad_piofs_read.c" info="1118265388"/>
<file name="ad_piofs.c" info="1118265387"/>
<file name="ad_piofs_fcntl.c" info="1118265387"/>
<file name="ad_piofs_write.c" info="1118265388"/>
</dir>
<data>
<fileinfo name="ad_piofs_hints.c">
</fileinfo>
<fileinfo name="ad_piofs.h">
</fileinfo>
<fileinfo name="ad_piofs_open.c">
</fileinfo>
<fileinfo name="ad_piofs_read.c">
</fileinfo>
<fileinfo name="ad_piofs.c">
</fileinfo>
<fileinfo name="ad_piofs_fcntl.c">
</fileinfo>
<fileinfo name="ad_piofs_write.c">
</fileinfo>
</data>

@ -0,0 +1,6 @@
Makefile
.deps
*.bb
*.bbg
*.lo
.*-cache

@ -0,0 +1,37 @@
<dir>
<file name="ad_pvfs.c" info="1118265388"/>
<file name="ad_pvfs_close.c" info="1118265388"/>
<file name="ad_pvfs_hints.c" info="1118265388"/>
<file name="ad_pvfs_delete.c" info="1118265388"/>
<file name="ad_pvfs_open.c" info="1123803226"/>
<file name="ad_pvfs_resize.c" info="1136483608"/>
<file name="ad_pvfs.h" info="1118265388"/>
<file name="ad_pvfs_read.c" info="1149871364"/>
<file name="ad_pvfs_write.c" info="1149871364"/>
<file name="ad_pvfs_fcntl.c" info="1118265388"/>
<file name="ad_pvfs_flush.c" info="1136483608"/>
</dir>
<data>
<fileinfo name="ad_pvfs.c">
</fileinfo>
<fileinfo name="ad_pvfs_close.c">
</fileinfo>
<fileinfo name="ad_pvfs_hints.c">
</fileinfo>
<fileinfo name="ad_pvfs_delete.c">
</fileinfo>
<fileinfo name="ad_pvfs_open.c">
</fileinfo>
<fileinfo name="ad_pvfs_resize.c">
</fileinfo>
<fileinfo name="ad_pvfs.h">
</fileinfo>
<fileinfo name="ad_pvfs_read.c">
</fileinfo>
<fileinfo name="ad_pvfs_write.c">
</fileinfo>
<fileinfo name="ad_pvfs_fcntl.c">
</fileinfo>
<fileinfo name="ad_pvfs_flush.c">
</fileinfo>
</data>

@ -24,7 +24,7 @@ void ADIOI_PVFS_Flush(ADIO_File fd, int *error_code)
if (rank == fd->hints->ranklist[0]) {
err = pvfs_fsync(fd->fd_sys);
}
MPI_Bcast(&err, 1, MPI_INT, 0, fd->comm);
MPI_Bcast(&err, 1, MPI_INT, fd->hints->ranklist[0], fd->comm);
if (err == -1) {
*error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,

@ -541,7 +541,7 @@ void ADIOI_PVFS_ReadStridedListIO(ADIO_File fd, void *buf, int count,
max_mem_list = mem_list_count;
if (max_file_list < file_list_count)
max_file_list = file_list_count;
if (max_mem_list == max_mem_list == MAX_ARRAY_SIZE)
if (max_mem_list == MAX_ARRAY_SIZE)
break;
} /* while (size_read < bufsize) */

@ -20,7 +20,7 @@ void ADIOI_PVFS_Resize(ADIO_File fd, ADIO_Offset size, int *error_code)
if (rank == fd->hints->ranklist[0]) {
err = pvfs_ftruncate64(fd->fd_sys, size);
}
MPI_Bcast(&err, 1, MPI_INT, 0, fd->comm);
MPI_Bcast(&err, 1, MPI_INT, fd->hints->ranklist[0], fd->comm);
if (err == -1) {
*error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,

@ -881,7 +881,7 @@ void ADIOI_PVFS_WriteStridedListIO(ADIO_File fd, void *buf, int count,
max_mem_list = mem_list_count;
if (max_file_list < file_list_count)
max_file_list = file_list_count;
if (max_mem_list == max_mem_list == MAX_ARRAY_SIZE)
if (max_mem_list == MAX_ARRAY_SIZE)
break;
} /* while (size_wrote < bufsize) */

@ -0,0 +1,6 @@
Makefile
.deps
*.bb
*.bbg
*.lo
.*-cache

@ -0,0 +1,43 @@
<dir>
<file name="ad_pvfs2_common.h" info="1118265389"/>
<file name="ad_pvfs2.c" info="1118265389"/>
<file name="ad_pvfs2_write.c" info="1154553244"/>
<file name="ad_pvfs2_fcntl.c" info="1118265390"/>
<file name="ad_pvfs2_close.c" info="1137022455"/>
<file name="ad_pvfs2_common.c" info="1150128393"/>
<file name="ad_pvfs2_hints.c" info="1148679771"/>
<file name="ad_pvfs2_read.c" info="1154553241"/>
<file name="ad_pvfs2_open.c" info="1150128393"/>
<file name="ad_pvfs2_flush.c" info="1136483610"/>
<file name="ad_pvfs2.h" info="1118265389"/>
<file name="ad_pvfs2_delete.c" info="1118265390"/>
<file name="ad_pvfs2_resize.c" info="1136420074"/>
</dir>
<data>
<fileinfo name="ad_pvfs2_common.h">
</fileinfo>
<fileinfo name="ad_pvfs2.c">
</fileinfo>
<fileinfo name="ad_pvfs2_write.c">
</fileinfo>
<fileinfo name="ad_pvfs2_fcntl.c">
</fileinfo>
<fileinfo name="ad_pvfs2_close.c">
</fileinfo>
<fileinfo name="ad_pvfs2_common.c">
</fileinfo>
<fileinfo name="ad_pvfs2_hints.c">
</fileinfo>
<fileinfo name="ad_pvfs2_read.c">
</fileinfo>
<fileinfo name="ad_pvfs2_open.c">
</fileinfo>
<fileinfo name="ad_pvfs2_flush.c">
</fileinfo>
<fileinfo name="ad_pvfs2.h">
</fileinfo>
<fileinfo name="ad_pvfs2_delete.c">
</fileinfo>
<fileinfo name="ad_pvfs2_resize.c">
</fileinfo>
</data>

@ -10,6 +10,7 @@
void ADIOI_PVFS2_Close(ADIO_File fd, int *error_code)
{
ADIOI_Free(fd->fs_ptr);
fd->fs_ptr = NULL;
/* PVFS2 doesn't have a 'close', but MPI-IO semantics dictate that we
* ensure all data has been flushed.
*/

@ -36,10 +36,9 @@ void ADIOI_PVFS2_Flush(ADIO_File fd, int *error_code)
/* io_worker computed in ADIO_Open */
if (rank == fd->hints->ranklist[0]) {
ret = PVFS_sys_flush(pvfs_fs->object_ref, &(pvfs_fs->credentials));
MPI_Bcast(&ret, 1, MPI_INT, 0, fd->comm);
} else {
MPI_Bcast(&ret, 1, MPI_INT, 0, fd->comm);
}
MPI_Bcast(&ret, 1, MPI_INT, fd->hints->ranklist[0], fd->comm);
/* --BEGIN ERROR HANDLING-- */
if (ret != 0) {
*error_code = MPIO_Err_create_code(MPI_SUCCESS,

@ -19,6 +19,12 @@ void ADIOI_PVFS2_SetInfo(ADIO_File fd, MPI_Info users_info, int *error_code)
MPI_Info_create(&(fd->info));
MPI_Info_set(fd->info, "romio_pvfs2_debugmask", "0");
fd->hints->fs_hints.pvfs2.debugmask = 0;
MPI_Info_set(fd->info, "striping_factor", "0");
fd->hints->striping_factor = 0;
MPI_Info_set(fd->info, "striping_unit", "0");
fd->hints->striping_unit = 0;
/* any user-provided hints? */
if (users_info != MPI_INFO_NULL) {
@ -62,6 +68,30 @@ void ADIOI_PVFS2_SetInfo(ADIO_File fd, MPI_Info users_info, int *error_code)
MPI_Info_set(fd->info, "striping_factor", value);
}
/* the striping unit */
MPI_Info_get(users_info, "striping_unit",
MPI_MAX_INFO_VAL, value, &flag);
if (flag) {
tmp_value = fd->hints->striping_unit = atoi(value);
MPI_Bcast(&tmp_value, 1, MPI_INT, 0, fd->comm);
/* --BEGIN ERROR HANDLING-- */
if (tmp_value != fd->hints->striping_unit) {
MPIO_ERR_CREATE_CODE_INFO_NOT_SAME(myname,
"striping_unit",
error_code);
return;
}
/* --END ERROR HANDLING-- */
MPI_Info_set(fd->info, "striping_unit", value);
}
/* distribution name */
MPI_Info_get(users_info, "romio_pvfs2_distribution_name",
MPI_MAX_INFO_VAL, value, &flag);
if (flag) {
}
ADIOI_Free(value);
}
}

@ -29,7 +29,8 @@ typedef struct open_status_s open_status;
* handle to everyone else in the communicator
*/
static void fake_an_open(PVFS_fs_id fs_id, char *pvfs_name, int access_mode,
int nr_datafiles, ADIOI_PVFS2_fs *pvfs2_fs,
int nr_datafiles, int strip_size,
ADIOI_PVFS2_fs *pvfs2_fs,
open_status *o_status)
{
int ret;
@ -37,10 +38,16 @@ static void fake_an_open(PVFS_fs_id fs_id, char *pvfs_name, int access_mode,
PVFS_sysresp_getparent resp_getparent;
PVFS_sysresp_create resp_create;
PVFS_sys_attr attribs;
PVFS_sys_dist* dist;
ADIOI_PVFS2_makeattribs(&attribs);
attribs.dfile_count = nr_datafiles;
if (nr_datafiles > 0 ) {
attribs.dfile_count = nr_datafiles;
attribs.mask |= PVFS_ATTR_SYS_DFILE_COUNT;
}
dist = NULL;
memset(&resp_lookup, 0, sizeof(resp_lookup));
memset(&resp_getparent, 0, sizeof(resp_getparent));
memset(&resp_create, 0, sizeof(resp_create));
@ -49,17 +56,34 @@ static void fake_an_open(PVFS_fs_id fs_id, char *pvfs_name, int access_mode,
ret = PVFS_sys_lookup(fs_id, pvfs_name,
&(pvfs2_fs->credentials), &resp_lookup, PVFS2_LOOKUP_LINK_FOLLOW);
if ( (ret < 0) ) { /* XXX: check what the error was */
if (access_mode & MPI_MODE_CREATE) {
if (access_mode & ADIO_CREATE) {
ret = PVFS_sys_getparent(fs_id, pvfs_name,
&(pvfs2_fs->credentials), &resp_getparent);
if (ret < 0) {
FPRINTF(stderr, "pvfs_sys_getparent returns with %d\n", ret);
o_status->error = ret;
return;
}
ret = PVFS_sys_create(resp_getparent.basename,
}
/* Set the distribution strip size if specified */
if (0 < strip_size) {
/* Note that the distribution is hardcoded here */
dist = PVFS_sys_dist_lookup("simple_stripe");
ret = PVFS_sys_dist_setparam(dist,
"strip_size",
&strip_size);
if (ret < 0)
{
FPRINTF(stderr,
"pvfs_sys_dist_setparam returns with %d\n", ret);
o_status->error = ret;
}
}
/* Perform file creation */
ret = PVFS_sys_create(resp_getparent.basename,
resp_getparent.parent_ref, attribs,
&(pvfs2_fs->credentials), NULL, &resp_create);
&(pvfs2_fs->credentials), dist, &resp_create);
if (ret < 0) { /* XXX: should only do this for EEXISTS */
ret = PVFS_sys_lookup(fs_id, pvfs_name,
@ -79,7 +103,7 @@ static void fake_an_open(PVFS_fs_id fs_id, char *pvfs_name, int access_mode,
o_status->error = ret;
return;
}
} else if (access_mode & MPI_MODE_EXCL) {
} else if (access_mode & ADIO_EXCL) {
/* lookup should not succeed if opened with EXCL */
o_status->error = -1; /* XXX: what should it be? */
return;
@ -92,10 +116,14 @@ static void fake_an_open(PVFS_fs_id fs_id, char *pvfs_name, int access_mode,
}
/* if MPI_File_open was called with MPI_MODE_CREATE|MPI_MODE_EXCL, then we have
* a little problem: our usual open-and-broadcast test will not work because
* only one person (the first aggregator) will perform the open w/ CREATE|EXCL
*/
/* ADIOI_PVFS2_Open:
* one process opens (or creates) the file, then broadcasts the result to the
* remaining processors.
*
* ADIO_Open used to perform an optimization when MPI_MODE_CREATE (and before
* that, MPI_MODE_EXCL) was set. Because PVFS2 handles file lookup and
* creation more scalably than other file systems, ADIO_Open now skips any
* special handling when CREATE is set. */
void ADIOI_PVFS2_Open(ADIO_File fd, int *error_code)
{
int rank, ret;
@ -109,7 +137,7 @@ void ADIOI_PVFS2_Open(ADIO_File fd, int *error_code)
* doing the error checking. define a struct for both the object reference
* and the error code to broadcast to all the processors */
open_status o_status;
open_status o_status = {0, {0, 0}};
MPI_Datatype open_status_type;
MPI_Datatype types[2] = {MPI_INT, MPI_BYTE};
int lens[2] = {1, sizeof(PVFS_object_ref)};
@ -137,12 +165,11 @@ void ADIOI_PVFS2_Open(ADIO_File fd, int *error_code)
return;
}
/* currently everyone gets their own credentials */
ADIOI_PVFS2_makecredentials(&(pvfs2_fs->credentials));
/* we only have to do this on one node. we'll broadcast the handle to
* everyone else in the communicator */
if (rank == fd->hints->ranklist[0]) {
/* one process resolves name and will later bcast to others */
if (rank == fd->hints->ranklist[0] && fd->fs_ptr == NULL) {
/* given the filename, figure out which pvfs filesystem it is on */
ret = PVFS_util_resolve(fd->filename, &cur_fs,
pvfs_path, PVFS_NAME_MAX);
@ -152,38 +179,15 @@ void ADIOI_PVFS2_Open(ADIO_File fd, int *error_code)
o_status.error = -1;
} else {
fake_an_open(cur_fs, pvfs_path,
fd->access_mode, fd->hints->striping_factor,
pvfs2_fs, &o_status);
fd->access_mode, fd->hints->striping_factor,
fd->hints->striping_unit,
pvfs2_fs, &o_status);
}
}
/* NOTE: if MPI_MODE_EXCL was set, ADIO_Open will call
* ADIOI_PVFS2_Open from just one processor. This really confuses MPI when
* one procesor on a communicator broadcasts to no listners.
*
* Since ADIO_Open will close the file and call ADIOI_PVFS2_Open again (but
* w/o EXCL), we can bail out right here and return early */
if ((fd->access_mode & MPI_MODE_EXCL)) {
if (o_status.error == 0)
{
*error_code = MPI_SUCCESS;
fd->fs_ptr = pvfs2_fs;
}
else
{
/* --BEGIN ERROR HANDLING-- */
ADIOI_Free(pvfs2_fs);
*error_code = MPIO_Err_create_code(MPI_SUCCESS,
MPIR_ERR_RECOVERABLE,
myname, __LINE__,
ADIOI_PVFS2_error_convert(o_status.error),
"Unknown error", 0);
/* TODO: FIX STRING */
/* --END ERROR HANDLING-- */
}
MPI_Type_free(&open_status_type);
return;
}
/* store credentials and object reference in fd */
pvfs2_fs->object_ref = o_status.object_ref;
fd->fs_ptr = pvfs2_fs;
}
/* broadcast status and (possibly valid) object reference */
MPI_Address(&o_status.error, &offsets[0]);
@ -192,7 +196,13 @@ void ADIOI_PVFS2_Open(ADIO_File fd, int *error_code)
MPI_Type_struct(2, lens, offsets, types, &open_status_type);
MPI_Type_commit(&open_status_type);
MPI_Bcast(MPI_BOTTOM, 1, open_status_type, 0, fd->comm);
/* Assertion: if we hit this Bcast, then all processes collectively
* called this open.
*
* That's because deferred open never happens with PVFS2.
*/
MPI_Bcast(MPI_BOTTOM, 1, open_status_type, fd->hints->ranklist[0],
fd->comm);
MPI_Type_free(&open_status_type);
/* --BEGIN ERROR HANDLING-- */

@ -1,5 +1,6 @@
/* -*- Mode: C; c-basic-offset:4 ; -*- */
/*
/* -*- Mode: C; c-basic-offset:4 ; -*-
* vim: ts=8 sts=4 sw=4 noexpandtab
*
* Copyright (C) 1997 University of Chicago.
* See COPYRIGHT notice in top-level directory.
*/
@ -64,7 +65,7 @@ void ADIOI_PVFS2_ReadContig(ADIO_File fd, void *buf, int count,
myname, __LINE__,
ADIOI_PVFS2_error_convert(ret),
"Error in PVFS_sys_read", 0);
return;
goto fn_exit;
}
/* --END ERROR HANDLING-- */
@ -79,6 +80,9 @@ void ADIOI_PVFS2_ReadContig(ADIO_File fd, void *buf, int count,
#endif
*error_code = MPI_SUCCESS;
fn_exit:
PVFS_Request_free(&mem_req);
PVFS_Request_free(&file_req);
return;
}
@ -97,7 +101,7 @@ void ADIOI_PVFS2_ReadStrided(ADIO_File fd, void *buf, int count,
int filetype_size, etype_size, buftype_size;
MPI_Aint filetype_extent, buftype_extent;
int buf_count, buftype_is_contig, filetype_is_contig;
ADIO_Offset off, disp, start_off;
ADIO_Offset off, disp, start_off, initial_off;
int flag, st_frd_size, st_n_filetypes;
int mem_list_count, file_list_count;
@ -129,6 +133,17 @@ void ADIOI_PVFS2_ReadStrided(ADIO_File fd, void *buf, int count,
ADIOI_Datatype_iscontig(datatype, &buftype_is_contig);
ADIOI_Datatype_iscontig(fd->filetype, &filetype_is_contig);
/* the HDF5 tests showed a bug in this list processing code (see many many
* lines down below). We added a workaround, but common HDF5 file types
* are actually contiguous and do not need the expensive workarond */
if (!filetype_is_contig) {
flat_file = ADIOI_Flatlist;
while (flat_buf->type != fd->filetype) flat_file = flat_file->next;
if (flat_file->count == 1)
filetype_is_contig = 1;
}
MPI_Type_size(fd->filetype, &filetype_size);
if ( ! filetype_size ) {
*error_code = MPI_SUCCESS;
@ -211,6 +226,8 @@ void ADIOI_PVFS2_ReadStrided(ADIO_File fd, void *buf, int count,
"Error in PVFS_sys_read", 0);
goto error_state;
}
PVFS_Request_free(&mem_req);
PVFS_Request_free(&file_req);
total_bytes_read += resp_io.total_completed;
/* --END ERROR HANDLING-- */
@ -251,6 +268,8 @@ void ADIOI_PVFS2_ReadStrided(ADIO_File fd, void *buf, int count,
while (flat_file->type != fd->filetype) flat_file = flat_file->next;
disp = fd->disp;
initial_off = offset;
/* for each case - ADIO_Individual pointer or explicit, find the file
offset in bytes (offset), n_filetypes (how many filetypes into
@ -418,6 +437,9 @@ void ADIOI_PVFS2_ReadStrided(ADIO_File fd, void *buf, int count,
goto error_state;
}
/* --END ERROR HANDING-- */
PVFS_Request_free(&mem_req);
PVFS_Request_free(&file_req);
total_bytes_read += resp_io.total_completed;
mem_offsets += mem_lengths;
@ -487,6 +509,8 @@ void ADIOI_PVFS2_ReadStrided(ADIO_File fd, void *buf, int count,
goto error_state;
}
/* --END ERROR HANDLING-- */
PVFS_Request_free(&mem_req);
PVFS_Request_free(&file_req);
total_bytes_read += resp_io.total_completed;
}
}
@ -651,10 +675,37 @@ void ADIOI_PVFS2_ReadStrided(ADIO_File fd, void *buf, int count,
max_mem_list = mem_list_count;
if (max_file_list < file_list_count)
max_file_list = file_list_count;
if (max_mem_list == max_mem_list == MAX_ARRAY_SIZE)
if (max_mem_list == MAX_ARRAY_SIZE)
break;
} /* while (size_read < bufsize) */
/* one last check before we actually carry out the operation:
* this code has hard-to-fix bugs when a noncontiguous file type has
* such large pieces that the sum of the lengths of the memory type is
* not larger than one of those pieces (and vice versa for large memory
* types and many pices of file types. In these cases, give up and
* fall back to naive reads and writes. The testphdf5 test created a
* type with two very large memory regions and 600 very small file
* regions. The same test also created a type with one very large file
* region and many (700) very small memory regions. both cases caused
* problems for this code */
if ( ( (file_list_count == 1) &&
(new_file_read < flat_file->blocklens[0] ) ) ||
((mem_list_count == 1) &&
(new_buffer_read < flat_buf->blocklens[0]) ) ||
((file_list_count == MAX_ARRAY_SIZE) &&
(new_file_read < flat_buf->blocklens[0]) ) ||
( (mem_list_count == MAX_ARRAY_SIZE) &&
(new_buffer_read < flat_file->blocklens[0])) )
{
ADIOI_Delete_flattened(datatype);
ADIOI_GEN_ReadStrided_naive(fd, buf, count, datatype,
file_ptr_type, initial_off, status, error_code);
return;
}
mem_offsets = (PVFS_size*)ADIOI_Malloc(max_mem_list*sizeof(PVFS_size));
mem_lengths = (int *)ADIOI_Malloc(max_mem_list*sizeof(int));
file_offsets = (int64_t *)ADIOI_Malloc(max_file_list*sizeof(int64_t));
@ -861,6 +912,8 @@ void ADIOI_PVFS2_ReadStrided(ADIO_File fd, void *buf, int count,
"Error in PVFS_sys_read", 0);
}
/* --END ERROR HANDLING-- */
PVFS_Request_free(&mem_req);
PVFS_Request_free(&file_req);
total_bytes_read += resp_io.total_completed;
size_read += new_buffer_read;
start_k = k;

@ -35,9 +35,9 @@ void ADIOI_PVFS2_Resize(ADIO_File fd, ADIO_Offset size, int *error_code)
if (rank == fd->hints->ranklist[0]) {
ret = PVFS_sys_truncate(pvfs_fs->object_ref,
size, &(pvfs_fs->credentials));
MPI_Bcast(&ret, 1, MPI_INT, 0, fd->comm);
MPI_Bcast(&ret, 1, MPI_INT, fd->hints->ranklist[0], fd->comm);
} else {
MPI_Bcast(&ret, 1, MPI_INT, 0, fd->comm);
MPI_Bcast(&ret, 1, MPI_INT, fd->hints->ranklist[0], fd->comm);
}
/* --BEGIN ERROR HANDLING-- */
if (ret != 0) {

@ -60,7 +60,7 @@ void ADIOI_PVFS2_WriteContig(ADIO_File fd, void *buf, int count,
myname, __LINE__,
ADIOI_PVFS2_error_convert(ret),
"Error in PVFS_sys_write", 0);
return;
goto fn_exit;
}
/* --END ERROR HANDLING-- */
@ -76,7 +76,7 @@ void ADIOI_PVFS2_WriteContig(ADIO_File fd, void *buf, int count,
myname, __LINE__,
ADIOI_PVFS2_error_convert(ret),
"Error in PVFS_sys_write", 0);
return;
goto fn_exit;
}
/* --END ERROR HANDLING-- */
fd->fp_ind += (int)resp_io.total_completed;
@ -86,6 +86,9 @@ void ADIOI_PVFS2_WriteContig(ADIO_File fd, void *buf, int count,
MPIR_Status_set_bytes(status, datatype, (int)resp_io.total_completed);
#endif
*error_code = MPI_SUCCESS;
fn_exit:
PVFS_Request_free(&file_req);
PVFS_Request_free(&mem_req);
return;
}
@ -108,7 +111,7 @@ void ADIOI_PVFS2_WriteStrided(ADIO_File fd, void *buf, int count,
int filetype_size, etype_size, buftype_size;
MPI_Aint filetype_extent, buftype_extent;
int buf_count, buftype_is_contig, filetype_is_contig;
ADIO_Offset off, disp, start_off;
ADIO_Offset off, disp, start_off, initial_off;
int flag, st_fwr_size, st_n_filetypes;
int err_flag=0;
@ -134,7 +137,8 @@ void ADIOI_PVFS2_WriteStrided(ADIO_File fd, void *buf, int count,
MPI_Offset total_bytes_written=0;
static char myname[] = "ADIOI_PVFS2_WRITESTRIDED";
/* TODO: increase this to the maximum value */
/* note: don't increase this: several parts of PVFS2 now
* assume this limit*/
#define MAX_ARRAY_SIZE 64
/* --BEGIN ERROR HANDLING-- */
@ -151,6 +155,16 @@ void ADIOI_PVFS2_WriteStrided(ADIO_File fd, void *buf, int count,
ADIOI_Datatype_iscontig(datatype, &buftype_is_contig);
ADIOI_Datatype_iscontig(fd->filetype, &filetype_is_contig);
/* the HDF5 tests showed a bug in this list processing code (see many many
* lines down below). We added a workaround, but common HDF5 file types
* are actually contiguous and do not need the expensive workarond */
if (!filetype_is_contig) {
flat_file = ADIOI_Flatlist;
while (flat_buf->type != fd->filetype) flat_file = flat_file->next;
if (flat_file->count == 1)
filetype_is_contig = 1;
}
MPI_Type_size(fd->filetype, &filetype_size);
if ( ! filetype_size ) {
*error_code = MPI_SUCCESS;
@ -210,14 +224,13 @@ void ADIOI_PVFS2_WriteStrided(ADIO_File fd, void *buf, int count,
if (!(b_blks_wrote % MAX_ARRAY_SIZE) ||
(b_blks_wrote == total_blks_to_write)) {
/* in the case of the last read list call,
/* in the case of the last write list call,
adjust mem_list_count */
if (b_blks_wrote == total_blks_to_write) {
mem_list_count = total_blks_to_write % MAX_ARRAY_SIZE;
/* in case last read list call fills max arrays */
/* in case last write list call fills max arrays */
if (!mem_list_count) mem_list_count = MAX_ARRAY_SIZE;
}
err_flag = PVFS_Request_hindexed(mem_list_count,
mem_lengths, mem_offsets,
PVFS_BYTE, &mem_req);
@ -252,7 +265,7 @@ void ADIOI_PVFS2_WriteStrided(ADIO_File fd, void *buf, int count,
&resp_io);
total_bytes_written += resp_io.total_completed;
/* in the case of error or the last read list call,
/* in the case of error or the last write list call,
* leave here */
/* --BEGIN ERROR HANDLING-- */
if (err_flag) {
@ -268,6 +281,8 @@ void ADIOI_PVFS2_WriteStrided(ADIO_File fd, void *buf, int count,
file_offsets += file_lengths;
file_lengths = 0;
PVFS_Request_free(&mem_req);
PVFS_Request_free(&file_req);
}
} /* for (i=0; i<flat_buf->count; i++) */
j++;
@ -300,6 +315,7 @@ void ADIOI_PVFS2_WriteStrided(ADIO_File fd, void *buf, int count,
while (flat_file->type != fd->filetype) flat_file = flat_file->next;
disp = fd->disp;
initial_off = offset;
/* for each case - ADIO_Individual pointer or explicit, find offset
(file offset in bytes), n_filetypes (how many filetypes into file
@ -368,7 +384,7 @@ void ADIOI_PVFS2_WriteStrided(ADIO_File fd, void *buf, int count,
mem_list_count = 1;
/* determine how many blocks in file to read */
/* determine how many blocks in file to write */
f_data_wrote = ADIOI_MIN(st_fwr_size, bufsize);
total_blks_to_write = 1;
j++;
@ -387,7 +403,7 @@ void ADIOI_PVFS2_WriteStrided(ADIO_File fd, void *buf, int count,
mem_offsets = buf;
mem_lengths = 0;
/* if at least one full readlist, allocate file arrays
/* if at least one full writelist, allocate file arrays
at max array size and don't free until very end */
if (n_write_lists) {
file_offsets = (int64_t*)ADIOI_Malloc(MAX_ARRAY_SIZE*
@ -395,7 +411,7 @@ void ADIOI_PVFS2_WriteStrided(ADIO_File fd, void *buf, int count,
file_lengths = (int32_t*)ADIOI_Malloc(MAX_ARRAY_SIZE*
sizeof(int32_t));
}
/* if there's no full readlist allocate file arrays according
/* if there's no full writelist allocate file arrays according
to needed size (extra_blks) */
else {
file_offsets = (int64_t*)ADIOI_Malloc(extra_blks*
@ -473,9 +489,12 @@ void ADIOI_PVFS2_WriteStrided(ADIO_File fd, void *buf, int count,
mem_offsets += mem_lengths;
mem_lengths = 0;
PVFS_Request_free(&file_req);
PVFS_Request_free(&mem_req);
} /* for (i=0; i<n_write_lists; i++) */
/* for file arrays smaller than MAX_ARRAY_SIZE (last read_list call) */
/* for file arrays smaller than MAX_ARRAY_SIZE (last write_list call) */
if (extra_blks) {
file_list_count = extra_blks;
if(!i) {
@ -542,6 +561,8 @@ void ADIOI_PVFS2_WriteStrided(ADIO_File fd, void *buf, int count,
}
/* --END ERROR HANDLING-- */
total_bytes_written += resp_io.total_completed;
PVFS_Request_free(&mem_req);
PVFS_Request_free(&file_req);
}
}
else {
@ -574,8 +595,8 @@ void ADIOI_PVFS2_WriteStrided(ADIO_File fd, void *buf, int count,
(new_buffer_write < bufsize-size_wrote)) {
/* find mem_list_count and file_list_count such that both are
less than MAX_ARRAY_SIZE, the sum of their lengths are
equal, and the sum of all the data read and data to be
read in the next immediate read list is less than
equal, and the sum of all the data written and data to be
written in the next immediate write list is less than
bufsize */
if(mem_list_count) {
if((new_buffer_write + flat_buf->blocklens[k] +
@ -604,7 +625,7 @@ void ADIOI_PVFS2_WriteStrided(ADIO_File fd, void *buf, int count,
new_file_write = 0;
file_list_count = 0;
while ((file_list_count < MAX_ARRAY_SIZE) &&
(new_file_write < new_buffer_write)) {
(new_file_write < new_buffer_write)) {
if(file_list_count) {
if((new_file_write + flat_file->blocklens[j]) >
new_buffer_write) {
@ -706,10 +727,37 @@ void ADIOI_PVFS2_WriteStrided(ADIO_File fd, void *buf, int count,
max_mem_list = mem_list_count;
if (max_file_list < file_list_count)
max_file_list = file_list_count;
if (max_mem_list == max_mem_list == MAX_ARRAY_SIZE)
if (max_mem_list == MAX_ARRAY_SIZE)
break;
} /* while (size_wrote < bufsize) */
/* one last check before we actually carry out the operation:
* this code has hard-to-fix bugs when a noncontiguous file type has
* such large pieces that the sum of the lengths of the memory type is
* not larger than one of those pieces (and vice versa for large memory
* types and many pices of file types. In these cases, give up and
* fall back to naive reads and writes. The testphdf5 test created a
* type with two very large memory regions and 600 very small file
* regions. The same test also created a type with one very large file
* region and many (700) very small memory regions. both cases caused
* problems for this code */
if ( ( (file_list_count == 1) &&
(new_file_write < flat_file->blocklens[0] ) ) ||
((mem_list_count == 1) &&
(new_buffer_write < flat_buf->blocklens[0]) ) ||
((file_list_count == MAX_ARRAY_SIZE) &&
(new_file_write < flat_buf->blocklens[0]) ) ||
( (mem_list_count == MAX_ARRAY_SIZE) &&
(new_buffer_write < flat_file->blocklens[0])) )
{
ADIOI_Delete_flattened(datatype);
ADIOI_GEN_WriteStrided_naive(fd, buf, count, datatype,
file_ptr_type, initial_off, status, error_code);
return;
}
mem_offsets = (PVFS_size*)ADIOI_Malloc(max_mem_list*sizeof(PVFS_size));
mem_lengths = (int *)ADIOI_Malloc(max_mem_list*sizeof(int));
file_offsets = (int64_t *)ADIOI_Malloc(max_file_list*sizeof(int64_t));
@ -736,8 +784,8 @@ void ADIOI_PVFS2_WriteStrided(ADIO_File fd, void *buf, int count,
(new_buffer_write < bufsize-size_wrote)) {
/* find mem_list_count and file_list_count such that both are
less than MAX_ARRAY_SIZE, the sum of their lengths are
equal, and the sum of all the data read and data to be
read in the next immediate read list is less than
equal, and the sum of all the data written and data to be
written in the next immediate write list is less than
bufsize */
if(mem_list_count) {
if((new_buffer_write + flat_buf->blocklens[k] +
@ -824,7 +872,7 @@ void ADIOI_PVFS2_WriteStrided(ADIO_File fd, void *buf, int count,
} /* while ((mem_list_count < MAX_ARRAY_SIZE) &&
(new_buffer_write < bufsize-size_wrote)) */
/* fills the allocated readlist arrays */
/* fills the allocated writelist arrays */
k = start_k;
j = start_j;
for (i=0; i<mem_list_count; i++) {
@ -931,6 +979,8 @@ void ADIOI_PVFS2_WriteStrided(ADIO_File fd, void *buf, int count,
total_bytes_written += resp_io.total_completed;
start_k = k;
start_j = j;
PVFS_Request_free(&mem_req);
PVFS_Request_free(&file_req);
} /* while (size_wrote < bufsize) */
ADIOI_Free(mem_offsets);
ADIOI_Free(mem_lengths);

@ -0,0 +1,6 @@
Makefile
.deps
*.bb
*.bbg
*.lo
.*-cache

@ -0,0 +1,19 @@
<dir>
<file name="ad_sfs_flush.c" info="1118265391"/>
<file name="ad_sfs_fcntl.c" info="1118265391"/>
<file name="ad_sfs.c" info="1118265390"/>
<file name="ad_sfs.h" info="1118265390"/>
<file name="ad_sfs_open.c" info="1118265391"/>
</dir>
<data>
<fileinfo name="ad_sfs_flush.c">
</fileinfo>
<fileinfo name="ad_sfs_fcntl.c">
</fileinfo>
<fileinfo name="ad_sfs.c">
</fileinfo>
<fileinfo name="ad_sfs.h">
</fileinfo>
<fileinfo name="ad_sfs_open.c">
</fileinfo>
</data>

@ -0,0 +1,8 @@
Makefile
.deps
*.bb
*.bbg
.libs
.libstamp*
*.lo
.*-cache

@ -0,0 +1,64 @@
<dir>
<file name="ad_testfs_rdcoll.c" info="1118265392"/>
<file name="ad_testfs_seek.c" info="1118265392"/>
<file name="ad_testfs_done.c" info="1118265391"/>
<file name="ad_testfs_close.c" info="1118265391"/>
<file name="ad_testfs_write.c" info="1116890865"/>
<file name="ad_testfs_wrcoll.c" info="1118265393"/>
<file name="ad_testfs.h" info="1118265391"/>
<file name="ad_testfs_delete.c" info="1118265391"/>
<file name="ad_testfs_open.c" info="1118265392"/>
<file name="ad_testfs.c" info="1118265391"/>
<file name="ad_testfs_wait.c" info="1118265393"/>
<file name="ad_testfs_iwrite.c" info="1118265392"/>
<file name="ad_testfs_resize.c" info="1118265392"/>
<file name="ad_testfs_getsh.c" info="1118265392"/>
<file name="ad_testfs_fcntl.c" info="1118265391"/>
<file name="ad_testfs_setsh.c" info="1118265393"/>
<file name="ad_testfs_read.c" info="1116890865"/>
<file name="ad_testfs_flush.c" info="1118265392"/>
<file name="ad_testfs_hints.c" info="1118265392"/>
<file name="ad_testfs_iread.c" info="1118265392"/>
</dir>
<data>
<fileinfo name="ad_testfs_rdcoll.c">
</fileinfo>
<fileinfo name="ad_testfs_seek.c">
</fileinfo>
<fileinfo name="ad_testfs_done.c">
</fileinfo>
<fileinfo name="ad_testfs_close.c">
</fileinfo>
<fileinfo name="ad_testfs_write.c">
</fileinfo>
<fileinfo name="ad_testfs_wrcoll.c">
</fileinfo>
<fileinfo name="ad_testfs.h">
</fileinfo>
<fileinfo name="ad_testfs_delete.c">
</fileinfo>
<fileinfo name="ad_testfs_open.c">
</fileinfo>
<fileinfo name="ad_testfs.c">
</fileinfo>
<fileinfo name="ad_testfs_wait.c">
</fileinfo>
<fileinfo name="ad_testfs_iwrite.c">
</fileinfo>
<fileinfo name="ad_testfs_resize.c">
</fileinfo>
<fileinfo name="ad_testfs_getsh.c">
</fileinfo>
<fileinfo name="ad_testfs_fcntl.c">
</fileinfo>
<fileinfo name="ad_testfs_setsh.c">
</fileinfo>
<fileinfo name="ad_testfs_read.c">
</fileinfo>
<fileinfo name="ad_testfs_flush.c">
</fileinfo>
<fileinfo name="ad_testfs_hints.c">
</fileinfo>
<fileinfo name="ad_testfs_iread.c">
</fileinfo>
</data>

@ -0,0 +1,8 @@
Makefile
.deps
*.bb
*.bbg
.libs
.libstamp*
*.lo
.*-cache

@ -0,0 +1,13 @@
<dir>
<file name="ad_ufs.c" info="1118265393"/>
<file name="ad_ufs_open.c" info="1132240241"/>
<file name="ad_ufs.h" info="1116890866"/>
</dir>
<data>
<fileinfo name="ad_ufs.c">
</fileinfo>
<fileinfo name="ad_ufs_open.c">
</fileinfo>
<fileinfo name="ad_ufs.h">
</fileinfo>
</data>

@ -84,6 +84,6 @@ void ADIOI_UFS_Open(ADIO_File fd, int *error_code)
"**io %s", strerror(errno));
}
}
/* --END ERROR HANDLING */
/* --END ERROR HANDLING-- */
else *error_code = MPI_SUCCESS;
}

@ -0,0 +1,9 @@
Makefile
*.safe
.deps
*.bb
*.bbg
.libs
.libstamp*
*.lo
.*-cache

@ -0,0 +1,40 @@
<dir>
<file name="ad_xfs_iread.c" info="1116890867"/>
<file name="ad_xfs_hints.c" info="1118265394"/>
<file name="ad_xfs_open.c" info="1118265394"/>
<file name="ad_xfs_fcntl.c" info="1118265394"/>
<file name="ad_xfs_write.c" info="1123803226"/>
<file name="ad_xfs_iwrite.c" info="1118265394"/>
<file name="ad_xfs.c" info="1118265393"/>
<file name="ad_xfs_resize.c" info="1118265394"/>
<file name="ad_xfs_read.c" info="1123803226"/>
<file name="ad_xfs_wait.c" info="1118265394"/>
<file name="ad_xfs.h" info="1118265393"/>
<file name="ad_xfs_done.c" info="1118265393"/>
</dir>
<data>
<fileinfo name="ad_xfs_iread.c">
</fileinfo>
<fileinfo name="ad_xfs_hints.c">
</fileinfo>
<fileinfo name="ad_xfs_open.c">
</fileinfo>
<fileinfo name="ad_xfs_fcntl.c">
</fileinfo>
<fileinfo name="ad_xfs_write.c">
</fileinfo>
<fileinfo name="ad_xfs_iwrite.c">
</fileinfo>
<fileinfo name="ad_xfs.c">
</fileinfo>
<fileinfo name="ad_xfs_resize.c">
</fileinfo>
<fileinfo name="ad_xfs_read.c">
</fileinfo>
<fileinfo name="ad_xfs_wait.c">
</fileinfo>
<fileinfo name="ad_xfs.h">
</fileinfo>
<fileinfo name="ad_xfs_done.c">
</fileinfo>
</data>

@ -0,0 +1,10 @@
Makefile
Debug*
Release*
.deps
*.bb
*.bbg
.libs
.libstamp*
*.lo
.*-cache

@ -0,0 +1,154 @@
<dir>
<file name="ad_read_str_naive.c" info="1151530141"/>
<file name="iscontig.c" info="1123623159"/>
<file name="ad_darray.c" info="1120759275"/>
<file name="ad_flush.c" info="1118265395"/>
<file name="async_list.c" info="1118265398"/>
<file name="req_malloc.c" info="1131442746"/>
<file name="status_setb.c" info="1120935953"/>
<file name="ad_read_str.c" info="1118265396"/>
<file name="ad_get_sh_fp.c" info="1136502931"/>
<file name="ad_delete.c" info="1118265395"/>
<file name="lock.c" info="1126197468"/>
<file name="ad_iwrite.c" info="1118265396"/>
<file name="ad_read_coll.c" info="1120935952"/>
<file name="ad_write.c" info="1118265397"/>
<file name="ad_open.c" info="1149889504"/>
<file name="ad_fstype.c" info="1123803226"/>
<file name="ad_wait.c" info="1118265397"/>
<file name="ad_seek.c" info="1120935952"/>
<file name="ad_set_sh_fp.c" info="1136502931"/>
<file name="flatten.c" info="1152132013"/>
<file name="ad_set_view.c" info="1118265397"/>
<file name="ad_write_str_naive.c" info="1151530141"/>
<file name="cb_config_list.c" info="1153848190"/>
<file name="ad_done.c" info="1118265395"/>
<file name="ad_init.c" info="1120935952"/>
<file name="eof_offset.c" info="1120759276"/>
<file name="ad_done_fake.c" info="1118265395"/>
<file name="ad_write_coll.c" info="1154634330"/>
<file name="ad_iread.c" info="1118265395"/>
<file name="ad_aggregate.c" info="1120935952"/>
<file name="error.c" info="1118265398"/>
<file name="ad_iread_fake.c" info="1118265396"/>
<file name="ad_end.c" info="1122311888"/>
<file name="byte_offset.c" info="1118265398"/>
<file name="get_fp_posn.c" info="1120759276"/>
<file name="ad_fcntl.c" info="1118265395"/>
<file name="ad_wait_fake.c" info="1118265397"/>
<file name="ad_hints.c" info="1151530140"/>
<file name="ad_resize.c" info="1136423741"/>
<file name="malloc.c" info="1123803226"/>
<file name="adi_close.c" info="1118265398"/>
<file name="ad_iopen.c" info="1118265395"/>
<file name="ad_prealloc.c" info="1123510658"/>
<file name="ad_write_str.c" info="1120183732"/>
<file name="ad_close.c" info="1149889504"/>
<file name="shfp_fname.c" info="1123803226"/>
<file name="strfns.c" info="1130806561"/>
<file name="ad_read.c" info="1118265396"/>
<file name="ad_subarray.c" info="1120759275"/>
<file name="ad_iwrite_fake.c" info="1118265396"/>
</dir>
<data>
<fileinfo name="ad_read_str_naive.c">
</fileinfo>
<fileinfo name="iscontig.c">
</fileinfo>
<fileinfo name="ad_darray.c">
</fileinfo>
<fileinfo name="ad_flush.c">
</fileinfo>
<fileinfo name="async_list.c">
</fileinfo>
<fileinfo name="req_malloc.c">
</fileinfo>
<fileinfo name="status_setb.c">
</fileinfo>
<fileinfo name="ad_read_str.c">
</fileinfo>
<fileinfo name="ad_get_sh_fp.c">
</fileinfo>
<fileinfo name="ad_delete.c">
</fileinfo>
<fileinfo name="lock.c">
</fileinfo>
<fileinfo name="ad_iwrite.c">
</fileinfo>
<fileinfo name="ad_read_coll.c">
</fileinfo>
<fileinfo name="ad_write.c">
</fileinfo>
<fileinfo name="ad_open.c">
</fileinfo>
<fileinfo name="ad_fstype.c">
</fileinfo>
<fileinfo name="ad_wait.c">
</fileinfo>
<fileinfo name="ad_seek.c">
</fileinfo>
<fileinfo name="ad_set_sh_fp.c">
</fileinfo>
<fileinfo name="flatten.c">
</fileinfo>
<fileinfo name="ad_set_view.c">
</fileinfo>
<fileinfo name="ad_write_str_naive.c">
</fileinfo>
<fileinfo name="cb_config_list.c">
</fileinfo>
<fileinfo name="ad_done.c">
</fileinfo>
<fileinfo name="ad_init.c">
</fileinfo>
<fileinfo name="eof_offset.c">
</fileinfo>
<fileinfo name="ad_done_fake.c">
</fileinfo>
<fileinfo name="ad_write_coll.c">
</fileinfo>
<fileinfo name="ad_iread.c">
</fileinfo>
<fileinfo name="ad_aggregate.c">
</fileinfo>
<fileinfo name="error.c">
</fileinfo>
<fileinfo name="ad_iread_fake.c">
</fileinfo>
<fileinfo name="ad_end.c">
</fileinfo>
<fileinfo name="byte_offset.c">
</fileinfo>
<fileinfo name="get_fp_posn.c">
</fileinfo>
<fileinfo name="ad_fcntl.c">
</fileinfo>
<fileinfo name="ad_wait_fake.c">
</fileinfo>
<fileinfo name="ad_hints.c">
</fileinfo>
<fileinfo name="ad_resize.c">
</fileinfo>
<fileinfo name="malloc.c">
</fileinfo>
<fileinfo name="adi_close.c">
</fileinfo>
<fileinfo name="ad_iopen.c">
</fileinfo>
<fileinfo name="ad_prealloc.c">
</fileinfo>
<fileinfo name="ad_write_str.c">
</fileinfo>
<fileinfo name="ad_close.c">
</fileinfo>
<fileinfo name="shfp_fname.c">
</fileinfo>
<fileinfo name="strfns.c">
</fileinfo>
<fileinfo name="ad_read.c">
</fileinfo>
<fileinfo name="ad_subarray.c">
</fileinfo>
<fileinfo name="ad_iwrite_fake.c">
</fileinfo>
</data>

@ -71,7 +71,6 @@ libadio_common_la_SOURCES = \
io_romio_eof_offset.c \
io_romio_error.c \
io_romio_flatten.c \
io_romio_gencheck.c \
io_romio_get_fp_posn.c \
io_romio_iscontig.c \
io_romio_lock.c \

@ -6,6 +6,7 @@
*/
#include "adio.h"
#include "adio_extern.h"
#ifdef HAVE_UNISTD_H
#include <unistd.h>
@ -53,21 +54,20 @@ void ADIO_Close(ADIO_File fd, int *error_code)
/* if we are doing aggregation and deferred open, then it's possible
* that rank 0 does not have access to the file. make sure only an
* aggregator deletes the file.*/
if (fd->agg_comm != MPI_COMM_NULL ) {
MPI_Comm_rank(fd->agg_comm, &myrank);
MPI_Barrier(fd->agg_comm);
if (!myrank) ADIO_Delete(fd->filename, &err);
} else {
MPI_Comm_rank(fd->comm, &myrank);
MPI_Barrier(fd->comm);
if (!myrank) ADIO_Delete(fd->filename, &err);
MPI_Comm_rank(fd->comm, &myrank);
if (myrank == fd->hints->ranklist[0]) {
ADIO_Delete(fd->filename, &err);
}
MPI_Barrier(fd->comm);
}
if (fd->fortran_handle != -1) {
ADIOI_Ftable[fd->fortran_handle] = MPI_FILE_NULL;
}
ADIOI_Free(fd->hints->ranklist);
ADIOI_Free(fd->hints->cb_config_list);
ADIOI_Free(fd->hints);
ADIOI_Free(fd->fns);
MPI_Comm_free(&(fd->comm));
/* deferred open: if we created an aggregator communicator, free it */
if (fd->agg_comm != MPI_COMM_NULL) {

@ -35,7 +35,7 @@ void ADIO_Get_shared_fp(ADIO_File fd, int incr, ADIO_Offset *shared_fp,
fd->file_system,
fd->fns,
ADIO_CREATE | ADIO_RDWR | ADIO_DELETE_ON_CLOSE,
0, MPI_BYTE, MPI_BYTE, 0,
0, MPI_BYTE, MPI_BYTE,
MPI_INFO_NULL,
ADIO_PERM_NULL, error_code);
if (*error_code != MPI_SUCCESS) return;

@ -370,8 +370,9 @@ void ADIOI_GEN_SetInfo(ADIO_File fd, MPI_Info users_info, int *error_code)
fd->hints->deferred_open = 0;
}
if ((fd->file_system == ADIO_PIOFS) || (fd->file_system == ADIO_PVFS)) {
/* no data sieving for writes in PIOFS and PVFS, because they do not
if ((fd->file_system == ADIO_PIOFS) || (fd->file_system == ADIO_PVFS) ||
(fd->file_system == ADIO_PVFS2) ) {
/* no data sieving for writes in PIOFS, PVFS and PVFS2, because they do not
support file locking */
MPI_Info_get(info, "ind_wr_buffer_size", MPI_MAX_INFO_VAL,
value, &flag);

@ -12,27 +12,26 @@
#include "mpio.h"
static int is_aggregator(int rank, ADIO_File fd);
static int uses_generic_read(ADIO_File fd);
static int uses_generic_write(ADIO_File fd);
MPI_File ADIO_Open(MPI_Comm orig_comm,
MPI_Comm comm, char *filename, int file_system,
ADIOI_Fns *ops,
int access_mode, ADIO_Offset disp, MPI_Datatype etype,
MPI_Datatype filetype,
int iomode /* ignored */,
MPI_Info info, int perm, int *error_code)
{
MPI_File mpi_fh;
ADIO_File fd;
ADIO_cb_name_array array;
int orig_amode_excl, orig_amode_wronly, err, rank, procs, agg_rank;
int orig_amode_excl, orig_amode_wronly, err, rank, procs;
char *value;
static char myname[] = "ADIO_OPEN";
int rank_ct, max_error_code;
int *tmp_ranklist;
MPI_Comm aggregator_comm = MPI_COMM_NULL; /* just for deferred opens */
ADIOI_UNREFERENCED_ARG(iomode);
*error_code = MPI_SUCCESS;
/* obtain MPI_File handle */
@ -47,10 +46,9 @@ MPI_File ADIO_Open(MPI_Comm orig_comm,
fd->comm = comm; /* dup'ed in MPI_File_open */
fd->filename = ADIOI_Strdup(filename);
fd->file_system = file_system;
fd->fs_ptr = NULL;
/* TODO: VERIFY THAT WE DON'T NEED TO ALLOCATE THESE, THEN DON'T. */
fd->fns = (ADIOI_Fns *) ADIOI_Malloc(sizeof(ADIOI_Fns));
*fd->fns = *ops;
fd->fns = ops;
fd->disp = disp;
fd->split_coll_count = 0;
@ -64,6 +62,8 @@ MPI_File ADIO_Open(MPI_Comm orig_comm,
fd->async_count = 0;
fd->fortran_handle = -1;
fd->err_handler = ADIOI_DFLT_ERR_HANDLER;
/* create and initialize info object */
@ -77,6 +77,22 @@ MPI_File ADIO_Open(MPI_Comm orig_comm,
fd->info = MPI_INFO_NULL;
ADIO_SetInfo(fd, info, &err);
/* deferred open:
* we can only do this optimization if 'fd->hints->deferred_open' is set
* (which means the user hinted 'no_indep_rw' and collective buffering).
* Furthermore, we only do this if our collective read/write routines use
* our generic function, and not an fs-specific routine (we can defer opens
* only if we use our aggreagation code). */
if (fd->hints->deferred_open &&
!(uses_generic_read(fd) \
&& uses_generic_write(fd))) {
fd->hints->deferred_open = 0;
}
if (fd->file_system == ADIO_PVFS2)
/* disable deferred open on PVFS2 so that scalable broadcast will
* always use the propper communicator */
fd->hints->deferred_open = 0;
/* gather the processor name array if we don't already have it */
/* this has to be done here so that we can cache the name array in both
@ -111,9 +127,7 @@ MPI_File ADIO_Open(MPI_Comm orig_comm,
MPI_Info_set(fd->info, "cb_nodes", value);
ADIOI_Free(value);
}
/* bcast the rank map (could do an allgather above and avoid
* this...would that really be any better?)
*/
ADIOI_cb_bcast_rank_map(fd);
if (fd->hints->cb_nodes <= 0) {
*error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
@ -123,22 +137,14 @@ MPI_File ADIO_Open(MPI_Comm orig_comm,
goto fn_exit;
}
/* deferred open:
* we can only do this if 'fd->hints->deferred_open' is set (which means
* the user hinted 'no_indep_rw' and collective buffering). Furthermore,
* we only do this if our collective read/write routines use our generic
* function, and not an fs-specific routine (we can defer opens only if we
* use our aggreagation code).
*
* if we are an aggregator, create a new communicator. we'll use this
* aggregator communicator for opens and closes. otherwise, we have a NULL
* communicator until we try to do independent IO */
/* deferred open: if we are an aggregator, create a new communicator.
* we'll use this aggregator communicator for opens and closes.
* otherwise, we have a NULL communicator until we try to do independent
* IO */
fd->agg_comm = MPI_COMM_NULL;
fd->is_open = 0;
fd->io_worker = 0;
if (fd->hints->deferred_open &&
ADIOI_Uses_generic_read(fd) &&
ADIOI_Uses_generic_write(fd) ) {
if (fd->hints->deferred_open) {
/* MPI_Comm_split will create a communication group of aggregators.
* for non-aggregators it will return MPI_COMM_NULL . we rely on
* fd->agg_comm == MPI_COMM_NULL for non-aggregators in several
@ -146,54 +152,58 @@ MPI_File ADIO_Open(MPI_Comm orig_comm,
if (is_aggregator(rank, fd)) {
MPI_Comm_split(fd->comm, 1, 0, &aggregator_comm);
fd->agg_comm = aggregator_comm;
MPI_Comm_rank(fd->agg_comm, &agg_rank);
if (agg_rank == 0) {
fd->io_worker = 1;
}
} else {
MPI_Comm_split(fd->comm, MPI_UNDEFINED, 0, &aggregator_comm);
fd->agg_comm = aggregator_comm;
}
} else {
if (rank == 0) {
fd->io_worker = 1;
}
}
orig_amode_excl = access_mode;
/* we used to do this EXCL|CREAT workaround in MPI_File_open, but if we are
* doing deferred open, we more easily know who the aggregators are in
* ADIO_Open */
if ((access_mode & MPI_MODE_CREATE) && (access_mode & MPI_MODE_EXCL)) {
/* the open should fail if the file exists. Only *1* process should
check this. Otherwise, if all processes try to check and the file
does not exist, one process will create the file and others who
reach later will return error. */
if(fd->io_worker) {
fd->access_mode = access_mode;
(*(fd->fns->ADIOI_xxx_Open))(fd, error_code);
MPI_Bcast(error_code, 1, MPI_INT, 0, fd->comm);
/* if no error, close the file and reopen normally below */
if (*error_code == MPI_SUCCESS)
(*(fd->fns->ADIOI_xxx_Close))(fd, error_code);
/* optimization: by having just one process create a file, close it, then
* have all N processes open it, we can possibly avoid contention for write
* locks on a directory for some file systems.
*
* we used to special-case EXCL|CREATE, since when N processes are trying
* to create a file exclusively, only 1 will succeed and the rest will
* (spuriously) fail. Since we are now carrying out the CREATE on one
* process anyway, the EXCL case falls out and we don't need to explicitly
* worry about it, other than turning off both the EXCL and CREATE flags
*/
/* pvfs2 handles opens specially, so it is actually more efficent for that
* file system if we skip this optimization */
if (access_mode & ADIO_CREATE && fd->file_system != ADIO_PVFS2) {
if(rank == fd->hints->ranklist[0]) {
/* remove delete_on_close flag if set */
if (access_mode & ADIO_DELETE_ON_CLOSE)
fd->access_mode = access_mode ^ ADIO_DELETE_ON_CLOSE;
else
fd->access_mode = access_mode;
(*(fd->fns->ADIOI_xxx_Open))(fd, error_code);
MPI_Bcast(error_code, 1, MPI_INT, \
fd->hints->ranklist[0], fd->comm);
/* if no error, close the file and reopen normally below */
if (*error_code == MPI_SUCCESS)
(*(fd->fns->ADIOI_xxx_Close))(fd, error_code);
fd->access_mode = access_mode; /* back to original */
}
else MPI_Bcast(error_code, 1, MPI_INT, 0, fd->comm);
else MPI_Bcast(error_code, 1, MPI_INT, fd->hints->ranklist[0], fd->comm);
if (*error_code != MPI_SUCCESS) {
goto fn_exit;
}
else {
/* turn off EXCL for real open */
access_mode = access_mode ^ MPI_MODE_EXCL;
/* turn off CREAT (and EXCL if set) for real multi-processor open */
access_mode ^= ADIO_CREATE;
if (access_mode & ADIO_EXCL)
access_mode ^= ADIO_EXCL;
}
}
/* if we are doing deferred open, non-aggregators should return now */
if (fd->hints->deferred_open &&
ADIOI_Uses_generic_read(fd) &&
ADIOI_Uses_generic_write(fd) ) {
if (fd->hints->deferred_open ) {
if (fd->agg_comm == MPI_COMM_NULL) {
/* we might have turned off EXCL for the aggregators.
* restore access_mode that non-aggregators get the right
@ -240,9 +250,7 @@ MPI_File ADIO_Open(MPI_Comm orig_comm,
/* in the deferred open case, only those who have actually
opened the file should close it */
if (fd->hints->deferred_open &&
ADIOI_Uses_generic_read(fd) &&
ADIOI_Uses_generic_write(fd) ) {
if (fd->hints->deferred_open) {
if (fd->agg_comm != MPI_COMM_NULL) {
(*(fd->fns->ADIOI_xxx_Close))(fd, error_code);
}
@ -251,9 +259,10 @@ MPI_File ADIO_Open(MPI_Comm orig_comm,
(*(fd->fns->ADIOI_xxx_Close))(fd, error_code);
}
}
if (fd->fns) ADIOI_Free(fd->fns);
if (fd->filename) ADIOI_Free(fd->filename);
if (fd->hints->ranklist) ADIOI_Free(fd->hints->ranklist);
if (fd->hints->cb_config_list) ADIOI_Free(fd->hints->cb_config_list);
if (fd->hints) ADIOI_Free(fd->hints);
if (fd->info != MPI_INFO_NULL) MPI_Info_free(&(fd->info));
ADIOI_Free(fd);
fd = ADIO_FILE_NULL;
@ -286,3 +295,28 @@ int is_aggregator(int rank, ADIO_File fd ) {
}
return 0;
}
/*
* we special-case TESTFS because all it does is wrap logging info around GEN
*/
static int uses_generic_read(ADIO_File fd)
{
ADIOI_Fns *fns = fd->fns;
if (fns->ADIOI_xxx_ReadStridedColl == ADIOI_GEN_ReadStridedColl ||
fd->file_system == ADIO_TESTFS )
{
return 1;
}
return 0;
}
static int uses_generic_write(ADIO_File fd)
{
ADIOI_Fns *fns = fd->fns;
if (fns->ADIOI_xxx_WriteStridedColl == ADIOI_GEN_WriteStridedColl ||
fd->file_system == ADIO_TESTFS )
{
return 1;
}
return 0;
}

@ -63,7 +63,7 @@ void ADIOI_GEN_ReadStrided_naive(ADIO_File fd, void *buf, int count,
/* if atomicity is true, lock (exclusive) the region to be accessed */
if ((fd->atomicity) && (fd->file_system != ADIO_PIOFS) &&
(fd->file_system != ADIO_PVFS))
(fd->file_system != ADIO_PVFS) && (fd->file_system != ADIO_PVFS2))
{
ADIOI_WRITE_LOCK(fd, start_off, SEEK_SET, end_offset-start_off+1);
}
@ -94,7 +94,7 @@ void ADIOI_GEN_ReadStrided_naive(ADIO_File fd, void *buf, int count,
}
if ((fd->atomicity) && (fd->file_system != ADIO_PIOFS) &&
(fd->file_system != ADIO_PVFS))
(fd->file_system != ADIO_PVFS) && (fd->file_system != ADIO_PVFS2))
{
ADIOI_UNLOCK(fd, start_off, SEEK_SET, end_offset-start_off+1);
}
@ -214,7 +214,7 @@ void ADIOI_GEN_ReadStrided_naive(ADIO_File fd, void *buf, int count,
/* if atomicity is true, lock (exclusive) the region to be accessed */
if ((fd->atomicity) && (fd->file_system != ADIO_PIOFS) &&
(fd->file_system != ADIO_PVFS))
(fd->file_system != ADIO_PVFS) && (fd->file_system != ADIO_PVFS2))
{
ADIOI_WRITE_LOCK(fd, start_off, SEEK_SET, end_offset-start_off+1);
}
@ -352,7 +352,7 @@ void ADIOI_GEN_ReadStrided_naive(ADIO_File fd, void *buf, int count,
/* unlock the file region if we locked it */
if ((fd->atomicity) && (fd->file_system != ADIO_PIOFS) &&
(fd->file_system != ADIO_PVFS))
(fd->file_system != ADIO_PVFS) && (fd->file_system != ADIO_PVFS2))
{
ADIOI_UNLOCK(fd, start_off, SEEK_SET, end_offset-start_off+1);
}

@ -13,10 +13,18 @@
void ADIOI_GEN_Resize(ADIO_File fd, ADIO_Offset size, int *error_code)
{
int err;
int err, rank;
static char myname[] = "ADIOI_GEN_RESIZE";
err = ftruncate(fd->fd_sys, size);
MPI_Comm_rank(fd->comm, &rank);
/* first aggregator performs ftruncate() */
if (rank == fd->hints->ranklist[0]) {
err = ftruncate(fd->fd_sys, size);
}
/* bcast return value */
MPI_Bcast(&err, 1, MPI_INT, fd->hints->ranklist[0], fd->comm);
/* --BEGIN ERROR HANDLING-- */
if (err == -1) {

@ -30,7 +30,7 @@ void ADIO_Set_shared_fp(ADIO_File fd, ADIO_Offset offset, int *error_code)
fd->file_system,
fd->fns,
ADIO_CREATE | ADIO_RDWR | ADIO_DELETE_ON_CLOSE,
0, MPI_BYTE, MPI_BYTE, 0,
0, MPI_BYTE, MPI_BYTE,
MPI_INFO_NULL,
ADIO_PERM_NULL, error_code);
}

@ -83,6 +83,7 @@ void ADIOI_GEN_WriteStridedColl(ADIO_File fd, void *buf, int count,
unsigned long long max_pe_request = 0;
unsigned long long min_rd_request = ULONG_MAX;
unsigned long long max_rd_request = 0;
int old_error;
MPI_Info_get(fd->info, "ompi_enable_parallel_optimizations", MPI_MAX_INFO_VAL, value,
&info_flag);
@ -274,6 +275,34 @@ void ADIOI_GEN_WriteStridedColl(ADIO_File fd, void *buf, int count,
len_list, contig_access_count, min_st_offset,
fd_size, fd_start, fd_end, buf_idx, error_code);
/* If this collective write is followed by an independent write,
* it's possible to have those subsequent writes on other processes
* race ahead and sneak in before the read-modify-write completes.
* We carry out a collective communication at the end here so no one
* can start independent i/o before collective I/O completes.
*
* optimization: if only one process performing i/o, we can perform
* a less-expensive Bcast
*
* need to do some gymnastics with the error codes so that if something
* went wrong, all processes report error, but if a process has a more
* specific error code, we can still have that process report the
* additional information */
old_error = *error_code;
if (*error_code != MPI_SUCCESS) *error_code = MPI_ERR_IO;
if (fd->hints->cb_nodes == 1)
MPI_Bcast(error_code, 1, MPI_INT,
fd->hints->ranklist[0], fd->comm);
else
MPI_Allreduce(MPI_IN_PLACE, error_code, 1, MPI_INT,
MPI_MAX, fd->comm);
if ( (old_error != MPI_SUCCESS) && (old_error != MPI_ERR_IO) )
*error_code = old_error;
if (!buftype_is_contig) ADIOI_Delete_flattened(datatype);
/* free all memory allocated for collective I/O */
@ -334,6 +363,7 @@ static void ADIOI_Exch_and_write(ADIO_File fd, void *buf, MPI_Datatype
array to a file, where each local array is 8Mbytes, requiring
at least another 8Mbytes of temp space is unacceptable. */
/* TODO: 'hole' not used outside of ADIOI_W_Exchange_data */
int hole, i, j, m, size=0, ntimes, max_ntimes, buftype_is_contig;
ADIO_Offset st_loc=-1, end_loc=-1, off, done, req_off;
char *write_buf=NULL;

@ -64,7 +64,7 @@ void ADIOI_GEN_WriteStrided_naive(ADIO_File fd, void *buf, int count,
/* if atomicity is true, lock (exclusive) the region to be accessed */
if ((fd->atomicity) && (fd->file_system != ADIO_PIOFS) &&
(fd->file_system != ADIO_PVFS))
(fd->file_system != ADIO_PVFS) && (fd->file_system != ADIO_PVFS2))
{
ADIOI_WRITE_LOCK(fd, start_off, SEEK_SET, end_offset-start_off+1);
}
@ -95,7 +95,7 @@ void ADIOI_GEN_WriteStrided_naive(ADIO_File fd, void *buf, int count,
}
if ((fd->atomicity) && (fd->file_system != ADIO_PIOFS) &&
(fd->file_system != ADIO_PVFS))
(fd->file_system != ADIO_PVFS) && (fd->file_system != ADIO_PVFS2))
{
ADIOI_UNLOCK(fd, start_off, SEEK_SET, end_offset-start_off+1);
}
@ -215,7 +215,7 @@ void ADIOI_GEN_WriteStrided_naive(ADIO_File fd, void *buf, int count,
/* if atomicity is true, lock (exclusive) the region to be accessed */
if ((fd->atomicity) && (fd->file_system != ADIO_PIOFS) &&
(fd->file_system != ADIO_PVFS))
(fd->file_system != ADIO_PVFS) && (fd->file_system != ADIO_PVFS2))
{
ADIOI_WRITE_LOCK(fd, start_off, SEEK_SET, end_offset-start_off+1);
}
@ -353,7 +353,7 @@ void ADIOI_GEN_WriteStrided_naive(ADIO_File fd, void *buf, int count,
/* unlock the file region if we locked it */
if ((fd->atomicity) && (fd->file_system != ADIO_PIOFS) &&
(fd->file_system != ADIO_PVFS))
(fd->file_system != ADIO_PVFS) && (fd->file_system != ADIO_PVFS2))
{
ADIOI_UNLOCK(fd, start_off, SEEK_SET, end_offset-start_off+1);
}

@ -109,6 +109,7 @@ int ADIOI_cb_gather_name_array(MPI_Comm comm,
int *procname_len = NULL, my_procname_len, *disp = NULL, i;
int commsize, commrank, found;
ADIO_cb_name_array array = NULL;
int alloc_size;
if (cb_config_list_keyval == MPI_KEYVAL_INVALID) {
MPI_Keyval_create((MPI_Copy_function *) ADIOI_cb_copy_name_array,
@ -166,25 +167,27 @@ int ADIOI_cb_gather_name_array(MPI_Comm comm,
}
#endif
alloc_size = 0;
for (i=0; i < commsize; i++) {
/* add one to the lengths because we need to count the
* terminator, and we are going to use this list of lengths
* again in the gatherv.
*/
procname_len[i]++;
procname[i] = ADIOI_Malloc(procname_len[i]);
if (procname[i] == NULL) {
return -1;
}
alloc_size += ++procname_len[i];
}
procname[0] = ADIOI_Malloc(alloc_size);
if (procname[0] == NULL) {
return -1;
}
for (i=1; i < commsize; i++) {
procname[i] = procname[i-1] + procname_len[i-1];
}
/* create our list of displacements for the gatherv. we're going
* to do everything relative to the start of the region allocated
* for procname[0]
*
* I suppose it is theoretically possible that the distance between
* malloc'd regions could be more than will fit in an int. We don't
* cover that case.
*/
disp = ADIOI_Malloc(commsize * sizeof(int));
disp[0] = 0;
@ -398,10 +401,13 @@ int ADIOI_cb_delete_name_array(MPI_Comm comm,
array->refct--;
if (array->refct <= 0) {
/* time to free the structures (names, array of ptrs to names, struct)
/* time to free the structures (names, array of ptrs to names, struct)
*/
for (i=0; i < array->namect; i++) {
ADIOI_Free(array->names[i]);
if (array->namect) {
/* Note that array->names[i], where i > 0,
* are just pointers into the allocated region array->names[0]
*/
ADIOI_Free(array->names[0]);
}
if (array->names != NULL) ADIOI_Free(array->names);
ADIOI_Free(array);

@ -1,6 +1,7 @@
/* -*- Mode: C; c-basic-offset:4 ; -*- */
/*
* $Id: flatten.c,v 1.22 2005/08/12 18:56:56 thakur Exp $
/* -*- Mode: C; c-basic-offset:4 ; -*-
* vim: ts=8 sts=4 sw=4 noexpandtab
*
* $Id: flatten.c,v 1.24 2006/07/05 20:40:13 robl Exp $
*
* Copyright (C) 1997 University of Chicago.
* See COPYRIGHT notice in top-level directory.
@ -217,7 +218,8 @@ void ADIOI_Flatten(MPI_Datatype datatype, ADIOI_Flatlist_node *flat,
MPI_Type_size(types[0], &old_size);
flat->blocklens[j] = ints[1] * old_size;
for (i=j+1; i<j+top_count; i++) {
flat->indices[i] = flat->indices[i-1] + ints[2]*old_size;
flat->indices[i] = flat->indices[i-1] +
(unsigned) ints[2] * (unsigned) old_size;
flat->blocklens[i] = flat->blocklens[j];
}
*curr_index = i;
@ -731,14 +733,17 @@ int ADIOI_Count_contiguous_blocks(MPI_Datatype datatype, int *curr_index)
* together, resulting in a shorter blocklist (and thus fewer
* contiguous operations).
*
* Q: IS IT SAFE TO REMOVE THE 0-LENGTH BLOCKS TOO?
* NOTE: a further optimization would be to remove zero length blocks. However,
* we do not do this as parts of the code use the presence of zero length
* blocks to indicate UB and LB.
*
*/
void ADIOI_Optimize_flattened(ADIOI_Flatlist_node *flat_type)
{
int i, j, opt_blocks;
int *opt_blocklens;
ADIO_Offset *opt_indices;
opt_blocks = 1;
/* save number of noncontiguous blocks in opt_blocks */

@ -1,31 +0,0 @@
/* -*- Mode: C; c-basic-offset:4 ; -*- */
/*
*
* Copyright (C) 2002 University of Chicago.
* See COPYRIGHT notice in top-level directory.
*/
#include "adio.h"
/* we special-case TESTFS because all it does is wrap logging info around GEN */
int ADIOI_Uses_generic_read(ADIO_File fd)
{
ADIOI_Fns *fns = fd->fns;
if (fns->ADIOI_xxx_ReadStridedColl == ADIOI_GEN_ReadStridedColl ||
fd->file_system == ADIO_TESTFS )
{
return 1;
}
return 0;
}
int ADIOI_Uses_generic_write(ADIO_File fd)
{
ADIOI_Fns *fns = fd->fns;
if (fns->ADIOI_xxx_WriteStridedColl == ADIOI_GEN_WriteStridedColl ||
fd->file_system == ADIO_TESTFS )
{
return 1;
}
return 0;
}

@ -23,7 +23,12 @@ struct ADIOI_RequestD *ADIOI_Malloc_request(void)
if (!ADIOI_Req_avail_head) {
ADIOI_Req_avail_head = (ADIOI_Req_node *)
ADIOI_Malloc(NUM*sizeof(ADIOI_Req_node));
ADIOI_Malloc(NUM*sizeof(ADIOI_Req_node));
if (ADIOI_Req_avail_head == NULL)
{
/* FIXME: Insert error here */
return NULL;
}
curr = ADIOI_Req_avail_head;
for (i=1; i<NUM; i++) {
curr->next = ADIOI_Req_avail_head+i;

@ -0,0 +1,7 @@
romioconf.h
romioconf.h.in
.deps
.libs
.libstamp*
*.lo
*-cache

@ -0,0 +1,31 @@
<dir>
<file name="adioi_error.h" info="1136505238"/>
<file name="adio.h" info="1149889504"/>
<file name="adioi_errmsg.h" info="1116890869"/>
<file name="mpio_error.h" info="1116890870"/>
<file name="adioi.h" info="1123873016"/>
<file name="adio_extern.h" info="1118265399"/>
<file name="adioi_fs_proto.h" info="1118265399"/>
<file name="adio_cb_config_list.h" info="1118265399"/>
<file name="mpipr.h" info="1123638196"/>
</dir>
<data>
<fileinfo name="adioi_error.h">
</fileinfo>
<fileinfo name="adio.h">
</fileinfo>
<fileinfo name="adioi_errmsg.h">
</fileinfo>
<fileinfo name="mpio_error.h">
</fileinfo>
<fileinfo name="adioi.h">
</fileinfo>
<fileinfo name="adio_extern.h">
</fileinfo>
<fileinfo name="adioi_fs_proto.h">
</fileinfo>
<fileinfo name="adio_cb_config_list.h">
</fileinfo>
<fileinfo name="mpipr.h">
</fileinfo>
</data>

@ -227,7 +227,6 @@ typedef struct ADIOI_FileD {
ADIOI_Fns *fns; /* struct of I/O functions to use */
MPI_Comm comm; /* communicator indicating who called open */
MPI_Comm agg_comm; /* deferred open: aggregators who called open */
int io_worker; /* bool: if one proc should do io, is it me? */
int is_open; /* deferred open: 0: not open yet 1: is open */
char *filename;
int file_system; /* type of file system */
@ -251,6 +250,7 @@ typedef struct ADIOI_FileD {
int async_count; /* count of outstanding nonblocking operations */
int perm;
int atomicity; /* true=atomic, false=nonatomic */
int fortran_handle; /* handle for Fortran interface if needed */
MPI_Errhandler err_handler;
void *fs_ptr; /* file-system specific information */
} ADIOI_FileD;
@ -342,7 +342,7 @@ void ADIO_End(int *error_code);
MPI_File ADIO_Open(MPI_Comm orig_comm, MPI_Comm comm, char *filename,
int file_system, ADIOI_Fns *ops,
int access_mode, ADIO_Offset disp, MPI_Datatype etype,
MPI_Datatype filetype, int iomode,
MPI_Datatype filetype,
MPI_Info info, int perm, int *error_code);
void ADIO_ImmediateOpen(ADIO_File fd, int *error_code);
void ADIO_Close(ADIO_File fd, int *error_code);

@ -1,5 +1,5 @@
/* -*- Mode: C; c-basic-offset:4 ; -*- */
/* $Id: adioi_error.h,v 1.11 2005/02/18 00:39:02 robl Exp $
/* $Id: adioi_error.h,v 1.12 2006/01/05 23:53:58 robl Exp $
*
* (C) 2001 by Argonne National Laboratory.
* See COPYRIGHT in top-level directory.
@ -45,7 +45,7 @@ if (datatype == MPI_DATATYPE_NULL) { \
}
#define MPIO_CHECK_READABLE(fh, myname, error_code) \
if (fh->access_mode & MPI_MODE_WRONLY) { \
if (fh->access_mode & ADIO_WRONLY) { \
error_code = MPIO_Err_create_code(MPI_SUCCESS, \
MPIR_ERR_RECOVERABLE, \
myname, __LINE__, \
@ -56,7 +56,7 @@ if (fh->access_mode & MPI_MODE_WRONLY) { \
}
#define MPIO_CHECK_WRITABLE(fh, myname, error_code) \
if (fh->access_mode & MPI_MODE_RDONLY) { \
if (fh->access_mode & ADIO_RDONLY) { \
error_code = MPIO_Err_create_code(MPI_SUCCESS, \
MPIR_ERR_RECOVERABLE, \
myname, __LINE__, \
@ -68,7 +68,7 @@ if (fh->access_mode & MPI_MODE_RDONLY) { \
}
#define MPIO_CHECK_NOT_SEQUENTIAL_MODE(fh, myname, error_code) \
if (fh->access_mode & MPI_MODE_SEQUENTIAL) { \
if (fh->access_mode & ADIO_SEQUENTIAL) { \
error_code = MPIO_Err_create_code(MPI_SUCCESS, \
MPIR_ERR_RECOVERABLE, \
myname, __LINE__, \

@ -762,13 +762,13 @@ AC_CHECK_HEADERS(unistd.h fcntl.h malloc.h stddef.h)
CROSS_SIZEOF_INT=${CROSS_SIZEOF_INT:-0}
CROSS_SIZEOF_VOID_P=${CROSS_SIZEOF_VOID_P:-0}
AC_CHECK_SIZEOF(int,$CROSS_SIZEOF_INT)
AC_CHECK_SIZEOF(void*,$CROSS_SIZEOF_VOID_P)
AC_CHECK_SIZEOF(void *,$CROSS_SIZEOF_VOID_P)
AC_CACHE_CHECK([for int large enough for pointers],
pac_cv_int_hold_pointer,[
if test "$ac_cv_sizeof_int" = "0" -o \
"$ac_cv_sizeof_voidp" = "0" ; then
"$ac_cv_sizeof_void_p" = "0" ; then
pac_cv_int_hold_pointer=unknown
elif test "$ac_cv_sizeof_int" -lt "$ac_cv_sizeof_voidp" ; then
elif test "$ac_cv_sizeof_int" -lt "$ac_cv_sizeof_void_p" ; then
pac_cv_int_hold_pointer=no
else
pac_cv_int_hold_pointer=yes
@ -841,6 +841,47 @@ else
PAC_MPI_OFFSET_KIND_4BYTE
fi
fi
#
# Test that we can use the FORTRAN_MPI_OFFSET type. If the environment
# is a strict Fortran 90/95 or later compiler, the "integer*8" format
# may not work.
if test "$NOF77" = 0 ; then
rm -f conftest*
ac_cv_f77_offset_type_works=no
AC_MSG_CHECKING([that we can use $FORTRAN_MPI_OFFSET to declare MPI_DISPLACMENT_CURRENT])
cat >conftest.f <<EOF
program main
$FORTRAN_MPI_OFFSET j
end
EOF
if $F77 -o conftest conftest.f >>config.log 2>&1 && test -x conftest ; then
ac_cv_f77_offset_type_works=yes
fi
rm -f conftest*
AC_MSG_RESULT($ac_cv_f77_offset_type_works)
if test "$ac_cv_f77_offset_type_works" != "yes" ; then
AC_MSG_CHECKING([whether we can use KIND with the selected F77 compiler $F77])
ac_cv_f77_allows_offset_kind=no
rm -f conftest*
cat >conftest.f <<EOF
program main
integer (kind=$MPI_OFFSET_KIND) j
end
EOF
if $F77 -o conftest conftest.f >>config.log 2>&1 && test -x conftest ; then
ac_cv_f77_allows_offset_kind=yes
fi
rm -f conftest*
AC_MSG_RESULT($ac_cv_f77_allows_offset_kind)
if test "$ac_cv_f77_allows_offset_kind" ; then
FORTRAN_MPI_OFFSET="integer (kind=$MPI_OFFSET_KIND)"
else
AC_MSG_WARN([Could not find a way to declare an integer type corresponding to MPI_Offset in Fortran.])
fi
fi
fi
#
# check if MPI_Info functions are defined in the MPI implementation
if test $WITHIN_KNOWN_MPI_IMPL = no ; then
@ -927,6 +968,36 @@ EOF
ac_link2='${CC-cc} -o conftest $CFLAGS $CPPFLAGS $LDFLAGS conftest1.c conftest2.c $LIBS >conftest.out 2>&1'
if eval $ac_link2 ; then
AC_MSG_RESULT(yes)
AC_MSG_CHECKING([that the compiler correctly implements weak symbols])
# The gcc 3.4.x compiler accepts the pragma weak, but does not
# correctly implement it on systems where the loader doesn't
# support weak symbols (e.g., cygwin). This is a bug in gcc, but it
# it is one that *we* have to detect.
rm -f conftest*
cat >>conftest1.c <<EOF
extern int PFoo(int);
#pragma weak PFoo = Foo
int Foo(int);
int Foo(int a) { return a; }
EOF
cat >>conftest2.c <<EOF
extern int Foo(int);
int PFoo(int a) { return a+1;}
int main(int argc, char **argv) {
return Foo(0);}
EOF
if eval $ac_link2 ; then
AC_MSG_RESULT(yes)
has_pragma_weak=1
else
AC_MSG_RESULT(no)
echo "$ac_link2" >> config.log
echo "Failed program was" >> config.log
cat conftest1.c >>config.log
cat conftest2.c >>config.log
if test -s conftest.out ; then cat conftest.out >> config.log ; fi
has_pragma_weak=0
fi
else
echo "$ac_link2" 1>&AC_FD_CC
echo "Failed program was" 1>&AC_FD_CC
@ -1056,8 +1127,8 @@ if test -n "$file_system_xfs"; then
AC_CACHE_CHECK([for memory alignment needed for direct I/O],
pac_cv_memalignval,
[
/bin/rm -f confmemalignval
/bin/rm -f /tmp/romio_tmp.bin
rm -f confmemalignval
rm -f /tmp/romio_tmp.bin
AC_TRY_RUN([
#include <stdio.h>
#include <unistd.h>
@ -1077,8 +1148,8 @@ if test -n "$file_system_xfs"; then
pac_cv_memalignval=`cat confmemalignval`,
pac_cv_memalignval="unknown",pac_cv_memalignval="unknown"
)
/bin/rm -f confmemalignval
/bin/rm -f /tmp/romio_tmp.bin
rm -f confmemalignval
rm -f /tmp/romio_tmp.bin
])
if test -n "$pac_cv_memalignval" -a "$pac_cv_memalignval" != 0 -a \
"$pac_cv_memalignval" != "unknown" ; then
@ -1155,19 +1226,65 @@ fi
save_libs=$LIBS
LIBS=
#
# Some systems need pthreads to get AIO to work
# Some systems need pthreads to get AIO to work. However, we don't want
# to add pthreads just because it is there, as that can cause problems
# with some implementations of pthreads and compilers (e.g., gcc version 3
# would fail if there was an int a[100000] on the stack if the application
# was *linked* with pthreads, but would succeed if the application was
# *not linked* with pthreads.
#
if test "x$disable_aio" = "xno" ; then
AC_SEARCH_LIBS(pthread_create,pthread,
ROMIO_LIBLIST="$ROMIO_LIBLIST $LIBS"
MPI_LIB="$MPI_LIB $LIBS"
)
foundPTHREAD=no
# Do we have aio_write in aio or rt?
saveLIBS=$LIBS
LIBS=
AC_SEARCH_LIBS(aio_write,aio rt,
ROMIO_LIBLIST="$ROMIO_LIBLIST $LIBS"
MPI_LIB="$MPI_LIB $LIBS"
aio_write_found=yes
)
AC_SEARCH_LIBS(aio_write,aio rt,foundAIO=yes,foundAIO=no)
if test "$foundAIO" = yes ; then
AIOLIBS=$LIBS
LIBS="$saveLIBS $LIBS"
else
LIBS="$saveLIBS"
fi
# If not, try finding pthread_create first, and if found, try the
# test again.
if test "$foundAIO" = no ; then
saveLIBS=$LIBS
LIBS=
AC_SEARCH_LIBS(pthread_create,pthread,foundPTHREAD=yes,
foundPTHREAD=no)
if test "$foundPTHREAD" = yes ; then
AC_SEARCH_LIBS(aio_write,aio rt,foundAIO=yes,foundAIO=no)
if test "$foundAIO" = yes ; then
AIO_LIBS=$LIBS
LIBS="$saveLIBS $LIBS"
else
LIBS=$saveLIBS
fi
else
LIBS=$saveLIBS
fi
fi
if test "$foundAIO" = yes ; then
ROMIO_LIBLIST="$ROMIO_LIBLIST $AIOLIBS"
MPI_LIB="$MPI_LIB $AIOLIBS"
aio_write_found=yes
fi
dnl AC_SEARCH_LIBS(pthread_create,pthread,
dnl ROMIO_LIBLIST="$ROMIO_LIBLIST $LIBS"
dnl MPI_LIB="$MPI_LIB $LIBS"
dnl )
dnl LIBS=
dnl AC_SEARCH_LIBS(aio_write,aio rt,
dnl ROMIO_LIBLIST="$ROMIO_LIBLIST $LIBS"
dnl MPI_LIB="$MPI_LIB $LIBS"
dnl aio_write_found=yes
dnl )
fi
LIBS=$save_libs

@ -18,7 +18,7 @@
# Portions taken from original ROMIO Makefile*
#
EXTRA_DIST = users-guide.bbl users-guide.ps.gz users-guide.tex README
EXTRA_DIST = users-guide.bbl users-guide.ps.gz users-guide.tex source-guide.tex README
all:
latex users-guide.tex

494
ompi/mca/io/romio/romio/doc/source-guide.tex Обычный файл

@ -0,0 +1,494 @@
% \documentstyle[11pt,psfig]{article}
\documentclass[11pt]{article}
\hoffset=-.7in
\voffset=-.6in
\textwidth=6.5in
\textheight=8.5in
\begin{document}
\vspace*{-1in}
\thispagestyle{empty}
\begin{center}
ARGONNE NATIONAL LABORATORY \\
9700 South Cass Avenue \\
Argonne, IL 60439
\end{center}
\vskip .5 in
\begin{center}
\rule{1.75in}{.01in} \\
\vspace{.1in}
ANL/MCS-TM-XXX \\
\rule{1.75in}{.01in} \\
\vskip 1.3 in
{\Large\bf A Guide to the ROMIO MPI-IO Implementation } \\
by \\ [2ex]
{\large\it Robert Ross, Robert Latham, and Rajeev Thakur}
\vspace{1in}
Mathematics and Computer Science Division
\bigskip
Technical Memorandum No.\ XXX
% \vspace{1.4in}
% Revised May 2004
\end{center}
\vfill
{\small
\noindent
This work was supported by the Mathematical, Information, and
Computational Sciences Division subprogram of the Office of Advanced
Scientific Computing Research, U.S. Department of Energy, under
Contract W-31-109-Eng-38; and by the Scalable I/O Initiative, a
multiagency project funded by the Defense Advanced Research Projects
Agency (Contract DABT63-94-C-0049), the Department of Energy, the
National Aeronautics and Space Administration, and the National
Science Foundation.}
\newpage
%% Line Spacing (e.g., \ls{1} for single, \ls{2} for double, even \ls{1.5})
%%
\newcommand{\ls}[1]
{\dimen0=\fontdimen6\the\font
\lineskip=#1\dimen0
\advance\lineskip.5\fontdimen5\the\font
\advance\lineskip-\dimen0
\lineskiplimit=.9\lineskip
\baselineskip=\lineskip
\advance\baselineskip\dimen0
\normallineskip\lineskip
\normallineskiplimit\lineskiplimit
\normalbaselineskip\baselineskip
\ignorespaces
}
\renewcommand{\baselinestretch}{1}
\newcommand {\ix} {\hspace*{2em}}
\newcommand {\mc} {\multicolumn}
\tableofcontents
\thispagestyle{empty}
\newpage
\pagenumbering{arabic}
\setcounter{page}{1}
\begin{center}
{\bf Users Guide for ROMIO: A High-Performance,\\[1ex]
Portable MPI-IO Implementation} \\ [2ex]
by \\ [2ex]
{\it Rajeev Thakur, Robert Ross, Ewing Lusk, and William Gropp}
\end{center}
\addcontentsline{toc}{section}{Abstract}
\begin{abstract}
\noindent
ROMIO is a high-performance, portable implementation of MPI-IO (the
I/O chapter in \mbox{MPI-2}).
This document describes the internals of the ROMIO implementation.
\end{abstract}
\section{Introduction}
The ROMIO MPI-IO implementation, originally written by Rajeev Thakur, has been
in existence since XXX.
... Discussion of the evolution of ROMIO ...
Architecturally, ROMIO is broken up into three layers: a layer implementing
the MPI I/O routines in terms of an abstract device for I/O (ADIO), a layer of
common code implementing a subset of the ADIO interface, and a set of storage
system specific functions that complete the ADIO implementation in terms of
that storage type. These three layers work together to provide I/O support
for MPI applications.
In this document we will discuss the details of the ROMIO implementation,
including the major components, how those components are implemented, and
where those components are located in the ROMIO source tree.
\section{The Directory Structure}
The ROMIO directory structure consists of two main branches, the MPI-IO branch
(mpi-io) and the ADIO branch (adio). The MPI-IO branch contains code that
implements the functions defined in the MPI-2 specification for I/O, such as
MPI\_File\_open. These functions are then written in terms of other functions
that provide an abstract interface to I/O resources, the ADIO functions.
There is an additional glue subdirectory in the MPI-IO branch that defines
functions related to the MPI implementation as a whole, such as how to
allocate MPI\_File structures and how to report errors.
Code for the ADIO functions is located under the ADIO branch. This code is
responsible for performing I/O operations on whatever underlying storage is
available. There are two categories of directories in this branch. The first
is the common directory. This directory contains two distinct types of
source: source that is used by all ADIO implementations and source that is
common across many ADIO implementations. This distinction will become more
apparent when we discuss file system implementations.
The second category of directory in the ADIO branch is the file system
specific directory (e.g. ad\_ufs, ad\_pvfs2). These directories provide code
that is specific to a particular file system type and is only built if that
file system type is selected at configure time.
\section{The Configure Process}
... What can be specified, AIO stuff, where romioconf exists, how to add
another Makefile.in into the list.
\section{File System Implementations}
Each file system implementation exists in its own subdirectory under the adio
directory in the source tree. Each of these subdirectories must contain at
least two files, a Makefile.in (describing how to build the code in the
directory) and a C source file describing the mapping of ADIO operations to C
functions.
The common practice is to name this file based on the name of the ADIO
implementation. In the ad\_ufs implementation this file is called ad\_ufs.c,
and contains the following:
\begin{verbatim}
struct ADIOI_Fns_struct ADIO_UFS_operations = {
ADIOI_UFS_Open, /* Open */
ADIOI_GEN_ReadContig, /* ReadContig */
ADIOI_GEN_WriteContig, /* WriteContig */
ADIOI_GEN_ReadStridedColl, /* ReadStridedColl */
ADIOI_GEN_WriteStridedColl, /* WriteStridedColl */
ADIOI_GEN_SeekIndividual, /* SeekIndividual */
ADIOI_GEN_Fcntl, /* Fcntl */
ADIOI_GEN_SetInfo, /* SetInfo */
ADIOI_GEN_ReadStrided, /* ReadStrided */
ADIOI_GEN_WriteStrided, /* WriteStrided */
ADIOI_GEN_Close, /* Close */
ADIOI_GEN_IreadContig, /* IreadContig */
ADIOI_GEN_IwriteContig, /* IwriteContig */
ADIOI_GEN_IODone, /* ReadDone */
ADIOI_GEN_IODone, /* WriteDone */
ADIOI_GEN_IOComplete, /* ReadComplete */
ADIOI_GEN_IOComplete, /* WriteComplete */
ADIOI_GEN_IreadStrided, /* IreadStrided */
ADIOI_GEN_IwriteStrided, /* IwriteStrided */
ADIOI_GEN_Flush, /* Flush */
ADIOI_GEN_Resize, /* Resize */
ADIOI_GEN_Delete, /* Delete */
};
\end{verbatim}
The ADIOI\_Fns\_struct structure is defined in adio/include/adioi.h. This
structure holds pointers to appropriate functions for a given file system
type. "Generic" functions, defined in adio/common, are denoted by the
"ADIOI\_GEN" prefix, while file system specific functions use a file system
related prefix. In this example, the only file system specific function is
ADIOI\_UFS\_Open. All other operations use the generic versions.
Typically a third file, a header with file system specific defines and
includes, is also provided and named based on the name of the ADIO
implementation (e.g. ad\_ufs.h).
Because the UFS implementation provides its own open function, that code must be provided in the ad\_ufs subdirectory. That function is implemented in adio/ad\_ufs/ad\_ufs\_open.c.
\section{Generic Functions}
As we saw in the discussion above, generic ADIO function implementations are
used to minimize the amount of code in the ROMIO tree by sharing common
functionality between ADIO implementations. As the ROMIO implementation has
grown, a few categories of generic implementations have developed. At this
time, these are all lumped into the adio/common subdirectory together, which
can be confusing.
The easiest category of generic functions to understand is the ones that
implement functionality in terms of some other ADIO function.
ADIOI\_GEN\_ReadStridedColl is a good example of this type of function and is
implemented in adio/common/ad\_read\_coll.c. This function implements
collective read operations (e.g. MPI\_File\_read\_at\_all). We will discuss how
it works later in this document, but for the time being it is sufficient to
note that it is written in terms of ADIO ReadStrided or ReadContig calls.
A second category of generic functions are ones that implement functionality
in terms of POSIX I/O calls. ADIOI\_GEN\_ReadContig (adio/common/ad\_read.c) is
a good example of this type of function. These "generic" functions are the
result of a large number of ADIO implementations that are largely POSIX I/O
based, such as the UFS, XFS, and PANFS implementations. We have discussed
moving these functions into a separate common/posix subdirectory and renaming
them with ADIOI\_POSIX prefixes, but this has not been done as of the writing
of this document.
The next category of generic functions holds functions that do not actually
require I/O at all. ADIOI\_GEN\_SeekIndividual (adio/common/ad\_seek.c) is a
good example of this. Since we don't need to actually perform I/O at seek
time, we can just update local variables at each process. In fact, one could
argue that we no longer need the ADIO SeekIndividual function at all - all the
ADIO implementations simply use this generic version (with the exception of
TESTFS, which prints the value as well).
The next category of generic functions are the "FAKE" functions (e.g.
ADIOI\_FAKE\_IODone implemented in adio/common/ad\_done\_fake.c). These functions
are all related to asynchronous I/O (AIO) operations. These implement the AIO
operations in terms of blocking operations - in other words, they follow the
standard but do not allow for overlap of I/O and computation or communication.
These are used in cases where AIO support is otherwise unavailable or
unimplemented.
The final category of generic functions are the "naive" functions (e.g.
ADIOI\_GEN\_WriteStrided\_naive in adio/common/ad\_write\_str\_naive.c). These
functions avoid the use of certain optimizations, such as data sieving.
Other Things in adio/common
... what else is in there?
\subsection{Calling ADIO Functions}
Throughout the code you will see calls to functions such as ADIO\_ReadContig.
There is no such function - this is actually a macro defined in
adio/include/adioi.h that calls the particular function out of the correct
ADIOI\_Fns\_struct for the file being accessed. This is done for convenience.
Exceptions!!! ADIO\_Open, ADIO\_Close...
\section{ROMIO Implementation Details}
The ROMIO Implementation relies on some basic concepts in order to operate and
to optimize I/O access. In this section we will discuss these concepts and
how they are implemented within ROMIO. Before we do that though, we will
discuss the core data structure of ROMIO, the ADIO\_File structure.
\subsection{ADIO\_File}
... discussion ...
\subsection{I/O Aggregation and Aggregators}
When performing collective I/O operations, it is often to our advantage to
combine operations or eliminate redundant operations altogether. We call this
combining process "aggregation", and processes that perform these combined
operations aggregators.
Aggregators are defined at the time the file is opened. A collection of MPI
hints can be used to tune what processes become aggregators for a given file
(see ROMIO User's Guide). The aggregators will then interact with the file
system during collective operations.
Note that it is possible to implement a system where ALL I/O operations pass
exclusively through aggregators, including independent I/O operations from
non-aggregators. However, this would require a guarantee of progress from the
aggregators that for portability would mean adding a thread to manage I/O. We
have chosen not to pursue this path at this time, so independent operations
continue to be serviced by the process making the call.
... how implemented ...
Rank 0 in the communicator opening a file \emph{always} processes the
cb\_config\_list hint using ADIOI\_cb\_config\_list\_parse. A previous call to
ADIOI\_cb\_gather\_name\_array had collected the processor names from all hosts
into an array that is cached on the communicator (so we don't have to gather
it more than once). This creates an ordered array of ranks (relative to the
communicator used to open the file) that will be aggregators. This array is
distributed to all processes using ADIOI\_cb\_bcast\_rank\_map. Aggregators are
referenced by their rank in the communicator used to open the file. These
ranks are stored in fd->hints->ranklist[].
Note that this could be a big list for very large runs. If we were to
restrict aggregators to a rank order subset, we could use a bitfield instead.
If the user specified hints and met conditions for deferred open, then a
separate communicator is also set up (fd->agg\_comm) that contains all the
aggregators, in order of their original ranks (not their order in the rank
list). Otherwise this communicator is set to MPI\_COMM\_NULL, and in any case
it is set to this for non-aggregators. This communicator is currently only
used at ADIO\_Close (adio/common/ad\_close.c), but could be useful in two-phase
I/O as well (discussed later).
\subsection{Deferred Open}
We do not always want all processes to attempt to actually open a file when
MPI\_File\_open is called. We might want to avoid this open because in fact
some processes (non-aggregators) cannot access the file at all and would get
an error, or we might want to avoid this open to avoid a storm of system calls
hitting the file system all at once. In either case, ROMIO implements a
"deferred open" mode that allows some processes to avoid opening the file
until such time as they perform an independent I/O operation on the file (see
ROMIO User's Guide).
Deferred open has a broad impact on the ROMIO implementation, because with its
addition there are now many places where we must first check to see if we have
called the file system specific ADIO Open call before performing I/O. This
impact is limited to the MPI-IO layer by semantically guaranteeing the FS ADIO
Open call has been made by the process prior to calling a read or write
function.
... how implemented ...
\subsection{Two-Phase I/O}
Two-Phase I/O is a technique for increasing the efficiency of I/O operations
by reordering data between processes, either before writes, or after reads.
ROMIO implements two-phase I/O as part of the generic implementations of
ADIO\_WriteStridedColl and ADIO\_ReadStridedColl. These implementations in turn
rely heavily on the aggregation code to determine what processes will actually
perform I/O on behalf of the application as a whole.
\subsection{Data Sieving}
Data sieving is a single-process technique for reducing the number of I/O
operations used to service an MPI read or write operation by accessing a
contiguous region of the file that contains more than one desired region at
once. Because often I/O operations require data movement across the network,
this is usually a more efficient way to access data.
Data sieving is implemented in the common strided I/O routines
(adio/common/ad\_write\_str.c and adio/common/ad\_read\_str.c). These functions
use the contig read and write routines to perform actual I/O. In the case of
a write operation, a read/modify/write sequence is used. In that case, as
well as in the atomic mode case, locking is required on the region. Some of
the ADIO implementations do not currently support locking, and in those cases
it would be erroneous to use the generic strided I/O routines.
\subsection{Shared File Pointers}
Because no file systems supported by ROMIO currently support a shared file
pointer mode, ROMIO must implement shared file pointers under the covers on
its own.
Currently ROMIO implements shared file pointers by storing the file pointer
value in a separate file...
Note that the ROMIO team has devised a portable method for implementing shared
file pointers using only MPI-1 and MPI-2 functions. However, this method has
not yet been implemented in ROMIO.
file name is selected at end of mpi-io/open.c.
\subsection{Error Handling}
\subsection{MPI and MPIO Requests}
\section*{Appendix A: ADIO Functions and Semantics}
ADIOI\_Open(ADIO\_File fd, int *error\_code)
Open is used in a strange way in ROMIO, as described previously.
The Open function is used to perform whatever operations are necessary prior
to actually accessing a file using read or write. The file name for the file
is stored in fd->filename prior to Open being called.
Note that when deferred open is in effect, all processes may not immediately
call Open at MPI\_File\_open time, but instead call Open if they perform
independent I/O. This can result in somewhat unusual error returns to
processes (e.g. learning that a file is not accessible at write time).
ADIOI\_ReadContig(ADIO\_File fd, void *buf, int count, MPI\_Datatype datatype,
int file\_ptr\_type, ADIO\_Offset offset, ADIO\_Status *status, int *error\_code)
ReadContig is used to read a contiguous region from a file into a contiguous
buffer. The datatype (which refers to the buffer) can be assumed to be
contiguous. The offset is in bytes and is an absolute offset if
ADIO\_EXPLICIT\_OFFSET was passed as the file\_ptr\_type or relative to the
current individual file pointer if ADIO\_INDIVIDUAL was passed as
file\_ptr\_type. Open has been called by this process prior to the call to
ReadContig. There is no guarantee that any other processes will call this
function at the same time.
ADIOI\_WriteContig(ADIO\_File fd, void *buf, int count, MPI\_Datatype datatype,
int file\_ptr\_type, ADIO\_Offset offset, ADIO\_Status *status, int *error\_code)
WriteContig is used to write a contiguous region to a file from a contiguous
buffer. The datatype (which refers to the buffer) can be assumed to be
contiguous. The offset is in bytes and is an absolute offset if
ADIO\_EXPLICIT\_OFFSET was passed as the file\_ptr\_type or relative to the
current individual file pointer if ADIO\_INDIVIDUAL was passed as
file\_ptr\_type. Open has been called by this process prior to the call to
WriteContig. There is no guarantee that any other processes will call this
function at the same time.
ADIOI\_ReadStridedColl
ADIOI\_WriteStridedColl
ADIOI\_SeekIndividual
ADIOI\_Fcntl
ADIOI\_SetInfo
ADIOI\_ReadStrided
ADIOI\_WriteStrided
ADIOI\_Close(ADIO\_File fd, int *error\_code)
Close is responsible for releasing any resources associated with an open file.
It is called on all processes that called the corresponding ADIOI Open, which
might not be all the processes that opened the file (due to deferred open).
Thus it is not safe to perform collective communication among all processes in
the communicator during Close, although collective communication between
aggregators would be safe (if desired).
For performance reasons ROMIO does not guarantee that all file data is written
to "storage" at MPI\_File\_close, instead only performing synchronization
operations at MPI\_File\_sync time. As a result, our Close implementations do
not typically call a sync. However, any locally cached data, if any, should
be passed on to the underlying storage system at this time.
Note that ADIOI\_GEN\_Close is implemented in adio/common/adi\_close.c;
ad\_close.c implements ADIO\_Close, which is called by all processes that opened
the file.
ADIOI\_IreadContig
ADIOI\_IwriteContig
ADIOI\_ReadDone
ADIOI\_WriteDone
ADIOI\_ReadComplete
ADIOI\_WriteComplete
ADIOI\_IreadStrided
ADIOI\_IwriteStrided
ADIOI\_Flush
ADIOI\_Resize(ADIO\_File fd, ADIO\_Offset size, int *error\_code)
Resize is called collectively by all processes that opened the file referenced
by fd. It is not required that the Resize implementation block until all
processes have completed resize operations, but each process should be able to
see the correct size with a corresponding MPI\_File\_get\_size operation (an
independent operation that results in an ADIO Fcntl to obtain the file size).
ADIOI\_Delete(char *filename, int *error\_code)
Delete is called independently, and because only a filename is passed, there
is no opportunity to coordinate deletion if an application were to choose to
have all processes call MPI\_File\_delete. That's not likely to be an issue
though.
\section*{Appendix B: Status of ADIO Implementations}
... who wrote what, status, etc.
Appendix C: Adding a New ADIO Implementation
References
\end{document}

6
ompi/mca/io/romio/romio/include/.cvsignore Обычный файл

@ -0,0 +1,6 @@
mpio.h mpiof.h
.deps
.libs
.libstamp*
*.lo
.*-cache

@ -1,3 +1,4 @@
#! /bin/sh
LIBS="$LIBS @ROMIO_LIBLIST@"
MPI_OFFSET_TYPE="@MPI_OFFSET_TYPE@"
FORTRAN_MPI_OFFSET="@FORTRAN_MPI_OFFSET@"

@ -0,0 +1,17 @@
.TH MPIO_Request_c2f 3 "11/3/1998" " " "MPI-2"
.SH NAME
MPIO_Request_c2f \- Translates a C I/O-request handle to a Fortran I/O-request handle
.SH SYNOPSIS
.nf
#include "mpi.h"
MPI_Fint MPIO_Request_c2f(MPIO_Request request)
.fi
.SH INPUT PARAMETERS
.PD 0
.TP
.B request
- C I/O-request handle (handle)
.PD 1
.SH RETURN VALUE
Fortran I/O-request handle (integer)

@ -0,0 +1,17 @@
.TH MPIO_Request_f2c 3 "11/3/1998" " " "MPI-2"
.SH NAME
MPIO_Request_f2c \- Translates a Fortran I/O-request handle to a C I/O-request handle
.SH SYNOPSIS
.nf
#include "mpi.h"
MPIO_Request MPIO_Request_f2c(MPI_Fint request)
.fi
.SH INPUT PARAMETERS
.PD 0
.TP
.B request
- Fortran I/O-request handle (integer)
.PD 1
.SH RETURN VALUE
C I/O-request handle (handle)

@ -0,0 +1,36 @@
.TH MPIO_Test 3 "2/5/1998" " " "MPI-2"
.SH NAME
MPIO_Test \- Test the completion of a nonblocking read or write
.SH SYNOPSIS
.nf
#include "mpi.h"
int MPIO_Test(MPIO_Request *request, int *flag, MPI_Status *status)
.fi
.SH INPUT PARAMETERS
.PD 0
.TP
.B request
- request object (handle)
.PD 1
.SH OUTPUT PARAMETERS
.PD 0
.TP
.B flag
- true if operation completed (logical)
.PD 1
.PD 0
.TP
.B status
- status object (Status)
.PD 1
.SH NOTES FOR FORTRAN
All MPI routines in Fortran (except for 'MPI_WTIME' and 'MPI_WTICK')
have an additional argument 'ierr' at the end of the argument list.
'ierr' is an integer and has the same meaning as the return value of
the routine in C. In Fortran, MPI routines are subroutines and are
invoked with the 'call' statement.
All MPI objects (e.g., 'MPI_Datatype', 'MPI_Comm', 'MPI_File') are of
type 'INTEGER' in Fortran.

@ -0,0 +1,31 @@
.TH MPIO_Wait 3 "2/5/1998" " " "MPI-2"
.SH NAME
MPIO_Wait \- Waits for the completion of a nonblocking read or write
.SH SYNOPSIS
.nf
#include "mpi.h"
int MPIO_Wait(MPIO_Request *request, MPI_Status *status)
.fi
.SH INPUT PARAMETERS
.PD 0
.TP
.B request
- request object (handle)
.PD 1
.SH OUTPUT PARAMETERS
.PD 0
.TP
.B status
- status object (Status)
.PD 1
.SH NOTES FOR FORTRAN
All MPI routines in Fortran (except for 'MPI_WTIME' and 'MPI_WTICK')
have an additional argument 'ierr' at the end of the argument list.
'ierr' is an integer and has the same meaning as the return value of
the routine in C. In Fortran, MPI routines are subroutines and are
invoked with the 'call' statement.
All MPI objects (e.g., 'MPI_Datatype', 'MPI_Comm', 'MPI_File') are of
type 'INTEGER' in Fortran.

@ -0,0 +1,17 @@
.TH MPI_File_c2f 3 "11/3/1998" " " "MPI-2"
.SH NAME
MPI_File_c2f \- Translates a C file handle to a Fortran file handle
.SH SYNOPSIS
.nf
#include "mpi.h"
MPI_Fint MPI_File_c2f(MPI_File fh)
.fi
.SH INPUT PARAMETERS
.PD 0
.TP
.B fh
- C file handle (handle)
.PD 1
.SH RETURN VALUE
Fortran file handle (integer)

@ -0,0 +1,24 @@
.TH MPI_File_close 3 "8/31/1998" " " "MPI-2"
.SH NAME
MPI_File_close \- Closes a file
.SH SYNOPSIS
.nf
#include "mpi.h"
int MPI_File_close(MPI_File *fh)
.fi
.SH INPUT PARAMETERS
.PD 0
.TP
.B fh
- file handle (handle)
.PD 1
.SH NOTES FOR FORTRAN
All MPI routines in Fortran (except for 'MPI_WTIME' and 'MPI_WTICK')
have an additional argument 'ierr' at the end of the argument list.
'ierr' is an integer and has the same meaning as the return value of
the routine in C. In Fortran, MPI routines are subroutines and are
invoked with the 'call' statement.
All MPI objects (e.g., 'MPI_Datatype', 'MPI_Comm', 'MPI_File') are of
type 'INTEGER' in Fortran.

@ -0,0 +1,29 @@
.TH MPI_File_delete 3 "2/12/1998" " " "MPI-2"
.SH NAME
MPI_File_delete \- Deletes a file
.SH SYNOPSIS
.nf
#include "mpi.h"
int MPI_File_delete(char *filename, MPI_Info info)
.fi
.SH INPUT PARAMETERS
.PD 0
.TP
.B filename
- name of file to delete (string)
.PD 1
.PD 0
.TP
.B info
- info object (handle)
.PD 1
.SH NOTES FOR FORTRAN
All MPI routines in Fortran (except for 'MPI_WTIME' and 'MPI_WTICK')
have an additional argument 'ierr' at the end of the argument list.
'ierr' is an integer and has the same meaning as the return value of
the routine in C. In Fortran, MPI routines are subroutines and are
invoked with the 'call' statement.
All MPI objects (e.g., 'MPI_Datatype', 'MPI_Comm', 'MPI_File') are of
type 'INTEGER' in Fortran.

@ -0,0 +1,17 @@
.TH MPI_File_f2c 3 "11/3/1998" " " "MPI-2"
.SH NAME
MPI_File_f2c \- Translates a Fortran file handle to a C file handle
.SH SYNOPSIS
.nf
#include "mpi.h"
MPI_File MPI_File_f2c(MPI_Fint fh)
.fi
.SH INPUT PARAMETERS
.PD 0
.TP
.B fh
- Fortran file handle (integer)
.PD 1
.SH RETURN VALUE
C file handle (handle)

@ -0,0 +1,31 @@
.TH MPI_File_get_amode 3 "2/5/1998" " " "MPI-2"
.SH NAME
MPI_File_get_amode \- Returns the file access mode
.SH SYNOPSIS
.nf
#include "mpi.h"
int MPI_File_get_amode(MPI_File fh, int *amode)
.fi
.SH INPUT PARAMETERS
.PD 0
.TP
.B fh
- file handle (handle)
.PD 1
.SH OUTPUT PARAMETERS
.PD 0
.TP
.B amode
- access mode (integer)
.PD 1
.SH NOTES FOR FORTRAN
All MPI routines in Fortran (except for 'MPI_WTIME' and 'MPI_WTICK')
have an additional argument 'ierr' at the end of the argument list.
'ierr' is an integer and has the same meaning as the return value of
the routine in C. In Fortran, MPI routines are subroutines and are
invoked with the 'call' statement.
All MPI objects (e.g., 'MPI_Datatype', 'MPI_Comm', 'MPI_File') are of
type 'INTEGER' in Fortran.

@ -0,0 +1,31 @@
.TH MPI_File_get_atomicity 3 "2/5/1998" " " "MPI-2"
.SH NAME
MPI_File_get_atomicity \- Returns the atomicity mode
.SH SYNOPSIS
.nf
#include "mpi.h"
int MPI_File_get_atomicity(MPI_File fh, int *flag)
.fi
.SH INPUT PARAMETERS
.PD 0
.TP
.B fh
- file handle (handle)
.PD 1
.SH OUTPUT PARAMETERS
.PD 0
.TP
.B flag
- true if atomic mode, false if nonatomic mode (logical)
.PD 1
.SH NOTES FOR FORTRAN
All MPI routines in Fortran (except for 'MPI_WTIME' and 'MPI_WTICK')
have an additional argument 'ierr' at the end of the argument list.
'ierr' is an integer and has the same meaning as the return value of
the routine in C. In Fortran, MPI routines are subroutines and are
invoked with the 'call' statement.
All MPI objects (e.g., 'MPI_Datatype', 'MPI_Comm', 'MPI_File') are of
type 'INTEGER' in Fortran.

@ -0,0 +1,36 @@
.TH MPI_File_get_byte_offset 3 "3/4/1999" " " "MPI-2"
.SH NAME
MPI_File_get_byte_offset \- Returns the absolute byte position in the file corresponding to "offset" etypes relative to the current view
.SH SYNOPSIS
.nf
#include "mpi.h"
int MPI_File_get_byte_offset(MPI_File fh, MPI_Offset offset, MPI_Offset *disp)
.fi
.SH INPUT PARAMETERS
.PD 0
.TP
.B fh
- file handle (handle)
.PD 1
.PD 0
.TP
.B offset
- offset (nonnegative integer)
.PD 1
.SH OUTPUT PARAMETERS
.PD 0
.TP
.B disp
- absolute byte position of offset (nonnegative integer)
.PD 1
.SH NOTES FOR FORTRAN
All MPI routines in Fortran (except for 'MPI_WTIME' and 'MPI_WTICK')
have an additional argument 'ierr' at the end of the argument list.
'ierr' is an integer and has the same meaning as the return value of
the routine in C. In Fortran, MPI routines are subroutines and are
invoked with the 'call' statement.
All MPI objects (e.g., 'MPI_Datatype', 'MPI_Comm', 'MPI_File') are of
type 'INTEGER' in Fortran.

@ -0,0 +1,31 @@
.TH MPI_File_get_group 3 "2/5/1998" " " "MPI-2"
.SH NAME
MPI_File_get_group \- Returns the group of processes that opened the file
.SH SYNOPSIS
.nf
#include "mpi.h"
int MPI_File_get_group(MPI_File fh, MPI_Group *group)
.fi
.SH INPUT PARAMETERS
.PD 0
.TP
.B fh
- file handle (handle)
.PD 1
.SH OUTPUT PARAMETERS
.PD 0
.TP
.B group
- group that opened the file (handle)
.PD 1
.SH NOTES FOR FORTRAN
All MPI routines in Fortran (except for 'MPI_WTIME' and 'MPI_WTICK')
have an additional argument 'ierr' at the end of the argument list.
'ierr' is an integer and has the same meaning as the return value of
the routine in C. In Fortran, MPI routines are subroutines and are
invoked with the 'call' statement.
All MPI objects (e.g., 'MPI_Datatype', 'MPI_Comm', 'MPI_File') are of
type 'INTEGER' in Fortran.

@ -0,0 +1,31 @@
.TH MPI_File_get_info 3 "2/5/1998" " " "MPI-2"
.SH NAME
MPI_File_get_info \- Returns the hints for a file that are actually being used by MPI
.SH SYNOPSIS
.nf
#include "mpi.h"
int MPI_File_get_info(MPI_File fh, MPI_Info *info_used)
.fi
.SH INPUT PARAMETERS
.PD 0
.TP
.B fh
- file handle (handle)
.PD 1
.SH OUTPUT PARAMETERS
.PD 0
.TP
.B info_used
- info object (handle)
.PD 1
.SH NOTES FOR FORTRAN
All MPI routines in Fortran (except for 'MPI_WTIME' and 'MPI_WTICK')
have an additional argument 'ierr' at the end of the argument list.
'ierr' is an integer and has the same meaning as the return value of
the routine in C. In Fortran, MPI routines are subroutines and are
invoked with the 'call' statement.
All MPI objects (e.g., 'MPI_Datatype', 'MPI_Comm', 'MPI_File') are of
type 'INTEGER' in Fortran.

@ -0,0 +1,31 @@
.TH MPI_File_get_position 3 "8/31/1998" " " "MPI-2"
.SH NAME
MPI_File_get_position \- Returns the current position of the individual file pointer in etype units relative to the current view
.SH SYNOPSIS
.nf
#include "mpi.h"
int MPI_File_get_position(MPI_File fh, MPI_Offset *offset)
.fi
.SH INPUT PARAMETERS
.PD 0
.TP
.B fh
- file handle (handle)
.PD 1
.SH OUTPUT PARAMETERS
.PD 0
.TP
.B offset
- offset of individual file pointer (nonnegative integer)
.PD 1
.SH NOTES FOR FORTRAN
All MPI routines in Fortran (except for 'MPI_WTIME' and 'MPI_WTICK')
have an additional argument 'ierr' at the end of the argument list.
'ierr' is an integer and has the same meaning as the return value of
the routine in C. In Fortran, MPI routines are subroutines and are
invoked with the 'call' statement.
All MPI objects (e.g., 'MPI_Datatype', 'MPI_Comm', 'MPI_File') are of
type 'INTEGER' in Fortran.

@ -0,0 +1,31 @@
.TH MPI_File_get_position_shared 3 "8/31/1998" " " "MPI-2"
.SH NAME
MPI_File_get_position_shared \- Returns the current position of the shared file pointer in etype units relative to the current view
.SH SYNOPSIS
.nf
#include "mpi.h"
int MPI_File_get_position_shared(MPI_File fh, MPI_Offset *offset)
.fi
.SH INPUT PARAMETERS
.PD 0
.TP
.B fh
- file handle (handle)
.PD 1
.SH OUTPUT PARAMETERS
.PD 0
.TP
.B offset
- offset of shared file pointer (nonnegative integer)
.PD 1
.SH NOTES FOR FORTRAN
All MPI routines in Fortran (except for 'MPI_WTIME' and 'MPI_WTICK')
have an additional argument 'ierr' at the end of the argument list.
'ierr' is an integer and has the same meaning as the return value of
the routine in C. In Fortran, MPI routines are subroutines and are
invoked with the 'call' statement.
All MPI objects (e.g., 'MPI_Datatype', 'MPI_Comm', 'MPI_File') are of
type 'INTEGER' in Fortran.

@ -0,0 +1,31 @@
.TH MPI_File_get_size 3 "2/5/1998" " " "MPI-2"
.SH NAME
MPI_File_get_size \- Returns the file size
.SH SYNOPSIS
.nf
#include "mpi.h"
int MPI_File_get_size(MPI_File fh, MPI_Offset *size)
.fi
.SH INPUT PARAMETERS
.PD 0
.TP
.B fh
- file handle (handle)
.PD 1
.SH OUTPUT PARAMETERS
.PD 0
.TP
.B size
- size of the file in bytes (nonnegative integer)
.PD 1
.SH NOTES FOR FORTRAN
All MPI routines in Fortran (except for 'MPI_WTIME' and 'MPI_WTICK')
have an additional argument 'ierr' at the end of the argument list.
'ierr' is an integer and has the same meaning as the return value of
the routine in C. In Fortran, MPI routines are subroutines and are
invoked with the 'call' statement.
All MPI objects (e.g., 'MPI_Datatype', 'MPI_Comm', 'MPI_File') are of
type 'INTEGER' in Fortran.

@ -0,0 +1,37 @@
.TH MPI_File_get_type_extent 3 "2/5/1998" " " "MPI-2"
.SH NAME
MPI_File_get_type_extent \- Returns the extent of datatype in the file
.SH SYNOPSIS
.nf
#include "mpi.h"
int MPI_File_get_type_extent(MPI_File fh, MPI_Datatype datatype,
MPI_Aint *extent)
.fi
.SH INPUT PARAMETERS
.PD 0
.TP
.B fh
- file handle (handle)
.PD 1
.PD 0
.TP
.B datatype
- datatype (handle)
.PD 1
.SH OUTPUT PARAMETERS
.PD 0
.TP
.B extent
- extent of the datatype (nonnegative integer)
.PD 1
.SH NOTES FOR FORTRAN
All MPI routines in Fortran (except for 'MPI_WTIME' and 'MPI_WTICK')
have an additional argument 'ierr' at the end of the argument list.
'ierr' is an integer and has the same meaning as the return value of
the routine in C. In Fortran, MPI routines are subroutines and are
invoked with the 'call' statement.
All MPI objects (e.g., 'MPI_Datatype', 'MPI_Comm', 'MPI_File') are of
type 'INTEGER' in Fortran.

@ -0,0 +1,47 @@
.TH MPI_File_get_view 3 "4/6/1998" " " "MPI-2"
.SH NAME
MPI_File_get_view \- Returns the file view
.SH SYNOPSIS
.nf
#include "mpi.h"
int MPI_File_get_view(MPI_File fh, MPI_Offset *disp, MPI_Datatype *etype,
MPI_Datatype *filetype, char *datarep)
.fi
.SH INPUT PARAMETERS
.PD 0
.TP
.B fh
- file handle (handle)
.PD 1
.SH OUTPUT PARAMETERS
.PD 0
.TP
.B disp
- displacement (nonnegative integer)
.PD 1
.PD 0
.TP
.B etype
- elementary datatype (handle)
.PD 1
.PD 0
.TP
.B filetype
- filetype (handle)
.PD 1
.PD 0
.TP
.B datarep
- data representation (string)
.PD 1
.SH NOTES FOR FORTRAN
All MPI routines in Fortran (except for 'MPI_WTIME' and 'MPI_WTICK')
have an additional argument 'ierr' at the end of the argument list.
'ierr' is an integer and has the same meaning as the return value of
the routine in C. In Fortran, MPI routines are subroutines and are
invoked with the 'call' statement.
All MPI objects (e.g., 'MPI_Datatype', 'MPI_Comm', 'MPI_File') are of
type 'INTEGER' in Fortran.

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше