
* Merge in all changes from the mpi-devel branch

This commit was SVN r5040.
This commit is contained in:
Brian Barrett 2005-03-26 18:49:16 +00:00
parent 65017ac13c
commit 372434f9dc
286 changed files with 2394 additions and 1519 deletions

295
MPI_DEVEL_MERGE_README Normal file

@@ -0,0 +1,295 @@
svn merge -r4825:5039 svn+ssh://svn.open-mpi.org/l/svn/ompi/tmp/mpi-devel .
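(Key to the status column below: U = updated cleanly by the merge, A = added, D = deleted, C = conflict that had to be resolved by hand; "Skipped missing target" marks files the merge touched that no longer exist in this tree.)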
U include/mpi.h
C src/event/event.c
Skipped missing target: 'src/runtime/ompi_rte_wait.c'
U src/mpi/c/status_c2f.c
U src/mpi/c/Makefile.am
U src/mpi/c/get_elements.c
U src/mpi/c/status_set_elements.c
U src/mpi/c/status_f2c.c
U src/mpi/c/type_create_darray.c
U src/mpi/c/get_count.c
D src/mpi/c/wtick.c
D src/mpi/c/wtime.c
U src/mpi/f77/keyval_free_f.c
U src/mpi/f77/type_free_keyval_f.c
U src/mpi/f77/rsend_init_f.c
U src/mpi/f77/gatherv_f.c
U src/mpi/f77/open_port_f.c
U src/mpi/f77/type_ub_f.c
U src/mpi/f77/file_get_view_f.c
U src/mpi/f77/sendrecv_f.c
U src/mpi/f77/file_set_atomicity_f.c
U src/mpi/f77/type_get_name_f.c
U src/mpi/f77/file_set_size_f.c
U src/mpi/f77/win_complete_f.c
U src/mpi/f77/win_get_attr_f.c
U src/mpi/f77/file_read_all_end_f.c
U src/mpi/f77/type_create_resized_f.c
U src/mpi/f77/scatterv_f.c
U src/mpi/f77/comm_test_inter_f.c
U src/mpi/f77/bindings.h
U src/mpi/f77/file_read_at_f.c
U src/mpi/f77/type_create_f90_integer_f.c
U src/mpi/f77/file_open_f.c
U src/mpi/f77/file_delete_f.c
U src/mpi/f77/file_iwrite_f.c
U src/mpi/f77/file_iread_shared_f.c
U src/mpi/f77/pack_external_size_f.c
U src/mpi/f77/comm_spawn_multiple_f.c
U src/mpi/f77/dims_create_f.c
U src/mpi/f77/unpack_external_f.c
U src/mpi/f77/file_iread_f.c
U src/mpi/f77/type_match_size_f.c
U src/mpi/f77/reduce_scatter_f.c
U src/mpi/f77/win_get_name_f.c
U src/mpi/f77/type_create_hvector_f.c
U src/mpi/f77/recv_f.c
U src/mpi/f77/win_lock_f.c
U src/mpi/f77/constants.h
U src/mpi/f77/send_init_f.c
U src/mpi/f77/grequest_start_f.c
U src/mpi/f77/info_delete_f.c
U src/mpi/f77/pack_external_f.c
U src/mpi/f77/scan_f.c
U src/mpi/f77/get_address_f.c
U src/mpi/f77/file_iwrite_at_f.c
U src/mpi/f77/testany_f.c
U src/mpi/f77/status_set_cancelled_f.c
U src/mpi/f77/info_free_f.c
U src/mpi/f77/win_fence_f.c
U src/mpi/f77/type_lb_f.c
U src/mpi/f77/win_post_f.c
U src/mpi/f77/isend_f.c
U src/mpi/f77/info_dup_f.c
U src/mpi/f77/wait_f.c
U src/mpi/f77/finalized_f.c
U src/mpi/f77/comm_split_f.c
U src/mpi/f77/type_create_struct_f.c
U src/mpi/f77/unpublish_name_f.c
U src/mpi/f77/win_delete_attr_f.c
U src/mpi/f77/type_hvector_f.c
U src/mpi/f77/file_seek_shared_f.c
U src/mpi/f77/ssend_f.c
U src/mpi/f77/unpack_f.c
U src/mpi/f77/type_set_attr_f.c
U src/mpi/f77/win_create_errhandler_f.c
U src/mpi/f77/group_compare_f.c
U src/mpi/f77/file_read_ordered_end_f.c
U src/mpi/f77/file_write_at_all_f.c
U src/mpi/f77/type_get_true_extent_f.c
U src/mpi/f77/is_thread_main_f.c
U src/mpi/f77/testall_f.c
U src/mpi/f77/pack_size_f.c
U src/mpi/f77/file_write_at_all_begin_f.c
U src/mpi/f77/file_sync_f.c
U src/mpi/f77/info_get_nthkey_f.c
U src/mpi/f77/comm_set_errhandler_f.c
U src/mpi/f77/type_get_envelope_f.c
U src/mpi/f77/file_write_at_f.c
U src/mpi/f77/file_read_ordered_begin_f.c
U src/mpi/f77/graph_create_f.c
U src/mpi/f77/file_set_view_f.c
U src/mpi/f77/errhandler_get_f.c
U src/mpi/f77/file_read_at_all_f.c
U src/mpi/f77/type_hindexed_f.c
U src/mpi/f77/type_dup_f.c
U src/mpi/f77/request_get_status_f.c
U src/mpi/f77/info_get_f.c
U src/mpi/f77/type_set_name_f.c
U src/mpi/f77/pcontrol_f.c
U src/mpi/f77/op_create_f.c
U src/mpi/f77/win_set_attr_f.c
U src/mpi/f77/group_difference_f.c
U src/mpi/f77/put_f.c
U src/mpi/f77/lookup_name_f.c
U src/mpi/f77/win_free_keyval_f.c
U src/mpi/f77/keyval_create_f.c
U src/mpi/f77/win_create_f.c
U src/mpi/f77/accumulate_f.c
U src/mpi/f77/get_version_f.c
U src/mpi/f77/info_get_valuelen_f.c
U src/mpi/f77/file_write_all_begin_f.c
U src/mpi/f77/file_get_group_f.c
U src/mpi/f77/file_write_at_all_end_f.c
U src/mpi/f77/query_thread_f.c
U src/mpi/f77/prototypes_mpi.h
U src/mpi/f77/win_call_errhandler_f.c
U src/mpi/f77/attr_put_f.c
U src/mpi/f77/info_get_nkeys_f.c
U src/mpi/f77/file_iread_at_f.c
U src/mpi/f77/type_create_hindexed_f.c
U src/mpi/f77/exscan_f.c
U src/mpi/f77/group_size_f.c
U src/mpi/f77/file_preallocate_f.c
U src/mpi/f77/start_f.c
U src/mpi/f77/group_incl_f.c
U src/mpi/f77/publish_name_f.c
U src/mpi/f77/group_excl_f.c
U src/mpi/f77/file_read_f.c
U src/mpi/f77/graphdims_get_f.c
U src/mpi/f77/file_write_shared_f.c
U src/mpi/f77/comm_create_errhandler_f.c
U src/mpi/f77/type_struct_f.c
U src/mpi/f77/issend_f.c
U src/mpi/f77/add_error_string_f.c
U src/mpi/f77/file_get_position_shared_f.c
U src/mpi/f77/file_get_info_f.c
U src/mpi/f77/send_f.c
U src/mpi/f77/comm_get_name_f.c
U src/mpi/f77/group_free_f.c
U src/mpi/f77/file_read_shared_f.c
U src/mpi/f77/group_union_f.c
U src/mpi/f77/probe_f.c
U src/mpi/f77/type_get_extent_f.c
U src/mpi/f77/graph_neighbors_count_f.c
U src/mpi/f77/info_create_f.c
U src/mpi/f77/type_create_indexed_block_f.c
U src/mpi/f77/test_f.c
U src/mpi/f77/type_indexed_f.c
U src/mpi/f77/comm_size_f.c
U src/mpi/f77/waitsome_f.c
U src/mpi/f77/comm_create_keyval_f.c
U src/mpi/f77/initialized_f.c
U src/mpi/f77/file_read_all_begin_f.c
U src/mpi/f77/errhandler_create_f.c
U src/mpi/f77/type_create_f90_complex_f.c
U src/mpi/f77/gather_f.c
U src/mpi/f77/type_size_f.c
U src/mpi/f77/op_free_f.c
U src/mpi/f77/intercomm_create_f.c
U src/mpi/f77/group_intersection_f.c
U src/mpi/f77/file_iwrite_shared_f.c
U src/mpi/f77/type_commit_f.c
U src/mpi/f77/testsome_f.c
U src/mpi/f77/file_read_at_all_end_f.c
U src/mpi/f77/win_set_errhandler_f.c
U src/mpi/f77/free_mem_f.c
U src/mpi/f77/type_free_f.c
U src/mpi/f77/recv_init_f.c
U src/mpi/f77/grequest_complete_f.c
U src/mpi/f77/allgatherv_f.c
U src/mpi/f77/request_free_f.c
U src/mpi/f77/file_read_at_all_begin_f.c
U src/mpi/f77/status_set_elements_f.c
U src/mpi/f77/bsend_f.c
U src/mpi/f77/comm_set_attr_f.c
U src/mpi/f77/type_contiguous_f.c
U src/mpi/f77/file_read_all_f.c
U src/mpi/f77/allgather_f.c
U src/mpi/f77/type_create_darray_f.c
U src/mpi/f77/group_rank_f.c
U src/mpi/f77/file_create_errhandler_f.c
U src/mpi/f77/rsend_f.c
U src/mpi/f77/get_elements_f.c
U src/mpi/f77/constants_f.c
U src/mpi/f77/type_create_subarray_f.c
U src/mpi/f77/intercomm_merge_f.c
U src/mpi/f77/error_class_f.c
U src/mpi/f77/type_create_f90_real_f.c
U src/mpi/f77/win_free_f.c
U src/mpi/f77/file_set_errhandler_f.c
U src/mpi/f77/graph_neighbors_f.c
U src/mpi/f77/file_set_info_f.c
U src/mpi/f77/comm_set_name_f.c
U src/mpi/f77/file_get_type_extent_f.c
U src/mpi/f77/reduce_f.c
U src/mpi/f77/file_get_size_f.c
U src/mpi/f77/waitany_f.c
U src/mpi/f77/get_processor_name_f.c
U src/mpi/f77/error_string_f.c
U src/mpi/f77/iprobe_f.c
U src/mpi/f77/startall_f.c
U src/mpi/f77/init_thread_f.c
U src/mpi/f77/win_create_keyval_f.c
U src/mpi/f77/file_write_ordered_f.c
U src/mpi/f77/ssend_init_f.c
U src/mpi/f77/errhandler_set_f.c
U src/mpi/f77/file_write_ordered_begin_f.c
U src/mpi/f77/sendrecv_replace_f.c
U src/mpi/f77/info_set_f.c
U src/mpi/f77/irecv_f.c
U src/mpi/f77/profile/prototypes_pmpi.h
U src/mpi/f77/group_range_incl_f.c
U src/mpi/f77/topo_test_f.c
U src/mpi/f77/file_write_all_f.c
U src/mpi/f77/group_range_excl_f.c
U src/mpi/f77/register_datarep_f.c
U src/mpi/f77/errhandler_free_f.c
U src/mpi/f77/file_write_f.c
U src/mpi/f77/type_delete_attr_f.c
U src/mpi/f77/type_get_contents_f.c
U src/mpi/f77/waitall_f.c
U src/mpi/f77/finalize_f.c
U src/mpi/f77/win_get_errhandler_f.c
U src/mpi/f77/file_get_byte_offset_f.c
U src/mpi/f77/type_extent_f.c
U src/mpi/f77/win_get_group_f.c
U src/mpi/f77/test_cancelled_f.c
U src/mpi/f77/file_read_ordered_f.c
U src/mpi/f77/get_f.c
U src/mpi/f77/file_write_ordered_end_f.c
U src/mpi/f77/file_seek_f.c
U src/mpi/f77/cart_shift_f.c
U src/mpi/f77/get_count_f.c
U src/mpi/f77/graph_map_f.c
U src/mpi/f77/graph_get_f.c
U src/mpi/f77/file_get_position_f.c
U src/mpi/f77/type_vector_f.c
U src/mpi/f77/init_f.c
U src/mpi/f77/comm_spawn_f.c
U src/mpi/f77/ibsend_f.c
U src/mpi/f77/scatter_f.c
U src/mpi/f77/file_write_all_end_f.c
U src/mpi/f77/group_translate_ranks_f.c
U src/mpi/f77/type_get_attr_f.c
U src/mpi/f77/bsend_init_f.c
U src/mpi/f77/pack_f.c
U src/mpi/f77/type_create_keyval_f.c
U src/mpi/f77/file_get_errhandler_f.c
U src/mpi/f77/irsend_f.c
C src/mpi/runtime/ompi_mpi_params.c
U src/request/grequest.c
U src/request/grequest.h
U src/request/req_wait.c
U src/request/request.h
Skipped missing target: 'src/mca/pcm/rsh/pcm_rsh_spawn.c'
Skipped missing target: 'src/mca/pcm/rsh'
Skipped missing target: 'src/mca/pcm'
U src/mca/pml/teg/src/pml_teg_isend.c
U src/mca/pml/teg/src/pml_teg_irecv.c
U src/mca/pml/teg/src/pml_teg_recvreq.h
U src/mca/pml/base/pml_base_sendreq.h
U src/mca/pml/base/pml_base_bsend.c
A src/mca/pml/example
A src/mca/pml/example/pml_example_recvreq.h
A src/mca/pml/example/pml_example.c
A src/mca/pml/example/pml_example_iprobe.c
A src/mca/pml/example/.ompi_ignore
A src/mca/pml/example/pml_example.h
A src/mca/pml/example/pml_example_cancel.c
A src/mca/pml/example/pml_example_start.c
A src/mca/pml/example/configure.params
A src/mca/pml/example/pml_example_ptl.c
A src/mca/pml/example/pml_example_recvfrag.c
A src/mca/pml/example/pml_example_sendreq.c
A src/mca/pml/example/pml_example_component.c
A src/mca/pml/example/pml_example_isend.c
A src/mca/pml/example/pml_example_proc.c
A src/mca/pml/example/pml_example_ptl.h
A src/mca/pml/example/pml_example_progress.c
A src/mca/pml/example/pml_example_recvfrag.h
A src/mca/pml/example/Makefile.am
A src/mca/pml/example/pml_example_sendreq.h
A src/mca/pml/example/pml_example_recvreq.c
A src/mca/pml/example/pml_example_proc.h
A src/mca/pml/example/pml_example_irecv.c
U src/mca/ptl/sm/src/ptl_sm_component.c
C src/mca/ptl/gm/src/ptl_gm.c
C src/mca/ptl/gm/src/ptl_gm_priv.c
C src/mca/ptl/gm/src/ptl_gm_component.c
U src/mca/ptl/gm/src/ptl_gm.h
U src/mca/ptl/tcp/src/ptl_tcp.c
C src/mca/iof/base/iof_base_flush.c

include/mpi.h

@@ -1084,8 +1084,31 @@ OMPI_DECLSPEC int MPI_Win_test(MPI_Win win, int *flag);
OMPI_DECLSPEC int MPI_Win_unlock(int rank, MPI_Win win);
OMPI_DECLSPEC int MPI_Win_wait(MPI_Win win);
#endif
OMPI_DECLSPEC double MPI_Wtick(void);
OMPI_DECLSPEC double MPI_Wtime(void);
/* These 2 functions will shortly become macros, giving access to the high-performance
* timers available on the specific architecture. Until then we leave them here.
* Beware: we don't have a profiling interface for these 2 functions.
*/
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <stdio.h>
static inline double MPI_Wtick(void)
{
return (double)0.000001;
}
static inline double MPI_Wtime(void)
{
struct timeval tv;
double wtime;
gettimeofday(&tv, NULL);
wtime = tv.tv_sec;
wtime += (double)tv.tv_usec / 1000000.0;
return wtime;
}
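/* A minimal usage sketch, not part of this commit (do_work() is a
* hypothetical placeholder): the usual wall-clock idiom is unchanged.
* MPI_Wtime() counts seconds from an arbitrary epoch, so only differences
* are meaningful; MPI_Wtick() reports the resolution, 1 microsecond here
* to match gettimeofday():
*
*     double t0 = MPI_Wtime();
*     do_work();
*     printf("elapsed %.6f s (tick = %g s)\n", MPI_Wtime() - t0, MPI_Wtick());
*/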
/*
* Profiling MPI API

src/event/event.c

@@ -207,7 +207,7 @@ static int ompi_timeout_next(struct timeval *tv)
}
#endif
#if OMPI_ENABLE_PROGRESS_THREADS
/* run loop for dispatch thread */
static void* ompi_event_run(ompi_object_t* arg)
{
@@ -230,7 +230,7 @@ static void* ompi_event_run(ompi_object_t* arg)
#endif
return NULL;
}
#endif /* OMPI_ENABLE_PROGRESS_THREADS */
#if OMPI_ENABLE_PROGRESS_THREADS
static void ompi_event_pipe_handler(int sd, short flags, void* user)

src/mca/pml/base/pml_base_bsend.c

@@ -268,7 +268,7 @@ int mca_pml_base_bsend_request_start(ompi_request_t* request)
/*
* Request completed - free buffer and decrement pending count
*/
*/
int mca_pml_base_bsend_request_fini(ompi_request_t* request)
{
mca_pml_base_send_request_t* sendreq = (mca_pml_base_send_request_t*)request;

src/mca/pml/base/pml_base_sendreq.h

@@ -100,6 +100,7 @@ typedef struct mca_pml_base_send_request_t mca_pml_base_send_request_t;
request->req_base.req_persistent = persistent; \
request->req_base.req_pml_complete = (persistent ? true : false); \
request->req_base.req_free_called = false; \
request->req_base.req_ompi.req_status._cancelled = 0; \
\
/* initialize datatype convertor for this request */ \
if(count > 0) { \
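One-line context for the new _cancelled initialization (an inference from the field name, not something this diff states): ompi_status_public_t carries the flag that MPI_Test_cancelled reports, so a request recycled from the free list without clearing it could report a stale cancellation. A hedged sketch of the user-visible side:

MPI_Status status;
int flag;
MPI_Wait(&request, &status);         /* 'request' is an assumed pending request */
MPI_Test_cancelled(&status, &flag);  /* reads the _cancelled flag set above */
if (!flag) {
    /* the operation completed normally */
}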

0
src/mca/pml/example/.ompi_ignore Normal file

62
src/mca/pml/example/Makefile.am Normal file

@@ -0,0 +1,62 @@
#
# Copyright (c) 2004-2005 The Trustees of Indiana University.
# All rights reserved.
# Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
# All rights reserved.
# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
# Use the top-level Makefile.options
include $(top_ompi_srcdir)/config/Makefile.options
# Make the output library in this directory, and name it either
# mca_<type>_<name>.la (for DSO builds) or libmca_<type>_<name>.la
# (for static builds).
if OMPI_BUILD_pml_example_DSO
component_noinst =
component_install = mca_pml_example.la
else
component_noinst = libmca_pml_example.la
component_install =
endif
local_sources = \
pml_example.c \
pml_example.h \
pml_example_cancel.c \
pml_example_component.c \
pml_example_iprobe.c \
pml_example_irecv.c \
pml_example_isend.c \
pml_example_ptl.c \
pml_example_ptl.h \
pml_example_proc.c \
pml_example_proc.h \
pml_example_progress.c \
pml_example_recvfrag.c \
pml_example_recvfrag.h \
pml_example_recvreq.c \
pml_example_recvreq.h \
pml_example_sendreq.c \
pml_example_sendreq.h \
pml_example_start.c
mcacomponentdir = $(libdir)/openmpi
mcacomponent_LTLIBRARIES = $(component_install)
mca_pml_example_la_SOURCES = $(local_sources)
mca_pml_example_la_LIBADD =
mca_pml_example_la_LDFLAGS = -module -avoid-version
noinst_LTLIBRARIES = $(component_noinst)
libmca_pml_example_la_SOURCES = $(local_sources)
libmca_pml_example_la_LIBADD =
libmca_pml_example_la_LDFLAGS = -module -avoid-version

20
src/mca/pml/example/configure.params Normal file

@@ -0,0 +1,20 @@
# -*- shell-script -*-
#
# Copyright (c) 2004-2005 The Trustees of Indiana University.
# All rights reserved.
# Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
# All rights reserved.
# Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
# Specific to this module
PARAM_INIT_FILE=pml_example.c
PARAM_CONFIG_HEADER_FILE="example_config.h"
PARAM_CONFIG_FILES="Makefile"

66
src/mca/pml/example/pml_example.c Normal file

@@ -0,0 +1,66 @@
/*
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "pml_example.h"
#include "pml_example_recvreq.h"
#include "pml_example_sendreq.h"
mca_pml_example_t mca_pml_example = {
{
mca_pml_example_add_procs,
mca_pml_example_del_procs,
mca_pml_example_add_ptls,
mca_pml_example_control,
mca_pml_example_progress,
mca_pml_example_add_comm,
mca_pml_example_del_comm,
mca_pml_example_irecv_init,
mca_pml_example_irecv,
mca_pml_example_recv,
mca_pml_example_isend_init,
mca_pml_example_isend,
mca_pml_example_send,
mca_pml_example_iprobe,
mca_pml_example_probe,
mca_pml_example_start
}
};
int mca_pml_example_add_comm(ompi_communicator_t* comm)
{
return OMPI_SUCCESS;
}
int mca_pml_example_del_comm(ompi_communicator_t* comm)
{
return OMPI_SUCCESS;
}
int mca_pml_example_add_ptls(ompi_list_t *ptls)
{
return OMPI_SUCCESS;
}
int mca_pml_example_control(int param, void* value, size_t size)
{
return OMPI_SUCCESS;
}
int mca_pml_example_add_procs(ompi_proc_t** procs, size_t nprocs)
{
return OMPI_SUCCESS;
}
int mca_pml_example_del_procs(ompi_proc_t** procs, size_t nprocs)
{
return OMPI_SUCCESS;
}

134
src/mca/pml/example/pml_example.h Normal file

@@ -0,0 +1,134 @@
/*
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef PML_EXAMPLE_H_HAS_BEEN_INCLUDED
#define PML_EXAMPLE_H_HAS_BEEN_INCLUDED
#include "threads/thread.h"
#include "threads/condition.h"
#include "class/ompi_free_list.h"
#include "util/cmd_line.h"
#include "request/request.h"
#include "mca/pml/pml.h"
#include "mca/ptl/ptl.h"
#if defined(c_plusplus) || defined(__cplusplus)
extern "C" {
#endif
struct mca_pml_example_t {
mca_pml_base_module_t super;
mca_ptl_base_component_t **example_ptl_components;
size_t example_num_ptl_components;
mca_ptl_base_module_t** example_ptl_modules;
size_t example_num_ptl_modules;
ompi_list_t example_procs;
ompi_mutex_t example_lock;
/* free list of requests */
ompi_free_list_t example_send_requests;
ompi_free_list_t example_recv_requests;
/* list of pending send requests */
ompi_list_t example_send_pending;
};
typedef struct mca_pml_example_t mca_pml_example_t;
extern mca_pml_example_t mca_pml_example;
/*
* PML interface functions.
*/
extern int mca_pml_example_add_comm( struct ompi_communicator_t* comm );
extern int mca_pml_example_del_comm( struct ompi_communicator_t* comm );
extern int mca_pml_example_add_procs( struct ompi_proc_t **procs, size_t nprocs );
extern int mca_pml_example_del_procs( struct ompi_proc_t **procs, size_t nprocs );
extern int mca_pml_example_add_ptls( ompi_list_t *ptls );
extern int mca_pml_example_control( int param, void *value, size_t size );
extern int mca_pml_example_iprobe( int dst,
int tag,
struct ompi_communicator_t* comm,
int *matched,
ompi_status_public_t* status );
extern int mca_pml_example_probe( int dst,
int tag,
struct ompi_communicator_t* comm,
ompi_status_public_t* status );
extern int mca_pml_example_cancel( ompi_request_t* request );
extern int mca_pml_example_cancelled( ompi_request_t* request, int *flag );
extern int mca_pml_example_isend_init( void *buf,
size_t count,
ompi_datatype_t *datatype,
int dst,
int tag,
mca_pml_base_send_mode_t mode,
struct ompi_communicator_t* comm,
struct ompi_request_t **request );
extern int mca_pml_example_isend( void *buf,
size_t count,
ompi_datatype_t *datatype,
int dst,
int tag,
mca_pml_base_send_mode_t mode,
struct ompi_communicator_t* comm,
struct ompi_request_t **request );
extern int mca_pml_example_send( void *buf,
size_t count,
ompi_datatype_t *datatype,
int dst,
int tag,
mca_pml_base_send_mode_t mode,
struct ompi_communicator_t* comm );
extern int mca_pml_example_irecv_init( void *buf,
size_t count,
ompi_datatype_t *datatype,
int src,
int tag,
struct ompi_communicator_t* comm,
struct ompi_request_t **request );
extern int mca_pml_example_irecv( void *buf,
size_t count,
ompi_datatype_t *datatype,
int src,
int tag,
struct ompi_communicator_t* comm,
struct ompi_request_t **request );
extern int mca_pml_example_recv( void *buf,
size_t count,
ompi_datatype_t *datatype,
int src,
int tag,
struct ompi_communicator_t* comm,
ompi_status_public_t* status );
extern int mca_pml_example_progress(void);
extern int mca_pml_example_start( size_t count, ompi_request_t** requests );
#if defined(c_plusplus) || defined(__cplusplus)
}
#endif
#endif /* PML_EXAMPLE_H_HAS_BEEN_INCLUDED */

26
src/mca/pml/example/pml_example_cancel.c Normal file

@@ -0,0 +1,26 @@
/*
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "pml_example.h"
int mca_pml_example_cancel(ompi_request_t* request)
{
return OMPI_SUCCESS;
}
int mca_pml_example_cancelled(ompi_request_t* request, int* flag)
{
if(NULL != flag)
*flag = 0;
return OMPI_SUCCESS;
}

87
src/mca/pml/example/pml_example_component.c Normal file

@@ -0,0 +1,87 @@
/*
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "event/event.h"
#include "pml_example.h"
#include "mca/base/mca_base_param.h"
static int mca_pml_example_component_open(void);
static int mca_pml_example_component_close(void);
static mca_pml_base_module_t* mca_pml_example_component_init( int* priority,
bool *allow_multi_user_threads, bool *have_hidden_threads );
static int mca_pml_example_component_fini(void);
mca_pml_base_component_1_0_0_t mca_pml_example_component = {
/* First, the mca_base_component_t struct containing meta
* information about the component itself */
{
/* Indicate that we are a pml v1.0.0 component (which also implies
* a specific MCA version) */
MCA_PML_BASE_VERSION_1_0_0,
"example", /* MCA component name */
1, /* MCA component major version */
0, /* MCA component minor version */
0, /* MCA component release version */
mca_pml_example_component_open, /* component open */
mca_pml_example_component_close /* component close */
},
/* Next the MCA v1.0.0 component meta data */
{
/* Whether the component is checkpointable or not */
false
},
mca_pml_example_component_init, /* component init */
mca_pml_example_component_fini /* component finalize */
};
static inline int mca_pml_example_param_register_int( const char* param_name,
int default_value )
{
int id = mca_base_param_register_int("pml","example",param_name,NULL,default_value);
int param_value = default_value;
mca_base_param_lookup_int(id,&param_value);
return param_value;
}
static int mca_pml_example_component_open(void)
{
return OMPI_SUCCESS;
}
static int mca_pml_example_component_close(void)
{
return OMPI_SUCCESS;
}
static mca_pml_base_module_t*
mca_pml_example_component_init( int* priority,
bool *allow_multi_user_threads,
bool *have_hidden_threads )
{
*priority = mca_pml_example_param_register_int( "priority", 0 );
*have_hidden_threads = false;
*allow_multi_user_threads &= true;
return &mca_pml_example.super;
}
static int mca_pml_example_component_fini(void)
{
return OMPI_SUCCESS;
}
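A usage note, not part of the commit: the mca_base_param_register_int("pml","example","priority",...) helper above exposes the knob as pml_example_priority under the usual MCA naming, and because the directory ships an .ompi_ignore file the component is skipped by default builds. Assuming one removed that file and rebuilt, selecting the component would look something like:

# hypothetical invocation; requires the example component to actually be built
mpirun --mca pml example --mca pml_example_priority 100 -np 2 ./a.out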

27
src/mca/pml/example/pml_example_iprobe.c Normal file

@@ -0,0 +1,27 @@
/*
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "request/request.h"
#include "pml_example.h"
int mca_pml_example_iprobe( int src, int tag,
struct ompi_communicator_t *comm,
int *matched, ompi_status_public_t * status )
{
return OMPI_SUCCESS;
}
int mca_pml_example_probe( int src, int tag,
struct ompi_communicator_t *comm,
ompi_status_public_t * status )
{
return OMPI_SUCCESS;
}

48
src/mca/pml/example/pml_example_irecv.c Normal file

@@ -0,0 +1,48 @@
/*
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "pml_example.h"
#include "request/request.h"
int mca_pml_example_irecv_init( void *addr,
size_t count,
ompi_datatype_t * datatype,
int src,
int tag,
struct ompi_communicator_t *comm,
struct ompi_request_t **request )
{
return OMPI_SUCCESS;
}
int mca_pml_example_irecv( void *addr,
size_t count,
ompi_datatype_t * datatype,
int src,
int tag,
struct ompi_communicator_t *comm,
struct ompi_request_t **request )
{
return OMPI_SUCCESS;
}
int mca_pml_example_recv( void *addr,
size_t count,
ompi_datatype_t * datatype,
int src,
int tag,
struct ompi_communicator_t *comm,
ompi_status_public_t * status )
{
return OMPI_SUCCESS;
}

49
src/mca/pml/example/pml_example_isend.c Normal file

@@ -0,0 +1,49 @@
/*
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "pml_example.h"
int mca_pml_example_isend_init( void* buf,
size_t count,
ompi_datatype_t* datatype,
int dst,
int tag,
mca_pml_base_send_mode_t sendmode,
ompi_communicator_t* comm,
ompi_request_t** request )
{
return OMPI_SUCCESS;
}
int mca_pml_example_isend( void* buf,
size_t count,
ompi_datatype_t* datatype,
int dst,
int tag,
mca_pml_base_send_mode_t sendmode,
ompi_communicator_t* comm,
ompi_request_t** request )
{
return OMPI_SUCCESS;
}
int mca_pml_example_send( void *buf,
size_t count,
ompi_datatype_t* datatype,
int dst,
int tag,
mca_pml_base_send_mode_t sendmode,
ompi_communicator_t* comm )
{
return OMPI_SUCCESS;
}

13
src/mca/pml/example/pml_example_proc.c Normal file

@@ -0,0 +1,13 @@
/*
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "pml_example.h"

16
src/mca/pml/example/pml_example_proc.h Normal file

@@ -0,0 +1,16 @@
/*
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef PML_EXAMPLE_PROC_H_HAS_BEEN_INCLUDED
#define PML_EXAMPLE_PROC_H_HAS_BEEN_INCLUDED
#endif /* PML_EXAMPLE_PROC_H_HAS_BEEN_INCLUDED */

17
src/mca/pml/example/pml_example_progress.c Normal file

@@ -0,0 +1,17 @@
/*
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "pml_example.h"
int mca_pml_example_progress(void)
{
return 0;
}

13
src/mca/pml/example/pml_example_ptl.c Normal file

@@ -0,0 +1,13 @@
/*
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "pml_example.h"

16
src/mca/pml/example/pml_example_ptl.h Normal file

@@ -0,0 +1,16 @@
/*
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef PML_EXAMPLE_PTL_H_HAS_BEEN_INCLUDED
#define PML_EXAMPLE_PTL_H_HAS_BEEN_INCLUDED
#endif /* PML_EXAMPLE_PTL_H_HAS_BEEN_INCLUDED */

20
src/mca/pml/example/pml_example_recvfrag.c Normal file

@@ -0,0 +1,20 @@
/*
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "pml_example.h"
#include "pml_example_recvfrag.h"
bool mca_pml_example_recv_frag_match( mca_ptl_base_module_t* ptl,
mca_ptl_base_recv_frag_t* frag,
mca_ptl_base_match_header_t* header )
{
return false;
}

20
src/mca/pml/example/pml_example_recvfrag.h Normal file

@@ -0,0 +1,20 @@
/*
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef PML_EXAMPLE_RECVFRAG_H_HAS_BEEN_INCLUDED
#define PML_EXAMPLE_RECVFRAG_H_HAS_BEEN_INCLUDED
#include "mca/ptl/base/ptl_base_recvfrag.h"
bool mca_pml_example_recv_frag_match( mca_ptl_base_module_t* ptl,
mca_ptl_base_recv_frag_t* frag,
mca_ptl_base_match_header_t* header );
#endif /* PML_EXAMPLE_RECVFRAG_H_HAS_BEEN_INCLUDED */

25
src/mca/pml/example/pml_example_recvreq.c Normal file

@@ -0,0 +1,25 @@
/*
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "pml_example.h"
#include "pml_example_recvreq.h"
/*
* Update the recv request status to reflect the number of bytes
* received and actually delivered to the application.
*/
void mca_pml_example_recv_request_progress( struct mca_ptl_base_module_t* ptl,
mca_pml_base_recv_request_t* req,
size_t bytes_received,
size_t bytes_delivered )
{
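    /* Intentionally left empty in this skeleton. Judging from the comment
     * above, a real PML would account for bytes_received/bytes_delivered
     * in req->req_base and complete the request once everything has been
     * delivered; this stub does neither. */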
}

21
src/mca/pml/example/pml_example_recvreq.h Normal file

@@ -0,0 +1,21 @@
/*
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef PML_EXAMPLE_RECVREQ_H_HAS_BEEN_INCLUDED
#define PML_EXAMPLE_RECVREQ_H_HAS_BEEN_INCLUDED
#include "mca/pml/base/pml_base_recvreq.h"
void mca_pml_example_recv_request_progress( struct mca_ptl_base_module_t* ptl,
mca_pml_base_recv_request_t* req,
size_t bytes_received,
size_t bytes_delivered );
#endif /* PML_EXAMPLE_RECVREQ_H_HAS_BEEN_INCLUDED */

20
src/mca/pml/example/pml_example_sendreq.c Normal file

@@ -0,0 +1,20 @@
/*
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "pml_example.h"
#include "pml_example_sendreq.h"
void mca_pml_example_send_request_progress( struct mca_ptl_base_module_t* ptl,
mca_pml_base_send_request_t* req,
size_t bytes_sent )
{
}

20
src/mca/pml/example/pml_example_sendreq.h Normal file

@@ -0,0 +1,20 @@
/*
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef PML_EXAMPLE_SENDREQ_H_HAS_BEEN_INCLUDED
#define PML_EXAMPLE_SENDREQ_H_HAS_BEEN_INCLUDED
#include "mca/pml/base/pml_base_sendreq.h"
void mca_pml_example_send_request_progress( struct mca_ptl_base_module_t* ptl,
mca_pml_base_send_request_t* req,
size_t bytes_sent );
#endif /* PML_EXAMPLE_SENDREQ_H_HAS_BEEN_INCLUDED */

17
src/mca/pml/example/pml_example_start.c Normal file

@@ -0,0 +1,17 @@
/*
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "pml_example.h"
int mca_pml_example_start(size_t count, ompi_request_t** requests)
{
return OMPI_SUCCESS;
}

src/mca/pml/teg/src/pml_teg_irecv.c

@@ -88,8 +88,7 @@ int mca_pml_teg_recv(void *addr,
count, datatype, src, tag, comm, false);
if ((rc = mca_pml_teg_recv_request_start(recvreq)) != OMPI_SUCCESS) {
MCA_PML_TEG_RECV_REQUEST_RETURN(recvreq);
return rc;
goto recv_finish;
}
if (recvreq->req_base.req_ompi.req_complete == false) {
@@ -108,12 +107,11 @@ int mca_pml_teg_recv(void *addr,
ompi_request_waiting--;
}
}
/* return status */
if (NULL != status) {
recv_finish:
if (NULL != status) { /* return status */
*status = recvreq->req_base.req_ompi.req_status;
}
MCA_PML_TEG_RECV_REQUEST_RETURN(recvreq);
return OMPI_SUCCESS;
return recvreq->req_base.req_ompi.req_status.MPI_ERROR;
}

src/mca/pml/teg/src/pml_teg_isend.c

@@ -72,10 +72,8 @@ int mca_pml_teg_isend(void *buf,
comm, sendmode, false);
MCA_PML_TEG_SEND_REQUEST_START(sendreq, rc);
if (rc != OMPI_SUCCESS)
return rc;
*request = (ompi_request_t *) sendreq;
return OMPI_SUCCESS;
return rc;
}

src/mca/pml/teg/src/pml_teg_recvreq.h

@@ -87,6 +87,7 @@ static inline int mca_pml_teg_recv_request_start(mca_pml_base_recv_request_t* re
request->req_base.req_ompi.req_complete = false;
request->req_base.req_ompi.req_state = OMPI_REQUEST_ACTIVE;
request->req_base.req_ompi.req_status.MPI_ERROR = OMPI_SUCCESS;
request->req_base.req_ompi.req_status._cancelled = 0;
/* attempt to match posted recv */
if(request->req_base.req_peer == OMPI_ANY_SOURCE) {

src/mca/ptl/gm/src/ptl_gm.c

@@ -325,7 +325,7 @@ mca_ptl_gm_matched( mca_ptl_base_module_t * ptl,
mca_ptl_gm_module_t *gm_ptl;
mca_ptl_gm_recv_frag_t *recv_frag;
mca_ptl_gm_peer_t* peer;
struct iovec iov = { NULL, 0};
struct iovec iov = { NULL, 0 };
gm_ptl = (mca_ptl_gm_module_t *)ptl;
request = frag->frag_request;
@@ -333,19 +333,18 @@ mca_ptl_gm_matched( mca_ptl_base_module_t * ptl,
peer = (mca_ptl_gm_peer_t*)recv_frag->frag_recv.frag_base.frag_peer;
if( frag->frag_base.frag_header.hdr_common.hdr_flags & MCA_PTL_FLAGS_ACK ) { /* need to send an ack back */
ompi_list_item_t *item;
ompi_list_item_t *item;
OMPI_FREE_LIST_TRY_GET( &(gm_ptl->gm_send_dma_frags), item );
OMPI_FREE_LIST_TRY_GET( &(gm_ptl->gm_send_dma_frags), item );
if( NULL == item ) {
ompi_output(0,"[%s:%d] unable to alloc a gm fragment\n", __FILE__,__LINE__);
OMPI_THREAD_LOCK (&mca_ptl_gm_component.gm_lock);
ompi_list_append (&mca_ptl_gm_module.gm_pending_acks, (ompi_list_item_t *)frag);
OMPI_THREAD_UNLOCK (&mca_ptl_gm_component.gm_lock);
} else {
ompi_atomic_sub( &(gm_ptl->num_send_tokens), 1 );
assert( gm_ptl->num_send_tokens >= 0 );
hdr = (mca_ptl_base_header_t*)item;
ompi_atomic_sub( &(gm_ptl->num_send_tokens), 1 );
assert( gm_ptl->num_send_tokens >= 0 );
hdr = (mca_ptl_base_header_t*)item;
hdr->hdr_ack.hdr_common.hdr_type = MCA_PTL_HDR_TYPE_ACK;
hdr->hdr_ack.hdr_common.hdr_flags = 0;
@@ -370,12 +369,12 @@ mca_ptl_gm_matched( mca_ptl_base_module_t * ptl,
unsigned int max_data, out_size;
int freeAfter;
iov.iov_len = mca_ptl_gm_component.gm_segment_size - sizeof(mca_ptl_base_rendezvous_header_t);
if( frag->frag_base.frag_size < iov.iov_len ) {
iov.iov_len = frag->frag_base.frag_size;
}
/* Here we expect that frag_addr is the beginning of the buffer, header included */
iov.iov_base = frag->frag_base.frag_addr;
iov.iov_len = mca_ptl_gm_component.gm_segment_size - sizeof(mca_ptl_base_rendezvous_header_t);
if( frag->frag_base.frag_size < iov.iov_len ) {
iov.iov_len = frag->frag_base.frag_size;
}
/* Here we expect that frag_addr is the beginning of the buffer, header included */
iov.iov_base = frag->frag_base.frag_addr;
ompi_convertor_copy( peer->peer_proc->proc_ompi->proc_convertor,
&frag->frag_base.frag_convertor );

src/mca/ptl/gm/src/ptl_gm.h

@@ -51,7 +51,7 @@ extern "C" {
uint32_t gm_free_list_num; /**< initial size of free lists */
uint32_t gm_free_list_max; /**< maximum size of free lists */
uint32_t gm_free_list_inc; /**< number of elements to alloc when growing free lists */
uint32_t gm_segment_size; /**< size of the allocated segment */
uint32_t gm_segment_size; /**< size of the allocated segment */
uint32_t gm_eager_limit; /**< number of bytes before the rendez-vous protocol. If the
**< size of the message is less than this number then GM
**< use a eager protocol. */
@@ -61,6 +61,7 @@ extern "C" {
struct mca_ptl_gm_proc_t* gm_local;
ompi_list_t gm_procs;
ompi_list_t gm_send_req;
ompi_free_list_t gm_unexpected_frags_data;
ompi_mutex_t gm_lock; /**< lock for accessing module state */
};

src/mca/ptl/gm/src/ptl_gm_component.c

@@ -147,9 +147,9 @@ int mca_ptl_gm_component_close (void)
if (NULL != mca_ptl_gm_component.gm_ptl_modules)
free (mca_ptl_gm_component.gm_ptl_modules);
OBJ_DESTRUCT (&mca_ptl_gm_component.gm_procs);
OBJ_DESTRUCT (&mca_ptl_gm_component.gm_send_req);
OBJ_DESTRUCT (&mca_ptl_gm_component.gm_lock);
OBJ_DESTRUCT( &mca_ptl_gm_component.gm_procs );
OBJ_DESTRUCT( &mca_ptl_gm_component.gm_send_req );
OBJ_DESTRUCT( &mca_ptl_gm_component.gm_lock );
return OMPI_SUCCESS;
}
@@ -279,8 +279,8 @@ mca_ptl_gm_discover_boards( mca_ptl_gm_module_t** pptl,
pptl[index]->port_id = port_no;
pptl[index]->gm_port = gm_port;
pptl[index]->local_id = local_id;
pptl[index]->global_id = global_id;
pptl[index]->local_id = local_id;
pptl[index]->global_id = global_id;
/* everything is OK let's mark it as usable and go to the next one */
if( (++index) >= max_ptls ) {
@@ -341,22 +341,22 @@ mca_ptl_gm_init_sendrecv (mca_ptl_gm_module_t * ptl)
ptl->gm_send_dma_memory = gm_dma_malloc( ptl->gm_port,
(mca_ptl_gm_component.gm_segment_size * ptl->num_send_tokens) + GM_PAGE_LEN );
if( NULL == ptl->gm_send_dma_memory ) {
ompi_output( 0, "unable to allocate registered memory\n" );
return OMPI_ERR_OUT_OF_RESOURCE;
ompi_output( 0, "unable to allocate registered memory\n" );
return OMPI_ERR_OUT_OF_RESOURCE;
}
for (i = 0; i < ptl->num_send_tokens; i++) {
sfragment->send_buf = NULL;
OMPI_FREE_LIST_RETURN( &(ptl->gm_send_frags), (ompi_list_item_t *)sfragment );
OMPI_FREE_LIST_RETURN( &(ptl->gm_send_dma_frags),
sfragment->send_buf = NULL;
OMPI_FREE_LIST_RETURN( &(ptl->gm_send_frags), (ompi_list_item_t *)sfragment );
OMPI_FREE_LIST_RETURN( &(ptl->gm_send_dma_frags),
(ompi_list_item_t *)((char*)ptl->gm_send_dma_memory +
i * mca_ptl_gm_component.gm_segment_size) );
sfragment++;
i * mca_ptl_gm_component.gm_segment_size) );
sfragment++;
}
/*****************RECEIVE*****************************/
/* allow remote memory access */
if( GM_SUCCESS != gm_allow_remote_memory_access (ptl->gm_port) ) {
ompi_output (0, "unable to allow remote memory access\n");
ompi_output (0, "unable to allow remote memory access\n");
}
OBJ_CONSTRUCT (&(ptl->gm_recv_outstanding_queue), ompi_list_t);
@@ -365,9 +365,9 @@ mca_ptl_gm_init_sendrecv (mca_ptl_gm_module_t * ptl)
OBJ_CONSTRUCT (&(ptl->gm_recv_frags_free), ompi_free_list_t);
ompi_free_list_init( &(ptl->gm_recv_frags_free),
sizeof (mca_ptl_gm_recv_frag_t),
OBJ_CLASS (mca_ptl_gm_recv_frag_t),
0, /* by default I will provide all items */
sizeof (mca_ptl_gm_recv_frag_t),
OBJ_CLASS (mca_ptl_gm_recv_frag_t),
0, /* by default I will provide all items */
ptl->num_recv_tokens * 10, /* the maximum number of items in the free list */
ptl->num_recv_tokens, /* if it need to allocate some more */
NULL );
@@ -379,25 +379,25 @@ mca_ptl_gm_init_sendrecv (mca_ptl_gm_module_t * ptl)
/*allocate the registered memory */
ptl->gm_recv_dma_memory =
gm_dma_malloc( ptl->gm_port, (mca_ptl_gm_component.gm_segment_size * ptl->num_recv_tokens) + GM_PAGE_LEN );
gm_dma_malloc( ptl->gm_port, (mca_ptl_gm_component.gm_segment_size * ptl->num_recv_tokens) + GM_PAGE_LEN );
if( NULL == ptl->gm_recv_dma_memory ) {
ompi_output( 0, "unable to allocate registered memory for receive\n" );
return OMPI_ERR_OUT_OF_RESOURCE;
ompi_output( 0, "unable to allocate registered memory for receive\n" );
return OMPI_ERR_OUT_OF_RESOURCE;
}
for( i = 0; i < 2; i++ ) {
OMPI_FREE_LIST_RETURN( &(ptl->gm_recv_frags_free), (ompi_list_item_t *)free_rfragment );
free_rfragment++;
OMPI_FREE_LIST_RETURN( &(ptl->gm_recv_frags_free), (ompi_list_item_t *)free_rfragment );
free_rfragment++;
gm_provide_receive_buffer( ptl->gm_port, (char*)ptl->gm_recv_dma_memory + i * mca_ptl_gm_component.gm_segment_size,
GM_SIZE, GM_HIGH_PRIORITY );
gm_provide_receive_buffer( ptl->gm_port, (char*)ptl->gm_recv_dma_memory + i * mca_ptl_gm_component.gm_segment_size,
GM_SIZE, GM_HIGH_PRIORITY );
}
for( i = 2; i < ptl->num_recv_tokens; i++ ) {
OMPI_FREE_LIST_RETURN( &(ptl->gm_recv_frags_free), (ompi_list_item_t *)free_rfragment );
free_rfragment++;
OMPI_FREE_LIST_RETURN( &(ptl->gm_recv_frags_free), (ompi_list_item_t *)free_rfragment );
free_rfragment++;
gm_provide_receive_buffer( ptl->gm_port, (char*)ptl->gm_recv_dma_memory + i * mca_ptl_gm_component.gm_segment_size,
GM_SIZE, GM_LOW_PRIORITY );
gm_provide_receive_buffer( ptl->gm_port, (char*)ptl->gm_recv_dma_memory + i * mca_ptl_gm_component.gm_segment_size,
GM_SIZE, GM_LOW_PRIORITY );
}
OBJ_CONSTRUCT( &(ptl->gm_pending_acks), ompi_list_t );
@@ -415,7 +415,7 @@ mca_ptl_gm_init( mca_ptl_gm_component_t * gm )
/* let's try to find if GM is available */
if( GM_SUCCESS != gm_init() ) {
ompi_output( 0, "[%s:%d] error in initializing the gm library\n", __FILE__, __LINE__ );
return OMPI_ERR_OUT_OF_RESOURCE;
return OMPI_ERR_OUT_OF_RESOURCE;
}
/* First discover all available boards. For each board we will create a unique PTL */
mca_ptl_gm_component.gm_ptl_modules = calloc( mca_ptl_gm_component.gm_max_ptl_modules,
@@ -427,9 +427,9 @@ mca_ptl_gm_init( mca_ptl_gm_component_t * gm )
mca_ptl_gm_component.gm_num_ptl_modules =
mca_ptl_gm_discover_boards( mca_ptl_gm_component.gm_ptl_modules,
mca_ptl_gm_component.gm_max_ptl_modules,
mca_ptl_gm_component.gm_max_boards_number,
mca_ptl_gm_component.gm_max_port_number );
mca_ptl_gm_component.gm_max_ptl_modules,
mca_ptl_gm_component.gm_max_boards_number,
mca_ptl_gm_component.gm_max_port_number );
/* In the case when we are in a multi-threaded environment each
* PTL will have its own thread. At this point all structures are
@@ -444,11 +444,16 @@ mca_ptl_gm_init( mca_ptl_gm_component_t * gm )
break;
if( ompi_using_threads() ) {
#if OMPI_HAVE_POSIX_THREADS
ptl->thread.t_run = (ompi_thread_fn_t)mca_ptl_gm_thread_progress;
ptl->thread.t_arg = (void*)ptl;
ptl->thread.t_run = (ompi_thread_fn_t)mca_ptl_gm_thread_progress;
ptl->thread.t_arg = (void*)ptl;
#endif /* OMPI_HAVE_POSIX_THREADS */
<<<<<<< .working
if( OMPI_SUCCESS != ompi_thread_start( &(ptl->thread) ) ) {
break;
=======
if( OMPI_SUCCESS != ompi_thread_start( &(ptl->thread) ) )
break;
>>>>>>> .merge-right.r5039
}
}
}
@@ -462,6 +467,15 @@ mca_ptl_gm_init( mca_ptl_gm_component_t * gm )
}
mca_ptl_gm_component.gm_num_ptl_modules = save_counter;
/* A free list containing all the memory used to hold data for unexpected requests. */
ompi_free_list_init( &(mca_ptl_gm_component.gm_unexpected_frags_data),
mca_ptl_gm_component.gm_segment_size,
OBJ_CLASS (ompi_list_item_t),
0, /* do not allocate any items I'll provide them */
0, /* maximum number of list allocated elements will be zero */
0,
NULL ); /* not using mpool */
return (mca_ptl_gm_component.gm_num_ptl_modules > 0 ? OMPI_SUCCESS : OMPI_ERR_OUT_OF_RESOURCE);
}
@@ -520,12 +534,16 @@ mca_ptl_gm_component_control (int param, void *value, size_t size)
char* gm_get_local_buffer( void )
{
return malloc( sizeof(char) * mca_ptl_gm_component.gm_segment_size );
ompi_list_item_t* item;
int rc;
OMPI_FREE_LIST_WAIT( &(mca_ptl_gm_component.gm_unexpected_frags_data), item, rc );
return (char*)item;
}
void gm_release_local_buffer( char* ptr )
{
free(ptr);
OMPI_FREE_LIST_RETURN( &(mca_ptl_gm_component.gm_unexpected_frags_data), (ompi_list_item_t*)ptr );
}
/*
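Net effect of the last two hunks, read together with the gm_unexpected_frags_data list set up above: buffers for unexpected messages are now recycled from a pre-sized free list instead of a malloc()/free() pair per message, presumably to keep every buffer at gm_segment_size and take the allocator off the receive path.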

src/mca/ptl/gm/src/ptl_gm_priv.c

@@ -101,57 +101,57 @@ int mca_ptl_gm_receiver_advance_pipeline( mca_ptl_gm_recv_frag_t* frag, int only
/* start the current get */
get_line = &(frag->pipeline.lines[frag->pipeline.pos_transfert]);
if( (PTL_GM_PIPELINE_TRANSFERT & get_line->flags) == PTL_GM_PIPELINE_TRANSFERT ) {
peer->get_started = true;
gm_get( peer->peer_ptl->gm_port, get_line->remote_memory.lval,
get_line->local_memory.pval, get_line->length,
GM_LOW_PRIORITY, peer->local_id, peer->port_number, mca_ptl_gm_get_callback, frag );
get_line->flags ^= PTL_GM_PIPELINE_REMOTE;
DO_DEBUG( count += sprintf( buffer + count, " start get %lld (%d)", get_line->length, frag->pipeline.pos_transfert ); );
frag->pipeline.pos_transfert = (frag->pipeline.pos_transfert + 1) % GM_PIPELINE_DEPTH;
peer->get_started = true;
gm_get( peer->peer_ptl->gm_port, get_line->remote_memory.lval,
get_line->local_memory.pval, get_line->length,
GM_LOW_PRIORITY, peer->local_id, peer->port_number, mca_ptl_gm_get_callback, frag );
get_line->flags ^= PTL_GM_PIPELINE_REMOTE;
DO_DEBUG( count += sprintf( buffer + count, " start get %lld (%d)", get_line->length, frag->pipeline.pos_transfert ); );
frag->pipeline.pos_transfert = (frag->pipeline.pos_transfert + 1) % GM_PIPELINE_DEPTH;
} else if( 1 == onlyifget ) return OMPI_SUCCESS;
/* register the next segment */
reg_line = &(frag->pipeline.lines[frag->pipeline.pos_register]);
length = frag->frag_recv.frag_base.frag_size - frag->frag_bytes_processed;
if( (0 != length) && !(reg_line->flags & PTL_GM_PIPELINE_REGISTER) ) {
reg_line->hdr_flags = get_line->hdr_flags;
reg_line->length = length;
if( reg_line->length > mca_ptl_gm_component.gm_rdma_frag_size )
reg_line->length = mca_ptl_gm_component.gm_rdma_frag_size;
reg_line->offset = get_line->offset + get_line->length;
reg_line->local_memory.lval = 0L;
reg_line->local_memory.pval = (char*)frag->frag_recv.frag_base.frag_addr +
reg_line->offset;
status = gm_register_memory( peer->peer_ptl->gm_port, reg_line->local_memory.pval,
reg_line->length );
if( GM_SUCCESS != status ) {
ompi_output( 0, "Cannot register receiver memory (%p, %ld) bytes offset %ld\n",
reg_line->local_memory.pval,
reg_line->length, reg_line->offset );
return OMPI_ERROR;
}
DO_DEBUG( count += sprintf( buffer + count, " start register %lld (%d)", reg_line->length, frag->pipeline.pos_register ); );
reg_line->flags |= PTL_GM_PIPELINE_REGISTER;
frag->frag_bytes_processed += reg_line->length;
frag->pipeline.pos_register = (frag->pipeline.pos_register + 1) % GM_PIPELINE_DEPTH;
reg_line->hdr_flags = get_line->hdr_flags;
reg_line->length = length;
if( reg_line->length > mca_ptl_gm_component.gm_rdma_frag_size )
reg_line->length = mca_ptl_gm_component.gm_rdma_frag_size;
reg_line->offset = get_line->offset + get_line->length;
reg_line->local_memory.lval = 0L;
reg_line->local_memory.pval = (char*)frag->frag_recv.frag_base.frag_addr +
reg_line->offset;
status = gm_register_memory( peer->peer_ptl->gm_port, reg_line->local_memory.pval,
reg_line->length );
if( GM_SUCCESS != status ) {
ompi_output( 0, "Cannot register receiver memory (%p, %ld) bytes offset %ld\n",
reg_line->local_memory.pval,
reg_line->length, reg_line->offset );
return OMPI_ERROR;
}
DO_DEBUG( count += sprintf( buffer + count, " start register %lld (%d)", reg_line->length, frag->pipeline.pos_register ); );
reg_line->flags |= PTL_GM_PIPELINE_REGISTER;
frag->frag_bytes_processed += reg_line->length;
frag->pipeline.pos_register = (frag->pipeline.pos_register + 1) % GM_PIPELINE_DEPTH;
}
/* deregister the previous one */
dereg_line = &(frag->pipeline.lines[frag->pipeline.pos_deregister]);
if( dereg_line->flags & PTL_GM_PIPELINE_DEREGISTER ) { /* something useful */
/*status = gm_deregister_memory( peer->peer_ptl->gm_port,
status = gm_deregister_memory( peer->peer_ptl->gm_port,
dereg_line->local_memory.pval,
dereg_line->length );
if( GM_SUCCESS != status ) {
if( GM_SUCCESS != status ) {
ompi_output( 0, "unpinning receiver memory from get (%p, %u) failed \n",
dereg_line->local_memory.pval,
dereg_line->length );
}*/
}
dereg_line->flags ^= (PTL_GM_PIPELINE_DEREGISTER|PTL_GM_PIPELINE_REGISTER);
assert( dereg_line->flags == 0 );
frag->frag_bytes_validated += dereg_line->length;
DO_DEBUG( count += sprintf( buffer + count, " start deregister %lld (%d)", dereg_line->length, frag->pipeline.pos_deregister ); )
frag->pipeline.pos_deregister = (frag->pipeline.pos_deregister + 1) % GM_PIPELINE_DEPTH;
DO_DEBUG( count += sprintf( buffer + count, " start deregister %lld (%d)", dereg_line->length, frag->pipeline.pos_deregister ); )
frag->pipeline.pos_deregister = (frag->pipeline.pos_deregister + 1) % GM_PIPELINE_DEPTH;
}
if( frag->frag_recv.frag_base.frag_size <= frag->frag_bytes_validated ) {
@@ -159,7 +159,7 @@ int mca_ptl_gm_receiver_advance_pipeline( mca_ptl_gm_recv_frag_t* frag, int only
frag->frag_recv.frag_request, frag->frag_recv.frag_base.frag_size,
frag->frag_recv.frag_base.frag_size );
OMPI_FREE_LIST_RETURN( &(peer->peer_ptl->gm_recv_frags_free), (ompi_list_item_t*)frag );
DO_DEBUG( count += sprintf( buffer + count, " finish" ); )
DO_DEBUG( count += sprintf( buffer + count, " finish" ); )
}
DO_DEBUG( ompi_output( 0, "%s", buffer ); )
return OMPI_SUCCESS;
@@ -180,21 +180,21 @@ int mca_ptl_gm_sender_advance_pipeline( mca_ptl_gm_send_frag_t* frag )
/* send current segment */
send_line = &(frag->pipeline.lines[frag->pipeline.pos_transfert]);
if( (send_line->flags & PTL_GM_PIPELINE_TRANSFERT) == PTL_GM_PIPELINE_TRANSFERT ) {
ompi_list_item_t* item;
int32_t rc;
ompi_list_item_t* item;
int32_t rc;
OMPI_FREE_LIST_WAIT( &(peer->peer_ptl->gm_send_dma_frags), item, rc );
ompi_atomic_sub( &(peer->peer_ptl->num_send_tokens), 1 );
hdr = (mca_ptl_gm_frag_header_t*)item;
OMPI_FREE_LIST_WAIT( &(peer->peer_ptl->gm_send_dma_frags), item, rc );
ompi_atomic_sub( &(peer->peer_ptl->num_send_tokens), 1 );
hdr = (mca_ptl_gm_frag_header_t*)item;
hdr->hdr_frag.hdr_common.hdr_type = MCA_PTL_HDR_TYPE_FRAG;
hdr->hdr_frag.hdr_common.hdr_flags = send_line->hdr_flags | frag->frag_send.frag_base.frag_header.hdr_common.hdr_flags;
hdr->hdr_frag.hdr_src_ptr.lval = 0L; /* for VALGRIND/PURIFY - REPLACE WITH MACRO */
hdr->hdr_frag.hdr_src_ptr.pval = frag;
hdr->hdr_frag.hdr_dst_ptr = frag->frag_send.frag_base.frag_header.hdr_ack.hdr_dst_match;
hdr->hdr_frag.hdr_frag_offset = send_line->offset;
hdr->hdr_frag.hdr_frag_length = send_line->length;
hdr->registered_memory = send_line->local_memory;
hdr->hdr_frag.hdr_common.hdr_type = MCA_PTL_HDR_TYPE_FRAG;
hdr->hdr_frag.hdr_common.hdr_flags = send_line->hdr_flags | frag->frag_send.frag_base.frag_header.hdr_common.hdr_flags;
hdr->hdr_frag.hdr_src_ptr.lval = 0L; /* for VALGRIND/PURIFY - REPLACE WITH MACRO */
hdr->hdr_frag.hdr_src_ptr.pval = frag;
hdr->hdr_frag.hdr_dst_ptr = frag->frag_send.frag_base.frag_header.hdr_ack.hdr_dst_match;
hdr->hdr_frag.hdr_frag_offset = send_line->offset;
hdr->hdr_frag.hdr_frag_length = send_line->length;
hdr->registered_memory = send_line->local_memory;
gm_send_with_callback( peer->peer_ptl->gm_port, hdr,
GM_SIZE, sizeof(mca_ptl_gm_frag_header_t),
@@ -203,54 +203,54 @@ int mca_ptl_gm_sender_advance_pipeline( mca_ptl_gm_send_frag_t* frag )
peer->port_number,
send_continue_callback, (void*)hdr );
send_line->flags ^= PTL_GM_PIPELINE_REMOTE;
frag->pipeline.pos_transfert = (frag->pipeline.pos_transfert + 1) % GM_PIPELINE_DEPTH;
DO_DEBUG( count += sprintf( buffer + count, " send new fragment %lld", send_line->length ); )
send_line->flags ^= PTL_GM_PIPELINE_REMOTE;
frag->pipeline.pos_transfert = (frag->pipeline.pos_transfert + 1) % GM_PIPELINE_DEPTH;
DO_DEBUG( count += sprintf( buffer + count, " send new fragment %lld", send_line->length ); )
}
/* deregister previous segment */
dereg_line = &(frag->pipeline.lines[frag->pipeline.pos_deregister]);
if( dereg_line->flags & PTL_GM_PIPELINE_DEREGISTER ) { /* something useful */
/*status = gm_deregister_memory( peer->peer_ptl->gm_port,
dereg_line->local_memory.pval, dereg_line->length );
if( GM_SUCCESS != status ) {
ompi_output( 0, "unpinning receiver memory from get (%p, %u) failed \n",
dereg_line->local_memory.pval, dereg_line->length );
}*/
dereg_line->flags ^= (PTL_GM_PIPELINE_REGISTER | PTL_GM_PIPELINE_DEREGISTER);
assert( dereg_line->flags == 0 );
status = gm_deregister_memory( peer->peer_ptl->gm_port,
dereg_line->local_memory.pval, dereg_line->length );
if( GM_SUCCESS != status ) {
ompi_output( 0, "unpinning receiver memory from get (%p, %u) failed \n",
dereg_line->local_memory.pval, dereg_line->length );
}
dereg_line->flags ^= (PTL_GM_PIPELINE_REGISTER | PTL_GM_PIPELINE_DEREGISTER);
assert( dereg_line->flags == 0 );
frag->frag_bytes_validated += dereg_line->length;
frag->pipeline.pos_deregister = (frag->pipeline.pos_deregister + 1) % GM_PIPELINE_DEPTH;
DO_DEBUG( count += sprintf( buffer + count, " start deregister %lld", dereg_line->length ); )
frag->pipeline.pos_deregister = (frag->pipeline.pos_deregister + 1) % GM_PIPELINE_DEPTH;
DO_DEBUG( count += sprintf( buffer + count, " start deregister %lld", dereg_line->length ); )
}
/* register next segment */
reg_line = &(frag->pipeline.lines[frag->pipeline.pos_register]);
if( !(reg_line->flags & PTL_GM_PIPELINE_REGISTER) ) {
reg_line->length = frag->frag_send.frag_base.frag_size - frag->frag_bytes_processed;
if( 0 != reg_line->length ) {
reg_line->hdr_flags = frag->frag_send.frag_base.frag_header.hdr_common.hdr_flags;
if( reg_line->length > mca_ptl_gm_component.gm_rdma_frag_size ) {
reg_line->length = mca_ptl_gm_component.gm_rdma_frag_size;
} else {
reg_line->hdr_flags |= PTL_FLAG_GM_LAST_FRAGMENT;
}
reg_line->offset = send_line->offset + send_line->length;
reg_line->local_memory.lval = 0L;
reg_line->local_memory.pval = (char*)frag->frag_send.frag_base.frag_addr +
reg_line->offset;
status = gm_register_memory( peer->peer_ptl->gm_port, reg_line->local_memory.pval,
reg_line->length );
if( GM_SUCCESS != status ) {
ompi_output( 0, "Cannot register sender memory (%p, %ld) bytes offset %ld\n",
reg_line->local_memory.pval, reg_line->length, reg_line->offset );
return OMPI_ERROR;
}
reg_line->flags |= PTL_GM_PIPELINE_TRANSFERT;
frag->frag_bytes_processed += reg_line->length;
frag->pipeline.pos_register = (frag->pipeline.pos_register + 1) % GM_PIPELINE_DEPTH;
DO_DEBUG( count += sprintf( buffer + count, " start register %lld", reg_line->length ); )
}
reg_line->length = frag->frag_send.frag_base.frag_size - frag->frag_bytes_processed;
if( 0 != reg_line->length ) {
reg_line->hdr_flags = frag->frag_send.frag_base.frag_header.hdr_common.hdr_flags;
if( reg_line->length > mca_ptl_gm_component.gm_rdma_frag_size ) {
reg_line->length = mca_ptl_gm_component.gm_rdma_frag_size;
} else {
reg_line->hdr_flags |= PTL_FLAG_GM_LAST_FRAGMENT;
}
reg_line->offset = send_line->offset + send_line->length;
reg_line->local_memory.lval = 0L;
reg_line->local_memory.pval = (char*)frag->frag_send.frag_base.frag_addr +
reg_line->offset;
status = gm_register_memory( peer->peer_ptl->gm_port, reg_line->local_memory.pval,
reg_line->length );
if( GM_SUCCESS != status ) {
ompi_output( 0, "Cannot register sender memory (%p, %ld) bytes offset %ld\n",
reg_line->local_memory.pval, reg_line->length, reg_line->offset );
return OMPI_ERROR;
}
reg_line->flags |= PTL_GM_PIPELINE_TRANSFERT;
frag->frag_bytes_processed += reg_line->length;
frag->pipeline.pos_register = (frag->pipeline.pos_register + 1) % GM_PIPELINE_DEPTH;
DO_DEBUG( count += sprintf( buffer + count, " start register %lld", reg_line->length ); )
}
}
DO_DEBUG( ompi_output( 0, "%s", buffer ); )
@@ -277,7 +277,7 @@ int mca_ptl_gm_peer_send_continue( mca_ptl_gm_peer_t *ptl_peer,
* before attempting to send the fragment
*/
mca_pml_base_send_request_offset( sendreq,
fragment->frag_send.frag_base.frag_size );
fragment->frag_send.frag_base.frag_size );
DO_DEBUG( ompi_output( 0, "sender start new send length %ld\n", *size ); )
/* The first DMA memory buffer has been allocated at the same time as the fragment */
item = (ompi_list_item_t*)fragment->send_buf;
@@ -285,42 +285,42 @@ int mca_ptl_gm_peer_send_continue( mca_ptl_gm_peer_t *ptl_peer,
remaining_bytes = fragment->frag_send.frag_base.frag_size - fragment->frag_bytes_processed;
if( remaining_bytes <= mca_ptl_gm_component.gm_eager_limit ) { /* small protocol */
int32_t freeAfter;
uint32_t max_data, in_size;
struct iovec iov;
ompi_convertor_t *convertor = &(fragment->frag_send.frag_base.frag_convertor);
int32_t freeAfter;
uint32_t max_data, in_size;
struct iovec iov;
ompi_convertor_t *convertor = &(fragment->frag_send.frag_base.frag_convertor);
/* If we have an eager send then we should send the rest of the data. */
/* If we have an eager send then we should send the rest of the data. */
while( 0 < remaining_bytes ) {
if( NULL == item ) {
OMPI_FREE_LIST_WAIT( &(ptl_peer->peer_ptl->gm_send_dma_frags), item, rc );
ompi_atomic_sub( &(ptl_peer->peer_ptl->num_send_tokens), 1 );
hdr = (mca_ptl_gm_frag_header_t*)item;
}
if( NULL == item ) {
OMPI_FREE_LIST_WAIT( &(ptl_peer->peer_ptl->gm_send_dma_frags), item, rc );
ompi_atomic_sub( &(ptl_peer->peer_ptl->num_send_tokens), 1 );
hdr = (mca_ptl_gm_frag_header_t*)item;
}
iov.iov_base = (char*)item + sizeof(mca_ptl_base_frag_header_t);
iov.iov_len = mca_ptl_gm_component.gm_segment_size - sizeof(mca_ptl_base_frag_header_t);
if( iov.iov_len >= remaining_bytes )
iov.iov_len = remaining_bytes;
if( iov.iov_len >= remaining_bytes )
iov.iov_len = remaining_bytes;
max_data = iov.iov_len;
in_size = 1;
if( ompi_convertor_pack(convertor, &(iov), &in_size, &max_data, &freeAfter) < 0)
return OMPI_ERROR;
hdr->hdr_frag.hdr_common.hdr_type = MCA_PTL_HDR_TYPE_FRAG;
hdr->hdr_frag.hdr_common.hdr_flags = flags;
hdr->hdr_frag.hdr_src_ptr.lval = 0L; /* for VALGRIND/PURIFY - REPLACE WITH MACRO */
hdr->hdr_frag.hdr_src_ptr.pval = fragment;
hdr->hdr_frag.hdr_dst_ptr = sendreq->req_peer_match;
hdr->hdr_frag.hdr_frag_offset = fragment->frag_offset + fragment->frag_bytes_processed;
hdr->hdr_frag.hdr_frag_length = iov.iov_len;
hdr->hdr_frag.hdr_common.hdr_type = MCA_PTL_HDR_TYPE_FRAG;
hdr->hdr_frag.hdr_common.hdr_flags = flags;
hdr->hdr_frag.hdr_src_ptr.lval = 0L; /* for VALGRIND/PURIFY - REPLACE WITH MACRO */
hdr->hdr_frag.hdr_src_ptr.pval = fragment;
hdr->hdr_frag.hdr_dst_ptr = sendreq->req_peer_match;
hdr->hdr_frag.hdr_frag_offset = fragment->frag_offset + fragment->frag_bytes_processed;
hdr->hdr_frag.hdr_frag_length = iov.iov_len;
fragment->frag_bytes_processed += iov.iov_len;
remaining_bytes -= iov.iov_len;
if( remaining_bytes == 0 )
hdr->hdr_frag.hdr_common.hdr_flags |= PTL_FLAG_GM_LAST_FRAGMENT;
fragment->frag_bytes_processed += iov.iov_len;
remaining_bytes -= iov.iov_len;
if( remaining_bytes == 0 )
hdr->hdr_frag.hdr_common.hdr_flags |= PTL_FLAG_GM_LAST_FRAGMENT;
/* for the last piece set the header type to FIN */
/* for the last piece set the header type to FIN */
gm_send_with_callback( ptl_peer->peer_ptl->gm_port, hdr,
GM_SIZE,
iov.iov_len +
@@ -329,15 +329,15 @@ int mca_ptl_gm_peer_send_continue( mca_ptl_gm_peer_t *ptl_peer,
ptl_peer->local_id,
ptl_peer->port_number,
send_continue_callback, (void*)hdr );
item = NULL; /* force to retrieve a new one on the next loop */
item = NULL; /* force to retrieve a new one on the next loop */
}
*size = fragment->frag_bytes_processed;
if( !(flags & MCA_PTL_FLAGS_ACK) ) {
ptl_peer->peer_ptl->super.ptl_send_progress( (mca_ptl_base_module_t*)ptl_peer->peer_ptl,
fragment->frag_send.frag_request,
(*size) );
}
return OMPI_SUCCESS;
if( !(flags & MCA_PTL_FLAGS_ACK) ) {
ptl_peer->peer_ptl->super.ptl_send_progress( (mca_ptl_base_module_t*)ptl_peer->peer_ptl,
fragment->frag_send.frag_request,
(*size) );
}
return OMPI_SUCCESS;
}
pipeline = &(fragment->pipeline.lines[0]);
/* Large set of data => we have to setup a rendez-vous protocol. Here we can
@@ -366,11 +366,11 @@ int mca_ptl_gm_peer_send_continue( mca_ptl_gm_peer_t *ptl_peer,
pipeline->length = fragment->frag_send.frag_base.frag_size % mca_ptl_gm_component.gm_rdma_frag_size;
if( pipeline->length < (mca_ptl_gm_component.gm_rdma_frag_size >> 1) ) {
if( 0 == pipeline->length )
pipeline->length = mca_ptl_gm_component.gm_rdma_frag_size;
else
if( fragment->frag_send.frag_base.frag_size > mca_ptl_gm_component.gm_rdma_frag_size )
pipeline->length = (mca_ptl_gm_component.gm_rdma_frag_size >> 1);
}
pipeline->offset = fragment->frag_offset;
pipeline->hdr_flags = fragment->frag_send.frag_base.frag_header.hdr_common.hdr_flags;
@ -384,7 +384,8 @@ int mca_ptl_gm_peer_send_continue( mca_ptl_gm_peer_t *ptl_peer,
}
pipeline->flags = PTL_GM_PIPELINE_TRANSFERT;
fragment->frag_bytes_processed += pipeline->length;
DO_DEBUG( ompi_output( 0, "sender %p start register %lld (%d)", fragment, pipeline->length, fragment->pipeline.pos_register ); );
DO_DEBUG( ompi_output( 0, "sender %p start register %lld (%d)", fragment, pipeline->length,
fragment->pipeline.pos_register ); );
fragment->pipeline.pos_register = (fragment->pipeline.pos_register + 1) % GM_PIPELINE_DEPTH;
return OMPI_SUCCESS;
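The pipeline setup above gives the first fragment the remainder of the message modulo the RDMA fragment size, promoted to a full fragment when the remainder is zero and to half a fragment when a long message leaves only a small remainder. A standalone sketch of that sizing rule (first_frag_length, msg_size and frag_size are illustrative stand-ins for the frag_size/gm_rdma_frag_size logic, not OMPI symbols):

#include <stddef.h>
#include <stdio.h>

/* First-fragment length rule from the pipeline setup above. */
static size_t first_frag_length( size_t msg_size, size_t frag_size )
{
    size_t length = msg_size % frag_size;
    if( length < (frag_size >> 1) ) {
        if( 0 == length )
            length = frag_size;           /* exact multiple: send a full fragment */
        else if( msg_size > frag_size )
            length = frag_size >> 1;      /* long message, tiny tail: half fragment */
    }
    return length;
}

int main( void )
{
    printf( "%zu\n", first_frag_length( 12000, 4096 ) ); /* 3808: large remainder kept */
    printf( "%zu\n", first_frag_length( 8192,  4096 ) ); /* 4096: zero remainder      */
    printf( "%zu\n", first_frag_length( 10000, 4096 ) ); /* 2048: small remainder     */
    return 0;
}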
/* Now we are waiting for the ack message. Meanwhile we can register the sender first piece
@ -413,10 +414,10 @@ static void send_match_callback( struct gm_port* port, void* context, gm_status_
*/
int mca_ptl_gm_peer_send( struct mca_ptl_base_module_t* ptl,
struct mca_ptl_base_peer_t* ptl_base_peer,
struct mca_pml_base_send_request_t *sendreq,
size_t offset,
size_t size,
int flags )
{
struct iovec iov;
size_t size_in, size_out;
@ -427,6 +428,7 @@ int mca_ptl_gm_peer_send( struct mca_ptl_base_module_t* ptl,
unsigned int in_size, max_data = 0;
mca_ptl_gm_peer_t* ptl_peer = (mca_ptl_gm_peer_t*)ptl_base_peer;
ompi_list_item_t *item;
gm_send_completion_callback_t completion_cb;
char* sendbuf;
OMPI_FREE_LIST_WAIT( &(((mca_ptl_gm_module_t*)ptl)->gm_send_dma_frags), item, rc );
@ -445,27 +447,27 @@ int mca_ptl_gm_peer_send( struct mca_ptl_base_module_t* ptl,
hdr->hdr_common.hdr_type = MCA_PTL_HDR_TYPE_RNDV;
if( size_in > 0 ) {
convertor = &sendreq->req_convertor;
if( (size_in + header_length) <= mca_ptl_gm_component.gm_segment_size )
iov.iov_len = size_in;
else
iov.iov_len = mca_ptl_gm_component.gm_segment_size - header_length;
/* copy the data to the registered buffer */
iov.iov_base = ((char*)hdr) + header_length;
max_data = iov.iov_len;
in_size = 1;
if((rc = ompi_convertor_pack(convertor, &(iov), &in_size, &max_data, &freeAfter)) < 0)
return OMPI_ERROR;
/* must update the offset after actual fragment size is determined
* before attempting to send the fragment
*/
mca_pml_base_send_request_offset( sendreq, max_data );
} else {
iov.iov_len = 0; /* no data will be transmitted */
}
/* adjust size and request offset to reflect actual number of bytes
@ -481,9 +483,8 @@ int mca_ptl_gm_peer_send( struct mca_ptl_base_module_t* ptl,
send_match_callback, (void *)hdr );
if( !(flags & MCA_PTL_FLAGS_ACK) ) {
ptl_peer->peer_ptl->super.ptl_send_progress( (mca_ptl_base_module_t*)ptl_peer->peer_ptl,
sendreq, max_data );
}
return OMPI_SUCCESS;
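In the send path above, the user data that rides in the same GM segment as the rendezvous header is capped by the segment size. A minimal sketch of that computation (the names are illustrative stand-ins, not the OMPI fields):

#include <stddef.h>
#include <stdio.h>

/* How much payload fits next to the header in one GM segment. */
static size_t eager_payload( size_t size_in, size_t segment_size,
                             size_t header_length )
{
    if( (size_in + header_length) <= segment_size )
        return size_in;                      /* whole message fits eagerly */
    return segment_size - header_length;     /* otherwise fill the segment */
}

int main( void )
{
    printf( "%zu\n", eager_payload( 100, 4096, 64 ) );  /* 100: fits eagerly */
    printf( "%zu\n", eager_payload( 9000, 4096, 64 ) ); /* 4032: capped      */
    return 0;
}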
@ -496,36 +497,35 @@ mca_ptl_gm_ctrl_frag( struct mca_ptl_gm_module_t *ptl,
mca_pml_base_send_request_t *req;
if( MCA_PTL_HDR_TYPE_ACK == header->hdr_common.hdr_type ) {
        if( header->hdr_common.hdr_flags & PTL_FLAG_GM_HAS_FRAGMENT ) {
            mca_ptl_gm_send_frag_t* frag = (mca_ptl_gm_send_frag_t*)(header->hdr_ack.hdr_src_ptr.pval);
            /* update the fragment header with the most up-to-date information */
            frag->frag_send.frag_base.frag_header.hdr_ack.hdr_dst_match = header->hdr_ack.hdr_dst_match;
            req = frag->frag_send.frag_request;
            assert(req != NULL);
            req->req_peer_match = header->hdr_ack.hdr_dst_match;
            req->req_peer_addr = header->hdr_ack.hdr_dst_addr;
            req->req_peer_size = header->hdr_ack.hdr_dst_size;
            if( (req->req_peer_size != 0) && (req->req_peer_addr.pval == NULL) ) {
                ptl->super.ptl_send_progress( (mca_ptl_base_module_t*)ptl,
                                              req, frag->frag_send.frag_base.frag_size );
                OMPI_FREE_LIST_RETURN( &(ptl->gm_send_frags), (ompi_list_item_t *)frag );
            } else {
                if( header->hdr_common.hdr_flags & PTL_FLAG_GM_HAS_FRAGMENT ) {
                    frag->frag_send.frag_base.frag_header.hdr_common.hdr_flags |= PTL_FLAG_GM_HAS_FRAGMENT;
                }
            }
        } else {  /* initial reply to a rendez-vous request */
            req = (mca_pml_base_send_request_t*)(header->hdr_ack.hdr_src_ptr.pval);
            req->req_peer_match = header->hdr_ack.hdr_dst_match;
            req->req_peer_addr = header->hdr_ack.hdr_dst_addr;
            req->req_peer_size = header->hdr_ack.hdr_dst_size;
            ptl->super.ptl_send_progress( (mca_ptl_base_module_t*)ptl, req, req->req_offset );
        }
    } else if( MCA_PTL_HDR_TYPE_NACK == header->hdr_common.hdr_type ) {
    } else {
        OMPI_OUTPUT((0, "Unknown header type in ptl_gm_ctrl_frag\n"));
}
return NULL;
}
@ -537,7 +537,7 @@ mca_ptl_gm_ctrl_frag( struct mca_ptl_gm_module_t *ptl,
*/
static mca_ptl_gm_recv_frag_t*
mca_ptl_gm_recv_frag_match( struct mca_ptl_gm_module_t *ptl,
mca_ptl_base_header_t* hdr )
{
mca_ptl_gm_recv_frag_t* recv_frag;
bool matched;
@ -549,12 +549,13 @@ mca_ptl_gm_recv_frag_match( struct mca_ptl_gm_module_t *ptl,
if( MCA_PTL_HDR_TYPE_MATCH == hdr->hdr_rndv.hdr_match.hdr_common.hdr_type ) {
recv_frag->frag_recv.frag_base.frag_addr =
(char*)hdr + sizeof(mca_ptl_base_match_header_t);
recv_frag->frag_recv.frag_base.frag_size = hdr->hdr_match.hdr_msg_length;
} else {
assert( MCA_PTL_HDR_TYPE_RNDV == hdr->hdr_rndv.hdr_match.hdr_common.hdr_type );
recv_frag->frag_recv.frag_base.frag_addr =
(char*)hdr + sizeof(mca_ptl_base_rendezvous_header_t);
recv_frag->frag_recv.frag_base.frag_size = hdr->hdr_rndv.hdr_match.hdr_msg_length;
}
recv_frag->frag_recv.frag_is_buffered = false;
recv_frag->have_allocated_buffer = false;
@ -566,7 +567,7 @@ mca_ptl_gm_recv_frag_match( struct mca_ptl_gm_module_t *ptl,
length = mca_ptl_gm_component.gm_segment_size - sizeof(mca_ptl_base_rendezvous_header_t);
if( recv_frag->frag_recv.frag_base.frag_size < length ) {
length = recv_frag->frag_recv.frag_base.frag_size;
}
/* get some memory and copy the data inside. We can then release the receive buffer */
if( 0 != length ) {
@ -642,14 +643,14 @@ static void mca_ptl_gm_get_callback( struct gm_port *port, void * context, gm_st
switch( status ) {
case GM_SUCCESS:
/* send an ack message to the sender */
mca_ptl_gm_send_quick_fin_message( peer, &(frag->frag_recv.frag_base) );
peer->get_started = false;
/* mark the memory as being ready to be deregistered */
frag->pipeline.lines[frag->pipeline.pos_deregister].flags |= PTL_GM_PIPELINE_DEREGISTER;
DO_DEBUG( ompi_output( 0, "receiver %p get_callback processed %lld validated %lld",
frag, frag->frag_bytes_processed, frag->frag_bytes_validated ); )
mca_ptl_gm_receiver_advance_pipeline( frag, 0 );
break;
case GM_SEND_TIMED_OUT:
ompi_output( 0, "mca_ptl_gm_get_callback timed out\n" );
@ -674,102 +675,102 @@ mca_ptl_gm_recv_frag_frag( struct mca_ptl_gm_module_t* ptl,
mca_ptl_gm_recv_frag_t* frag;
if( hdr->hdr_frag.hdr_common.hdr_flags & PTL_FLAG_GM_HAS_FRAGMENT ) {
frag = (mca_ptl_gm_recv_frag_t*)hdr->hdr_frag.hdr_dst_ptr.pval;
request = (mca_pml_base_recv_request_t*)frag->frag_recv.frag_request;
        /* here we can have a synchronisation problem if several threads work at the
         * same time on the same request. The only question is whether that's possible. */
        convertor = &(frag->frag_recv.frag_base.frag_convertor);
} else {
request = (mca_pml_base_recv_request_t*)hdr->hdr_frag.hdr_dst_ptr.pval;
if( hdr->hdr_frag.hdr_frag_length <= (mca_ptl_gm_component.gm_segment_size - sizeof(mca_ptl_base_frag_header_t)) ) {
ompi_proc_t* proc = ompi_comm_peer_lookup( request->req_base.req_comm,
request->req_base.req_ompi.req_status.MPI_SOURCE );
convertor = &local_convertor;
            convertor->stack_size = 0; /* don't let the convertor free the stack */
ompi_convertor_copy( proc->proc_convertor, convertor );
frag = NULL;
} else { /* large message => we have to create a receive fragment */
frag = mca_ptl_gm_alloc_recv_frag( (struct mca_ptl_base_module_t*)ptl );
frag->frag_recv.frag_request = request;
frag->frag_offset = hdr->hdr_frag.hdr_frag_offset;
frag->matched = true;
frag->frag_recv.frag_base.frag_addr = frag->frag_recv.frag_request->req_base.req_addr;
frag->frag_recv.frag_base.frag_size = hdr->hdr_frag.hdr_frag_length;
frag->frag_recv.frag_base.frag_peer = (struct mca_ptl_base_peer_t*)
mca_pml_teg_proc_lookup_remote_peer( request->req_base.req_comm,
request->req_base.req_ompi.req_status.MPI_SOURCE,
(struct mca_ptl_base_module_t*)ptl );
convertor = &(frag->frag_recv.frag_base.frag_convertor);
}
ompi_convertor_init_for_recv( convertor, 0,
request->req_base.req_datatype,
request->req_base.req_count,
request->req_base.req_addr,
hdr->hdr_frag.hdr_frag_offset, NULL );
}
if( NULL == frag ) {
iov.iov_base = (char*)hdr + sizeof(mca_ptl_base_frag_header_t);
iov.iov_len = hdr->hdr_frag.hdr_frag_length;
iov_count = 1;
max_data = hdr->hdr_frag.hdr_frag_length;
freeAfter = 0; /* unused here */
rc = ompi_convertor_unpack( convertor, &iov, &iov_count, &max_data, &freeAfter );
assert( 0 == freeAfter );
ptl->super.ptl_recv_progress( (mca_ptl_base_module_t*)ptl, request, max_data, max_data );
} else {
gm_status_t status;
mca_ptl_gm_pipeline_line_t* pipeline;
frag->frag_recv.frag_base.frag_header.hdr_frag = hdr->hdr_frag;
if( NULL == hdr->registered_memory.pval ) { /* first round of the local rendez-vous protocol */
/* send an ack message to the sender ... quick hack (TODO) */
frag->frag_recv.frag_base.frag_header.hdr_frag.hdr_frag_length = 0;
mca_ptl_gm_send_quick_fin_message( (mca_ptl_gm_peer_t*)frag->frag_recv.frag_base.frag_peer,
&(frag->frag_recv.frag_base) );
frag->frag_recv.frag_base.frag_header.hdr_frag.hdr_frag_length = hdr->hdr_frag.hdr_frag_length;
pipeline = &(frag->pipeline.lines[0]);
pipeline->length = frag->frag_recv.frag_base.frag_size % mca_ptl_gm_component.gm_rdma_frag_size;
if( pipeline->length < (mca_ptl_gm_component.gm_rdma_frag_size >> 1) ) {
if( 0 == pipeline->length )
pipeline->length = mca_ptl_gm_component.gm_rdma_frag_size;
else
if( frag->frag_recv.frag_base.frag_size > mca_ptl_gm_component.gm_rdma_frag_size )
pipeline->length = (mca_ptl_gm_component.gm_rdma_frag_size >> 1);
}
pipeline->local_memory.lval = 0L;
pipeline->local_memory.pval = (char*)request->req_base.req_addr + hdr->hdr_frag.hdr_frag_offset;
status = gm_register_memory( ptl->gm_port,
pipeline->local_memory.pval, pipeline->length );
if( status != GM_SUCCESS ) {
ompi_output( 0, "Cannot register receiver memory (%p, %ld) bytes offset %ld\n",
(char*)request->req_base.req_addr + hdr->hdr_frag.hdr_frag_offset,
pipeline->length, hdr->hdr_frag.hdr_frag_offset );
return NULL;
}
pipeline->offset = hdr->hdr_frag.hdr_frag_offset;
pipeline->flags |= PTL_GM_PIPELINE_REGISTER;
frag->frag_bytes_processed += pipeline->length;
DO_DEBUG( ompi_output( 0, "receiver %p start register %lld (%d)\n", frag, pipeline->length, frag->pipeline.pos_register ); );
frag->pipeline.pos_register = (frag->pipeline.pos_register + 1) % GM_PIPELINE_DEPTH;
} else {
            /* There is a kind of rendez-vous protocol used internally by the GM driver. If the amount
             * of data to transfer is large enough, then the sender will start by sending a frag message
             * with the remote_memory set to NULL (but with the length set to the length of the first
             * fragment). It allows the receiver to start registering its own memory. Later, when the
             * receiver gets a fragment with the remote_memory field not NULL, it can start getting
             * the data.
             */
pipeline = &(frag->pipeline.lines[frag->pipeline.pos_remote]);
DO_DEBUG( ompi_output( 0, "receiver %p get remote memory length %lld (%d)\n", frag, hdr->hdr_frag.hdr_frag_length, frag->pipeline.pos_remote ); );
frag->pipeline.pos_remote = (frag->pipeline.pos_remote + 1) % GM_PIPELINE_DEPTH;
assert( (pipeline->flags & PTL_GM_PIPELINE_REMOTE) == 0 );
pipeline->remote_memory = hdr->registered_memory;
pipeline->flags |= PTL_GM_PIPELINE_REMOTE;
/*if( false == ((mca_ptl_gm_peer_t*)frag->frag_recv.frag_base.frag_peer)->get_started )*/
mca_ptl_gm_receiver_advance_pipeline( frag, 1 );
}
}
return NULL;
@ -787,21 +788,21 @@ mca_ptl_gm_recv_frag_fin( struct mca_ptl_gm_module_t* ptl,
frag->frag_send.frag_base.frag_header.hdr_ack.hdr_dst_match = hdr->hdr_ack.hdr_dst_match;
if( 0 == hdr->hdr_ack.hdr_dst_size ) {
        /* I just received the ack for the first fragment => set up the pipeline */
        mca_ptl_gm_sender_advance_pipeline( frag );
    } else {
        /* mark the memory as ready to be deregistered */
        frag->pipeline.lines[frag->pipeline.pos_deregister].flags |= PTL_GM_PIPELINE_DEREGISTER;
}
/* continue the pipeline ... send the next segment */
mca_ptl_gm_sender_advance_pipeline( frag );
if( frag->frag_send.frag_base.frag_size == frag->frag_bytes_validated ) {
/* mark the request as done before deregistering the memory */
ptl->super.ptl_send_progress( (mca_ptl_base_module_t*)ptl,
frag->frag_send.frag_request,
frag->frag_bytes_validated );
OMPI_FREE_LIST_RETURN( &(ptl->gm_send_frags), (ompi_list_item_t*)frag );
}
return NULL;
@ -820,17 +821,16 @@ void mca_ptl_gm_outstanding_recv( struct mca_ptl_gm_module_t *ptl )
ompi_list_remove_first( (ompi_list_t *)&(ptl->gm_recv_outstanding_queue) );
matched = ptl->super.ptl_match( &(ptl->super), &(frag->frag_recv),
&(frag->frag_recv.frag_base.frag_header.hdr_match) );
if(!matched) {
ompi_list_append((ompi_list_t *)&(ptl->gm_recv_outstanding_queue),
(ompi_list_item_t *) frag);
} else {
/* if allocated buffer, free the buffer */
/* return the recv descriptor to the free list */
OMPI_FREE_LIST_RETURN(&(ptl->gm_recv_frags_free), (ompi_list_item_t *)frag);
}
}
}
@ -860,38 +860,38 @@ int mca_ptl_gm_analyze_recv_event( struct mca_ptl_gm_module_t* ptl, gm_recv_even
    switch (gm_ntohc(event->recv.type)) {
    case GM_FAST_RECV_EVENT:
    case GM_FAST_PEER_RECV_EVENT:
        priority = GM_LOW_PRIORITY;
    case GM_FAST_HIGH_RECV_EVENT:
    case GM_FAST_HIGH_PEER_RECV_EVENT:
        header = (mca_ptl_base_header_t *)gm_ntohp(event->recv.message);
        goto have_event;
    case GM_RECV_EVENT:
    case GM_PEER_RECV_EVENT:
        priority = GM_LOW_PRIORITY;
    case GM_HIGH_RECV_EVENT:
    case GM_HIGH_PEER_RECV_EVENT:
        header = release_buf;
        goto have_event;
    case GM_NO_RECV_EVENT:
        break;
    default:
        gm_unknown(ptl->gm_port, event);
    }
return 0;
have_event:
    if( header->hdr_common.hdr_type >= MCA_PTL_HDR_TYPE_MAX ) {
        ompi_output( 0, "[%s:%d] unexpected frag type %d\n",
                     __FILE__, __LINE__, header->hdr_common.hdr_type );
    } else {
        function = frag_management_fct[header->hdr_common.hdr_type];
        if( NULL == function ) {
            ompi_output( 0, "[%s:%d] NOT yet implemented function for the header type %d\n",
                         __FILE__, __LINE__, header->hdr_common.hdr_type );
        } else {
            frag = function( ptl, header );
        }
    }
gm_provide_receive_buffer( ptl->gm_port, release_buf, GM_SIZE, priority );
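The have_event path above dispatches through frag_management_fct, an array of handlers indexed by header type, with NULL marking unimplemented types. A self-contained sketch of the same table-driven dispatch, with toy names in place of the OMPI symbols:

#include <stdio.h>

#define TOY_HDR_TYPE_MAX 3

typedef void (*toy_frag_fn_t)( int hdr_type );

static void toy_handle_match( int t ) { printf( "match %d\n", t ); }
static void toy_handle_frag( int t )  { printf( "frag %d\n", t ); }

/* NULL entries mean "not yet implemented", as in the code above. */
static toy_frag_fn_t toy_frag_fct[TOY_HDR_TYPE_MAX] =
    { toy_handle_match, toy_handle_frag, NULL };

static void toy_dispatch( int hdr_type )
{
    if( hdr_type >= TOY_HDR_TYPE_MAX ) {
        printf( "unexpected frag type %d\n", hdr_type );
    } else if( NULL == toy_frag_fct[hdr_type] ) {
        printf( "no handler yet for header type %d\n", hdr_type );
    } else {
        toy_frag_fct[hdr_type]( hdr_type );
    }
}

int main( void )
{
    toy_dispatch( 0 ); /* match            */
    toy_dispatch( 2 ); /* not implemented  */
    toy_dispatch( 9 ); /* unexpected type  */
    return 0;
}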
@ -902,23 +902,23 @@ void mca_ptl_gm_dump_header( char* str, mca_ptl_base_header_t* hdr )
{
switch( hdr->hdr_common.hdr_type ) {
    case MCA_PTL_HDR_TYPE_MATCH:
        goto print_match_hdr;
    case MCA_PTL_HDR_TYPE_RNDV:
        goto print_rndv_hdr;
    case MCA_PTL_HDR_TYPE_FRAG:
        goto print_frag_hdr;
    case MCA_PTL_HDR_TYPE_ACK:
        goto print_ack_hdr;
    case MCA_PTL_HDR_TYPE_NACK:
        goto print_ack_hdr;
    case MCA_PTL_HDR_TYPE_GET:
        goto print_match_hdr;
    case MCA_PTL_HDR_TYPE_FIN:
        goto print_ack_hdr;
    case MCA_PTL_HDR_TYPE_FIN_ACK:
        goto print_match_hdr;
    default:
        ompi_output( 0, "unknown header of type %d\n", hdr->hdr_common.hdr_type );
}
return;


@ -213,7 +213,7 @@ int mca_ptl_sm_component_close(void)
#if OMPI_ENABLE_PROGRESS_THREADS == 1
/* close/cleanup fifo create for event notification */
if(mca_ptl_sm_component.sm_fifo_fd > 0) {
/* write a done message down the pipe */
unsigned char cmd = DONE;
if( write(mca_ptl_sm_component.sm_fifo_fd,&cmd,sizeof(cmd)) !=


@ -192,8 +192,11 @@ void mca_ptl_tcp_request_fini(struct mca_ptl_base_module_t* ptl, struct mca_pml_
void mca_ptl_tcp_recv_frag_return(struct mca_ptl_base_module_t* ptl, struct mca_ptl_tcp_recv_frag_t* frag)
{
if(frag->frag_recv.frag_is_buffered) {
free(frag->frag_recv.frag_base.frag_addr);
frag->frag_recv.frag_is_buffered = false;
frag->frag_recv.frag_base.frag_addr = NULL;
}
OMPI_FREE_LIST_RETURN(&mca_ptl_tcp_component.tcp_recv_frags, (ompi_list_item_t*)frag);
}
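The two added assignments matter because the descriptor is recycled through a free list: without them a reused descriptor could still claim a buffered address and free it a second time. A toy model of the hazard being closed (types and names are illustrative, not the OMPI structures):

#include <stdbool.h>
#include <stdlib.h>

typedef struct { bool is_buffered; void *addr; } toy_frag_t;

/* Return a descriptor to a free list; clearing both fields keeps a
 * recycled descriptor from freeing the same buffer twice. */
static void toy_frag_return( toy_frag_t *frag )
{
    if( frag->is_buffered ) {
        free( frag->addr );
        frag->is_buffered = false;
        frag->addr = NULL;
    }
    /* ... push frag back onto the free list here ... */
}

int main( void )
{
    toy_frag_t frag = { true, malloc( 64 ) };
    toy_frag_return( &frag );
    toy_frag_return( &frag ); /* safe: second return is a no-op */
    return 0;
}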


@ -52,9 +52,7 @@ headers = bindings.h
#
libmpi_c_la_SOURCES = \
attr_fn.c
#
# libmpi_c_mpi.la is only built in some cases (see above)


@ -46,13 +46,11 @@ int MPI_Get_count(MPI_Status *status, MPI_Datatype datatype, int *count)
if( ompi_ddt_type_size( datatype, &size ) == MPI_SUCCESS ) {
        if( size == 0 ) {
            *count = 0;
            return MPI_SUCCESS;
        }
        *count = status->_count / size;
        if( ((*count) * size) == status->_count )
            return MPI_SUCCESS;
        *count = MPI_UNDEFINED;
    }
    return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME);
}
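The rewritten logic derives the element count from the byte count recorded in the status and yields MPI_UNDEFINED when the bytes do not form a whole number of elements. A runnable sketch of just that divisibility check (byte_count, type_size and the sentinel are illustrative stand-ins for the OMPI fields):

#include <stdio.h>

#define TOY_UNDEFINED (-32766) /* stand-in for MPI_UNDEFINED */

static int toy_element_count( int byte_count, int type_size )
{
    int count = byte_count / type_size;
    return (count * type_size == byte_count) ? count : TOY_UNDEFINED;
}

int main( void )
{
    printf( "%d\n", toy_element_count( 12, 4 ) ); /* 3             */
    printf( "%d\n", toy_element_count( 12, 8 ) ); /* TOY_UNDEFINED */
    return 0;
}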


@ -51,7 +51,6 @@ int MPI_Get_elements(MPI_Status *status, MPI_Datatype datatype, int *count)
if( (datatype->flags & DT_FLAG_BASIC) == DT_FLAG_BASIC ) {
if( size != 0 ) {
*count = MPI_UNDEFINED;
}
return MPI_SUCCESS;
}


@ -36,6 +36,8 @@ static const char FUNC_NAME[] = "MPI_Status_c2f";
int MPI_Status_c2f(MPI_Status *c_status, MPI_Fint *f_status)
{
int i, *c_ints;
if (MPI_PARAM_CHECK) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -49,10 +51,16 @@ int MPI_Status_c2f(MPI_Status *c_status, MPI_Fint *f_status)
}
}
c_ints = (int*)c_status;
for( i = 0; i < (int)(sizeof(MPI_Status) / sizeof(int)); i++ )
f_status[i] = OMPI_INT_2_FINT(c_ints[i]);
/*
f_status[0] = OMPI_INT_2_FINT(c_status->MPI_SOURCE);
f_status[1] = OMPI_INT_2_FINT(c_status->MPI_TAG);
f_status[2] = OMPI_INT_2_FINT(c_status->MPI_ERROR);
f_status[3] = OMPI_INT_2_FINT(c_status->_count);
f_status[4] = OMPI_INT_2_FINT(c_status->_cancelled);
*/
return MPI_SUCCESS;
}
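The new conversion treats the whole MPI_Status as a flat array of ints instead of naming each field, so it keeps working if fields are added. A minimal sketch of the same trick on a toy status type (not the real MPI_Status layout):

#include <stdio.h>

typedef struct { int source; int tag; int error; int count; } toy_status_t;

/* Mirror of the f_status[i] = OMPI_INT_2_FINT(c_ints[i]) loop above. */
static void toy_status_to_ints( const toy_status_t *st, int *out )
{
    const int *c_ints = (const int*)st;
    unsigned i;
    for( i = 0; i < sizeof(toy_status_t) / sizeof(int); i++ )
        out[i] = c_ints[i];
}

int main( void )
{
    toy_status_t st = { 1, 2, 0, 12 };
    int flat[sizeof(toy_status_t) / sizeof(int)];
    toy_status_to_ints( &st, flat );
    printf( "%d %d %d %d\n", flat[0], flat[1], flat[2], flat[3] );
    return 0;
}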


@ -35,6 +35,8 @@ static const char FUNC_NAME[] = "MPI_Status_f2c";
int MPI_Status_f2c(MPI_Fint *f_status, MPI_Status *c_status)
{
int i, *c_ints;
if (MPI_PARAM_CHECK) {
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
@ -58,11 +60,16 @@ int MPI_Status_f2c(MPI_Fint *f_status, MPI_Status *c_status)
/* We can't use OMPI_FINT_2_INT here because of some complications
with include files. :-( So just do the casting manually. */
c_ints = (int*)c_status;
for( i = 0; i < (int)(sizeof(MPI_Status) / sizeof(int)); i++ )
c_ints[i] = (int)f_status[i];
/*
c_status->MPI_SOURCE = (int) f_status[0];
c_status->MPI_TAG = (int) f_status[1];
c_status->MPI_ERROR = (int) f_status[2];
c_status->_count = (int) f_status[3];
c_status->_cancelled = (int) f_status[4];
*/
return MPI_SUCCESS;
}


@ -50,5 +50,11 @@ int MPI_Status_set_elements(MPI_Status *status, MPI_Datatype datatype,
if (status != MPI_STATUS_IGNORE) {
status->_count = count;
}
/* I don't have the MPI standard with me, but I strongly suspect
 * that the expected behaviour of this function should take into
 * account the real size of the datatype. Otherwise this argument
 * is completely useless ...
 */
/* This function is not yet implemented */
OMPI_ERRHANDLER_RETURN(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
}


@ -32,6 +32,10 @@
static const char FUNC_NAME[] = "MPI_Type_create_darray";
static MPI_Datatype cyclic( int32_t darg, int32_t gsize, int32_t r, int32_t psize, MPI_Datatype oldtype )
{
return &ompi_mpi_datatype_null;
}
int MPI_Type_create_darray(int size,
int rank,
@ -45,9 +49,11 @@ int MPI_Type_create_darray(int size,
MPI_Datatype *newtype)
{
int32_t i, step, end_loop, *r;
MPI_Datatype temptype;
if (MPI_PARAM_CHECK) {
int prod_psize = 1;
OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
if( (rank < 0) || (size < 0) || (rank >= size) ) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME);
@ -68,17 +74,50 @@ int MPI_Type_create_darray(int size,
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME);
} else if( (gsize_array[i] < 1) || (darg_array[i] < 0) || (psize_array[i] < 0) ) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME);
} else if( (MPI_DISTRIBUTE_DFLT_DARG != darg_array[i]) &&
(MPI_DISTRIBUTE_BLOCK == distrib_array[i]) &&
((darg_array[i] * psize_array[i]) < gsize_array[i]) ) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME);
}
} else if( 1 > psize_array[i] )
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME);
prod_psize *= psize_array[i];
}
if( prod_psize != size )
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME);
}
if( ndims < 1 ) {
*newtype = &ompi_mpi_datatype_null;
return MPI_SUCCESS;
}
r = (int*)malloc( ndims * sizeof(int) );
{
int t_rank = rank;
int t_size = size;
for( i = 0; i < ndims; i++ ) {
t_size = t_size / psize_array[i];
r[i] = t_rank / t_size;
t_rank = t_rank % t_size;
}
}
if( MPI_ORDER_FORTRAN == order ) {
i = 0;
step = 1;
end_loop = ndims;
} else {
i = ndims - 1;
step = -1;
end_loop = -1;
}
temptype = cyclic( darg_array[i], gsize_array[i], r[i], psize_array[i], oldtype );
for( i += step; i != end_loop; i += step ) {
*newtype = cyclic( darg_array[i], gsize_array[i], r[i], psize_array[i], temptype );
ompi_ddt_destroy( &temptype );
temptype = *newtype;
}
free( r );
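The r[] loop above converts a linear rank into coordinates on the process grid by peeling off one dimension at a time. A standalone check of that arithmetic (the 2 x 3 x 2 grid is an invented example): rank 7 of 12 comes out at coordinate (1, 0, 1).

#include <stdio.h>

int main( void )
{
    int psize[3] = { 2, 3, 2 }; /* illustrative process grid */
    int r[3], t_rank = 7, t_size = 12, i;

    for( i = 0; i < 3; i++ ) {
        t_size = t_size / psize[i];
        r[i] = t_rank / t_size;   /* coordinate in dimension i   */
        t_rank = t_rank % t_size; /* remainder for the next dims */
    }
    printf( "(%d, %d, %d)\n", r[0], r[1], r[2] ); /* (1, 0, 1) */
    return 0;
}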


@ -1,34 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#include "mpi.h"
#include "mpi/c/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Wtick = PMPI_Wtick
#endif
#if OMPI_PROFILING_DEFINES
#include "mpi/c/profile/defines.h"
#endif
double MPI_Wtick(void)
{
return (double)0.000001;
}


@ -1,42 +0,0 @@
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University.
* All rights reserved.
* Copyright (c) 2004-2005 The Trustees of the University of Tennessee.
* All rights reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include <stdio.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include "mpi.h"
#include "mpi/c/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILING_DEFINES
#pragma weak MPI_Wtime = PMPI_Wtime
#endif
#if OMPI_PROFILING_DEFINES
#include "mpi/c/profile/defines.h"
#endif
double MPI_Wtime(void)
{
struct timeval tv;
double wtime;
gettimeofday(&tv, NULL);
wtime = tv.tv_sec;
wtime += (double)tv.tv_usec / 1000000.0;
return wtime;
}


@ -66,7 +66,7 @@ void mpi_accumulate_f(char *origin_addr, MPI_Fint *origin_count,
MPI_Win c_win = MPI_Win_f2c(*win);
MPI_Op c_op = MPI_Op_f2c(*op);
*ierr = OMPI_INT_2_FINT(MPI_Accumulate(OMPI_ADDR(origin_addr),
OMPI_FINT_2_INT(*origin_count),
c_origin_datatype,
OMPI_FINT_2_INT(*target_rank),


@ -16,9 +16,9 @@
#include "ompi_config.h"
#include "mpi/f77/strings.h"
#include "mpi/f77/bindings.h"
#include "mpi/f77/constants.h"
#include "mpi/f77/strings.h"
#include "communicator/communicator.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -66,11 +66,11 @@ void mpi_allgather_f(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype,
c_sendtype = MPI_Type_f2c(*sendtype);
c_recvtype = MPI_Type_f2c(*recvtype);
*ierr = OMPI_INT_2_FINT(MPI_Allgather(sendbuf,
OMPI_FINT_2_INT(*sendcount),
c_sendtype,
recvbuf,
OMPI_FINT_2_INT(*recvcount),
c_recvtype, c_comm));
}


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
@ -74,10 +73,10 @@ void mpi_allgatherv_f(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype,
OMPI_ARRAY_FINT_2_INT(recvcounts, size);
OMPI_ARRAY_FINT_2_INT(displs, size);
*ierr = OMPI_INT_2_FINT(MPI_Allgatherv(sendbuf,
OMPI_FINT_2_INT(*sendcount),
c_sendtype,
recvbuf,
OMPI_ARRAY_NAME_CONVERT(recvcounts),
OMPI_ARRAY_NAME_CONVERT(displs),
c_recvtype, c_comm));


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -20,6 +20,10 @@
#include "ompi_config.h"
#include "mpi.h"
#include "errhandler/errhandler.h"
#include "attribute/attribute.h"
#include "op/op.h"
#include "request/grequest.h"
/*
* We now build all four fortran bindings and don't care too much about


@ -16,8 +16,8 @@
#include "ompi_config.h"
#include "mpi/f77/bindings.h"
#include "mpi/f77/constants.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_BSEND = mpi_bsend_f
@ -63,7 +63,7 @@ void mpi_bsend_f(char *buf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *dest,
c_comm = MPI_Comm_f2c (*comm);
*ierr = OMPI_INT_2_FINT(MPI_Bsend(OMPI_ADDR(buf), OMPI_FINT_2_INT(*count),
c_type, OMPI_FINT_2_INT(*dest),
OMPI_FINT_2_INT(*tag), c_comm));
}


@ -17,6 +17,7 @@
#include "ompi_config.h"
#include "mpi/f77/bindings.h"
#include "mpi/f77/constants.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_BSEND_INIT = mpi_bsend_init_f
@ -63,7 +64,7 @@ void mpi_bsend_init_f(char *buf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *
c_comm = MPI_Comm_f2c (*comm);
*ierr = OMPI_INT_2_FINT(MPI_Bsend_init(OMPI_ADDR(buf), OMPI_FINT_2_INT(*count),
c_type,
OMPI_FINT_2_INT(*dest),
OMPI_FINT_2_INT(*tag),


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -29,7 +29,7 @@ OMPI_GENERATE_F77_BINDINGS (PMPI_COMM_CREATE_ERRHANDLER,
pmpi_comm_create_errhandler_,
pmpi_comm_create_errhandler__,
pmpi_comm_create_errhandler_f,
(ompi_errhandler_fortran_handler_fn_t* function, MPI_Fint *errhandler, MPI_Fint *ierr),
(function, errhandler, ierr) )
#endif
@ -46,7 +46,7 @@ OMPI_GENERATE_F77_BINDINGS (MPI_COMM_CREATE_ERRHANDLER,
mpi_comm_create_errhandler_,
mpi_comm_create_errhandler__,
mpi_comm_create_errhandler_f,
(ompi_errhandler_fortran_handler_fn_t* function, MPI_Fint *errhandler, MPI_Fint *ierr),
(function, errhandler, ierr) )
#endif
@ -55,7 +55,7 @@ OMPI_GENERATE_F77_BINDINGS (MPI_COMM_CREATE_ERRHANDLER,
#include "mpi/f77/profile/defines.h"
#endif
void mpi_comm_create_errhandler_f(ompi_errhandler_fortran_handler_fn_t *function,
MPI_Fint *errhandler, MPI_Fint *ierr)
{
MPI_Errhandler c_errhandler;
@ -64,7 +64,7 @@ void mpi_comm_create_errhandler_f(void *function,
(void*) for function pointers in this function */
*ierr = OMPI_INT_2_FINT(
MPI_Comm_create_errhandler((MPI_Comm_errhandler_fn*)function,
&c_errhandler));
*errhandler = MPI_Errhandler_c2f(c_errhandler);
}


@ -17,7 +17,6 @@
#include "ompi_config.h"
#include "mpi/f77/bindings.h"
#include "attribute/attribute.h"
#include "communicator/communicator.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
@ -31,7 +30,7 @@ OMPI_GENERATE_F77_BINDINGS (PMPI_COMM_CREATE_KEYVAL,
pmpi_comm_create_keyval_,
pmpi_comm_create_keyval__,
pmpi_comm_create_keyval_f,
(MPI_F_copy_function* comm_copy_attr_fn, MPI_F_delete_function* comm_delete_attr_fn, MPI_Fint *comm_keyval, char *extra_state, MPI_Fint *ierr),
(comm_copy_attr_fn, comm_delete_attr_fn, comm_keyval, extra_state, ierr) )
#endif
@ -48,7 +47,7 @@ OMPI_GENERATE_F77_BINDINGS (MPI_COMM_CREATE_KEYVAL,
mpi_comm_create_keyval_,
mpi_comm_create_keyval__,
mpi_comm_create_keyval_f,
(MPI_F_copy_function* comm_copy_attr_fn, MPI_F_delete_function* comm_delete_attr_fn, MPI_Fint *comm_keyval, char *extra_state, MPI_Fint *ierr),
(comm_copy_attr_fn, comm_delete_attr_fn, comm_keyval, extra_state, ierr) )
#endif
@ -59,8 +58,8 @@ OMPI_GENERATE_F77_BINDINGS (MPI_COMM_CREATE_KEYVAL,
static const char FUNC_NAME[] = "MPI_Comm_create_keyval_f";
void mpi_comm_create_keyval_f(MPI_F_copy_function* comm_copy_attr_fn,
                              MPI_F_delete_function* comm_delete_attr_fn,
MPI_Fint *comm_keyval,
char *extra_state, MPI_Fint *ierr)
{


@ -16,10 +16,10 @@
#include "ompi_config.h"
#include "include/constants.h"
#include "communicator/communicator.h"
#include "mpi/f77/bindings.h"
#include "mpi/f77/strings.h"
#include "include/constants.h"
#include "communicator/communicator.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_COMM_GET_NAME = mpi_comm_get_name_f


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -16,8 +16,8 @@
#include "ompi_config.h"
#include "errhandler/errhandler.h"
#include "mpi/f77/bindings.h"
#include "errhandler/errhandler.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_COMM_SET_ERRHANDLER = mpi_comm_set_errhandler_f


@ -16,9 +16,9 @@
#include "ompi_config.h"
#include "mpi/f77/bindings.h"
#include "include/constants.h"
#include "communicator/communicator.h"
#include "mpi/f77/bindings.h"
#include "mpi/f77/strings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -18,11 +18,10 @@
#include <stdio.h>
#include "mpi.h"
#include "util/argv.h"
#include "mpi/f77/bindings.h"
#include "mpi/f77/constants.h"
#include "mpi/f77/strings.h"
#include "util/argv.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_COMM_SPAWN = mpi_comm_spawn_f


@ -19,11 +19,10 @@
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
#include "util/argv.h"
#include "mpi/f77/bindings.h"
#include "mpi/f77/constants.h"
#include "mpi/f77/strings.h"
#include "util/argv.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -37,7 +37,7 @@
*/
#if defined(HAVE_LONG_DOUBLE) && OMPI_ALIGNMENT_LONG_DOUBLE == 16
typedef struct { long double bogus[1]; } ompi_fortran_common_t;
#else
typedef struct { double bogus[2]; } ompi_fortran_common_t;
#endif
@ -102,7 +102,6 @@ DECL(MPI_FORTRAN_STATUSES_IGNORE, mpi_fortran_statuses_ignore,
* Create macros to do the checking. Only check for all 4 if we have
* weak symbols. Otherwise, just check for the one relevant symbol.
*/
#if OMPI_HAVE_WEAK_SYMBOLS
#define OMPI_IS_FORTRAN_BOTTOM(addr) \
(addr == (void*) &MPI_FORTRAN_BOTTOM || \
@ -193,4 +192,7 @@ DECL(MPI_FORTRAN_STATUSES_IGNORE, mpi_fortran_statuses_ignore,
#endif /* weak / specific symbol type */
/* Convert between Fortran and C MPI_BOTTOM */
#define OMPI_ADDR(addr) (OMPI_IS_FORTRAN_BOTTOM(addr) ? MPI_BOTTOM : (addr))
#endif /* OMPI_F77_CONSTANTS_H */
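A compressed model of what OMPI_ADDR does for the bindings below: Fortran passes the address of a sentinel common block for MPI_BOTTOM, and the C shim substitutes the real constant before calling the C API (toy names; the actual check is the OMPI_IS_FORTRAN_BOTTOM macro above):

#include <stdio.h>

static int toy_fortran_bottom;   /* stands in for MPI_FORTRAN_BOTTOM */
#define TOY_C_BOTTOM ((void*)0)  /* stands in for MPI_BOTTOM         */
#define TOY_ADDR(addr) \
    ((addr) == (void*)&toy_fortran_bottom ? TOY_C_BOTTOM : (addr))

static void toy_c_send( void *buf ) { printf( "buf=%p\n", buf ); }

int main( void )
{
    int data = 42;
    toy_c_send( TOY_ADDR((void*)&data) );               /* real buffer   */
    toy_c_send( TOY_ADDR((void*)&toy_fortran_bottom) ); /* -> MPI_BOTTOM */
    return 0;
}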


@ -20,13 +20,13 @@
#include "constants.h"
#define INST(upper_case, lower_case, single_u, double_u) \
ompi_fortran_common_t lower_case; \
ompi_fortran_common_t upper_case; \
ompi_fortran_common_t single_u; \
ompi_fortran_common_t double_u
INST(MPI_FORTRAN_STATUS_IGNORE, mpi_fortran_status_ignore,
mpi_fortran_status_ignore_, mpi_fortran_status_ignore__);
INST(MPI_FORTRAN_BOTTOM, mpi_fortran_bottom,
mpi_fortran_bottom_, mpi_fortran_bottom__);
INST(MPI_FORTRAN_ARGV_NULL, mpi_fortran_argv_null,
mpi_fortran_argv_null_, mpi_fortran_argv_null__);
INST(MPI_FORTRAN_ARGVS_NULL, mpi_fortran_argvs_null,


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -29,7 +29,7 @@ OMPI_GENERATE_F77_BINDINGS (PMPI_ERRHANDLER_CREATE,
pmpi_errhandler_create_,
pmpi_errhandler_create__,
pmpi_errhandler_create_f,
(ompi_errhandler_fortran_handler_fn_t* function, MPI_Fint *errhandler, MPI_Fint *ierr),
(function, errhandler, ierr) )
#endif
@ -46,7 +46,7 @@ OMPI_GENERATE_F77_BINDINGS (MPI_ERRHANDLER_CREATE,
mpi_errhandler_create_,
mpi_errhandler_create__,
mpi_errhandler_create_f,
(ompi_errhandler_fortran_handler_fn_t* function, MPI_Fint *errhandler, MPI_Fint *ierr),
(function, errhandler, ierr) )
#endif
@ -55,7 +55,7 @@ OMPI_GENERATE_F77_BINDINGS (MPI_ERRHANDLER_CREATE,
#include "mpi/f77/profile/defines.h"
#endif
void mpi_errhandler_create_f(ompi_errhandler_fortran_handler_fn_t* function,
MPI_Fint *errhandler, MPI_Fint *ierr)
{
MPI_Errhandler c_errhandler;


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -16,9 +16,8 @@
#include "ompi_config.h"
#include "mpi.h"
#include "errhandler/errhandler.h"
#include "mpi/f77/bindings.h"
#include "errhandler/errhandler.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_ERRHANDLER_SET = mpi_errhandler_set_f


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
@ -30,7 +29,7 @@ OMPI_GENERATE_F77_BINDINGS (PMPI_FILE_CREATE_ERRHANDLER,
pmpi_file_create_errhandler_,
pmpi_file_create_errhandler__,
pmpi_file_create_errhandler_f,
(ompi_errhandler_fortran_handler_fn_t* function, MPI_Fint *errhandler, MPI_Fint *ierr),
(function, errhandler, ierr) )
#endif
@ -47,7 +46,7 @@ OMPI_GENERATE_F77_BINDINGS (MPI_FILE_CREATE_ERRHANDLER,
mpi_file_create_errhandler_,
mpi_file_create_errhandler__,
mpi_file_create_errhandler_f,
(ompi_errhandler_fortran_handler_fn_t* function, MPI_Fint *errhandler, MPI_Fint *ierr),
(function, errhandler, ierr) )
#endif
@ -56,7 +55,7 @@ OMPI_GENERATE_F77_BINDINGS (MPI_FILE_CREATE_ERRHANDLER,
#include "mpi/f77/profile/defines.h"
#endif
void mpi_file_create_errhandler_f(ompi_errhandler_fortran_handler_fn_t* function,
MPI_Fint *errhandler, MPI_Fint *ierr)
{
MPI_Errhandler c_errhandler;


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -16,8 +16,8 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#include "mpi/f77/constants.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_FILE_IREAD_AT = mpi_file_iread_at_f
@ -26,12 +26,12 @@
#pragma weak pmpi_file_iread_at__ = mpi_file_iread_at_f
#elif OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (PMPI_FILE_IREAD_AT,
pmpi_file_iread_at,
pmpi_file_iread_at_,
pmpi_file_iread_at__,
pmpi_file_iread_at_f,
(MPI_Fint *fh, MPI_Offset *offset, char *buf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *request, MPI_Fint *ierr),
(fh, offset, buf, count, datatype, request, ierr) )
#endif
#if OMPI_HAVE_WEAK_SYMBOLS
@ -42,13 +42,13 @@ OMPI_GENERATE_F77_BINDINGS (PMPI_FILE_IREAD_AT,
#endif
#if ! OMPI_HAVE_WEAK_SYMBOLS && ! OMPI_PROFILE_LAYER
OMPI_GENERATE_F77_BINDINGS (MPI_FILE_IREAD_AT,
mpi_file_iread_at,
mpi_file_iread_at_,
mpi_file_iread_at__,
mpi_file_iread_at_f,
(MPI_Fint *fh, MPI_Offset *offset, char *buf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *request, MPI_Fint *ierr),
(fh, offset, buf, count, datatype, request, ierr) )
#endif
@ -57,18 +57,19 @@ OMPI_GENERATE_F77_BINDINGS (MPI_FILE_IREAD_AT,
#endif
void mpi_file_iread_at_f(MPI_Fint *fh, MPI_Offset *offset,
char *buf, MPI_Fint *count,
MPI_Fint *datatype, MPI_Fint *request, MPI_Fint *ierr)
{
MPI_File c_fh = MPI_File_f2c(*fh);
MPI_Datatype c_type = MPI_Type_f2c(*datatype);
MPI_Request c_request;
*ierr = OMPI_INT_2_FINT(MPI_File_iread_at(c_fh, (MPI_Offset) *offset,
OMPI_ADDR(buf),
OMPI_FINT_2_INT(*count),
c_type,
&c_request));
if (MPI_SUCCESS == *ierr) {
*request = MPI_Request_c2f(c_request);
}
}


@ -16,8 +16,8 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#include "mpi/f77/constants.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_FILE_IREAD = mpi_file_iread_f
@ -59,15 +59,15 @@ OMPI_GENERATE_F77_BINDINGS (MPI_FILE_IREAD,
void mpi_file_iread_f(MPI_Fint *fh, char *buf, MPI_Fint *count,
MPI_Fint *datatype, MPI_Fint *request, MPI_Fint *ierr)
{
MPI_File c_fh = MPI_File_f2c(*fh);
MPI_Datatype c_type = MPI_Type_f2c(*datatype);
MPI_Request c_request;
*ierr = OMPI_INT_2_FINT(MPI_File_iread(c_fh, OMPI_ADDR(buf),
OMPI_FINT_2_INT(*count),
c_type, &c_request));
if (MPI_SUCCESS == *ierr) {
*request = MPI_Request_c2f(c_request);
}
}


@ -16,8 +16,8 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#include "mpi/f77/constants.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_FILE_IREAD_SHARED = mpi_file_iread_shared_f
@ -60,16 +60,16 @@ void mpi_file_iread_shared_f(MPI_Fint *fh, char *buf, MPI_Fint *count,
MPI_Fint *datatype, MPI_Fint *request,
MPI_Fint *ierr)
{
MPI_File c_fh = MPI_File_f2c(*fh);
MPI_Datatype c_type = MPI_Type_f2c(*datatype);
MPI_Request c_request;
*ierr = OMPI_INT_2_FINT(MPI_File_iread_shared(c_fh,
OMPI_ADDR(buf),
OMPI_FINT_2_INT(*count),
c_type,
&c_request));
if (MPI_SUCCESS == *ierr) {
*request = MPI_Request_c2f(c_request);
}
}


@ -16,8 +16,8 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#include "mpi/f77/constants.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_FILE_IWRITE_AT = mpi_file_iwrite_at_f
@ -60,15 +60,15 @@ void mpi_file_iwrite_at_f(MPI_Fint *fh, MPI_Offset *offset, char *buf,
MPI_Fint *count, MPI_Fint *datatype,
MPI_Fint *request, MPI_Fint *ierr)
{
MPI_File c_fh = MPI_File_f2c(*fh);
MPI_Datatype c_type = MPI_Type_f2c(*datatype);
MPI_Request c_request;
*ierr = OMPI_INT_2_FINT(MPI_File_iwrite_at(c_fh, (MPI_Offset) *offset,
OMPI_ADDR(buf),
OMPI_FINT_2_INT(*count),
c_type, &c_request));
if (MPI_SUCCESS == *ierr) {
*request = MPI_Request_c2f(c_request);
}
}


@ -16,8 +16,8 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#include "mpi/f77/constants.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_FILE_IWRITE = mpi_file_iwrite_f
@ -58,15 +58,15 @@ OMPI_GENERATE_F77_BINDINGS (MPI_FILE_IWRITE,
void mpi_file_iwrite_f(MPI_Fint *fh, char *buf, MPI_Fint *count, MPI_Fint *datatype, MPI_Fint *request, MPI_Fint *ierr)
{
MPI_File c_fh = MPI_File_f2c(*fh);
MPI_Datatype c_type = MPI_Type_f2c(*datatype);
MPI_Request c_request;
*ierr = OMPI_INT_2_FINT(MPI_File_iwrite(c_fh, OMPI_ADDR(buf),
OMPI_FINT_2_INT(*count),
c_type, &c_request));
if (MPI_SUCCESS == *ierr) {
*request = MPI_Request_c2f(c_request);
}
}


@ -16,8 +16,8 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#include "mpi/f77/constants.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_FILE_IWRITE_SHARED = mpi_file_iwrite_shared_f
@ -60,17 +60,16 @@ void mpi_file_iwrite_shared_f(MPI_Fint *fh, char *buf, MPI_Fint *count,
MPI_Fint *datatype, MPI_Fint *request,
MPI_Fint *ierr)
{
MPI_File c_fh = MPI_File_f2c(*fh);
MPI_Datatype c_type = MPI_Type_f2c(*datatype);
MPI_Request c_request;
*ierr = OMPI_INT_2_FINT(MPI_File_iwrite_shared(c_fh,
OMPI_ADDR(buf),
OMPI_FINT_2_INT(*count),
c_type,
&c_request));
if (MPI_SUCCESS == *ierr) {
*request = MPI_Request_c2f(c_request);
}
}


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER


@ -16,8 +16,8 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#include "mpi/f77/constants.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_FILE_READ_ALL_BEGIN = mpi_file_read_all_begin_f
@ -60,10 +60,10 @@ void mpi_file_read_all_begin_f(MPI_Fint *fh, char *buf,
MPI_Fint *count, MPI_Fint *datatype,
MPI_Fint *ierr)
{
MPI_File c_fh = MPI_File_f2c(*fh);
MPI_Datatype c_type = MPI_Type_f2c(*datatype);
*ierr = OMPI_INT_2_FINT(MPI_File_read_all_begin(c_fh, OMPI_ADDR(buf),
OMPI_FINT_2_INT(*count),
c_type));
}


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#include "mpi/f77/constants.h"
#include "errhandler/errhandler.h"


@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#include "mpi/f77/constants.h"
#include "errhandler/errhandler.h"
@ -62,37 +61,35 @@ OMPI_GENERATE_F77_BINDINGS (MPI_FILE_READ_ALL,
void mpi_file_read_all_f(MPI_Fint *fh, char *buf, MPI_Fint *count,
MPI_Fint *datatype, MPI_Fint *status, MPI_Fint *ierr)
{
    MPI_File c_fh = MPI_File_f2c(*fh);
    MPI_Datatype c_type = MPI_Type_f2c(*datatype);
    MPI_Status *c_status;
#if OMPI_SIZEOF_FORTRAN_INT != SIZEOF_INT
    MPI_Status c_status2;
#endif

    /* See if we got MPI_STATUS_IGNORE */
    if (OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
        c_status = MPI_STATUS_IGNORE;
    } else {
        /* If sizeof(int) == sizeof(INTEGER), then there's no
           translation necessary -- let the underlying functions write
           directly into the Fortran status */
#if OMPI_SIZEOF_FORTRAN_INT == SIZEOF_INT
        c_status = (MPI_Status *) status;
#else
        c_status = &c_status2;
#endif
    }

    *ierr = OMPI_INT_2_FINT(MPI_File_read_all(c_fh, OMPI_ADDR(buf),
                                              OMPI_FINT_2_INT(*count),
                                              c_type, c_status));

#if OMPI_SIZEOF_FORTRAN_INT != SIZEOF_INT
    if (MPI_STATUS_IGNORE != c_status) {
        MPI_Status_c2f(c_status, status);
    }
#endif
}
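The status handling above does two things: it honors a Fortran MPI_STATUS_IGNORE without writing to user memory, and it skips the C-to-Fortran status copy entirely when INTEGER and int have the same width. Like MPI_BOTTOM, Fortran's MPI_STATUS_IGNORE is a variable that the wrapper can recognize by address; a sketch of what the test could look like (the actual macro is in mpi/f77/constants.h, and the symbol name below is an assumption):

#include "mpi.h"

/* Hypothetical sketch of the status-ignore test used above. */
extern MPI_Fint mpi_fortran_status_ignore;   /* assumed sentinel symbol */

#define OMPI_IS_FORTRAN_STATUS_IGNORE(status) \
    ((void *) (status) == (void *) &mpi_fortran_status_ignore)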

src/mpi/f77/file_read_at_all_begin_f.c

@@ -16,8 +16,8 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#include "mpi/f77/constants.h"
#if OMPI_HAVE_WEAK_SYMBOLS && OMPI_PROFILE_LAYER
#pragma weak PMPI_FILE_READ_AT_ALL_BEGIN = mpi_file_read_at_all_begin_f
@@ -60,12 +60,12 @@ void mpi_file_read_at_all_begin_f(MPI_Fint *fh, MPI_Offset *offset,
                                  char *buf, MPI_Fint *count,
                                  MPI_Fint *datatype, MPI_Fint *ierr)
{
    MPI_File c_fh = MPI_File_f2c(*fh);
    MPI_Datatype c_type = MPI_Type_f2c(*datatype);
    *ierr = OMPI_INT_2_FINT(MPI_File_read_at_all_begin(c_fh,
                                                       (MPI_Offset) *offset,
-                                                      buf,
+                                                      OMPI_ADDR(buf),
                                                       OMPI_FINT_2_INT(*count),
                                                       c_type));
}


@@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#include "mpi/f77/constants.h"
#include "errhandler/errhandler.h"

src/mpi/f77/file_read_at_all_f.c

@ -16,7 +16,6 @@
#include "ompi_config.h"
#include "mpi.h"
#include "mpi/f77/bindings.h"
#include "mpi/f77/constants.h"
#include "errhandler/errhandler.h"
@@ -64,40 +63,38 @@ void mpi_file_read_at_all_f(MPI_Fint *fh, MPI_Offset *offset,
                            MPI_Fint *datatype, MPI_Fint *status,
                            MPI_Fint *ierr)
{
    MPI_File c_fh = MPI_File_f2c(*fh);
    MPI_Datatype c_type = MPI_Type_f2c(*datatype);
    MPI_Status *c_status;
#if OMPI_SIZEOF_FORTRAN_INT != SIZEOF_INT
    MPI_Status c_status2;
#endif

    /* See if we got MPI_STATUS_IGNORE */
    if (OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
        c_status = MPI_STATUS_IGNORE;
    } else {
        /* If sizeof(int) == sizeof(INTEGER), then there's no
           translation necessary -- let the underlying functions write
           directly into the Fortran status */
#if OMPI_SIZEOF_FORTRAN_INT == SIZEOF_INT
        c_status = (MPI_Status *) status;
#else
        c_status = &c_status2;
#endif
    }

    *ierr = OMPI_INT_2_FINT(MPI_File_read_at_all(c_fh,
                                                 (MPI_Offset) *offset,
-                                                buf,
+                                                OMPI_ADDR(buf),
                                                 OMPI_FINT_2_INT(*count),
                                                 c_type,
                                                 c_status));

#if OMPI_SIZEOF_FORTRAN_INT != SIZEOF_INT
    if (MPI_STATUS_IGNORE != c_status) {
        MPI_Status_c2f(c_status, status);
    }
#endif
}
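The OMPI_GENERATE_F77_BINDINGS invocation visible in the read_all hunk header is how each wrapper gets exposed under the usual Fortran symbol manglings: upper case, lower case, and the single- and double-underscore variants. A much-simplified sketch of such a generator, under the assumption that every variant just forwards to the one real wrapper (the actual macro in mpi/f77/bindings.h takes the mangled names explicitly and also covers the profiling layer):

/* Hypothetical, simplified binding generator: emits four forwarding
 * entry points for the common Fortran name-mangling schemes. */
#define OMPI_GENERATE_F77_BINDINGS(upper, lower, impl, args, params) \
    void upper args     { impl params; }                             \
    void lower args     { impl params; }                             \
    void lower##_ args  { impl params; }                             \
    void lower##__ args { impl params; }

/* Example expansion for an invented wrapper: */
void example_f(int *x, int *ierr);
OMPI_GENERATE_F77_BINDINGS(EXAMPLE, example, example_f,
                           (int *x, int *ierr), (x, ierr))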

Some files were not shown because too many files changed in this diff.