
Per the OMPI developer conference, remove the last vestiges of OMPI_USE_PROGRESS_THREADS

This commit was SVN r32070.
This commit is contained in:
Ralph Castain 2014-06-24 17:05:11 +00:00
parent 1949f485ac
commit 12d92d0c22
30 changed files with 25 additions and 207 deletions
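The change has the same shape everywhere: compile-time blocks guarded by OMPI_ENABLE_PROGRESS_THREADS are deleted, and call sites that passed the macro as an argument now pass a hard-coded value. A minimal sketch of the two patterns (illustrative C, not lines from this diff; start_progress_thread is a hypothetical helper):

    /* Pattern 1: guarded blocks vanish, since the macro was hard-wired to 0 */
    #if OMPI_ENABLE_PROGRESS_THREADS == 1
        start_progress_thread(device);   /* dead code, deleted outright */
    #endif

    /* Pattern 2: the macro argument is replaced by a literal */
    ret = mca_btl_base_select(OMPI_ENABLE_PROGRESS_THREADS, OMPI_ENABLE_THREAD_MULTIPLE); /* before */
    ret = mca_btl_base_select(1, OMPI_ENABLE_THREAD_MULTIPLE);                            /* after  */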

View file

@@ -275,11 +275,5 @@ AC_DEFINE_UNQUOTED([OMPI_BUILD_FORTRAN_F08_SUBARRAYS],
[$OMPI_BUILD_FORTRAN_F08_SUBARRAYS],
[Whether we built the 'use mpi_f08' prototype subarray-based implementation or not (i.e., whether to build the use-mpi-f08-desc prototype or the regular use-mpi-f08 implementation)])
dnl We no longer support the old OMPI_ENABLE_PROGRESS_THREADS. At
dnl some point, this should die.
AC_DEFINE([OMPI_ENABLE_PROGRESS_THREADS],
[0],
[Whether we want OMPI progress threads enabled])
])dnl
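
The AC_DEFINE is retained so downstream code keeps compiling while the flag dies off; in the generated configuration header (ompi_config.h in this tree) it expands to:

    /* Whether we want OMPI progress threads enabled */
    #define OMPI_ENABLE_PROGRESS_THREADS 0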

View file

@@ -775,11 +775,7 @@ int mca_bcol_iboffload_adjust_cq(mca_bcol_iboffload_device_t *device,
if (NULL == *ib_cq) {
*ib_cq = ibv_create_cq_compat(device->dev.ib_dev_context, cq_size,
#if OMPI_ENABLE_PROGRESS_THREADS == 1
device, device->ib_channel,
#else
NULL, NULL,
#endif
0);
if (NULL == *ib_cq) {
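
With the guard gone, the completion queue is always created without a completion channel. In terms of the underlying libibverbs call that ibv_create_cq_compat wraps, the hard-wired NULL arguments mean the CQ can only be polled, never waited on through channel events (sketch, assuming a plain verbs context):

    struct ibv_cq *cq = ibv_create_cq(device->dev.ib_dev_context, cq_size,
                                      NULL,   /* cq_context: nothing to hand back */
                                      NULL,   /* no struct ibv_comp_channel */
                                      0);     /* comp_vector */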

View file

@@ -178,8 +178,7 @@ int mca_bml_r2_ft_event(int state)
* This will cause the BTL components to discover the available
* network options on this machine, and post proper modex informaiton.
*/
if( OMPI_SUCCESS != (ret = mca_btl_base_select(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_THREAD_MULTIPLE) ) ) {
if( OMPI_SUCCESS != (ret = mca_btl_base_select(1, OMPI_ENABLE_THREAD_MULTIPLE) ) ) {
opal_output(0, "bml:r2: ft_event(Restart): Failed to select in BTL framework\n");
return ret;
}
@@ -273,8 +272,7 @@ int mca_bml_r2_ft_event(int state)
* This will cause the BTL components to discover the available
* network options on this machine, and post proper modex informaiton.
*/
if( OMPI_SUCCESS != (ret = mca_btl_base_select(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_THREAD_MULTIPLE) ) ) {
if( OMPI_SUCCESS != (ret = mca_btl_base_select(1, OMPI_ENABLE_THREAD_MULTIPLE) ) ) {
opal_output(0, "bml:r2: ft_event(Restart): Failed to select in BTL framework\n");
return ret;
}
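
Both restart paths now pass a literal 1 for the enable-progress-threads argument. For reference, the selection entry point has this shape (signature sketched from its usage here; the header under ompi/mca/btl/base is authoritative):

    int mca_btl_base_select(bool enable_progress_threads,
                            bool enable_mpi_threads);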

View file

@@ -179,11 +179,7 @@ static int adjust_cq(mca_btl_openib_device_t *device, const int cq)
if(NULL == device->ib_cq[cq]) {
device->ib_cq[cq] = create_cq_compat(device->ib_dev_context, cq_size,
#if OMPI_ENABLE_PROGRESS_THREADS == 1
device, device->ib_channel,
#else
NULL, NULL,
#endif
0);
if (NULL == device->ib_cq[cq]) {
@@ -192,7 +188,6 @@ static int adjust_cq(mca_btl_openib_device_t *device, const int cq)
return OMPI_ERROR;
}
#if OMPI_ENABLE_PROGRESS_THREADS == 1
if(ibv_req_notify_cq(device->ib_cq[cq], 0)) {
mca_btl_openib_show_init_error(__FILE__, __LINE__,
"ibv_req_notify_cq",
@@ -210,7 +205,6 @@ static int adjust_cq(mca_btl_openib_device_t *device, const int cq)
}
}
OPAL_THREAD_UNLOCK(&device->device_lock);
#endif
}
#ifdef HAVE_IBV_RESIZE_CQ
else if (cq_size > mca_btl_openib_component.ib_cq_size[cq]){
@@ -736,14 +730,13 @@ static int prepare_device_for_use (mca_btl_openib_device_t *device)
return OMPI_ERROR;
}
}
#if OMPI_ENABLE_PROGRESS_THREADS == 1
/* Prepare data for thread, but not starting it */
OBJ_CONSTRUCT(&device->thread, opal_thread_t);
device->thread.t_run = mca_btl_openib_progress_thread;
device->thread.t_arg = device;
device->progress = false;
#endif
#endif
#if HAVE_XRC
/* if user configured to run with XRC qp and the device doesn't

View file

@@ -372,11 +372,9 @@ struct mca_btl_base_endpoint_t;
typedef struct mca_btl_openib_device_t {
opal_object_t super;
struct ibv_device *ib_dev; /* the ib device */
#if OMPI_ENABLE_PROGRESS_THREADS == 1
struct ibv_comp_channel *ib_channel; /* Channel event for the device */
opal_thread_t thread; /* Progress thread */
volatile bool progress; /* Progress status */
#endif
opal_mutex_t device_lock; /* device level lock */
struct ibv_context *ib_dev_context;
struct ibv_device_attr ib_dev_attr;
@@ -505,9 +503,7 @@ struct mca_btl_openib_reg_t {
};
typedef struct mca_btl_openib_reg_t mca_btl_openib_reg_t;
#if OMPI_ENABLE_PROGRESS_THREADS == 1
extern void* mca_btl_openib_progress_thread(opal_object_t*);
#endif
/**

View file

@@ -868,9 +868,7 @@ static void device_construct(mca_btl_openib_device_t *device)
device->ib_dev_context = NULL;
device->ib_pd = NULL;
device->mpool = NULL;
#if OMPI_ENABLE_PROGRESS_THREADS
device->ib_channel = NULL;
#endif
device->btls = 0;
device->endpoints = NULL;
device->device_btls = NULL;
@@ -905,7 +903,6 @@ static void device_destruct(mca_btl_openib_device_t *device)
int i;
#if OPAL_HAVE_THREADS
#if OMPI_ENABLE_PROGRESS_THREADS
if(device->progress) {
device->progress = false;
if (pthread_cancel(device->thread.t_handle)) {
@@ -918,7 +915,6 @@ static void device_destruct(mca_btl_openib_device_t *device)
BTL_VERBOSE(("Failed to close comp_channel"));
goto device_error;
}
#endif
/* signaling to async_tread to stop poll for this device */
if (mca_btl_openib_component.use_async_event_thread &&
-1 != mca_btl_openib_component.async_pipe[1]) {
@@ -1628,7 +1624,7 @@ static int init_one_device(opal_list_t *btl_list, struct ibv_device* ib_dev)
device->use_eager_rdma = values.use_eager_rdma;
}
/* Eager RDMA is not currently supported with progress threads */
if (device->use_eager_rdma && OMPI_ENABLE_PROGRESS_THREADS) {
if (device->use_eager_rdma) {
device->use_eager_rdma = 0;
opal_show_help("help-mpi-btl-openib.txt",
"eager RDMA and progress threads", true);
@@ -1648,7 +1644,6 @@ static int init_one_device(opal_list_t *btl_list, struct ibv_device* ib_dev)
goto error;
}
#if OMPI_ENABLE_PROGRESS_THREADS
device->ib_channel = ibv_create_comp_channel(device->ib_dev_context);
if (NULL == device->ib_channel) {
BTL_ERROR(("error creating channel for %s errno says %s",
@@ -1656,7 +1651,6 @@ static int init_one_device(opal_list_t *btl_list, struct ibv_device* ib_dev)
strerror(errno)));
goto error;
}
#endif
ret = OMPI_SUCCESS;
@@ -2024,11 +2018,9 @@ static int init_one_device(opal_list_t *btl_list, struct ibv_device* ib_dev)
}
error:
#if OMPI_ENABLE_PROGRESS_THREADS
if (device->ib_channel) {
ibv_destroy_comp_channel(device->ib_channel);
}
#endif
if (device->mpool) {
mca_mpool_base_module_destroy(device->mpool);
}
@@ -3445,7 +3437,6 @@ error:
return count;
}
#if OMPI_ENABLE_PROGRESS_THREADS
void* mca_btl_openib_progress_thread(opal_object_t* arg)
{
opal_thread_t* thread = (opal_thread_t*)arg;
@@ -3483,7 +3474,6 @@ void* mca_btl_openib_progress_thread(opal_object_t* arg)
return PTHREAD_CANCELED;
}
#endif
static int progress_one_device(mca_btl_openib_device_t *device)
{
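
The deleted mca_btl_openib_progress_thread was a standard verbs completion-channel loop. A self-contained sketch of that pattern (illustrative, not the removed OMPI code):

    struct ibv_cq *ev_cq;
    void *ev_ctx;
    struct ibv_wc wc;

    while (device->progress) {
        /* block until the completion channel signals an event */
        if (ibv_get_cq_event(device->ib_channel, &ev_cq, &ev_ctx))
            break;
        ibv_ack_cq_events(ev_cq, 1);
        /* re-arm notification before draining, or events can be missed */
        if (ibv_req_notify_cq(ev_cq, 0))
            break;
        while (ibv_poll_cq(ev_cq, 1, &wc) > 0) {
            /* hand each completion to the BTL (elided) */
        }
    }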

View file

@@ -21,9 +21,7 @@
#ifndef MCA_BTL_SELF_ENDPOINT_H
#define MCA_BTL_SELF_ENDPOINT_H
#if OMPI_ENABLE_PROGRESS_THREADS == 1
#include "opal/mca/event/event.h"
#endif
/**
* An abstraction that represents a connection to a endpoint process.

View file

@@ -424,10 +424,7 @@ static struct mca_btl_base_endpoint_t *
create_sm_endpoint(int local_proc, struct ompi_proc_t *proc)
{
struct mca_btl_base_endpoint_t *ep;
#if OMPI_ENABLE_PROGRESS_THREADS == 1
char path[PATH_MAX];
#endif
ep = (struct mca_btl_base_endpoint_t*)
malloc(sizeof(struct mca_btl_base_endpoint_t));
@@ -437,7 +434,6 @@ create_sm_endpoint(int local_proc, struct ompi_proc_t *proc)
OBJ_CONSTRUCT(&ep->pending_sends, opal_list_t);
OBJ_CONSTRUCT(&ep->endpoint_lock, opal_mutex_t);
#if OMPI_ENABLE_PROGRESS_THREADS == 1
sprintf(path, "%s"OPAL_PATH_SEP"sm_fifo.%lu",
ompi_process_info.job_session_dir,
(unsigned long)proc->proc_name.vpid);
@@ -448,7 +444,6 @@ create_sm_endpoint(int local_proc, struct ompi_proc_t *proc)
free(ep);
return NULL;
}
#endif
return ep;
}

View file

@@ -113,10 +113,8 @@ typedef struct sm_fifo_t sm_fifo_t;
* Shared Memory resource managment
*/
#if OMPI_ENABLE_PROGRESS_THREADS == 1
#define DATA (char)0
#define DONE (char)1
#endif
typedef struct mca_btl_sm_mem_node_t {
mca_mpool_base_module_t* sm_mpool; /**< shared memory pool */
@@ -169,11 +167,9 @@ struct mca_btl_sm_component_t {
int mem_node;
int num_mem_nodes;
#if OMPI_ENABLE_PROGRESS_THREADS == 1
char sm_fifo_path[PATH_MAX]; /**< path to fifo used to signal this process */
int sm_fifo_fd; /**< file descriptor corresponding to opened fifo */
opal_thread_t sm_fifo_thread;
#endif
struct mca_btl_sm_t **sm_btls;
struct mca_btl_sm_frag_t **table;
size_t sm_num_btls;
@@ -542,11 +538,8 @@ extern void mca_btl_sm_dump(struct mca_btl_base_module_t* btl,
*/
int mca_btl_sm_ft_event(int state);
#if OMPI_ENABLE_PROGRESS_THREADS == 1
void mca_btl_sm_component_event_thread(opal_object_t*);
#endif
#if OMPI_ENABLE_PROGRESS_THREADS == 1
#define MCA_BTL_SM_SIGNAL_PEER(peer) \
{ \
unsigned char cmd = DATA; \
@@ -554,9 +547,6 @@ void mca_btl_sm_component_event_thread(opal_object_t*);
opal_output(0, "mca_btl_sm_send: write fifo failed: errno=%d\n", errno); \
} \
}
#else
#define MCA_BTL_SM_SIGNAL_PEER(peer)
#endif
END_C_DECLS
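
The FIFO plumbing removed here and in the component file is plain POSIX named-pipe signalling: each process creates and opens sm_fifo.<vpid> under its job session directory, and MCA_BTL_SM_SIGNAL_PEER wakes a peer by writing a single DATA byte (DONE requests shutdown). A minimal standalone sketch of the idea (hypothetical path, error handling trimmed):

    #include <sys/stat.h>
    #include <fcntl.h>
    #include <unistd.h>

    const char *path = "/tmp/sm_fifo.0";   /* stand-in for the session-dir path */
    mkfifo(path, 0660);                    /* receiver creates the pipe */
    int fd = open(path, O_RDWR);           /* O_RDWR keeps open() from blocking */
    unsigned char cmd = 0;                 /* DATA */
    write(fd, &cmd, sizeof(cmd));          /* one byte down the pipe wakes the peer */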

View file

@@ -347,7 +347,6 @@ static int mca_btl_sm_component_close(void)
OBJ_RELEASE(mca_btl_sm_component.sm_seg);
}
#if OMPI_ENABLE_PROGRESS_THREADS == 1
/* close/cleanup fifo create for event notification */
if(mca_btl_sm_component.sm_fifo_fd > 0) {
/* write a done message down the pipe */
@@ -361,7 +360,6 @@ static int mca_btl_sm_component_close(void)
close(mca_btl_sm_component.sm_fifo_fd);
unlink(mca_btl_sm_component.sm_fifo_path);
}
#endif
CLEANUP:
@@ -745,7 +743,6 @@ mca_btl_sm_component_init(int *num_btls,
return NULL;
}
#if OMPI_ENABLE_PROGRESS_THREADS == 1
/* create a named pipe to receive events */
sprintf( mca_btl_sm_component.sm_fifo_path,
"%s"OPAL_PATH_SEP"sm_fifo.%lu", ompi_process_info.job_session_dir,
@@ -767,7 +764,6 @@ mca_btl_sm_component_init(int *num_btls,
mca_btl_sm_component.sm_fifo_thread.t_run =
(opal_thread_fn_t)mca_btl_sm_component_event_thread;
opal_thread_start(&mca_btl_sm_component.sm_fifo_thread);
#endif
mca_btl_sm_component.sm_btls =
(mca_btl_sm_t **)malloc(mca_btl_sm_component.sm_max_btls *
@@ -925,7 +921,6 @@ mca_btl_sm_component_init(int *num_btls,
* SM component progress.
*/
#if OMPI_ENABLE_PROGRESS_THREADS == 1
void mca_btl_sm_component_event_thread(opal_object_t* thread)
{
while(1) {
@@ -941,7 +936,6 @@ void mca_btl_sm_component_event_thread(opal_object_t* thread)
mca_btl_sm_component_progress();
}
}
#endif
void btl_sm_process_pending_sends(struct mca_btl_base_endpoint_t *ep)
{

View file

@@ -33,9 +33,7 @@ struct mca_btl_base_endpoint_t {
* SMP specfic data structures. */
int peer_smp_rank; /**< My peer's SMP process rank. Used for accessing
* SMP specfic data structures. */
#if OMPI_ENABLE_PROGRESS_THREADS == 1
int fifo_fd; /**< pipe/fifo used to signal endpoint that data is queued */
#endif
opal_list_t pending_sends; /**< pending data to send */
/** lock for concurrent access to endpoint state */

View file

@@ -436,10 +436,7 @@ static struct mca_btl_base_endpoint_t *
create_sm_endpoint(int local_proc, struct ompi_proc_t *proc)
{
struct mca_btl_base_endpoint_t *ep;
#if OMPI_ENABLE_PROGRESS_THREADS == 1
char path[PATH_MAX];
#endif
ep = (struct mca_btl_base_endpoint_t*)
malloc(sizeof(struct mca_btl_base_endpoint_t));
@@ -449,7 +446,6 @@ create_sm_endpoint(int local_proc, struct ompi_proc_t *proc)
OBJ_CONSTRUCT(&ep->pending_sends, opal_list_t);
OBJ_CONSTRUCT(&ep->endpoint_lock, opal_mutex_t);
#if OMPI_ENABLE_PROGRESS_THREADS == 1
sprintf(path, "%s"OPAL_PATH_SEP"sm_fifo.%lu",
ompi_process_info.job_session_dir,
(unsigned long)proc->proc_name.vpid);
@@ -460,7 +456,6 @@ create_sm_endpoint(int local_proc, struct ompi_proc_t *proc)
free(ep);
return NULL;
}
#endif
#if OPAL_CUDA_SUPPORT
{
mca_mpool_base_resources_t resources; /* unused, but needed */

View file

@@ -110,10 +110,8 @@ typedef struct sm_fifo_t sm_fifo_t;
* Shared Memory resource managment
*/
#if OMPI_ENABLE_PROGRESS_THREADS == 1
#define DATA (char)0
#define DONE (char)1
#endif
typedef struct mca_btl_smcuda_mem_node_t {
mca_mpool_base_module_t* sm_mpool; /**< shared memory pool */
@@ -166,11 +164,9 @@ struct mca_btl_smcuda_component_t {
int mem_node;
int num_mem_nodes;
#if OMPI_ENABLE_PROGRESS_THREADS == 1
char sm_fifo_path[PATH_MAX]; /**< path to fifo used to signal this process */
int sm_fifo_fd; /**< file descriptor corresponding to opened fifo */
opal_thread_t sm_fifo_thread;
#endif
struct mca_btl_smcuda_t **sm_btls;
struct mca_btl_smcuda_frag_t **table;
size_t sm_num_btls;
@@ -533,11 +529,8 @@ extern void mca_btl_smcuda_dump(struct mca_btl_base_module_t* btl,
*/
int mca_btl_smcuda_ft_event(int state);
#if OMPI_ENABLE_PROGRESS_THREADS == 1
void mca_btl_smcuda_component_event_thread(opal_object_t*);
#endif
#if OMPI_ENABLE_PROGRESS_THREADS == 1
#define MCA_BTL_SMCUDA_SIGNAL_PEER(peer) \
{ \
unsigned char cmd = DATA; \
@@ -545,9 +538,6 @@ void mca_btl_smcuda_component_event_thread(opal_object_t*);
opal_output(0, "mca_btl_smcuda_send: write fifo failed: errno=%d\n", errno); \
} \
}
#else
#define MCA_BTL_SMCUDA_SIGNAL_PEER(peer)
#endif
END_C_DECLS

View file

@@ -278,7 +278,6 @@ static int mca_btl_smcuda_component_close(void)
OBJ_RELEASE(mca_btl_smcuda_component.sm_seg);
}
#if OMPI_ENABLE_PROGRESS_THREADS == 1
/* close/cleanup fifo create for event notification */
if(mca_btl_smcuda_component.sm_fifo_fd > 0) {
/* write a done message down the pipe */
@@ -292,7 +291,6 @@ static int mca_btl_smcuda_component_close(void)
close(mca_btl_smcuda_component.sm_fifo_fd);
unlink(mca_btl_smcuda_component.sm_fifo_path);
}
#endif
CLEANUP:
@@ -883,7 +881,6 @@ mca_btl_smcuda_component_init(int *num_btls,
return NULL;
}
#if OMPI_ENABLE_PROGRESS_THREADS == 1
/* create a named pipe to receive events */
sprintf( mca_btl_smcuda_component.sm_fifo_path,
"%s"OPAL_PATH_SEP"sm_fifo.%lu", ompi_process_info.job_session_dir,
@@ -905,7 +902,6 @@ mca_btl_smcuda_component_init(int *num_btls,
mca_btl_smcuda_component.sm_fifo_thread.t_run =
(opal_thread_fn_t)mca_btl_smcuda_component_event_thread;
opal_thread_start(&mca_btl_smcuda_component.sm_fifo_thread);
#endif
mca_btl_smcuda_component.sm_btls =
(mca_btl_smcuda_t **)malloc(mca_btl_smcuda_component.sm_max_btls *
@@ -951,7 +947,6 @@ mca_btl_smcuda_component_init(int *num_btls,
* SM component progress.
*/
#if OMPI_ENABLE_PROGRESS_THREADS == 1
void mca_btl_smcuda_component_event_thread(opal_object_t* thread)
{
while(1) {
@@ -967,7 +962,6 @@ void mca_btl_smcuda_component_event_thread(opal_object_t* thread)
mca_btl_smcuda_component_progress();
}
}
#endif
void btl_smcuda_process_pending_sends(struct mca_btl_base_endpoint_t *ep)
{

View file

@@ -213,23 +213,19 @@ int mca_io_base_file_select(ompi_file_t *file,
}
if (OMPI_SUCCESS !=
(ret = mca_fs_base_find_available(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_THREAD_MULTIPLE))) {
(ret = mca_fs_base_find_available(1, OMPI_ENABLE_THREAD_MULTIPLE))) {
return err;
}
if (OMPI_SUCCESS !=
(ret = mca_fcoll_base_find_available(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_THREAD_MULTIPLE))) {
(ret = mca_fcoll_base_find_available(1, OMPI_ENABLE_THREAD_MULTIPLE))) {
return err;
}
if (OMPI_SUCCESS !=
(ret = mca_fbtl_base_find_available(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_THREAD_MULTIPLE))) {
(ret = mca_fbtl_base_find_available(1, OMPI_ENABLE_THREAD_MULTIPLE))) {
return err;
}
if (OMPI_SUCCESS !=
(ret = mca_sharedfp_base_find_available(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_THREAD_MULTIPLE))) {
(ret = mca_sharedfp_base_find_available(1, OMPI_ENABLE_THREAD_MULTIPLE))) {
return err;
}
}

View file

@@ -38,7 +38,7 @@ static int mca_io_base_open(mca_base_open_flag_t flags)
return ret;
}
return mca_io_base_find_available(OMPI_ENABLE_PROGRESS_THREADS, OMPI_ENABLE_THREAD_MULTIPLE);
return mca_io_base_find_available(1, OMPI_ENABLE_THREAD_MULTIPLE);
}
MCA_BASE_FRAMEWORK_DECLARE(ompi, io, "I/O", NULL, mca_io_base_open, NULL,

View file

@@ -442,10 +442,6 @@ ompi_osc_rdma_test(ompi_win_t *win,
ompi_group_t *group;
int ret = OMPI_SUCCESS;
#if !OMPI_ENABLE_PROGRESS_THREADS
opal_progress();
#endif
if (NULL == module->pw_group) {
return OMPI_ERR_RMA_SYNC;
}

View file

@@ -54,7 +54,7 @@ mca_pml_base_component_2_0_0_t mca_pml_v_component =
mca_pml_v_component_finalize /* component finalize */
};
static bool pml_v_enable_progress_treads = OMPI_ENABLE_PROGRESS_THREADS;
static bool pml_v_enable_progress_treads = 1;
static bool pml_v_enable_mpi_thread_multiple = OMPI_ENABLE_THREAD_MULTIPLE;
static char *ompi_pml_vprotocol_include_list;

View file

@@ -38,8 +38,7 @@ int mca_topo_base_lazy_init(void)
if (0 == opal_list_get_size(&ompi_topo_base_framework.framework_components)) {
ompi_topo_base_framework.framework_open(MCA_BASE_OPEN_FIND_COMPONENTS);
if (OMPI_SUCCESS !=
(err = mca_topo_base_find_available(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_THREAD_MULTIPLE))) {
(err = mca_topo_base_find_available(1, OMPI_ENABLE_THREAD_MULTIPLE))) {
return err;
}
}

View file

@@ -44,10 +44,6 @@ static const char FUNC_NAME[] = "MPI_Request_get_status";
int MPI_Request_get_status(MPI_Request request, int *flag,
MPI_Status *status)
{
#if OMPI_ENABLE_PROGRESS_THREADS == 0
int do_it_once = 0;
#endif
MEMCHECKER(
memchecker_request(&request);
);
@@ -64,9 +60,6 @@ int MPI_Request_get_status(MPI_Request request, int *flag,
}
}
#if OMPI_ENABLE_PROGRESS_THREADS == 0
recheck_request_status:
#endif
opal_atomic_mb();
if( (request == MPI_REQUEST_NULL) || (request->req_state == OMPI_REQUEST_INACTIVE) ) {
*flag = true;
@@ -88,16 +81,6 @@ int MPI_Request_get_status(MPI_Request request, int *flag,
}
return MPI_SUCCESS;
}
#if OMPI_ENABLE_PROGRESS_THREADS == 0
if( 0 == do_it_once ) {
/* If we run the opal_progress then check the status of the
request before leaving. We will call the opal_progress only
once per call. */
opal_progress();
do_it_once++;
goto recheck_request_status;
}
#endif
*flag = false;
return MPI_SUCCESS;
}
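
With the retry block gone, MPI_Request_get_status no longer drives opal_progress itself, so a caller polling it relies on progress being made elsewhere in the library. A minimal caller-side sketch:

    int flag = 0;
    MPI_Status status;
    while (!flag) {
        /* non-destructive poll: the request survives completion */
        MPI_Request_get_status(request, &flag, &status);
    }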

View file

@@ -32,11 +32,7 @@ int ompi_request_default_test( ompi_request_t ** rptr,
ompi_status_public_t * status )
{
ompi_request_t *request = *rptr;
#if OMPI_ENABLE_PROGRESS_THREADS == 0
int do_it_once = 0;
recheck_request_status:
#endif
opal_atomic_mb();
if( request->req_state == OMPI_REQUEST_INACTIVE ) {
*completed = true;
@@ -81,17 +77,6 @@ int ompi_request_default_test( ompi_request_t ** rptr,
later! */
return ompi_request_free(rptr);
}
#if OMPI_ENABLE_PROGRESS_THREADS == 0
if( 0 == do_it_once ) {
/**
* If we run the opal_progress then check the status of the request before
* leaving. We will call the opal_progress only once per call.
*/
opal_progress();
do_it_once++;
goto recheck_request_status;
}
#endif
*completed = false;
return OMPI_SUCCESS;
}
@@ -163,9 +148,6 @@ int ompi_request_default_test_any(
*index = MPI_UNDEFINED;
if(num_requests_null_inactive != count) {
*completed = false;
#if OMPI_ENABLE_PROGRESS_THREADS == 0
opal_progress();
#endif
} else {
*completed = true;
if (MPI_STATUS_IGNORE != status) {
@@ -201,9 +183,6 @@ int ompi_request_default_test_all(
if (num_completed != count) {
*completed = false;
#if OMPI_ENABLE_PROGRESS_THREADS == 0
opal_progress();
#endif
return OMPI_SUCCESS;
}
@@ -312,9 +291,6 @@ int ompi_request_default_test_some(
*outcount = num_requests_done;
if (num_requests_done == 0) {
#if OMPI_ENABLE_PROGRESS_THREADS == 0
opal_progress();
#endif
return OMPI_SUCCESS;
}

View file

@@ -29,9 +29,7 @@
#include "ompi/mca/crcp/crcp.h"
#include "ompi/mca/pml/base/pml_base_request.h"
#if OMPI_ENABLE_PROGRESS_THREADS
static int ompi_progress_thread_count=0;
#endif
int ompi_request_default_wait(
ompi_request_t ** req_ptr,
@@ -89,16 +87,13 @@ int ompi_request_default_wait_any(
int *index,
ompi_status_public_t * status)
{
#if OMPI_ENABLE_PROGRESS_THREADS
int c;
#endif
size_t i=0, num_requests_null_inactive=0;
int rc = OMPI_SUCCESS;
int completed = -1;
ompi_request_t **rptr=NULL;
ompi_request_t *request=NULL;
#if OMPI_ENABLE_PROGRESS_THREADS
/* poll for completion */
OPAL_THREAD_ADD32(&ompi_progress_thread_count,1);
for (c = 0; completed < 0 && c < opal_progress_spin_count; c++) {
@@ -127,7 +122,6 @@ int ompi_request_default_wait_any(
opal_progress();
}
OPAL_THREAD_ADD32(&ompi_progress_thread_count,-1);
#endif
/* give up and sleep until completion */
OPAL_THREAD_LOCK(&ompi_request_lock);
@@ -165,10 +159,7 @@ int ompi_request_default_wait_any(
ompi_request_waiting--;
OPAL_THREAD_UNLOCK(&ompi_request_lock);
#if OMPI_ENABLE_PROGRESS_THREADS
finished:
#endif /* OMPI_ENABLE_PROGRESS_THREADS */
if(num_requests_null_inactive == count) {
*index = MPI_UNDEFINED;
if (MPI_STATUS_IGNORE != status) {
@@ -447,9 +438,7 @@ int ompi_request_default_wait_some(
int * indices,
ompi_status_public_t * statuses)
{
#if OMPI_ENABLE_PROGRESS_THREADS
int c;
#endif
size_t i, num_requests_null_inactive=0, num_requests_done=0;
int rc = MPI_SUCCESS;
ompi_request_t **rptr=NULL;
@@ -460,7 +449,6 @@ int ompi_request_default_wait_some(
indices[i] = 0;
}
#if OMPI_ENABLE_PROGRESS_THREADS
/* poll for completion */
OPAL_THREAD_ADD32(&ompi_progress_thread_count,1);
for (c = 0; c < opal_progress_spin_count; c++) {
@@ -490,7 +478,6 @@ int ompi_request_default_wait_some(
opal_progress();
}
OPAL_THREAD_ADD32(&ompi_progress_thread_count,-1);
#endif
/*
* We only get here when outcount still is 0.
@@ -525,10 +512,7 @@ int ompi_request_default_wait_some(
ompi_request_waiting--;
OPAL_THREAD_UNLOCK(&ompi_request_lock);
#if OMPI_ENABLE_PROGRESS_THREADS
finished:
#endif /* OMPI_ENABLE_PROGRESS_THREADS */
#if OPAL_ENABLE_FT_CR == 1
if( opal_cr_is_enabled) {
rptr = requests;

View file

@@ -370,11 +370,9 @@ static inline int ompi_request_free(ompi_request_t** request)
static inline void ompi_request_wait_completion(ompi_request_t *req)
{
if(false == req->req_complete) {
#if OMPI_ENABLE_PROGRESS_THREADS
if(opal_progress_spin(&req->req_complete)) {
return;
}
#endif
OPAL_THREAD_LOCK(&ompi_request_lock);
ompi_request_waiting++;
while(false == req->req_complete) {
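
With the guard dropped, the opal_progress_spin call runs unconditionally, giving the classic spin-then-block wait: spin briefly in the hope of immediate completion, then take the lock and sleep on a condition variable. The same shape in raw pthreads (generic sketch, not OMPI's opal_condition implementation):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static volatile bool complete = false;

    static void wait_completion(void)
    {
        for (int i = 0; i < 10000 && !complete; ++i)
            ;                                 /* short optimistic spin */
        pthread_mutex_lock(&lock);
        while (!complete)
            pthread_cond_wait(&cond, &lock);  /* sleep until signalled */
        pthread_mutex_unlock(&lock);
    }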

View file

@@ -139,10 +139,6 @@ int ompi_mpi_finalize(void)
*/
(void)mca_pml_base_bsend_detach(NULL, NULL);
#if OMPI_ENABLE_PROGRESS_THREADS == 0
opal_progress_set_event_flag(OPAL_EVLOOP_ONCE | OPAL_EVLOOP_NONBLOCK);
#endif
/* Redo ORTE calling opal_progress_event_users_increment() during
MPI lifetime, to get better latency when not using TCP */
opal_progress_event_users_increment();

View file

@@ -543,8 +543,7 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
goto error;
}
if (OMPI_SUCCESS !=
(ret = ompi_op_base_find_available(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_THREAD_MULTIPLE))) {
(ret = ompi_op_base_find_available(1, OMPI_ENABLE_THREAD_MULTIPLE))) {
error = "ompi_op_base_find_available() failed";
goto error;
}
@@ -601,15 +600,13 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
/* Select which MPI components to use */
if (OMPI_SUCCESS !=
(ret = mca_mpool_base_init(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_THREAD_MULTIPLE))) {
(ret = mca_mpool_base_init(1, OMPI_ENABLE_THREAD_MULTIPLE))) {
error = "mca_mpool_base_init() failed";
goto error;
}
if (OMPI_SUCCESS !=
(ret = mca_pml_base_select(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_THREAD_MULTIPLE))) {
(ret = mca_pml_base_select(1, OMPI_ENABLE_THREAD_MULTIPLE))) {
error = "mca_pml_base_select() failed";
goto error;
}
@@ -658,15 +655,13 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
}
if (OMPI_SUCCESS !=
(ret = mca_coll_base_find_available(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_THREAD_MULTIPLE))) {
(ret = mca_coll_base_find_available(1, OMPI_ENABLE_THREAD_MULTIPLE))) {
error = "mca_coll_base_find_available() failed";
goto error;
}
if (OMPI_SUCCESS !=
(ret = ompi_osc_base_find_available(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_THREAD_MULTIPLE))) {
(ret = ompi_osc_base_find_available(1, OMPI_ENABLE_THREAD_MULTIPLE))) {
error = "ompi_osc_base_find_available() failed";
goto error;
}
@@ -757,8 +752,7 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
/* If thread support was enabled, then setup OPAL to allow for
them. */
if ((OMPI_ENABLE_PROGRESS_THREADS == 1) ||
(*provided != MPI_THREAD_SINGLE)) {
if ((*provided != MPI_THREAD_SINGLE)) {
opal_set_using_threads(true);
}
@@ -838,17 +832,7 @@ int ompi_mpi_init(int argc, char **argv, int requested, int *provided)
gettimeofday(&ompistart, NULL);
}
#if OMPI_ENABLE_PROGRESS_THREADS == 0
/* Start setting up the event engine for MPI operations. Don't
block in the event library, so that communications don't take
forever between procs in the dynamic code. This will increase
CPU utilization for the remainder of MPI_INIT when we are
blocking on RTE-level events, but may greatly reduce non-TCP
latency. */
opal_progress_set_event_flag(OPAL_EVLOOP_NONBLOCK);
#endif
/* wire up the mpi interface, if requested. Do this after the
/* wire up the mpi interface, if requested. Do this after the
non-block switch for non-TCP performance. Do before the
polling change as anyone with a complex wire-up is going to be
using the oob. */

View file

@@ -275,14 +275,12 @@ void ompi_info_do_config(bool want_all)
(void)asprintf(&threads, "%s (MPI_THREAD_MULTIPLE: %s, OPAL support: %s, OMPI progress: %s, ORTE progress: yes, Event lib: yes)",
(OPAL_HAVE_POSIX_THREADS ? "posix" : "type unknown"), /* "type unknown" can presumably never happen */
OMPI_ENABLE_THREAD_MULTIPLE ? "yes" : "no",
OPAL_ENABLE_MULTI_THREADS ? "yes" : "no",
OMPI_ENABLE_PROGRESS_THREADS ? "yes" : "no");
OPAL_ENABLE_MULTI_THREADS ? "yes" : "no", "yes");
#else
(void)asprintf(&threads, "%s (MPI_THREAD_MULTIPLE: %s, OPAL support: %s, OMPI progress: %s, Event lib: yes)",
(OPAL_HAVE_POSIX_THREADS ? "posix" : "type unknown"), /* "type unknown" can presumably never happen */
OMPI_ENABLE_THREAD_MULTIPLE ? "yes" : "no",
OPAL_ENABLE_MULTI_THREADS ? "yes" : "no",
OMPI_ENABLE_PROGRESS_THREADS ? "yes" : "no");
OPAL_ENABLE_MULTI_THREADS ? "yes" : "no", "yes");
#endif
} else {
threads = strdup("no");
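
With the OMPI progress field pinned to "yes", a thread-enabled build reports a line of the form (example output inferred from the format string above):

    posix (MPI_THREAD_MULTIPLE: yes, OPAL support: yes, OMPI progress: yes, ORTE progress: yes, Event lib: yes)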

View file

@@ -50,9 +50,6 @@ static bool event_started = false;
void mca_oob_ud_event_start_monitor (mca_oob_ud_device_t *device)
{
if (!event_started) {
#if !ORTE_ENABLE_PROGRESS_THREADS
opal_progress_event_users_increment ();
#endif
opal_event_set (orte_event_base, &device->event, device->ib_channel->fd,
OPAL_EV_READ, mca_oob_ud_event_dispatch, (void *) device);
opal_event_add (&device->event, NULL);
@@ -63,9 +60,6 @@ void mca_oob_ud_event_stop_monitor (mca_oob_ud_device_t *device)
void mca_oob_ud_event_stop_monitor (mca_oob_ud_device_t *device)
{
if (event_started) {
#if !ORTE_ENABLE_PROGRESS_THREADS
opal_progress_event_users_decrement ();
#endif
opal_event_del (&device->event);
mca_oob_ud_stop_events (device);
event_started = false;

View file

@@ -395,11 +395,9 @@ static inline void oshmem_request_wait_any_completion(void)
static inline void oshmem_request_wait_completion(oshmem_request_t *req)
{
if (false == req->req_complete) {
#if OMPI_ENABLE_PROGRESS_THREADS
if(opal_progress_spin(&req->req_complete)) {
return;
}
#endif
OPAL_THREAD_LOCK(&oshmem_request_lock);
oshmem_request_waiting++;
while (false == req->req_complete) {

View file

@@ -346,16 +346,14 @@ static int _shmem_init(int argc, char **argv, int requested, int *provided)
}
if (OSHMEM_SUCCESS
!= (ret = mca_spml_base_select(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_THREAD_MULTIPLE))) {
!= (ret = mca_spml_base_select(1, OMPI_ENABLE_THREAD_MULTIPLE))) {
error = "mca_spml_base_select() failed";
goto error;
}
if (OSHMEM_SUCCESS
!= (ret =
mca_scoll_base_find_available(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_THREAD_MULTIPLE))) {
mca_scoll_base_find_available(1, OMPI_ENABLE_THREAD_MULTIPLE))) {
error = "mca_scoll_base_find_available() failed";
goto error;
}
@@ -430,8 +428,7 @@ static int _shmem_init(int argc, char **argv, int requested, int *provided)
if (OSHMEM_SUCCESS
!= (ret =
mca_atomic_base_find_available(OMPI_ENABLE_PROGRESS_THREADS,
OMPI_ENABLE_THREAD_MULTIPLE))) {
mca_atomic_base_find_available(1, OMPI_ENABLE_THREAD_MULTIPLE))) {
error = "mca_atomic_base_find_available() failed";
goto error;
}

View file

@@ -260,14 +260,12 @@ void oshmem_info_do_config(bool want_all)
(void)asprintf(&threads, "%s (MPI_THREAD_MULTIPLE: %s, OPAL support: %s, OMPI progress: %s, ORTE progress: yes, Event lib: yes)",
(OPAL_HAVE_POSIX_THREADS ? "posix" : "type unknown"), /* "type unknown" can presumably never happen */
OMPI_ENABLE_THREAD_MULTIPLE ? "yes" : "no",
OPAL_ENABLE_MULTI_THREADS ? "yes" : "no",
OMPI_ENABLE_PROGRESS_THREADS ? "yes" : "no");
OPAL_ENABLE_MULTI_THREADS ? "yes" : "no", "yes");
#else
(void)asprintf(&threads, "%s (MPI_THREAD_MULTIPLE: %s, OPAL support: %s, OMPI progress: %s, Event lib: yes)",
(OPAL_HAVE_POSIX_THREADS ? "posix" : "type unknown"), /* "type unknown" can presumably never happen */
OMPI_ENABLE_THREAD_MULTIPLE ? "yes" : "no",
OPAL_ENABLE_MULTI_THREADS ? "yes" : "no",
OMPI_ENABLE_PROGRESS_THREADS ? "yes" : "no");
OPAL_ENABLE_MULTI_THREADS ? "yes" : "no", "yes");
#endif
} else {
threads = strdup("no");