openmpi/ompi/runtime/ompi_module_exchange.c
Ralph Castain 0995a6f3b9 Revert r29917 and replace it with a fix that resolves the thread deadlock while retaining the desired debug info. In an earlier commit, we had changed the modex as follows:
* automatically retrieve the hostname (and all RTE info) for all procs during MPI_Init if nprocs < cutoff

* if nprocs > cutoff, retrieve the hostname (and all RTE info) for a proc upon the first call to modex_recv for that proc. This provides the hostname for debugging purposes: since we only report errors on messages, we must already have called modex_recv to get the endpoint info for that proc

* BTLs are not to call modex_recv until they need the endpoint info for the first message - i.e., not during add_procs - so we don't call it for every process in the job, but only for those with whom we actually communicate

My understanding is that only some BTLs have been modified to meet that third requirement, but those include the Cray ones, where jobs are big enough that launch times were becoming an issue. Other BTLs would hopefully be modified as time went on and interest in using them at scale arose. In the meantime, those BTLs would call modex_recv on every proc, and we would therefore be no worse off than under the prior behavior.
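
To make the second and third points above concrete, here is a minimal sketch of the publish-then-deferred-fetch pattern, assuming only the ompi_modex_send()/ompi_modex_recv() calls defined in this file plus the usual system headers (<stdint.h>, <stdlib.h>, <string.h>). The names example_btl_endpoint_t, example_btl_publish(), and example_btl_first_send() are hypothetical and do not come from any real BTL.

/* Hypothetical sketch only - these names are illustrative, not from a real BTL. */
typedef struct {
    uint32_t addr;    /* whatever addressing info this transport needs */
    uint16_t port;
} example_btl_endpoint_t;

/* During component init: publish our endpoint info once.
 * ompi_modex_send() copies the data, so a stack variable is fine. */
static int example_btl_publish(const mca_base_component_t *comp,
                               uint32_t addr, uint16_t port)
{
    example_btl_endpoint_t ep = { .addr = addr, .port = port };
    return ompi_modex_send(comp, &ep, sizeof(ep));
}

/* On the first message to a peer (not during add_procs): fetch just that
 * peer's endpoint info, which also pulls in its hostname for any later
 * error reporting. */
static int example_btl_first_send(const mca_base_component_t *comp,
                                  const ompi_proc_t *peer,
                                  example_btl_endpoint_t *ep_out)
{
    void *data = NULL;
    size_t size = 0;
    int rc = ompi_modex_recv(comp, peer, &data, &size);

    if (OMPI_SUCCESS != rc) {
        return rc;
    }
    if (sizeof(*ep_out) != size) {
        free(data);
        return OMPI_ERROR;
    }
    memcpy(ep_out, data, size);
    free(data);    /* modex_recv hands back an allocated copy we now own */
    return OMPI_SUCCESS;
}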

This commit revises the MPI-RTE interface to pass the ompi_proc_t instead of the ompi_process_name_t for the proc so that the hostname can be easily inserted. I have advised the ORNL folks of the change.
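
For reference, the shape of that interface change looks roughly as follows. The "before" signature using ompi_process_name_t is reconstructed from the description above and may not match the old code exactly; the "after" signature is the one defined in the file below.

/* Before (reconstructed, approximate): callers passed only the RTE-level
 * process name, so the fetch had no ompi_proc_t to attach the hostname to. */
int ompi_modex_recv(const mca_base_component_t *component,
                    const ompi_process_name_t *proc_name,
                    void **buffer, size_t *size);

/* After (as defined in this file): callers pass the full ompi_proc_t, so
 * the RTE can fill in the proc's hostname as a side effect of the fetch. */
int ompi_modex_recv(const mca_base_component_t *component,
                    const ompi_proc_t *proc,
                    void **buffer, size_t *size);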

cmr=v1.7.4:reviewer=jsquyres:subject=Fix thread deadlock

This commit was SVN r29931.

The following SVN revision numbers were found above:
  r29917 --> open-mpi/ompi@1a972e2c9d
2013-12-17 03:26:00 +00:00


/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2011 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2006-2012 Los Alamos National Security, LLC. All rights
 *                         reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#include "ompi_config.h"
#include "ompi/constants.h"
#include "opal/mca/mca.h"
#include "opal/mca/base/base.h"
#include "opal/dss/dss.h"
#include "ompi/mca/rte/rte.h"
#include "ompi/proc/proc.h"
#include "ompi/runtime/ompi_module_exchange.h"

int ompi_modex_send(const mca_base_component_t *source_component,
                    const void *data, size_t size)
{
    int rc;
    char *key;
    opal_byte_object_t bo;

    key = mca_base_component_to_string(source_component);
    if (NULL == key) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    bo.bytes = (uint8_t *)data;
    bo.size = size;

    /* the store API makes a copy of the provided data */
    rc = ompi_rte_db_store(OMPI_PROC_MY_NAME, key, &bo, OPAL_BYTE_OBJECT);
    free(key);
    return rc;
}

int
ompi_modex_recv(const mca_base_component_t *component,
                const ompi_proc_t *proc,
                void **buffer,
                size_t *size)
{
    int rc;
    char *key;
    opal_byte_object_t *boptr;

    /* set defaults */
    *buffer = NULL;
    *size = 0;

    key = mca_base_component_to_string(component);
    if (NULL == key) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    /* the fetch API returns a pointer to the data */
    rc = ompi_rte_db_fetch(proc, key, (void**)&boptr, OPAL_BYTE_OBJECT);

    if (OMPI_SUCCESS == rc) {
        /* xfer the data - it was allocated in the call */
        *buffer = (void*)boptr->bytes;
        *size = boptr->size;
        /* we no longer require the struct itself since all we
         * wanted was the data inside it
         */
        free(boptr);
    }

    free(key);
    return rc;
}

/* return a pointer to the data, but don't create a new copy of it */
int ompi_modex_recv_pointer(const mca_base_component_t *component,
                            const ompi_proc_t *proc,
                            void **buffer, opal_data_type_t type)
{
    int rc;
    char *name = mca_base_component_to_string(component);

    /* set defaults */
    *buffer = NULL;

    if (NULL == name) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    /* the fetch_pointer API returns a pointer to the data */
    rc = ompi_rte_db_fetch_pointer(proc, name, buffer, type);

    free(name);
    return rc;
}

int
ompi_modex_send_string(const char* key,
                       const void *buffer, size_t size)
{
    int rc;
    opal_byte_object_t bo;

    bo.bytes = (uint8_t *)buffer;
    bo.size = size;

    /* the store API makes a copy of the provided data */
    rc = ompi_rte_db_store(OMPI_PROC_MY_NAME, key, &bo, OPAL_BYTE_OBJECT);

    return rc;
}

int
ompi_modex_recv_string(const char* key,
                       const ompi_proc_t *source_proc,
                       void **buffer, size_t *size)
{
    int rc;
    opal_byte_object_t *boptr;

    /* set defaults */
    *buffer = NULL;
    *size = 0;

    /* the fetch API returns a copy of the data */
    rc = ompi_rte_db_fetch(source_proc, key, (void**)&boptr, OPAL_BYTE_OBJECT);

    if (OMPI_SUCCESS == rc) {
        /* xfer the data for local use */
        *buffer = boptr->bytes;
        *size = boptr->size;
        /* we no longer require the struct itself since all we
         * wanted was the data inside it - only free it when the
         * fetch actually returned one
         */
        free(boptr);
    }

    return rc;
}

/* return a pointer to the data, but don't create a new copy of it */
int ompi_modex_recv_string_pointer(const char* key,
                                   const ompi_proc_t *source_proc,
                                   void **buffer, opal_data_type_t type)
{
    int rc;

    /* set defaults */
    *buffer = NULL;

    /* the fetch_pointer API returns a pointer to the data */
    rc = ompi_rte_db_fetch_pointer(source_proc, key, (void**)buffer, type);

    return rc;
}

int
ompi_modex_send_key_value(const char* key,
                          const void *value,
                          opal_data_type_t dtype)
{
    int rc;

    /* the store API makes a copy of the provided data */
    rc = ompi_rte_db_store(OMPI_PROC_MY_NAME, key, value, dtype);

    return rc;
}

int ompi_modex_recv_key_value(const char* key,
                              const ompi_proc_t *source_proc,
                              void **value, opal_data_type_t type)
{
    int rc;

    /* the fetch API returns the data */
    rc = ompi_rte_db_fetch(source_proc, key, (void**)value, type);

    return rc;
}
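
A small illustrative fragment of how the string-keyed helpers above might be used follows. The key "ompi.example.blob" and the function example_exchange_blob() are made up, and the ownership comment reflects the fact that ompi_modex_recv_string() hands back boptr->bytes without freeing it.

/* Illustrative fragment only - the key and function name are made up.
 * In real code the send side runs during init and the recv side much
 * later, once the peer's data has been exchanged. */
static int example_exchange_blob(const ompi_proc_t *peer)
{
    char payload[] = "example-endpoint-data";
    void *blob = NULL;
    size_t blob_size = 0;
    int rc;

    /* publish our blob under a string key (the data is copied) */
    rc = ompi_modex_send_string("ompi.example.blob", payload, sizeof(payload));
    if (OMPI_SUCCESS != rc) {
        return rc;
    }

    /* fetch the peer's blob; the returned buffer appears to be an
     * allocated copy that the caller owns and should free() */
    rc = ompi_modex_recv_string("ompi.example.blob", peer, &blob, &blob_size);
    if (OMPI_SUCCESS == rc) {
        free(blob);
    }
    return rc;
}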