openmpi/ompi/mca/mtl/portals4/mtl_portals4.c

/*
 * Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2010-2012 Sandia National Laboratories.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#include "ompi_config.h"

#include <portals4.h>

#include "ompi/proc/proc.h"
#include "ompi/mca/mtl/mtl.h"
#include "opal/class/opal_list.h"
#include "ompi/runtime/ompi_module_exchange.h"

#include "mtl_portals4.h"
#include "mtl_portals4_recv_short.h"
extern mca_mtl_base_component_2_0_0_t mca_mtl_portals4_component;

mca_mtl_portals4_module_t ompi_mtl_portals4 = {
    {
        8191,        /* max cid - 2^13 - 1 */
        (1UL << 30), /* max tag value - must allow negatives */
        0,           /* request reserve space */
        0,           /* flags */

        ompi_mtl_portals4_add_procs,
        ompi_mtl_portals4_del_procs,
        ompi_mtl_portals4_finalize,

        ompi_mtl_portals4_send,
        ompi_mtl_portals4_isend,
        ompi_mtl_portals4_irecv,
        ompi_mtl_portals4_iprobe,
        ompi_mtl_portals4_imrecv,
        ompi_mtl_portals4_improbe,

        ompi_mtl_portals4_cancel,
        ompi_mtl_portals4_add_comm,
        ompi_mtl_portals4_del_comm
    }
};
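
/*
 * Learn each peer's Portals4 process identifier (ptl_process_t) from
 * the modex, reject peers built for a different architecture, and
 * cache the identifier in the proc's endpoint array.  If an endpoint
 * is already cached, it must match the modex data.
 */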
int
ompi_mtl_portals4_add_procs(struct mca_mtl_base_module_t *mtl,
                            size_t nprocs,
                            struct ompi_proc_t** procs)
{
    int ret, me;
    size_t i;
    bool new_found = false;

    /* Get the list of ptl_process_id_t from the runtime and copy into structure */
    for (i = 0 ; i < nprocs ; ++i) {
        ptl_process_t *modex_id;
        size_t size;

        if (procs[i] == ompi_proc_local_proc) {
            me = i;
        }
        if (procs[i]->super.proc_arch != ompi_proc_local()->super.proc_arch) {
            opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                                "Portals 4 MTL does not support heterogeneous operations.");
            opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                                "Proc %s architecture %x, mine %x.",
                                OMPI_NAME_PRINT(&procs[i]->proc_name),
                                procs[i]->super.proc_arch, ompi_proc_local()->super.proc_arch);
            return OMPI_ERR_NOT_SUPPORTED;
        }
        ret = ompi_modex_recv(&mca_mtl_portals4_component.mtl_version,
                              procs[i], (void**) &modex_id, &size);
        if (OMPI_SUCCESS != ret) {
            opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                                "%s:%d: ompi_modex_recv failed: %d\n",
                                __FILE__, __LINE__, ret);
            return ret;
        } else if (sizeof(ptl_process_t) != size) {
            /* ret is OMPI_SUCCESS here; report the size mismatch instead */
            opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                                "%s:%d: ompi_modex_recv returned %lu bytes, expected %lu\n",
                                __FILE__, __LINE__, (unsigned long) size,
                                (unsigned long) sizeof(ptl_process_t));
            return OMPI_ERR_BAD_PARAM;
        }
        if (NULL == procs[i]->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_PORTALS4]) {
            ptl_process_t *peer_id;

            peer_id = malloc(sizeof(ptl_process_t));
            if (NULL == peer_id) {
                /* ret would be stale here, so don't print it */
                opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                                    "%s:%d: malloc failed\n",
                                    __FILE__, __LINE__);
                return OMPI_ERR_OUT_OF_RESOURCE;
            }
            *peer_id = *modex_id;
            procs[i]->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_PORTALS4] = peer_id;
            new_found = true;
        } else {
            ptl_process_t *proc = (ptl_process_t*) procs[i]->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_PORTALS4];
            if (proc->phys.nid != modex_id->phys.nid ||
                proc->phys.pid != modex_id->phys.pid) {
                opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                                    "%s:%d: existing peer and modex peer don't match\n",
                                    __FILE__, __LINE__);
                return OMPI_ERROR;
            }
        }
    }
#if OMPI_MTL_PORTALS4_FLOW_CONTROL
    if (new_found) {
        ret = ompi_mtl_portals4_flowctl_add_procs(me, nprocs, procs);
        if (OMPI_SUCCESS != ret) {
            opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                                "%s:%d: flowctl_add_procs failed: %d\n",
                                __FILE__, __LINE__, ret);
            return ret;
        }
    }
#endif

    return OMPI_SUCCESS;
}
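
/*
 * Release the cached Portals4 process identifiers allocated in
 * ompi_mtl_portals4_add_procs().
 */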
int
ompi_mtl_portals4_del_procs(struct mca_mtl_base_module_t *mtl,
                            size_t nprocs,
                            struct ompi_proc_t** procs)
{
    size_t i;

    for (i = 0 ; i < nprocs ; ++i) {
        if (NULL != procs[i]->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_PORTALS4]) {
            free(procs[i]->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_PORTALS4]);
            procs[i]->proc_endpoints[OMPI_PROC_ENDPOINT_TAG_PORTALS4] = NULL;
        }
    }

    return OMPI_SUCCESS;
}
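
/*
 * Tear down the MTL: unregister the progress function and drain any
 * pending events, then release match-list entries, memory
 * descriptors, portal table entries, and event queues before shutting
 * down the network interface and the Portals library itself.
 */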
int
ompi_mtl_portals4_finalize(struct mca_mtl_base_module_t *mtl)
{
    opal_progress_unregister(ompi_mtl_portals4_progress);
    while (0 != ompi_mtl_portals4_progress()) { }

#if OMPI_MTL_PORTALS4_FLOW_CONTROL
    ompi_mtl_portals4_flowctl_fini();
#endif
    ompi_mtl_portals4_recv_short_fini();

    PtlMEUnlink(ompi_mtl_portals4.long_overflow_me_h);
    PtlMDRelease(ompi_mtl_portals4.zero_md_h);
#if OMPI_PORTALS4_MAX_MD_SIZE < OMPI_PORTALS4_MAX_VA_SIZE
    {
        int i;
        int num_mds = ompi_mtl_portals4_get_num_mds();

        for (i = 0 ; i < num_mds ; ++i) {
            PtlMDRelease(ompi_mtl_portals4.send_md_hs[i]);
        }

        free(ompi_mtl_portals4.send_md_hs);
    }
#else
    PtlMDRelease(ompi_mtl_portals4.send_md_h);
#endif
    PtlPTFree(ompi_mtl_portals4.ni_h, ompi_mtl_portals4.read_idx);
    PtlPTFree(ompi_mtl_portals4.ni_h, ompi_mtl_portals4.recv_idx);
    PtlEQFree(ompi_mtl_portals4.send_eq_h);
    PtlEQFree(ompi_mtl_portals4.recv_eq_h);
    PtlNIFini(ompi_mtl_portals4.ni_h);
    PtlFini();

    return OMPI_SUCCESS;
}
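
/*
 * Communicator creation and destruction are no-ops: this MTL keeps no
 * per-communicator state, presumably because the matching information
 * (the context id, bounded by the limits advertised above) travels in
 * the Portals match bits rather than in per-communicator resources.
 */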
int
ompi_mtl_portals4_add_comm(struct mca_mtl_base_module_t *mtl,
                           struct ompi_communicator_t *comm)
{
    return OMPI_SUCCESS;
}

int
ompi_mtl_portals4_del_comm(struct mca_mtl_base_module_t *mtl,
                           struct ompi_communicator_t *comm)
{
    return OMPI_SUCCESS;
}