Update to PMIx master

Signed-off-by: Ralph Castain <rhc@pmix.org>

This commit is contained in:
parent 3c45542c51
commit c5c93e3391
@@ -30,7 +30,7 @@ greek=
# command, or with the date (if "git describe" fails) in the form of
# "date<date>".

repo_rev=git628a724c
repo_rev=git7e40284d

# If tarball_version is not empty, it is used as the version string in
# the tarball filename, regardless of all other versions listed in
@@ -44,7 +44,7 @@ tarball_version=

# The date when this release was created

date="Jul 21, 2019"
date="Jul 26, 2019"

# The shared library version of each of PMIx's public libraries.
# These versions are maintained in accordance with the "Library
@@ -1212,23 +1212,33 @@ fi
AM_CONDITIONAL([WANT_PYTHON_BINDINGS], [test $WANT_PYTHON_BINDINGS -eq 1])

if test "$WANT_PYTHON_BINDINGS" = "1"; then
AM_PATH_PYTHON([3.4], [python_happy=1], [python_happy=0])
if test "$python_happy" = "0"; then
AM_PATH_PYTHON([3.4])
AC_SUBST([PMIX_PYTHON_PATH], [#!"$PYTHON"], "Full Python executable path")
pyvers=`python --version`
python_version=${pyvers#"Python"}
major=$(echo $python_version | cut -d. -f1)
minor=$(echo $python_version | cut -d. -f2)
if test "$major" -lt "3"; then
AC_MSG_WARN([Python bindings were enabled, but no suitable])
AC_MSG_WARN([interpreter was found. PMIx requires at least])
AC_MSG_WARN([Python v3.4 to provide Python bindings])
AC_MSG_ERROR([Cannot continue])
fi
python_version=`python --version 2>&1`
if test "$major" -eq "3" && test "$minor" -lt "4"; then
AC_MSG_WARN([Python bindings were enabled, but no suitable])
AC_MSG_WARN([interpreter was found. PMIx requires at least])
AC_MSG_WARN([Python v3.4 to provide Python bindings])
AC_MSG_ERROR([Cannot continue])
fi

PMIX_SUMMARY_ADD([[Bindings]],[[Python]], [pmix_python], [yes ($python_version)])
AC_SUBST([PMIX_PYTHON_PATH], [#!"$PYTHON"], "Full Python executable path")

AC_MSG_CHECKING([if Cython package installed])
have_cython=`$srcdir/config/pmix_check_cython.py 2> /dev/null`
if test "$have_cython" = "0"; then
AC_MSG_RESULT([yes])
AC_MSG_CHECKING([Cython version])
cython_version=`cython --version 2>&1`
cython_version=`python -c "from Cython.Compiler.Version import version; print(version)"`
AC_MSG_RESULT([$cython_version])
PMIX_SUMMARY_ADD([[Bindings]],[[Cython]], [pmix_cython], [yes ($cython_version)])
else
@@ -254,6 +254,9 @@ int main(int argc, char **argv)
fprintf(stderr, "Client ns %s rank %d: PMIx_Commit failed: %d\n", myproc.nspace, myproc.rank, rc);
goto done;
}
if (0 == myproc.rank) {
sleep(2);
}

/* call fence to synchronize with our peers - instruct
* the fence operation to collect and return all "put"
@@ -49,9 +49,7 @@ int main(int argc, char **argv)
char nsp2[PMIX_MAX_NSLEN+1];
pmix_app_t *app;
char hostname[1024], dir[1024];
pmix_proc_t *peers;
size_t npeers, ntmp=0;
char *nodelist;
size_t ntmp=0;

if (0 > gethostname(hostname, sizeof(hostname))) {
exit(1);
@@ -71,14 +69,14 @@ int main(int argc, char **argv)
(void)strncpy(proc.nspace, myproc.nspace, PMIX_MAX_NSLEN);
proc.rank = PMIX_RANK_WILDCARD;

/* get our universe size */
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_UNIV_SIZE, NULL, 0, &val))) {
fprintf(stderr, "Client ns %s rank %d: PMIx_Get universe size failed: %d\n", myproc.nspace, myproc.rank, rc);
/* get our job size */
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_JOB_SIZE, NULL, 0, &val))) {
fprintf(stderr, "Client ns %s rank %d: PMIx_Get job size failed: %d\n", myproc.nspace, myproc.rank, rc);
goto done;
}
nprocs = val->data.uint32;
PMIX_VALUE_RELEASE(val);
fprintf(stderr, "Client %s:%d universe size %d\n", myproc.nspace, myproc.rank, nprocs);
fprintf(stderr, "Client %s:%d job size %d\n", myproc.nspace, myproc.rank, nprocs);

/* call fence to sync */
(void)strncpy(proc.nspace, myproc.nspace, PMIX_MAX_NSLEN);
@@ -103,13 +101,6 @@ int main(int argc, char **argv)
app->env = (char**)malloc(2 * sizeof(char*));
app->env[0] = strdup("PMIX_ENV_VALUE=3");
app->env[1] = NULL;
PMIX_INFO_CREATE(app->info, 2);
(void)strncpy(app->info[0].key, "DARTH", PMIX_MAX_KEYLEN);
app->info[0].value.type = PMIX_INT8;
app->info[0].value.data.int8 = 12;
(void)strncpy(app->info[1].key, "VADER", PMIX_MAX_KEYLEN);
app->info[1].value.type = PMIX_DOUBLE;
app->info[1].value.data.dval = 12.34;

fprintf(stderr, "Client ns %s rank %d: calling PMIx_Spawn\n", myproc.nspace, myproc.rank);
if (PMIX_SUCCESS != (rc = PMIx_Spawn(NULL, 0, app, 1, nsp2))) {
@@ -122,65 +113,28 @@ int main(int argc, char **argv)
val = NULL;
(void)strncpy(proc.nspace, nsp2, PMIX_MAX_NSLEN);
proc.rank = PMIX_RANK_WILDCARD;
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_UNIV_SIZE, NULL, 0, &val)) ||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_JOB_SIZE, NULL, 0, &val)) ||
NULL == val) {
fprintf(stderr, "Client ns %s rank %d: PMIx_Get universe size failed: %d\n", myproc.nspace, myproc.rank, rc);
fprintf(stderr, "Client ns %s rank %d: PMIx_Get job size failed: %d\n", myproc.nspace, myproc.rank, rc);
goto done;
}
ntmp = val->data.uint32;
PMIX_VALUE_RELEASE(val);
fprintf(stderr, "Client %s:%d universe %s size %d\n", myproc.nspace, myproc.rank, nsp2, (int)ntmp);
}
fprintf(stderr, "Client %s:%d job %s size %d\n", myproc.nspace, myproc.rank, nsp2, (int)ntmp);

/* just cycle the connect/disconnect functions */
(void)strncpy(proc.nspace, myproc.nspace, PMIX_MAX_NSLEN);
proc.rank = PMIX_RANK_WILDCARD;
if (PMIX_SUCCESS != (rc = PMIx_Connect(&proc, 1, NULL, 0))) {
fprintf(stderr, "Client ns %s rank %d: PMIx_Connect failed: %d\n", myproc.nspace, myproc.rank, rc);
/* get a proc-specific value */
val = NULL;
(void)strncpy(proc.nspace, nsp2, PMIX_MAX_NSLEN);
proc.rank = 1;
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_LOCAL_RANK, NULL, 0, &val)) ||
NULL == val) {
fprintf(stderr, "Client ns %s rank %d: PMIx_Get local rank failed: %d\n", myproc.nspace, myproc.rank, rc);
goto done;
}
fprintf(stderr, "Client ns %s rank %d: PMIx_Connect succeeded\n",
myproc.nspace, myproc.rank);
if (PMIX_SUCCESS != (rc = PMIx_Disconnect(&proc, 1, NULL, 0))) {
fprintf(stderr, "Client ns %s rank %d: PMIx_Disonnect failed: %d\n", myproc.nspace, myproc.rank, rc);
goto done;
ntmp = (int)val->data.uint16;
PMIX_VALUE_RELEASE(val);
fprintf(stderr, "Client %s:%d job %s local rank %d\n", myproc.nspace, myproc.rank, nsp2, (int)ntmp);
}
fprintf(stderr, "Client ns %s rank %d: PMIx_Disconnect succeeded\n", myproc.nspace, myproc.rank);

/* finally, test the resolve functions */
if (0 == myproc.rank) {
if (PMIX_SUCCESS != (rc = PMIx_Resolve_peers(hostname, NULL, &peers, &npeers))) {
fprintf(stderr, "Client ns %s rank %d: PMIx_Resolve_peers failed for nspace %s: %d\n", myproc.nspace, myproc.rank, nsp2, rc);
goto done;
}
if ((nprocs+ntmp) != npeers) {
fprintf(stderr, "Client ns %s rank %d: PMIx_Resolve_peers returned incorrect npeers: %d vs %d\n", myproc.nspace, myproc.rank, (int)(nprocs+ntmp), (int)npeers);
goto done;
}
fprintf(stderr, "Client ns %s rank %d: PMIx_Resolve_peers returned %d npeers\n", myproc.nspace, myproc.rank, (int)npeers);
if (PMIX_SUCCESS != (rc = PMIx_Resolve_nodes(nsp2, &nodelist))) {
fprintf(stderr, "Client ns %s rank %d: PMIx_Resolve_nodes failed for nspace %s: %d\n", myproc.nspace, myproc.rank, nsp2, rc);
goto done;
}
fprintf(stderr, "Client ns %s rank %d: PMIx_Resolve_nodes %s", myproc.nspace, myproc.rank, nodelist);
} else {
if (PMIX_SUCCESS != (rc = PMIx_Resolve_peers(hostname, myproc.nspace, &peers, &npeers))) {
fprintf(stderr, "Client ns %s rank %d: PMIx_Resolve_peers failed for nspace %s: %d\n", myproc.nspace, myproc.rank, myproc.nspace, rc);
goto done;
}
if (nprocs != npeers) {
fprintf(stderr, "Client ns %s rank %d: PMIx_Resolve_peers returned incorrect npeers: %d vs %d\n", myproc.nspace, myproc.rank, nprocs, (int)npeers);
goto done;
}
fprintf(stderr, "Client ns %s rank %d: PMIx_Resolve_peers returned %d npeers\n", myproc.nspace, myproc.rank, (int)npeers);
if (PMIX_SUCCESS != (rc = PMIx_Resolve_nodes(myproc.nspace, &nodelist))) {
fprintf(stderr, "Client ns %s rank %d: PMIx_Resolve_nodes failed: %d\n", myproc.nspace, myproc.rank, rc);
goto done;
}
fprintf(stderr, "Client ns %s rank %d: PMIx_Resolve_nodes %s\n", myproc.nspace, myproc.rank, nodelist);
}
PMIX_PROC_FREE(peers, npeers);
free(nodelist);

done:
/* call fence to sync */
@@ -101,7 +101,7 @@ PMIX_EXPORT pmix_status_t PMIx_Get(const pmix_proc_t *proc,
(NULL == key) ? "NULL" : key);

/* try to get data directly, without threadshift */
if (PMIX_RANK_UNDEF != proc->rank) {
if (PMIX_RANK_UNDEF != proc->rank && NULL != key) {
if (PMIX_SUCCESS == (rc = _getfn_fastpath(proc, key, info, ninfo, val))) {
goto done;
}
@@ -327,7 +327,6 @@ static void _getnb_cbfunc(struct pmix_peer_t *pr,
}

if (PMIX_SUCCESS != ret) {
PMIX_ERROR_LOG(ret);
goto done;
}
if (PMIX_RANK_UNDEF == proc.rank) {
@@ -1,6 +1,6 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2014-2018 Intel, Inc. All rights reserved.
* Copyright (c) 2014-2019 Intel, Inc. All rights reserved.
* Copyright (c) 2016 Mellanox Technologies, Inc.
* All rights reserved.
* Copyright (c) 2016 IBM Corporation. All rights reserved.
@@ -83,14 +83,18 @@ PMIX_EXPORT pmix_status_t PMIx_Log(const pmix_info_t data[], size_t ndata,
* recv routine so we know which callback to use when
* the return message is recvd */
PMIX_CONSTRUCT(&cb, pmix_cb_t);
if (PMIX_SUCCESS != (rc = PMIx_Log_nb(data, ndata, directives,
ndirs, opcbfunc, &cb))) {
rc = PMIx_Log_nb(data, ndata, directives, ndirs, opcbfunc, &cb);
if (PMIX_SUCCESS == rc) {
/* wait for the operation to complete */
PMIX_WAIT_THREAD(&cb.lock);
} else {
PMIX_DESTRUCT(&cb);
if (PMIX_OPERATION_SUCCEEDED == rc) {
rc = PMIX_SUCCESS;
}
return rc;
}

/* wait for the operation to complete */
PMIX_WAIT_THREAD(&cb.lock);
rc = cb.status;
PMIX_DESTRUCT(&cb);
@@ -1,7 +1,7 @@
/*
* Copyright (c) 2010 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2010 Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018 Intel, Inc. All rights reserved.
* Copyright (c) 2018-2019 Intel, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@@ -158,7 +158,7 @@ static int if_bsdx_open(void)
/* fill values into the pmix_pif_t */
memcpy(&a4, &(sin_addr->sin_addr), sizeof(struct in_addr));

pmix_strncpy(intf->if_name, cur_ifaddrs->ifa_name, IF_NAMESIZE-1);
pmix_strncpy(intf->if_name, cur_ifaddrs->ifa_name, PMIX_IF_NAMESIZE-1);
intf->if_index = pmix_list_get_size(&pmix_if_list) + 1;
((struct sockaddr_in*) &intf->if_addr)->sin_addr = a4;
((struct sockaddr_in*) &intf->if_addr)->sin_family = AF_INET;
@@ -1,7 +1,7 @@
/*
* Copyright (c) 2010 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2010 Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018 Intel, Inc. All rights reserved.
* Copyright (c) 2018-2019 Intel, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@@ -183,7 +183,7 @@ static int if_bsdx_ipv6_open(void)
return PMIX_ERR_OUT_OF_RESOURCE;
}
intf->af_family = AF_INET6;
pmix_strncpy(intf->if_name, cur_ifaddrs->ifa_name, IF_NAMESIZE-1);
pmix_strncpy(intf->if_name, cur_ifaddrs->ifa_name, PMIX_IF_NAMESIZE-1);
intf->if_index = pmix_list_get_size(&pmix_if_list) + 1;
((struct sockaddr_in6*) &intf->if_addr)->sin6_addr = a6;
((struct sockaddr_in6*) &intf->if_addr)->sin6_family = AF_INET6;
@@ -1,7 +1,7 @@
/*
* Copyright (c) 2010 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2010 Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018 Intel, Inc. All rights reserved.
* Copyright (c) 2018-2019 Intel, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@@ -82,12 +82,15 @@ static int if_linux_ipv6_open(void)
if ((f = fopen("/proc/net/if_inet6", "r"))) {
/* IF_NAMESIZE is normally 16 on Linux,
but the next scanf allows up to 21 bytes */
char ifname[21];
char ifname[PMIX_IF_NAMESIZE];
unsigned int idx, pfxlen, scope, dadstat;
struct in6_addr a6;
int iter;
uint32_t flag;
unsigned int addrbyte[16];
unsigned int addrbyte[PMIX_IF_NAMESIZE];

memset(addrbyte, 0, PMIX_IF_NAMESIZE*sizeof(unsigned int));
memset(ifname, 0, PMIX_IF_NAMESIZE*sizeof(char));

while (fscanf(f, "%2x%2x%2x%2x%2x%2x%2x%2x%2x%2x%2x%2x%2x%2x%2x%2x %x %x %x %x %20s\n",
&addrbyte[0], &addrbyte[1], &addrbyte[2], &addrbyte[3],
@@ -129,7 +132,7 @@ static int if_linux_ipv6_open(void)
}

/* now construct the pmix_pif_t */
pmix_strncpy(intf->if_name, ifname, IF_NAMESIZE-1);
pmix_strncpy(intf->if_name, ifname, PMIX_IF_NAMESIZE-1);
intf->if_index = pmix_list_get_size(&pmix_if_list)+1;
intf->if_kernel_index = (uint16_t) idx;
((struct sockaddr_in6*) &intf->if_addr)->sin6_addr = a6;
@@ -3,7 +3,7 @@
* Copyright (c) 2010-2013 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2016-2018 Intel, Inc. All rights reserved.
* Copyright (c) 2016-2019 Intel, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@@ -73,7 +73,7 @@ BEGIN_C_DECLS

typedef struct pmix_pif_t {
pmix_list_item_t super;
char if_name[IF_NAMESIZE+1];
char if_name[PMIX_IF_NAMESIZE+1];
int if_index;
uint16_t if_kernel_index;
uint16_t af_family;
@@ -3,7 +3,7 @@
* Copyright (c) 2010 Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2016-2018 Intel, Inc. All rights reserved.
* Copyright (c) 2016-2019 Intel, Inc. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@@ -190,7 +190,7 @@ static int if_solaris_ipv6_open(void)
}
intf->af_family = AF_INET6;

pmix_strncpy (intf->if_name, lifreq->lifr_name, IF_NAMESIZE-1);
pmix_strncpy (intf->if_name, lifreq->lifr_name, PMIX_IF_NAMESIZE-1);
intf->if_index = pmix_list_get_size(&pmix_if_list)+1;
memcpy(&intf->if_addr, my_addr, sizeof (*my_addr));
intf->if_mask = 64;
@ -1,6 +1,6 @@
|
||||
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
|
||||
/*
|
||||
* Copyright (c) 2018 Intel, Inc. All rights reserved.
|
||||
* Copyright (c) 2018-2019 Intel, Inc. All rights reserved.
|
||||
*
|
||||
* $COPYRIGHT$
|
||||
*
|
||||
@ -109,8 +109,8 @@ pmix_status_t pmix_plog_base_log(const pmix_proc_t *source,
|
||||
* channel that can successfully handle this request,
|
||||
* and any channel directives */
|
||||
for (n=0; n < ndirs; n++) {
|
||||
if (0 == strncmp(directives[n].key, PMIX_LOG_ONCE, PMIX_MAX_KEYLEN)) {
|
||||
logonce = true;
|
||||
if (PMIX_CHECK_KEY(&directives[n], PMIX_LOG_ONCE)) {
|
||||
logonce = PMIX_INFO_TRUE(&directives[n]);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -237,14 +237,10 @@ pmix_status_t pmix_plog_base_log(const pmix_proc_t *source,
|
||||
|
||||
rc = mycount->status; // save the status as it could change when the lock is released
|
||||
if (0 == mycount->nreqs) {
|
||||
/* execute their callback */
|
||||
if (NULL != mycount->cbfunc) {
|
||||
mycount->cbfunc(mycount->status, mycount->cbdata);
|
||||
}
|
||||
PMIX_RELEASE_THREAD(&mycount->lock);
|
||||
PMIX_RELEASE(mycount);
|
||||
PMIX_RELEASE_THREAD(&pmix_plog_globals.lock);
|
||||
return PMIX_SUCCESS;
|
||||
return PMIX_OPERATION_SUCCEEDED;
|
||||
}
|
||||
PMIX_RELEASE_THREAD(&mycount->lock);
|
||||
PMIX_RELEASE_THREAD(&pmix_plog_globals.lock);
|
||||
|
@ -10,7 +10,7 @@
|
||||
* Copyright (c) 2004-2005 The Regents of the University of California.
|
||||
* All rights reserved.
|
||||
* Copyright (c) 2007 Sun Microsystems, Inc. All rights reserved.
|
||||
* Copyright (c) 2014-2018 Intel, Inc. All rights reserved.
|
||||
* Copyright (c) 2014-2019 Intel, Inc. All rights reserved.
|
||||
* $COPYRIGHT$
|
||||
*
|
||||
* Additional copyrights may follow
|
||||
@ -101,6 +101,9 @@ static pmix_status_t mylog(const pmix_proc_t *source,
|
||||
/* check to see if there are any stdfd entries */
|
||||
rc = PMIX_ERR_TAKE_NEXT_OPTION;
|
||||
for (n=0; n < ndata; n++) {
|
||||
if (PMIX_INFO_OP_IS_COMPLETE(&data[n])) {
|
||||
continue;
|
||||
}
|
||||
if (0 == strncmp(data[n].key, PMIX_LOG_STDERR, PMIX_MAX_KEYLEN)) {
|
||||
bo.bytes = data[n].value.data.string;
|
||||
bo.size = strlen(bo.bytes);
|
||||
@ -117,6 +120,5 @@ static pmix_status_t mylog(const pmix_proc_t *source,
|
||||
rc = PMIX_SUCCESS;
|
||||
}
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
@ -930,7 +930,7 @@ static char **split_and_resolve(char **orig_str, char *name)
|
||||
{
|
||||
int i, ret, save, if_index;
|
||||
char **argv, *str, *tmp;
|
||||
char if_name[IF_NAMESIZE];
|
||||
char if_name[PMIX_IF_NAMESIZE];
|
||||
struct sockaddr_storage argv_inaddr, if_inaddr;
|
||||
uint32_t argv_prefix;
|
||||
|
||||
|
@ -1381,8 +1381,8 @@ static void _dmodex_req(int sd, short args, void *cbdata)
|
||||
PMIX_ACQUIRE_OBJECT(cd);
|
||||
|
||||
pmix_output_verbose(2, pmix_server_globals.base_output,
|
||||
"DMODX LOOKING FOR %s:%d",
|
||||
cd->proc.nspace, cd->proc.rank);
|
||||
"DMODX LOOKING FOR %s",
|
||||
PMIX_NAME_PRINT(&cd->proc));
|
||||
|
||||
/* this should be one of my clients, but a race condition
|
||||
* could cause this request to arrive prior to us having
|
||||
@ -1515,8 +1515,9 @@ PMIX_EXPORT pmix_status_t PMIx_server_dmodex_request(const pmix_proc_t *proc,
|
||||
}
|
||||
|
||||
pmix_output_verbose(2, pmix_server_globals.base_output,
|
||||
"pmix:server dmodex request%s:%d",
|
||||
proc->nspace, proc->rank);
|
||||
"%s pmix:server dmodex request for proc %s",
|
||||
PMIX_NAME_PRINT(&pmix_globals.myid),
|
||||
PMIX_NAME_PRINT(proc));
|
||||
|
||||
cd = PMIX_NEW(pmix_setup_caddy_t);
|
||||
pmix_strncpy(cd->proc.nspace, proc->nspace, PMIX_MAX_NSLEN);
|
||||
|
@ -126,6 +126,7 @@ pmix_status_t pmix_server_get(pmix_buffer_t *buf,
|
||||
pmix_dmdx_request_t *req;
|
||||
bool local;
|
||||
bool localonly = false;
|
||||
bool diffnspace = false;
|
||||
struct timeval tv = {0, 0};
|
||||
pmix_buffer_t pbkt, pkt;
|
||||
pmix_byte_object_t bo;
|
||||
@ -133,10 +134,10 @@ pmix_status_t pmix_server_get(pmix_buffer_t *buf,
|
||||
pmix_proc_t proc;
|
||||
char *data;
|
||||
size_t sz, n;
|
||||
pmix_peer_t *peer;
|
||||
|
||||
pmix_output_verbose(2, pmix_server_globals.get_output,
|
||||
"recvd GET");
|
||||
"%s recvd GET",
|
||||
PMIX_NAME_PRINT(&pmix_globals.myid));
|
||||
|
||||
/* setup */
|
||||
memset(nspace, 0, sizeof(nspace));
|
||||
@ -198,6 +199,12 @@ pmix_status_t pmix_server_get(pmix_buffer_t *buf,
|
||||
}
|
||||
}
|
||||
|
||||
/* check if the nspace of the requestor is different from
|
||||
* the nspace of the target process */
|
||||
if (!PMIX_CHECK_NSPACE(nspace, cd->peer->info->pname.nspace)) {
|
||||
diffnspace = true;
|
||||
}
|
||||
|
||||
pmix_output_verbose(2, pmix_server_globals.get_output,
|
||||
"%s:%d EXECUTE GET FOR %s:%d ON BEHALF OF %s:%d",
|
||||
pmix_globals.myid.nspace,
|
||||
@ -294,10 +301,10 @@ pmix_status_t pmix_server_get(pmix_buffer_t *buf,
|
||||
return PMIX_SUCCESS;
|
||||
}
|
||||
|
||||
/* this nspace is known, so we can process the request.
|
||||
* if the rank is wildcard, then they are asking for the
|
||||
* job-level info for this nspace - provide it */
|
||||
if (PMIX_RANK_WILDCARD == rank) {
|
||||
/* the target nspace is known, so we can process the request.
|
||||
* if the rank is wildcard, or the nspace is different, then
|
||||
* they are asking for the job-level info for this nspace - provide it */
|
||||
if (PMIX_RANK_WILDCARD == rank || diffnspace) {
|
||||
/* see if we have the job-level info - we won't have it
|
||||
* if we have no local procs and haven't already asked
|
||||
* for it, so there is no guarantee we have it */
|
||||
@ -309,21 +316,32 @@ pmix_status_t pmix_server_get(pmix_buffer_t *buf,
|
||||
* can retrieve the info from that GDS. Otherwise,
|
||||
* we need to retrieve it from our own */
|
||||
PMIX_CONSTRUCT(&cb, pmix_cb_t);
|
||||
peer = pmix_globals.mypeer;
|
||||
/* this data is for a local client, so give the gds the
|
||||
* option of returning a complete copy of the data,
|
||||
* or returning a pointer to local storage */
|
||||
cb.proc = &proc;
|
||||
cb.scope = PMIX_SCOPE_UNDEF;
|
||||
cb.copy = false;
|
||||
PMIX_GDS_FETCH_KV(rc, peer, &cb);
|
||||
PMIX_GDS_FETCH_KV(rc, pmix_globals.mypeer, &cb);
|
||||
if (PMIX_SUCCESS != rc) {
|
||||
PMIX_DESTRUCT(&cb);
|
||||
return rc;
|
||||
}
|
||||
/* if the requested rank is not WILDCARD, then retrieve the
|
||||
* job-specific data for that rank - a scope of UNDEF
|
||||
* will direct the GDS to provide it. Anything found will
|
||||
* simply be added to the cb.kvs list */
|
||||
if (PMIX_RANK_WILDCARD != rank) {
|
||||
proc.rank = rank;
|
||||
PMIX_GDS_FETCH_KV(rc, pmix_globals.mypeer, &cb);
|
||||
if (PMIX_SUCCESS != rc) {
|
||||
PMIX_DESTRUCT(&cb);
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
PMIX_CONSTRUCT(&pkt, pmix_buffer_t);
|
||||
/* assemble the provided data into a byte object */
|
||||
PMIX_GDS_ASSEMB_KVS_REQ(rc, peer, &proc, &cb.kvs, &pkt, cd);
|
||||
PMIX_GDS_ASSEMB_KVS_REQ(rc, pmix_globals.mypeer, &proc, &cb.kvs, &pkt, cd);
|
||||
if (PMIX_SUCCESS != rc) {
|
||||
PMIX_ERROR_LOG(rc);
|
||||
PMIX_DESTRUCT(&cb);
|
||||
@ -333,7 +351,7 @@ pmix_status_t pmix_server_get(pmix_buffer_t *buf,
|
||||
PMIX_DESTRUCT(&pkt);
|
||||
/* pack it into the payload */
|
||||
PMIX_CONSTRUCT(&pbkt, pmix_buffer_t);
|
||||
PMIX_BFROPS_PACK(rc, cd->peer, &pbkt, &bo, 1, PMIX_BYTE_OBJECT);
|
||||
PMIX_BFROPS_PACK(rc, pmix_globals.mypeer, &pbkt, &bo, 1, PMIX_BYTE_OBJECT);
|
||||
free(bo.bytes);
|
||||
if (PMIX_SUCCESS != rc) {
|
||||
PMIX_ERROR_LOG(rc);
|
||||
|
@ -13,7 +13,7 @@
|
||||
* reserved.
|
||||
* Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved.
|
||||
* Copyright (c) 2013 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright (c) 2016 Intel, Inc. All rights reserved.
|
||||
* Copyright (c) 2016-2019 Intel, Inc. All rights reserved.
|
||||
* $COPYRIGHT$
|
||||
*
|
||||
* Additional copyrights may follow
|
||||
@ -38,9 +38,7 @@
|
||||
#include <netinet/in.h>
|
||||
#endif
|
||||
|
||||
#ifndef IF_NAMESIZE
|
||||
#define IF_NAMESIZE 32
|
||||
#endif
|
||||
#define PMIX_IF_NAMESIZE 256
|
||||
|
||||
BEGIN_C_DECLS
|
||||
|
||||
|
@ -13,7 +13,7 @@
|
||||
* All rights reserved.
|
||||
* Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright (c) 2011 Oak Ridge National Labs. All rights reserved.
|
||||
* Copyright (c) 2013-2018 Intel, Inc. All rights reserved.
|
||||
* Copyright (c) 2013-2019 Intel, Inc. All rights reserved.
|
||||
* Copyright (c) 2015 Mellanox Technologies, Inc. All rights reserved.
|
||||
* $COPYRIGHT$
|
||||
*
|
||||
@ -115,12 +115,15 @@ int main(int argc, char **argv)
|
||||
pmix_value_t *val = &value;
|
||||
char *tmp;
|
||||
pmix_proc_t proc;
|
||||
uint32_t nprocs, n;
|
||||
uint32_t nprocs, n, k, nlocal;
|
||||
int cnt, j;
|
||||
volatile bool active;
|
||||
pmix_info_t *iptr;
|
||||
size_t ninfo;
|
||||
pmix_status_t code;
|
||||
char **peers;
|
||||
bool all_local, local;
|
||||
pmix_rank_t *locals = NULL;
|
||||
|
||||
/* init us and declare we are a test programming model */
|
||||
PMIX_INFO_CREATE(iptr, 2);
|
||||
@ -164,11 +167,11 @@ int main(int argc, char **argv)
|
||||
usleep(10);
|
||||
}
|
||||
|
||||
/* get our universe size */
|
||||
/* get our job size */
|
||||
(void)strncpy(proc.nspace, myproc.nspace, PMIX_MAX_NSLEN);
|
||||
proc.rank = PMIX_RANK_WILDCARD;
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_UNIV_SIZE, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Get universe size failed: %s",
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_JOB_SIZE, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Get job size failed: %s",
|
||||
myproc.nspace, myproc.rank, PMIx_Error_string(rc));
|
||||
goto done;
|
||||
}
|
||||
@ -185,6 +188,27 @@ int main(int argc, char **argv)
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* get a list of our local peers */
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_LOCAL_PEERS, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Get local peers failed: %s",
|
||||
myproc.nspace, myproc.rank, PMIx_Error_string(rc));
|
||||
goto done;
|
||||
}
|
||||
/* split the returned string to get the rank of each local peer */
|
||||
peers = pmix_argv_split(val->data.string, ',');
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
nlocal = pmix_argv_count(peers);
|
||||
if (nprocs == nlocal) {
|
||||
all_local = true;
|
||||
} else {
|
||||
all_local = false;
|
||||
locals = (pmix_rank_t*)malloc(pmix_argv_count(peers) * sizeof(pmix_rank_t));
|
||||
for (cnt=0; NULL != peers[cnt]; cnt++) {
|
||||
locals[cnt] = strtoul(peers[cnt], NULL, 10);
|
||||
}
|
||||
}
|
||||
pmix_argv_free(peers);
|
||||
|
||||
for (cnt=0; cnt < MAXCNT; cnt++) {
|
||||
(void)asprintf(&tmp, "%s-%d-local-%d", myproc.nspace, myproc.rank, cnt);
|
||||
value.type = PMIX_UINT64;
|
||||
@ -225,6 +249,19 @@ int main(int argc, char **argv)
|
||||
for (j=0; j <= cnt; j++) {
|
||||
for (n=0; n < nprocs; n++) {
|
||||
proc.rank = n;
|
||||
if (all_local) {
|
||||
local = true;
|
||||
} else {
|
||||
local = false;
|
||||
/* see if this proc is local to us */
|
||||
for (k=0; k < nlocal; k++) {
|
||||
if (proc.rank == locals[k]) {
|
||||
local = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (local) {
|
||||
(void)asprintf(&tmp, "%s-%d-local-%d", myproc.nspace, n, j);
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, tmp, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d cnt %d: PMIx_Get %s failed: %s",
|
||||
@ -251,18 +288,30 @@ int main(int argc, char **argv)
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
free(tmp);
|
||||
|
||||
if (n != myproc.rank) {
|
||||
/* now check that we don't get data for a remote proc - note that we
|
||||
* always can get our own remote data as we published it */
|
||||
if (proc.rank != myproc.rank) {
|
||||
(void)asprintf(&tmp, "%s-%d-remote-%d", proc.nspace, n, j);
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, tmp, NULL, 0, &val))) {
|
||||
if (PMIX_SUCCESS == (rc = PMIx_Get(&proc, tmp, NULL, 0, &val))) {
|
||||
/* this data should _not_ be found as we are on the same node
|
||||
* and the data was "put" with a PMIX_REMOTE scope */
|
||||
continue;
|
||||
}
|
||||
pmix_output(0, "Client ns %s rank %d cnt %d: PMIx_Get %s returned remote data for a local proc",
|
||||
pmix_output(0, "ERROR: Client ns %s rank %d cnt %d: PMIx_Get %s returned remote data for a local proc",
|
||||
myproc.nspace, myproc.rank, j, tmp);
|
||||
}
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
free(tmp);
|
||||
}
|
||||
} else {
|
||||
(void)asprintf(&tmp, "%s-%d-remote-%d", proc.nspace, n, j);
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, tmp, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d cnt %d: PMIx_Get %s failed for remote proc",
|
||||
myproc.nspace, myproc.rank, j, tmp);
|
||||
}
|
||||
if (NULL != val) {
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
}
|
||||
free(tmp);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -129,13 +129,16 @@ int main(int argc, char **argv)
|
||||
pmix_value_t *val = &value;
|
||||
char *tmp;
|
||||
pmix_proc_t proc;
|
||||
uint32_t nprocs, n;
|
||||
uint32_t nprocs, n, k, nlocal;
|
||||
int cnt, j;
|
||||
bool doabort = false;
|
||||
volatile bool active;
|
||||
pmix_info_t info, *iptr;
|
||||
size_t ninfo;
|
||||
pmix_status_t code;
|
||||
char **peers;
|
||||
bool all_local, local;
|
||||
pmix_rank_t *locals = NULL;
|
||||
|
||||
if (1 < argc) {
|
||||
if (0 == strcmp("-abort", argv[1])) {
|
||||
@ -196,17 +199,17 @@ int main(int argc, char **argv)
|
||||
}
|
||||
|
||||
|
||||
/* get our universe size */
|
||||
/* get our job size */
|
||||
(void)strncpy(proc.nspace, myproc.nspace, PMIX_MAX_NSLEN);
|
||||
proc.rank = PMIX_RANK_WILDCARD;
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_UNIV_SIZE, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Get universe size failed: %s",
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_JOB_SIZE, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Get job size failed: %s",
|
||||
myproc.nspace, myproc.rank, PMIx_Error_string(rc));
|
||||
goto done;
|
||||
}
|
||||
nprocs = val->data.uint32;
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
pmix_output(0, "Client %s:%d universe size %d", myproc.nspace, myproc.rank, nprocs);
|
||||
pmix_output(0, "Client %s:%d job size %d", myproc.nspace, myproc.rank, nprocs);
|
||||
|
||||
/* put a few values */
|
||||
(void)asprintf(&tmp, "%s-%d-internal", myproc.nspace, myproc.rank);
|
||||
@ -218,6 +221,27 @@ int main(int argc, char **argv)
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* get a list of our local peers */
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_LOCAL_PEERS, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Get local peers failed: %s",
|
||||
myproc.nspace, myproc.rank, PMIx_Error_string(rc));
|
||||
goto done;
|
||||
}
|
||||
/* split the returned string to get the rank of each local peer */
|
||||
peers = pmix_argv_split(val->data.string, ',');
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
nlocal = pmix_argv_count(peers);
|
||||
if (nprocs == nlocal) {
|
||||
all_local = true;
|
||||
} else {
|
||||
all_local = false;
|
||||
locals = (pmix_rank_t*)malloc(pmix_argv_count(peers) * sizeof(pmix_rank_t));
|
||||
for (cnt=0; NULL != peers[cnt]; cnt++) {
|
||||
locals[cnt] = strtoul(peers[cnt], NULL, 10);
|
||||
}
|
||||
}
|
||||
pmix_argv_free(peers);
|
||||
|
||||
for (cnt=0; cnt < MAXCNT; cnt++) {
|
||||
(void)asprintf(&tmp, "%s-%d-local-%d", myproc.nspace, myproc.rank, cnt);
|
||||
value.type = PMIX_UINT64;
|
||||
@ -258,6 +282,19 @@ int main(int argc, char **argv)
|
||||
for (j=0; j <= cnt; j++) {
|
||||
for (n=0; n < nprocs; n++) {
|
||||
proc.rank = n;
|
||||
if (all_local) {
|
||||
local = true;
|
||||
} else {
|
||||
local = false;
|
||||
/* see if this proc is local to us */
|
||||
for (k=0; k < nlocal; k++) {
|
||||
if (proc.rank == locals[k]) {
|
||||
local = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (local) {
|
||||
(void)asprintf(&tmp, "%s-%d-local-%d", myproc.nspace, n, j);
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, tmp, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d cnt %d: PMIx_Get %s failed: %s",
|
||||
@ -285,16 +322,31 @@ int main(int argc, char **argv)
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
free(tmp);
|
||||
|
||||
if (n != myproc.rank) {
|
||||
/* now check that we don't get data for a remote proc - note that we
|
||||
* always can get our own remote data as we published it */
|
||||
if (proc.rank != myproc.rank) {
|
||||
(void)asprintf(&tmp, "%s-%d-remote-%d", proc.nspace, n, j);
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, tmp, NULL, 0, &val))) {
|
||||
/* this data should _not_ be found as we are on the same node
|
||||
* and the data was "put" with a PMIX_REMOTE scope */
|
||||
pmix_output(0, "Client ns %s rank %d cnt %d: PMIx_Get %s returned correct", myproc.nspace, myproc.rank, j, tmp);
|
||||
continue;
|
||||
}
|
||||
pmix_output(0, "Client ns %s rank %d cnt %d: PMIx_Get %s returned remote data for a local proc",
|
||||
} else {
|
||||
pmix_output(0, "ERROR: Client ns %s rank %d cnt %d: PMIx_Get %s returned remote data for a local proc",
|
||||
myproc.nspace, myproc.rank, j, tmp);
|
||||
}
|
||||
if (NULL != val) {
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
}
|
||||
free(tmp);
|
||||
}
|
||||
} else {
|
||||
(void)asprintf(&tmp, "%s-%d-remote-%d", proc.nspace, n, j);
|
||||
if (PMIX_SUCCESS == (rc = PMIx_Get(&proc, tmp, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d cnt %d: PMIx_Get %s returned correct", myproc.nspace, myproc.rank, j, tmp);
|
||||
} else {
|
||||
pmix_output(0, "Client ns %s rank %d cnt %d: PMIx_Get %s failed for remote proc",
|
||||
myproc.nspace, myproc.rank, j, tmp);
|
||||
}
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
free(tmp);
|
||||
}
|
||||
@ -332,7 +384,7 @@ int main(int argc, char **argv)
|
||||
|
||||
/* log something */
|
||||
PMIX_INFO_CONSTRUCT(&info);
|
||||
PMIX_INFO_LOAD(&info, PMIX_LOG_STDERR, "test log msg", PMIX_STRING);
|
||||
PMIX_INFO_LOAD(&info, PMIX_LOG_STDERR, "test log msg\n", PMIX_STRING);
|
||||
active = true;
|
||||
rc = PMIx_Log_nb(&info, 1, NULL, 0, opcbfunc, (void*)&active);
|
||||
if (PMIX_SUCCESS != rc) {
|
||||
|
@ -37,91 +37,8 @@
|
||||
#include "src/util/output.h"
|
||||
#include "src/util/printf.h"
|
||||
|
||||
#define MAXCNT 1
|
||||
|
||||
static volatile bool completed = false;
|
||||
static pmix_proc_t myproc;
|
||||
|
||||
static void notification_fn(size_t evhdlr_registration_id,
|
||||
pmix_status_t status,
|
||||
const pmix_proc_t *source,
|
||||
pmix_info_t info[], size_t ninfo,
|
||||
pmix_info_t results[], size_t nresults,
|
||||
pmix_event_notification_cbfunc_fn_t cbfunc,
|
||||
void *cbdata)
|
||||
{
|
||||
pmix_output(0, "Client %s:%d NOTIFIED with status %s", myproc.nspace, myproc.rank, PMIx_Error_string(status));
|
||||
if (NULL != cbfunc) {
|
||||
cbfunc(PMIX_SUCCESS, NULL, 0, NULL, NULL, cbdata);
|
||||
}
|
||||
completed = true;
|
||||
}
|
||||
|
||||
static void errhandler_reg_callbk(pmix_status_t status,
|
||||
size_t errhandler_ref,
|
||||
void *cbdata)
|
||||
{
|
||||
volatile bool *active = (volatile bool*)cbdata;
|
||||
|
||||
pmix_output(0, "Client: ERRHANDLER REGISTRATION CALLBACK CALLED WITH STATUS %d, ref=%lu",
|
||||
status, (unsigned long)errhandler_ref);
|
||||
*active = false;
|
||||
}
|
||||
|
||||
static void opcbfunc(pmix_status_t status, void *cbdata)
|
||||
{
|
||||
volatile bool *active = (volatile bool*)cbdata;
|
||||
*active = false;
|
||||
}
|
||||
|
||||
/* this is an event notification function that we explicitly request
|
||||
* be called when the PMIX_MODEL_DECLARED notification is issued.
|
||||
* We could catch it in the general event notification function and test
|
||||
* the status to see if the status matched, but it often is simpler
|
||||
* to declare a use-specific notification callback point. In this case,
|
||||
* we are asking to know whenever a model is declared as a means
|
||||
* of testing server self-notification */
|
||||
static void model_callback(size_t evhdlr_registration_id,
|
||||
pmix_status_t status,
|
||||
const pmix_proc_t *source,
|
||||
pmix_info_t info[], size_t ninfo,
|
||||
pmix_info_t results[], size_t nresults,
|
||||
pmix_event_notification_cbfunc_fn_t cbfunc,
|
||||
void *cbdata)
|
||||
{
|
||||
size_t n;
|
||||
|
||||
/* just let us know it was received */
|
||||
fprintf(stderr, "%s:%d Model event handler called with status %d(%s)\n",
|
||||
myproc.nspace, myproc.rank, status, PMIx_Error_string(status));
|
||||
for (n=0; n < ninfo; n++) {
|
||||
if (PMIX_STRING == info[n].value.type) {
|
||||
fprintf(stderr, "%s:%d\t%s:\t%s\n",
|
||||
myproc.nspace, myproc.rank,
|
||||
info[n].key, info[n].value.data.string);
|
||||
}
|
||||
}
|
||||
|
||||
/* we must NOT tell the event handler state machine that we
|
||||
* are the last step as that will prevent it from notifying
|
||||
* anyone else that might be listening for declarations */
|
||||
if (NULL != cbfunc) {
|
||||
cbfunc(PMIX_SUCCESS, NULL, 0, NULL, NULL, cbdata);
|
||||
}
|
||||
}
|
||||
|
||||
/* event handler registration is done asynchronously */
|
||||
static void model_registration_callback(pmix_status_t status,
|
||||
size_t evhandler_ref,
|
||||
void *cbdata)
|
||||
{
|
||||
volatile bool *active = (volatile bool*)cbdata;
|
||||
|
||||
fprintf(stderr, "simpclient EVENT HANDLER REGISTRATION RETURN STATUS %d, ref=%lu\n",
|
||||
status, (unsigned long)evhandler_ref);
|
||||
*active = false;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int rc;
|
||||
@ -130,30 +47,14 @@ int main(int argc, char **argv)
|
||||
char *tmp;
|
||||
pmix_proc_t proc;
|
||||
uint32_t nprocs, n, *u32;
|
||||
int cnt, j;
|
||||
bool doabort = false;
|
||||
volatile bool active;
|
||||
pmix_info_t info, *iptr;
|
||||
size_t ninfo, m;
|
||||
pmix_status_t code;
|
||||
pmix_coord_t *coords;
|
||||
|
||||
if (1 < argc) {
|
||||
if (0 == strcmp("-abort", argv[1])) {
|
||||
doabort = true;
|
||||
}
|
||||
}
|
||||
|
||||
/* init us and declare we are a test programming model */
|
||||
PMIX_INFO_CREATE(iptr, 2);
|
||||
PMIX_INFO_LOAD(&iptr[0], PMIX_PROGRAMMING_MODEL, "TEST", PMIX_STRING);
|
||||
PMIX_INFO_LOAD(&iptr[1], PMIX_MODEL_LIBRARY_NAME, "PMIX", PMIX_STRING);
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Init(&myproc, iptr, 2))) {
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Init(&myproc, NULL, 0))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Init failed: %s",
|
||||
myproc.nspace, myproc.rank, PMIx_Error_string(rc));
|
||||
exit(rc);
|
||||
}
|
||||
PMIX_INFO_FREE(iptr, 2);
|
||||
pmix_output(0, "Client ns %s rank %d: Running", myproc.nspace, myproc.rank);
|
||||
|
||||
/* test something */
|
||||
@ -166,37 +67,6 @@ int main(int argc, char **argv)
|
||||
}
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
|
||||
/* test something */
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&myproc, PMIX_SERVER_URI, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Get failed: %s",
|
||||
myproc.nspace, myproc.rank, PMIx_Error_string(rc));
|
||||
exit(rc);
|
||||
}
|
||||
pmix_output(0, "CLIENT SERVER URI: %s", val->data.string);
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
|
||||
/* register a handler specifically for when models declare */
|
||||
active = true;
|
||||
ninfo = 1;
|
||||
PMIX_INFO_CREATE(iptr, ninfo);
|
||||
PMIX_INFO_LOAD(&iptr[0], PMIX_EVENT_HDLR_NAME, "SIMPCLIENT-MODEL", PMIX_STRING);
|
||||
code = PMIX_MODEL_DECLARED;
|
||||
PMIx_Register_event_handler(&code, 1, iptr, ninfo,
|
||||
model_callback, model_registration_callback, (void*)&active);
|
||||
while (active) {
|
||||
usleep(10);
|
||||
}
|
||||
PMIX_INFO_FREE(iptr, ninfo);
|
||||
|
||||
/* register our errhandler */
|
||||
active = true;
|
||||
PMIx_Register_event_handler(NULL, 0, NULL, 0,
|
||||
notification_fn, errhandler_reg_callbk, (void*)&active);
|
||||
while (active) {
|
||||
usleep(10);
|
||||
}
|
||||
|
||||
|
||||
/* get our job size */
|
||||
(void)strncpy(proc.nspace, myproc.nspace, PMIX_MAX_NSLEN);
|
||||
proc.rank = PMIX_RANK_WILDCARD;
|
||||
@ -264,154 +134,6 @@ int main(int argc, char **argv)
|
||||
free(tmp);
|
||||
}
|
||||
|
||||
/* put a few values */
|
||||
(void)asprintf(&tmp, "%s-%d-internal", myproc.nspace, myproc.rank);
|
||||
value.type = PMIX_UINT32;
|
||||
value.data.uint32 = 1234;
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Store_internal(&myproc, tmp, &value))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Store_internal failed: %s",
|
||||
myproc.nspace, myproc.rank, PMIx_Error_string(rc));
|
||||
goto done;
|
||||
}
|
||||
|
||||
for (cnt=0; cnt < MAXCNT; cnt++) {
|
||||
(void)asprintf(&tmp, "%s-%d-local-%d", myproc.nspace, myproc.rank, cnt);
|
||||
value.type = PMIX_UINT64;
|
||||
value.data.uint64 = 1234;
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Put(PMIX_LOCAL, tmp, &value))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Put internal failed: %s",
|
||||
myproc.nspace, myproc.rank, PMIx_Error_string(rc));
|
||||
goto done;
|
||||
}
|
||||
|
||||
(void)asprintf(&tmp, "%s-%d-remote-%d", myproc.nspace, myproc.rank, cnt);
|
||||
value.type = PMIX_STRING;
|
||||
value.data.string = "1234";
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Put(PMIX_REMOTE, tmp, &value))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Put internal failed: %s",
|
||||
myproc.nspace, myproc.rank, PMIx_Error_string(rc));
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Commit())) {
|
||||
pmix_output(0, "Client ns %s rank %d cnt %d: PMIx_Commit failed: %s",
|
||||
myproc.nspace, myproc.rank, cnt, PMIx_Error_string(rc));
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* call fence to ensure the data is received */
|
||||
PMIX_PROC_CONSTRUCT(&proc);
|
||||
(void)strncpy(proc.nspace, myproc.nspace, PMIX_MAX_NSLEN);
|
||||
proc.rank = PMIX_RANK_WILDCARD;
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Fence(&proc, 1, NULL, 0))) {
|
||||
pmix_output(0, "Client ns %s rank %d cnt %d: PMIx_Fence failed: %s",
|
||||
myproc.nspace, myproc.rank, cnt, PMIx_Error_string(rc));
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* check the returned data */
|
||||
(void)strncpy(proc.nspace, myproc.nspace, PMIX_MAX_NSLEN);
|
||||
for (j=0; j <= cnt; j++) {
|
||||
for (n=0; n < nprocs; n++) {
|
||||
proc.rank = n;
|
||||
(void)asprintf(&tmp, "%s-%d-local-%d", myproc.nspace, n, j);
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, tmp, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d cnt %d: PMIx_Get %s failed: %s",
|
||||
myproc.nspace, myproc.rank, j, tmp, PMIx_Error_string(rc));
|
||||
continue;
|
||||
}
|
||||
if (NULL == val) {
|
||||
pmix_output(0, "Client ns %s rank %d: NULL value returned",
|
||||
myproc.nspace, myproc.rank);
|
||||
break;
|
||||
}
|
||||
if (PMIX_UINT64 != val->type) {
|
||||
pmix_output(0, "Client ns %s rank %d cnt %d: PMIx_Get %s returned wrong type: %d", myproc.nspace, myproc.rank, j, tmp, val->type);
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
free(tmp);
|
||||
continue;
|
||||
}
|
||||
if (1234 != val->data.uint64) {
|
||||
pmix_output(0, "Client ns %s rank %d cnt %d: PMIx_Get %s returned wrong value: %d", myproc.nspace, myproc.rank, j, tmp, (int)val->data.uint64);
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
free(tmp);
|
||||
continue;
|
||||
}
|
||||
pmix_output(0, "Client ns %s rank %d cnt %d: PMIx_Get %s returned correct", myproc.nspace, myproc.rank, j, tmp);
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
free(tmp);
|
||||
|
||||
if (n != myproc.rank) {
|
||||
(void)asprintf(&tmp, "%s-%d-remote-%d", proc.nspace, n, j);
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, tmp, NULL, 0, &val))) {
|
||||
/* this data should _not_ be found as we are on the same node
|
||||
* and the data was "put" with a PMIX_REMOTE scope */
|
||||
pmix_output(0, "Client ns %s rank %d cnt %d: PMIx_Get %s returned correct", myproc.nspace, myproc.rank, j, tmp);
|
||||
continue;
|
||||
}
|
||||
pmix_output(0, "Client ns %s rank %d cnt %d: PMIx_Get %s returned remote data for a local proc",
|
||||
myproc.nspace, myproc.rank, j, tmp);
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
free(tmp);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* now get the data blob for myself */
|
||||
pmix_output(0, "Client ns %s rank %d testing internal modex blob",
|
||||
myproc.nspace, myproc.rank);
|
||||
if (PMIX_SUCCESS == (rc = PMIx_Get(&myproc, NULL, NULL, 0, &val))) {
|
||||
if (PMIX_DATA_ARRAY != val->type) {
|
||||
pmix_output(0, "Client ns %s rank %d did not return an array for its internal modex blob",
|
||||
myproc.nspace, myproc.rank);
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
} else if (PMIX_INFO != val->data.darray->type) {
|
||||
pmix_output(0, "Client ns %s rank %d returned an internal modex array of type %s instead of PMIX_INFO",
|
||||
myproc.nspace, myproc.rank, PMIx_Data_type_string(val->data.darray->type));
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
} else if (0 == val->data.darray->size) {
|
||||
pmix_output(0, "Client ns %s rank %d returned an internal modex array of zero length",
|
||||
myproc.nspace, myproc.rank);
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
} else {
|
||||
pmix_info_t *iptr = (pmix_info_t*)val->data.darray->array;
|
||||
for (n=0; n < val->data.darray->size; n++) {
|
||||
pmix_output(0, "\tKey: %s", iptr[n].key);
|
||||
}
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
}
|
||||
} else {
|
||||
pmix_output(0, "Client ns %s rank %d internal modex blob FAILED with error %s(%d)",
|
||||
myproc.nspace, myproc.rank, PMIx_Error_string(rc), rc);
|
||||
}
|
||||
|
||||
/* log something */
|
||||
PMIX_INFO_CONSTRUCT(&info);
|
||||
PMIX_INFO_LOAD(&info, PMIX_LOG_STDERR, "test log msg", PMIX_STRING);
|
||||
active = true;
|
||||
rc = PMIx_Log_nb(&info, 1, NULL, 0, opcbfunc, (void*)&active);
|
||||
if (PMIX_SUCCESS != rc) {
|
||||
pmix_output(0, "Client ns %s rank %d - log_nb returned %s",
|
||||
myproc.nspace, myproc.rank, PMIx_Error_string(rc));
|
||||
} else {
|
||||
while (active) {
|
||||
usleep(10);
|
||||
}
|
||||
}
|
||||
PMIX_INFO_DESTRUCT(&info);
|
||||
|
||||
/* if requested and our rank is 0, call abort */
|
||||
if (doabort) {
|
||||
if (0 == myproc.rank) {
|
||||
PMIx_Abort(PMIX_ERR_PROC_REQUESTED_ABORT, "CALLING ABORT", NULL, 0);
|
||||
} else {
|
||||
while(!completed) {
|
||||
usleep(10);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
done:
|
||||
/* finalize us */
|
||||
pmix_output(0, "Client ns %s rank %d: Finalizing", myproc.nspace, myproc.rank);
|
||||
|
@ -13,7 +13,7 @@
|
||||
* All rights reserved.
|
||||
* Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright (c) 2011 Oak Ridge National Labs. All rights reserved.
|
||||
* Copyright (c) 2013-2018 Intel, Inc. All rights reserved.
|
||||
* Copyright (c) 2013-2019 Intel, Inc. All rights reserved.
|
||||
* Copyright (c) 2015 Mellanox Technologies, Inc. All rights reserved.
|
||||
* $COPYRIGHT$
|
||||
*
|
||||
@ -97,16 +97,17 @@ int main(int argc, char **argv)
|
||||
}
|
||||
pmix_output(0, "Client ns %s rank %d: Running", myproc.nspace, myproc.rank);
|
||||
|
||||
/* get our universe size */
|
||||
/* get our job size */
|
||||
(void)strncpy(proc.nspace, myproc.nspace, PMIX_MAX_NSLEN);
|
||||
proc.rank = PMIX_RANK_WILDCARD;
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_UNIV_SIZE, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Get universe size failed: %d", myproc.nspace, myproc.rank, rc);
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_JOB_SIZE, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Get job size failed: %s",
|
||||
myproc.nspace, myproc.rank, PMIx_Error_string(rc));
|
||||
goto done;
|
||||
}
|
||||
nprocs = val->data.uint32;
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
pmix_output(0, "Client %s:%d universe size %d", myproc.nspace, myproc.rank, nprocs);
|
||||
pmix_output(0, "Client %s:%d job size %d", myproc.nspace, myproc.rank, nprocs);
|
||||
completed = false;
|
||||
|
||||
/* register our errhandler */
|
||||
|
@ -13,7 +13,7 @@
|
||||
* All rights reserved.
|
||||
* Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright (c) 2011 Oak Ridge National Labs. All rights reserved.
|
||||
* Copyright (c) 2013-2018 Intel, Inc. All rights reserved.
|
||||
* Copyright (c) 2013-2019 Intel, Inc. All rights reserved.
|
||||
* Copyright (c) 2015 Mellanox Technologies, Inc. All rights reserved.
|
||||
* $COPYRIGHT$
|
||||
*
|
||||
@ -98,9 +98,12 @@ int main(int argc, char **argv)
|
||||
pmix_value_t *val = &value;
|
||||
char *tmp;
|
||||
pmix_proc_t proc;
|
||||
uint32_t n, num_gets;
|
||||
uint32_t n, num_gets, k, nlocal;
|
||||
bool active;
|
||||
bool dofence = true;
|
||||
bool local, all_local;
|
||||
char **peers;
|
||||
pmix_rank_t *locals;
|
||||
|
||||
if (NULL != getenv("PMIX_SIMPDMODEX_ASYNC")) {
|
||||
dofence = false;
|
||||
@ -113,16 +116,17 @@ int main(int argc, char **argv)
|
||||
}
|
||||
pmix_output(0, "Client ns %s rank %d: Running", myproc.nspace, myproc.rank);
|
||||
|
||||
/* get our universe size */
|
||||
/* get our job size */
|
||||
(void)strncpy(proc.nspace, myproc.nspace, PMIX_MAX_NSLEN);
|
||||
proc.rank = PMIX_RANK_WILDCARD;
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_UNIV_SIZE, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Get universe size failed: %d", myproc.nspace, myproc.rank, rc);
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_JOB_SIZE, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Get job size failed: %s",
|
||||
myproc.nspace, myproc.rank, PMIx_Error_string(rc));
|
||||
goto done;
|
||||
}
|
||||
nprocs = val->data.uint32;
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
pmix_output(0, "Client %s:%d universe size %d", myproc.nspace, myproc.rank, nprocs);
|
||||
pmix_output(0, "Client %s:%d job size %d", myproc.nspace, myproc.rank, nprocs);
|
||||
|
||||
/* put a few values */
|
||||
(void)asprintf(&tmp, "%s-%d-internal", myproc.nspace, myproc.rank);
|
||||
@ -174,9 +178,43 @@ int main(int argc, char **argv)
|
||||
}
|
||||
}
|
||||
|
||||
/* get a list of our local peers */
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_LOCAL_PEERS, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Get local peers failed: %s",
|
||||
myproc.nspace, myproc.rank, PMIx_Error_string(rc));
|
||||
goto done;
|
||||
}
|
||||
/* split the returned string to get the rank of each local peer */
|
||||
peers = pmix_argv_split(val->data.string, ',');
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
nlocal = pmix_argv_count(peers);
|
||||
if (nprocs == nlocal) {
|
||||
all_local = true;
|
||||
} else {
|
||||
all_local = false;
|
||||
locals = (pmix_rank_t*)malloc(pmix_argv_count(peers) * sizeof(pmix_rank_t));
|
||||
for (n=0; NULL != peers[n]; n++) {
|
||||
locals[n] = strtoul(peers[n], NULL, 10);
|
||||
}
|
||||
}
|
||||
pmix_argv_free(peers);
|
||||
|
||||
/* get the committed data - ask for someone who doesn't exist as well */
|
||||
num_gets = 0;
|
||||
for (n=0; n < nprocs; n++) {
|
||||
if (all_local) {
|
||||
local = true;
|
||||
} else {
|
||||
local = false;
|
||||
/* see if this proc is local to us */
|
||||
for (k=0; k < nlocal; k++) {
|
||||
if (proc.rank == locals[k]) {
|
||||
local = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (local) {
|
||||
(void)asprintf(&tmp, "%s-%d-local", myproc.nspace, n);
|
||||
proc.rank = n;
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get_nb(&proc, tmp,
|
||||
@ -185,6 +223,7 @@ int main(int argc, char **argv)
|
||||
goto done;
|
||||
}
|
||||
++num_gets;
|
||||
} else {
|
||||
(void)asprintf(&tmp, "%s-%d-remote", myproc.nspace, n);
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get_nb(&proc, tmp,
|
||||
NULL, 0, valcbfunc, tmp))) {
|
||||
@ -193,6 +232,7 @@ int main(int argc, char **argv)
|
||||
}
|
||||
++num_gets;
|
||||
}
|
||||
}
|
||||
|
||||
if (dofence) {
|
||||
/* wait for the first fence to finish */
|
||||
|
@ -13,7 +13,7 @@
|
||||
* All rights reserved.
|
||||
* Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright (c) 2011 Oak Ridge National Labs. All rights reserved.
|
||||
* Copyright (c) 2013-2018 Intel, Inc. All rights reserved.
|
||||
* Copyright (c) 2013-2019 Intel, Inc. All rights reserved.
|
||||
* Copyright (c) 2015 Mellanox Technologies, Inc. All rights reserved.
|
||||
* $COPYRIGHT$
|
||||
*
|
||||
@ -62,16 +62,17 @@ int main(int argc, char **argv)
|
||||
}
|
||||
pmix_output(0, "Client ns %s rank %d: Running", myproc.nspace, myproc.rank);
|
||||
|
||||
/* get our universe size */
|
||||
/* get our job size */
|
||||
(void)strncpy(proc.nspace, myproc.nspace, PMIX_MAX_NSLEN);
|
||||
proc.rank = PMIX_RANK_WILDCARD;
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_UNIV_SIZE, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Get universe size failed: %d", myproc.nspace, myproc.rank, rc);
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_JOB_SIZE, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Get job size failed: %s",
|
||||
myproc.nspace, myproc.rank, PMIx_Error_string(rc));
|
||||
goto done;
|
||||
}
|
||||
nprocs = val->data.uint32;
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
pmix_output(0, "Client %s:%d universe size %d", myproc.nspace, myproc.rank, nprocs);
|
||||
pmix_output(0, "Client %s:%d job size %d", myproc.nspace, myproc.rank, nprocs);
|
||||
|
||||
/* call fence to sync */
|
||||
PMIX_PROC_CONSTRUCT(&proc);
|
||||
@ -85,19 +86,12 @@ int main(int argc, char **argv)
|
||||
/* rank=0 calls spawn */
|
||||
if (0 == myproc.rank) {
|
||||
PMIX_APP_CREATE(app, 1);
|
||||
app->cmd = strdup("gumby");
|
||||
app->cmd = strdup("./simpclient");
|
||||
app->maxprocs = 2;
|
||||
pmix_argv_append_nosize(&app->argv, "gumby");
|
||||
pmix_argv_append_nosize(&app->argv, "simpclient");
|
||||
pmix_argv_append_nosize(&app->argv, "-n");
|
||||
pmix_argv_append_nosize(&app->argv, "2");
|
||||
pmix_setenv("PMIX_ENV_VALUE", "3", true, &app->env);
|
||||
PMIX_INFO_CREATE(app->info, 2);
|
||||
(void)strncpy(app->info[0].key, "DARTH", PMIX_MAX_KEYLEN);
|
||||
app->info[0].value.type = PMIX_INT8;
|
||||
app->info[0].value.data.int8 = 12;
|
||||
(void)strncpy(app->info[1].key, "VADER", PMIX_MAX_KEYLEN);
|
||||
app->info[1].value.type = PMIX_DOUBLE;
|
||||
app->info[1].value.data.dval = 12.34;
|
||||
|
||||
pmix_output(0, "Client ns %s rank %d: calling PMIx_Spawn", myproc.nspace, myproc.rank);
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Spawn(NULL, 0, app, 1, nsp2))) {
|
||||
@ -106,25 +100,18 @@ int main(int argc, char **argv)
|
||||
}
|
||||
PMIX_APP_FREE(app, 1);
|
||||
|
||||
/* check to see if we got the expected info back */
|
||||
if (0 != strncmp(nsp2, "DYNSPACE", PMIX_MAX_NSLEN)) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Spawn returned incorrect nspace: %s", myproc.nspace, myproc.rank, nsp2);
|
||||
goto done;
|
||||
} else {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Spawn succeeded returning nspace: %s", myproc.nspace, myproc.rank, nsp2);
|
||||
}
|
||||
/* get their universe size */
|
||||
/* get their job size */
|
||||
(void)strncpy(proc.nspace, nsp2, PMIX_MAX_NSLEN);
|
||||
proc.rank = PMIX_RANK_WILDCARD;
|
||||
val = NULL;
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_UNIV_SIZE, NULL, 0, &val)) ||
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_JOB_SIZE, NULL, 0, &val)) ||
|
||||
NULL == val) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Get universe size failed: %d", myproc.nspace, myproc.rank, rc);
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Get job %s size failed: %d", myproc.nspace, myproc.rank, nsp2, rc);
|
||||
goto done;
|
||||
}
|
||||
ntmp = val->data.uint32;
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
pmix_output(0, "Client %s:%d universe %s size %d", myproc.nspace, myproc.rank, nsp2, (int)ntmp);
|
||||
pmix_output(0, "Client %s:%d job %s size %d", myproc.nspace, myproc.rank, nsp2, (int)ntmp);
|
||||
}
|
||||
|
||||
/* just cycle the connect/disconnect functions */
|
||||
|
@ -13,7 +13,7 @@
|
||||
* All rights reserved.
|
||||
* Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright (c) 2011 Oak Ridge National Labs. All rights reserved.
|
||||
* Copyright (c) 2013-2017 Intel, Inc. All rights reserved.
|
||||
* Copyright (c) 2013-2019 Intel, Inc. All rights reserved.
|
||||
* Copyright (c) 2015 Mellanox Technologies, Inc. All rights reserved.
|
||||
* $COPYRIGHT$
|
||||
*
|
||||
@ -83,16 +83,17 @@ int main(int argc, char **argv)
|
||||
}
|
||||
pmix_output(0, "Client ns %s rank %d: Running", myproc.nspace, myproc.rank);
|
||||
|
||||
/* get our universe size */
|
||||
/* get our job size */
|
||||
(void)strncpy(proc.nspace, myproc.nspace, PMIX_MAX_NSLEN);
|
||||
proc.rank = PMIX_RANK_WILDCARD;
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_UNIV_SIZE, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Get universe size failed: %d", myproc.nspace, myproc.rank, rc);
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_JOB_SIZE, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Get job size failed: %s",
|
||||
myproc.nspace, myproc.rank, PMIx_Error_string(rc));
|
||||
goto done;
|
||||
}
|
||||
nprocs = val->data.uint32;
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
pmix_output(0, "Client %s:%d universe size %d", myproc.nspace, myproc.rank, nprocs);
|
||||
pmix_output(0, "Client %s:%d job size %d", myproc.nspace, myproc.rank, nprocs);
|
||||
completed = false;
|
||||
|
||||
/* register our errhandler */
|
||||
|
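Side note (not part of the patch): the hunks above replace PMIX_UNIV_SIZE with PMIX_JOB_SIZE and switch the error reporting to PMIx_Error_string(). A stand-alone sketch of the same lookup, distilled from those hunks:

    #include <stdio.h>
    #include <string.h>
    #include <pmix.h>

    int main(void)
    {
        pmix_proc_t myproc, wildcard;
        pmix_value_t *val;
        pmix_status_t rc;

        if (PMIX_SUCCESS != (rc = PMIx_Init(&myproc, NULL, 0))) {
            fprintf(stderr, "PMIx_Init failed: %s\n", PMIx_Error_string(rc));
            return 1;
        }

        /* job-level keys are stored against the wildcard rank */
        PMIX_PROC_CONSTRUCT(&wildcard);
        (void)strncpy(wildcard.nspace, myproc.nspace, PMIX_MAX_NSLEN);
        wildcard.rank = PMIX_RANK_WILDCARD;

        if (PMIX_SUCCESS == (rc = PMIx_Get(&wildcard, PMIX_JOB_SIZE, NULL, 0, &val))) {
            fprintf(stderr, "%s:%d job size %u\n", myproc.nspace, myproc.rank,
                    (unsigned)val->data.uint32);
            PMIX_VALUE_RELEASE(val);
        } else {
            fprintf(stderr, "PMIx_Get(PMIX_JOB_SIZE) failed: %s\n", PMIx_Error_string(rc));
        }

        PMIx_Finalize(NULL, 0);
        return 0;
    }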
@ -13,7 +13,7 @@
|
||||
* All rights reserved.
|
||||
* Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright (c) 2011 Oak Ridge National Labs. All rights reserved.
|
||||
* Copyright (c) 2013-2018 Intel, Inc. All rights reserved.
|
||||
* Copyright (c) 2013-2019 Intel, Inc. All rights reserved.
|
||||
* Copyright (c) 2015 Mellanox Technologies, Inc. All rights reserved.
|
||||
* $COPYRIGHT$
|
||||
*
|
||||
@ -133,14 +133,17 @@ int main(int argc, char **argv)
|
||||
(void)strncpy(proc.nspace, myproc.nspace, PMIX_MAX_NSLEN);
|
||||
proc.rank = PMIX_RANK_WILDCARD;
|
||||
|
||||
/* get our universe size */
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_UNIV_SIZE, NULL, 0, &val))) {
|
||||
fprintf(stderr, "Client ns %s rank %d: PMIx_Get universe size failed: %d\n", myproc.nspace, myproc.rank, rc);
|
||||
/* get our job size */
|
||||
(void)strncpy(proc.nspace, myproc.nspace, PMIX_MAX_NSLEN);
|
||||
proc.rank = PMIX_RANK_WILDCARD;
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_JOB_SIZE, NULL, 0, &val))) {
|
||||
fprintf(stderr, "Client ns %s rank %d: PMIx_Get job size failed: %s\n",
|
||||
myproc.nspace, myproc.rank, PMIx_Error_string(rc));
|
||||
goto done;
|
||||
}
|
||||
nprocs = val->data.uint32;
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
fprintf(stderr, "Client %s:%d universe size %d\n", myproc.nspace, myproc.rank, nprocs);
|
||||
fprintf(stderr, "Client %s:%d job size %d\n", myproc.nspace, myproc.rank, nprocs);
|
||||
|
||||
/* inform the RM that we are preemptible, and that our checkpoint methods are
|
||||
* "signal" on SIGUSR2 and event on PMIX_JCTRL_CHECKPOINT */
|
||||
|
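Side note (not part of the patch): the comment above describes registering preemption/checkpoint support with the RM. A hedged sketch of one way to express that through PMIx_Job_control_nb(); the callback and helper names are illustrative, and the directives must stay valid until the callback fires (hence the static array):

    #include <signal.h>
    #include <stdbool.h>
    #include <pmix.h>

    /* illustrative callback: just let the library release its data */
    static void jctrl_cbfunc(pmix_status_t status, pmix_info_t *info, size_t ninfo,
                             void *cbdata, pmix_release_cbfunc_t release_fn,
                             void *release_cbdata)
    {
        (void)status; (void)info; (void)ninfo; (void)cbdata;
        if (NULL != release_fn) {
            release_fn(release_cbdata);
        }
    }

    static pmix_status_t register_job_controls(void)
    {
        static pmix_info_t directives[3];   /* must outlive the non-blocking call */
        bool flag = true;
        int sig = SIGUSR2;

        PMIX_INFO_LOAD(&directives[0], PMIX_JOB_CTRL_PREEMPTIBLE, &flag, PMIX_BOOL);
        PMIX_INFO_LOAD(&directives[1], PMIX_JOB_CTRL_CHECKPOINT_SIGNAL, &sig, PMIX_INT);
        PMIX_INFO_LOAD(&directives[2], PMIX_JOB_CTRL_CHECKPOINT_EVENT, &flag, PMIX_BOOL);

        /* no explicit targets => the request applies to the caller's own job */
        return PMIx_Job_control_nb(NULL, 0, directives, 3, jctrl_cbfunc, NULL);
    }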
@ -13,7 +13,7 @@
|
||||
* All rights reserved.
|
||||
* Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright (c) 2011 Oak Ridge National Labs. All rights reserved.
|
||||
* Copyright (c) 2013-2017 Intel, Inc. All rights reserved.
|
||||
* Copyright (c) 2013-2019 Intel, Inc. All rights reserved.
|
||||
* Copyright (c) 2015 Mellanox Technologies, Inc. All rights reserved.
|
||||
* $COPYRIGHT$
|
||||
*
|
||||
@ -54,16 +54,17 @@ int main(int argc, char **argv)
|
||||
}
|
||||
pmix_output(0, "Client ns %s rank %d: Running", myproc.nspace, myproc.rank);
|
||||
|
||||
/* get our universe size */
|
||||
/* get our job size */
|
||||
(void)strncpy(proc.nspace, myproc.nspace, PMIX_MAX_NSLEN);
|
||||
proc.rank = PMIX_RANK_WILDCARD;
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_UNIV_SIZE, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Get universe size failed: %d", myproc.nspace, myproc.rank, rc);
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_JOB_SIZE, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Get job size failed: %s",
|
||||
myproc.nspace, myproc.rank, PMIx_Error_string(rc));
|
||||
goto done;
|
||||
}
|
||||
nprocs = val->data.uint32;
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
pmix_output(0, "Client %s:%d universe size %d", myproc.nspace, myproc.rank, nprocs);
|
||||
pmix_output(0, "Client %s:%d job size %d", myproc.nspace, myproc.rank, nprocs);
|
||||
|
||||
/* call fence to ensure the data is received */
|
||||
PMIX_PROC_CONSTRUCT(&proc);
|
||||
|
@ -13,7 +13,7 @@
|
||||
* All rights reserved.
|
||||
* Copyright (c) 2009-2012 Cisco Systems, Inc. All rights reserved.
|
||||
* Copyright (c) 2011 Oak Ridge National Labs. All rights reserved.
|
||||
* Copyright (c) 2013-2018 Intel, Inc. All rights reserved.
|
||||
* Copyright (c) 2013-2019 Intel, Inc. All rights reserved.
|
||||
* Copyright (c) 2015 Mellanox Technologies, Inc. All rights reserved.
|
||||
* $COPYRIGHT$
|
||||
*
|
||||
@ -103,17 +103,17 @@ int main(int argc, char **argv)
|
||||
usleep(10);
|
||||
}
|
||||
|
||||
/* get our universe size */
|
||||
/* get our job size */
|
||||
(void)strncpy(proc.nspace, myproc.nspace, PMIX_MAX_NSLEN);
|
||||
proc.rank = PMIX_RANK_WILDCARD;
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_UNIV_SIZE, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Get universe size failed: %s",
|
||||
if (PMIX_SUCCESS != (rc = PMIx_Get(&proc, PMIX_JOB_SIZE, NULL, 0, &val))) {
|
||||
pmix_output(0, "Client ns %s rank %d: PMIx_Get job size failed: %s",
|
||||
myproc.nspace, myproc.rank, PMIx_Error_string(rc));
|
||||
goto done;
|
||||
}
|
||||
nprocs = val->data.uint32;
|
||||
PMIX_VALUE_RELEASE(val);
|
||||
pmix_output(0, "Client %s:%d universe size %d", myproc.nspace, myproc.rank, nprocs);
|
||||
pmix_output(0, "Client %s:%d job size %d", myproc.nspace, myproc.rank, nprocs);
|
||||
|
||||
/* if we are rank=0, then do a fence with timeout */
|
||||
if (0 == myproc.rank) {
|
||||
|
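Side note (not part of the patch): the fence-with-timeout referenced by the comment above boils down to attaching a PMIX_TIMEOUT directive to a blocking PMIx_Fence. A minimal sketch, with an illustrative 2-second timeout:

    #include <string.h>
    #include <pmix.h>

    /* returns PMIX_ERR_TIMEOUT if the fence does not complete in time */
    static pmix_status_t fence_with_timeout(const pmix_proc_t *myproc)
    {
        pmix_proc_t proc;
        pmix_info_t info;
        pmix_status_t rc;
        int timeout = 2;    /* seconds (illustrative) */

        PMIX_PROC_CONSTRUCT(&proc);
        (void)strncpy(proc.nspace, myproc->nspace, PMIX_MAX_NSLEN);
        proc.rank = PMIX_RANK_WILDCARD;

        PMIX_INFO_CONSTRUCT(&info);
        PMIX_INFO_LOAD(&info, PMIX_TIMEOUT, &timeout, PMIX_INT);

        rc = PMIx_Fence(&proc, 1, &info, 1);
        PMIX_INFO_DESTRUCT(&info);
        return rc;
    }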
@ -182,6 +182,8 @@ static pmix_list_t pubdata;
|
||||
static pmix_event_t handler;
|
||||
static pmix_list_t children;
|
||||
static bool istimeouttest = false;
|
||||
static bool nettest = false;
|
||||
static bool arrays = false;
|
||||
|
||||
static void set_namespace(int nprocs, char *ranks, char *nspace,
|
||||
pmix_op_cbfunc_t cbfunc, myxfer_t *x);
|
||||
@ -208,6 +210,32 @@ static void opcbfunc(pmix_status_t status, void *cbdata)
|
||||
DEBUG_WAKEUP_THREAD(&x->lock);
|
||||
}
|
||||
|
||||
static void setup_cbfunc(pmix_status_t status,
|
||||
pmix_info_t info[], size_t ninfo,
|
||||
void *provided_cbdata,
|
||||
pmix_op_cbfunc_t cbfunc, void *cbdata)
|
||||
{
|
||||
myxfer_t *x = (myxfer_t*)provided_cbdata;
|
||||
size_t n;
|
||||
|
||||
/* transfer it to the caddy for return to the main thread */
|
||||
if (0 < ninfo) {
|
||||
PMIX_INFO_CREATE(x->info, ninfo);
|
||||
x->ninfo = ninfo;
|
||||
for (n=0; n < ninfo; n++) {
|
||||
PMIX_INFO_XFER(&x->info[n], &info[n]);
|
||||
}
|
||||
}
|
||||
|
||||
/* let the library release the data and cleanup from
|
||||
* the operation */
|
||||
if (NULL != cbfunc) {
|
||||
cbfunc(PMIX_SUCCESS, cbdata);
|
||||
}
|
||||
|
||||
DEBUG_WAKEUP_THREAD(&x->lock);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
char **client_env=NULL;
|
||||
@ -224,6 +252,11 @@ int main(int argc, char **argv)
|
||||
size_t ninfo;
|
||||
mylock_t mylock;
|
||||
int ncycles=1, m, delay=0;
|
||||
bool hwloc = false;
|
||||
#if PMIX_HAVE_HWLOC
|
||||
char *hwloc_file = NULL;
|
||||
#endif
|
||||
sigset_t unblock;
|
||||
|
||||
/* smoke test */
|
||||
if (PMIX_SUCCESS != 0) {
|
||||
@ -259,30 +292,115 @@ int main(int argc, char **argv)
|
||||
0 == strcmp("--sleep", argv[n])) &&
|
||||
NULL != argv[n+1]) {
|
||||
delay = strtol(argv[n+1], NULL, 10);
|
||||
#if PMIX_HAVE_HWLOC
|
||||
} else if (0 == strcmp("-hwloc", argv[n]) ||
|
||||
0 == strcmp("--hwloc", argv[n])) {
|
||||
/* test hwloc support */
|
||||
hwloc = true;
|
||||
} else if (0 == strcmp("-hwloc-file", argv[n]) ||
|
||||
0 == strcmp("--hwloc-file", argv[n])) {
|
||||
if (NULL == argv[n+1]) {
|
||||
fprintf(stderr, "The --hwloc-file option requires an argument\n");
|
||||
exit(1);
|
||||
}
|
||||
hwloc_file = strdup(argv[n+1]);
|
||||
hwloc = true;
|
||||
++n;
|
||||
#endif
|
||||
} else if (0 == strcmp("-h", argv[n])) {
|
||||
/* print the options and exit */
|
||||
fprintf(stderr, "usage: simptest <options>\n");
|
||||
fprintf(stderr, " -n N Number of clients to run\n");
|
||||
fprintf(stderr, " -e foo Name of the client executable to run (default: simpclient\n");
|
||||
fprintf(stderr, " -reps N Cycle for N repetitions");
|
||||
fprintf(stderr, " -hwloc Test hwloc support\n");
|
||||
fprintf(stderr, " -hwloc-file FILE Use file to import topology\n");
|
||||
fprintf(stderr, " -net-test Test network endpt assignments\n");
|
||||
fprintf(stderr, " -arrays Use the job session array to pass registration info\n");
|
||||
exit(0);
|
||||
} else if (0 == strcmp("-net-test", argv[n]) ||
|
||||
0 == strcmp("--net-test", argv[n])) {
|
||||
/* test network support */
|
||||
nettest = true;
|
||||
} else if (0 == strcmp("-arrays", argv[n]) ||
|
||||
0 == strcmp("--arrays", argv[n])) {
|
||||
/* test passing the job registration data via info arrays */
|
||||
arrays = true;
|
||||
}
|
||||
}
|
||||
if (NULL == executable) {
|
||||
if (nettest) {
|
||||
executable = strdup("./simpcoord");
|
||||
} else {
|
||||
executable = strdup("./quietclient");
|
||||
}
|
||||
}
|
||||
/* check for executable existence and permissions */
|
||||
if (0 != access(executable, X_OK)) {
|
||||
fprintf(stderr, "Executable %s not found or missing executable permissions\n", executable);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* ensure that SIGCHLD is unblocked as we need to capture it */
|
||||
if (0 != sigemptyset(&unblock)) {
|
||||
fprintf(stderr, "SIGEMPTYSET FAILED\n");
|
||||
exit(1);
|
||||
}
|
||||
if (0 != sigaddset(&unblock, SIGCHLD)) {
|
||||
fprintf(stderr, "SIGADDSET FAILED\n");
|
||||
exit(1);
|
||||
}
|
||||
if (0 != sigprocmask(SIG_UNBLOCK, &unblock, NULL)) {
|
||||
fprintf(stderr, "SIG_UNBLOCK FAILED\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* setup the server library and tell it to support tool connections */
|
||||
#if PMIX_HAVE_HWLOC
|
||||
if (hwloc) {
|
||||
#if HWLOC_API_VERSION < 0x20000
|
||||
ninfo = 4;
|
||||
#else
|
||||
ninfo = 5;
|
||||
#endif
|
||||
} else {
|
||||
ninfo = 4;
|
||||
}
|
||||
#else
|
||||
ninfo = 3;
|
||||
#endif
|
||||
|
||||
PMIX_INFO_CREATE(info, ninfo);
|
||||
PMIX_INFO_LOAD(&info[0], PMIX_SERVER_TOOL_SUPPORT, NULL, PMIX_BOOL);
|
||||
PMIX_INFO_LOAD(&info[1], PMIX_USOCK_DISABLE, NULL, PMIX_BOOL);
|
||||
PMIX_INFO_LOAD(&info[2], PMIX_SERVER_GATEWAY, NULL, PMIX_BOOL);
|
||||
PMIX_INFO_LOAD(&info[2], PMIX_SERVER_SCHEDULER, NULL, PMIX_BOOL);
|
||||
#if PMIX_HAVE_HWLOC
|
||||
if (hwloc) {
|
||||
if (NULL != hwloc_file) {
|
||||
PMIX_INFO_LOAD(&info[3], PMIX_TOPOLOGY_FILE, hwloc_file, PMIX_STRING);
|
||||
} else {
|
||||
PMIX_INFO_LOAD(&info[3], PMIX_TOPOLOGY, NULL, PMIX_STRING);
|
||||
}
|
||||
#if HWLOC_API_VERSION >= 0x20000
|
||||
PMIX_INFO_LOAD(&info[4], PMIX_HWLOC_SHARE_TOPO, NULL, PMIX_BOOL);
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
if (nettest) {
|
||||
/* set a known network configuration for the pnet/test component */
|
||||
putenv("PMIX_MCA_pnet_test_nverts=nodes:5;plane:d:3;plane:s:2;plane:d:5");
|
||||
putenv("PMIX_MCA_pnet=test");
|
||||
}
|
||||
|
||||
if (PMIX_SUCCESS != (rc = PMIx_server_init(&mymodule, info, ninfo))) {
|
||||
fprintf(stderr, "Init failed with error %d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
PMIX_INFO_FREE(info, ninfo);
|
||||
if (nettest) {
|
||||
unsetenv("PMIX_MCA_pnet");
|
||||
unsetenv("PMIX_MCA_pnet_test_nverts");
|
||||
}
|
||||
|
||||
/* register the default errhandler */
|
||||
DEBUG_CONSTRUCT_LOCK(&mylock);
|
||||
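Side note (not part of the patch): the block above builds the directive list and starts the server. Stripped to its skeleton, a host bring-up looks like the following sketch; the empty module is purely illustrative, since a real host must implement the upcalls its clients rely on:

    #include <stdio.h>
    #include <pmix_server.h>

    /* illustrative: a module with no upcalls implemented */
    static pmix_server_module_t nullmodule;

    static int bringup(void)
    {
        pmix_info_t *info;
        pmix_status_t rc;

        PMIX_INFO_CREATE(info, 1);
        /* a NULL value with PMIX_BOOL means "true" */
        PMIX_INFO_LOAD(&info[0], PMIX_SERVER_TOOL_SUPPORT, NULL, PMIX_BOOL);

        rc = PMIx_server_init(&nullmodule, info, 1);
        PMIX_INFO_FREE(info, 1);
        if (PMIX_SUCCESS != rc) {
            fprintf(stderr, "PMIx_server_init failed: %s\n", PMIx_Error_string(rc));
            return 1;
        }

        /* ... register namespaces, fork/exec clients, wait for them ... */

        PMIx_server_finalize();
        return 0;
    }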
@ -444,43 +562,186 @@ int main(int argc, char **argv)
|
||||
static void set_namespace(int nprocs, char *ranks, char *nspace,
|
||||
pmix_op_cbfunc_t cbfunc, myxfer_t *x)
|
||||
{
|
||||
char *regex, *ppn;
|
||||
char hostname[PMIX_MAXHOSTNAMELEN];
|
||||
char *regex, *ppn, *rks;
|
||||
int n, m, k;
|
||||
pmix_data_array_t *array;
|
||||
pmix_info_t *info, *iptr, *ip;
|
||||
myxfer_t cd, lock;
|
||||
pmix_status_t rc;
|
||||
|
||||
gethostname(hostname, sizeof(hostname));
|
||||
x->ninfo = 7;
|
||||
if (arrays) {
|
||||
x->ninfo = 15 + nprocs;
|
||||
} else {
|
||||
x->ninfo = 16 + nprocs;
|
||||
}
|
||||
|
||||
PMIX_INFO_CREATE(x->info, x->ninfo);
|
||||
(void)strncpy(x->info[0].key, PMIX_UNIV_SIZE, PMIX_MAX_KEYLEN);
|
||||
x->info[0].value.type = PMIX_UINT32;
|
||||
x->info[0].value.data.uint32 = nprocs;
|
||||
n = 0;
|
||||
|
||||
(void)strncpy(x->info[1].key, PMIX_SPAWNED, PMIX_MAX_KEYLEN);
|
||||
x->info[1].value.type = PMIX_UINT32;
|
||||
x->info[1].value.data.uint32 = 0;
|
||||
PMIx_generate_regex("test000,test001,test002", ®ex);
|
||||
PMIx_generate_ppn("0;1;2", &ppn);
|
||||
|
||||
(void)strncpy(x->info[2].key, PMIX_LOCAL_SIZE, PMIX_MAX_KEYLEN);
|
||||
x->info[2].value.type = PMIX_UINT32;
|
||||
x->info[2].value.data.uint32 = nprocs;
|
||||
if (arrays) {
|
||||
(void)strncpy(x->info[n].key, PMIX_JOB_INFO_ARRAY, PMIX_MAX_KEYLEN);
|
||||
x->info[n].value.type = PMIX_DATA_ARRAY;
|
||||
PMIX_DATA_ARRAY_CREATE(x->info[n].value.data.darray, 2, PMIX_INFO);
|
||||
iptr = (pmix_info_t*)x->info[n].value.data.darray->array;
|
||||
(void)strncpy(iptr[0].key, PMIX_NODE_MAP, PMIX_MAX_KEYLEN);
|
||||
iptr[0].value.type = PMIX_STRING;
|
||||
iptr[0].value.data.string = regex;
|
||||
(void)strncpy(iptr[1].key, PMIX_PROC_MAP, PMIX_MAX_KEYLEN);
|
||||
iptr[1].value.type = PMIX_STRING;
|
||||
iptr[1].value.data.string = ppn;
|
||||
++n;
|
||||
} else {
|
||||
(void)strncpy(x->info[n].key, PMIX_NODE_MAP, PMIX_MAX_KEYLEN);
|
||||
x->info[n].value.type = PMIX_STRING;
|
||||
x->info[n].value.data.string = regex;
|
||||
++n;
|
||||
|
||||
(void)strncpy(x->info[3].key, PMIX_LOCAL_PEERS, PMIX_MAX_KEYLEN);
|
||||
x->info[3].value.type = PMIX_STRING;
|
||||
x->info[3].value.data.string = strdup(ranks);
|
||||
/* if we have some empty nodes, then fill their spots */
|
||||
(void)strncpy(x->info[n].key, PMIX_PROC_MAP, PMIX_MAX_KEYLEN);
|
||||
x->info[n].value.type = PMIX_STRING;
|
||||
x->info[n].value.data.string = ppn;
|
||||
++n;
|
||||
}
|
||||
|
||||
PMIx_generate_regex(hostname, ®ex);
|
||||
(void)strncpy(x->info[4].key, PMIX_NODE_MAP, PMIX_MAX_KEYLEN);
|
||||
x->info[4].value.type = PMIX_STRING;
|
||||
x->info[4].value.data.string = regex;
|
||||
/* we have the required info to run setup_app, so do that now */
|
||||
PMIX_INFO_CREATE(iptr, 4);
|
||||
PMIX_INFO_XFER(&iptr[0], &x->info[0]);
|
||||
PMIX_INFO_XFER(&iptr[1], &x->info[1]);
|
||||
PMIX_INFO_LOAD(&iptr[2], PMIX_SETUP_APP_ENVARS, NULL, PMIX_BOOL);
|
||||
PMIX_LOAD_KEY(iptr[3].key, PMIX_ALLOC_NETWORK);
|
||||
iptr[3].value.type = PMIX_DATA_ARRAY;
|
||||
PMIX_DATA_ARRAY_CREATE(iptr[3].value.data.darray, 2, PMIX_INFO);
|
||||
ip = (pmix_info_t*)iptr[3].value.data.darray->array;
|
||||
asprintf(&rks, "%s.net", nspace);
|
||||
PMIX_INFO_LOAD(&ip[0], PMIX_ALLOC_NETWORK_ID, rks, PMIX_STRING);
|
||||
free(rks);
|
||||
PMIX_INFO_LOAD(&ip[1], PMIX_ALLOC_NETWORK_SEC_KEY, NULL, PMIX_BOOL);
|
||||
PMIX_CONSTRUCT(&cd, myxfer_t);
|
||||
if (PMIX_SUCCESS != (rc = PMIx_server_setup_application(nspace, iptr, 4,
|
||||
setup_cbfunc, &cd))) {
|
||||
pmix_output(0, "[%s:%d] PMIx_server_setup_application failed: %s", __FILE__, __LINE__, PMIx_Error_string(rc));
|
||||
DEBUG_DESTRUCT_LOCK(&cd.lock);
|
||||
} else {
|
||||
DEBUG_WAIT_THREAD(&cd.lock);
|
||||
}
|
||||
|
||||
PMIx_generate_ppn(ranks, &ppn);
|
||||
(void)strncpy(x->info[5].key, PMIX_PROC_MAP, PMIX_MAX_KEYLEN);
|
||||
x->info[5].value.type = PMIX_STRING;
|
||||
x->info[5].value.data.string = ppn;
|
||||
/* use the results to setup the local subsystems */
|
||||
PMIX_CONSTRUCT(&lock, myxfer_t);
|
||||
if (PMIX_SUCCESS != (rc = PMIx_server_setup_local_support(nspace, cd.info, cd.ninfo,
|
||||
opcbfunc, &lock))) {
|
||||
pmix_output(0, "[%s:%d] PMIx_server_setup_local_support failed: %s", __FILE__, __LINE__, PMIx_Error_string(rc));
|
||||
} else {
|
||||
DEBUG_WAIT_THREAD(&lock.lock);
|
||||
}
|
||||
PMIX_DESTRUCT(&lock);
|
||||
PMIX_DESTRUCT(&cd);
|
||||
|
||||
(void)strncpy(x->info[6].key, PMIX_JOB_SIZE, PMIX_MAX_KEYLEN);
|
||||
x->info[6].value.type = PMIX_UINT32;
|
||||
x->info[6].value.data.uint32 = nprocs;
|
||||
(void)strncpy(x->info[n].key, PMIX_UNIV_SIZE, PMIX_MAX_KEYLEN);
|
||||
x->info[n].value.type = PMIX_UINT32;
|
||||
x->info[n].value.data.uint32 = nprocs;
|
||||
++n;
|
||||
|
||||
(void)strncpy(x->info[n].key, PMIX_SPAWNED, PMIX_MAX_KEYLEN);
|
||||
x->info[n].value.type = PMIX_UINT32;
|
||||
x->info[n].value.data.uint32 = 0;
|
||||
++n;
|
||||
|
||||
(void)strncpy(x->info[n].key, PMIX_LOCAL_SIZE, PMIX_MAX_KEYLEN);
|
||||
x->info[n].value.type = PMIX_UINT32;
|
||||
x->info[n].value.data.uint32 = nprocs;
|
||||
++n;
|
||||
|
||||
(void)strncpy(x->info[n].key, PMIX_LOCAL_PEERS, PMIX_MAX_KEYLEN);
|
||||
x->info[n].value.type = PMIX_STRING;
|
||||
x->info[n].value.data.string = strdup(ranks);
|
||||
++n;
|
||||
|
||||
(void)strncpy(x->info[n].key, PMIX_JOB_SIZE, PMIX_MAX_KEYLEN);
|
||||
x->info[n].value.type = PMIX_UINT32;
|
||||
x->info[n].value.data.uint32 = nprocs;
|
||||
++n;
|
||||
|
||||
(void)strncpy(x->info[n].key, PMIX_JOBID, PMIX_MAX_KEYLEN);
|
||||
x->info[n].value.type = PMIX_STRING;
|
||||
x->info[n].value.data.string = strdup("1234");
|
||||
++n;
|
||||
|
||||
(void)strncpy(x->info[n].key, PMIX_NPROC_OFFSET, PMIX_MAX_KEYLEN);
|
||||
x->info[n].value.type = PMIX_UINT32;
|
||||
x->info[n].value.data.uint32 = 0;
|
||||
++n;
|
||||
|
||||
(void)strncpy(x->info[n].key, PMIX_NODEID, PMIX_MAX_KEYLEN);
|
||||
x->info[n].value.type = PMIX_UINT32;
|
||||
x->info[n].value.data.uint32 = 0;
|
||||
++n;
|
||||
|
||||
(void)strncpy(x->info[n].key, PMIX_NODE_SIZE, PMIX_MAX_KEYLEN);
|
||||
x->info[n].value.type = PMIX_UINT32;
|
||||
x->info[n].value.data.uint32 = nprocs;
|
||||
++n;
|
||||
|
||||
(void)strncpy(x->info[n].key, PMIX_NUM_NODES, PMIX_MAX_KEYLEN);
|
||||
x->info[n].value.type = PMIX_UINT32;
|
||||
x->info[n].value.data.uint32 = 1;
|
||||
++n;
|
||||
|
||||
(void)strncpy(x->info[n].key, PMIX_UNIV_SIZE, PMIX_MAX_KEYLEN);
|
||||
x->info[n].value.type = PMIX_UINT32;
|
||||
x->info[n].value.data.uint32 = nprocs;
|
||||
++n;
|
||||
|
||||
(void)strncpy(x->info[n].key, PMIX_MAX_PROCS, PMIX_MAX_KEYLEN);
|
||||
x->info[n].value.type = PMIX_UINT32;
|
||||
x->info[n].value.data.uint32 = nprocs;
|
||||
++n;
|
||||
|
||||
(void)strncpy(x->info[n].key, PMIX_JOB_NUM_APPS, PMIX_MAX_KEYLEN);
|
||||
x->info[n].value.type = PMIX_UINT32;
|
||||
x->info[n].value.data.uint32 = 1;
|
||||
++n;
|
||||
|
||||
(void)strncpy(x->info[n].key, PMIX_LOCALLDR, PMIX_MAX_KEYLEN);
|
||||
x->info[n].value.type = PMIX_PROC_RANK;
|
||||
x->info[n].value.data.uint32 = 0;
|
||||
++n;
|
||||
|
||||
/* add the proc-specific data */
|
||||
for (m=0; m < nprocs; m++) {
|
||||
(void)strncpy(x->info[n].key, PMIX_PROC_DATA, PMIX_MAX_KEYLEN);
|
||||
x->info[n].value.type = PMIX_DATA_ARRAY;
|
||||
PMIX_DATA_ARRAY_CREATE(array, 5, PMIX_INFO);
|
||||
x->info[n].value.data.darray = array;
|
||||
info = (pmix_info_t*)array->array;
|
||||
k = 0;
|
||||
(void)strncpy(info[k].key, PMIX_RANK, PMIX_MAX_KEYLEN);
|
||||
info[k].value.type = PMIX_PROC_RANK;
|
||||
info[k].value.data.rank = m;
|
||||
++k;
|
||||
(void)strncpy(info[k].key, PMIX_GLOBAL_RANK, PMIX_MAX_KEYLEN);
|
||||
info[k].value.type = PMIX_PROC_RANK;
|
||||
info[k].value.data.rank = m;
|
||||
++k;
|
||||
(void)strncpy(info[k].key, PMIX_LOCAL_RANK, PMIX_MAX_KEYLEN);
|
||||
info[k].value.type = PMIX_UINT16;
|
||||
info[k].value.data.uint16 = m;
|
||||
++k;
|
||||
|
||||
(void)strncpy(info[k].key, PMIX_NODE_RANK, PMIX_MAX_KEYLEN);
|
||||
info[k].value.type = PMIX_UINT16;
|
||||
info[k].value.data.uint16 = m;
|
||||
++k;
|
||||
|
||||
(void)strncpy(info[k].key, PMIX_NODEID, PMIX_MAX_KEYLEN);
|
||||
info[k].value.type = PMIX_UINT32;
|
||||
info[k].value.data.uint32 = 0;
|
||||
++k;
|
||||
/* move to next proc */
|
||||
++n;
|
||||
}
|
||||
PMIx_server_register_nspace(nspace, nprocs, x->info, x->ninfo,
|
||||
cbfunc, x);
|
||||
}
|
||||
|
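Side note (not part of the patch): the registration data assembled above includes one PMIX_PROC_DATA array per rank. On the client side, such per-process keys are queried against the concrete rank rather than PMIX_RANK_WILDCARD; a small sketch for an already-initialized client (the function name is illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <pmix.h>

    /* print the PMIX_LOCAL_RANK registered for rank 0 of the given namespace */
    static void show_local_rank(const char *nspace)
    {
        pmix_proc_t proc;
        pmix_value_t *val;
        pmix_status_t rc;

        PMIX_PROC_CONSTRUCT(&proc);
        (void)strncpy(proc.nspace, nspace, PMIX_MAX_NSLEN);
        proc.rank = 0;    /* proc-specific keys use the real rank */

        if (PMIX_SUCCESS == (rc = PMIx_Get(&proc, PMIX_LOCAL_RANK, NULL, 0, &val))) {
            fprintf(stderr, "rank 0 local rank: %u\n", (unsigned)val->data.uint16);
            PMIX_VALUE_RELEASE(val);
        } else {
            fprintf(stderr, "PMIx_Get(PMIX_LOCAL_RANK) failed: %s\n",
                    PMIx_Error_string(rc));
        }
    }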