
When configuring with CFLAGS="-Wall -Werror", the test for the pthread
library fails because pthread_t th is not initialized.

Fix the remaining warnings (hopefully with correct casts).

Fix a spelling buglet in the help output for mpirun.

This commit was SVN r4272.
This commit is contained in:
Rainer Keller 2005-02-02 18:28:17 +00:00
parent c5f183fd78
commit 5e2f3b4af0
6 changed files with 15 additions and 8 deletions

View File

@@ -29,8 +29,12 @@ AC_DEFUN([OMPI_INTL_PTHREAD_TRY_LINK], [
 # Make sure that we can run a small application in C or C++, which
 # ever is the current language. Do make sure that C or C++ is the
 # current language.
 #
+# As long as this is not being run....
+# pthread_t may be anything from an int to a struct -- init with self-tid.
+#
 AC_TRY_LINK([#include <pthread.h>],
-[pthread_t th; pthread_join(th, 0);
+[pthread_t th=pthread_self(); pthread_join(th, 0);
 pthread_attr_init(0); pthread_cleanup_push(0, 0);
 pthread_create(0,0,0,0); pthread_cleanup_pop(0); ],
 [$1], [$2])
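To see what the probe exercises, here is a minimal sketch of the kind of program AC_TRY_LINK builds (assumed shape, not the literal generated conftest.c): with -Wall -Werror the old body, "pthread_t th; pthread_join(th, 0);", can be rejected for reading th uninitialized, while initializing it from pthread_self() keeps the probe warning-free. The test only has to compile and link; configure never runs it.

/* Sketch of the pthread link probe (assumed shape, not the real conftest.c). */
#include <pthread.h>

int main(void)
{
    pthread_t th = pthread_self();  /* init with self-tid, as in the fix above;
                                       an uninitialized th is what -Werror rejects */
    pthread_join(th, 0);            /* result ignored: the probe only needs to link */
    return 0;
}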

View File

@@ -877,7 +877,10 @@ else
 F77=":"
 fi
 #
-PAC_C_INLINE
+# This is checked by the top-level OpenMPI configure-script.
+# Avoid redefinition
+#
+# PAC_C_INLINE
 # Header files
 PAC_CHECK_HEADERS(unistd.h)

View File

@@ -111,7 +111,7 @@ mca_io_base_component_1_0_0_t mca_io_romio_component = {
 /* Progression of non-blocking requests */
-progress
+(mca_io_base_component_progress_fn_t) progress
 };
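The cast silences a function-pointer type mismatch in the component initializer: presumably the prototype of progress does not exactly match the mca_io_base_component_progress_fn_t field, and under -Wall -Werror the incompatible-pointer-type warning becomes fatal. A simplified sketch of the pattern, with hypothetical stand-in types rather than the real Open MPI definitions:

typedef int (*component_progress_fn_t)(void);   /* stand-in for the field type */

/* helper whose prototype differs slightly from the field type */
static int progress(int max_events)
{
    (void) max_events;
    return 0;
}

struct component {
    component_progress_fn_t progressfn;
};

/* Without the cast, -Wall -Werror turns the incompatible-pointer-type
 * warning into an error; the explicit cast records that the mismatch
 * is intentional. */
static struct component comp = {
    (component_progress_fn_t) progress
};

int main(void)
{
    return comp.progressfn != 0 ? 0 : 1;
}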

View File

@@ -186,7 +186,7 @@ int mca_ptl_sm_add_procs_same_base_addr(
 struct mca_ptl_base_peer_t *peer = peers[proc];
 #if OMPI_HAVE_THREADS == 1
 char path[PATH_MAX];
-int flags;
+/* int flags; */
 #endif
 /* initialize the peers information */
/* initialize the peers information */

View File

@@ -275,7 +275,7 @@ mca_ptl_base_module_t** mca_ptl_sm_component_init(
 }
 OBJ_CONSTRUCT(&mca_ptl_sm_component.sm_fifo_thread, ompi_thread_t);
-mca_ptl_sm_component.sm_fifo_thread.t_run = mca_ptl_sm_component_event_thread;
+mca_ptl_sm_component.sm_fifo_thread.t_run = (ompi_thread_fn_t) mca_ptl_sm_component_event_thread;
 ompi_thread_start(&mca_ptl_sm_component.sm_fifo_thread);
 #endif
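The same warning-silencing pattern for the shared-memory FIFO event thread: the run function is assigned to the thread object's t_run member, and the (ompi_thread_fn_t) cast keeps -Werror quiet about the pointer types. A hedged sketch with invented types (the real ompi_thread_fn_t and ompi_thread_t definitions are not shown in this diff):

typedef void *(*thread_fn_t)(void *);   /* stand-in for ompi_thread_fn_t */

struct thread {                         /* stand-in for ompi_thread_t */
    thread_fn_t t_run;
};

/* An event loop whose argument type is more specific than void *;
 * assigning it to t_run without a cast draws the warning that
 * -Werror turns into an error. */
static void *event_thread(struct thread *self)
{
    (void) self;
    return 0;
}

int main(void)
{
    struct thread fifo_thread;
    fifo_thread.t_run = (thread_fn_t) event_thread;   /* explicit cast, as in the fix */
    return fifo_thread.t_run != 0 ? 0 : 1;
}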

View File

@@ -25,8 +25,8 @@ Start the given program using Open MPI
 [mpirun:allocate-resources]
 %s was unable to allocate enough resources to start your application.
 This might be a transient error (too many nodes in the cluster were
-unavailable at the time of the request) or a permenant error (you
-requsted more nodes than exist in your cluster).
+unavailable at the time of the request) or a permanent error (you
+requested more nodes than exist in your cluster).
 While probably only useful to Open MPI developers, the error returned
 was %d.
@@ -41,7 +41,7 @@ detailed usage guide.
 made to clean up all processes that did start. The error returned was
 %d.
 [mpirun:proc-reg-failed]
-All proccess started by %s failed to reach MPI_Init(). This is a
+All processes started by %s failed to reach MPI_Init(). This is a
 fatal error, and not much information is available at this time as to
 why the processes didn't start. It is possible that an error message
 describing the problem has already been printed above.