
Last set of explicit conversions. We are now close to zero warnings on
all platforms. The only exceptions (and I will not deal with them
anytime soon) are on Windows:
- the write functions, which require the length to be an int when it is
  a size_t on all UNIX variants.
- all the iovec manipulation functions, where iov_len is again an int
  when it is a size_t on most UNIX variants.
As these only happen on Windows, I think we're set for now :)

This commit was SVN r12215.
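
(Illustration only, not part of the commit.) The pattern applied throughout these diffs is an explicit narrowing cast from size_t to int, int32_t or uint32_t wherever an API insists on a smaller type, as in (int)strlen(name) or (int32_t)opal_list_get_size(...). A minimal sketch of that idiom, using a hypothetical size_to_int() helper that adds the range check the one-line casts omit:

#include <limits.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical helper (not in Open MPI): narrow a size_t to an int
 * explicitly; this is what casts such as (int)strlen(s) in the diffs do,
 * with a range check added here for the sketch. Returns -1 if the value
 * does not fit in an int. */
static int size_to_int(size_t n)
{
    return (n > (size_t)INT_MAX) ? -1 : (int)n;
}

int main(void)
{
    const char *name = "MPI_COMM_WORLD";
    int len = size_to_int(strlen(name));   /* 14, fits comfortably in an int */
    printf("length = %d\n", len);
    return 0;
}

The leftover Windows warnings mentioned above (write() lengths and iov_len) would need the same kind of cast, or a check like this one, at each call site.
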
This commit is contained in:
George Bosilca 2006-10-20 03:57:44 +00:00
parent e81d38f322
commit 06563b5dec
33 changed files with 109 additions and 69 deletions

View file

@ -1,18 +1,64 @@
The actual version of Open MPI compile and run under Windows. The simplest
way to get access to a windows distribution is to download one from the
download section on the Open MPI web site (http://www.open-mpi.org).
download section on the Open MPI web site (http://www.open-mpi.org) or to
check it out via SVN (information on the above mentioned web-site).
The rest of this document is only for confirmed developpers, who has spare
The rest of the document is divided on 2. The first section is for an easy setup,
based on some files we distribute (32 and 64 bits versions). The second one is
only for advanced users, who want to deeply dig into the software.
First approach: Simple and strait-forward
Step 1: untar in the root directory of the Open MPI distribution one of our tgz
file from contrib/platform/win32/
Step 2: go in ompi/datatype and copy
datatype_pack.c to datatype_pack_checksum.c
datatype_unpack.c to datatype_unpack_checksum.c
Step 3: Add the following to ompi/tools/ompi_info/ompi_info.h after you
change the relevant information inside (replace everything between @).
#define OMPI_CONFIGURE_USER "@USER_NAME@"
#define OMPI_CONFIGURE_HOST "@HOST_NAME@"
#define OMPI_CONFIGURE_DATE "@TODAY_DATE@"
#define OMPI_BUILD_USER OMPI_CONFIGURE_USER
#define OMPI_BUILD_HOST OMPI_CONFIGURE_HOST
#define OMPI_BUILD_DATE OMPI_CONFIGURE_DATE
#define OMPI_BUILD_CFLAGS "/Od /Gm /EHsc /RTC1 /MDd"
#define OMPI_BUILD_CPPFLAGS "-I${HOME}/ompi-trunk -I${HOME}opal/include -I${HOME}/ompi-trunk/orte/include -I${HOME}/ompi-trunk/ompi/include"
#define OMPI_BUILD_CXXFLAGS "/Od /Gm /EHsc /RTC1 /MDd"
#define OMPI_BUILD_CXXCPPFLAGS "-I${HOME}/ompi-trunk -I../../.. -I$(HOME}/ompi-trunk/opal/include -I${HOME}/ompi-trunk/orte/include -I${HOME}/ompi-trunk/ompi/include"
#define OMPI_BUILD_FFLAGS ""
#define OMPI_BUILD_FCFLAGS ""
#define OMPI_BUILD_LDFLAGS " "
#define OMPI_BUILD_LIBS " "
#define OMPI_CC_ABSOLUTE "cl"
#define OMPI_CXX_ABSOLUTE "cl"
#define OMPI_F77_ABSOLUTE "none"
#define OMPI_F90_ABSOLUTE "none"
#define OMPI_F90_BUILD_SIZE "small"
Step 4: Open the Open MPI project (.sln file) from the root directory of the distribution
Step 5: Choose which version you want to build (from the project manager)
Step 6: Add the build directory to your PATH
Step *: Have fun ...
Step *+1: If you have any problems, find any bugs please feel free to report to
users@open-mpi.org
Second approach: Confirmed users
The rest of this document is only for confirmed developers, who has spare
time or an urgent necessity to compile their own windows version.
Compiling Open MPI natively on Windows require several tools. Of course
one need the Microsoft Visual Studio for their C/C++ compiler as well as
for the ml (assembler compiler) and the link utilities. But the current
version require some GNU tools as well. Here is the list of such tools:
1. Download any Unix for Windows environment. I have sucesfully used
1. Download any Unix for Windows environment. I have successfully used
cygwin and minGW.
2. Make the default shell ash.exe (install it if it's not installed by
default) as it will higly decrease the configuration and compilation
default) as it will highly decrease the configuration and compilation
time.
3. Download a very recent libtool (I'm using the pre 2.0 from their CVS HEAD).
@ -23,6 +69,3 @@ UNIX environments: configure and make.
All questions, complaints and requests about the Windows port should be sent
by email to bosilca at open-mpi dot org.
And don't forget: have as much fun as I had doing this port :)
george.

View file

@ -308,7 +308,7 @@ void ompi_attr_create_predefined_callback(
ORTE_ERROR_LOG(rc);
return;
}
universe_size += *sptr;
universe_size += (unsigned int)(*sptr);
}
}
}

View file

@ -57,7 +57,7 @@ ompi_bitmap_init(ompi_bitmap_t *bm, int size)
int actual_size;
if ((size <= 0) || (size > OMPI_FORTRAN_HANDLE_MAX) || (NULL == bm)) {
return OMPI_ERR_BAD_PARAM;
return OMPI_ERR_BAD_PARAM;
}
bm->legal_numbits = size;

View file

@ -71,7 +71,7 @@ int ompi_comm_init(void)
OBJ_CONSTRUCT(&ompi_mpi_comm_world, ompi_communicator_t);
group = OBJ_NEW(ompi_group_t);
group->grp_proc_pointers = ompi_proc_world(&size);
group->grp_proc_count = size;
group->grp_proc_count = (int)size;
group->grp_flags |= OMPI_GROUP_INTRINSIC;
ompi_set_group_rank(group, ompi_proc_local());
ompi_group_increment_proc_count (group);
@ -81,7 +81,7 @@ int ompi_comm_init(void)
ompi_mpi_comm_world.c_my_rank = group->grp_my_rank;
ompi_mpi_comm_world.c_local_group = group;
ompi_mpi_comm_world.c_remote_group = group;
ompi_mpi_comm_world.c_cube_dim = opal_cube_dim(size);
ompi_mpi_comm_world.c_cube_dim = opal_cube_dim((int)size);
ompi_mpi_comm_world.error_handler = &ompi_mpi_errors_are_fatal;
OBJ_RETAIN( &ompi_mpi_errors_are_fatal );
OMPI_COMM_SET_PML_ADDED(&ompi_mpi_comm_world);
@ -103,7 +103,7 @@ int ompi_comm_init(void)
group = OBJ_NEW(ompi_group_t);
group->grp_proc_pointers = ompi_proc_self(&size);
group->grp_my_rank = 0;
group->grp_proc_count = size;
group->grp_proc_count = (int)size;
group->grp_flags |= OMPI_GROUP_INTRINSIC;
ompi_mpi_comm_self.c_contextid = 1;

View file

@ -705,9 +705,9 @@ static int __dump_data_desc( dt_elem_desc_t* pDesc, int nbElems, char* ptr, size
for( i = 0; i < nbElems; i++ ) {
index += _dump_data_flags( pDesc->elem.common.flags, ptr + index, length );
if( length <= index ) break;
if( length <= (size_t)index ) break;
index += snprintf( ptr + index, length - index, "%15s ", ompi_ddt_basicDatatypes[pDesc->elem.common.type]->name );
if( length <= index ) break;
if( length <= (size_t)index ) break;
if( DT_LOOP == pDesc->elem.common.type )
index += snprintf( ptr + index, length - index, "%d times the next %d elements extent %d\n",
(int)pDesc->loop.loops, (int)pDesc->loop.items,
@ -722,7 +722,7 @@ static int __dump_data_desc( dt_elem_desc_t* pDesc, int nbElems, char* ptr, size
(int)pDesc->elem.extent, (long)(pDesc->elem.count * ompi_ddt_basicDatatypes[pDesc->elem.common.type]->size) );
pDesc++;
if( length <= index ) break;
if( length <= (size_t)index ) break;
}
return index;
}
@ -739,7 +739,7 @@ static inline int __dt_contain_basic_datatypes( const ompi_datatype_t* pData, ch
if( pData->bdt_used & mask )
index += snprintf( ptr + index, length - index, "%s ", ompi_ddt_basicDatatypes[i]->name );
mask <<= 1;
if( length <= index ) break;
if( length <= (size_t)index ) break;
}
return index;
}

View file

@ -201,7 +201,7 @@ static inline mca_bml_base_btl_t* mca_bml_base_btl_array_get_next(mca_bml_base_b
if( 1 == array->arr_size ) {
return &array->bml_btls[0]; /* force the return to avoid a jump */
} else {
uint32_t current_position = array->arr_index; /* force to always start from zero */
size_t current_position = array->arr_index; /* force to always start from zero */
if( (current_position + 1) == array->arr_size ) {
array->arr_index = 0; /* next time serve from the beginning */
} else {

View file

@ -152,15 +152,15 @@ int mca_btl_sm_add_procs_same_base_addr(
struct mca_btl_base_endpoint_t **peers,
ompi_bitmap_t* reachability)
{
int return_code=OMPI_SUCCESS;
int return_code=OMPI_SUCCESS, cnt,len;
size_t i,j,proc,size,n_to_allocate,length;
int n_local_procs,cnt,len;
uint32_t n_local_procs;
ompi_proc_t* my_proc; /* pointer to caller's proc structure */
mca_btl_sm_t *btl_sm;
ompi_fifo_t *my_fifos;
ompi_fifo_t * volatile *fifo_tmp;
bool same_sm_base;
ssize_t diff;
ptrdiff_t diff;
volatile char **tmp_ptr;
volatile int *tmp_int_ptr;

View file

@ -86,7 +86,7 @@ struct mca_btl_sm_component_t {
int sm_free_list_inc; /**< number of elements to alloc when growing free lists */
int sm_exclusivity; /**< exclusivity setting */
int sm_latency; /**< lowest latency */
int sm_max_procs; /**< upper limit on the number of processes using the shared memory pool */
uint32_t sm_max_procs; /**< upper limit on the number of processes using the shared memory pool */
int sm_extra_procs; /**< number of extra procs to allow */
char* sm_mpool_name; /**< name of shared memory pool module */
mca_mpool_base_module_t* sm_mpool; /**< shared memory pool */
@ -111,8 +111,7 @@ struct mca_btl_sm_component_t {
int *sm_proc_connect; /* scratch array used by the 0'th btl to
* set indicate sm connectivty. Used by
* the 1'st btl */
size_t num_smp_procs; /**< current number of smp procs on this
host */
uint32_t num_smp_procs; /**< current number of smp procs on this host */
int num_smp_procs_same_base_addr; /* number of procs with same
base shared memory virtual
address as this process */

View file

@ -41,7 +41,7 @@ ompi_coll_tuned_bcast_intra_chain ( void *buff, int count,
int num_segments; /* Number of segmenets */
int sendcount; /* the same like segcount, except for the last segment */
int new_sendcount; /* used to mane the size for the next pipelined receive */
int realsegsize;
size_t realsegsize;
char *tmpbuf = (char*)buff;
size_t typelng;
ptrdiff_t type_extent, lb;
@ -106,7 +106,6 @@ ompi_coll_tuned_bcast_intra_chain ( void *buff, int count,
err = ompi_ddt_get_extent (datatype, &lb, &type_extent);
realsegsize = segcount*type_extent;
/* set the buffer pointer */
tmpbuf = (char *)buff;
@ -180,7 +179,6 @@ ompi_coll_tuned_bcast_intra_chain ( void *buff, int count,
chain->chain_next[i],
MCA_COLL_BASE_TAG_BCAST,
MCA_PML_BASE_SEND_STANDARD, comm));
if (err != MPI_SUCCESS) OPAL_OUTPUT((ompi_coll_tuned_stream,"sendcount %d i %d chain_next %d", sendcount, i, chain->chain_next[i]));
if (err != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
} /* end of for each child */
}
@ -243,7 +241,7 @@ ompi_coll_tuned_bcast_intra_split_bintree ( void* buffer,
uint32_t counts[2];
int num_segments[2]; /* Number of segmenets */
int sendcount[2]; /* the same like segcount, except for the last segment */
int realsegsize[2];
size_t realsegsize[2];
char *tmpbuf[2];
size_t type_size;
ptrdiff_t type_extent, lb;
@ -503,7 +501,7 @@ ompi_coll_tuned_bcast_intra_bintree ( void* buffer,
int segcount; /* Number of elements sent with each segment */
int num_segments; /* Number of segmenets */
int sendcount; /* the same like segcount, except for the last segment */
int realsegsize;
size_t realsegsize;
char *tmpbuf;
size_t type_size;
ptrdiff_t type_extent, lb;

View file

@ -43,12 +43,12 @@ int ompi_coll_tuned_reduce_intra_chain( void *sendbuf, void *recvbuf, int count,
{
int ret, line, rank, size, i = 0;
int recvcount, sendcount, prevcount, inbi, previnbi;
int segcount, segindex, num_segments, realsegsize;
int segcount, segindex, num_segments;
char *inbuf[2] = {(char*)NULL, (char*)NULL};
char *accumbuf = (char*)NULL;
char *sendtmpbuf = (char*)NULL;
ptrdiff_t ext, lb;
size_t typelng;
size_t typelng, realsegsize;
ompi_request_t* reqs[2];
ompi_coll_chain_t* chain;
@ -80,7 +80,7 @@ int ompi_coll_tuned_reduce_intra_chain( void *sendbuf, void *recvbuf, int count,
ompi_ddt_get_extent( datatype, &lb, &ext );
ompi_ddt_type_size( datatype, &typelng );
if( segsize > typelng ) {
segcount = segsize/typelng;
segcount = (int)(segsize / typelng);
num_segments = count/segcount;
if( (count % segcount) != 0 ) num_segments++;
} else {

View file

@ -48,11 +48,11 @@ static inline unsigned int my_log2(unsigned long val) {
return count > 0 ? count-1: 0;
}
static inline void *down_align_addr(void* addr, unsigned int shift) {
return (void*) (((unsigned long) addr) & (~(unsigned long) 0) << shift);
return (void*) (((intptr_t) addr) & (~(intptr_t) 0) << shift);
}
static inline void *up_align_addr(void*addr, unsigned int shift) {
return (void*) ((((unsigned long) addr) | ~((~(unsigned long) 0) << shift)));
return (void*) ((((intptr_t) addr) | ~((~(intptr_t) 0) << shift)));
}
struct mca_mpool_base_selected_module_t {
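
A side note on the alignment helpers above (illustrative only, not from the commit): casting a pointer through unsigned long truncates addresses on 64-bit Windows, where long is 32 bits while pointers are 64, which is presumably why the code switches to intptr_t. A self-contained sketch of the same down/up page-alignment idiom, written here with uintptr_t:

#include <stdint.h>
#include <stdio.h>

/* Round an address down to the start of its 2^shift block, or up to the
 * last byte of that block, using a pointer-sized unsigned integer so no
 * high bits are lost on LLP64 platforms. */
static void *down_align(void *addr, unsigned int shift)
{
    return (void *)((uintptr_t)addr & ((~(uintptr_t)0) << shift));
}

static void *up_align(void *addr, unsigned int shift)
{
    return (void *)((uintptr_t)addr | ~((~(uintptr_t)0) << shift));
}

int main(void)
{
    static char buf[2 * 4096];
    void *p = buf + 100;
    /* align to a 4096-byte (2^12) page */
    printf("%p -> down %p, up %p\n", p, down_align(p, 12), up_align(p, 12));
    return 0;
}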

View file

@ -47,7 +47,7 @@ void mca_mpool_base_mem_cb(void* base, size_t size, void* cbdata,
}
base_addr = down_align_addr( base, mca_mpool_base_page_size_log);
bound_addr = up_align_addr((void*) ((unsigned long) base + size - 1), mca_mpool_base_page_size_log);
bound_addr = up_align_addr((void*) ((ptrdiff_t) base + size - 1), mca_mpool_base_page_size_log);
for(item = opal_list_get_first(&mca_mpool_base_modules);
item != opal_list_get_end(&mca_mpool_base_modules);
item = opal_list_get_next(item)) {

View file

@ -91,7 +91,7 @@ ompi_osc_rdma_module_fence(int assert, ompi_win_t *win)
int ret = OMPI_SUCCESS, i;
if (0 != (assert & MPI_MODE_NOPRECEDE)) {
int num_pending;
size_t num_pending;
/* check that the user didn't lie to us - since NOPRECEDED
must be specified by all processes if it is specified by
@ -176,7 +176,7 @@ ompi_osc_rdma_module_fence(int assert, ompi_win_t *win)
atomicall add however many we're going to wait for */
OPAL_THREAD_ADD32(&(P2P_MODULE(win)->p2p_num_pending_in), incoming_reqs);
OPAL_THREAD_ADD32(&(P2P_MODULE(win)->p2p_num_pending_out),
opal_list_get_size(&(P2P_MODULE(win)->p2p_copy_pending_sendreqs)));
(int32_t)opal_list_get_size(&(P2P_MODULE(win)->p2p_copy_pending_sendreqs)));
opal_output_verbose(50, ompi_osc_base_output,
"fence: waiting on %d in and %d out",
@ -497,7 +497,7 @@ ompi_osc_rdma_module_unlock(int target,
/* try to start all the requests. We've copied everything we need
out of pending_sendreqs, so don't need the lock here */
out_count = opal_list_get_size(&(P2P_MODULE(win)->p2p_copy_pending_sendreqs));
out_count = (int32_t)opal_list_get_size(&(P2P_MODULE(win)->p2p_copy_pending_sendreqs));
OPAL_THREAD_ADD32(&(P2P_MODULE(win)->p2p_num_pending_out), out_count);

View file

@ -107,13 +107,13 @@ int mca_pml_dr_add_procs(ompi_proc_t** procs, size_t nprocs)
ompi_bitmap_t reachable;
struct mca_bml_base_endpoint_t **bml_endpoints = NULL;
int rc;
size_t i;
int32_t i;
if(nprocs == 0)
return OMPI_SUCCESS;
OBJ_CONSTRUCT(&reachable, ompi_bitmap_t);
rc = ompi_bitmap_init(&reachable, nprocs);
rc = ompi_bitmap_init(&reachable, (int)nprocs);
if(OMPI_SUCCESS != rc)
return rc;
@ -157,7 +157,7 @@ int mca_pml_dr_add_procs(ompi_proc_t** procs, size_t nprocs)
NULL);
/* initialize pml endpoint data */
for (i = 0 ; i < nprocs ; ++i) {
for (i = 0 ; i < (int32_t)nprocs ; ++i) {
int idx;
mca_pml_dr_endpoint_t *endpoint;
@ -184,7 +184,7 @@ int mca_pml_dr_add_procs(ompi_proc_t** procs, size_t nprocs)
endpoint->bml_endpoint = bml_endpoints[i];
}
for(i = 0; i < nprocs; i++) {
for(i = 0; i < (int32_t)nprocs; i++) {
mca_pml_dr_endpoint_t* ep = (mca_pml_dr_endpoint_t*)
ompi_pointer_array_get_item(&mca_pml_dr.endpoints, i);
ep->src = mca_pml_dr.my_rank;

View file

@ -114,7 +114,7 @@ int mca_pml_ob1_add_procs(ompi_proc_t** procs, size_t nprocs)
return OMPI_SUCCESS;
OBJ_CONSTRUCT(&reachable, ompi_bitmap_t);
rc = ompi_bitmap_init(&reachable, nprocs);
rc = ompi_bitmap_init(&reachable, (int)nprocs);
if(OMPI_SUCCESS != rc)
return rc;
@ -278,7 +278,7 @@ int mca_pml_ob1_send_fin_btl(
void mca_pml_ob1_process_pending_packets(mca_bml_base_btl_t* bml_btl)
{
mca_pml_ob1_pckt_pending_t *pckt;
int i, rc, s = opal_list_get_size(&mca_pml_ob1.pckt_pending);
int32_t i, rc, s = (int32_t)opal_list_get_size(&mca_pml_ob1.pckt_pending);
for(i = 0; i < s; i++) {
OPAL_THREAD_LOCK(&mca_pml_ob1.lock);
@ -324,7 +324,7 @@ void mca_pml_ob1_process_pending_packets(mca_bml_base_btl_t* bml_btl)
void mca_pml_ob1_process_pending_rdma(void)
{
mca_pml_ob1_rdma_frag_t* frag;
int i, rc, s = opal_list_get_size(&mca_pml_ob1.rdma_pending);
int32_t i, rc, s = (int32_t)opal_list_get_size(&mca_pml_ob1.rdma_pending);
for(i = 0; i < s; i++) {
OPAL_THREAD_LOCK(&mca_pml_ob1.lock);

View file

@ -319,7 +319,7 @@ static inline int mca_pml_ob1_send_request_start_btl(
rc = mca_pml_ob1_send_request_start_buffered(sendreq, bml_btl, size);
} else if
(ompi_convertor_need_buffers(&sendreq->req_send.req_convertor) == false) {
if( 0 != (sendreq->req_rdma_cnt = mca_pml_ob1_rdma_btls(
if( 0 != (sendreq->req_rdma_cnt = (uint32_t)mca_pml_ob1_rdma_btls(
sendreq->req_endpoint,
(unsigned char*)sendreq->req_send.req_addr,
sendreq->req_send.req_bytes_packed,

View file

@ -56,7 +56,7 @@ int MPI_Comm_get_name(MPI_Comm comm, char *name, int *length)
#endif
if ( comm->c_flags & OMPI_COMM_NAMEISSET ) {
strncpy ( name, comm->c_name, MPI_MAX_OBJECT_NAME );
*length = strlen ( comm->c_name );
*length = (int)strlen( comm->c_name );
}
else {
memset ( name, 0, MPI_MAX_OBJECT_NAME );

View file

@ -77,7 +77,7 @@ int MPI_Comm_join(int fd, MPI_Comm *intercomm)
if (ORTE_SUCCESS != (rc = orte_ns.get_proc_name_string (&name, &(myproc[0]->proc_name)))) {
return rc;
}
llen = strlen(name)+1;
llen = (uint32_t)(strlen(name)+1);
len = htonl(llen);
ompi_socket_send( fd, (char *) &len, sizeof(uint32_t));
@ -121,11 +121,11 @@ static int ompi_socket_send (int fd, char *buf, int len )
char *c_ptr;
int ret = OMPI_SUCCESS;
num = (size_t) len;
c_ptr = buf;
num = len;
c_ptr = buf;
do {
s_num = (size_t) num;
s_num = (size_t) num;
a = write ( fd, c_ptr, s_num );
if ( a == -1 ) {
if ( errno == EINTR ) {

View file

@ -47,7 +47,7 @@ int MPI_Error_string(int errorcode, char *string, int *resultlen)
tmpstring = ompi_mpi_errcode_get_string (errorcode);
strcpy(string, tmpstring);
*resultlen = strlen(string);
*resultlen = (int)strlen(string);
return MPI_SUCCESS;
}

View file

@ -48,7 +48,7 @@ int MPI_Get_count(MPI_Status *status, MPI_Datatype datatype, int *count)
if( size == 0 ) {
*count = 0;
} else {
*count = status->_count / size;
*count = (int)(status->_count / size);
if( (int)((*count) * size) != status->_count )
*count = MPI_UNDEFINED;
}

View file

@ -47,7 +47,7 @@ int MPI_Get_elements(MPI_Status *status, MPI_Datatype datatype, int *count)
/* If the size of the datatype is zero let's return a count of zero */
return MPI_SUCCESS;
}
*count = status->_count / size;
*count = (int)(status->_count / size);
size = status->_count - (*count) * size;
/* if basic type we should return the same result as MPI_Get_count */
if( ompi_ddt_is_predefined(datatype) ) {

View file

@ -54,7 +54,7 @@ int MPI_Get_processor_name(char *name, int *resultlen)
/* A simple implementation of this function using gethostname*/
gethostname (tmp, MPI_MAX_PROCESSOR_NAME);
len = strlen (tmp);
len = (int)strlen (tmp);
strncpy ( name, tmp, len);
if ( MPI_MAX_PROCESSOR_NAME > len ) {

View file

@ -59,7 +59,7 @@ int MPI_Info_delete(MPI_Info info, char *key) {
FUNC_NAME);
}
key_length = (key) ? strlen (key) : 0;
key_length = (key) ? (int)strlen (key) : 0;
if ((NULL == key) || (0 == key_length) ||
(MPI_MAX_INFO_KEY <= key_length)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_INFO_KEY,

View file

@ -76,7 +76,7 @@ int MPI_Info_get(MPI_Info info, char *key, int valuelen,
FUNC_NAME);
}
key_length = (key) ? strlen (key) : 0;
key_length = (key) ? (int)strlen (key) : 0;
if ((NULL == key) || (0 == key_length) ||
(MPI_MAX_INFO_KEY <= key_length)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_INFO_KEY,

View file

@ -69,7 +69,7 @@ int MPI_Info_get_valuelen(MPI_Info info, char *key, int *valuelen,
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_INFO,
FUNC_NAME);
}
key_length = (key) ? strlen (key) : 0;
key_length = (key) ? (int)strlen (key) : 0;
if ((NULL == key) || (0 == key_length) ||
(MPI_MAX_INFO_KEY <= key_length)) {
return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_INFO_KEY,

View file

@ -77,14 +77,14 @@ int MPI_Info_set(MPI_Info info, char *key, char *value)
FUNC_NAME);
}
key_length = (key) ? strlen (key) : 0;
key_length = (key) ? (int)strlen (key) : 0;
if ((NULL == key) || (0 == key_length) ||
(MPI_MAX_INFO_KEY <= key_length)) {
return OMPI_ERRHANDLER_INVOKE (MPI_COMM_WORLD, MPI_ERR_INFO_KEY,
FUNC_NAME);
}
value_length = (value) ? strlen (value) : 0;
value_length = (value) ? (int)strlen (value) : 0;
if ((NULL == value) || (0 == value_length) ||
(MPI_MAX_INFO_VAL <= value_length)) {
return OMPI_ERRHANDLER_INVOKE (MPI_COMM_WORLD, MPI_ERR_INFO_VALUE,

View file

@ -52,7 +52,7 @@ int MPI_Type_get_name(MPI_Datatype type, char *type_name, int *resultlen)
least length MPI_MAX_OBJECT_LEN, and b) if this is a call from
Fortran, the string may require null padding on the right. */
*resultlen = strlen(type->name);
*resultlen = (int)strlen(type->name);
strncpy(type_name, type->name, MPI_MAX_OBJECT_NAME);
return MPI_SUCCESS;
}

View file

@ -48,7 +48,7 @@ int MPI_Type_set_name (MPI_Datatype type, char *type_name)
}
memset(type->name, 0, MPI_MAX_OBJECT_NAME);
length = strlen( type_name );
length = (int)strlen( type_name );
if( length >= MPI_MAX_OBJECT_NAME ) {
length = MPI_MAX_OBJECT_NAME - 1;
}

View file

@ -45,6 +45,6 @@ int MPI_Type_size(MPI_Datatype type, int *size)
/* Simple */
*size = type->size;
*size = (int)type->size;
return MPI_SUCCESS;
}

View file

@ -82,7 +82,7 @@ void ompi_info::out(const string& pretty_message, const string &plain_message,
string v = value;
string filler;
int num_spaces = centerpoint - pretty_message.length();
int num_spaces = (int)(centerpoint - pretty_message.length());
if (num_spaces > 0) {
spaces = string(num_spaces, ' ');
}

View file

@ -82,7 +82,7 @@ void ompi_info::do_params(bool want_all, bool want_internal)
count = opal_cmd_line_get_ninsts(cmd_line, "param");
for (i = 0; i < count; ++i) {
type = opal_cmd_line_get_param(cmd_line, "param", i, 0);
type = opal_cmd_line_get_param(cmd_line, "param", (int)i, 0);
if (type_all == type) {
want_all = true;
break;
@ -97,8 +97,8 @@ void ompi_info::do_params(bool want_all, bool want_internal)
}
} else {
for (i = 0; i < count; ++i) {
type = opal_cmd_line_get_param(cmd_line, "param", i, 0);
component = opal_cmd_line_get_param(cmd_line, "param", i, 1);
type = opal_cmd_line_get_param(cmd_line, "param", (int)i, 0);
component = opal_cmd_line_get_param(cmd_line, "param", (int)i, 1);
for (found = false, i = 0; i < mca_types.size(); ++i) {
if (mca_types[i] == type) {

View file

@ -92,8 +92,8 @@ void ompi_info::do_version(bool want_all, opal_cmd_line_t *cmd_line)
} else {
count = opal_cmd_line_get_ninsts(cmd_line, "version");
for (i = 0; i < count; ++i) {
arg1 = opal_cmd_line_get_param(cmd_line, "version", i, 0);
scope = opal_cmd_line_get_param(cmd_line, "version", i, 1);
arg1 = opal_cmd_line_get_param(cmd_line, "version", (int)i, 0);
scope = opal_cmd_line_get_param(cmd_line, "version", (int)i, 1);
// Version of Open MPI

View file

@ -175,7 +175,7 @@ ompi_win_get_name(ompi_win_t *win, char *win_name, int *length)
{
OPAL_THREAD_LOCK(&(win->w_lock));
strncpy(win_name, win->w_name, MPI_MAX_OBJECT_NAME);
*length = strlen(win->w_name);
*length = (int)strlen(win->w_name);
OPAL_THREAD_UNLOCK(&(win->w_lock));
return OMPI_SUCCESS;