-properly initialize variables in the oob_tcp_msg struct
-properly close peer sockets in the tcp oob
-fix compare in bucket allocator to use the correct variable
-remove duplicate free in teg

-updated the oob tests
-add more output to tcp oob when there are failures

This commit was SVN r1866.
This commit is contained in:
Tim Prins 2004-08-04 14:33:02 +00:00
parent 93762e9aad
commit fe9f18c03b
9 changed files with 278 additions and 171 deletions
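The first fix above ("properly initialize variables in the oob_tcp_msg struct") rests on a standard C idiom: every pointer field of a freshly posted descriptor is set to NULL so the completion path can release resources unconditionally and exactly once. Here is a minimal, self-contained sketch of that idiom in plain C; msg_desc, msg_post and msg_complete are simplified stand-ins, not the actual OMPI types or calls.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for a posted receive descriptor. */
struct msg_desc {
    void *rwbuf;    /* scratch buffer, may never be allocated */
    void *rwiov;    /* iovec copy, may never be allocated */
};

/* Post a descriptor: initialize every pointer so cleanup is always safe. */
static struct msg_desc *msg_post(void)
{
    struct msg_desc *msg = malloc(sizeof(*msg));
    if (msg == NULL)
        return NULL;
    msg->rwbuf = NULL;   /* without this, msg_complete() could free garbage */
    msg->rwiov = NULL;
    return msg;
}

/* Complete a descriptor: free whatever was actually allocated, exactly once. */
static void msg_complete(struct msg_desc *msg)
{
    free(msg->rwbuf);    /* free(NULL) is a no-op, so this is always safe */
    free(msg->rwiov);
    free(msg);
}

int main(void)
{
    struct msg_desc *msg = msg_post();
    if (msg == NULL)
        return 1;
    /* a real receive might allocate msg->rwbuf / msg->rwiov here */
    msg_complete(msg);
    puts("descriptor cleaned up exactly once");
    return 0;
}

The new msg_rwbuf and msg_rwiov assignments in the oob_tcp_recv.c hunks below presumably serve the same purpose: a receive descriptor whose buffers were never allocated must still be safe to tear down.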

View File

@@ -27,7 +27,7 @@ mca_allocator_bucket_t * mca_allocator_bucket_init(mca_allocator_base_module_t *
int i;
size_t size;
/* if a bad value is used for the number of buckets, default to 30 */
if(i <= 0) {
if(num_buckets <= 0) {
num_buckets = 30;
}
/* initialize the array of buckets */
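The one-character fix above makes the guard test the num_buckets argument instead of the local loop counter i, which has not been assigned yet at this point. A minimal sketch of the intended defaulting, with a hypothetical helper name (sanitize_num_buckets is not an OMPI function):

#include <stdio.h>

/* Clamp an invalid bucket count to the allocator's default of 30. */
static int sanitize_num_buckets(int num_buckets)
{
    if (num_buckets <= 0) {   /* test the parameter, not an unrelated local */
        num_buckets = 30;
    }
    return num_buckets;
}

int main(void)
{
    printf("%d\n", sanitize_num_buckets(-5));  /* prints 30 */
    printf("%d\n", sanitize_num_buckets(8));   /* prints 8 */
    return 0;
}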

View File

@@ -253,6 +253,7 @@ static void mca_oob_tcp_recv_handler(int sd, short flags, void* user)
/* is the peer instance willing to accept this connection */
if(mca_oob_tcp_peer_accept(peer, sd) == false) {
ompi_output(0, "mca_oob_tcp_recv_handler: peer instance not willing to accept connection.");
close(sd);
return;
}
@@ -309,6 +310,12 @@ mca_oob_t* mca_oob_tcp_init(bool *allow_multi_user_threads, bool *have_hidden_th
int mca_oob_tcp_finalize(void)
{
/* TODO: need to cleanup all peers - check for pending send/recvs. etc. */
mca_oob_tcp_peer_t * peer;
while(NULL != (peer = (mca_oob_tcp_peer_t *)
ompi_list_remove_first(&mca_oob_tcp_component.tcp_peer_list))) {
mca_oob_tcp_peer_close(peer);
OBJ_DESTRUCT(peer);
}
return OMPI_SUCCESS;
}
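The new finalize body above replaces the TODO with a drain-the-list teardown: remove each peer from the component's peer list, close it, then destruct it, so no socket or peer object outlives the component. Below is a small, self-contained sketch of that shape in plain C, using a hand-rolled singly linked list and trivial stand-ins for the close and destruct steps; it illustrates the pattern only, not the real ompi_list_t or OBJ_DESTRUCT API.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Simplified stand-in for a peer with an open socket. */
struct peer {
    int sd;              /* socket descriptor, -1 if already closed */
    struct peer *next;
};

/* Stand-in for the peer close step: close the socket at most once. */
static void peer_close(struct peer *p)
{
    if (p->sd >= 0) {
        close(p->sd);
        p->sd = -1;
    }
}

/* Drain the list: every peer is closed and freed exactly once. */
static void component_finalize(struct peer **list)
{
    struct peer *p;
    while ((p = *list) != NULL) {   /* analogous to removing the first list item */
        *list = p->next;
        peer_close(p);              /* analogous to mca_oob_tcp_peer_close()     */
        free(p);                    /* analogous to destructing the peer object  */
    }
}

int main(void)
{
    /* Build a two-element peer list with dummy (unopened) sockets. */
    struct peer *head = NULL;
    for (int i = 0; i < 2; i++) {
        struct peer *p = malloc(sizeof(*p));
        if (p == NULL)
            return 1;
        p->sd = -1;                 /* no real socket in this sketch */
        p->next = head;
        head = p;
    }
    component_finalize(&head);
    puts("all peers closed and destroyed");
    return 0;
}

In the actual component, ompi_list_remove_first() plays the role of the pop, and mca_oob_tcp_peer_close() plus OBJ_DESTRUCT() are the close-and-destroy steps.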

View File

@@ -89,6 +89,7 @@ bool mca_oob_tcp_msg_send_handler(mca_oob_tcp_msg_t* msg, struct mca_oob_tcp_pee
else if (errno == EAGAIN)
return false;
else {
ompi_output(0, "mca_oob_tcp_msg_send_handler: bad return from writev. errno=%d", errno);
mca_oob_tcp_peer_close(peer);
return false;
}
@@ -130,6 +131,7 @@ bool mca_oob_tcp_msg_recv_handler(mca_oob_tcp_msg_t* msg, struct mca_oob_tcp_pee
else if (errno == EAGAIN)
return false;
else {
ompi_output(0, "mca_oob_tcp_msg_recv_handler: bad return from readv. errno=%d", errno);
mca_oob_tcp_peer_close(peer);
return false;
}

View File

@@ -14,6 +14,7 @@
#include "oob_tcp_peer.h"
#include "oob_tcp_hdr.h"
#include <errno.h>
#include "util/output.h"
struct mca_oob_tcp_peer_t;

View File

@@ -160,7 +160,9 @@ mca_oob_tcp_peer_t * mca_oob_tcp_peer_lookup(const ompi_process_name_t* name, bo
peer->peer_name = *name;
peer->peer_sd = -1;
peer->peer_state = MCA_OOB_TCP_CLOSED;
peer->peer_recv_msg = NULL;
peer->peer_send_msg = NULL;
peer->peer_retries = 0;
/******
* need to add the peer's address to the structure
******/
@@ -243,6 +245,7 @@ static int mca_oob_tcp_peer_start_connect(mca_oob_tcp_peer_t* peer)
ompi_event_add(&peer->peer_send_event, 0);
return OMPI_SUCCESS;
}
ompi_output(0, "mca_oob_tcp_peer_start_connect: unable to connect to peer. errno=%d", errno);
mca_oob_tcp_peer_close(peer);
peer->peer_retries++;
return OMPI_ERR_UNREACH;
@@ -253,6 +256,7 @@ static int mca_oob_tcp_peer_start_connect(mca_oob_tcp_peer_t* peer)
peer->peer_state = MCA_OOB_TCP_CONNECT_ACK;
ompi_event_add(&peer->peer_recv_event, 0);
} else {
ompi_output(0, "mca_oob_tcp_peer_start_connect: unable to send connect ack to peer. errno=%d", errno);
mca_oob_tcp_peer_close(peer);
}
return rc;
@@ -301,6 +305,7 @@ static void mca_oob_tcp_peer_complete_connect(mca_oob_tcp_peer_t* peer)
peer->peer_state = MCA_OOB_TCP_CONNECT_ACK;
ompi_event_add(&peer->peer_recv_event, 0);
} else {
ompi_output(0, "mca_oob_tcp_peer_complete_connect: unable to send connect ack.");
mca_oob_tcp_peer_close(peer);
}
}
@@ -395,6 +400,7 @@ static int mca_oob_tcp_peer_recv_blocking(mca_oob_tcp_peer_t* peer, void* data,
/* remote closed connection */
if(retval == 0) {
ompi_output(0, "mca_oob_tcp_peer_recv_blocking: remote connection closed");
mca_oob_tcp_peer_close(peer);
return -1;
}

View File

@@ -75,6 +75,8 @@ int mca_oob_tcp_recv(
msg->msg_cbdata = NULL;
msg->msg_complete = false;
msg->msg_peer = *peer;
msg->msg_rwbuf = NULL;
msg->msg_rwiov = NULL;
ompi_list_append(&mca_oob_tcp_component.tcp_msg_post, (ompi_list_item_t *) msg);
OMPI_THREAD_UNLOCK(&mca_oob_tcp_component.tcp_match_lock);
@@ -161,6 +163,8 @@ int mca_oob_tcp_recv_nb(
msg->msg_cbdata = cbdata;
msg->msg_complete = false;
msg->msg_peer = *peer;
msg->msg_rwbuf = NULL;
msg->msg_rwiov = NULL;
ompi_list_append(&mca_oob_tcp_component.tcp_msg_post, (ompi_list_item_t *) msg);
OMPI_THREAD_UNLOCK(&mca_oob_tcp_component.tcp_match_lock);
return 0;

View File

@@ -120,9 +120,6 @@ int mca_pml_teg_component_close(void)
mca_pml_teg.teg_recv_requests.super.ompi_list_length);
}
if(NULL != mca_pml_teg.teg_ptl_components) {
free(mca_pml_teg.teg_ptl_components);
}
if(NULL != mca_pml_teg.teg_ptl_components) {
free(mca_pml_teg.teg_ptl_components);
}
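The hunk above drops one of two identical NULL-checked free() blocks in mca_pml_teg_component_close(); because the pointer is never reset, the duplicate block would free teg_ptl_components a second time. A common belt-and-braces variant, sketched below in plain C with stand-in names, is to NULL the pointer right after freeing it so an accidental second pass becomes a harmless no-op.

#include <stdlib.h>

/* Hypothetical component state holding a dynamically allocated table. */
struct component_state {
    void *ptl_components;   /* stand-in for teg_ptl_components */
};

/* Close path: free the table once and leave the field in a safe state. */
static void component_close(struct component_state *s)
{
    if (s->ptl_components != NULL) {
        free(s->ptl_components);
        s->ptl_components = NULL;   /* a later (buggy) repeat of this block now does nothing */
    }
}

int main(void)
{
    struct component_state s = { .ptl_components = malloc(16) };
    component_close(&s);
    component_close(&s);   /* safe: pointer is already NULL */
    return 0;
}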

View File

@@ -1,5 +1,6 @@
/*
* Basic test for the oob
* Basic test for the oob.
* The idea is to try most combinations of sending and receiving
* to run:
* mpirun -np 2 -- oob_test
*/
@@ -15,9 +16,11 @@
#define MSG_TYPE_1 1
#define MSG_TYPE_2 2
#define NUM_TESTS 4
#define NUM_TESTS 8
bool testdone[NUM_TESTS];
void do_sends(ompi_process_name_t * peer);
void do_recvs(ompi_process_name_t * peer);
bool compare_iovec(const struct iovec * msg1, const struct iovec * msg2, int n);
@@ -26,6 +29,7 @@ bool compare_iovec(const struct iovec * msg1, const struct iovec * msg2,
int i;
for(i = 0; i < n; i++) {
if(msg1[i].iov_len != msg2[i].iov_len) {
fprintf(stderr, "len problem %d", i);
return false;
}
if(0 != memcmp(msg1[i].iov_base, msg2[i].iov_base, msg1[i].iov_len)) {
@@ -55,46 +59,48 @@ void callback(int status, const ompi_process_name_t * peer,
}
}
int main(int argc, char ** argv)
{
int rc;
ompi_process_name_t peer;
/* setup message */
uint32_t msg_type_1 = MSG_TYPE_1;
uint32_t msg_type_2 = MSG_TYPE_2;
char send1[] = "hello";
uint32_t send2[] = {3, 5, 5, 9, 20};
uint16_t send3[] = {32, 4, 23};
/* data */
/* setup message */
uint32_t msg_type_1 = MSG_TYPE_1;
uint32_t msg_type_2 = MSG_TYPE_2;
char send1[] = "hello";
uint32_t send2[] = {3, 5, 5, 9, 20};
uint16_t send3[] = {32, 4, 23};
/* now we set up the send iovec */
struct iovec send_msg1[4] = {{(void *) &msg_type_1, sizeof(msg_type_1)},
{(void *) &send1, sizeof(send1)},
{(void *) &send2, sizeof(send2)},
{(void *) &send3, sizeof(send3)}};
/* now we set up the send iovec */
struct iovec send_msg1[4] = {{(void *) &msg_type_1, sizeof(msg_type_1)},
{(void *) &send1, sizeof(send1)},
{(void *) &send2, sizeof(send2)},
{(void *) &send3, sizeof(send3)}};
struct iovec send_msg2[3] = {{(void *) &msg_type_2, sizeof(msg_type_2)},
{(void *) &send2, sizeof(send2)},
{(void *) &send3, sizeof(send3)}};
struct iovec send_msg2[3] = {{(void *) &msg_type_2, sizeof(msg_type_2)},
{(void *) &send2, sizeof(send2)},
{(void *) &send3, sizeof(send3)}};
/* if we want the send/receive functions to do the packing for us,
* we have to provide an array that describes our data types
*/
mca_oob_base_type_t types[] = {MCA_OOB_BASE_INT32, MCA_OOB_BASE_BYTE,
/* if we want the send/receive functions to do the packing for us,
* we have to provide an array that describes our data types
*/
mca_oob_base_type_t types[] = {MCA_OOB_BASE_INT32, MCA_OOB_BASE_BYTE,
MCA_OOB_BASE_INT32, MCA_OOB_BASE_INT16};
/* we'll pass the array an identical iovec */
uint32_t msg_type;
char recv1[5];
uint32_t recv2[5];
uint16_t recv3[3];
struct iovec recv_msg1[4] = {{(void *) &msg_type, sizeof(msg_type)},
{(void *) &recv1, sizeof(recv1)},
{(void *) &recv2, sizeof(recv2)},
{(void *) &recv3, sizeof(recv3)}};
/* we'll pass the array an identical iovec */
uint32_t msg_type;
char recv1[6];
uint32_t recv2[5];
uint16_t recv3[3];
struct iovec recv_msg1[4] = {{(void *) &msg_type, sizeof(msg_type)},
{(void *) &recv1, sizeof(recv1)},
{(void *) &recv2, sizeof(recv2)},
{(void *) &recv3, sizeof(recv3)}};
struct iovec recv_msg2[3] = {{(void *) &msg_type, sizeof(msg_type)},
{(void *) &recv2, sizeof(recv2)},
{(void *) &recv3, sizeof(recv3)}};
struct iovec recv_msg2[3] = {{(void *) &msg_type, sizeof(msg_type)},
{(void *) &recv2, sizeof(recv2)},
{(void *) &recv3, sizeof(recv3)}};
int main(int argc, char ** argv)
{
ompi_process_name_t peer;
MPI_Init(&argc, &argv);
/* setup peer address */
@@ -103,91 +109,162 @@ MCA_OOB_BASE_INT32, MCA_OOB_BASE_INT16};
peer.vpid, peer.jobid, peer.cellid, getpid());
if(peer.vpid == 1) {
test_init("oob send");
test_init("oob send then receive");
/* local vpid is 1 - peer is 0 */
peer.vpid = 0;
/* send without doing any packing */
if( 0 > (rc = mca_oob_send_nb(&peer, send_msg1, 4, 0, 0, &callback, (void *) 0))){
test_failure("mca_oob_send_nb.");
} else {
test_success();
}
/* send with packing */
if( 0 > (rc = mca_oob_send_hton_nb(&peer, send_msg1, types, 4, 0, 0, &callback,
(void *) 1))) {
test_failure("mca_oob_send_hton_nb.");
} else {
test_success();
}
/* blocking send of message type 2 */
if( 0 > (rc = mca_oob_send(&peer, send_msg2, 3, 0, 0))) {
test_failure("mca_oob_send.");
} else {
test_success();
}
test_finalize();
/* done */
do_sends(&peer);
do_recvs(&peer);
} else {
test_init("oob recv");
test_init("oob receive then send");
/* local vpid is 0 - peer is 1 */
peer.vpid = 1;
/* first, we'll receive the nonpacked send - assuming we know the message type */
if( 0 > (rc = mca_oob_recv_nb(&peer, recv_msg1, 4, 0, 0, &callback, (void *) 2))) {
test_failure("mca_oob_recv_nb.");
} else {
test_success();
}
/* now we'll receive the packed send - assuming we know the message type */
if( 0 > mca_oob_recv_ntoh_nb(&peer, recv_msg1, types, 4, 0, 0, &callback, (void
*) 3)) {
test_failure("mca_oob_recv_ntoh_nb.");
} else {
test_success();
}
/* now we'll do a blocking recv - waiting for the 3rd message to arrive - and
* peek the first element of the iovec array to determine the message type.
*/
if( 0 > (rc = mca_oob_recv(&peer, recv_msg1, 1, 0, MCA_OOB_PEEK))) {
test_failure("mca_oob_recv w/peek.");
} else {
test_success();
}
/* check the type of message - before doing the actual receive */
switch(msg_type) {
case MSG_TYPE_1:
if( 0 > (rc = mca_oob_recv(&peer, recv_msg1, 4, 0, 0))) {
test_failure("mca_oob_recv_nb of peeked message.");
} else {
test_success();
}
if(!compare_iovec(recv_msg1, send_msg1, 4)) {
test_failure("compare is wrong");
}
break;
case MSG_TYPE_2:
if( 0 > (rc = mca_oob_recv(&peer, recv_msg2, 3, 0, 0))) {
test_failure("mca_oob_recv_nb of peeked message.");
} else {
test_success();
}
if(!compare_iovec(recv_msg2, send_msg2, 3)) {
test_failure("compare is wrong");
}
break;
default:
test_failure("Message peek did not return a valid type number.");
break;
}
test_finalize();
do_recvs(&peer);
do_sends(&peer);
}
/* done */
test_finalize();
MPI_Finalize();
return 0;
}
void do_sends(ompi_process_name_t * peer) {
/* non blocking send without doing any packing */
if( 0 > mca_oob_send_nb(peer, send_msg1, 4, 0, 0, &callback, (void *) 0)){
test_failure("mca_oob_send_nb.");
} else {
test_success();
}
if( 0 > mca_oob_send_nb(peer, send_msg1, 4, 0, 0, &callback, (void *) 1)){
test_failure("mca_oob_send_nb.");
} else {
test_success();
}
/* nonblocking send with packing */
if( 0 > mca_oob_send_hton_nb(peer, send_msg1, types, 4, 0, 0, &callback,
(void *) 2)) {
test_failure("mca_oob_send_hton_nb.");
} else {
test_success();
}
if( 0 > mca_oob_send_hton_nb(peer, send_msg1, types, 4, 0, 0, &callback,
(void *) 3)) {
test_failure("mca_oob_send_hton_nb.");
} else {
test_success();
}
/* blocking send */
if( 0 > mca_oob_send(peer, send_msg2, 3, 0, 0)) {
test_failure("mca_oob_send.");
} else {
test_success();
}
if( 0 > mca_oob_send(peer, send_msg2, 3, 0, 0)) {
test_failure("mca_oob_send.");
} else {
test_success();
}
/* blocking send with packing */
if( 0 > mca_oob_send_hton(peer, send_msg1, types, 4, 0, 0)) {
test_failure("mca_oob_send_hton.");
} else {
test_success();
}
if( 0 > mca_oob_send_hton(peer, send_msg1, types, 4, 0, 0)) {
test_failure("mca_oob_send_hton.");
} else {
test_success();
}
}
void do_recvs(ompi_process_name_t * peer) {
/* first, we'll receive the nonpacked send - assuming we know the
* message type */
if( 0 > mca_oob_recv_nb(peer, recv_msg1, 4, 0, 0, &callback, (void *) 4)) {
test_failure("mca_oob_recv_nb.");
} else {
test_success();
}
if( 0 > mca_oob_recv(peer, recv_msg1, 4, 0, 0)) {
test_failure("mca_oob_recv.");
} else {
test_success();
}
if(!compare_iovec(recv_msg1, send_msg1, 4)) {
test_failure("compare 1 is wrong");
}
/* now we'll receive the packed send - assuming we know the message type */
if( 0 > mca_oob_recv_ntoh(peer, recv_msg1, types, 4, 0,0)) {
test_failure("mca_oob_recv_ntoh.");
} else {
test_success();
}
if(!compare_iovec(recv_msg1, send_msg1, 4)) {
test_failure("compare 2 is wrong");
}
if( 0 > mca_oob_recv_ntoh_nb(peer, recv_msg1, types, 4, 0, 0, &callback,
(void *) 5)) {
test_failure("mca_oob_recv_ntoh_nb.");
} else {
test_success();
}
/* now we'll do a blocking recv - waiting for the 3rd message to arrive
* - and peek the first element of the iovec array to determine
* the message type. */
if( 0 > mca_oob_recv(peer, recv_msg2, 1, 0, MCA_OOB_PEEK)) {
test_failure("mca_oob_recv w/peek.");
} else {
test_success();
}
/* check the type of message - before doing the actual receive */
switch(msg_type) {
case MSG_TYPE_1:
if( 0 > mca_oob_recv(peer, recv_msg1, 4, 0, 0)) {
test_failure("mca_oob_recv of peeked message.");
} else {
test_success();
}
if(!compare_iovec(recv_msg1, send_msg1, 4)) {
test_failure("compare 3 is wrong");
}
break;
case MSG_TYPE_2:
if( 0 > mca_oob_recv(peer, recv_msg2, 3, 0, 0)) {
test_failure("mca_oob_recv of peeked message.");
} else {
test_success();
}
if(!compare_iovec(recv_msg2, send_msg2, 3)) {
test_failure("compare 4 is wrong");
}
break;
default:
test_failure("Message peek did not return a valid type number.");
break;
}
if( 0 > mca_oob_recv_nb(peer, recv_msg2, 3, 0, 0, &callback, (void *) 6)) {
test_failure("mca_oob_recv_nb.");
} else {
test_success();
}
/* now we will receive the packed data */
if( 0 > mca_oob_recv_ntoh(peer, recv_msg1, types, 4, 0, 0)) {
test_failure("mca_oob_recv_ntoh.");
} else {
test_success();
}
if(!compare_iovec(recv_msg1, send_msg1, 4)) {
test_failure("compare 5 is wrong");
}
if( 0 > mca_oob_recv_ntoh_nb(peer, recv_msg1, types, 4, 0, 0, &callback,
(void *) 7)) {
test_failure("mca_oob_recv_ntoh_nb.");
} else {
test_success();
}
}

View File

@@ -14,6 +14,44 @@
#define MSG_TYPE_2 2
#define NUM_TESTS 5
/* setup message */
uint32_t msg_type_1 = MSG_TYPE_1;
uint32_t msg_type_2 = MSG_TYPE_2;
char send1[] = "hello";
uint32_t send2[] = {3, 5, 5, 9, 20};
uint16_t send3[] = {32, 4, 23};
/* now we set up the send iovec */
struct iovec send_msg1[4] = {{(void *) &msg_type_1, sizeof(msg_type_1)},
{(void *) &send1, sizeof(send1)},
{(void *) &send2, sizeof(send2)},
{(void *) &send3, sizeof(send3)}};
struct iovec send_msg2[3] = {{(void *) &msg_type_2, sizeof(msg_type_2)},
{(void *) &send2, sizeof(send2)},
{(void *) &send3, sizeof(send3)}};
/* if we want the send/receive functions to do the packing for us,
* we have to provide an array that describes our data types
*/
mca_oob_base_type_t types[] = {MCA_OOB_BASE_INT32, MCA_OOB_BASE_BYTE,
MCA_OOB_BASE_INT32, MCA_OOB_BASE_INT16};
/* we'll pass the array an identical iovec */
uint32_t msg_type;
char recv1[6];
uint32_t recv2[5];
uint16_t recv3[3];
struct iovec recv_msg1[4] = {{(void *) &msg_type, sizeof(msg_type)},
{(void *) &recv1, sizeof(recv1)},
{(void *) &recv2, sizeof(recv2)},
{(void *) &recv3, sizeof(recv3)}};
struct iovec recv_msg2[3] = {{(void *) &msg_type, sizeof(msg_type)},
{(void *) &recv2, sizeof(recv2)},
{(void *) &recv3, sizeof(recv3)}};
bool testdone[NUM_TESTS];
bool compare_iovec(const struct iovec * msg1, const struct iovec * msg2, int n);
@@ -38,6 +76,7 @@ void callback(int status, const ompi_process_name_t * peer,
void callback(int status, const ompi_process_name_t * peer,
const struct iovec * msg, int count, int tag, void * cbdata)
{
fprintf(stderr, "callback called on num %d.\n", (int) cbdata);
if(0 != tag) {
test_failure("Bad tag.");
}
@@ -53,44 +92,8 @@ void callback(int status, const ompi_process_name_t * peer,
int main(int argc, char ** argv)
{
int rc;
ompi_process_name_t peer;
/* setup message */
uint32_t msg_type_1 = MSG_TYPE_1;
uint32_t msg_type_2 = MSG_TYPE_2;
char send1[] = "hello";
uint32_t send2[] = {3, 5, 5, 9, 20};
uint16_t send3[] = {32, 4, 23};
/* now we set up the send iovect */
struct iovec send_msg1[4] = {{(void *) &msg_type_1, sizeof(msg_type_1)},
{(void *) &send1, sizeof(send1)},
{(void *) &send2, sizeof(send2)},
{(void *) &send3, sizeof(send3)}};
struct iovec send_msg2[3] = {{(void *) &msg_type_2, sizeof(msg_type_2)},
{(void *) &send2, sizeof(send2)},
{(void *) &send3, sizeof(send3)}};
/* if we want the send/ recieve functions to do the packing for us,
* we have to provide an array that describes our data types
*/
mca_oob_base_type_t types[] = {MCA_OOB_BASE_INT32, MCA_OOB_BASE_BYTE,
MCA_OOB_BASE_INT32, MCA_OOB_BASE_INT16};
/* we'll pass the array an identical iovec */
uint32_t msg_type;
char recv1[5];
uint32_t recv2[5];
uint16_t recv3[3];
struct iovec recv_msg1[4] = {{(void *) &msg_type, sizeof(msg_type)},
{(void *) &recv1, sizeof(recv1)},
{(void *) &recv2, sizeof(recv2)},
{(void *) &recv3, sizeof(recv3)}};
struct iovec recv_msg2[3] = {{(void *) &msg_type, sizeof(msg_type)},
{(void *) &recv2, sizeof(recv2)},
{(void *) &recv3, sizeof(recv3)}};
MPI_Init(&argc, &argv);
/* setup peer address */
@@ -101,60 +104,70 @@ MCA_OOB_BASE_INT32, MCA_OOB_BASE_INT16};
test_init("oob self");
/* do a non blocking send without packing followed by a
* non blocking receive */
if( 0 > (rc = mca_oob_send_nb(&peer, send_msg1, 4, 0, 0, &callback, (void *) 0))){
if( 0 > mca_oob_send_nb(&peer, send_msg1, 4, 0, 0, &callback, (void *) 0)){
test_failure("mca_oob_send_nb.");
} else {
test_success();
}
if( 0 > (rc = mca_oob_recv_nb(&peer, recv_msg1, 4, 0, 0, &callback, (void *) 2))) {
if( 0 > mca_oob_recv_nb(&peer, recv_msg1, 4, 0, 0, &callback, (void *) 2)) {
test_failure("mca_oob_recv_nb.");
} else {
test_success();
}
/* Nonblocking send followed by a blocking receive with packing */
if( 0 > (rc = mca_oob_send_hton_nb(&peer, send_msg1, types, 4, 0, 0, &callback,
(void *) 1))) {
if( 0 > mca_oob_send_hton_nb(&peer, send_msg1, types, 4, 0, 0, &callback,
(void *) 1)) {
test_failure("mca_oob_send_hton_nb.");
} else {
test_success();
}
if( 0 > mca_oob_recv_ntoh_nb(&peer, recv_msg1, types, 4, 0, 0, &callback, (void
*) 3)) {
test_failure("mca_oob_recv_ntoh_nb.");
if( 0 > mca_oob_recv_ntoh(&peer, recv_msg1, types, 4, 0, 0)) {
test_failure("mca_oob_recv_ntoh.");
} else {
test_success();
}
/* non blocking send of message type 2 */
if( 0 > (rc = mca_oob_send_nb(&peer, send_msg2, 3, 0, 0, &callback,
(void *) 4))) {
if(!compare_iovec(recv_msg1, send_msg1, 4)) {
test_failure("compare 1 is wrong");
}
/* non blocking send of message type 2 followed by blocking receive */
if( 0 > mca_oob_send_nb(&peer, send_msg2, 3, 0, 0, &callback,
(void *) 4)) {
test_failure("mca_oob_send.");
} else {
test_success();
}
if( 0 > (rc = mca_oob_recv(&peer, recv_msg1, 1, 0, MCA_OOB_PEEK))) {
/* check the type of message - before doing the actual receive */
if( 0 > mca_oob_recv(&peer, recv_msg1, 1, 0, MCA_OOB_PEEK)) {
test_failure("mca_oob_recv w/peek.");
} else {
test_success();
}
/* check the type of message - before doing the actual receive */
switch(msg_type) {
case MSG_TYPE_1:
if( 0 > (rc = mca_oob_recv(&peer, recv_msg1, 4, 0, 0))) {
if( 0 > mca_oob_recv(&peer, recv_msg1, 4, 0, 0)) {
test_failure("mca_oob_recv of peeked message.");
} else {
test_success();
}
if(!compare_iovec(recv_msg1, send_msg1, 4)) {
test_failure("compare 2 is wrong");
}
break;
case MSG_TYPE_2:
if( 0 > (rc = mca_oob_recv(&peer, recv_msg2, 3, 0, 0))) {
if( 0 > mca_oob_recv(&peer, recv_msg2, 3, 0, 0)) {
test_failure("mca_oob_recv of peeked message.");
} else {
test_success();
}
break;
if(!compare_iovec(recv_msg2, send_msg2, 3)) {
test_failure("compare 3 is wrong");
}
break;
default:
test_failure("Message peek did not return a valid type number.");
break;