[mvapich-discuss] ONE OF THE PROCESSES TERMINATED BADLY: CLEANING UP APPLICATION TERMINATED WITH THE EXIT STRING: Terminated (signal 15)
roger federer
ooops.789 at gmail.com
Wed Nov 16 17:11:35 EST 2011
Hello, I am trying to implement a simulation program in MPI. The program worked
well for quite a number of runs, but all of a sudden it started giving me the
following error: "ONE OF THE PROCESSES TERMINATED BADLY: CLEANING UP APPLICATION
TERMINATED WITH THE EXIT STRING: Terminated (signal 15)"
Could you go through the code below, which is fairly simple to follow, and let me
know what is causing the error I am receiving? (A short, self-contained
send/receive sketch is included after the code for reference.)
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <mpe.h>
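/* usage: mpirun -np <nprocs> ./a.out <n> -- n (argv[1]) is the number of
messages to simulate; at least 2 processes are required */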
#define MAX(a,b) ((a) > (b) ? a : b)
struct clocktype
{
int lc;
int *vec;
};
typedef struct clocktype clocks;
/* forward declaration so rgenerator can be called before its definition below */
int rgenerator(int rank1, int size1);
int main(int argc, char *argv[])
{
clocks* P;
int rank, i, n, j, k, rnum, buffer_size, size;
int *buffer;
int lrecv, lsend, vrecv, vsend, l, v;
int event1a, event1b;
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
MPI_Comm_size(MPI_COMM_WORLD,&size);
if(argc<2)
{
printf("Process %d : Number of messages to be executed is not
provided\n", rank);
MPI_Finalize();
return 0;
}
n=atoi(argv[1]);
if(n==0)
{
printf("Process %d : Invalid number of messages", rank);
MPI_Finalize();
return 0;
}
if(size==1 && n>0)
{
printf("Invalid number of processes & messages for simulation to
occur\n");
MPI_Finalize();
return 0;
}
/* buffer layout: [0..size-1] vector clock, [size] final Lamport clock,
[size+1] sender's Lamport clock, [size+2] sends performed so far, [size+3]
total messages n -- so size+4 ints are needed; buffer_size is kept as an
element count because MPI_Send/MPI_Recv counts are in elements of MPI_INT, not bytes */
buffer_size=size+4;
buffer=(int *)malloc(buffer_size*sizeof(int));
for(i=0;i<size+4;i++)
buffer[i]=0;
buffer[size+3]=n;
P=(clocks *)malloc(size*sizeof(clocks));
for(i=0;i<size;i++)
P[i].vec=(int *)malloc((size+3)*sizeof(int));
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
for(j=0;j<size+3;j++)
P[rank].vec[j]=0;
P[rank].lc=0;
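/* rank 0 starts the simulation: it ticks its Lamport and vector clocks, sends
the first message to a random destination, and then keeps receiving until
buffer[size+2] (sends performed) reaches n */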
if(rank==0)
{
printf("\n There are %d processes in the system", size);
l=P[rank].lc;
l++;
P[rank].lc=l;
buffer[size+1]=P[rank].lc;
v=P[rank].vec[rank];
v++;
P[rank].vec[rank]=v;
buffer[rank]=P[rank].vec[rank];
rnum=rgenerator(rank,size);
buffer[size+2]++;
printf("\n Send event from %d to process %d : %d : [ ",rank,
rnum,P[rank].lc);
for(j=0;j<size;j++)
printf("%d ",P[rank].vec[j]);
printf("]");
MPI_Send(buffer, buffer_size, MPI_INT, rnum, rnum, MPI_COMM_WORLD);
while(1)
{
if(MPI_Recv(buffer, buffer_size, MPI_INT, MPI_ANY_SOURCE,
MPI_ANY_TAG, MPI_COMM_WORLD, &status)==MPI_SUCCESS)
{
if((buffer[size+2]==buffer[size+3]) &&
(status.MPI_TAG==10000))
{
printf("\n Received event from process %d",
status.MPI_SOURCE);
for(i=1;i<size;i++)
{
MPI_Send(buffer, buffer_size, MPI_INT, i, 20000,
MPI_COMM_WORLD);
}
printf("\n Quit message sent to all processes");
for(i=1;i<size;i++)
{
MPI_Recv(buffer, buffer_size, MPI_INT, i, 1060,
MPI_COMM_WORLD, &status);
printf("\n Process %d : lamport : %d vector : [
", i, buffer[size]);
for(j=0;j<size;j++)
printf("%d ",buffer[j]);
printf("]");
}
MPI_Finalize();
return 0;
}
else if((buffer[size+2]==buffer[size+3]) &&
(status.MPI_TAG==rank))
{
l=P[rank].lc;
lrecv=buffer[size+1];
lrecv++;
lsend=MAX(l, lrecv);
P[rank].lc=lsend;
buffer[size+1]=lsend;
for(i=0;i<size;i++)
{
if(i!=rank)
P[rank].vec[i]=buffer[i];
}
v=P[rank].vec[rank];
vrecv=buffer[rank];
vrecv++;
vsend=MAX(v, vrecv);
P[rank].vec[rank]=vsend;
buffer[rank]=vsend;
printf("\n Receive event from process %d by process %d
: %d : [ ", status.MPI_SOURCE, rank, P[rank].lc);
for(j=0;j<size;j++)
printf("%d ",P[rank].vec[j]);
printf("]");
MPI_Send(buffer, buffer_size, MPI_INT, 0, 10000,
MPI_COMM_WORLD);
}
else
{
l=P[rank].lc;
lrecv=buffer[size+1];
lrecv++;
lsend=MAX(l, lrecv);
P[rank].lc=lsend;
buffer[size+1]=lsend;
for(i=0;i<size;i++)
{
if(i!=rank)
P[rank].vec[i]=buffer[i];
}
v=P[rank].vec[rank];
vrecv=buffer[rank];
vrecv++;
vsend=MAX(v, vrecv);
P[rank].vec[rank]=vsend;
buffer[rank]=vsend;
printf("\n Receive event from process %d by process %d
: %d : [ ", status.MPI_SOURCE, rank, P[rank].lc);
for(j=0;j<size;j++)
printf("%d ",P[rank].vec[j]);
printf("]");
rnum=rgenerator(rank, size);
for(k=0;k<=((rnum%10)+1);k++)
{
MPE_Log_get_state_eventIDs(&event1a, &event1b);
MPE_Describe_state( event1a, event1b, "internal events", "red" );
MPE_Log_event(event1a, 0, NULL);
l=P[rank].lc;
lrecv=buffer[size+1];
lrecv++;
//lsend=MAX(l, lrecv);
lsend=lrecv;
P[rank].lc=lsend;
buffer[size+1]=lsend;
for(i=0;i<size;i++)
{
if(i!=rank)
P[rank].vec[i]=buffer[i];
}
v=P[rank].vec[rank];
vrecv=buffer[rank];
vrecv++;
//vsend=MAX(v, vrecv);
vsend=vrecv;
P[rank].vec[rank]=vsend;
buffer[rank]=vsend;
MPE_Log_event(event1b, 0, NULL);
if(k!=((rnum%10)+1))
{
printf("\n Execution event in process %d : %d :
[ ",rank, P[rank].lc);
for(j=0;j<size;j++)
printf("%d ",P[rank].vec[j]);
printf("]");
}
if(k==((rnum%10)+1))
{
buffer[size+2]=buffer[size+2]+1;
printf("\n Send event from %d to process %d :
%d : [ ",rank, rnum,P[rank].lc);
for(j=0;j<size;j++)
printf("%d ",P[rank].vec[j]);
printf("]");
}
}
//rnum=rgenerator(rank,size);
MPI_Send(buffer, buffer_size, MPI_INT, rnum, rnum,
MPI_COMM_WORLD);
}
}
}
}
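/* every other rank only reacts to incoming messages: it merges the received
clocks, performs a random number of internal events (logged via MPE), forwards
the message, and quits once rank 0 sends tag 20000 */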
else
{
while(1)
{
if(MPI_Recv(buffer, buffer_size, MPI_INT, MPI_ANY_SOURCE,
MPI_ANY_TAG, MPI_COMM_WORLD, &status)==MPI_SUCCESS)
{
if((buffer[size+2]==buffer[size+3]) &&
(status.MPI_TAG==20000))
{
for(i=0;i<size;i++)
{
buffer[i]=P[rank].vec[i];
}
buffer[size]=P[rank].lc;
MPI_Send(buffer, buffer_size , MPI_INT, 0, 1060,
MPI_COMM_WORLD);
printf("\n process %d quitting", rank);
MPI_Finalize();
return 0;
}
else if((buffer[size+2]==buffer[size+3]) &&
(status.MPI_TAG==rank))
{
l=P[rank].lc;
lrecv=buffer[size+1];
lrecv++;
lsend=MAX(l, lrecv);
P[rank].lc=lsend;
buffer[size+1]=lsend;
for(i=0;i<size;i++)
{
if(i!=rank)
P[rank].vec[i]=buffer[i];
}
v=P[rank].vec[rank];
vrecv=buffer[rank];
vrecv++;
vsend=MAX(v, vrecv);
P[rank].vec[rank]=vsend;
buffer[rank]=vsend;
printf("\n Receive event from process %d by process %d
: %d : [ ", status.MPI_SOURCE, rank, P[rank].lc);
for(j=0;j<size;j++)
printf("%d ",P[rank].vec[j]);
printf("]");
MPI_Send(buffer, buffer_size, MPI_INT, 0, 10000,
MPI_COMM_WORLD);
}
else
{
l=P[rank].lc;
lrecv=buffer[size+1];
lrecv++;
lsend=MAX(l, lrecv);
P[rank].lc=lsend;
buffer[size+1]=lsend;
for(i=0;i<size;i++)
{
if(i!=rank)
P[rank].vec[i]=buffer[i];
}
v=P[rank].vec[rank];
vrecv=buffer[rank];
vrecv++;
vsend=MAX(v, vrecv);
P[rank].vec[rank]=vsend;
buffer[rank]=vsend;
printf("\n Receive event from process %d by process %d
: %d : [ ", status.MPI_SOURCE, rank, P[rank].lc);
for(j=0;j<size;j++)
printf("%d ",P[rank].vec[j]);
printf("]");
rnum=rgenerator(rank,size);
for(k=0;k<=((rnum%10)+1);k++)
{
MPE_Log_get_state_eventIDs(&event1a, &event1b);
MPE_Describe_state( event1a, event1b, "internal events", "red" );
MPE_Log_event(event1a, 0, NULL);
l=P[rank].lc;
lrecv=buffer[size+1];
lrecv++;
lsend=MAX(l, lrecv);
lsend=lrecv;
P[rank].lc=lsend;
buffer[size+1]=lsend;
for(i=0;i<size;i++)
{
if(i!=rank)
P[rank].vec[i]=buffer[i];
}
v=P[rank].vec[rank];
vrecv=buffer[rank];
vrecv++;
vsend=MAX(v, vrecv);
vsend=vrecv;
P[rank].vec[rank]=vsend;
buffer[rank]=vsend;
MPE_Log_event(event1b, 0, NULL);
if(k!=((rnum%10)+1))
{
printf("\n Execution event in process %d : %d :
[ ",rank, P[rank].lc);
for(j=0;j<size;j++)
printf("%d ",P[rank].vec[j]);
printf("]");
}
if(k==((rnum%10)+1))
{
buffer[size+2]=buffer[size+2]+1;
printf("\n Send event from %d to process %d :
%d : [ ",rank, rnum,P[rank].lc);
for(j=0;j<size;j++)
printf("%d ",P[rank].vec[j]);
printf("]");
}
}
MPI_Send(buffer, buffer_size, MPI_INT, rnum, rnum,
MPI_COMM_WORLD);
}
}
}
}
MPI_Finalize();
return 0;
}
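/* pick a pseudo-random destination rank different from the caller's own rank */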
int rgenerator(int rank1, int size1)
{
int k;
do
{
k = rand() % size1;
if(k==rank1)
{
if(k+1==size1)
k--;
else
k++;
}
}while(k==rank1);
return k;
}
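For reference, the count arguments of MPI_Send and MPI_Recv above are numbers of
elements of the given datatype (here MPI_INT), not numbers of bytes. A minimal,
self-contained sketch of that usage (the names buf and nints are just
placeholders; run it with at least two processes):

#include <stdlib.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int rank, i, nints = 8, *buf;
    MPI_Status status;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    buf = (int *)malloc(nints * sizeof(int));
    for (i = 0; i < nints; i++)
        buf[i] = i;
    if (rank == 0)
        MPI_Send(buf, nints, MPI_INT, 1, 0, MPI_COMM_WORLD); /* count = nints, not nints*sizeof(int) */
    else if (rank == 1)
        MPI_Recv(buf, nints, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
    free(buf);
    MPI_Finalize();
    return 0;
}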