I have a very weird problem. I've written a program in MPI in which one process should print something, but the code terminates without any output. I can't figure out where it's wrong...
PS: This code is supposed to multiply two matrices.
int main( int argc, char *argv[] )
{
    int M = atoi(argv[1]);
    // N = 2 ^ M
    N = (unsigned int) pow(2.0, M); //you need to modify this code!
    int my_rank, comm_sz, mt;
    int rows, offset, extra, averow, dest;
    int i, j, k;
    srand(time(0));
    time_t t1, t2;
    double dt; //t2-t1
    double tavg = 0.0;
    //input array
    A = (double*) malloc( sizeof(double) * N * N );
    B = (double*) malloc( sizeof(double) * N * N );
    C = (double*) malloc( sizeof(double) * N * N );
    //int r; for (r = 0; r < REP; r++)
    //{
    //fill in matrix A and B with random numbers
    //t1
    //t1 = time(0);
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &comm_sz);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    if (my_rank =0){
        printf("mpi_mm has started with %d tasks.\n", comm_sz);
        printf("Initializing arrays...\n");
        fillmatrix(A, N);
        fillmatrix(B, N);
        averow = N / comm_sz;
        extra  = N % comm_sz;
        offset = 0;
        mt = 0;
        MPI_Bcast(B, N*N, MPI_DOUBLE, 0, MPI_COMM_WORLD);
        for (dest = 1; dest <= comm_sz; dest++){
            rows = (dest <= extra) ? averow + 1 : averow;
            MPI_Send(&offset, 1, MPI_INT, dest, mt, MPI_COMM_WORLD);
            MPI_Send(&rows, 1, MPI_INT, dest, mt, MPI_COMM_WORLD);
            MPI_Send(&A[offset*N], rows*N, MPI_DOUBLE, dest, mt, MPI_COMM_WORLD);
            offset = offset + rows;
        }
        mt = 1;
        for (i = 1; i <= comm_sz; i++){
            MPI_Recv(&offset, 1, MPI_INT, i, mt, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            MPI_Recv(&rows, 1, MPI_INT, i, mt, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            MPI_Recv(&C[offset*N], rows*N, MPI_DOUBLE, i, mt, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            printf("Received results from task %d\n", i);
        }
        /* Print results */
        printf("******************************************************\n");
        printf("Result Matrix:\n");
        for (i = 0; i < N; i++)
        {
            printf("\n");
            for (j = 0; j < N; j++)
                printf("%6.2f ", C[i*N+j]);
        }
        printf("\n******************************************************\n");
        printf("Done.\n");
    }
    if (my_rank != 0){
        MPI_Bcast(B, N*N, MPI_DOUBLE, 0, MPI_COMM_WORLD);
        mt = 0;
        MPI_Recv(&offset, 1, MPI_INT, 0, mt, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        MPI_Recv(&rows, 1, MPI_INT, 0, mt, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        MPI_Recv(&A, rows*N, MPI_DOUBLE, 0, mt, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        for (i = 0; i < N; i++)
            for (j = 0; j < rows; j++){
                C[j*N+i] = 0.0;
                for (k = 0; k < N; k++)
                    C[j*N+i] += A[j*N+k] * B[k*N+i];
            }
        mt = 1;
        MPI_Send(&offset, 1, MPI_INT, 0, mt, MPI_COMM_WORLD);
        MPI_Send(&rows, 1, MPI_INT, 0, mt, MPI_COMM_WORLD);
        MPI_Send(&C, rows*N, MPI_DOUBLE, 0, mt, MPI_COMM_WORLD);
    }
    MPI_Finalize();
Found it.
You say
if (my_rank =0)
This should be
if (my_rank == 0)
;-)
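For what it's worth: in C, my_rank = 0 is an assignment whose value is 0, so the condition is always false. Worse, it also sets my_rank to 0 on every process, so the my_rank != 0 branch is skipped as well and every rank falls straight through to MPI_Finalize, which is exactly why the program exits with no output at all. Most compilers flag this when warnings are on; a minimal illustration:

int my_rank = 3;
if (my_rank = 0)   /* assignment, evaluates to 0: branch never taken */
    printf("never reached\n");
/* gcc/clang with -Wall warn here:
   "suggest parentheses around assignment used as truth value" */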
I am getting the following error output while executing MPI_Recv:
MPI_Recv(buf=0x000000D62C56FC60, count=1, MPI_INT, src=3, tag=0, MPI_COMM_WORLD, status=0x0000000000000001) failed
Message truncated; 8 bytes received but buffer size is 4
My function needs to find the index of the row that has the maximum element at position ind.
My function's code is found below:
int find_row(Matr matr, int ind)
{
    int max = ind;
    for (int i = ind + 1 + CurP; i < N; i += Pnum)
        if (matr[i][ind] > matr[max][ind])
            max = i;
    int ans = max;
    if (CurP != 0)
    {
        MPI_Send(&max, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
        MPI_Barrier(MPI_COMM_WORLD);
    }
    else
    {
        MPI_Barrier(MPI_COMM_WORLD);
        for (int i = 1; i < Pnum; i++)
        {
            MPI_Recv(&max, 1, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            printf("max %d %Lf! Process %d;\n", max, matr[max][ind], i);
            fflush(stdout);
            if (matr[max][ind] > matr[ans][ind])
                ans = max;
        }
    }
    return ans;
}
Matr is the following type definition: typedef vector<vector<long double> >& Matr;
CurP and Pnum are initialized in the following way:
MPI_Comm_size(MPI_COMM_WORLD, &Pnum);
MPI_Comm_rank(MPI_COMM_WORLD, &CurP);
Please help me solve this issue. Thanks!
It was my own mistake: in another part of my code I was calling MPI_Bcast from only some of the processes.
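For anyone else hitting this: MPI_Bcast is a collective, so every rank in the communicator must call it, with matching count and datatype. If some ranks skip it, later sends and receives can end up pairing with the wrong partners, which is how an 8-byte message can land on a receive that posted only a 4-byte buffer. A minimal sketch of the correct pattern (compute_pivot is a hypothetical placeholder for the root-only work):

int pivot = 0;
if (CurP == 0)
    pivot = compute_pivot();   /* hypothetical root-only work */
/* every rank calls this, not just rank 0 */
MPI_Bcast(&pivot, 1, MPI_INT, 0, MPI_COMM_WORLD);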
I am running MPI applications on a cluster with Sun Grid Engine, using OpenMPI.
Has anyone ever experienced MPI communications which hang while the applications are running?
For example: the process on rank 0 calls MPI_Send to the process on rank 1, and the process on rank 1 calls MPI_Recv from the process on rank 0. The ranks are correct and the tags are correct, yet the communication just does not happen, so the application never terminates.
N.B. The applications work on my laptop and other machines, so it is not the case that I have some IDs wrong... Also, these applications work on the cluster the first time, but when I submit the exact same job again the MPI communication hangs as explained above.
If anyone has experienced something similar, any help would be appreciated, thanks.
EDIT:
I have implemented a very simple example of one of the applications:
#include <mpi.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>
#include <pthread.h>
#include <sys/time.h>
#define NUM_CELLS 5 //Total number of integers to sort
/* Initial process to create numbers and send to first cell */
void *start(void *data)
{
    int i, num;
    time_t t;
    srand((unsigned) time(&t));
    //Create array of random numbers and print
    for (i = 0; i < NUM_CELLS; i++)
    {
        num = rand() % 100;
        printf("0 SEND\n");
        MPI_Send(&num, 1, MPI_INT, 1, 1, MPI_COMM_WORLD);
        printf("0 SENT\n");
    }
    return NULL;
}
/* Process for individual sort cell in sort pump */
void *sort_cell(void *data)
{
    int *pos = (int *)data;
    int num = 2, i;
    for (i = 0; i < NUM_CELLS; i++)
    {
        //Receive num
        printf("%d WAIT\n", *pos);
        if (*pos == 1)
            MPI_Recv(&num, 1, MPI_INT, 0, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        else if (*pos == 2)
            MPI_Recv(&num, 1, MPI_INT, 1, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        else if (*pos == 3)
            MPI_Recv(&num, 1, MPI_INT, 2, 3, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        else if (*pos == 4)
            MPI_Recv(&num, 1, MPI_INT, 3, 4, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        else if (*pos == 5)
            MPI_Recv(&num, 1, MPI_INT, 4, 5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        printf("%d RECV\n", *pos);
        //Keep larger number and send smaller number to next cell
        printf("%d SEND\n", *pos);
        if (*pos == 1)
            MPI_Send(&num, 1, MPI_INT, 2, 2, MPI_COMM_WORLD);
        else if (*pos == 2)
            MPI_Send(&num, 1, MPI_INT, 3, 3, MPI_COMM_WORLD);
        else if (*pos == 3)
            MPI_Send(&num, 1, MPI_INT, 4, 4, MPI_COMM_WORLD);
        else if (*pos == 4)
            MPI_Send(&num, 1, MPI_INT, 1, 5, MPI_COMM_WORLD);
        printf("%d SENT\n", *pos);
    }
    return NULL;
}
int main(int argc, char **argv)
{
    int i;
    double elapsedTime;
    struct timeval t1, t2;
    //Start timer
    gettimeofday(&t1, NULL);
    int my_rank, provided;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
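    /* Not in the original post: it is worth checking `provided` here. If the MPI
       library grants less than MPI_THREAD_MULTIPLE, calling MPI_Send/MPI_Recv
       from several threads at once is not allowed and can hang sporadically,
       which would match the behavior described above. */
    if (provided < MPI_THREAD_MULTIPLE)
    {
        printf("Rank %d - MPI_THREAD_MULTIPLE not provided (got %d)\n", my_rank, provided);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }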
    //Stop timer
    gettimeofday(&t2, NULL);
    elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0;    //sec to ms
    elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; //us to ms
    printf("Rank %d - Setup time: %f milliseconds\n", my_rank, elapsedTime);
    //Start timer
    gettimeofday(&t1, NULL);
    //Execute processes in parallel according to mapping
    if (my_rank == 0)
    {
        int num_threads = 1;
        pthread_t threads[num_threads];
        pthread_create(&threads[0], NULL, start, NULL);
        for (i = 0; i < num_threads; i++)
            (void) pthread_join(threads[i], NULL);
    }
    if (my_rank == 1)
    {
        int pos = 1;
        int num_threads = 2;
        pthread_t threads[num_threads];
        pthread_create(&threads[0], NULL, sort_cell, (void *) &pos);
        int pos1 = 5;
        pthread_create(&threads[1], NULL, sort_cell, (void *) &pos1);
        for (i = 0; i < num_threads; i++)
            (void) pthread_join(threads[i], NULL);
    }
    if (my_rank == 2)
    {
        int pos = 2;
        int num_threads = 1;
        pthread_t threads[num_threads];
        pthread_create(&threads[0], NULL, sort_cell, (void *) &pos);
        for (i = 0; i < num_threads; i++)
            (void) pthread_join(threads[i], NULL);
    }
    if (my_rank == 3)
    {
        int pos = 3;
        int num_threads = 1;
        pthread_t threads[num_threads];
        pthread_create(&threads[0], NULL, sort_cell, (void *) &pos);
        for (i = 0; i < num_threads; i++)
            (void) pthread_join(threads[i], NULL);
    }
    if (my_rank == 4)
    {
        int pos = 4;
        int num_threads = 1;
        pthread_t threads[num_threads];
        pthread_create(&threads[0], NULL, sort_cell, (void *) &pos);
        for (i = 0; i < num_threads; i++)
            (void) pthread_join(threads[i], NULL);
    }
    MPI_Barrier(MPI_COMM_WORLD);
    //Stop timer
    gettimeofday(&t2, NULL);
    elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0;    //sec to ms
    elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; //us to ms
    printf("Rank %d - Execution time: %f milliseconds\n", my_rank, elapsedTime);
    MPI_Finalize();
    return 0;
}
Depending on how the sort_cells are arranged on the ranks, it either works or hangs, and I cannot understand why.
The application is as follows:
Sort_cell -> Sort_cell -> Sort_cell -> Sort_cell -> Sort_cell -> Sort_cell
ranks:   0            1            2            3            4            1
(where each Sort_cell is a process run on the specified ranks respectively)
If these are run on ranks which are grouped, i.e. 00111, 01233, 00112, etc., it works; but once I execute on a stray rank (like the above example), i.e. 00110, 01221, 01231, etc., the MPI communication hangs. Any suggestions?
So I have some code where I am using MPI_Bcast to send information from the root node to all nodes, but instead I want my P0 to send chunks of the array to individual processes.
How do I do this with MPI_Send and MPI_Recv?
I've never used them before and I don't know if I need to loop my MPI_Recv to effectively send everything or what.
I've put giant caps-lock comments in the code where I need to replace my MPI_Bcast(); sorry in advance for the waterfall of code.
Code:
#include "mpi.h"
#include <stdio.h>
#include <math.h>
#define MAXSIZE 10000000
int add(int *A, int low, int high)
{
    int res = 0, i;
    for (i = low; i <= high; i++)
        res += A[i];
    return res;
}
int main(int argc, char *argv[])
{
    int myid, numprocs, x;
    int data[MAXSIZE];
    int i, low, high, myres, res;
    double elapsed_time;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);
    if (myid == 0)
    {
        for (i = 0; i < MAXSIZE; i++)
            data[i] = 1;
    }
    /* start the timer */
    elapsed_time = -MPI_Wtime();
    //THIS IS WHERE I GET CONFUSED ABOUT MPI_SEND AND MPI_RECEIVE!!!
    MPI_Bcast(data, MAXSIZE, MPI_INT, 0, MPI_COMM_WORLD);
    x = MAXSIZE / numprocs;
    low = myid * x;
    high = low + x - 1;
    if (myid == numprocs - 1)
        high = MAXSIZE - 1;
    myres = add(data, low, high);
    printf("I got %d from %d\n", myres, myid);
    MPI_Reduce(&myres, &res, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
    /* stop the timer */
    elapsed_time += MPI_Wtime();
    if (myid == 0)
        printf("The sum is %d, time taken = %f.\n", res, elapsed_time);
    MPI_Barrier(MPI_COMM_WORLD);
    printf("The sum is %d at process %d.\n", res, myid);
    MPI_Finalize();
    return 0;
}
You need MPI_Scatter. A good intro is here: http://mpitutorial.com/tutorials/mpi-scatter-gather-and-allgather/
I think in your code it could look like this:
int elements_per_proc = MAXSIZE / numprocs;
// Create a buffer that will hold a chunk of the global array
int *data_chunk = malloc(sizeof(int) * elements_per_proc);
MPI_Scatter(data, elements_per_proc, MPI_INT, data_chunk,
            elements_per_proc, MPI_INT, 0, MPI_COMM_WORLD);
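Each rank then sums its own chunk locally, and the MPI_Reduce from the question stays as it is. A sketch reusing the question's add() (note that if numprocs does not divide MAXSIZE evenly, the leftover tail is not scattered; MPI_Scatterv handles uneven chunks):

myres = add(data_chunk, 0, elements_per_proc - 1);
MPI_Reduce(&myres, &res, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);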
If you really want to use MPI_Send and MPI_Recv, then you can use something like this:
int x = MAXSIZE / numprocs;
int *procData = malloc(sizeof(int) * x);
MPI_Status status;
if (myid == 0) {
    for (int i = 1; i < numprocs; i++) {
        MPI_Send(data + i*x, x, MPI_INT, i, 0, MPI_COMM_WORLD);
    }
} else {
    MPI_Recv(procData, x, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
}
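Note that in this version rank 0 never sends anything to itself, so it should work on the first x elements of data directly, or copy them so the rest of the code can treat every rank the same; a sketch (memcpy needs <string.h>):

if (myid == 0)
    memcpy(procData, data, x * sizeof(int));   /* rank 0 keeps the first chunk */

After that, each rank sums procData[0..x-1] instead of indexing the full array with low/high.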
I'm trying to divide the string str_data1 and send the pieces to the slave processes in MPI_COMM_WORLD, but I am getting garbage on the slaves.
The output looks something like this:
2
2
3
�E0� �E0�� �E0O�
#include <stdio.h>
#include <string.h>
#include "mpi.h"
int main(int argc, char* argv[]) {
    int rank;
    int p;
    MPI_Status status;
    int msg_size = 0;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &p);
    char *str_data1 = "Samsung Galaxy Tab S 10.5 Wi-Fi, Tablet PC Android";
    int len1 = strlen(str_data1), i;
    char *str1[len1];
    char *a[len1];
    if (rank == 0) {
        char *ds = strdup(str_data1);
        int n = 0;
        a[n] = strtok(ds, " ,");
        while (a[n] && n < len1) {
            a[++n] = strtok(NULL, " ,");
        }
        int chunk = n / p;
        int str_size = chunk;
        for (i = 1; i < p; i++) {
            if (i == p - 1) {
                str_size = n - chunk * i;
            }
            MPI_Send(&str_size, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
            MPI_Send(&a, str_size + 1, MPI_CHAR, i, 0, MPI_COMM_WORLD);
        }
    } else {
        MPI_Recv(&msg_size, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
        char message[msg_size];
        printf(" \n %d ", msg_size);
        MPI_Recv(&message, msg_size + 1, MPI_CHAR, 0, 0, MPI_COMM_WORLD,
                 &status);
        printf(" \n %s ", message);
    }
    MPI_Finalize();
    return 0;
}
Does anyone have any clues what I'm doing wrong? Thanks.
Some newer compilers will give you a nice warning about these sorts of things. It's been very nice ever since Clang added this. If you were to use that compiler, you'd see this:
$ mpic++ asdf.c
clang: warning: treating 'c' input as 'c++' when in C++ mode, this behavior is deprecated
asdf.c:17:23: warning: conversion from string literal to 'char *' is deprecated [-Wc++11-compat-deprecated-writable-strings]
char *str_data1 = "Samsung Galaxy Tab S 10.5 Wi-Fi, Tablet PC Android";
^
asdf.c:39:22: warning: argument type 'char *(*)[len1]' doesn't match specified 'MPI' type tag that requires 'char *' [-Wtype-safety]
MPI_Send(&a, str_size + 1, MPI_CHAR, i, 0, MPI_COMM_WORLD);
^~ ~~~~~~~~
asdf.c:47:18: warning: argument type 'char (*)[msg_size]' doesn't match specified 'MPI' type tag that requires 'char *' [-Wtype-safety]
MPI_Recv(&message, msg_size + 1, MPI_CHAR, 0, 0, MPI_COMM_WORLD,
^~~~~~~~ ~~~~~~~~
3 warnings generated.
That shows you that you're using the wrong types for your character arrays. You should be using either just character arrays:
char a[len1];
or character pointers:
char *a;
Either way, you need to do some code rework to make that work correctly. Specifically, this section:
char *ds = strdup(str_data1);
int n = 0;
a[n] = strtok(ds, " ,");
while (a[n] && n < len1) {
a[++n] = strtok(NULL, " ,");
}
I don't think that tokenizing section is doing what you think it is: a ends up as an array of pointers into ds, and MPI_Send(&a, ...) ships those pointer values as if they were characters, which is meaningless in another process's address space.
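One way to make this work (a sketch, not the only possible fix): send each token's characters rather than the pointer array, mirroring the size-then-data pattern already used for str_size. Here j stands for whichever token index rank i is supposed to get:

/* inside the rank 0 loop, for each token a[j] destined for rank i */
int tok_len = strlen(a[j]) + 1;                 /* include the '\0' */
MPI_Send(&tok_len, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
MPI_Send(a[j], tok_len, MPI_CHAR, i, 0, MPI_COMM_WORLD);

On the receiving side, post a matching pair of receives into a char buffer of at least tok_len bytes.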
Thanks Wesly. I tried it with a structure, and it looks like it's working:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <mpi.h>

struct tokan {
    char buff[30];
} t[50];

int main(int argc, char* argv[]) {
    int rank;
    int p;
    MPI_Status status;
    int msg_size = 0, i = 0, j = 0, msg_size1 = 0;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &p);
    int n = 0, m = 0, k = 0, N = 0, M = 0;
    char *str1, *str2, *str_data2, *str_data1;
    if (rank == 0) {
        str_data1 =
                "processes in this communicator will now abort, and potentially your MPI job";
        str_data2 =
                "The behavior is undefined if lhs or rhs are not pointers to null-terminated byte strings";
        str1 = (char *) malloc((strlen(str_data1) + 1) * sizeof(char)); /* +1 for the '\0' */
        str2 = (char *) malloc((strlen(str_data2) + 1) * sizeof(char));
        strcpy(str1, str_data1);
        strcpy(str2, str_data2);
        int len;
        M = strlen(str2);
        N = strlen(str1);
        char *ptr;
        k = 0;
        char *ds = strdup(str_data1);
        ptr = strtok(ds, " ,");
        while (ptr != NULL) {
            ++n;
            len = strlen(ptr);
            for (j = 0; j < len; j++) {
                t[k].buff[j] = *(ptr + j);
            }
            //printf(" %s ", t[k].buff);
            k++;
            ptr = strtok(NULL, " ,");
        }
        int chunk = n / p;
        int str_size = chunk, cnt = 0;
        j = chunk;
        for (i = 1; i < p; i++) {
            if (i == p - 1) {
                str_size = n - chunk * i;
            }
            MPI_Send(&str_size, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
            for (cnt = 0; cnt < str_size; cnt++) {
                /*printf("process 0: %c %d %d %d %d %d %d\n", t[j].buff, str_size, n,
                 chunk, cnt, j, l);*/
                /* buff is only 30 bytes, so send no more than sizeof(t[j].buff) */
                MPI_Send(t[j].buff, sizeof(t[j].buff), MPI_CHAR, i, 0, MPI_COMM_WORLD);
                j++;
            }
        }
        str_size = chunk;
        for (i = 1; i < p; i++) {
            MPI_Send(&M, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
            //printf("process 0: %s %d %d %d %d\n", str2, n, chunk, cnt, j);
            MPI_Send(str2, M, MPI_CHAR, i, 0, MPI_COMM_WORLD);
        }
    } else {
        MPI_Recv(&msg_size, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
        int k;
        for (k = 0; k < msg_size; k++) {
            /* receive at most sizeof(t[k].buff) bytes into the 30-byte buffer */
            MPI_Recv(t[k].buff, sizeof(t[k].buff), MPI_CHAR, 0, 0, MPI_COMM_WORLD, &status);
            printf(" \nstr1: %s %d %d %d\n", t[k].buff, rank, msg_size, k);
        }
        printf("***************");
        MPI_Recv(&M, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
        str2 = (char *) malloc((M + 1) * sizeof(char));
        MPI_Recv(str2, M, MPI_CHAR, 0, 0, MPI_COMM_WORLD, &status);
        str2[M] = '\0';
        printf(" \nstr2: %s %d %d %d \n", str2, rank, k, M);
    }
    if (rank == 0)
        free(str1);
    free(str2);
    MPI_Finalize();
    return 0;
}
This is an MPI code for LU decomposition.
I have used the following strategy:
There is a master (rank 0) and the others are slaves. The master sends rows to each slave. Since each slave might receive more than one row, I store all the received rows in a buffer and then perform LU decomposition on the buffer. After doing that I send the buffer back to the master. The master does not do any computation; it just sends and receives.
for (i = 0; i < n; i++)
    map[i] = i % (numProcs - 1) + 1;

for (i = 0; i < n - 1; i++)
{
    if (rank == 0)
    {
        status = pivot(LU, i, n);
        for (j = 0; j < n; j++)
            row1[j] = LU[n*i + j];
    }
    MPI_Bcast(&status, 1, MPI_INT, 0, MPI_COMM_WORLD);
    if (status == -1)
        return -1;
    MPI_Bcast(row1, n, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    int tag1 = 1, tag2 = 2, tag3 = 3, tag4 = 4;
    if (rank == 0)
    {
        int pno, start, index, l, rowsReceived = 0;
        MPI_Request req;
        MPI_Status stat;
        for (j = i + 1; j < n; j++)
            MPI_Isend(&LU[n*j], n, MPI_DOUBLE, map[j], map[j], MPI_COMM_WORLD, &req);
        if (i >= n - (numProcs - 1))
            cnt++;
        for (j = 0; j < numProcs - 1 - cnt; j++)
        {
            MPI_Recv(&pno, 1, MPI_INT, MPI_ANY_SOURCE, tag2, MPI_COMM_WORLD, &stat);
            //printf("1. Recv from %d and j : %d and i : %d\n", pno, j, i);
            MPI_Recv(&rowsReceived, 1, MPI_INT, pno, tag3, MPI_COMM_WORLD, &stat);
            MPI_Recv(rowFinal, n*rowsReceived, MPI_DOUBLE, pno, tag4, MPI_COMM_WORLD, &stat);
            /* Will not go more than numProcs anyways */
            for (k = i + 1; k < n; k++)
            {
                if (map[k] == pno)
                {
                    start = k;
                    break;
                }
            }
            for (k = 0; k < rowsReceived; k++)
            {
                index = start + k*(numProcs - 1);
                for (l = 0; l < n; l++)
                    LU[n*index + l] = rowFinal[n*k + l];
            }
        }
    }
    else
    {
        int rowsReceived = 0;
        MPI_Status stat, stats[3];
        MPI_Request reqs[3];
        for (j = i + 1; j < n; j++)
            if (map[j] == rank)
                rowsReceived += 1;
        for (j = 0; j < rowsReceived; j++)
        {
            MPI_Recv(&rowFinal[n*j], n, MPI_DOUBLE, 0, rank, MPI_COMM_WORLD, &stat);
        }
        for (j = 0; j < rowsReceived; j++)
        {
            double factor = rowFinal[n*j + i] / row1[i];
            for (k = i + 1; k < n; k++)
                rowFinal[n*j + k] -= (row1[k] * factor);
            rowFinal[n*j + i] = factor;
        }
        if (rowsReceived != 0)
        {
            //printf("Data sent from %d iteration : %d\n", rank, i);
            MPI_Isend(&rank, 1, MPI_INT, 0, tag2, MPI_COMM_WORLD, &reqs[0]);
            MPI_Isend(&rowsReceived, 1, MPI_INT, 0, tag3, MPI_COMM_WORLD, &reqs[1]);
            MPI_Isend(rowFinal, n*rowsReceived, MPI_DOUBLE, 0, tag4, MPI_COMM_WORLD, &reqs[2]);
        }
        //MPI_Waitall(3, reqs, stats);
    }
}
The problem that I am facing is that sometimes the program hangs. My guess is that the sends and receives are not being matched, but I am not able to figure out where the problem lies.
I ran test cases on matrices of size 1000x1000, 2000x2000, 3000x3000, 5000x5000 and 7000x7000. Presently the code hangs for 7000x7000. Could someone please help me out?
Things to note:
- map implements the mapping scheme: which row goes to which slave.
- rowsReceived tells each slave the number of rows it will receive. I don't need to calculate that each and every time, but I will fix it later.
- row1 is the buffer in which the active row is stored.
- rowFinal is the buffer for the rows being received and modified.
- cnt is not important and can be ignored; for that, the check for rowsReceived != 0 needs to be removed.
It looks like you are never completing your nonblocking operations. You have a bunch of calls to MPI_Isend throughout the code, but you're never doing an MPI_Wait or MPI_Test (or one of the similar calls). Without that completion call, those nonblocking sends are never guaranteed to complete, and their buffers cannot safely be reused.
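A minimal sketch of what the completion could look like in the slave branch, using the MPI_Waitall that is already commented out in the question (moved inside the rowsReceived != 0 check so the requests are always valid when waited on):

if (rowsReceived != 0)
{
    MPI_Isend(&rank, 1, MPI_INT, 0, tag2, MPI_COMM_WORLD, &reqs[0]);
    MPI_Isend(&rowsReceived, 1, MPI_INT, 0, tag3, MPI_COMM_WORLD, &reqs[1]);
    MPI_Isend(rowFinal, n*rowsReceived, MPI_DOUBLE, 0, tag4, MPI_COMM_WORLD, &reqs[2]);
    /* block until all three sends complete; until then the buffers
       (rank, rowsReceived, rowFinal) must not be modified */
    MPI_Waitall(3, reqs, stats);
}

The master's loop of MPI_Isend calls reuses a single MPI_Request the same way and needs the same treatment, e.g. an array of requests followed by one MPI_Waitall.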