LU Decomposition MPI

This is an MPI code for LU decomposition.
I have used the following strategy:
There is a master (rank 0) and the others are slaves. The master sends rows to each slave.
Since each slave might receive more than one row, I store all the received rows in a
buffer and then perform LU decomposition on it. After that, I send the buffer back
to the master. The master does not do any computation; it only sends and receives.
for(i=0; i<n; i++)
    map[i] = i%(numProcs-1) + 1;
for(i=0; i<n-1; i++)
{
    if(rank == 0)
    {
        status = pivot(LU,i,n);
        for(j=0; j<n; j++)
            row1[j] = LU[n*i+j];
    }
    MPI_Bcast(&status, 1, MPI_INT, 0, MPI_COMM_WORLD);
    if(status == -1)
        return -1;
    MPI_Bcast(row1, n, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    int tag1 = 1, tag2 = 2, tag3 = 3, tag4 = 4;
    if(rank == 0)
    {
        int pno, start, index, l, rowsReceived = 0;
        MPI_Request req;
        MPI_Status stat;
        for(j=i+1; j<n; j++)
            MPI_Isend(&LU[n*j], n, MPI_DOUBLE, map[j], map[j], MPI_COMM_WORLD, &req);
        if(i>=n-(numProcs-1))
            cnt++;
        for(j=0; j<numProcs-1-cnt; j++)
        {
            MPI_Recv(&pno, 1, MPI_INT, MPI_ANY_SOURCE, tag2, MPI_COMM_WORLD, &stat);
            //printf("1. Recv from %d and j : %d and i : %d\n",pno,j,i);
            MPI_Recv(&rowsReceived, 1, MPI_INT, pno, tag3, MPI_COMM_WORLD, &stat);
            MPI_Recv(rowFinal, n*rowsReceived, MPI_DOUBLE, pno, tag4, MPI_COMM_WORLD, &stat);
            /* Will not go more than numProcs anyways */
            for(k=i+1; k<n; k++)
            {
                if(map[k] == pno)
                {
                    start = k;
                    break;
                }
            }
            for(k=0; k<rowsReceived; k++)
            {
                index = start + k*(numProcs-1);
                for(l=0; l<n; l++)
                    LU[n*index+l] = rowFinal[n*k+l];
            }
        }
    }
    else
    {
        int rowsReceived = 0;
        MPI_Status stat, stats[3];
        MPI_Request reqs[3];
        for(j=i+1; j<n; j++)
            if(map[j] == rank)
                rowsReceived += 1;
        for(j=0; j<rowsReceived; j++)
        {
            MPI_Recv(&rowFinal[n*j], n, MPI_DOUBLE, 0, rank, MPI_COMM_WORLD, &stat);
        }
        for(j=0; j<rowsReceived; j++)
        {
            double factor = rowFinal[n*j+i]/row1[i];
            for(k=i+1; k<n; k++)
                rowFinal[n*j+k] -= (row1[k]*factor);
            rowFinal[n*j+i] = factor;
        }
        if(rowsReceived != 0)
        {
            //printf("Data sent from %d iteration : %d\n",rank,i);
            MPI_Isend(&rank, 1, MPI_INT, 0, tag2, MPI_COMM_WORLD, &reqs[0]);
            MPI_Isend(&rowsReceived, 1, MPI_INT, 0, tag3, MPI_COMM_WORLD, &reqs[1]);
            MPI_Isend(rowFinal, n*rowsReceived, MPI_DOUBLE, 0, tag4, MPI_COMM_WORLD, &reqs[2]);
        }
        //MPI_Waitall(3,reqs,stats);
    }
}
The problem I am facing is that the program sometimes hangs. My guess is
that the sends and receives are not being matched, but I have not been able to
figure out where the problem lies.
I ran test cases on matrices of size 1000x1000, 2000x2000, 3000x3000, 5000x5000
and 7000x7000. Presently the code hangs for 7000x7000. Could someone please help
me out?
Things to note:
map implements the mapping scheme, i.e. which row goes to which slave.
rowsReceived tells each slave the number of rows it will receive. I don't need to
recalculate it every iteration, but I will fix that later.
row1 is the buffer in which the active (pivot) row is stored.
rowFinal is the buffer for the rows being received and modified.
cnt is not important and can be ignored. To drop it, the check for
rowsReceived != 0 needs to be removed as well.

It looks like you are never completing your nonblocking operations. You have a number of MPI_Isend calls throughout the code, but you never call MPI_Wait or MPI_Test (or one of the similar completion calls) on the resulting requests. Without that completion call, those nonblocking operations never complete, and their buffers are never safe to reuse.
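For example, in the slave branch the three MPI_Isend calls should be completed before the next iteration of i reuses rowFinal and rowsReceived. A minimal sketch, reusing the reqs and stats arrays already declared in the question's code:

if(rowsReceived != 0)
{
    MPI_Isend(&rank, 1, MPI_INT, 0, tag2, MPI_COMM_WORLD, &reqs[0]);
    MPI_Isend(&rowsReceived, 1, MPI_INT, 0, tag3, MPI_COMM_WORLD, &reqs[1]);
    MPI_Isend(rowFinal, n*rowsReceived, MPI_DOUBLE, 0, tag4, MPI_COMM_WORLD, &reqs[2]);
    /* Complete all three sends before their buffers are reused. */
    MPI_Waitall(3, reqs, stats);
}

The master's loop of MPI_Isend calls has the same issue, made worse by every send reusing the single req handle; each of those requests needs its own handle and a matching wait (or use plain MPI_Send there).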

Related

Incorrect buffer size in MPI

I am getting the following error output while executing MPI_Recv:
MPI_Recv(buf=0x000000D62C56FC60, count=1, MPI_INT, src=3, tag=0, MPI_COMM_WORLD, status=0x0000000000000001) failed
Message truncated; 8 bytes received but buffer size is 4
My function needs to find the number of the row that has the maximum element at position ind.
My function's code is found below:
int find_row(Matr matr, int ind)
{
    int max = ind;
    for (int i = ind + 1 + CurP; i < N; i += Pnum)
        if (matr[i][ind] > matr[max][ind])
            max = i;
    int ans = max;
    if (CurP != 0)
    {
        MPI_Send(&max, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
        MPI_Barrier(MPI_COMM_WORLD);
    }
    else
    {
        MPI_Barrier(MPI_COMM_WORLD);
        for (int i = 1; i < Pnum; i++)
        {
            MPI_Recv(&max, 1, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            printf("max %d %Lf! Process %d;\n", max, matr[max][ind], i);
            fflush(stdout);
            if (matr[max][ind] > matr[ans][ind])
                ans = max;
        }
    }
    return ans;
}
Matr is the following type definition: typedef vector<vector<long double> >& Matr;
CurP and Pnum are initialized in the following way:
MPI_Comm_size(MPI_COMM_WORLD, &Pnum);
MPI_Comm_rank(MPI_COMM_WORLD, &CurP);
Please help me solve this issue. Thanks!
It was my mistake. In another part of my code I was calling MPI_Bcast from only some of the processes, not all of them.
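For reference, MPI_Bcast is a collective operation: every rank in the communicator must make the matching call with the same root, count, and datatype. If some ranks skip it, the broadcast traffic gets matched against whatever communication those ranks do next, which typically surfaces as truncation errors like the one above. A minimal sketch of the correct pattern (value is an illustrative variable, not from the question's code):

int value = 0;
if (CurP == 0)
    value = 42;                                       /* root prepares the data */
MPI_Bcast(&value, 1, MPI_INT, 0, MPI_COMM_WORLD);     /* called on every rank   */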

Removing MPI_Bcast()

So I have some code where I am using MPI_Bcast to send information from the root node to all nodes, but instead I want P0 to send chunks of the array to the individual processes.
How do I do this with MPI_Send and MPI_Recv?
I've never used them before and I don't know if I need to loop my MPI_Recv calls to effectively receive everything, or what.
I've put giant caps lock comments in the code where I need to replace my MPI_Bcast(), sorry in advance for the waterfall of code.
Code:
#include "mpi.h"
#include <stdio.h>
#include <math.h>
#define MAXSIZE 10000000
int add(int *A, int low, int high)
{
int res = 0, i;
for(i=low; i<=high; i++)
res += A[i];
return(res);
}
int main(argc,argv)
int argc;
char *argv[];
{
int myid, numprocs, x;
int data[MAXSIZE];
int i, low, high, myres, res;
double elapsed_time;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
if (myid == 0)
{
for(i=0; i<MAXSIZE; i++)
data[i]=1;
}
/* star the timer */
elapsed_time = -MPI_Wtime();
//THIS IS WHERE I GET CONFUSED ABOUT MPI_SEND AND MPI_RECIEVE!!!
MPI_Bcast(data, MAXSIZE, MPI_INT, 0, MPI_COMM_WORLD);
x = MAXSIZE/numprocs;
low = myid * x;
high = low + x - 1;
if (myid == numprocs - 1)
high = MAXSIZE-1;
myres = add(data, low, high);
printf("I got %d from %d\n", myres, myid);
MPI_Reduce(&myres, &res, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
/* stop the timer*/
elapsed_time += MPI_Wtime();
if (myid == 0)
printf("The sum is %d, time taken = %f.\n", res,elapsed_time);
MPI_Barrier(MPI_COMM_WORLD);
printf("The sum is %d at process %d.\n", res,myid);
MPI_Finalize();
return 0;
}
You need MPI_Scatter. A good intro is here: http://mpitutorial.com/tutorials/mpi-scatter-gather-and-allgather/
I think in your code it could look like this:
int elements_per_proc = MAXSIZE/numprocs;
// Create a buffer that will hold this process's chunk of the global array
int *data_chunk = malloc(sizeof(int) * elements_per_proc);
MPI_Scatter(data, elements_per_proc, MPI_INT, data_chunk,
            elements_per_proc, MPI_INT, 0, MPI_COMM_WORLD);
If you really want to use MPI_Send and MPI_Recv, then you can use something like this (using the question's myid and numprocs):
int x = MAXSIZE / numprocs;
int *procData = malloc(sizeof(int) * x);
MPI_Status status;
if (myid == 0) {
    // send one chunk to each of the other ranks
    for (int i = 1; i < numprocs; i++) {
        MPI_Send(data + i*x, x, MPI_INT, i, 0, MPI_COMM_WORLD);
    }
} else {
    MPI_Recv(procData, x, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
}
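One thing to keep in mind with the Send/Recv version: rank 0 never fills procData for itself, since it already owns the full data array; its chunk is simply the first x elements. If you want every rank to work from procData, the root can copy its own slice (a sketch, assuming <string.h> is included):

if (myid == 0)
    memcpy(procData, data, sizeof(int) * x);

MPI_Scatter does this root-side copy for you, which is another reason to prefer it.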

How to wait till all processes receive particular data using MPI blocking functions?

I am distributing an adjacency matrix row by row to the processes using the MPI_Send and MPI_Recv functions. I used MPI_Barrier but the program gets stuck! How do I wait till all the processes have received their part of the matrix?
/* Distribute the adjacency matrix */
if( my_rank == 0) {
    int row_start_l, row_end_l, dest_l;
    for(dest_l=1; dest_l < comm_size; dest_l++) {
        row_start_l = dest_l*(num_nodes/comm_size);
        if( dest_l != (comm_size - 1) ) {
            row_end_l = (dest_l + 1)*(num_nodes/comm_size) - 1;
        }
        else {
            row_end_l = num_nodes - 1;
        }
        for(i = row_start_l; i <= row_end_l; i++) {
            MPI_Send(&g.matrix[i][1], 1, MPI_INT, dest_l, TAG_AM_DATA, MPI_COMM_WORLD);
            // Send Adjacency matrix to appropriate destinations. You can first send the appropriate size
            MPI_Send(&g.matrix[i], (g.matrix[i][1])+2, MPI_INT, dest_l, TAG_AM_DATA, MPI_COMM_WORLD);
        }
    }
    for(j=0; j < num_nodes; ) {
        for(k=0; k < 120; k++) {
            if(j >= num_nodes) {
                break;
            }
            sendrecv_buffer_double[k] = g.column_denominator[j];
            j++;
        }
        MPI_Bcast(&k, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
        MPI_Bcast(&sendrecv_buffer_double[0], k, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    }
    cnt++;
}
else {
    int recv_count;
    int recvd;
    cnt++;
    adj_matrix = (int **)malloc(my_num_rows*sizeof(int*));
    for(i=0; i < my_num_rows; i++) {
        MPI_Recv(&recv_count, 1, MPI_INT, 0, TAG_AM_DATA, MPI_COMM_WORLD, &status);
        adj_matrix[i] = (int *)malloc((2 + recv_count)*sizeof(int));
        // Receive adjacency matrix from root.
        MPI_Recv(&adj_matrix[i], 2+recv_count, MPI_INT, 0, TAG_AM_DATA, MPI_COMM_WORLD, &status);
    }
    recvd = 0;
    while(recvd < num_nodes) {
        MPI_Bcast(&recv_count, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
        MPI_Bcast(&g.column_denominator[recvd], recv_count, MPI_DOUBLE, 0, MPI_COMM_WORLD);
        recvd += recv_count;
    }
}
// Wait till all the processes have their assigned parts of the matrix.
MPI_Barrier(MPI_COMM_WORLD); // I am getting the error here
Error message:
Process 0 reading the input file.. Number of nodes = 100 Process 0
done reading.. Fatal error in PMPI_Barrier: Message truncated, error
stack: PMPI_Barrier(426)...................:
MPI_Barrier(MPI_COMM_WORLD) failed
MPIR_Barrier_impl(308)..............:
MPIR_Bcast_impl(1369)...............:
MPIR_Bcast_intra(1199)..............:
MPIR_Bcast_binomial(149)............:
MPIC_Recv(109)......................:
MPIDI_CH3U_Request_unpack_uebuf(605): Message truncated; 8 bytes
received but buffer size is 1
I'm not too sure what your "adjacency matrix" looks like or how it must be distributed, but I guess this is a job for MPI_Scatter() rather than a series of MPI_Bcast() calls...
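For what it's worth, here is a rough sketch of that idea for the row distribution, using the variable-count variant MPI_Scatterv since the block sizes differ (the last rank gets the leftover rows). It assumes the matrix has been packed into one contiguous flat_matrix of num_nodes*num_nodes ints on the root, which is not how the question's ragged g.matrix is stored; sendcounts, displs, base, my_rows, my_block, and flat_matrix are all illustrative names:

int *sendcounts = malloc(comm_size * sizeof(int));
int *displs     = malloc(comm_size * sizeof(int));
int base = num_nodes / comm_size;
for (int p = 0; p < comm_size; p++) {
    int rows = (p == comm_size - 1) ? num_nodes - p*base : base;
    sendcounts[p] = rows * num_nodes;      /* elements going to rank p */
    displs[p]     = p * base * num_nodes;  /* offset of rank p's block */
}
int my_rows = (my_rank == comm_size - 1) ? num_nodes - my_rank*base : base;
int *my_block = malloc(my_rows * num_nodes * sizeof(int));
/* Collective: every rank calls it; flat_matrix is only read on the root. */
MPI_Scatterv(flat_matrix, sendcounts, displs, MPI_INT,
             my_block, my_rows * num_nodes, MPI_INT,
             0, MPI_COMM_WORLD);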

MPI, process ends without output?

I have a very weird problem. I've written a program in MPI in which one process should print something, but amazingly the code terminates without any output. I can't understand where it goes wrong...
PS: this code is supposed to multiply two matrices.
int main( int argc, char *argv[] )
{
    int M = atoi(argv[1]);
    // N = 2 ^ M
    N = (unsigned int) pow (2.0, M); //you need to modify this code!
    int my_rank, comm_sz, mt;
    int rows, offset, extra, averow, dest;
    int i, j, k;
    srand(time(0));
    time_t t1, t2;
    double dt; //t2-t1
    double tavg=0.0;
    //input array
    A = (double*) malloc ( sizeof(double) * N * N );
    B = (double*) malloc ( sizeof(double) * N * N );
    C = (double*) malloc ( sizeof(double) * N * N );
    //int r; for (r = 0; r < REP; r++)
    //{
    //fill in matrix A and B with random numbers
    //t1
    //t1 = time(0);
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &comm_sz);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    if (my_rank =0){
        printf("mpi_mm has started with %d tasks.\n",comm_sz);
        printf("Initializing arrays...\n");
        fillmatrix(A,N);
        fillmatrix(B,N);
        averow = N/comm_sz;
        extra = N%comm_sz;
        offset = 0;
        mt = 0;
        MPI_Bcast(B,N*N,MPI_DOUBLE,0,MPI_COMM_WORLD);
        for ( dest=1;dest<=comm_sz;dest++){
            rows = (dest <=extra) ? averow+1 : averow;
            MPI_Send(&offset,1,MPI_INT,dest,mt,MPI_COMM_WORLD);
            MPI_Send(&rows,1,MPI_INT,dest,mt,MPI_COMM_WORLD);
            MPI_Send(&A[offset*N],rows*N,MPI_DOUBLE,dest,mt,MPI_COMM_WORLD);
            offset=offset+rows;
        }
        mt = 1;
        for (i=1; i<=comm_sz; i++){
            MPI_Recv(&offset, 1, MPI_INT, i, mt, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            MPI_Recv(&rows, 1, MPI_INT, i, mt, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            MPI_Recv(&C[offset*N], rows*N, MPI_DOUBLE, i, mt, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            printf("Received results from task %d\n",i);
        }
        /* Print results */
        printf("******************************************************\n");
        printf("Result Matrix:\n");
        for (i=0; i<N; i++)
        {
            printf("\n");
            for (j=0; j<N; j++)
                printf("%6.2f ", C[i*N+j]);
        }
        printf("\n******************************************************\n");
        printf ("Done.\n");
    }
    if(my_rank !=0){
        MPI_Bcast(B,N*N,MPI_DOUBLE,0,MPI_COMM_WORLD);
        mt = 0;
        MPI_Recv(&offset,1,MPI_INT,0,mt,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
        MPI_Recv(&rows,1,MPI_INT,0,mt,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
        MPI_Recv(&A,rows*N,MPI_DOUBLE,0,mt,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
        for(i=0;i<N;i++)
            for(j=0;j<rows;j++){
                C[j*N+i] =0.0;
                for(k=0;k<N;k++)
                    C[j*N+i] += A[j*N+k]*B[k*N+i];
            }
        mt = 1;
        MPI_Send(&offset, 1, MPI_INT, 0, mt, MPI_COMM_WORLD);
        MPI_Send(&rows, 1, MPI_INT, 0, mt, MPI_COMM_WORLD);
        MPI_Send(&C, rows*N, MPI_DOUBLE, 0, mt, MPI_COMM_WORLD);
    }
    MPI_Finalize();
Found it.
You say
if (my_rank =0)
This should be
if (my_rank == 0)
;-)
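As an aside, bugs like this are easy to catch at compile time: building with warnings enabled, e.g. mpicc -Wall -Wextra mpi_mm.c (the file name here is just a placeholder), makes GCC and Clang warn about an assignment being used as a condition and suggest adding parentheses if it is intentional.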

MPI Receive from many processes

Here is my code:
if (rank != 0) {
    // send the number of processed pixels
    rc = MPI_Send(&pixeli, 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
    // send the coordinates where processing started
    rc = MPI_Send(&first_line, 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
    rc = MPI_Send(&first_col, 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
    for (i = 0; i < pixeli; i++) {
        rc = MPI_Send(&results[i], 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
    }
}
else {
    for (i = 1; i < numtasks; i++) {
        rc = MPI_Recv(&received_pixels, 1, MPI_INT, i, tag, MPI_COMM_WORLD, &Stat);
        results_recv = (int*) calloc (received_pixels, sizeof(int));
        rc = MPI_Recv(&start_line_recv, 1, MPI_INT, i, tag, MPI_COMM_WORLD, &Stat);
        rc = MPI_Recv(&start_col_recv, 1, MPI_INT, i, tag, MPI_COMM_WORLD, &Stat);
        for (j = 0; j < received_pixels; j++) {
            rc = MPI_Recv(&results_recv[j], 1, MPI_INT, i, tag, MPI_COMM_WORLD, &Stat);
        }
        free(results_recv);
    }
}
If I run this with 2 processes it is OK because one will send and the other will receive.
If I run this with 4 processes I get the following error messages:
Fatal error in MPI_Recv: Other MPI error, error stack:
MPI_Recv(186)...........................: MPI_Recv(buf=0xbff05324, count=1, MPI_INT, src=1, tag=1, MPI_COMM_WORLD, status=0xbff053ec) failed
MPIDI_CH3I_Progress(461)................:
MPID_nem_handle_pkt(636)................:
MPIDI_CH3_PktHandler_EagerShortSend(308): Failed to allocate memory for an unexpected message. 261895 unexpected messages queued.
What should I do to fix this?
These lines:
for (i = 0; i < pixeli; i++) {
    rc = MPI_Send(&results[i], 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
}
and the corresponding MPI_Recvs look like they're essentially reimplementing MPI_Gather. Using a single MPI_Gather call with the count set to pixeli instead of 1 may allow the implementation to schedule the sends and receives more efficiently, but more importantly, it will probably drastically cut down on the total number of send/receive pairs needed to complete the whole batch of communication. You could get a similar effect by removing the for loop and doing:
rc = MPI_Send(&results[0], pixeli, MPI_INT, 0, tag, MPI_COMM_WORLD);
but again, using the builtin MPI_Gather would be the preferred way of doing it.
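If the per-rank pixel counts can differ (which is presumably why pixeli is sent separately), the variable-count collective MPI_Gatherv is the closer fit, since plain MPI_Gather needs the same count from every rank. A rough sketch under that assumption, where counts, displs, all_results, and total are illustrative names not present in the question's code:

/* Root first gathers each rank's pixel count, then the pixels themselves. */
int *counts = NULL, *displs = NULL, *all_results = NULL;
if (rank == 0) {
    counts = malloc(numtasks * sizeof(int));
    displs = malloc(numtasks * sizeof(int));
}
MPI_Gather(&pixeli, 1, MPI_INT, counts, 1, MPI_INT, 0, MPI_COMM_WORLD);
if (rank == 0) {
    int total = 0;
    for (int p = 0; p < numtasks; p++) {
        displs[p] = total;           /* where rank p's pixels land */
        total += counts[p];
    }
    all_results = malloc(total * sizeof(int));
}
/* Every rank contributes its results; root receives them packed in rank order. */
MPI_Gatherv(results, pixeli, MPI_INT,
            all_results, counts, displs, MPI_INT, 0, MPI_COMM_WORLD);

Note that with this version the root contributes its own results as well, which differs slightly from the question's workers-only scheme.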
The shortest answer is to tell you to use synchronous communication, that is, MPI_Ssend() instead of MPI_Send().
The trouble is that you send too many messages, and they get buffered (MPI_Send() is blocking, but for small messages it typically returns as soon as the message has been buffered internally). Memory consumption grows until something fails. Synchronous sends avoid that buffering, but they do not reduce the number of messages and may be slower.
You can reduce the number of messages and improve performance by sending many pixels at once: see the count argument (the second argument) of MPI_Send() and MPI_Recv().
Sending a single buffer of 3 ints [pixeli, first_line, first_col] would also cut down on communication.
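Below is a self-contained version of that approach: MPI_Ssend() for the three metadata values, and a single MPI_Send()/MPI_Recv() pair for the whole pixel buffer.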
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <string.h>
#include "mpi.h"

int main(int argc,char *argv[])
{
    int rank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int pixeli=1000000;
    int received_pixels;
    int first_line,first_col,start_line_recv,start_col_recv;
    int tag=0;
    int results[pixeli];
    int i,j;
    for(i=0;i<pixeli;i++){
        results[i]=rank*pixeli+i;
    }
    int* results_recv;
    int rc;
    MPI_Status Stat;
    if (rank != 0) {
        // send the number of processed pixels
        rc = MPI_Ssend(&pixeli, 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
        // send the coordinates where processing started
        rc = MPI_Ssend(&first_line, 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
        rc = MPI_Ssend(&first_col, 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
        MPI_Send(&results[0], pixeli, MPI_INT, 0, tag, MPI_COMM_WORLD);
        //for (i = 0; i < pixeli; i++) {
        //    rc = MPI_Send(&results[i], 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
        //}
    }
    else {
        for (i = 1; i < size; i++) {
            rc = MPI_Recv(&received_pixels, 1, MPI_INT, i, tag, MPI_COMM_WORLD, &Stat);
            results_recv = (int*) calloc (received_pixels, sizeof(int));
            rc = MPI_Recv(&start_line_recv, 1, MPI_INT, i, tag, MPI_COMM_WORLD, &Stat);
            rc = MPI_Recv(&start_col_recv, 1, MPI_INT, i, tag, MPI_COMM_WORLD, &Stat);
            MPI_Recv(&results_recv[0], received_pixels, MPI_INT, i, tag, MPI_COMM_WORLD, &Stat);
            //for (j = 0; j < received_pixels; j++) {
            //    rc = MPI_Recv(&results_recv[j], 1, MPI_INT, i, tag, MPI_COMM_WORLD, &Stat);
            //    printf("proc %d %d\n",rank,results_recv[j]);
            //}
            free(results_recv);
        }
    }
    MPI_Finalize();
    return 0;
}
Bye,
Francis
