Here is my code:
if (rank != 0) {
    // send the number of processed pixels
    rc = MPI_Send(&pixeli, 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
    // send the coordinates where processing started
    rc = MPI_Send(&first_line, 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
    rc = MPI_Send(&first_col, 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
    for (i = 0; i < pixeli; i++) {
        rc = MPI_Send(&results[i], 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
    }
}
else {
    for (i = 1; i < numtasks; i++) {
        rc = MPI_Recv(&received_pixels, 1, MPI_INT, i, tag, MPI_COMM_WORLD, &Stat);
        results_recv = (int*) calloc(received_pixels, sizeof(int));
        rc = MPI_Recv(&start_line_recv, 1, MPI_INT, i, tag, MPI_COMM_WORLD, &Stat);
        rc = MPI_Recv(&start_col_recv, 1, MPI_INT, i, tag, MPI_COMM_WORLD, &Stat);
        for (j = 0; j < received_pixels; j++) {
            rc = MPI_Recv(&results_recv[j], 1, MPI_INT, i, tag, MPI_COMM_WORLD, &Stat);
        }
        free(results_recv);
    }
}
If I run this with 2 processes it works, because one sends and the other one receives.
If I run this with 4 processes I get the following error messages:
Fatal error in MPI_Recv: Other MPI error, error stack:
MPI_Recv(186)...........................: MPI_Recv(buf=0xbff05324, count=1, MPI_INT, src=1, tag=1, MPI_COMM_WORLD, status=0xbff053ec) failed
MPIDI_CH3I_Progress(461)................:
MPID_nem_handle_pkt(636)................:
MPIDI_CH3_PktHandler_EagerShortSend(308): Failed to allocate memory for an unexpected message. 261895 unexpected messages queued.
What should I do to fix this?
These lines:
for (i = 0; i < pixeli; i++) {
    rc = MPI_Send(&results[i], 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
}
and the corresponding MPI_Recvs look like they're essentially reimplementing MPI_Gather. Using MPI_Gather with the count set to pixeli instead of 1 may allow the implementation to schedule the sends and receives more efficiently, but more importantly, it will probably drastically cut down on the total number of send/receive pairs needed to complete the whole batch of communication. You could achieve something similar by removing the for loop and doing:
rc = MPI_Send(&results[0], pixeli, MPI_INT, 0, tag, MPI_COMM_WORLD);
but again, using the built-in MPI_Gather would be the preferred way of doing it.
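For reference, here is a minimal, self-contained sketch of the MPI_Gather approach (my own illustration, not the original poster's code; it assumes every rank, including the root, contributes the same number of values — with unequal counts you would gather the counts first and use MPI_Gatherv):
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"

int main(int argc, char *argv[])
{
    int rank, numtasks, i;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);

    int pixeli = 1000;                               /* same count on every rank */
    int *results = malloc(pixeli * sizeof(int));
    for (i = 0; i < pixeli; i++)
        results[i] = rank * pixeli + i;              /* stand-in for the processed pixels */

    int *all_results = NULL;
    if (rank == 0)
        all_results = malloc((size_t)numtasks * pixeli * sizeof(int));

    /* one collective call instead of (numtasks-1) * pixeli point-to-point messages */
    MPI_Gather(results, pixeli, MPI_INT,
               all_results, pixeli, MPI_INT, 0, MPI_COMM_WORLD);

    if (rank == 0)
        printf("last gathered value: %d\n", all_results[numtasks * pixeli - 1]);

    free(results);
    free(all_results);                               /* free(NULL) is a no-op on non-root ranks */
    MPI_Finalize();
    return 0;
}
A single collective here replaces the flood of small point-to-point messages that was overflowing the unexpected-message queue in your error output.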
The shortest answer is to tell you to use synchronous communication, that is, MPI_Ssend() instead of MPI_Send().
The trouble is that you send too many messages, and they get buffered at the receiver (MPI_Send() only blocks until your send buffer can be reused; small messages are typically delivered eagerly and queued at the destination until a matching receive is posted, which is what the "unexpected messages queued" in the error refers to). The memory consumption goes up until it fails. Synchronous sends avoid that buffering, but they do not reduce the number of messages and may be slower.
You can reduce the number of messages and increase performance by sending many pixels at once: see the second argument of MPI_Send() and MPI_Recv().
Sending a buffer of 3 ints [pixeli, first_line, first_col] would also limit the number of messages (see the short sketch after the example below).
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <string.h>
#include "mpi.h"

int main(int argc, char *argv[])
{
    int rank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int pixeli = 1000000;
    int received_pixels;
    int first_line = 0, first_col = 0, start_line_recv, start_col_recv;
    int tag = 0;
    int results[pixeli];
    int i, j;
    for (i = 0; i < pixeli; i++) {
        results[i] = rank * pixeli + i;
    }
    int *results_recv;
    int rc;
    MPI_Status Stat;

    if (rank != 0) {
        // send the number of processed pixels
        rc = MPI_Ssend(&pixeli, 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
        // send the coordinates where processing started
        rc = MPI_Ssend(&first_line, 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
        rc = MPI_Ssend(&first_col, 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
        MPI_Send(&results[0], pixeli, MPI_INT, 0, tag, MPI_COMM_WORLD);
        //for (i = 0; i < pixeli; i++) {
        //    rc = MPI_Send(&results[i], 1, MPI_INT, 0, tag, MPI_COMM_WORLD);
        //}
    }
    else {
        for (i = 1; i < size; i++) {
            rc = MPI_Recv(&received_pixels, 1, MPI_INT, i, tag, MPI_COMM_WORLD, &Stat);
            results_recv = (int*) calloc(received_pixels, sizeof(int));
            rc = MPI_Recv(&start_line_recv, 1, MPI_INT, i, tag, MPI_COMM_WORLD, &Stat);
            rc = MPI_Recv(&start_col_recv, 1, MPI_INT, i, tag, MPI_COMM_WORLD, &Stat);
            MPI_Recv(&results_recv[0], received_pixels, MPI_INT, i, tag, MPI_COMM_WORLD, &Stat);
            //for (j = 0; j < received_pixels; j++) {
            //    rc = MPI_Recv(&results_recv[j], 1, MPI_INT, i, tag, MPI_COMM_WORLD, &Stat);
            //    printf("proc %d %d\n", rank, results_recv[j]);
            //}
            free(results_recv);
        }
    }

    MPI_Finalize();
    return 0;
}
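As a side note on packing the header, here is a minimal sketch of sending the three values in one message, reusing the variables from the example above (the header array is my own addition, not part of the original code):
int header[3];
if (rank != 0) {
    header[0] = pixeli;
    header[1] = first_line;
    header[2] = first_col;
    MPI_Ssend(header, 3, MPI_INT, 0, tag, MPI_COMM_WORLD);   /* one message instead of three */
} else {
    /* inside the loop over the source ranks i */
    MPI_Recv(header, 3, MPI_INT, i, tag, MPI_COMM_WORLD, &Stat);
    received_pixels = header[0];
    start_line_recv = header[1];
    start_col_recv  = header[2];
}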
Bye,
Francis
I'm trying to send information from one processor to another around a ring, starting from an offset processor, using MPI_Sendrecv, but I get a deadlock. What is wrong in my code? Basically I need to use MPI_Sendrecv to solve this kind of problem.
#include <stdio.h>
#include <unistd.h>
#include <mpi.h>

int main (int argc, char *argv[])
{
    int offset = 9;
    int size, rank, value, next, prev, sendval, recval, namelen;
    double t0, t;
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Get_processor_name(processor_name, &namelen);

    value = 5;
    if (size > 1)
    {
        next = (rank + 1) % size;
        prev = (size + rank - 1) % size;
        sendval = value + rank;
        if (rank == offset)
        {
            MPI_Sendrecv(&sendval, 1, MPI_INT, next, 1, &recval, 1, MPI_INT, prev, 10, MPI_COMM_WORLD, &status);
        }
        else
        {
            MPI_Recv(&recval, 1, MPI_INT, prev, 10, MPI_COMM_WORLD, &status);
            MPI_Send(&sendval, 1, MPI_INT, next, 10, MPI_COMM_WORLD);
        }
    }
    MPI_Finalize();
    return 0;
}
You have mismatched message tags:
MPI_Sendrecv(&sendval, 1, MPI_INT, next, 1, &recval, 1, MPI_INT, prev, 10, MPI_COMM_WORLD, &status);
//                                       ^
...
MPI_Recv(&recval, 1, MPI_INT, prev, 10, MPI_COMM_WORLD, &status);
//                                  ^^
The tag in the send part of the send-receive operation should also be 10.
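That is, the call would become something like:
MPI_Sendrecv(&sendval, 1, MPI_INT, next, 10,   /* send tag changed from 1 to 10 */
             &recval,  1, MPI_INT, prev, 10,   /* matches the tag used by MPI_Recv */
             MPI_COMM_WORLD, &status);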
I have a matrix which I split into parts. Each part is worked on by one processor, and at the end I want every processor to have the full matrix. For example, with A[100][4] split across 2 processors, processor X works on rows 0 to 49 and processor Y works on rows 50 to 99, and finally I want both processors to have A[100][4] with the new values. Here is my code.
#include"mpi.h"
#include<iostream>
#include<cmath>
using namespace std;
#define MASTER 0
#define BEGIN 1
const int COLS=100;
int main(int argc, char *argv[])
{
double t1=MPI_Wtime(),trans_grad[COLS][8];
int taskid,
numtasks,
numworkers,columns,
column_mean,
offset,
tag=BEGIN,
mtag,
source,
ext,left,
size,
msgtype,dest;
MPI_Status status;
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD,&taskid);
MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
numworkers=numtasks-1;
int columns_send[numworkers+1],sum_s;
column_mean=COLS/numworkers;
ext=COLS%numworkers;
offset=0;
if (taskid==MASTER)
{
for (int i=1;i<=numworkers;i++)
{
dest=i;
columns=(i<=ext) ? column_mean+1:column_mean;
MPI_Send(&offset, 1, MPI_INT, dest, tag, MPI_COMM_WORLD);
MPI_Send(&columns, 1, MPI_INT, dest, tag, MPI_COMM_WORLD);
offset=offset+columns;
}
MPI_Finalize();
}
if (taskid!=MASTER)
{
for (int i=0;i<COLS;i++)
{
for (int j=0;j<8;j++)
{
trans_grad[i][j]=0.0;
}
}
source=MASTER;
msgtype=BEGIN;
MPI_Recv(&offset, 1, MPI_INT, source, msgtype, MPI_COMM_WORLD, &status);
MPI_Recv(&columns, 1, MPI_INT, source, msgtype, MPI_COMM_WORLD, &status);
columns_send[taskid]=columns;
for (int i=offset;i<offset+columns;i++)
{
for (int j=0;j<8;j++)
{
trans_grad[i][j]=taskid+1.5;
}
}
for (int j=1;j<=numworkers;j++)
{
if (j!=taskid)
{
mtag=taskid;
source=j;
MPI_Send(&columns_send[taskid], 1, MPI_INT, j, mtag, MPI_COMM_WORLD);
MPI_Recv(&columns_send[j], 1, MPI_INT, source, source, MPI_COMM_WORLD,&status);
}
}
sum_s=0;
for (int yy=taskid-1;yy>=1;yy--)
{
sum_s=sum_s-columns_send[yy];
}
for (int j=1;j<=numworkers;j++)
{
if (j!=taskid)
{
mtag=taskid;
source=j;
MPI_Send(&trans_grad[offset][0], columns_send[taskid]*8, MPI_DOUBLE, j, mtag, MPI_COMM_WORLD);
MPI_Recv(&trans_grad[offset+sum_s][0], columns_send[j]*8, MPI_DOUBLE, source, source, MPI_COMM_WORLD,&status);
}
sum_s=sum_s+columns_send[j];
}
MPI_Finalize();
}
}
The program works fine with COLS = 100 and 150, but it just gets stuck for COLS = 500 and 1000.
I have no idea why this is happening.
This is an MPI code for LU decomposition.
I have used the following strategy:
There is a master (rank 0) and the others are slaves. The master sends rows to each slave. Since each slave might receive more than one row, I store all the received rows in a buffer and then perform the LU decomposition step on it. After doing that, I send the buffer back to the master. The master does not do any computation; it just sends and receives.
for(i=0; i<n; i++)
    map[i] = i%(numProcs-1) + 1;

for(i=0; i<n-1; i++)
{
    if(rank == 0)
    {
        status = pivot(LU,i,n);
        for(j=0; j<n; j++)
            row1[j] = LU[n*i+j];
    }
    MPI_Bcast(&status, 1, MPI_INT, 0, MPI_COMM_WORLD);
    if(status == -1)
        return -1;
    MPI_Bcast(row1, n, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    int tag1 = 1, tag2 = 2, tag3 = 3, tag4 = 4;
    if(rank == 0)
    {
        int pno, start, index, l, rowsReceived = 0;
        MPI_Request req;
        MPI_Status stat;

        for(j=i+1; j<n; j++)
            MPI_Isend(&LU[n*j], n, MPI_DOUBLE, map[j], map[j], MPI_COMM_WORLD, &req);

        if(i>=n-(numProcs-1))
            cnt++;

        for(j=0; j<numProcs-1-cnt; j++)
        {
            MPI_Recv(&pno, 1, MPI_INT, MPI_ANY_SOURCE, tag2, MPI_COMM_WORLD, &stat);
            //printf("1. Recv from %d and j : %d and i : %d\n",pno,j,i);
            MPI_Recv(&rowsReceived, 1, MPI_INT, pno, tag3, MPI_COMM_WORLD, &stat);
            MPI_Recv(rowFinal, n*rowsReceived, MPI_DOUBLE, pno, tag4, MPI_COMM_WORLD, &stat);

            /* Will not go more than numProcs anyways */
            for(k=i+1; k<n; k++)
            {
                if(map[k] == pno)
                {
                    start = k;
                    break;
                }
            }
            for(k=0; k<rowsReceived; k++)
            {
                index = start + k*(numProcs-1);
                for(l=0; l<n; l++)
                    LU[n*index+l] = rowFinal[n*k+l];
            }
        }
    }
    else
    {
        int rowsReceived = 0;
        MPI_Status stat, stats[3];
        MPI_Request reqs[3];

        for(j=i+1; j<n; j++)
            if(map[j] == rank)
                rowsReceived += 1;

        for(j=0; j<rowsReceived; j++)
        {
            MPI_Recv(&rowFinal[n*j], n, MPI_DOUBLE, 0, rank, MPI_COMM_WORLD, &stat);
        }
        for(j=0; j<rowsReceived; j++)
        {
            double factor = rowFinal[n*j+i]/row1[i];
            for(k=i+1; k<n; k++)
                rowFinal[n*j+k] -= (row1[k]*factor);
            rowFinal[n*j+i] = factor;
        }
        if(rowsReceived != 0)
        {
            //printf("Data sent from %d iteration : %d\n",rank,i);
            MPI_Isend(&rank, 1, MPI_INT, 0, tag2, MPI_COMM_WORLD, &reqs[0]);
            MPI_Isend(&rowsReceived, 1, MPI_INT, 0, tag3, MPI_COMM_WORLD, &reqs[1]);
            MPI_Isend(rowFinal, n*rowsReceived, MPI_DOUBLE, 0, tag4, MPI_COMM_WORLD, &reqs[2]);
        }
        //MPI_Waitall(3,reqs,stats);
    }
}
The problem I am facing is that sometimes the program hangs. My guess is that the sends and receives are not being matched, but I am not able to figure out where the problem lies.
I ran test cases on matrices of size 1000x1000, 2000x2000, 3000x3000, 5000x5000 and 7000x7000. Presently the code hangs for 7000x7000. Could someone please help me out?
Things to note:
map implements the mapping scheme, i.e. which row goes to which slave.
rowsReceived tells each slave the number of rows it will receive. I don't need to recalculate it every time, but I will fix that later.
row1 is the buffer in which the active row is stored.
rowFinal is the buffer for the rows being received and modified.
cnt is not important and can be ignored. For that, the check for rowsReceived != 0 needs to be removed.
It looks like you are never completing your nonblocking operations. You have a number of MPI_Isend calls throughout the code (in both the rank 0 branch and the worker branch), but you never do an MPI_Wait or MPI_Test (or one of the similar completion calls) on the resulting requests. Without that completion call, those nonblocking operations never complete.
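For example, in the worker branch the three sends could be completed with the MPI_Waitall that is already commented out in the question, kept inside the rowsReceived != 0 guard so it only waits on requests that were actually started (a sketch, not tested):
if(rowsReceived != 0)
{
    MPI_Isend(&rank, 1, MPI_INT, 0, tag2, MPI_COMM_WORLD, &reqs[0]);
    MPI_Isend(&rowsReceived, 1, MPI_INT, 0, tag3, MPI_COMM_WORLD, &reqs[1]);
    MPI_Isend(rowFinal, n*rowsReceived, MPI_DOUBLE, 0, tag4, MPI_COMM_WORLD, &reqs[2]);
    /* complete the sends before the buffers are reused in the next iteration */
    MPI_Waitall(3, reqs, MPI_STATUSES_IGNORE);
}
The MPI_Isend loop in the rank 0 branch has the same issue: it overwrites req on every iteration without ever completing it, so those requests also need to be stored and waited on (or the sends made blocking).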
I'm trying to create a log file from each processor and then send that to the root as a char array. I first send the length and then I send the data. The length sends fine, but the data is always garbage! Here is my code:
MPI_Barrier (MPI_COMM_WORLD);
string out = "";
MPI_Status status[2];
MPI_Request reqs[num_procs];

string log = "TEST";
int length = log.length();
char* temp = (char *) malloc(length+1);
strcpy(temp, log.c_str());

if (my_id != 0)
{
    MPI_Send (&length, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
    MPI_Send (&temp, length+1, MPI_CHAR, 0, 1, MPI_COMM_WORLD);
}
else {
    int length;
    for (int i = 1; i < num_procs; i++)
    {
        MPI_Recv (&length, 2, MPI_INT, i, 1, MPI_COMM_WORLD, &status[0]);
        char* rec_buf;
        rec_buf = (char *) malloc(length+1);
        MPI_Recv (rec_buf, length+1, MPI_CHAR, i, 1, MPI_COMM_WORLD, &status[1]);
        out += rec_buf;
        free(rec_buf);
    }
}
MPI_Barrier (MPI_COMM_WORLD);
free(temp);
You are passing a char** to MPI_Send instead of a char*, so MPI sends the bytes of the pointer variable itself (and whatever memory follows it) rather than the string, which is the garbage you are seeing. Everything should be fine if you use
MPI_Send (temp, length+1, MPI_CHAR, 0, 1, MPI_COMM_WORLD);
(note the removed & in front of the first argument, temp.)
node00 can successfully send the information to node01, whose rank is 1, but it gets blocked on the second send to node01. Why does this happen? Thanks a lot. I think there is no deadlock in the code.
In the MPI program below there are 5 nodes:
.....
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Get_processor_name(processor_name, &namelen);

MPI_Status status;
int buff;
if(rank == 0)
{
    buff = 123;
    for(int i = 1; i < size; i++){
        MPI_Send(&buff, 1, MPI_INT, i, tag, MPI_COMM_WORLD); //succeed
        MPI_Send(&buff, 1, MPI_INT, i, tag, MPI_COMM_WORLD); //blocked
    }
}
else
{
    MPI_Recv(&buff, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &status); //succeed
    MPI_Recv(&buff, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &status); //blocked
}
............
Why don't you use MPI_Isend and MPI_Irecv? These are non-blocking calls.
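A rough sketch of that pattern, as my own illustration rather than the poster's code (it uses two separate buffers so the two outstanding transfers do not share memory, and assumes rank, size, and tag are set up as in the snippets here):
int buff1 = 123, buff2 = 456;     /* two separate buffers for the two transfers */
MPI_Request reqs[2];

if (rank == 0) {
    for (int i = 1; i < size; i++) {
        MPI_Isend(&buff1, 1, MPI_INT, i, tag, MPI_COMM_WORLD, &reqs[0]);
        MPI_Isend(&buff2, 1, MPI_INT, i, tag, MPI_COMM_WORLD, &reqs[1]);
        MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);   /* complete before the next iteration */
    }
} else {
    MPI_Irecv(&buff1, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &reqs[0]);
    MPI_Irecv(&buff2, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &reqs[1]);
    MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);       /* message order is preserved per sender */
}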
#include <stdio.h>
#include <math.h>
#include <mpi.h>

#define tag 777

int rank;
int size;

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    //MPI_Get_processor_name(processor_name, &namelen);
    MPI_Status status;
    int buff;
    int i;
    if(rank == 0)
    {
        printf("hai");
        buff = 123;
        for(i = 1; i < size; i++){
            MPI_Send(&buff, 1, MPI_INT, i, tag, MPI_COMM_WORLD); //succeed
            MPI_Send(&buff, 1, MPI_INT, i, tag, MPI_COMM_WORLD); //blocked
        }
    }
    else
    {
        printf("hello");
        MPI_Recv(&buff, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &status); //succeed
        MPI_Recv(&buff, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &status); //blocked
    }
    //MPI_Finalize(); //finalize MPI operations
    return 0;
}
You should call MPI_Finalize, like this:
#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int rank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int buff, tag = 99;
    MPI_Status status;

    if (rank == 0)
    {
        buff = 123;
        for (int i = 1; i < size; i++)
        {
            MPI_Send(&buff, 1, MPI_INT, i, tag, MPI_COMM_WORLD); //succeed
            MPI_Send(&buff, 1, MPI_INT, i, tag, MPI_COMM_WORLD); //blocked
        }
    }
    else
    {
        MPI_Recv(&buff, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &status); //succeed
        printf("%d\n", buff);
        MPI_Recv(&buff, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &status); //blocked
        printf("%d\n", buff);
    }
    MPI_Finalize();
    return 0;
}