I am new to MPI and I am trying to write an implementation of Fox's algorithm (A x B = C, where A and B are n x n matrices). My program works fine, but I would like to see if I can speed it up by overlapping the communication during the shifting of the blocks of matrix B with the computation of the product matrices (the blocks of B are shifted cyclically upward in the algorithm). Each process in the 2D Cartesian grid holds one block of A, B and C, as the algorithm requires. What I currently have is this, inside Fox's algorithm:
if (stage > 0) {
    // shift the b values in all processes
    MPI_Bcast(a_temp, n_local*n_local, MPI_DOUBLE, (rowID + stage) % q, row_comm);
    MPI_Isend(b, n_local*n_local, MPI_DOUBLE, nbrs[UP], 111, grid_comm, &my_request1);
    MPI_Irecv(b, n_local*n_local, MPI_DOUBLE, nbrs[DOWN], 111, grid_comm, &my_request2);
    MPI_Wait(&my_request1, &status);
    MPI_Wait(&my_request2, &status);
    multiplyMatrix(a_temp, b, c, n_local);
}
The submatrices a_temp, b and b_temp are pointers to double, each pointing to a chunk of (n/numprocesses)*(n/numprocesses) elements (the size of a block matrix), e.g. b = (double *) calloc(n/numprocesses * n/numprocesses, sizeof(double)).
I would like to move the multiplyMatrix call before the MPI_Wait calls (that would constitute the overlap of communication and computation), but I am not sure how to do that. Do I need two separate buffers and alternate between them at different stages?
(I know I can use MPI_Sendrecv_replace, but that does not help with overlapping since it uses a blocking send and receive. The same is true for MPI_Sendrecv.)
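For reference, the snippets assume a 2D Cartesian grid setup behind q, rowID, row_comm, grid_comm and nbrs[UP]/nbrs[DOWN]; a minimal sketch of one such setup (not the exact code used here) is:

/* Sketch only: one possible setup for the variables used in the snippets.
   Assumes the number of processes is a perfect square q*q. */
#include <mpi.h>
#include <math.h>

enum { UP = 0, DOWN = 1 };

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int world_size, grid_rank, coords[2], nbrs[2];
    MPI_Comm grid_comm, row_comm;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    int q = (int) sqrt((double) world_size);   /* q x q process grid */
    int dims[2] = {q, q}, periods[2] = {1, 1}; /* periodic, so the shift of b wraps around */

    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &grid_comm);
    MPI_Comm_rank(grid_comm, &grid_rank);
    MPI_Cart_coords(grid_comm, grid_rank, 2, coords);
    int rowID = coords[0];

    /* one communicator per grid row, used for the MPI_Bcast of a_temp */
    MPI_Comm_split(grid_comm, rowID, coords[1], &row_comm);

    /* cyclic up/down neighbours along the grid column, used for shifting b */
    MPI_Cart_shift(grid_comm, 0, 1, &nbrs[UP], &nbrs[DOWN]);

    /* ... Fox's algorithm stages go here ... */

    MPI_Finalize();
    return 0;
}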
I actually figured out how to do this. This question should probably be removed, but since I am new to MPI I will post the solutions here; if anyone has suggestions for improvements I would be happy if they shared them. Method 1:
// Fox's algorithm
double * b_buffers[2];
b_buffers[0] = (double *) malloc(n_local*n_local*sizeof(double));
b_buffers[1] = b;
for (stage = 0; stage < q; stage++) {
    // copy a into a_temp and broadcast a_temp of each process to all other processes in its row
    for (i = 0; i < n_local*n_local; i++)
        a_temp[i] = a[i];
    if (stage == 0) {
        MPI_Bcast(a_temp, n_local*n_local, MPI_DOUBLE, (rowID + stage) % q, row_comm);
        multiplyMatrix(a_temp, b, c, n_local);
        MPI_Isend(b, n_local*n_local, MPI_DOUBLE, nbrs[UP], 111, grid_comm, &my_request1);
        MPI_Irecv(b, n_local*n_local, MPI_DOUBLE, nbrs[DOWN], 111, grid_comm, &my_request2);
        MPI_Wait(&my_request2, &status);
        MPI_Wait(&my_request1, &status);
    }
    if (stage > 0) {
        // shift the b values in all processes
        MPI_Bcast(a_temp, n_local*n_local, MPI_DOUBLE, (rowID + stage) % q, row_comm);
        MPI_Isend(b_buffers[stage % 2], n_local*n_local, MPI_DOUBLE, nbrs[UP], 111, grid_comm, &my_request1);
        MPI_Irecv(b_buffers[(stage+1) % 2], n_local*n_local, MPI_DOUBLE, nbrs[DOWN], 111, grid_comm, &my_request2);
        multiplyMatrix(a_temp, b_buffers[stage % 2], c, n_local);
        MPI_Wait(&my_request2, &status);
        MPI_Wait(&my_request1, &status);
    }
}
Method 2:
// Fox's algorithm
for (stage = 0; stage < q; stage++) {
    // copy a into a_temp and broadcast a_temp of each process to all other processes in its row
    for (i = 0; i < n_local*n_local; i++)
        a_temp[i] = a[i];
    if (stage == 0) {
        MPI_Bcast(a_temp, n_local*n_local, MPI_DOUBLE, (rowID + stage) % q, row_comm);
        multiplyMatrix(a_temp, b, c, n_local);
        MPI_Isend(b, n_local*n_local, MPI_DOUBLE, nbrs[UP], 111, grid_comm, &my_request1);
        MPI_Irecv(b, n_local*n_local, MPI_DOUBLE, nbrs[DOWN], 111, grid_comm, &my_request2);
        MPI_Wait(&my_request2, &status);
        MPI_Wait(&my_request1, &status);
    }
    if (stage > 0) {
        // shift the b values in all processes
        memcpy(b_temp, b, n_local*n_local*sizeof(double));
        MPI_Bcast(a_temp, n_local*n_local, MPI_DOUBLE, (rowID + stage) % q, row_comm);
        MPI_Isend(b, n_local*n_local, MPI_DOUBLE, nbrs[UP], 111, grid_comm, &my_request1);
        MPI_Irecv(b, n_local*n_local, MPI_DOUBLE, nbrs[DOWN], 111, grid_comm, &my_request2);
        multiplyMatrix(a_temp, b_temp, c, n_local);
        MPI_Wait(&my_request2, &status);
        MPI_Wait(&my_request1, &status);
    }
}
Both of these seem to work, but as I said, I am new to MPI, so if you have any comments or suggestions, please share them.
In this code I am trying to broadcast using non-blocking send and receive as a practice exercise. I have several questions and issues:
1. Should I pair Isend() and Irecv() so that they use the same request?
2. When the message is an array, how should it be passed? In this case, message or &message?
3. Why can't I run this code on fewer or more than 8 processors? If a rank doesn't exist, shouldn't the program just skip that piece of code?
4. The snippet at the bottom is there to print the total time once, but the Waitall() does not work, and I do not understand why.
5. When passing arrays longer than 2^12 I get a segmentation fault, even though I have checked the limits of Isend() and Irecv() and they are supposed to handle even bigger messages.
6. I used long double to record the time; is this a common or good practice? When I used smaller types like float or double I would get NaN.
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <mpi.h>

int main(int argc, char *argv[]){
    MPI_Init(&argc, &argv);
    int i, rank, size, ready;
    long int N = pow(2, 10);
    float* message = (float *)malloc(sizeof(float *) * N + 1);
    long double start, end;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    //MPI_Request* request = (MPI_Request *)malloc(sizeof(MPI_Request *) * size);
    MPI_Request request[size-1];

    /*Stage I: -np 8*/
    if(rank == 0){
        for(i = 0; i < N; i++){
            message[i] = N*rand();
            message[i] /= rand();
        }
        start = MPI_Wtime();
        MPI_Isend(&message, N, MPI_FLOAT, 1, 0, MPI_COMM_WORLD, &request[0]);
        MPI_Isend(&message, N, MPI_FLOAT, 2, 0, MPI_COMM_WORLD, &request[1]);
        MPI_Isend(&message, N, MPI_FLOAT, 4, 0, MPI_COMM_WORLD, &request[3]);
        printf("Processor root-rank %d- sent the message...\n", rank);
    }
    if (rank == 1){
        MPI_Irecv(&message, N, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &request[0]);
        MPI_Wait(&request[0], MPI_STATUS_IGNORE);
        printf("Processor rank 1 received the message.\n");
        MPI_Isend(&message, N, MPI_FLOAT, 3, 0, MPI_COMM_WORLD, &request[2]);
        MPI_Isend(&message, N, MPI_FLOAT, 5, 0, MPI_COMM_WORLD, &request[4]);
    }
    if(rank == 2){
        MPI_Irecv(&message, N, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &request[1]);
        MPI_Wait(&request[1], MPI_STATUS_IGNORE);
        printf("Processor rank 2 received the message.\n");
        MPI_Isend(&message, N, MPI_FLOAT, 6, 0, MPI_COMM_WORLD, &request[5]);
    }
    if(rank == 3){
        MPI_Irecv(&message, N, MPI_FLOAT, 1, 0, MPI_COMM_WORLD, &request[2]);
        MPI_Wait(&request[2], MPI_STATUS_IGNORE);
        printf("Processor rank 3 received the message.\n");
        MPI_Isend(&message, N, MPI_FLOAT, 7, 0, MPI_COMM_WORLD, &request[6]);
    }
    if(rank == 4){
        MPI_Irecv(&message, N, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &request[3]);
        MPI_Wait(&request[3], MPI_STATUS_IGNORE);
        printf("Processor rank 4 received the message.\n");
    }
    if(rank == 5){
        MPI_Irecv(&message, N, MPI_FLOAT, 1, 0, MPI_COMM_WORLD, &request[4]);
        MPI_Wait(&request[4], MPI_STATUS_IGNORE);
        printf("Processor rank 5 received the message.\n");
    }
    if(rank == 6){
        MPI_Irecv(&message, N, MPI_FLOAT, 2, 0, MPI_COMM_WORLD, &request[5]);
        MPI_Wait(&request[5], MPI_STATUS_IGNORE);
        printf("Processor rank 6 received the message.\n");
    }
    if(rank == 7){
        MPI_Irecv(&message, N, MPI_FLOAT, 3, 0, MPI_COMM_WORLD, &request[6]);
        MPI_Wait(&request[6], MPI_STATUS_IGNORE);
        printf("Processor rank 7 received the message.\n");
    }
    /*MPI_Testall(size-1, request, &ready, MPI_STATUS_IGNORE);*/
    /*if (ready){*/
        end = MPI_Wtime();
        printf("Total Time: %Lf\n", end - start);
    /*}*/
    MPI_Finalize();
}
1. Each MPI task runs in its own address space, so there is no correlation between request[1] on rank 0 and request[1] on rank 2. That means you do not have to "pair" the requests. That being said, if you think "pairing" the requests improves the readability of your code, you might want to do so even if it is not required.
2. The buffer parameter of MPI_Isend() and MPI_Irecv() is a pointer to the start of the data; that is message (and not &message) here.
3. If you run with, let's say, 2 MPI tasks, MPI_Send(..., dest=2, ...) on rank 0 will fail because 2 is an invalid rank in the MPI_COMM_WORLD communicator.
4. Many requests are uninitialized when MPI_Waitall() (well, MPI_Testall() here) is invoked. One option is to first initialize all of them to MPI_REQUEST_NULL.
5. Using &message results in memory corruption, and that likely explains the crash.
6. From the MPI standard, the prototype is double MPI_Wtime(), so you'd rather use double here (the NaN likely comes from the memory corruption described above).
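Putting these points together, a minimal sketch of the corrected pattern (only two ranks are shown for brevity; the message length is illustrative, this is not a drop-in replacement for the 8-rank tree above):

/* Minimal sketch of the fixes described above. */
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);

    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    const int N = 1024;
    float *message = malloc(sizeof(float) * N);      /* sizeof(float), not sizeof(float *) */

    MPI_Request request[2];
    for (int r = 0; r < 2; r++)
        request[r] = MPI_REQUEST_NULL;               /* so Waitall only sees valid requests */

    double start = MPI_Wtime();                      /* MPI_Wtime() returns double */

    if (rank == 0 && size > 1)
        /* pass message, not &message: the buffer argument is the start of the data */
        MPI_Isend(message, N, MPI_FLOAT, 1, 0, MPI_COMM_WORLD, &request[0]);
    if (rank == 1)
        MPI_Irecv(message, N, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &request[1]);

    MPI_Waitall(2, request, MPI_STATUSES_IGNORE);    /* MPI_REQUEST_NULL entries complete at once */

    double end = MPI_Wtime();
    if (rank == 0)
        printf("Total Time: %f\n", end - start);

    free(message);
    MPI_Finalize();
    return 0;
}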
I am distributing an adjacency matrix row by row to the processes using the MPI_Send and MPI_Recv functions. I used MPI_Barrier, but the program is getting stuck! How do I wait until all the processes have received their part of the matrix?
/* Distribute the adjacency matrix */
if( my_rank == 0) {
    int row_start_l, row_end_l, dest_l;
    for(dest_l=1; dest_l < comm_size; dest_l++) {
        row_start_l = dest_l*(num_nodes/comm_size);
        if( dest_l != (comm_size - 1) ) {
            row_end_l = (dest_l + 1)*(num_nodes/comm_size) - 1;
        }
        else {
            row_end_l = num_nodes - 1;
        }
        for(i = row_start_l; i <= row_end_l; i++) {
            MPI_Send(&g.matrix[i][1], 1, MPI_INT, dest_l, TAG_AM_DATA, MPI_COMM_WORLD);
            // Send adjacency matrix to appropriate destinations. You can first send the appropriate size
            MPI_Send(&g.matrix[i], (g.matrix[i][1])+2, MPI_INT, dest_l, TAG_AM_DATA, MPI_COMM_WORLD);
        }
    }
    for(j=0; j < num_nodes; ) {
        for(k=0; k < 120; k++) {
            if(j >= num_nodes) {
                break;
            }
            sendrecv_buffer_double[k] = g.column_denominator[j];
            j++;
        }
        MPI_Bcast(&k, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
        MPI_Bcast(&sendrecv_buffer_double[0], k, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    }
    cnt++;
}
else {
    int recv_count;
    int recvd;
    cnt++;
    adj_matrix = (int **)malloc(my_num_rows*sizeof(int*));
    for(i=0; i < my_num_rows; i++) {
        MPI_Recv(&recv_count, 1, MPI_INT, 0, TAG_AM_DATA, MPI_COMM_WORLD, &status);
        adj_matrix[i] = (int *)malloc((2 + recv_count)*sizeof(int));
        // Receive adjacency matrix from root.
        MPI_Recv(&adj_matrix[i], 2+recv_count, MPI_INT, 0, TAG_AM_DATA, MPI_COMM_WORLD, &status);
    }
    recvd = 0;
    while(recvd < num_nodes) {
        MPI_Bcast(&recv_count, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
        MPI_Bcast(&g.column_denominator[recvd], recv_count, MPI_DOUBLE, 0, MPI_COMM_WORLD);
        recvd += recv_count;
    }
}

// Wait till all the processes have their assigned parts of the matrix.
MPI_Barrier(MPI_COMM_WORLD);  // I am getting the error here
Error message:
Process 0 reading the input file.. Number of nodes = 100
Process 0 done reading..
Fatal error in PMPI_Barrier: Message truncated, error stack:
PMPI_Barrier(426)...................: MPI_Barrier(MPI_COMM_WORLD) failed
MPIR_Barrier_impl(308)..............:
MPIR_Bcast_impl(1369)...............:
MPIR_Bcast_intra(1199)..............:
MPIR_Bcast_binomial(149)............:
MPIC_Recv(109)......................:
MPIDI_CH3U_Request_unpack_uebuf(605): Message truncated; 8 bytes received but buffer size is 1
I'm not too sure what your "adjacency matrix" looks like and how it must be distributed, but I guess this is a job for MPI_Scatter() rather than a series of MPI_Bcast()...
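For illustration, a rough sketch of what that could look like if the rows are packed contiguously on the root; flat_matrix, row_len and the counts below are assumed names rather than variables from the question's code:

/* Sketch: distributing num_nodes rows of width row_len with MPI_Scatterv.
   Assumes the whole matrix is stored contiguously on the root in flat_matrix. */
int *sendcounts = malloc(comm_size * sizeof(int));
int *displs     = malloc(comm_size * sizeof(int));

int base = num_nodes / comm_size, offset = 0;
for (int p = 0; p < comm_size; p++) {
    int rows = (p == comm_size - 1) ? num_nodes - p * base : base;  /* last rank gets the remainder */
    sendcounts[p] = rows * row_len;
    displs[p]     = offset;
    offset       += rows * row_len;
}

int my_rows  = (my_rank == comm_size - 1) ? num_nodes - my_rank * base : base;
int *my_block = malloc(my_rows * row_len * sizeof(int));

/* every rank receives its own slice in one collective call; no barrier is needed afterwards */
MPI_Scatterv(flat_matrix, sendcounts, displs, MPI_INT,
             my_block, my_rows * row_len, MPI_INT,
             0, MPI_COMM_WORLD);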
When doing the final reduction (summation of a bunch of matrices in my program), as follows
struct Tomo {
    typedef Eigen::Matrix<int, HISTOGRAM_BOXES, HISTOGRAM_BOXES, Eigen::RowMajor> HistoMtx;
    HistoMtx exp_val;
    HistoMtx u;
    [...]

struct buffer_set {
    Tomo * X;
    Tomo * Y;
    Tomo * Z;
} buffers[2];

[...]

if(rank == 0){
    /* MASTER NODE */
    for(int source=1; source<size; source++){
        printf("Reducing from %i\n", source);
        for(int i=0;i<env_count;i++){
            MPI_Recv(buffers[1].X[i].exp_val.data(), buffers[1].X[i].exp_val.size(), MPI_INT, source, 0, MPI_COMM_WORLD, &status);
            MPI_Recv(buffers[1].Y[i].exp_val.data(), buffers[1].Y[i].exp_val.size(), MPI_INT, source, 0, MPI_COMM_WORLD, &status);
            MPI_Recv(buffers[1].Z[i].exp_val.data(), buffers[1].Z[i].exp_val.size(), MPI_INT, source, 0, MPI_COMM_WORLD, &status);
            MPI_Recv(buffers[1].X[i].u.data(), buffers[1].X[i].u.size(), MPI_INT, source, 0, MPI_COMM_WORLD, &status);
            MPI_Recv(buffers[1].Y[i].u.data(), buffers[1].Y[i].u.size(), MPI_INT, source, 0, MPI_COMM_WORLD, &status);
            MPI_Recv(buffers[1].Z[i].u.data(), buffers[1].Z[i].u.size(), MPI_INT, source, 0, MPI_COMM_WORLD, &status);
        }
        merge_buffers(0, 1);
    }
    WriteH5File("h5file.h5", 0);
}else{
    /* SLAVE NODES */
    for(int i=0;i<env_count;i++){
        MPI_Send(buffers[0].X[i].exp_val.data(), buffers[0].X[i].exp_val.size(), MPI_INT, 0, 0, MPI_COMM_WORLD);
        MPI_Send(buffers[0].Y[i].exp_val.data(), buffers[0].Y[i].exp_val.size(), MPI_INT, 0, 0, MPI_COMM_WORLD);
        MPI_Send(buffers[0].Z[i].exp_val.data(), buffers[0].Z[i].exp_val.size(), MPI_INT, 0, 0, MPI_COMM_WORLD);
        MPI_Send(buffers[0].X[i].u.data(), buffers[0].X[i].u.size(), MPI_INT, 0, 0, MPI_COMM_WORLD);
        MPI_Send(buffers[0].Y[i].u.data(), buffers[0].Y[i].u.size(), MPI_INT, 0, 0, MPI_COMM_WORLD);
        MPI_Send(buffers[0].Z[i].u.data(), buffers[0].Z[i].u.size(), MPI_INT, 0, 0, MPI_COMM_WORLD);
    }
}
the pbs_mom process dies. When running the program in an interactive session, I find the following in my logs
[compute-35-3.local:01139] [[33012,0],2] ORTED_CMD_PROCESSOR: STUCK IN INFINITE LOOP - ABORTING
[compute-35-3:01139] *** Process received signal ***
I don't understand what this means or what would trigger it. It seems quite internal to OpenMPI.
Could be an issue with the underlying network or something else that might require administrator attention. For example:
http://www.open-mpi.org/community/lists/users/2010/08/14130.php
http://lists.mcs.anl.gov/pipermail/petsc-users/2013-August/018470.html
I am trying to write my own MPI function that computes the smallest number in a vector and broadcasts it to all processes. I treat the processes as a binary tree and find the minimum as I move from the leaves to the root, then send the result from the root back down through its children. But I get a segmentation fault when trying to receive the minimum value from the left child (process rank 3) of process rank 1, in a run with just 4 processes ranked 0 to 3.
void Communication::ReduceMin(double &partialMin, double &totalMin)
{
    MPI_Barrier(MPI_COMM_WORLD);
    double *leftChild, *rightChild;
    leftChild = (double *)malloc(sizeof(double));
    rightChild = (double *)malloc(sizeof(double));
    leftChild[0] = rightChild[0] = 1e10;
    cout<<"COMM REDMIN: "<<myRank<<" "<<partialMin<<" "<<nProcs<<endl;
    MPI_Status *status;

    //MPI_Recv from 2*i+1 and 2*i+2
    if(nProcs > 2*myRank+1)
    {
        cout<<myRank<<" waiting from "<<2*myRank+1<<" for "<<leftChild[0]<<endl;
        MPI_Recv((void *)&leftChild[0], 1, MPI_DOUBLE, 2*myRank+1, 2*myRank+1, MPI_COMM_WORLD, status); //SEG FAULT HERE
        cout<<myRank<<" got from "<<2*myRank+1<<endl;
    }
    if(nProcs > 2*myRank+2)
    {
        cout<<myRank<<" waiting from "<<2*myRank+2<<endl;
        MPI_Recv((void *)rightChild, 1, MPI_DOUBLE, 2*myRank+2, 2*myRank+2, MPI_COMM_WORLD, status);
        cout<<myRank<<" got from "<<2*myRank+2<<endl;
    }

    //find the minimum
    cout<<myRank<<" finding the min"<<endl;
    double myMin = min(min(leftChild[0], rightChild[0]), partialMin);

    //MPI_Send to (i+1)/2-1
    if(myRank!=0)
    {
        cout<<myRank<<" sending "<<myMin<<" to "<<(myRank+1)/2 - 1<<endl;
        MPI_Send((void *)&myMin, 1, MPI_DOUBLE, (myRank+1)/2 - 1, myRank, MPI_COMM_WORLD);
    }

    double min;
    //MPI_Recv from (i+1)/2-1
    if(myRank!=0)
    {
        cout<<myRank<<" waiting from "<<(myRank+1)/2-1<<endl;
        MPI_Recv((void *)&min, 1, MPI_DOUBLE, (myRank+1)/2 - 1, (myRank+1)/2 - 1, MPI_COMM_WORLD, status);
        cout<<myRank<<" got from "<<(myRank+1)/2-1<<endl;
    }
    totalMin = min;

    //MPI_Send to 2*i+1 and 2*i+2
    if(nProcs > 2*myRank+1)
    {
        cout<<myRank<<" sending to "<<2*myRank+1<<endl;
        MPI_Send((void *)&min, 1, MPI_DOUBLE, 2*myRank+1, myRank, MPI_COMM_WORLD);
    }
    if(nProcs > 2*myRank+2)
    {
        cout<<myRank<<" sending to "<<2*myRank+2<<endl;
        MPI_Send((void *)&min, 1, MPI_DOUBLE, 2*myRank+2, myRank, MPI_COMM_WORLD);
    }
}
PS: I know I can use
MPI_Barrier(MPI_COMM_WORLD);
MPI_Reduce((void *)&partialMin, (void *)&totalMin, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
MPI_Bcast((void *)&totalMin, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
But I want to write my own code for fun.
The error is in the way you use the status argument in the receive calls. Instead of passing the address of an MPI_Status instance, you simply pass an uninitialised pointer and that leads to the crash:
MPI_Status *status; // status declared as a pointer and never initialised
...
MPI_Recv((void *)&leftChild[0], 1, MPI_DOUBLE, 2*myRank+1, 2*myRank+1,
MPI_COMM_WORLD, status); // status is an invalid pointer here
You should change your code to:
MPI_Status status;
...
MPI_Recv((void *)&leftChild[0], 1, MPI_DOUBLE, 2*myRank+1, 2*myRank+1,
MPI_COMM_WORLD, &status);
Since you do not examine the status at all in your code, you can simply pass MPI_STATUS_IGNORE in all calls:
MPI_Recv((void *)&leftChild[0], 1, MPI_DOUBLE, 2*myRank+1, 2*myRank+1,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
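As an aside, the MPI_Reduce + MPI_Bcast pair shown in the question is exactly what a single MPI_Allreduce call provides, so the "standard" version can be written in one line (shown only for comparison, since the point of the exercise is to hand-roll it):

MPI_Allreduce(&partialMin, &totalMin, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);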
I would appreciate it if somebody could tell me why this simple MPI send and receive code does not run on two processors when n = 40 (set near the top of main), but works for n <= 30. In other words, if the message size goes beyond a specific threshold (which is not that large, roughly a 1-D array of size 8100), MPI deadlocks.
#include "mpi.h"
#include "stdio.h"
#include "stdlib.h"
#include "iostream"
#include "math.h"
using namespace std;
int main(int argc, char *argv[])
{
int processor_count, processor_rank;
double *buff_H, *buff_send_H;
int N_pa_prim1, l, n, N_p0;
MPI_Status status;
MPI_Init (&argc, &argv);
MPI_Comm_size (MPI_COMM_WORLD, &processor_count);
MPI_Comm_rank (MPI_COMM_WORLD, &processor_rank);
N_pa_prim1=14; l=7; n=40; N_p0=7;
buff_H = new double [n*n*N_p0+1]; //Receive buffer allocation
buff_send_H = new double [n*n*N_p0+1]; //Send buffer allocation
for (int j = 0; j < n*n*N_p0+1; j++)
buff_send_H[j] = 1e-8*rand();
if (processor_rank == 0)
MPI_Send(buff_send_H, n*n*N_p0+1, MPI_DOUBLE, 1, 163, MPI_COMM_WORLD);
else if(processor_rank == 1)
MPI_Send(buff_send_H, n*n*N_p0+1, MPI_DOUBLE, 0, 163, MPI_COMM_WORLD);
MPI_Recv(buff_H, n*n*N_p0+1, MPI_DOUBLE, MPI_ANY_SOURCE, 163, MPI_COMM_WORLD, &status);
cout << "Received successfully by " << processor_rank << endl;
MPI_Finalize();
return 0;
}
The deadlocking is correct behaviour; you have a deadlock in your code.
The MPI Specification allows MPI_Send to behave as MPI_Ssend -- that is, to be blocking. A blocking communications primitive does not return until the communications "have completed" in some sense, which (in the case of a blocking send) probably means the receive has started.
Your code looks like:
If Processor 0:
    Send to processor 1
If Processor 1:
    Send to processor 0
Receive
That is -- the receive doesn't start until the sends have completed. You're sending, but they'll never return, because no one is receiving! (The fact that this works for small messages is an implementation artifact: most MPI implementations use a so-called "eager protocol" for "small enough" messages, but this can't be counted on in general.)
Note that there are other logic errors here, too -- this program will also deadlock for more than 2 processors, as processors of rank >= 2 will be waiting for a message which never comes.
You can fix your program by alternating sends and receives by rank:
if (processor_rank == 0) {
    MPI_Send(buff_send_H, n*n*N_p0+1, MPI_DOUBLE, 1, 163, MPI_COMM_WORLD);
    MPI_Recv(buff_H, n*n*N_p0+1, MPI_DOUBLE, MPI_ANY_SOURCE, 163, MPI_COMM_WORLD, &status);
} else if (processor_rank == 1) {
    MPI_Recv(buff_H, n*n*N_p0+1, MPI_DOUBLE, MPI_ANY_SOURCE, 163, MPI_COMM_WORLD, &status);
    MPI_Send(buff_send_H, n*n*N_p0+1, MPI_DOUBLE, 0, 163, MPI_COMM_WORLD);
}
or by using MPI_Sendrecv (which is a single blocking send-plus-receive, rather than a blocking send followed by a blocking receive):
int sendto;
if (processor_rank == 0)
    sendto = 1;
else if (processor_rank == 1)
    sendto = 0;

if (processor_rank == 0 || processor_rank == 1) {
    MPI_Sendrecv(buff_send_H, n*n*N_p0+1, MPI_DOUBLE, sendto, 163,
                 buff_H, n*n*N_p0+1, MPI_DOUBLE, MPI_ANY_SOURCE, 163,
                 MPI_COMM_WORLD, &status);
}
Or by using non-blocking sends and receives:
MPI_Request reqs[2];
MPI_Status statuses[2];

if (processor_rank == 0) {
    MPI_Isend(buff_send_H, n*n*N_p0+1, MPI_DOUBLE, 1, 163, MPI_COMM_WORLD, &reqs[0]);
} else if (processor_rank == 1) {
    MPI_Isend(buff_send_H, n*n*N_p0+1, MPI_DOUBLE, 0, 163, MPI_COMM_WORLD, &reqs[0]);
}

if (processor_rank == 0 || processor_rank == 1) {
    MPI_Irecv(buff_H, n*n*N_p0+1, MPI_DOUBLE, MPI_ANY_SOURCE, 163, MPI_COMM_WORLD, &reqs[1]);
    MPI_Waitall(2, reqs, statuses);   // only ranks 0 and 1 have posted requests
}
Thank you, Jonathan, for your help. I have chosen the third solution and written code similar to yours, except that I added "for" loops to send a number of messages. This time it doesn't deadlock; however, the processors keep receiving only the last message. (Since the messages are long, I have only printed their last elements to check consistency.)
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
using namespace std;

int main(int argc, char *argv[])
{
    int processor_count, processor_rank;

    //Initialize MPI
    MPI_Init (&argc, &argv);
    MPI_Comm_size (MPI_COMM_WORLD, &processor_count);
    MPI_Comm_rank (MPI_COMM_WORLD, &processor_rank);

    double **buff_H, *buff_send_H;
    int N_pa_prim1, l, n, N_p0, count, temp;
    N_pa_prim1 = 5;  l = 7;  n = 50;  N_p0 = 7;

    MPI_Request reqs[N_pa_prim1];
    MPI_Status statuses[N_pa_prim1];

    buff_H = new double *[N_pa_prim1];          //Receive buffer allocation
    for (int i = 0; i < N_pa_prim1; i++)
        buff_H[i] = new double [n*n*N_p0+1];
    buff_send_H = new double [n*n*N_p0+1];      //Send buffer allocation

    if (processor_rank == 0) {
        for (int i = 0; i < N_pa_prim1; i++){
            for (int j = 0; j < n*n*N_p0+1; j++)
                buff_send_H[j] = 2.0325e-8*rand();
            cout << processor_rank << "\t" << buff_send_H[n*n*N_p0] << "\t" << "Send" << "\t" << endl;
            MPI_Isend(buff_send_H, n*n*N_p0+1, MPI_DOUBLE, 1, 163, MPI_COMM_WORLD, &reqs[i]);
        }
    }
    else if (processor_rank == 1) {
        for (int i = 0; i < N_pa_prim1; i++){
            for (int j = 0; j < n*n*N_p0+1; j++)
                buff_send_H[j] = 3.5871e-8*rand();
            cout << processor_rank << "\t" << buff_send_H[n*n*N_p0] << "\t" << "Send" << "\t" << endl;
            MPI_Isend(buff_send_H, n*n*N_p0+1, MPI_DOUBLE, 0, 163, MPI_COMM_WORLD, &reqs[i]);
        }
    }

    for (int i = 0; i < N_pa_prim1; i++)
        MPI_Irecv(buff_H[i], n*n*N_p0+1, MPI_DOUBLE, MPI_ANY_SOURCE, 163, MPI_COMM_WORLD, &reqs[N_pa_prim1+i]);

    MPI_Waitall(2*N_pa_prim1, reqs, statuses);

    for (int i = 0; i < N_pa_prim1; i++)
        cout << processor_rank << "\t" << buff_H[i][n*n*N_p0] << "\t" << "Receive" << endl;

    MPI_Finalize();
    return 0;
}