So I have some code where I am using MPI_Bcast to send information from the root node to all nodes, but instead I want my P0 to send chunks of the array to individual processes.
How do I do this with MPI_Send and MPI_Recv?
I've never used them before and I don't know if I need to loop my MPI_Recv to effectively receive everything or what.
I've put giant caps-lock comments in the code where I need to replace my MPI_Bcast(); sorry in advance for the waterfall of code.
Code:
#include "mpi.h"
#include <stdio.h>
#include <math.h>
#define MAXSIZE 10000000
int add(int *A, int low, int high)
{
    int res = 0, i;
    for (i = low; i <= high; i++)
        res += A[i];
    return res;
}

int main(int argc, char *argv[])
{
    int myid, numprocs, x;
    int data[MAXSIZE];
    int i, low, high, myres, res;
    double elapsed_time;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);

    if (myid == 0)
    {
        for (i = 0; i < MAXSIZE; i++)
            data[i] = 1;
    }

    /* start the timer */
    elapsed_time = -MPI_Wtime();

    //THIS IS WHERE I GET CONFUSED ABOUT MPI_SEND AND MPI_RECV!!!
    MPI_Bcast(data, MAXSIZE, MPI_INT, 0, MPI_COMM_WORLD);

    x = MAXSIZE / numprocs;
    low = myid * x;
    high = low + x - 1;
    if (myid == numprocs - 1)
        high = MAXSIZE - 1;

    myres = add(data, low, high);
    printf("I got %d from %d\n", myres, myid);

    MPI_Reduce(&myres, &res, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    /* stop the timer */
    elapsed_time += MPI_Wtime();

    if (myid == 0)
        printf("The sum is %d, time taken = %f.\n", res, elapsed_time);

    MPI_Barrier(MPI_COMM_WORLD);
    printf("The sum is %d at process %d.\n", res, myid);

    MPI_Finalize();
    return 0;
}
You need MPI_Scatter. A good intro is here: http://mpitutorial.com/tutorials/mpi-scatter-gather-and-allgather/
I think in your code it could look like this:
int elements_per_proc = MAXSIZE / numprocs;
// Create a buffer that will hold a chunk of the global array
int *data_chunk = malloc(sizeof(int) * elements_per_proc);
MPI_Scatter(data, elements_per_proc, MPI_INT, data_chunk,
            elements_per_proc, MPI_INT, 0, MPI_COMM_WORLD);
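Each rank (the root included) then sums its own chunk instead of indexing into the global array; a minimal sketch reusing the add() helper from the question, and assuming MAXSIZE divides evenly by numprocs:
// Offsets into the global array are no longer needed: every rank
// works on elements_per_proc values starting at index 0 of its chunk.
myres = add(data_chunk, 0, elements_per_proc - 1);
MPI_Reduce(&myres, &res, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);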
If you really want to use MPI_Send and MPI_Recv, then you can use something like this:
int x = MAXSIZE / numprocs;
int *procData = malloc(sizeof(int) * x);
MPI_Status status;
if (myid == 0) {
    // Rank 0 already owns data[0..x-1], so only ranks 1..numprocs-1 need a message.
    for (int i = 1; i < numprocs; i++) {
        MPI_Send(data + i*x, x, MPI_INT, i, 0, MPI_COMM_WORLD);
    }
} else {
    MPI_Recv(procData, x, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
}
I am distributing an adjacency matrix row by row to the processes using the MPI_Send and MPI_Recv functions. I used MPI_Barrier but the program is getting stuck! How do I wait until all the processes get their part of the matrix?
/* Distribute the adjacency matrix */
if( my_rank == 0) {
int row_start_l, row_end_l, dest_l;
for(dest_l=1; dest_l < comm_size; dest_l++) {
row_start_l = dest_l*(num_nodes/comm_size);
if( dest_l != (comm_size - 1) ) {
row_end_l = (dest_l + 1)*(num_nodes/comm_size) - 1;
}
else {
row_end_l = num_nodes - 1;
}
for(i = row_start_l; i <= row_end_l; i++) {
MPI_Send(&g.matrix[i][1], 1, MPI_INT, dest_l, TAG_AM_DATA, MPI_COMM_WORLD);
// Send Adjacency matrix to appropriate destinations. You can first send the appropriate size
MPI_Send(&g.matrix[i], (g.matrix[i][1])+2, MPI_INT, dest_l, TAG_AM_DATA, MPI_COMM_WORLD);
}
}
for(j=0; j < num_nodes; ) {
for(k=0; k < 120; k++) {
if(j >= num_nodes) {
break;
}
sendrecv_buffer_double[k] = g.column_denominator[j];
j++;
}
MPI_Bcast(&k, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&sendrecv_buffer_double[0], k, MPI_DOUBLE, 0, MPI_COMM_WORLD);
}
cnt++;
}
else {
int recv_count;
int recvd;
cnt++;
adj_matrix = (int **)malloc(my_num_rows*sizeof(int*));
for(i=0; i < my_num_rows; i++) {
MPI_Recv(&recv_count, 1, MPI_INT, 0, TAG_AM_DATA, MPI_COMM_WORLD, &status);
adj_matrix[i] = (int *)malloc((2 + recv_count)*sizeof(int));
// Receive adjacency matrix from root.
MPI_Recv(&adj_matrix[i], 2+recv_count, MPI_INT, 0, TAG_AM_DATA, MPI_COMM_WORLD, &status);
}
recvd = 0;
while(recvd < num_nodes) {
MPI_Bcast(&recv_count, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&g.column_denominator[recvd], recv_count, MPI_DOUBLE, 0, MPI_COMM_WORLD);
recvd += recv_count;
}
}
// Wait till all the processes have their assigned parts of the matrix.
MPI_Barrier(MPI_COMM_WORLD);//I am getting error here
Error message:
Process 0 reading the input file.. Number of nodes = 100 Process 0
done reading.. Fatal error in PMPI_Barrier: Message truncated, error
stack: PMPI_Barrier(426)...................:
MPI_Barrier(MPI_COMM_WORLD) failed
MPIR_Barrier_impl(308)..............:
MPIR_Bcast_impl(1369)...............:
MPIR_Bcast_intra(1199)..............:
MPIR_Bcast_binomial(149)............:
MPIC_Recv(109)......................:
MPIDI_CH3U_Request_unpack_uebuf(605): Message truncated; 8 bytes
received but buffer size is 1
I'm not too sure of what your "adjacency matrix" looks like and how it must be distributed, but I guess this is a job for MPI_Scatter() rather than a series of MPI_Bcast()...
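If the rows are stored contiguously, a hedged sketch of that with MPI_Scatterv (flat_matrix and the block-of-rows split are assumptions here, since the actual layout of g.matrix isn't shown):
// Assumption: the matrix is flattened into one contiguous array of
// num_nodes*num_nodes ints, and each rank takes a contiguous block of
// rows, with the last rank absorbing the remainder.
int *counts = malloc(comm_size * sizeof(int));
int *displs = malloc(comm_size * sizeof(int));
int base = num_nodes / comm_size;
for (int r = 0; r < comm_size; r++) {
    int rows = (r == comm_size - 1) ? num_nodes - base * r : base;
    counts[r] = rows * num_nodes;
    displs[r] = r * base * num_nodes;
}
int *my_rows = malloc(counts[my_rank] * sizeof(int));
MPI_Scatterv(flat_matrix, counts, displs, MPI_INT,
             my_rows, counts[my_rank], MPI_INT, 0, MPI_COMM_WORLD);
Since MPI_Scatterv is a collective that every rank calls, there is no send/receive pairing to get wrong, and the MPI_Barrier afterwards becomes unnecessary.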
I'm trying to divide the string str_data1 and send the pieces to the slave processes in MPI_COMM_WORLD, but I am getting an error on the slaves.
The error looks something like this:
2
2
3
�E0� �E0�� �E0O�
#include <stdio.h>
#include <string.h>
#include "mpi.h"
int main(int argc, char* argv[]) {
int rank;
int p;
MPI_Status status;
int msg_size = 0;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &p);
char *str_data1 = "Samsung Galaxy Tab S 10.5 Wi-Fi, Tablet PC Android";
int len1 = strlen(str_data1), i;
char *str1[len1];
char *a[len1];
if (rank == 0) {
char *ds = strdup(str_data1);
int n = 0;
a[n] = strtok(ds, " ,");
while (a[n] && n < len1) {
a[++n] = strtok(NULL, " ,");
}
int chunk = n / p;
int str_size = chunk;
for (i = 1; i < p; i++) {
if (i == p - 1) {
str_size = n - chunk * i;
}
MPI_Send(&str_size, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
MPI_Send(&a, str_size + 1, MPI_CHAR, i, 0, MPI_COMM_WORLD);
}
} else {
MPI_Recv(&msg_size, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
char message[msg_size];
printf(" \n %d ", msg_size);
MPI_Recv(&message, msg_size + 1, MPI_CHAR, 0, 0, MPI_COMM_WORLD,
&status);
printf(" \n %s ", message);
}
MPI_Finalize();
return 0;
}
Does anyone have any clues what I'm doing wrong? Thanks.
Some newer compilers will give you a nice warning about these sorts of things. It's been very nice ever since Clang added this. If you were to use that compiler, you'd see this:
$ mpic++ asdf.c
clang: warning: treating 'c' input as 'c++' when in C++ mode, this behavior is deprecated
asdf.c:17:23: warning: conversion from string literal to 'char *' is deprecated [-Wc++11-compat-deprecated-writable-strings]
char *str_data1 = "Samsung Galaxy Tab S 10.5 Wi-Fi, Tablet PC Android";
^
asdf.c:39:22: warning: argument type 'char *(*)[len1]' doesn't match specified 'MPI' type tag that requires 'char *' [-Wtype-safety]
MPI_Send(&a, str_size + 1, MPI_CHAR, i, 0, MPI_COMM_WORLD);
^~ ~~~~~~~~
asdf.c:47:18: warning: argument type 'char (*)[msg_size]' doesn't match specified 'MPI' type tag that requires 'char *' [-Wtype-safety]
MPI_Recv(&message, msg_size + 1, MPI_CHAR, 0, 0, MPI_COMM_WORLD,
^~~~~~~~ ~~~~~~~~
3 warnings generated.
That shows you that you're using the wrong types for your character arrays. You should be using either just character arrays:
char a[len1];
or character pointers:
char *a;
Either way, you need to do some code rework to make that work correctly. Specifically, this section:
char *ds = strdup(str_data1);
int n = 0;
a[n] = strtok(ds, " ,");
while (a[n] && n < len1) {
a[++n] = strtok(NULL, " ,");
}
I don't think that string tokenizer is doing what you think it is since it would just be overwriting itself over and over.
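One hedged way to restructure the send side (a sketch, not the only fix): keep the strtok results in the char *a[len1] array of pointers, then send each token's length followed by its characters, so each slave knows how much to expect:
// Root: deal the n tokens out round-robin to ranks 1..p-1.
for (int t = 0; t < n; t++) {
    int dest = t % (p - 1) + 1;
    int tok_len = strlen(a[t]);
    MPI_Send(&tok_len, 1, MPI_INT, dest, 0, MPI_COMM_WORLD);
    MPI_Send(a[t], tok_len + 1, MPI_CHAR, dest, 0, MPI_COMM_WORLD); // +1 ships the '\0'
}
// Slave: receive the length first, then the token itself.
int tok_len;
MPI_Recv(&tok_len, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
char *token = malloc(tok_len + 1);
MPI_Recv(token, tok_len + 1, MPI_CHAR, 0, 0, MPI_COMM_WORLD, &status);
Each slave would also need to know how many tokens to expect (for instance by sending the per-rank count first, as your original str_size message already does).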
Thanks Wesly.. I tried with a structure.. looks like it's working..
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <mpi.h>
struct tokan {
char buff[30];
} t[50];
int main(int argc, char* argv[]) {
int rank;
int p;
MPI_Status status;
int msg_size = 0, i = 0, j = 0, msg_size1 = 0;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &p);
int n = 0, m = 0, k = 0, N = 0, M = 0;
char *str1, *str2, *str_data2, *str_data1;
if (rank == 0) {
str_data1 =
"processes in this communicator will now abort, and potentially your MPI job";
str_data2 =
"The behavior is undefined if lhs or rhs are not pointers to null-terminated byte strings";
str1 = (char *) malloc((strlen(str_data1) + 1) * sizeof(char)); /* +1 for the '\0' terminator */
str2 = (char *) malloc((strlen(str_data2) + 1) * sizeof(char));
strcpy(str1, str_data1);
strcpy(str2, str_data2);
int len;
M = strlen(str2);
N = strlen(str1);
char *ptr;
k = 0;
char *ds = strdup(str_data1);
ptr = strtok(ds, " ,");
while (ptr != NULL) {
++n;
len = strlen(ptr);
for (j = 0; j < len; j++) {
t[k].buff[j] = *(ptr + j);
}
//printf(" %s ", t[k].buff);
k++;
ptr = strtok(NULL, " ,");
}
int chunk = n / p;
int str_size = chunk, cnt = 0;
j = chunk;
for (i = 1; i < p; i++) {
if (i == p - 1) {
str_size = n - chunk * i;
}
MPI_Send(&str_size, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
for (cnt = 0; cnt < str_size; cnt++) {
/*printf("process 0: %c %d %d %d %d %d %d\n", t[j].buff, str_size, n,
chunk, cnt, j, l);*/
MPI_Send(t[j].buff, 30, MPI_CHAR, i, 0, MPI_COMM_WORLD); /* buff is only 30 bytes; a count of 100 reads past it */
j++;
}
}
str_size = chunk;
for (i = 1; i < p; i++) {
MPI_Send(&M, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
//printf("process 0: %s %d %d %d %d\n", str2, n, chunk, cnt, j);
MPI_Send(str2, M, MPI_CHAR, i, 0, MPI_COMM_WORLD);
}
} else {
MPI_Recv(&msg_size, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
int k;
for (k = 0; k < msg_size; k++) {
MPI_Recv(t[k].buff, 30, MPI_CHAR, 0, 0, MPI_COMM_WORLD, &status); /* match the 30-byte send; 100 would overflow buff */
printf(" \nstr1: %s %d %d %d\n", t[k].buff, rank, msg_size, k);
}
printf("***************");
MPI_Recv(&M, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
str2 = (char *) malloc((M + 1) * sizeof(char));
MPI_Recv(str2, M, MPI_CHAR, 0, 0, MPI_COMM_WORLD, &status);
str2[M] = '\0';
printf(" \nstr2: %s %d %d %d \n", str2, rank, k, M);
}
if (rank == 0)
free(str1);
free(str2);
MPI_Finalize();
return 0;
}
I have a very weird problem. I've written a code in MPI where one process should print something, but amazingly the program terminates without any output. I can't understand where it's wrong...
PS: this code is supposed to multiply two matrices.
int main( int argc, char *argv[] )
{
int M = atoi(argv[1]);
// N = 2 ^ M
N = (unsigned int) pow (2.0, M); //you need to modify this code!
int my_rank, comm_sz,mt;
int rows,offset,extra,averow ,dest;
int i,j,k;
srand(time(0));
time_t t1, t2;
double dt; //t2-t1
double tavg=0.0;
//input array
A = (double*) malloc ( sizeof(double) * N * N );
B = (double*) malloc ( sizeof(double) * N * N );
C = (double*) malloc ( sizeof(double) * N * N );
//int r; for (r = 0; r < REP; r++)
//{
//fill in matrix A and B with random numbers
//t1
//t1 = time(0);
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &comm_sz);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
if (my_rank =0){
printf("mpi_mm has started with %d tasks.\n",comm_sz);
printf("Initializing arrays...\n");
fillmatrix(A,N);
fillmatrix(B,N);
averow = N/comm_sz;
extra = N%comm_sz;
offset = 0;
mt = 0;
MPI_Bcast(B,N*N,MPI_DOUBLE,0,MPI_COMM_WORLD);
for ( dest=1;dest<=comm_sz;dest++){
rows = (dest <=extra) ? averow+1 : averow;
MPI_Send(&offset,1,MPI_INT,dest,mt,MPI_COMM_WORLD);
MPI_Send(&rows,1,MPI_INT,dest,mt,MPI_COMM_WORLD);
MPI_Send(&A[offset*N],rows*N,MPI_DOUBLE,dest,mt,MPI_COMM_WORLD);
offset=offset+rows;}
mt = 1;
for (i=1; i<=comm_sz; i++){
MPI_Recv(&offset, 1, MPI_INT, i, mt, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(&rows, 1, MPI_INT, i, mt, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(&C[offset*N], rows*N, MPI_DOUBLE, i, mt, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
printf("Received results from task %d\n",i);
}
/* Print results */
printf("******************************************************\n");
printf("Result Matrix:\n");
for (i=0; i<N; i++)
{
printf("\n");
for (j=0; j<N; j++)
printf("%6.2f ", C[i*N+j]);
}
printf("\n******************************************************\n");
printf ("Done.\n");
}
if(my_rank !=0){
MPI_Bcast(B,N*N,MPI_DOUBLE,0,MPI_COMM_WORLD);
mt = 0;
MPI_Recv(&offset,1,MPI_INT,0,mt,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
MPI_Recv(&rows,1,MPI_INT,0,mt,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
MPI_Recv(&A,rows*N,MPI_DOUBLE,0,mt,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
for(i=0;i<N;i++)
for(j=0;j<rows;j++){
C[j*N+i] =0.0;
for(k=0;k<N;k++)
C[j*N+i] += A[j*N+k]*B[k*N+i];
}
mt = 1;
MPI_Send(&offset, 1, MPI_INT, 0, mt, MPI_COMM_WORLD);
MPI_Send(&rows, 1, MPI_INT, 0, mt, MPI_COMM_WORLD);
MPI_Send(&C, rows*N, MPI_DOUBLE, 0, mt, MPI_COMM_WORLD);
}
MPI_Finalize();
Found it.
You say
if (my_rank =0)
This should be
if (my_rank == 0)
The single = assigns 0 to my_rank and the condition evaluates to 0 (false), so no rank ever enters the master branch; and since every rank's my_rank is now 0, the if (my_rank != 0) worker branch is skipped too, and the program falls straight through to MPI_Finalize without printing anything. Compiling with -Wall would have flagged this (gcc suggests parentheses around an assignment used as a truth value).
;-)
This is an MPI code for LU decomposition.
I have used the following strategy:
There is a master (rank 0) and the others are slaves. The master sends rows to each slave.
Since each slave might receive more than one row, I store all the received rows in a
buffer and then perform LU decomposition on it. After doing that I send the buffer
back to the master. The master does not do any computation; it just sends and receives.
for(i=0; i<n; i++)
map[i] = i%(numProcs-1) + 1;
for(i=0; i<n-1; i++)
{
if(rank == 0)
{
status = pivot(LU,i,n);
for(j=0; j<n; j++)
row1[j] = LU[n*i+j];
}
MPI_Bcast(&status, 1, MPI_INT, 0, MPI_COMM_WORLD);
if(status == -1)
return -1;
MPI_Bcast(row1, n, MPI_DOUBLE, 0, MPI_COMM_WORLD);
int tag1 = 1, tag2 = 2, tag3 = 3, tag4 = 4;
if(rank == 0)
{
int pno, start, index, l, rowsReceived = 0;
MPI_Request req;
MPI_Status stat;
for(j=i+1; j<n; j++)
MPI_Isend(&LU[n*j], n, MPI_DOUBLE, map[j], map[j], MPI_COMM_WORLD, &req);
if(i>=n-(numProcs-1))
cnt++;
for(j=0; j<numProcs-1-cnt; j++)
{
MPI_Recv(&pno, 1, MPI_INT, MPI_ANY_SOURCE, tag2, MPI_COMM_WORLD, &stat);
//printf("1. Recv from %d and j : %d and i : %d\n",pno,j,i);
MPI_Recv(&rowsReceived, 1, MPI_INT, pno, tag3, MPI_COMM_WORLD, &stat);
MPI_Recv(rowFinal, n*rowsReceived, MPI_DOUBLE, pno, tag4, MPI_COMM_WORLD, &stat);
/* Will not go more than numProcs anyways */
for(k=i+1; k<n; k++)
{
if(map[k] == pno)
{
start = k;
break;
}
}
for(k=0; k<rowsReceived; k++)
{
index = start + k*(numProcs-1);
for(l=0; l<n; l++)
LU[n*index+l] = rowFinal[n*k+l];
}
}
}
else
{
int rowsReceived = 0;
MPI_Status stat, stats[3];
MPI_Request reqs[3];
for(j=i+1; j<n; j++)
if(map[j] == rank)
rowsReceived += 1;
for(j=0; j<rowsReceived; j++)
{
MPI_Recv(&rowFinal[n*j], n, MPI_DOUBLE, 0, rank, MPI_COMM_WORLD, &stat);
}
for(j=0; j<rowsReceived; j++)
{
double factor = rowFinal[n*j+i]/row1[i];
for(k=i+1; k<n; k++)
rowFinal[n*j+k] -= (row1[k]*factor);
rowFinal[n*j+i] = factor;
}
if(rowsReceived != 0)
{
//printf("Data sent from %d iteration : %d\n",rank,i);
MPI_Isend(&rank, 1, MPI_INT, 0, tag2, MPI_COMM_WORLD, &reqs[0]);
MPI_Isend(&rowsReceived, 1, MPI_INT, 0, tag3, MPI_COMM_WORLD, &reqs[1]);
MPI_Isend(rowFinal, n*rowsReceived, MPI_DOUBLE, 0, tag4, MPI_COMM_WORLD, &reqs[2]);
}
//MPI_Waitall(3,reqs,stats);
}
}
The problem I am facing is that sometimes the program hangs. My guess is
that the sends and receives are not being matched, but I am not able to
figure out where the problem lies.
I ran test cases on matrices of size 1000x1000, 2000x2000, 3000x3000, 5000x5000
and 7000x7000. Presently the code hangs for 7000x7000. Could someone please help
me out?
Things to note:
map implements the mapping scheme: which row goes to which slave.
rowsReceived tells each slave the number of rows it will receive. I don't need to
recalculate that every iteration, but I will fix it later.
row1 is the buffer in which the active row will be stored.
rowFinal is the buffer of the rows being received and modified.
cnt is not important and can be ignored. For that, the check for
rowsReceived != 0 needs to be removed.
It looks like you are never completing your nonblocking operations. You have a bunch of calls to MPI_Isend throughout the code, but you're never doing an MPI_Wait or MPI_Test (or one of the similar completion calls). Without that completion call, those nonblocking calls will never complete.
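A minimal sketch of the fix in the worker branch, reusing the reqs and stats arrays already declared there (essentially re-enabling the commented-out MPI_Waitall):
if (rowsReceived != 0)
{
    MPI_Isend(&rank, 1, MPI_INT, 0, tag2, MPI_COMM_WORLD, &reqs[0]);
    MPI_Isend(&rowsReceived, 1, MPI_INT, 0, tag3, MPI_COMM_WORLD, &reqs[1]);
    MPI_Isend(rowFinal, n*rowsReceived, MPI_DOUBLE, 0, tag4, MPI_COMM_WORLD, &reqs[2]);
    // Block until all three sends complete; until then the buffers
    // (rank, rowsReceived, rowFinal) must not be modified or freed.
    MPI_Waitall(3, reqs, stats);
}
The root's distribution loop has the same problem: it reuses a single req handle for every MPI_Isend without ever waiting on it, so each of those requests needs an MPI_Wait (or MPI_Request_free) as well.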