Read input integer from external file for MPI

How can I read input from an external file in an MPI program? I need to read one integer from a file (zadanie4_vstup.txt) to compute a simple factorial. I tried substituting the address of an int variable (n) for the second argument of MPI_Init(), but that appears to be nonsense.
Thank you.
#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    FILE *fr, *fw;
    fr = fopen("zadanie4_vstup.txt", "r");
    fw = fopen("zadanie4_vystup.txt", "w");
    int nproc, me;
    int fakt = 1, i, buff, n;
    MPI_Status stat;
    fscanf(fr, "%d", &n);
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    MPI_Comm_rank(MPI_COMM_WORLD, &me);
    #pragma omp parallel for private(i) reduction(*:fakt)
    for (i = me*n/nproc + 1; i <= (me+1)*n/nproc; i++) {
        fakt *= i;
    }
    if (nproc > 1) {
        if (me == 0) {
            for (i = 1; i < nproc; i++) {
                MPI_Recv(&buff, 1, MPI_INT, i, 0, MPI_COMM_WORLD, &stat);
                fakt *= buff;
            }
        } else {
            MPI_Send(&fakt, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
        }
    }
    if (me == 0) {
        fprintf(fw, "%d! = %d\n", n, fakt);
    }
    fclose(fr);
    fclose(fw);
    MPI_Finalize();
}

Here is a version of your program that reads n on the command line.
Note that I simplified the communication by using MPI_Reduce().
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int nproc, me;
    int fakt = 1, res, i, n;

    MPI_Init(&argc, &argv);
    if (argc < 2) {
        fprintf(stderr, "usage: %s <n>\n", argv[0]);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    n = atoi(argv[1]);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    MPI_Comm_rank(MPI_COMM_WORLD, &me);
    /* each rank multiplies its own contiguous slice of 1..n */
    #pragma omp parallel for private(i) reduction(*:fakt)
    for (i = me*n/nproc + 1; i <= (me+1)*n/nproc; i++) {
        fakt *= i;
    }
    /* combine the partial products on rank 0 */
    MPI_Reduce(&fakt, &res, 1, MPI_INT, MPI_PROD, 0, MPI_COMM_WORLD);
    if (me == 0) {
        printf("%d! = %d\n", n, res);
    }
    MPI_Finalize();
    return 0;
}
For example:
$ mpirun -np 4 ./fakt 6
6! = 720
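If you want to keep reading n from zadanie4_vstup.txt, the usual pattern is to let one rank do the file I/O and broadcast the result, rather than have every rank open the file as in your original code (which only works when every rank can see the same file system). A minimal sketch, to be placed after MPI_Comm_rank():

int n = 0;
if (me == 0) {
    FILE *fr = fopen("zadanie4_vstup.txt", "r");
    fscanf(fr, "%d", &n);   /* error checking omitted for brevity */
    fclose(fr);
}
/* every rank needs n for its loop bounds, so distribute it from rank 0 */
MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);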

Related

MPI_SendRecv deadlock on execution

I'm trying to send information from one processor to another in a ring fashion, starting from an offset processor, using MPI_Sendrecv, but I get a deadlock. What is wrong in my code? Basically, I need to use MPI_Sendrecv to solve this kind of problem.
#include <stdio.h>
#include <unistd.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int offset = 9;
    int size, rank, value, next, prev, sendval, recval, namelen;
    double t0, t;
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Get_processor_name(processor_name, &namelen);
    value = 5;
    if (size > 1)
    {
        next = (rank + 1) % size;
        prev = (size + rank - 1) % size;
        sendval = value + rank;
        if (rank == offset)
        {
            MPI_Sendrecv(&sendval, 1, MPI_INT, next, 1, &recval, 1, MPI_INT, prev, 10, MPI_COMM_WORLD, &status);
        }
        else
        {
            MPI_Recv(&recval, 1, MPI_INT, prev, 10, MPI_COMM_WORLD, &status);
            MPI_Send(&sendval, 1, MPI_INT, next, 10, MPI_COMM_WORLD);
        }
    }
    MPI_Finalize();
    return 0;
}
You have mismatched message tags:
MPI_Sendrecv(&sendval, 1, MPI_INT, next, 1 /* <-- send tag */, &recval, 1, MPI_INT, prev, 10, MPI_COMM_WORLD, &status);
...
MPI_Recv(&recval, 1, MPI_INT, prev, 10 /* <-- recv tag */, MPI_COMM_WORLD, &status);
The tag in the send part of the send-receive operation should also be 10.
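With the tag fixed, the call on the offset rank becomes:

MPI_Sendrecv(&sendval, 1, MPI_INT, next, 10, &recval, 1, MPI_INT, prev, 10, MPI_COMM_WORLD, &status);

The ring then completes: the offset rank's send unblocks its neighbour's MPI_Recv, each rank forwards the message in turn, and the last message around the ring arrives at the offset rank's receive half.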

Using ScatterV to split an array to multiple processes

I am working with MPI and I have to send parts of an array to different processes. As an example, consider 3 processes. Then I need to send the red elements to the first process, the green to the second, and the black to the third process.
I know I could use Scatterv twice, but I want to minimize the communication between processes, and the real array that I'm splitting apart is huge. Does anyone have a suggestion on how I can accomplish this?
Here is my attempt with a derived data type:
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

void print_array(int *array, int n) {
    int i;
    printf("\t[");
    for (i = 0; i < n; i++) {
        printf(" %d", array[i]);
    }
    printf("]\n");
}

int main(int argc, char **argv) {
    int rank, world_size, i, n = 16, block_count = 2;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    int *array = malloc(n * sizeof(int));
    for (i = 0; i < n; i++) { array[i] = i; }
    if (rank == 0) { print_array(array, n); }

    int *sendcounts = malloc(world_size * sizeof(int));
    int *reccounts = malloc(world_size * sizeof(int));
    int *displs = malloc(world_size * sizeof(int));
    sendcounts[0] = 3; sendcounts[1] = 3; sendcounts[2] = 2;
    displs[0] = 0; displs[1] = 3; displs[2] = 6;
    for (i = 0; i < world_size; i++) {
        reccounts[i] = sendcounts[i] * block_count;
    }

    int root = 0;
    int *recvbuf = malloc(reccounts[rank] * sizeof(int));
    MPI_Datatype newtype;
    MPI_Type_contiguous(block_count, MPI_INT, &newtype);
    MPI_Type_commit(&newtype);
    if (rank == 0) {
        MPI_Scatterv(array, sendcounts, displs,
                     newtype, recvbuf, sendcounts[rank],
                     newtype, root, MPI_COMM_WORLD);
    }
    else {
        MPI_Scatterv(NULL, sendcounts, displs,
                     newtype, recvbuf, reccounts[rank],
                     newtype, root, MPI_COMM_WORLD);
    }
    MPI_Type_free(&newtype);
    print_array(recvbuf, reccounts[rank]);

    free(array); array = NULL;
    free(sendcounts); sendcounts = NULL;
    free(displs); displs = NULL;
    free(recvbuf); recvbuf = NULL;
    MPI_Finalize();
    return 0;
}
There is a way, but it is a bit convoluted.
The idea is to create a derived datatype with two elements at offsets 0 and 8 (in units of int), and then resize this datatype so that its extent is the size of one element.
Then you can call MPI_Scatterv() once with counts={3,3,2} and displs={0,3,6}; a sketch follows below.
Note that you also need a derived datatype on the receive side, otherwise MPI task 1 would receive {3, 11, 4, 12, 5, 13} when I guess you expect {3, 4, 5, 11, 12, 13}.
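Here is a minimal sketch of that idea, reusing the question's sendcounts, displs and recvbuf, and assuming the 16-int layout above (two halves of 8 ints):

/* send side: two ints that sit 8 ints apart, packed into a type whose
   extent is one int, so consecutive elements start one int apart;
   element i of the scatter then refers to array[i] and array[i+8] */
MPI_Datatype pair, sendtype, rpair, recvtype;
int blocklens[2] = {1, 1};
MPI_Aint offs[2] = {0, 8 * (MPI_Aint)sizeof(int)};
MPI_Type_create_hindexed(2, blocklens, offs, MPI_INT, &pair);
MPI_Type_create_resized(pair, 0, sizeof(int), &sendtype);
MPI_Type_commit(&sendtype);

/* receive side: same trick, but the stride is this rank's count, so the
   two halves land back-to-back, e.g. {3,4,5,11,12,13} on rank 1 */
MPI_Aint roffs[2] = {0, sendcounts[rank] * (MPI_Aint)sizeof(int)};
MPI_Type_create_hindexed(2, blocklens, roffs, MPI_INT, &rpair);
MPI_Type_create_resized(rpair, 0, sizeof(int), &recvtype);
MPI_Type_commit(&recvtype);

/* one collective call on every rank; array is only significant on root */
MPI_Scatterv(array, sendcounts, displs, sendtype,
             recvbuf, sendcounts[rank], recvtype,
             root, MPI_COMM_WORLD);

MPI_Type_free(&pair);  MPI_Type_free(&sendtype);
MPI_Type_free(&rpair); MPI_Type_free(&recvtype);

The send and receive types differ only in their stride (8 on the root, sendcounts[rank] on each receiver), which is what turns the interleaved stream back into two contiguous runs.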

Removing MPI_Bcast()

So I have some code where I am using MPI_Bcast to send information from the root node to all nodes, but instead I want my P0 to send chunks of the array to individual processes.
How do I do this with MPI_Send and MPI_Recv?
I've never used them before, and I don't know if I need to loop my MPI_Recv to effectively send everything or what.
I've put giant caps-lock comments in the code where I need to replace my MPI_Bcast(); sorry in advance for the waterfall of code.
Code:
#include "mpi.h"
#include <stdio.h>
#include <math.h>
#define MAXSIZE 10000000
int add(int *A, int low, int high)
{
int res = 0, i;
for(i=low; i<=high; i++)
res += A[i];
return(res);
}
int main(argc,argv)
int argc;
char *argv[];
{
int myid, numprocs, x;
int data[MAXSIZE];
int i, low, high, myres, res;
double elapsed_time;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
if (myid == 0)
{
for(i=0; i<MAXSIZE; i++)
data[i]=1;
}
/* star the timer */
elapsed_time = -MPI_Wtime();
//THIS IS WHERE I GET CONFUSED ABOUT MPI_SEND AND MPI_RECIEVE!!!
MPI_Bcast(data, MAXSIZE, MPI_INT, 0, MPI_COMM_WORLD);
x = MAXSIZE/numprocs;
low = myid * x;
high = low + x - 1;
if (myid == numprocs - 1)
high = MAXSIZE-1;
myres = add(data, low, high);
printf("I got %d from %d\n", myres, myid);
MPI_Reduce(&myres, &res, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
/* stop the timer*/
elapsed_time += MPI_Wtime();
if (myid == 0)
printf("The sum is %d, time taken = %f.\n", res,elapsed_time);
MPI_Barrier(MPI_COMM_WORLD);
printf("The sum is %d at process %d.\n", res,myid);
MPI_Finalize();
return 0;
}
You need MPI_Scatter. A good introduction is here: http://mpitutorial.com/tutorials/mpi-scatter-gather-and-allgather/
I think in your code it could look like this:

int elements_per_proc = MAXSIZE / numprocs;
// Create a buffer that will hold a chunk of the global array
int *data_chunk = malloc(sizeof(int) * elements_per_proc);  /* needs <stdlib.h> */
MPI_Scatter(data, elements_per_proc, MPI_INT, data_chunk,
            elements_per_proc, MPI_INT, 0, MPI_COMM_WORLD);
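Note that MPI_Scatter gives every rank the same count, so if MAXSIZE is not evenly divisible by numprocs the trailing elements are never sent; MPI_Scatterv, which takes per-rank counts and displacements, covers the uneven case (your current code handles the tail by extending high on the last rank).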
If you really want to use MPI_Send and MPI_Recv, then you can use something like this:

int x = MAXSIZE / numprocs;
int *procData = malloc(sizeof(int) * x);
MPI_Status status;
if (myid == 0) {
    // hand each non-root rank its chunk of the array
    for (int i = 1; i < numprocs; i++) {
        MPI_Send(data + i*x, x, MPI_INT, i, 0, MPI_COMM_WORLD);
    }
} else {
    MPI_Recv(procData, x, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
}
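Rank 0 never sends a chunk to itself in that loop, so it should also take its own slice directly; a minimal sketch, assuming <string.h> is included for memcpy:

if (myid == 0) {
    /* rank 0 already holds the full array; copy its own slice locally */
    memcpy(procData, data, x * sizeof(int));
}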

MPI send and receive error not running

I have written the following code as a test.
I am receiving an array from each processor and placing them in a 2D array, where each row holds the array from a different processor.
#include <iostream>
#include <mpi.h>

using namespace std;

int main(int argc, char* argv[])
{
    int *sendBuff;
    int **table;
    int size, rank;
    MPI_Status stat;
    int pass = 1;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    sendBuff = new int[10];
    printf("task %d passed %d\n", rank, pass); //1
    pass++;
    if (rank == 0)
    {
        table = new int*[size];
    }
    for (int i = 0; i < 10; i++)
    {
        sendBuff[i] = rank;
    }
    printf("task %d passed %d\n", rank, pass); //2
    pass++;
    if (rank != 0)
    {
        MPI_Send(&sendBuff, 10, MPI_INT, 0, rank, MPI_COMM_WORLD);
    }
    printf("task %d passed %d\n", rank, pass); //3
    pass++;
    if (rank == 0)
    {
        table[0] = sendBuff;
        for (int i = 1; i < size; i++)
        {
            MPI_Recv(&table[i], 10, MPI_INT, i, i, MPI_COMM_WORLD, &stat);
        }
    }
    printf("task %d passed %d\n", rank, pass); //4
    pass++;
    delete[] sendBuff;
    if (rank == 0)
    {
        for (int i = 0; i < size; i++)
        {
            delete[] table[i];
        }
        delete[] table;
    }
    MPI_Finalize();
    return 0;
}
But it is not running.
I run it with
mpirun -np 4 a.out
and I get the following:
[arch:03429] *** Process received signal ***
[arch:03429] Signal: Aborted (6)
[arch:03429] Signal code: (-6)
[arch:03429] [ 0] /usr/lib/libpthread.so.0(+0xf870) [0x7fd2675bd870]
[arch:03429] [ 1] /usr/lib/libc.so.6(gsignal+0x39) [0x7fd2672383d9]
[arch:03429] [ 2] /usr/lib/libc.so.6(abort+0x148) [0x7fd2672397d8]
[arch:03429] [ 3] /usr/lib/libc.so.6(+0x72e64) [0x7fd267275e64]
[arch:03429] [ 4] /usr/lib/libc.so.6(+0x7862e) [0x7fd26727b62e]
[arch:03429] [ 5] /usr/lib/libc.so.6(+0x79307) [0x7fd26727c307]
[arch:03429] [ 6] a.out() [0x408704]
[arch:03429] [ 7] /usr/lib/libc.so.6(__libc_start_main+0xf5) [0x7fd267224bc5]
[arch:03429] [ 8] a.out() [0x408429]
[arch:03429] *** End of error message ***
--------------------------------------------------------------------------
mpirun noticed that process rank 0 with PID 3429 on node arch exited on signal 6 (Aborted).
--------------------------------------------------------------------------
Any help?
As Hristo Iliev pointed out, the array sendBuff itself (not its address) should be the argument of MPI_Send. The same goes for table[i] in MPI_Recv.
Another fact: MPI_Send and MPI_Recv do not allocate memory; these functions just copy a message from one place to another. Both sendBuff and table[i] must therefore be allocated beforehand. And writing table[0] = sendBuff makes two pointers refer to the same buffer, so the cleanup code would delete it twice.
Here is a code that may help you:

#include <iostream>
#include <mpi.h>

using namespace std;

int main(int argc, char* argv[])
{
    int *sendBuff;
    int **table;
    int size, rank;
    MPI_Status stat;
    int pass = 1;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    sendBuff = new int[10];
    printf("first task %d passed %d\n", rank, pass); //1
    pass++;
    if (rank == 0)
    {
        table = new int*[size];
    }
    for (int i = 0; i < 10; i++)
    {
        sendBuff[i] = rank;
    }
    printf("second task %d passed %d\n", rank, pass); //2
    pass++;
    if (rank != 0)
    {
        MPI_Send(sendBuff, 10, MPI_INT, 0, rank, MPI_COMM_WORLD);
    }
    printf("third task %d passed %d\n", rank, pass); //3
    pass++;
    if (rank == 0)
    {
        // copy rank 0's own data instead of aliasing sendBuff
        table[0] = new int[10];
        for (int i = 0; i < 10; i++) {
            table[0][i] = sendBuff[i];
        }
        // table[0] = sendBuff;
        for (int i = 1; i < size; i++)
        {
            table[i] = new int[10];
            MPI_Recv(table[i], 10, MPI_INT, i, i, MPI_COMM_WORLD, &stat);
        }
    }
    printf("fourth task %d passed %d\n", rank, pass); //4
    pass++;
    if (rank == 0)
    {
        for (int i = 0; i < size; i++)
        {
            delete[] table[i];
            table[i] = NULL;
        }
        delete[] table;
    }
    delete[] sendBuff;
    MPI_Finalize();
    return 0;
}
A function that may help you: MPI_Gather(). It seems to be what you are looking for! Watch the memory allocation if you want to use it: all the rows of table should be allocated as one contiguous chunk of memory.
http://www.mcs.anl.gov/research/projects/mpi/www/www3/MPI_Gather.html
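For example, a minimal sketch of that approach, assuming 10 ints per rank as in your code:

// gather every rank's 10 ints into one contiguous buffer on rank 0;
// rank i's row then starts at flat + i*10
int *flat = NULL;
if (rank == 0)
    flat = new int[10 * size];
MPI_Gather(sendBuff, 10, MPI_INT, flat, 10, MPI_INT, 0, MPI_COMM_WORLD);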
Bye,
Francis

Open MPI's MPI_reduce not combining array sums

I am very new to Open MPI. I have made a small program that computes the sum of an array by splitting the array into pieces equal to the number of processes. The problem in my program is that each process computes the right sum for its share of the array, but the individually computed sums are not combined by the MPI_Reduce function. I tried my best to solve this and also consulted the Open MPI manual, but there is still something I must be missing. I would be grateful for any kind of guidance. Below is the program I made:
#include "mpi.h"
#include <stdio.h>
int main(int argc, char *argv[])
{
int n, rank, nrofProcs, i;
int sum, ans;
// 0,1,2, 3,4,5, 6,7,8, 9
int myarr[] = {1,5,9, 2,8,3, 7,4,6, 10};
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &nrofProcs);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
n = 10;
MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
sum = 0.0;
int remaining = n % nrofProcs;
int lower =rank*(n/nrofProcs);
int upper = (lower+(n/nrofProcs))-1;
for (i = lower; i <= upper; i++)
{
sum = sum + myarr[i];
}
if(rank==nrofProcs-1)
{
while(i<=remaining)
{
sum = sum + myarr[i];
i++;
}
}
/* (PROBLEM IS HERE, IT IS NOT COMBINING "sums") */
MPI_Reduce(&sum, &ans, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
// if (rank == 0)
printf( "rank: %d, Sum ans: %d\n", rank, sum);
/* shut down MPI */
MPI_Finalize();
return 0;
}
Output:
rank: 2, Sum ans: 17
rank: 1, Sum ans: 13
rank: 0, Sum ans: 15
(Output should be rank: 0, Sum ans: 55)
Sorry, I made some mistakes, which I corrected after debugging the program in parallel. The main ones: the final printf printed each rank's local sum instead of the reduced ans (and the if (rank == 0) guard was commented out, so every rank printed its partial result), and the leftover-elements loop on the last rank compared the array index i against remaining instead of counting the leftovers. Here I am sharing code that splits an array of length N over M processes, where N and M can have any value:
/*
An MPI program that splits an array of length N over M processes, where N and M can have any value
*/
#include <math.h>
#include "mpi.h"
#include <iostream>
#include <vector>

using namespace std;

int main(int argc, char *argv[])
{
    int n, rank, nrofProcs, i;
    int sum, ans;
    //             0,1,2, 3,4,5, 6,7,8, 9,10
    int myarr[] = {1,5,9, 2,8,3, 7,4,6,11,10};
    vector<int> myvec(myarr, myarr + sizeof(myarr) / sizeof(int));
    n = myvec.size(); // number of items in the array

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nrofProcs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
    sum = 0;
    int remaining = n % nrofProcs;
    int lower = rank*(n/nrofProcs);
    int upper = (lower+(n/nrofProcs))-1;
    for (i = lower; i <= upper; i++)
    {
        sum = sum + myvec[i];
    }
    if (rank == nrofProcs-1)
    {
        // the last rank picks up the n % nrofProcs leftover elements
        int ctr = 0;
        while (ctr < remaining)
        {
            sum = sum + myvec[i];
            ctr++;
            i++;
        }
    }
    /* combine everyone's calculations */
    MPI_Reduce(&sum, &ans, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
    if (rank == 0)
        cout << "rank: " << rank << " Sum ans: " << ans << endl;
    /* shut down MPI */
    MPI_Finalize();
    return 0;
}
