How to scatter an entire 2D array to all processes using MPI

As the title suggests, I'm looking to take an entire 2D array and scatter that whole array to the other processes. Here is the code I am using:
int main(int argc, char **argv)
{
    MPI_Init(&argc,&argv);
    int i,j,size,rank;
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (argc!=3)
    {
        printf("Usage : pairwise numberOfSequences lengthOfSequences\n eg. pairwise 10000 50\n");
        exit(0);
    }
    sscanf(argv[1],"%d",&N);
    sscanf(argv[2],"%d",&M);
    char strings[N][M+1];
    if(rank == 0)
    {
        for (i=0; i<N; i++)
        {
            //Here I read from a file into the array strings and this works
            scanf("%s\n",strings[i]);
            printf("Rank:%d i value:%d # %s\n",rank,i,strings[i]);
        }
    }
    MPI_Barrier(MPI_COMM_WORLD);
    char array_for_all[N][M+1];
    //I think here my parameters are wrong for Scatter.
    MPI_Scatter(strings, N*(M+1),MPI_CHAR,array_for_all, N*(M+1), MPI_CHAR, 0,MPI_COMM_WORLD);
    for (i=0; i<N; i++)
    {
        printf("Rank:%d i value:%d # %s\n",rank,i,array_for_all[i]);
    }
I'm not too sure if I am implementing the scatter correctly. I don't want to send parts of the array to each process; I want to send the whole array to each process. Or is there a better way of doing this?

As pointed out by @Wesley, the right way to go is to use MPI_Bcast() to broadcast the data to all processes. The first argument should be a pointer to the data: &strings[0][0].
#include <stdio.h>
#include <mpi.h>
#include <stdlib.h>
#include <string.h>
int main(int argc, char **argv)
{
    MPI_Init(&argc,&argv);
    int N,M;
    int i,j,size,rank;
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (argc!=3)
    {
        printf("Usage : pairwise numberOfSequences lengthOfSequences\n eg. pairwise 10000 50\n");
        exit(0);
    }
    sscanf(argv[1],"%d",&N);
    sscanf(argv[2],"%d",&M);
    char strings[N][M+1];
    if(rank == 0)
    {
        for (i=0; i<N; i++)
        {
            //Here I read from a file into the array strings and this works
            scanf("%s",strings[i]);
            printf("Rank:%d i value:%d # %s\n",rank,i,strings[i]);fflush(stdout);
        }
        printf("input over, now output :\n");fflush(stdout);
    }
    // Broadcast the whole array to every process (the commented-out Scatter call was the original attempt).
    MPI_Bcast(&strings[0][0],N*(M+1), MPI_CHAR, 0,MPI_COMM_WORLD);
    // MPI_Scatter(strings, N*(M+1),MPI_CHAR,array_for_all, N*(M+1), MPI_CHAR, 0,MPI_COMM_WORLD);
    for (i=0; i<N; i++)
    {
        printf("Rank:%d i value:%d # %s\n",rank,i,strings[i]);fflush(stdout);
    }
    MPI_Finalize();
    return 0;
}

Related

Freeing an array of pointers, but getting a "heap block modified" warning

I wrote a program that has to read a 2D array from a text file and save it into a double pointer that will act as a 2D array.
Here's the code:
#include <stdio.h>
#include <stdlib.h>
char **create_map(char* filename);
int n;
int m;
char **map;
int main(int argc, char* argv[]) {
    int i;
    map = create_map(argv[1]);
    for(i = 0; i < n; i++) {
        free(map[i]);
    }
    free(map);
    return 0;
}
char **create_map(char *filename) {
    int i = 0;
    char *row;
    char **map;
    FILE *file = fopen(filename, "r");
    fscanf(file, "%d %d", &n, &m);
    map = malloc(sizeof(char *) * n);
    row = malloc(sizeof(char)*m);
    while(fscanf(file, "%s\n", row) != EOF) {
        map[i] = malloc(sizeof(char)*m);
        strcpy(map[i], row);
        i++;
    }
    free(map[9]);
    free(row);
    fclose(file);
    return map;
}
The content of the file is stored successfully in the map variable, but when it comes to freeing the memory the debugger prints "warning: Heap block at 0000029967AF5770 modified at 0000029967AF578A past requested size of a".
Why can't the memory be freed?
Where's the error?
Thank you in advance.
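For what it's worth, the warning text is consistent with an off-by-one in the allocations: each row buffer is malloc'ed with m bytes, but fscanf("%s", ...) and strcpy() also store a terminating '\0', so a row of length m overwrites its buffer by one byte. The stray free(map[9]) additionally releases a row that main() frees again later. A minimal sketch of the likely fix, keeping the names from the question (treat it as an assumption based on the warning, not a verified answer):
    row = malloc(m + 1);                     /* leave room for the '\0'   */
    while(fscanf(file, "%s", row) != EOF) {
        map[i] = malloc(strlen(row) + 1);    /* likewise for each row     */
        strcpy(map[i], row);
        i++;
    }
    /* no free(map[9]) here; main() already frees every row */
    free(row);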

MPI_Test returning true flags for requests despite never sending?

I have some code where, for testing purposes, I removed all sends and kept only non-blocking receives. You can imagine my surprise when, using MPI_Test, the flags indicated that some of the requests were actually being completed. My code is set up on a Cartesian grid; a small replica is below, although it doesn't reproduce the error:
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h> // for sleep
#include <mpi.h>
void test(int pos);
MPI_Comm comm_cart;
int main(int argc, char *argv[])
{
    int i, j;
    int rank, size;
    MPI_Status status;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    /* code for mpi cartesian grid topology */
    int dim[1];
    dim[0] = 2;
    int periods[1];
    periods[0] = 0;
    int reorder = 1;
    int coords[1];
    MPI_Cart_create(MPI_COMM_WORLD, 1, dim, periods, 1, &comm_cart);
    MPI_Cart_coords(comm_cart, rank, 2, coords);
    test(coords[0]);
    MPI_Finalize();
    return (0);
}
void test(int pos)
{
    float placeholder[4];
    int other = (pos+1) % 2;
    MPI_Request reqs[8];
    int flags[4];
    for(int iter = 0; iter < 20; iter++){
        // Test requests from previous time cycle
        for(int i=0;i<4;i++){
            if(iter == 0) break;
            MPI_Test(&reqs[0], &flags[0], MPI_STATUS_IGNORE);
            printf("Flag: %d\n", flags[0]);
        }
        MPI_Irecv(&placeholder[0], 1, MPI_FLOAT, other, 0, comm_cart, &reqs[0]);
    }
}
Any help would be appreciated.
The issue is with MPI_Test and MPI_PROC_NULL. Quite often when using MPI_Cart_shift you end up with MPI_PROC_NULL neighbours: if you're on the edge of the grid, a neighbouring cell simply doesn't exist in some directions.
I couldn't find any documentation for this anywhere, so I had to discover it myself, but when you do an MPI_Irecv with an MPI_PROC_NULL source it completes instantly, and when tested with MPI_Test the flag returns true for a completed request. Example code below:
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
    int t;
    int flag;
    MPI_Request req;
    MPI_Irecv(&t, 1, MPI_INT, MPI_PROC_NULL, 0, MPI_COMM_WORLD, &req);
    MPI_Test(&req, &flag, MPI_STATUS_IGNORE);
    printf("Flag: %d\n", flag);
    MPI_Finalize();
    return (0);
}
Which returns the following when run:
Flag: 1
Flag: 1

MPI_Gatherv throws a segmentation fault

My aim is to print abbcccdddd with an input of abcd. It's the first time I am using Gatherv.
int main(int argc, char *argv[])
{
    MPI_Init(&argc,&argv);
    int rank,size;
    MPI_Comm_rank(MPI_COMM_WORLD,&rank);
    MPI_Comm_size(MPI_COMM_WORLD,&size);
    int n[100];
    MPI_Status status;
    int x,i=0,m;
    int total[4];
    char str[100],str1[100],a1[100],a[100];
    if(rank==0){
        scanf("%s",str);
    }
    MPI_Scatter(str,1,MPI_CHAR,a,1,MPI_CHAR,0,MPI_COMM_WORLD);
    x=rank;
    while(i<=rank){
        a1[i]=a[0];
        i++;
    }
    int disps[4];
    int y=rank;
    disps[rank]=0;
    while(y!=0){
        disps[rank]+=y;
        y--;
    }
    total[rank]=rank+1;
    printf("%s\n",a1);
    char k[100];
    printf("%d %d\n",total[rank],disps[rank]);
    MPI_Gatherv(a1,rank+1,MPI_CHAR,k,total,disps,MPI_CHAR,0,MPI_COMM_WORLD);
    if(rank==0){
        printf("Total %s\n",k);
    }
    MPI_Finalize();
    /* code */
    return 0;
}
After running the code, the printf values of total and disps are correct, but the program throws a segmentation fault.
The root cause is that both total and disps should be assembled on the root rank (rank 0 in your example).
They are currently "distributed", and hence not fully initialized on the root rank, which leads to undefined behavior such as a crash.
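A minimal sketch of one possible fix, keeping the variable names from the question and assuming it runs on 4 processes (as the fixed-size arrays imply): have every rank, or at least the root, fill in the complete counts and displacement arrays before the call, rather than only its own entry.
    int total[4], disps[4];
    for (int r = 0; r < size; r++) {
        total[r] = r + 1;              /* rank r contributes r+1 characters       */
        disps[r] = r * (r + 1) / 2;    /* 0, 1, 3, 6: sum of the preceding counts */
    }
    MPI_Gatherv(a1, rank + 1, MPI_CHAR, k, total, disps, MPI_CHAR, 0, MPI_COMM_WORLD);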

How to send a GMP variable such as mpz_t through MPI_Send

#include <stdio.h>
#include <gmp.h>
#include <mpi.h>
int main(int *argc, char **argv)
{
    int rank;
    mpz_t a;
    mpz_t b;
    mpz_init(a);
    mpz_init(b);
    mpz_set_ui(a,23000000000000000000000000001);
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if(rank==0)
    {
        MPI_Send(&a,1,MPI_INT,1,100,MPI_COMM_WORLD);
    }
    if(rank==1)
    {
        MPI_Recv(&b,1,MPI_INT,0,100,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
    }
    MPI_Finalize();
    return 1;
}
This code produces an error, so please help me send an mpz_t variable through MPI_Send.
I found a presentation www.macs.hw.ac.uk/~hwloidl/Courses/F21DP/gph_milan15_handout.pdf where, on slides 86 and 88, they send a multiprecision integer by marshalling it to/from a string using gmp_sprintf() and gmp_sscanf().
Actually, I notice they're using rationals of type mpq_t but I guess the same approach would work for mpz_t.
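A rough sketch of that marshalling idea for mpz_t, assuming the number fits in a fixed-size text buffer (BUFLEN is an arbitrary choice, not part of the original code); note the literal from the question is too large for mpz_set_ui(), so the sketch initialises it from a string instead:
#include <stdio.h>
#include <string.h>
#include <gmp.h>
#include <mpi.h>
#define BUFLEN 128
int main(int argc, char **argv)
{
    int rank;
    char buf[BUFLEN];
    mpz_t a, b;
    mpz_init(a);
    mpz_init(b);
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if(rank==0)
    {
        mpz_set_str(a, "23000000000000000000000000001", 10);
        gmp_snprintf(buf, BUFLEN, "%Zd", a);   /* marshal the mpz_t to text */
        MPI_Send(buf, BUFLEN, MPI_CHAR, 1, 100, MPI_COMM_WORLD);
    }
    if(rank==1)
    {
        MPI_Recv(buf, BUFLEN, MPI_CHAR, 0, 100, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        gmp_sscanf(buf, "%Zd", b);             /* unmarshal back into an mpz_t */
        gmp_printf("rank 1 received %Zd\n", b);
    }
    mpz_clear(a);
    mpz_clear(b);
    MPI_Finalize();
    return 0;
}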

Is MPI_Alltoall used correctly?

I intend to achieve a simple task using MPI collective communication, but being new to MPI, I find the collective routines somewhat non-intuitive. I have 4 slaves, each of which must read a unique string and send the string to all the other slaves.
I looked into MPI_Bcast, MPI_Scatter, and MPI_Alltoall. I settled on MPI_Alltoall, but the program ends with a bad termination.
The program is:
int main(int argc,char *argv[])
{
    int my_rank, num_workers;
    MPI_Comm SLAVES_WORLD;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &num_workers);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    createSlavesCommunicator(&SLAVES_WORLD);
    char send_msg[20], recv_buf[20];
    sprintf(send_msg, "test string %d", my_rank);
    MPI_Alltoall(send_buf, strlen(send_buf), MPI_CHAR, recv_buf, 20, MPI_CHAR, MPI_COMM_WORLD);
    printf("slave %d recvd message %s\n", my_rank, recv_buf);
}
void createSlavesCommunicator(MPI_Comm *SLAVES_WORLD)
{
    MPI_Group SLAVES_GROUP, MPI_COMM_GROUP;
    int ranks_to_excl[1];
    ranks_to_excl[0] = 0;
    MPI_Comm_group(MPI_COMM_WORLD, &MPI_COMM_GROUP);
    MPI_Group_excl(MPI_COMM_GROUP, 1, ranks_to_excl, &SLAVES_GROUP);
    MPI_Comm_create(MPI_COMM_WORLD, SLAVES_GROUP, SLAVES_WORLD);
}
MPI_Alltoall() sends messages from everyone to everyone. The input and output buffers should be much larger than 20 if each process sends 20 chars. Starting from your code, here is how MPI_Alltoall() works:
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <string.h>
#include "mpi.h"
int main(int argc,char *argv[])
{
    int my_rank, num_workers;
    MPI_Comm SLAVES_WORLD;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &num_workers);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    //createSlavesCommunicator(&SLAVES_WORLD);
    char send_msg[20*num_workers], recv_buf[20*num_workers];
    int i;
    for(i=0;i<num_workers;i++){
        sprintf(&send_msg[i*20], "test from %d to %d", my_rank,i);
    }
    MPI_Barrier(MPI_COMM_WORLD);
    //MPI_Alltoall(send_msg, strlen(send_msg), MPI_CHAR, recv_buf, 20, MPI_CHAR, MPI_COMM_WORLD);
    MPI_Alltoall(send_msg, 20, MPI_CHAR, recv_buf, 20, MPI_CHAR, MPI_COMM_WORLD);
    MPI_Barrier(MPI_COMM_WORLD);
    for(i=0;i<num_workers;i++){
        printf("slave %d recvd message %s\n", my_rank, &recv_buf[20*i]);
    }
    MPI_Finalize();
    return 0;
}
Looking at your question, it seems that MPI_Allgather() is the function that would do the trick...
"The block of data sent from the jth process is received by every process and placed in the jth block of the buffer recvbuf."
http://www.mcs.anl.gov/research/projects/mpi/www/www3/MPI_Allgather.html
Bye,
Francis
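A minimal sketch of that MPI_Allgather() alternative, reusing the 20-character blocks from the code above (a starting point rather than a drop-in solution):
#include <stdio.h>
#include <mpi.h>
int main(int argc, char *argv[])
{
    int my_rank, num_workers;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &num_workers);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    char send_msg[20], recv_buf[20*num_workers];
    snprintf(send_msg, 20, "test string %d", my_rank);
    /* every rank contributes one 20-char block and receives one block per rank */
    MPI_Allgather(send_msg, 20, MPI_CHAR, recv_buf, 20, MPI_CHAR, MPI_COMM_WORLD);
    for(int i=0;i<num_workers;i++){
        printf("slave %d recvd message %s\n", my_rank, &recv_buf[20*i]);
    }
    MPI_Finalize();
    return 0;
}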
