MPI_Op_create function - mpi

I want to write a function that compares numbers from all processes and returns the minimal one (the number must be positive).
void findIndexForMinNorm(double *invec, double *inoutvec, int *, MPI_Datatype *){
    if(invec[0] > 0){
        if(inoutvec[0] > invec[0] || inoutvec[0] < 0){
            inoutvec[0] = invec[0];
            /*inoutvec[1] = invec[1];*/
        }
    }
}
Is inoutvec shared between all processes or not?

I think it's easiest to illustrate with some code. As Gilles explained, MPI takes care of all the communication and performs the reduction across processes - all you need to supply is the pairwise comparison function. Note that the prototype for the reduction operation is fixed by MPI and allows for a vector reduction: the third argument is the count, i.e. the length of the vector on each process (not the number of processes, which is implicitly the size of the communicator). Other than some minor issues with void vs double pointers, your comparison function can be registered as-is and used for a reduction operation:
#include <stdio.h>
#include <mpi.h>

void findminnorm(void *invec, void *inoutvec, int *len, MPI_Datatype *datatype)
{
    int i;
    double *invecdble = (double *) invec;
    double *inoutvecdble = (double *) inoutvec;

    for (i=0; i < *len; i++)
    {
        if (invecdble[i] > 0)
        {
            if (inoutvecdble[i] > invecdble[i] || inoutvecdble[i] < 0)
            {
                inoutvecdble[i] = invecdble[i];
            }
        }
    }
}

#define N 2

int main()
{
    int i;
    double input[N], output[N];
    int rank, size;

    MPI_Op MPI_MINNORM;

    MPI_Init(NULL, NULL);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    MPI_Op_create(findminnorm, 1, &MPI_MINNORM);

    for (i=0; i < N; i++)
    {
        input[i] = (-size/2+rank+i)*(i+1);
        printf("On rank %d, input[%d] = %f\n", rank, i, input[i]);
        output[i] = -1;
    }

    MPI_Allreduce(&input, &output, N, MPI_DOUBLE, MPI_MINNORM, MPI_COMM_WORLD);

    for (i=0; i < N; i++)
    {
        printf("On rank %d, output[%d] = %f\n", rank, i, output[i]);
    }

    MPI_Finalize();
}
The initialisation is a bit random but I think it serves to illustrate the point (although your comparison function should really cope with the situation where all inputs are negative):
mpirun -n 5 ./minnorm | grep input | sort
On rank 0, input[0] = -2.000000
On rank 0, input[1] = -2.000000
On rank 1, input[0] = -1.000000
On rank 1, input[1] = 0.000000
On rank 2, input[0] = 0.000000
On rank 2, input[1] = 2.000000
On rank 3, input[0] = 1.000000
On rank 3, input[1] = 4.000000
On rank 4, input[0] = 2.000000
On rank 4, input[1] = 6.000000
mpirun -n 5 ./minnorm | grep output | sort
On rank 0, output[0] = 1.000000
On rank 0, output[1] = 2.000000
On rank 1, output[0] = 1.000000
On rank 1, output[1] = 2.000000
On rank 2, output[0] = 1.000000
On rank 2, output[1] = 2.000000
On rank 3, output[0] = 1.000000
On rank 3, output[1] = 2.000000
On rank 4, output[0] = 1.000000
On rank 4, output[1] = 2.000000
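Regarding the all-negative caveat: since the operation only ever overwrites a negative inout value with a positive one, a negative entry in the reduced result can only mean that no rank supplied a positive value for that element. A minimal sketch of how the caller could detect that case, reusing output[] and i from the listing above:

    /* After the reduction, an entry can only still be negative if no rank
       supplied a positive value for it, because the operation never replaces
       a positive inout value with a negative one. */
    if (output[i] < 0) {
        /* no positive value was found on any rank for element i */
    }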

inoutvec is not common to all processes.
Your operation only has to compute inoutvec = min(invec, inoutvec); the MPI library takes care of the communication and invokes your operator with the appropriate inoutvec.
From the MPI standard chapter 5.9.5 page 185:
Advice to implementors. We outline below a naive and inefficient
implementation of MPI_REDUCE not supporting the in place option.

MPI_Comm_size(comm, &groupsize);
MPI_Comm_rank(comm, &rank);
if (rank > 0) {
    MPI_Recv(tempbuf, count, datatype, rank-1,...);
    User_reduce(tempbuf, sendbuf, count, datatype);
}
if (rank < groupsize-1) {
    MPI_Send(sendbuf, count, datatype, rank+1, ...);
}
/* answer now resides in process groupsize-1 ... now send to root */
if (rank == root) {
    MPI_Irecv(recvbuf, count, datatype, groupsize-1,..., &req);
}
if (rank == groupsize-1) {
    MPI_Send(sendbuf, count, datatype, root, ...);
}
if (rank == root) {
    MPI_Wait(&req, &status);
}
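The commented-out inoutvec[1] = invec[1] line in the question suggests you also want to carry an index (e.g. a process or row number) along with the minimum positive norm. One way to do that - purely a sketch with hypothetical names, not part of the code above - is to reduce a (value, index) pair using the predefined MPI_DOUBLE_INT datatype and a custom op over the pair:

    #include <mpi.h>

    /* Pair layout matching MPI_DOUBLE_INT: a double followed by an int. */
    typedef struct { double val; int idx; } MinNormPair;

    void findminnorm_pair(void *invec, void *inoutvec, int *len, MPI_Datatype *dtype)
    {
        MinNormPair *in    = (MinNormPair *) invec;
        MinNormPair *inout = (MinNormPair *) inoutvec;
        for (int i = 0; i < *len; i++) {
            /* Keep the smallest strictly positive value, and its index. */
            if (in[i].val > 0 && (inout[i].val > in[i].val || inout[i].val < 0)) {
                inout[i] = in[i];
            }
        }
    }

    /* Usage (inside an initialised MPI program):
         MinNormPair local = { mynorm, rank }, global;
         MPI_Op MPI_MINNORMLOC;
         MPI_Op_create(findminnorm_pair, 1, &MPI_MINNORMLOC);
         MPI_Allreduce(&local, &global, 1, MPI_DOUBLE_INT, MPI_MINNORMLOC, MPI_COMM_WORLD);
         MPI_Op_free(&MPI_MINNORMLOC);
    */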

Related

Do I need more events when timing multiple work-items?

If I have more than one work-item to execute some kernel code, do I need to have more events to track the execution time for each work-item?
I am getting some strange results: 1 work-item takes about 4 seconds to execute, and 100 work-items also take about 4 seconds. I can't see how this could be possible, since my Nvidia GeForce GT 525M only has 2 compute units, each with 48 processing elements. This leads me to believe that the event I passed to clEnqueueNDRangeKernel tracks only one work-item. Is that true, and if so, how can I get it to track all the work-items?
This is what the Khronos user guide says about the event argument in clEnqueueNDRangeKernel:
event returns an event object that identifies this particular kernel execution instance
What is the meaning of "this particular kernel execution instance"? Isn't that a single work-item?
EDIT:
Relevant host code:
static const size_t numberOfWorkItems = 48;
const size_t globalWorkSize[] = { numberOfWorkItems, 0, 0 };
cl_event events;
ret = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL, globalWorkSize, NULL, 0, NULL, &events);
ret = clEnqueueReadBuffer(command_queue, memobj, CL_TRUE, 0, sizeof(cl_mem), val, 0, NULL, NULL);
clWaitForEvents(1, &events);
cl_ulong time_start;
cl_ulong time_end;
clGetEventProfilingInfo(events, CL_PROFILING_COMMAND_QUEUED, sizeof(cl_ulong), &time_start, NULL);
clGetEventProfilingInfo(events, CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &time_end, NULL);
double nanoSeconds = (double) (time_end - time_start);
printf("OpenCl Execution time is: %f milliseconds \n",nanoSeconds / 1000000.0);
printf("Result: %lu\n", val[0]);
Kernel code:
kernel void parallel_operation(__global ulong *val) {
    size_t i = get_global_id(0);
    int n = 48;
    local unsigned int result[48];

    for (int z = 0; z < n; z++) {
        result[z] = 0;
    }

    // here comes the long operation
    for (ulong k = 0; k < 2000; k++) {
        for (ulong j = 0; j < 10000; j++) {
            result[i] += (j * 3) % 5;
        }
    }

    barrier(CLK_LOCAL_MEM_FENCE);

    if (i == 0) {
        for (int z = 1; z < n; z++) {
            result[0] += result[z];
        }
        *val = result[0];
    }
}
You are measuring the execution time of your entire kernel function, in other words the time between the first work-item starting and the last work-item finishing. To my knowledge there is no way to measure the execution time of a single work-item in OpenCL.
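As a side note - this is an assumption about what you want to measure, not something your code requires - CL_PROFILING_COMMAND_QUEUED includes the time the command spent waiting in the queue. If you only want the time the kernel actually ran on the device, the usual pattern is to read CL_PROFILING_COMMAND_START and CL_PROFILING_COMMAND_END, reusing the events variable and the profiling-enabled queue from your host code above:

    cl_ulong time_start, time_end;

    /* START = kernel begins executing on the device, END = it finishes;
       QUEUED..START would additionally include queueing/submission delay. */
    clGetEventProfilingInfo(events, CL_PROFILING_COMMAND_START,
                            sizeof(cl_ulong), &time_start, NULL);
    clGetEventProfilingInfo(events, CL_PROFILING_COMMAND_END,
                            sizeof(cl_ulong), &time_end, NULL);

    double milliseconds = (double)(time_end - time_start) / 1000000.0;
    printf("Kernel device execution time: %f milliseconds\n", milliseconds);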

Incorrect buffer size in MPI

I am getting the following error output while executing MPI_Recv:
MPI_Recv(buf=0x000000D62C56FC60, count=1, MPI_INT, src=3, tag=0, MPI_COMM_WORLD, status=0x0000000000000001) failed
Message truncated; 8 bytes received but buffer size is 4
My function needs to find the index of the row that has the maximum element in column ind.
My function's code is found below:
int find_row(Matr matr, int ind)
{
    int max = ind;
    for (int i = ind + 1 + CurP; i < N; i += Pnum)
        if (matr[i][ind] > matr[max][ind])
            max = i;

    int ans = max;
    if (CurP != 0)
    {
        MPI_Send(&max, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
        MPI_Barrier(MPI_COMM_WORLD);
    }
    else
    {
        MPI_Barrier(MPI_COMM_WORLD);
        for (int i = 1; i < Pnum; i++)
        {
            MPI_Recv(&max, 1, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            printf("max %d %Lf! Process %d;\n", max, matr[max][ind], i);
            fflush(stdout);
            if (matr[max][ind] > matr[ans][ind])
                ans = max;
        }
    }
    return ans;
}
Matr is the following type definition: typedef vector<vector<long double> >& Matr;
CurP and Pnum are initialized in the following way:
MPI_Comm_size(MPI_COMM_WORLD, &Pnum);
MPI_Comm_rank(MPI_COMM_WORLD, &CurP);
Please help me solve this issue. Thanks!
It was my mistake: in another part of my code I was calling MPI_Bcast from only some of the processes, which got the ranks out of sync and caused this receive to match the wrong message.
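For anyone hitting the same symptom: every rank of the communicator has to make the matching collective call. A minimal sketch of the kind of mismatch described above (the variable names are made up for illustration):

    /* Broken: only rank 0 takes part in the collective, so the other ranks
       never see it and the communication pattern drifts out of sync. */
    if (rank == 0)
        MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);

    /* Fixed: every rank calls MPI_Bcast with the same root, count and datatype. */
    MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);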

A mpi program to add the numbers from 1 to 16000000, different results

The master task first initializes an array and then distributes an equal portion of that array to the other tasks. After the other tasks receive their portion of the array, they perform an addition operation on each array element. They also maintain a sum for their portion of the array. The master task does likewise with its portion of the array. As each of the non-master tasks finishes, it sends its updated portion of the array to the master. An MPI collective communication call is used to collect the sums maintained by each task. Finally, the master task displays selected parts of the final array and the global sum of all array elements.
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#define ARRAYSIZE 16000000
#define MASTER
float data[ARRAYSIZE];
int main (int argc, char *argv[])
{
int numtasks, taskid, rc, dest, source, offset, i, j, tag1,
tag2, chunksize;
float mysum, sum;
float update(int myoffset, int chunk, int myid);
MPI_Status status;
/******Initializations******/
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
if (numtasks % 4 != 0) {
printf("Quitting. Number of MPI tasks must be divisible by 4. \n");
MPI_Abort(MPI_COMM_WORLD, rc);
exit(0);
}
MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
chunksize = ARRAYSIZE / numtasks;
tag2 = 1;
tag1 = 2;
/******Master task only ******/
if (taskid == MASTER){
/* Initialize the array */
sum = 0;
for (i = 0; i < ARRAYSIZE; i++){
data[i] = i * 1.0;
sum = sum + data[i];
}
printf("Initialized array sum = %e\n", sum);
/* Send each task its portion of the array - mater keeps 1st part */
offset = chunksize;
for (dest = 1; dest < numtasks; dest++){
MPI_Send(&offset, 1, MPI_INT, dest, tag1, MPI_COMM_WORLD);
MPI_Send(&data[offset], chunksize, MPI_FLOAT, dest, tag2, MPI_COMM_WRLD);
printf("Sent %d elements to task %d offset = %d\n, chunksize, dest, offset);
offset = offset + chunksize;
}
/* Master does its part of the work */
offset = 0;
mysum = update(offset, chunksize, taskid);
/* Get final sum */
MPI_Reduce(&mysum, &sum, 1, MPI_FLOAT, MPI_SUM, MASTER, MPI_COMM_WORLD);
printf("***Final sum = %e ***\n", sum);
} /* end of master section */
/******Non-master tasks only ******/
if (taskid > MASTER){
/* Receive my portion of array from the master task */
source = MASTER;
MPI_Recv(&offset, 1, MPI_INT, source, tag1, MPI_COMM_WORLD, &status);
MPI_Recv(&data[offset], chunksize, MPI_FLOAT, source, tag2, MPI_COMM_WORLD, &status);
mysum = update(offset, chunksize, taskid);
MPI_Reduce(&mysum, &sum, 1, MPI_FLOAT, MPI_SUM, MASTER, MPI_COMM_WORLD);
} /* end of non-master */
MPI_Finalize();
} /* end of main */
float update(int myoffset, int chunk, int myid){
int i;
float mysum;
/* Perform addition to each of my array elements and keep my sum */
mysum = 0;
for (i = myoffset; i < myoffset + chunk; i++){
mysum = mysum + data[i];
}
printf("Task %d mysum = %e\n", myid, mysum);
return mysum;
}
The output of this program is:
MPI task 0 has started...
MPI task 1 has started...
MPI task 2 has started...
MPI task 3 has started...
Initialized array sum = 1.335708e+14
Sent 4000000 elements to task 1 offset= 4000000
Sent 4000000 elements to task 2 offset= 8000000
Task 1 mysum = 2.442024e+13
Sent 4000000 elements to task 3 offset= 12000000
Task 2 mysum = 3.991501e+13
Task 3 mysum = 5.809336e+13
Task 0 mysum = 7.994294e+12
Sample results:
0.000000e+00 1.000000e+00 2.000000e+00 3.000000e+00 4.000000e+00
4.000000e+06 4.000001e+06 4.000002e+06 4.000003e+06 4.000004e+06
8.000000e+06 8.000001e+06 8.000002e+06 8.000003e+06 8.000004e+06
1.200000e+07 1.200000e+07 1.200000e+07 1.200000e+07 1.200000e+07
*** Final sum= 1.304229e+14 ***
So my question is: why don't these two sums hold the same value?
You are storing the result in a 32-bit floating-point number (i.e. a float) which simply isn't enough to maintain all the accuracy you need. What you are seeing is a classic example of how rounding errors accumulate differently depending on what order you add numbers together.
If you just replace all your floats by doubles then it is OK:
mpiexec -n 4 ./arraysum
Initialized array sum = 1.280000e+14
Sent 4000000 elements to task 1 offset = 4000000
Task 1 mysum = 2.400000e+13
Sent 4000000 elements to task 2 offset = 8000000
Task 2 mysum = 4.000000e+13
Sent 4000000 elements to task 3 offset = 12000000
Task 0 mysum = 7.999998e+12
Task 3 mysum = 5.600000e+13
***Final sum = 1.280000e+14 ***
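To see the effect without any MPI at all, here is a minimal sketch (my own illustration, not part of the original program) that sums the same 16,000,000 values once into a float accumulator and once into a double; the exact answer is 16000000*15999999/2 = 1.27999992e+14:

    #include <stdio.h>

    int main(void)
    {
        const long n = 16000000;
        float  fsum = 0.0f;
        double dsum = 0.0;

        /* Same summation order in both cases; only the accumulator precision differs. */
        for (long i = 0; i < n; i++) {
            fsum += (float) i;
            dsum += (double) i;
        }

        printf("float  sum = %e\n", fsum);
        printf("double sum = %e\n", dsum);
        return 0;
    }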

Removing MPI_Bcast()

So I have some code where I am using MPI_Bcast to send information from the root node to all nodes, but instead I want my P0 to send chunks of the array to the individual processes.
How do I do this with MPI_Send and MPI_Recv?
I've never used them before, and I don't know if I need to loop my MPI_Recv to effectively send everything, or what.
I've put giant caps-lock comments in the code where I need to replace my MPI_Bcast(); sorry in advance for the waterfall of code.
Code:
#include "mpi.h"
#include <stdio.h>
#include <math.h>
#define MAXSIZE 10000000
int add(int *A, int low, int high)
{
int res = 0, i;
for(i=low; i<=high; i++)
res += A[i];
return(res);
}
int main(argc,argv)
int argc;
char *argv[];
{
int myid, numprocs, x;
int data[MAXSIZE];
int i, low, high, myres, res;
double elapsed_time;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
if (myid == 0)
{
for(i=0; i<MAXSIZE; i++)
data[i]=1;
}
/* star the timer */
elapsed_time = -MPI_Wtime();
//THIS IS WHERE I GET CONFUSED ABOUT MPI_SEND AND MPI_RECIEVE!!!
MPI_Bcast(data, MAXSIZE, MPI_INT, 0, MPI_COMM_WORLD);
x = MAXSIZE/numprocs;
low = myid * x;
high = low + x - 1;
if (myid == numprocs - 1)
high = MAXSIZE-1;
myres = add(data, low, high);
printf("I got %d from %d\n", myres, myid);
MPI_Reduce(&myres, &res, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
/* stop the timer*/
elapsed_time += MPI_Wtime();
if (myid == 0)
printf("The sum is %d, time taken = %f.\n", res,elapsed_time);
MPI_Barrier(MPI_COMM_WORLD);
printf("The sum is %d at process %d.\n", res,myid);
MPI_Finalize();
return 0;
}
You need MPI_Scatter. A good intro is here: http://mpitutorial.com/tutorials/mpi-scatter-gather-and-allgather/
I think in your code it could look like this:
int elements_per_proc = MAXSIZE / numprocs;

// Create a buffer that will hold this process's chunk of the global array
int *data_chunk = malloc(sizeof(int) * elements_per_proc);   /* requires <stdlib.h> */

MPI_Scatter(data, elements_per_proc, MPI_INT, data_chunk,
            elements_per_proc, MPI_INT, 0, MPI_COMM_WORLD);
If you really want to use MPI_Send and MPI_Recv, then you can use something like this:

int x = MAXSIZE / numprocs;
int *procData = malloc(sizeof(int) * x);   /* requires <stdlib.h> */
MPI_Status status;

if (myid == 0) {
    /* rank 0 keeps data[0..x-1] and sends one chunk to every other rank */
    for (int i = 1; i < numprocs; i++) {
        MPI_Send(data + i*x, x, MPI_INT, i, 0, MPI_COMM_WORLD);
    }
} else {
    MPI_Recv(procData, x, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
}
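One detail worth noting (my own addition, assuming the names from the MPI_Scatter snippet above): once each process has its chunk in a local buffer, the summation indices become local rather than global, so the existing add() helper would be called over the local range:

    /* Each rank sums its own chunk; indices run over the local buffer,
       not over the global [low, high] range used with MPI_Bcast. */
    myres = add(data_chunk, 0, elements_per_proc - 1);
    printf("I got %d from %d\n", myres, myid);
    MPI_Reduce(&myres, &res, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);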

Open MPI's MPI_reduce not combining array sums

I am very new to Open MPI. I have made a small program that computes the sum of an array by splitting the array into pieces equal to the number of processes. The problem in my program is that each process computes the right sum for its share of the array, but the individually computed sums are not being summed by the MPI_Reduce function. I tried my best to solve it and also consulted the Open MPI manual, but there is still something I might be missing. I would be grateful for any kind of guidance. Below is the program I made:
#include "mpi.h"
#include <stdio.h>
int main(int argc, char *argv[])
{
int n, rank, nrofProcs, i;
int sum, ans;
// 0,1,2, 3,4,5, 6,7,8, 9
int myarr[] = {1,5,9, 2,8,3, 7,4,6, 10};
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &nrofProcs);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
n = 10;
MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
sum = 0.0;
int remaining = n % nrofProcs;
int lower =rank*(n/nrofProcs);
int upper = (lower+(n/nrofProcs))-1;
for (i = lower; i <= upper; i++)
{
sum = sum + myarr[i];
}
if(rank==nrofProcs-1)
{
while(i<=remaining)
{
sum = sum + myarr[i];
i++;
}
}
/* (PROBLEM IS HERE, IT IS NOT COMBINING "sums") */
MPI_Reduce(&sum, &ans, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
// if (rank == 0)
printf( "rank: %d, Sum ans: %d\n", rank, sum);
/* shut down MPI */
MPI_Finalize();
return 0;
}
Output:
rank: 2, Sum ans: 17
rank: 1, Sum ans: 13
rank: 0, Sum ans: 15
(Output should be rank: 0, Sum ans: 55)
Sorry, I made some mistakes, which I corrected after running a parallel debugger on my program. Here is code that splits an array of length N across M processes, where N and M can have any values:
/*
  An MPI program to split an array of length N over M processes,
  where N and M can have any values.
*/
#include <math.h>
#include "mpi.h"
#include <iostream>
#include <vector>

using namespace std;

int main(int argc, char *argv[])
{
    int n, rank, nrofProcs, i;
    int sum, ans;
    //             0,1,2, 3,4,5, 6,7,8, 9, 10
    int myarr[] = {1,5,9, 2,8,3, 7,4,6,11,10};
    vector<int> myvec(myarr, myarr + sizeof(myarr) / sizeof(int));
    n = myvec.size(); // number of items in the array

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nrofProcs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);

    sum = 0;
    int remaining = n % nrofProcs;
    int lower = rank * (n / nrofProcs);
    int upper = (lower + (n / nrofProcs)) - 1;
    for (i = lower; i <= upper; i++)
    {
        sum = sum + myvec[i];
    }
    if (rank == nrofProcs - 1)
    {
        int ctr = 0;
        while (ctr < remaining)
        {
            sum = sum + myvec[i];
            ctr++;
            i++;
        }
    }

    /* combine everyone's calculations */
    MPI_Reduce(&sum, &ans, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0)
        cout << "rank: " << rank << " Sum ans: " << ans << endl;

    /* shut down MPI */
    MPI_Finalize();
    return 0;
}
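As an aside (my own sketch, not part of the corrected program, reusing the same variable names): a common alternative split spreads the n % nrofProcs leftover elements over the first few ranks instead of giving them all to the last rank, which keeps the load more even:

    int base  = n / nrofProcs;           // minimum elements per rank
    int extra = n % nrofProcs;           // leftover elements to distribute
    int count = base + (rank < extra ? 1 : 0);
    int lower = rank * base + (rank < extra ? rank : extra);
    int upper = lower + count - 1;       // this rank sums myvec[lower..upper]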