OpenCL - Insert values into every nth element of an array

I have an array of 100 elements, and what I want to do is copy these 100 elements into every nth element of another array.
Let's say n is 3.
The new array would then contain [val1 0 0 val2 0 0 val3 0 0 ...] after the values were copied to every nth element. In OpenCL, I tried creating a pointer that holds the current index, and I simply add n to this value every time. However, the current index always just keeps the same value. Below is the code I have.
__kernel void ddc(__global float *inputArray, __global float *outputArray, __const int interpolateFactor, __global int *currentIndex) {
    int i = get_global_id(0);

    outputArray[currentIndex[0]] = inputArray[i];
    currentIndex[0] = currentIndex[0] + (interpolateFactor - 1);
    printf("index %i \n", currentIndex[0]);
}
Host code for the currentIndex part:
int *index;
index = (int*)malloc(2*sizeof(int));
index[0] = 0;

cl_mem currentIndex;
currentIndex = clCreateBuffer(
    context,
    CL_MEM_WRITE_ONLY,
    2 * sizeof(int),
    NULL,
    &status);

status = clEnqueueWriteBuffer(
    cmdQueue,
    currentIndex,
    CL_FALSE,
    0,
    2 * sizeof(int),
    index,
    0,
    NULL,
    NULL);
printf("Index enqueueWriteBuffer status: %i \n", status);

status |= clSetKernelArg(
    kernel,
    4,
    sizeof(cl_mem),
    &currentIndex);
printf("Kernel Arg currentIndex Factor status: %i \n", status);
If you are wondering why I am using an array with two elements, it's because I wasn't sure how to reference just a single variable, so I implemented it the same way I had the input and output arrays working. When I run the kernel with an interpolateFactor of 3, currentIndex always prints 2.

If I understood you right, what you want to do is save in currentIndex the next index that should be used. This will not work: the work-items run concurrently, so the updated value is not instantly visible to the other work-items and they all read the same stale index. If you wanted to do it this way, you would have to execute all the work-items sequentially.
What you could do is
__kernel void ddc(__global float *inputArray, __global float *outputArray, __const int interpolateFactor, int start) {
    int i = get_global_id(0);

    outputArray[start + i * (interpolateFactor - 1)] = inputArray[i];
}
assuming you might want to start from a spot other than 0; otherwise you can ditch start completely.
To get it working like that, you do
int start = 0;

status |= clSetKernelArg(
    kernel,
    3, // This should be 3, right? Might have been the problem to begin with.
    sizeof(int),
    &start);
Hopefully this helps.
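For completeness, here is a minimal sketch of the matching host-side launch, one work-item per input element. It is only an illustration: the variable names inputBuffer, outputBuffer and interpolateFactor are assumptions, not taken from your host code, and error checking is omitted.
int start = 0;
size_t globalSize = 100;   // one work-item per input element

status  = clSetKernelArg(kernel, 0, sizeof(cl_mem), &inputBuffer);     // inputArray
status |= clSetKernelArg(kernel, 1, sizeof(cl_mem), &outputBuffer);    // outputArray
status |= clSetKernelArg(kernel, 2, sizeof(int), &interpolateFactor);  // interpolateFactor
status |= clSetKernelArg(kernel, 3, sizeof(int), &start);              // start

status |= clEnqueueNDRangeKernel(cmdQueue, kernel, 1, NULL,
                                 &globalSize, NULL, 0, NULL, NULL);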

Related

OpenCL kernel math outputs incorrect results

I am currently trying to implement an OpenCL kernel. The kernel is supposed to output a number of previously calculated elements divided by the total number of elements remapped to a value from 0 to 255.
The kernel runs in a single work group with 256 work items where LX is the local ID:
#define LX get_local_id(0)
kernel void reduceStatistic(global int *inout, int nr_workgroups, int nr_pixels)
{
    int i = 1;
    for (; i < nr_workgroups; i++)
    {
        inout[LX] += inout[LX + i * 256];
    }
    inout[LX] = (int)floor(((float)inout[LX] / (float)nr_pixels) * 256.0f);
}
The calculation before the remapping operation is cleanup after a previous calculation on the same buffer.
The first item of inout[LX] after the cleanup is 17176, and nr_pixels is 160000, so the calculation above should give a value of 27. The code, however, returns 6.
The relevant host-side code is as follows:
// nr_workgroups is of type int
cl_mem outputBuffer = clCreateBuffer(mgr->context, CL_MEM_READ_WRITE, nr_workgroups * 256 * sizeof(cl_int), NULL, NULL);
// another kernel writes into outputBuffer
// set kernel arguments
clSetKernelArg(mgr->reduceStatisticKernel, 0, sizeof(outputBuffer), &outputBuffer);
clSetKernelArg(mgr->reduceStatisticKernel, 1, sizeof(cl_int), &nr_workgroups);
clSetKernelArg(mgr->reduceStatisticKernel, 2, sizeof(cl_int), &imgSeqSize);
size_t global_work_size_statistics[1] = { 256 };
size_t local_work_size_statistics[1] = { 256 };
// run the kernel
clEnqueueNDRangeKernel(mgr->commandQueue, mgr->reduceStatisticKernel, 1, NULL, global_work_size_statistics, local_work_size_statistics, 0, NULL, NULL);
// read result
cl_int *reducedResult = new cl_int[256];
clEnqueueReadBuffer(mgr->commandQueue, outputBuffer, CL_TRUE, 0, 256 * sizeof(cl_int), reducedResult, 0, NULL, NULL);
Help much appreciated! (:
We established in the comments that the global buffer index calculation is wrong:
inout[LX] += inout[LX + i * 265];
----------^^^
Should be 256
Going out of range on a buffer leads to undefined behaviour, so this is always one of the prime culprits to look for.
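As a quick illustration (my own sketch, not from the original post): with the mistyped stride of 265, the last loop iteration reads well past the end of the buffer, which a simple host-side bound check would have caught before launching the kernel.
#include <assert.h>

/* inout was allocated with nr_workgroups * 256 elements, and LX ranges over 0..255 */
int nr_workgroups = 4;                                       /* example value        */
size_t buffer_elems = (size_t)nr_workgroups * 256;           /* 1024                 */

size_t max_ok  = 255 + (size_t)(nr_workgroups - 1) * 256;    /* 1023, still in range */
size_t max_bad = 255 + (size_t)(nr_workgroups - 1) * 265;    /* 1050, out of range   */

assert(max_ok  < buffer_elems);   /* holds                                            */
assert(max_bad < buffer_elems);   /* fails: the kernel would read past the buffer end */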

OpenCL - using atomic reduction for double

I know atomic functions are not recommended with OpenCL 1.x, but I just want to understand an atomics example.
The following kernel code is not working well; it produces random final values for the computation of the sum of all array values (sum reduction):
#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable

void atom_add_double(volatile __local double *val, double delta)
{
    union {
        double f;
        ulong  i;
    } old, new;

    do
    {
        old.f = *val;
        new.f = old.f + delta;
    }
    while (atom_cmpxchg((volatile __local ulong *)val, old.i, new.i) != old.i);
}
__kernel void sumGPU ( __global const double *input,
                       __local  double *localInput,
                       __global double *finalSum
                     )
{
    uint lid       = get_local_id(0);
    uint gid       = get_global_id(0);
    uint localSize = get_local_size(0);
    uint groupid   = get_group_id(0);

    local double partialSum;
    local double finalSumTemp;

    // Initialize sums
    if (lid == 0)
    {
        partialSum   = 0.0;
        finalSumTemp = 0.0;
    }
    barrier(CLK_LOCAL_MEM_FENCE);

    // Set in local memory
    int idx = groupid * localSize + lid;
    localInput[lid] = input[idx];

    // Compute atom_add into each workgroup
    barrier(CLK_LOCAL_MEM_FENCE);
    atom_add_double(&partialSum, localInput[lid]);

    // See and check if the barrier below is necessary
    barrier(CLK_LOCAL_MEM_FENCE);

    // Final sum of partialSums
    if (lid == 0)
    {
        atom_add_double(&finalSumTemp, partialSum);
        *finalSum = finalSumTemp;
    }
}
The version with the global-id strategy works well, but the version above, which goes through local (shared) memory, doesn't give the expected results (the value of *finalSum is random on each execution).
Here are the buffers and kernel args that I have set in my host code:
// Write to buffers
ret = clEnqueueWriteBuffer(command_queue, inputBuffer, CL_TRUE, 0,
nWorkItems * sizeof(double), xInput, 0, NULL, NULL);
ret = clEnqueueWriteBuffer(command_queue, finalSumBuffer, CL_TRUE, 0,
sizeof(double), finalSumGPU, 0, NULL, NULL);
// Set the arguments of the kernel
clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&inputBuffer);
clSetKernelArg(kernel, 1, local_item_size*sizeof(double), NULL);
clSetKernelArg(kernel, 2, sizeof(cl_mem), (void *)&finalSumBuffer);
Finally, I read finalSumBuffer to get the sum value.
I think my issue comes from the kernel code, but I can't find where the error is.
If anyone can see what's wrong, it would be nice to tell me.
Thanks
UPDATE 1 :
I nearly managed to perform this reduction. Following the suggestions of huseyin tugrul buyukisik, I modified the kernel code like this:
#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable

void atom_add_double(volatile __local double *val, double delta)
{
    union {
        double d;
        ulong  i;
    } old, new;

    do
    {
        old.d = *val;
        new.d = old.d + delta;
    }
    while (atom_cmpxchg((volatile __local ulong *)val, old.i, new.i) != old.i);
}
__kernel void sumGPU ( __global const double *input,
                       __local  double *localInput,
                       __local  double *partialSum,
                       __global double *finalSum
                     )
{
    uint lid       = get_local_id(0);
    uint gid       = get_global_id(0);
    uint localSize = get_local_size(0);
    uint groupid   = get_group_id(0);

    // Initialize partial sums
    if (lid == 0)
        partialSum[groupid] = 0.0;
    barrier(CLK_LOCAL_MEM_FENCE);

    // Set in local memory
    int idx = groupid * localSize + lid;
    localInput[lid] = input[idx];

    // Compute atom_add into each workgroup
    barrier(CLK_LOCAL_MEM_FENCE);
    atom_add_double(&partialSum[groupid], localInput[lid]);

    // See and check if the barrier below is necessary
    barrier(CLK_LOCAL_MEM_FENCE);

    // Compute final sum
    if (lid == 0)
        *finalSum += partialSum[groupid];
}
As huseyin said, I don't need atomic functions for the final sum of all the partial sums.
So at the end I did:
// Compute final sum
if (lid==0)
*finalSum += partialSum[groupid];
But unfortunately, the final sum doesn't give the expected value, and the result is random (for example, with nwork-items = 1024 and size-WorkGroup = 16, I get values on the order of [1e+3 - 1e+4] instead of the expected 5.248e+05).
Here is how the arguments are set in the host code:
// Set the arguments of the kernel
clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&inputBuffer);
clSetKernelArg(kernel, 1, local_item_size*sizeof(double), NULL);
clSetKernelArg(kernel, 2, nWorkGroups*sizeof(double), NULL);
clSetKernelArg(kernel, 3, sizeof(cl_mem), (void *)&finalSumBuffer);
Can you see where the error is in my kernel code?
Thanks
Not an error, but a logic issue:
atom_add_double(&finalSumTemp, partialSum);
works only once per group (it is executed only by the zero-local-indexed thread).
So you are just doing
finalSumTemp = partialSum
and the atomic is not needed there.
There is a race condition on
*finalSum = finalSumTemp;
between workgroups, where the zero-local-indexed thread of each group writes to the same address. This is the place that should be an atomic addition (for learning purposes), or each group could write to a different cell to be added on the host side, such as sum_group1 + sum_group2 + ... = total sum.
int idx = groupid * localSize + lid;
localInput[lid] = input[idx];
Here, using groupid is suspicious for multi-device summation, because each device has its own global range and workgroup-id indexing, so two devices could have the same group-id values for two different groups. Some device-related offset should be used when multiple devices are involved, such as:
idx= get_global_id(0) + deviceOffset[deviceId];
Also, if an atomic operation is unavoidable and it is executed exactly N times, it could be moved to a single thread (such as the zero-indexed thread) and looped over N times (probably faster) in a second kernel, unless the atomic operation's latency can be hidden by other means.
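To make the per-group-cell idea concrete, here is a minimal sketch (my own, not from the post): each workgroup accumulates into a local sum with the same atom_add_double helper defined above, the zero-indexed thread writes that sum into its own cell of a global partialSums array (so there is no inter-group race), and the host adds the cells up. The names partialSums, partialSumBuffer and nWorkGroups are illustrative.
__kernel void sumGroups(__global const double *input,
                        __local  double *localInput,
                        __global double *partialSums)   // one cell per workgroup
{
    uint lid     = get_local_id(0);
    uint groupid = get_group_id(0);

    local double partialSum;
    if (lid == 0)
        partialSum = 0.0;
    barrier(CLK_LOCAL_MEM_FENCE);

    localInput[lid] = input[get_global_id(0)];
    barrier(CLK_LOCAL_MEM_FENCE);

    // same in-group atomic accumulation as in the post
    atom_add_double(&partialSum, localInput[lid]);
    barrier(CLK_LOCAL_MEM_FENCE);

    // each group writes its own cell: no atomics needed across groups
    if (lid == 0)
        partialSums[groupid] = partialSum;
}
On the host side, read back nWorkGroups doubles and finish the sum on the CPU:
double finalSum = 0.0;
clEnqueueReadBuffer(command_queue, partialSumBuffer, CL_TRUE, 0,
                    nWorkGroups * sizeof(double), partialSums, 0, NULL, NULL);
for (int g = 0; g < nWorkGroups; g++)
    finalSum += partialSums[g];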

A mpi program to add the numbers from 1 to 16000000, different results

The master task first initializes an array and then distributes an equal portion of that array to the other tasks. After the other tasks receive their portion of the array, they perform an addition operation on each array element. They also maintain a sum for their portion of the array. The master task does likewise with its portion of the array. As each of the non-master tasks finishes, it sends its updated portion of the array to the master.
An MPI collective communication call is used to collect the sums maintained by each task. Finally, the master task displays selected parts of the final array and the global sum of all array elements.
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#define ARRAYSIZE 16000000
#define MASTER 0
float data[ARRAYSIZE];
int main (int argc, char *argv[])
{
int numtasks, taskid, rc, dest, source, offset, i, j, tag1,
tag2, chunksize;
float mysum, sum;
float update(int myoffset, int chunk, int myid);
MPI_Status status;
/******Initializations******/
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
if (numtasks % 4 != 0) {
printf("Quitting. Number of MPI tasks must be divisible by 4. \n");
MPI_Abort(MPI_COMM_WORLD, rc);
exit(0);
}
MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
chunksize = ARRAYSIZE / numtasks;
tag2 = 1;
tag1 = 2;
/******Master task only ******/
if (taskid == MASTER){
/* Initialize the array */
sum = 0;
for (i = 0; i < ARRAYSIZE; i++){
data[i] = i * 1.0;
sum = sum + data[i];
}
printf("Initialized array sum = %e\n", sum);
/* Send each task its portion of the array - master keeps 1st part */
offset = chunksize;
for (dest = 1; dest < numtasks; dest++){
MPI_Send(&offset, 1, MPI_INT, dest, tag1, MPI_COMM_WORLD);
MPI_Send(&data[offset], chunksize, MPI_FLOAT, dest, tag2, MPI_COMM_WORLD);
printf("Sent %d elements to task %d offset = %d\n", chunksize, dest, offset);
offset = offset + chunksize;
}
/* Master does its part of the work */
offset = 0;
mysum = update(offset, chunksize, taskid);
/* Get final sum */
MPI_Reduce(&mysum, &sum, 1, MPI_FLOAT, MPI_SUM, MASTER, MPI_COMM_WORLD);
printf("***Final sum = %e ***\n", sum);
} /* end of master section */
/******Non-master tasks only ******/
if (taskid > MASTER){
/* Receive my portion of array from the master task */
source = MASTER;
MPI_Recv(&offset, 1, MPI_INT, source, tag1, MPI_COMM_WORLD, &status);
MPI_Recv(&data[offset], chunksize, MPI_FLOAT, source, tag2, MPI_COMM_WORLD, &status);
mysum = update(offset, chunksize, taskid);
MPI_Reduce(&mysum, &sum, 1, MPI_FLOAT, MPI_SUM, MASTER, MPI_COMM_WORLD);
} /* end of non-master */
MPI_Finalize();
} /* end of main */
float update(int myoffset, int chunk, int myid){
int i;
float mysum;
/* Perform addition to each of my array elements and keep my sum */
mysum = 0;
for (i = myoffset; i < myoffset + chunk; i++){
mysum = mysum + data[i];
}
printf("Task %d mysum = %e\n", myid, mysum);
return mysum;
}
/******The result of this program is: ******/
MPI task 0 has started...
MPI task 1 has started...
MPI task 2 has started...
MPI task 3 has started...
Initialized array sum = 1.335708e+14
Sent 4000000 elements to task 1 offset= 4000000
Sent 4000000 elements to task 2 offset= 8000000
Task 1 mysum = 2.442024e+13
Sent 4000000 elements to task 3 offset= 12000000
Task 2 mysum = 3.991501e+13
Task 3 mysum = 5.809336e+13
Task 0 mysum = 7.994294e+12
Sample results:
0.000000e+00 1.000000e+00 2.000000e+00 3.000000e+00 4.000000e+00
4.000000e+06 4.000001e+06 4.000002e+06 4.000003e+06 4.000004e+06
8.000000e+06 8.000001e+06 8.000002e+06 8.000003e+06 8.000004e+06
1.200000e+07 1.200000e+07 1.200000e+07 1.200000e+07 1.200000e+07
*** Final sum= 1.304229e+14 ***
So my question is: why don't these two sums hold the same value?
You are storing the result in a 32-bit floating-point number (i.e. a float) which simply isn't enough to maintain all the accuracy you need. What you are seeing is a classic example of how rounding errors accumulate differently depending on what order you add numbers together.
If you just replace all your floats by doubles then it is OK:
mpiexec -n 4 ./arraysum
Initialized array sum = 1.280000e+14
Sent 4000000 elements to task 1 offset = 4000000
Task 1 mysum = 2.400000e+13
Sent 4000000 elements to task 2 offset = 8000000
Task 2 mysum = 4.000000e+13
Sent 4000000 elements to task 3 offset = 12000000
Task 0 mysum = 7.999998e+12
Task 3 mysum = 5.600000e+13
***Final sum = 1.280000e+14 ***
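To see the effect in isolation, here is a small standalone sketch (not taken from either program) that accumulates the same 16 million values once in float and once in double. Every float addition rounds to roughly 7 significant digits, so the float total drifts away from the exact value, while the double total stays exact here because all partial sums are integers below 2^53.
#include <stdio.h>

#define ARRAYSIZE 16000000

int main(void)
{
    float  fsum = 0.0f;
    double dsum = 0.0;

    for (int i = 0; i < ARRAYSIZE; i++) {
        fsum += (float)i;    /* rounds at every step once the running sum gets large */
        dsum += (double)i;
    }

    /* exact value is ARRAYSIZE*(ARRAYSIZE-1)/2 = 1.27999992e+14 */
    printf("float  sum = %e\n", fsum);
    printf("double sum = %e\n", dsum);
    return 0;
}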

Passing variables into kernels

I'm new to OpenCL and am reading the book OpenCL in Action. There is a simple thing I don't understand: how to pass values to kernels and get them back.
First of all, are we supposed to always pass arguments by address into kernels?
Then, I have two simple sample kernels below. In the first one, output is a pointer parameter, but the body of the kernel never uses *output. In the second kernel, s1 and s2 are also pointer parameters, yet the values are assigned to *s1 and *s2 rather than to s1 and s2. Can anyone tell me why in the first kernel the value is assigned through output[index] (and not *output), while in the second kernel it is assigned to *s1 and *s2 (and not s1 and s2)?
I looked at many resources to find a general way to pass values to and return them from kernels, but I couldn't find any general rule.
Here are the kernels:
1:
__kernel void id_check(__global float *output) {
    /* Access work-item/work-group information */
    size_t global_id_0   = get_global_id(0);
    size_t global_id_1   = get_global_id(1);
    size_t global_size_0 = get_global_size(0);
    size_t offset_0      = get_global_offset(0);
    size_t offset_1      = get_global_offset(1);
    size_t local_id_0    = get_local_id(0);
    size_t local_id_1    = get_local_id(1);

    /* Determine array index */
    int index_0 = global_id_0 - offset_0;
    int index_1 = global_id_1 - offset_1;
    int index = index_1 * global_size_0 + index_0;

    /* Set float data */
    float f = global_id_0 * 10.0f + global_id_1 * 1.0f;
    f += local_id_0 * 0.1f + local_id_1 * 0.01f;

    output[index] = f;
}
2:
__kernel void select_test(__global float4 *s1,
                          __global uchar2 *s2) {
    /* Execute select */
    int4 mask1 = (int4)(-1, 0, -1, 0);
    float4 input1 = (float4)(0.25f, 0.5f, 0.75f, 1.0f);
    float4 input2 = (float4)(1.25f, 1.5f, 1.75f, 2.0f);
    *s1 = select(input1, input2, mask1);

    /* Execute bitselect */
    uchar2 mask2 = (uchar2)(0xAA, 0x55);
    uchar2 input3 = (uchar2)(0x0F, 0x0F);
    uchar2 input4 = (uchar2)(0x33, 0x33);
    *s2 = bitselect(input3, input4, mask2);
}
Your problem is not with OpenCL, it is with the C language itself; what you are asking is a very basic question, so please read up on how C pointers work.
When you have a pointer (output, s1, s2), you can access it in several ways: output refers to the pointer itself (the address), *output refers to the value of the first element (or of the single element the pointer points to), and output[i] refers to the value of the ith element.
*output and output[0] are the same, as are *(output+1) and output[1].
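A tiny plain-C illustration of the same point (just a sketch, nothing OpenCL-specific):
#include <stdio.h>

int main(void)
{
    float data[3] = {1.0f, 2.0f, 3.0f};
    float *output = data;              /* a pointer, like a __global float * kernel argument */

    printf("%p\n", (void *)output);    /* the address itself          */
    printf("%f\n", *output);           /* value of the first element  */
    printf("%f\n", output[0]);         /* same thing as *output       */
    printf("%f\n", *(output + 1));     /* same thing as output[1]     */

    *output   = 10.0f;                 /* writes data[0] */
    output[2] = 30.0f;                 /* writes data[2] */
    return 0;
}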

OpenCL global_work_size misunderstanding

I'm trying to understand a simple OpenCL example, which is vector addition. The kernel is the following:
__kernel void addVec(__global double* a, __global double* b, __global double* c)
{
    size_t id = get_global_id(0);
    c[id] = a[id] + b[id];
}
For example, my input arrays have a size of 1 million elements each.
In my host program, I set global_work_size to exactly the size of the input arrays (1 million).
But when I set it to a smaller value, for example 1000, this kernel still seems to work!
I don't understand why global_work_size can be smaller than the problem size while the OpenCL program apparently still computes every element of the input arrays.
Could someone clarify this?
EDIT: here is the code where I copy the data:
size_t arraySize = 1000000;
const size_t global_work_size[1] = {512};
double *host_a = malloc(arraySize*sizeof(double));
double *host_b = malloc(arraySize*sizeof(double));
double *host_c = calloc(arraySize, sizeof(double));
...
// Create the input and output arrays in device memory for our calculation
device_a = clCreateBuffer(context, CL_MEM_READ_ONLY, arraySize*sizeof(double), NULL, NULL);
device_b = clCreateBuffer(context, CL_MEM_READ_ONLY, arraySize*sizeof(double), NULL, NULL);
device_c = clCreateBuffer(context, CL_MEM_WRITE_ONLY, arraySize*sizeof(double), NULL, NULL);
...
// Copy data set into the input array in device memory. [host --> device]
status = clEnqueueWriteBuffer(command_queue, device_a, CL_TRUE, 0, arraySize*sizeof(double), host_a, 0, NULL, NULL);
status |= clEnqueueWriteBuffer(command_queue, device_b, CL_TRUE, 0, arraySize*sizeof(double), host_b, 0, NULL, NULL);
...
// Copy-back the results from the device [host <-- device]
clEnqueueReadBuffer(command_queue, device_c, CL_TRUE, 0, arraySize*sizeof(double), host_c, 0, NULL, NULL );
...
printf("checking result validity ...\n");
for (size_t i=0; i<arraySize; ++i)
if(host_c[i] - 1 > 1e-6) // the array is supposed to be 1 everywhere
{
printf("*** ERROR! Invalid results ! host_c[%zi]=%.9lf\n", i, host_c[i]);
break;
}
Thanks
Your test condition doesn't look right; it passes for any value smaller than 1. It should be something like this:
for (size_t i=0; i<arraySize; ++i){
    cl_double val = host_c[i] - 1; // the array is supposed to be 1 everywhere
    if((val > 1e-6) || (val < -1e-6))
    {
        printf("*** ERROR! Invalid results ! host_c[%zi]=%.9lf\n", i, host_c[i]);
        break;
    }
}
Uninitialized values on the GPU are likely to be 0, and therefore pass your check.
Additionally, remember that if you run the program once with the full size, subsequent runs may still read back the properly processed data (even if you close and reopen the app), since GPU memory is not necessarily cleared when a buffer is created or destroyed.
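One way to make a too-small global_work_size actually visible (a sketch of my own, assuming an OpenCL 1.2 runtime; the variable names come from the question) is to fill device_c with a sentinel value before launching, so any element the kernel never touches fails the check:
// Fill the output buffer with a value the kernel can never produce (requires OpenCL 1.2).
cl_double sentinel = -999.0;
status = clEnqueueFillBuffer(command_queue, device_c, &sentinel, sizeof(sentinel),
                             0, arraySize * sizeof(double), 0, NULL, NULL);

// ... enqueue addVec and read back host_c as before ...

// Any element still equal to -999.0 was never written by the kernel.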
