I'm trying to convert code written in CUDA to OpenCL and have run into some trouble. My final goal is to implement the code on an Odroid XU3 board with a Mali T628 GPU.
In order to simplify the transition and save time debugging OpenCL kernels, I've taken the following steps:
Implement the code in CUDA and test it on an NVIDIA GeForce 760
Implement the code in OpenCL and test it on an NVIDIA GeForce 760
Test the OpenCL code on an Odroid XU3 board with a Mali T628 GPU.
I know that different architectures may have different optimizations, but that isn't my main concern for now. I managed to run the OpenCL code on my NVIDIA GPU with no apparent issues, but I keep getting strange errors when trying to run the code on the Odroid board. I know that different architectures handle exceptions etc. differently, but I'm not sure how to solve that.
Since the OpenCL code works on my NVIDIA GPU, I assume that I made the correct transition from threads/blocks to work-items/work-groups.
I already fixed several issues related to the cl_device_max_work_group_size limit, so that can't be the cause.
When running the code I'm getting a "CL_OUT_OF_RESOURCES" error. I've narrowed the cause down to two lines in the code, but I'm not sure how to fix them.
The error is caused by the following lines:
lowestDist[pixelNum] = partialDiffSumTemp;
Both variables are private variables of the kernel, and therefore I don't see any potential issue.
d_disparityLeft[globalMemIdx + TILE_BOUNDARY_WIDTH - WINDOW_RADIUS + 0] = bestDisparity[0];
Here I guess the cause is an out-of-bounds access, but I'm not sure how to debug it, since the original code doesn't have this issue.
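For reference, this is how I've been checking the resource limits on the host; a hedged sketch (kernel and device are the usual OpenCL handles):
size_t maxWg;
cl_ulong localMem;
/* largest work-group this kernel can launch with on this device */
clGetKernelWorkGroupInfo(kernel, device, CL_KERNEL_WORK_GROUP_SIZE,
                         sizeof(maxWg), &maxWg, NULL);
/* __local memory the compiled kernel consumes */
clGetKernelWorkGroupInfo(kernel, device, CL_KERNEL_LOCAL_MEM_SIZE,
                         sizeof(localMem), &localMem, NULL);
printf("max WG size: %zu, local mem used: %llu bytes\n",
       maxWg, (unsigned long long)localMem);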
My kernel code is:
#define ALIGN_IMAGE_WIDTH 64
#define NUM_PIXEL_PER_THREAD 4
#define MIN_DISPARITY 0
#define MAX_DISPARITY 55
#define WINDOW_SIZE 19
#define WINDOW_RADIUS (WINDOW_SIZE / 2)
#define TILE_SHARED_MEM_WIDTH 96
#define TILE_SHARED_MEM_HEIGHT 32
#define TILE_BOUNDARY_WIDTH 64
#define TILE_BOUNDARY_HEIGHT (2 * WINDOW_RADIUS)
#define BLOCK_WIDTH (TILE_SHARED_MEM_WIDTH - TILE_BOUNDARY_WIDTH)
#define BLOCK_HEIGHT (TILE_SHARED_MEM_HEIGHT - TILE_BOUNDARY_HEIGHT)
#define THREAD_NUM_WIDTH 8
#define THREADS_NUM_HEIGHT TILE_SHARED_MEM_HEIGHT
//TODO fix input arguments
__kernel void hello_kernel( __global unsigned char* d_leftImage,
__global unsigned char* d_rightImage,
__global float* d_disparityLeft) {
int blockX = get_group_id(0);
int blockY = get_group_id(1);
int threadX = get_local_id(0);
int threadY = get_local_id(1);
__local unsigned char leftImage [TILE_SHARED_MEM_WIDTH * TILE_SHARED_MEM_HEIGHT];
__local unsigned char rightImage [TILE_SHARED_MEM_WIDTH * TILE_SHARED_MEM_HEIGHT];
__local unsigned int partialDiffSum [BLOCK_WIDTH * TILE_SHARED_MEM_HEIGHT];
int alignedImageWidth = 640;
int partialDiffSumTemp;
float bestDisparity[4] = {0,0,0,0};
int lowestDist[4];
lowestDist[0] = 214748364;
lowestDist[1] = 214748364;
lowestDist[2] = 214748364;
lowestDist[3] = 214748364;
// Read image blocks into shared memory. read is done at 32bit integers on a uchar array. each thread reads 3 integers(12byte) 96/12=8threads
int sharedMemIdx = threadY * TILE_SHARED_MEM_WIDTH + 4 * threadX;
int globalMemIdx = (blockY * BLOCK_HEIGHT + threadY) * alignedImageWidth + blockX * BLOCK_WIDTH + 4 * threadX;
for (int i = 0; i < 4; i++) {
leftImage [sharedMemIdx + i ] = d_leftImage [globalMemIdx + i];
leftImage [sharedMemIdx + 4 * THREAD_NUM_WIDTH + i ] = d_leftImage [globalMemIdx + 4 * THREAD_NUM_WIDTH + i];
leftImage [sharedMemIdx + 8 * THREAD_NUM_WIDTH + i ] = d_leftImage [globalMemIdx + 8 * THREAD_NUM_WIDTH + i];
rightImage[sharedMemIdx + i ] = d_rightImage[globalMemIdx + i];
rightImage[sharedMemIdx + 4 * THREAD_NUM_WIDTH + i ] = d_rightImage[globalMemIdx + 4 * THREAD_NUM_WIDTH + i];
rightImage[sharedMemIdx + 8 * THREAD_NUM_WIDTH + i ] = d_rightImage[globalMemIdx + 8 * THREAD_NUM_WIDTH + i];
}
barrier(CLK_LOCAL_MEM_FENCE);
int imageIdx = sharedMemIdx + TILE_BOUNDARY_WIDTH - WINDOW_RADIUS;
int partialSumIdx = threadY * BLOCK_WIDTH + 4 * threadX;
for(int dispLevel = MIN_DISPARITY; dispLevel <= MAX_DISPARITY; dispLevel++) {
// horizontal partial sum
partialDiffSumTemp = 0;
#pragma unroll
for(int i = imageIdx - WINDOW_RADIUS; i <= imageIdx + WINDOW_RADIUS; i++) {
//partialDiffSumTemp += calcDiff(leftImage [i], rightImage[i - dispLevel]);
partialDiffSumTemp += abs(leftImage[i] - rightImage[i - dispLevel]);
}
partialDiffSum[partialSumIdx] = partialDiffSumTemp;
barrier(CLK_LOCAL_MEM_FENCE);
for (int pixelNum = 1, i = imageIdx - WINDOW_RADIUS; pixelNum < NUM_PIXEL_PER_THREAD; pixelNum++, i++) {
partialDiffSum[partialSumIdx + pixelNum] = partialDiffSum[partialSumIdx + pixelNum - 1] +
abs(leftImage[i + WINDOW_SIZE] - rightImage[i - dispLevel + WINDOW_SIZE]) -
abs(leftImage[i] - rightImage[i - dispLevel]);
}
barrier(CLK_LOCAL_MEM_FENCE);
// vertical sum
if(threadY >= WINDOW_RADIUS && threadY < TILE_SHARED_MEM_HEIGHT - WINDOW_RADIUS) {
for (int pixelNum = 0; pixelNum < NUM_PIXEL_PER_THREAD; pixelNum++) {
int rowIdx = partialSumIdx - WINDOW_RADIUS * BLOCK_WIDTH;
partialDiffSumTemp = 0;
for(int i = -WINDOW_RADIUS; i <= WINDOW_RADIUS; i++,rowIdx += BLOCK_WIDTH) {
partialDiffSumTemp += partialDiffSum[rowIdx + pixelNum];
}
if (partialDiffSumTemp < lowestDist[pixelNum]) {
lowestDist[pixelNum] = partialDiffSumTemp;
bestDisparity[pixelNum] = dispLevel - 1;
}
}
}
}
if (threadY >= WINDOW_RADIUS && threadY < TILE_SHARED_MEM_HEIGHT - WINDOW_RADIUS && blockY < 32) {
d_disparityLeft[globalMemIdx + TILE_BOUNDARY_WIDTH - WINDOW_RADIUS + 0] = bestDisparity[0];
d_disparityLeft[globalMemIdx + TILE_BOUNDARY_WIDTH - WINDOW_RADIUS + 1] = bestDisparity[1];
d_disparityLeft[globalMemIdx + TILE_BOUNDARY_WIDTH - WINDOW_RADIUS + 2] = bestDisparity[2];
d_disparityLeft[globalMemIdx + TILE_BOUNDARY_WIDTH - WINDOW_RADIUS + 3] = bestDisparity[3];
}
}
Thanks for all the help
Yuval
In my experience, NVIDIA GPUs do not always crash on out-of-bounds accesses, and many times the kernel still returns the expected results.
Use printf to check the indexes. If you have an NVIDIA OpenCL 1.2 driver installed, printf should be available as a core function. As far as I checked, the Mali-T628 uses OpenCL 1.1, so check whether printf is available as a vendor extension. You can also run your kernel on an AMD/Intel CPU, where printf is available (OpenCL 1.2 / 2.0).
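For example, a hedged sketch of such a check (bufferLen is an illustrative bound you would pass in; on the Mali's OpenCL 1.1 driver this relies on the cl_arm_printf vendor extension being available):
#pragma OPENCL EXTENSION cl_arm_printf : enable
/* inside the kernel, just before the suspicious write: */
if (idx < 0 || idx >= bufferLen)
    printf("bad index %d at work-item (%d,%d)\n",
           idx, (int)get_global_id(0), (int)get_global_id(1));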
An alternative way of checking the indexes is to pass a __global int* debug array where you store the indexes and then check them on the host. Make sure to allocate it big enough so that an out-of-bounds index will still be recorded.
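A minimal sketch of that, assuming a hypothetical extra argument named dbg with one slot per work-item:
__kernel void hello_kernel(__global unsigned char* d_leftImage,
                           __global unsigned char* d_rightImage,
                           __global float* d_disparityLeft,
                           __global int* dbg) /* debug output, checked on host */
{
    /* ... existing kernel code ... */
    int idx = globalMemIdx + TILE_BOUNDARY_WIDTH - WINDOW_RADIUS;
    /* record the index this work-item is about to write */
    dbg[get_global_id(1) * get_global_size(0) + get_global_id(0)] = idx;
}
Then read dbg back with clEnqueueReadBuffer and scan for values outside the valid range of d_disparityLeft.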
I am working to implement CUDA for the following code. The first version is written serially and the second version uses CUDA. I am sure about the results of the serial version. I expected the second version, to which I have added CUDA functionality, to give the same result, but it seems that the kernel function does not do anything and I get back the initial values of u and v. Due to my lack of experience the bug may be obvious, but I cannot figure it out. Also, please do not recommend using a flattened array, because the indexing in that code is harder for me to understand.
First version:
#include <fstream>
#include <iostream>
#include <math.h>
#include <vector>
#include <chrono>
#include <omp.h>
using namespace std;
const int M = 1024;
const int N = 1024;
const double A = 1;
const double B = 3;
const double Du = 5 * pow(10, -5);
const double Dv = 5 * pow(10, -6);
const int Max_Itr = 1000;
const double h = 1.0 / static_cast<double>(M - 1);
const double delta_t = 0.0025;
const double s1 = (Du * delta_t) / pow(h, 2);
const double s2 = (Dv * delta_t) / pow(h, 2);
int main() {
double** u=new double* [M];
double** v=new double* [M];
for (int i=0; i<M; i++){
u[i]=new double [N];
v[i]=new double [N];
}
for (int j = 0; j < M; j++) {
for (int i = 0; i < N;i++) {
u[i][j]=0.02;
v[i][j]=0.02;
}
}
for (int k = 1; k < Max_Itr; k++) {
for (int i = 1; i < N - 1; i++) {
for (int j = 1; j < M - 1; j++) {
u[i][j] = ((1 - (4 * s1)) * u[i][j]) + (s1 * (u[i + 1][j] + u[i - 1][j] + u[i][j + 1] + u[i][j - 1])) +
(A * delta_t) + (delta_t * pow(u[i][j], 2) * v[i][j]) - (delta_t * (B + 1) * u[i][j]);
v[i][j] = ((1 - (4 * s2)) * v[i][j]) + (s2 * (v[i + 1][j] + v[i - 1][j] + v[i][j + 1] + v[i][j - 1])) + (B * delta_t * u[i][j])
- (delta_t * pow(u[i][j], 2) * v[i][j]);
}
}
}
cout<<"u: "<<u[512][512]<<" v: "<<v[512][512]<<endl;
return 0;
}
Second version:
#include <fstream>
#include <iostream>
#include <math.h>
#include <vector>
using namespace std;
#define M 1024
#define N 1024
__global__ void my_kernel(double** v, double** u){
int i= blockIdx.y * blockDim.y + threadIdx.y;
int j= blockIdx.x * blockDim.x + threadIdx.x;
double A = 1;
double B = 3;
int Max_Itr = 1000;
double delta_t = 0.0025;
double Du = 5 * powf(10, -5);
double Dv = 5 * powf(10, -6);
double h = 1.0 / (M - 1);
double s1 = (Du * delta_t) / powf(h, 2);
double s2 = (Dv * delta_t) / powf(h, 2);
for (int k = 1; k < Max_Itr; k++) {
u[i][j] = ((1 - (4 * s1))
* u[i][j]) + (s1 * (u[i + 1][j] + u[i - 1][j] + u[i][j + 1] + u[i][j - 1])) +
(A * delta_t) + (delta_t * pow(u[i][j], 2) * v[i][j]) - (delta_t * (B + 1) * u[i][j]);
v[i][j] = ((1 - (4 * s2))
* v[i][j]) + (s2 * (v[i + 1][j] + v[i - 1][j] + v[i][j + 1] + v[i][j - 1])) + (B * delta_t * u[i][j])
- (delta_t * pow(u[i][j], 2) * v[i][j]);
__syncthreads();
}
}
int main() {
double** u=new double* [M];
double** v=new double* [M];
for (int i=0; i<M; i++){
u[i]=new double [N];
v[i]=new double [N];
}
dim3 blocks(32,32);
dim3 grids(M/32 +1, N/32 + 1);
for (int j = 0; j < M; j++) {
for (int i = 0; i < N;i++) {
u[i][j]=0.02;
v[i][j]=0.02;
}
}
double **u_d, **v_d;
int d_size = N * M * sizeof(double);
cudaMalloc(&u_d, d_size);
cudaMalloc(&v_d, d_size);
cudaMemcpy(u_d, u, d_size, cudaMemcpyHostToDevice);
cudaMemcpy(v_d, v, d_size, cudaMemcpyHostToDevice);
my_kernel<<<grids, blocks>>> (v_d,u_d);
cudaDeviceSynchronize();
cudaMemcpy(v, v_d, d_size, cudaMemcpyDeviceToHost);
cudaMemcpy(u, u_d, d_size, cudaMemcpyDeviceToHost);
cout<<"u: "<<u[512][512]<<" v: "<<v[512][512]<<endl;
return 0;
}
What I expect from the second version is:
u: 0.2815 v: 1.7581
Your two-dimensional array - in the first version of the program - is implemented using an array of pointers, each of which points to a separately-allocated array of double values.
In your second version, you are using the same pointer-to-pointer-to-double type, but you're not allocating any space for the actual data, just for the array of pointers (and you're not copying any of the data to the GPU, just the pointers, which are useless to copy anyway, since they point to host-side memory).
What is most likely happening is that your kernel attempts to access memory at an invalid address, and its execution is aborted.
If you were to properly check for errors, as #njuffa noted, you would know that is what happened.
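A minimal sketch of that kind of checking (the CUDA_CHECK name is illustrative):
#include <cstdio>
#include <cstdlib>

#define CUDA_CHECK(call)                                              \
    do {                                                              \
        cudaError_t err = (call);                                     \
        if (err != cudaSuccess) {                                     \
            fprintf(stderr, "CUDA error %s at %s:%d\n",               \
                    cudaGetErrorString(err), __FILE__, __LINE__);     \
            exit(1);                                                  \
        }                                                             \
    } while (0)

// Wrap every runtime call and the launch itself:
CUDA_CHECK(cudaMemcpy(u_d, u, d_size, cudaMemcpyHostToDevice));
my_kernel<<<grids, blocks>>>(v_d, u_d);
CUDA_CHECK(cudaGetLastError());      // catches launch-configuration errors
CUDA_CHECK(cudaDeviceSynchronize()); // catches faults during execution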
Now, you could avoid having to make multiple memory allocations if you were to use a single data area instead of separate allocations for each second-dimension 1D array; and that is true both for the first and the second version of your program. That would not quite be array flattening. See an explanation of how to do this (C-language-style) on this page.
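A hedged sketch of that single-data-area idea on the device side (u_data and u_rows are illustrative names); it needs only two allocations yet keeps the u[i][j] syntax in the kernel:
#include <vector>

double  *u_data; // one contiguous device block holding all M*N doubles
double **u_rows; // device array of M row pointers
cudaMalloc(&u_data, M * N * sizeof(double));
cudaMalloc(&u_rows, M * sizeof(double*));

// Build the row pointers on the host (each points into device memory),
// then copy the pointer table itself to the device.
std::vector<double*> h_rows(M);
for (int i = 0; i < M; i++)
    h_rows[i] = u_data + i * N; // row i starts at offset i*N
cudaMemcpy(u_rows, h_rows.data(), M * sizeof(double*),
           cudaMemcpyHostToDevice);
The host-side arrays must likewise live in one contiguous block so that a single cudaMemcpy of M * N * sizeof(double) into u_data transfers all the data. The kernel then takes u_rows as its double** argument and dereferences u[i][j] as before - at the cost noted next.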
Note, however, that double-dereferencing, which you insist on performing in your kernel, is likely slowing it down significantly.
I have several similar kernels that generate random data and store it in global memory. I'm always using the same algorithm to randomize, but due to variable scope issues (I need to keep track of data) I fail to avoid severe code duplication.
Are there any ways to avoid this? Generating random data in OpenCL seems a fairly standard task, but it goes against any good coding standards to have this level of code duplication. For example, here are two of my kernels:
////////////////////////////////////////////////////////////////////////////////
// OpenCL Kernel for Mersenne Twister RNG -- applied to AWGN channel
////////////////////////////////////////////////////////////////////////////////
__kernel void MersenneTwisterAWGN(__global double* d_Rand,
__global int* seeds,
__global long* inputcw,
int nPerRng, float sigma)
{
int globalID = get_global_id(0);
double c = 2.0/(sigma*sigma);
int iState, iState1, iStateM, iOut;
unsigned int mti, mti1, mtiM, x;
unsigned int mt[MT_NN];
//Initialize current state
mt[0] = seeds[globalID];
for (iState = 1; iState < MT_NN; iState++)
mt[iState] = (1812433253U*(mt[iState-1]^(mt[iState-1]>>30))+iState) & MT_WMASK;
iState = 0;
mti1 = mt[0];
for (iOut = 0; iOut < nPerRng; iOut=iOut+2) {
iState1 = iState + 1;
iStateM = iState + MT_MM;
if(iState1 >= MT_NN) iState1 -= MT_NN;
if(iStateM >= MT_NN) iStateM -= MT_NN;
mti = mti1;
mti1 = mt[iState1];
mtiM = mt[iStateM];
// MT recurrence
x = (mti & MT_UMASK) | (mti1 & MT_LMASK);
x = mtiM ^ (x >> 1) ^ ((x & 1) ? matrix_a : 0);
mt[iState] = x;
iState = iState1;
//Tempering transformation
x ^= (x >> MT_SHIFT0);
x ^= (x << MT_SHIFTB) & mask_b;
x ^= (x << MT_SHIFTC) & mask_c;
x ^= (x >> MT_SHIFT1);
double u1 = ((double)x + 1.0f) / 4294967296.0f;
iState1 = iState + 1;
iStateM = iState + MT_MM;
if(iState1 >= MT_NN) iState1 -= MT_NN;
if(iStateM >= MT_NN) iStateM -= MT_NN;
mti = mti1;
mti1 = mt[iState1];
mtiM = mt[iStateM];
// MT recurrence
x = (mti & MT_UMASK) | (mti1 & MT_LMASK);
x = mtiM ^ (x >> 1) ^ ((x & 1) ? matrix_a : 0);
mt[iState] = x;
iState = iState1;
//Tempering transformation
x ^= (x >> MT_SHIFT0);
x ^= (x << MT_SHIFTB) & mask_b;
x ^= (x << MT_SHIFTC) & mask_c;
x ^= (x >> MT_SHIFT1);
double u2 = ((double)x + 1.0f) / 4294967296.0f;
double r = sqrt(-2.0f * log(u1));
double phi = 2 * PI * u2;
u1 = r * cos(phi);
u1 = inputcw[iOut]+sigma*u1;
u1=1/(1+exp(-c*u1));
d_Rand[globalID * nPerRng + iOut]=log((1-u1)/u1);
if (iOut!=nPerRng-1) {
u2 = r * sin(phi);
u2 = inputcw[iOut+1]+sigma*u2;
u2=1/(1+exp(-c*u2));
u2=log((1-u2)/u2);
d_Rand[globalID * nPerRng + iOut+1]=u2;
}
}
}
and
////////////////////////////////////////////////////////////////////////////////
// OpenCL Kernel for Mersenne Twister RNG -- applied to BSC channel
////////////////////////////////////////////////////////////////////////////////
__kernel void MersenneTwisterBSC(__global double* d_Rand,
__global int* seeds,
__global long* inputcw,
int nPerRng, float flipProb)
{
int globalID = get_global_id(0);
int iState, iState1, iStateM, iOut;
unsigned int mti, mti1, mtiM, x;
unsigned int mt[MT_NN];
//Initialize current state
mt[0] = seeds[globalID];
for (iState = 1; iState < MT_NN; iState++)
mt[iState] = (1812433253U*(mt[iState-1]^(mt[iState-1]>>30))+iState) & MT_WMASK;
iState = 0;
mti1 = mt[0];
for (iOut = 0; iOut < nPerRng; iOut=iOut+1) {
iState1 = iState + 1;
iStateM = iState + MT_MM;
if(iState1 >= MT_NN) iState1 -= MT_NN;
if(iStateM >= MT_NN) iStateM -= MT_NN;
mti = mti1;
mti1 = mt[iState1];
mtiM = mt[iStateM];
// MT recurrence
x = (mti & MT_UMASK) | (mti1 & MT_LMASK);
x = mtiM ^ (x >> 1) ^ ((x & 1) ? matrix_a : 0);
mt[iState] = x;
iState = iState1;
//Tempering transformation
x ^= (x >> MT_SHIFT0);
x ^= (x << MT_SHIFTB) & mask_b;
x ^= (x << MT_SHIFTC) & mask_c;
x ^= (x >> MT_SHIFT1);
double c = log((1-flipProb)/flipProb);
double u = ((double)x + 1.0f) / 4294967296.0f;
u = (2*isless(u,flipProb)-1)*inputcw[iOut]*c;
d_Rand[globalID * nPerRng + iOut]=u;
}
}
Are there any ways, tricks or methods to avoid this? Subroutines seem unable to make proper use of the variables (especially mt), so I didn't manage to cut the code down the way other languages would allow.
Or should I just accept this as a necessary evil in OpenCL and keep managing 10 different kernels this way?
At Khronos' site, it says
OpenCL programs may also contain auxiliary functions and constant data that can be used by __kernel functions.
An example that generates a random number between 0.0f and 1.0f per thread:
Core function to iterate a seed:
uint wang_hash(uint seed)
{
seed = (seed ^ 61) ^ (seed >> 16);
seed *= 9;
seed = seed ^ (seed >> 4);
seed *= 0x27d4eb2d;
seed = seed ^ (seed >> 15);
return seed;
}
Initialization and iteration of each thread's seed:
// id=thread id, rnd=seed array
void wang_rnd_init(__global unsigned int * rnd,int id)
{
uint maxint=0;
maxint--; // could be a 0xFFFFFFFF
uint rndint=wang_hash(id);
rnd[id]=rndint;
}
// id=thread id, rnd=seed array
float wang_rnd(__global unsigned int * rnd,int id)
{
uint maxint=0;
maxint--; // could be a 0xFFFFFFFF
uint rndint=wang_hash(rnd[id]);
rnd[id]=rndint;
return ((float)rndint)/(float)maxint;
}
Usage in a random grayscale color pixel generator kernel:
__kernel void rnd_1(__global unsigned int * rnd, __global int *rgba)
{
int id=get_global_id(0);
float rgba_register=wang_rnd(rnd,id);
rgba[id] = ((int)(rgba_register * 255) << 24) | ((int)(rgba_register * 255) << 16) | ((int)(rgba_register * 255) << 8) | ((int)(rgba_register * 255));
}
and wang_rnd() can be used in other kernels without defining it twice, as long as they are in the same compiled context - that is, all relevant kernels and functions are put in the same source file (or string) to be compiled.
Auxiliary functions are not limited to registers and global memory. They can take local and constant memory parameters too. Since they work mainly with device-side memory, they can also take and return structs.
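Applied to the question's kernels, a hedged sketch of that struct idea (mt_state and mt_next are illustrative names; MT_NN, MT_MM, matrix_a and the mask/shift macros are the asker's). The recurrence duplicated across both kernels collapses into one auxiliary function:
typedef struct {
    unsigned int mt[MT_NN]; // twister state kept in private memory
    int iState;
    unsigned int mti1;
} mt_state;

// One tempered 32-bit draw; mirrors the block duplicated in both kernels.
unsigned int mt_next(mt_state* s)
{
    int iState1 = s->iState + 1;
    int iStateM = s->iState + MT_MM;
    if (iState1 >= MT_NN) iState1 -= MT_NN;
    if (iStateM >= MT_NN) iStateM -= MT_NN;
    unsigned int mti  = s->mti1;
    s->mti1 = s->mt[iState1];
    unsigned int mtiM = s->mt[iStateM];
    // MT recurrence
    unsigned int x = (mti & MT_UMASK) | (s->mti1 & MT_LMASK);
    x = mtiM ^ (x >> 1) ^ ((x & 1) ? matrix_a : 0);
    s->mt[s->iState] = x;
    s->iState = iState1;
    // Tempering transformation
    x ^= (x >> MT_SHIFT0);
    x ^= (x << MT_SHIFTB) & mask_b;
    x ^= (x << MT_SHIFTC) & mask_c;
    x ^= (x >> MT_SHIFT1);
    return x;
}
Each kernel then shrinks to seeding plus its channel-specific math, e.g.:
mt_state s;
s.mt[0] = seeds[globalID];
for (int i = 1; i < MT_NN; i++)
    s.mt[i] = (1812433253U * (s.mt[i-1] ^ (s.mt[i-1] >> 30)) + i) & MT_WMASK;
s.iState = 0;
s.mti1 = s.mt[0];
double u1 = ((double)mt_next(&s) + 1.0) / 4294967296.0; // uniform in (0,1]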
I am very new to OpenCL and am going through the Altera OpenCL examples.
In their matrix multiplication example, they use the concept of blocks, where the dimensions of the input matrices are a multiple of the block size. Here's the code:
void matrixMult( // Input and output matrices
__global float *restrict C,
__global float *A,
__global float *B,
// Widths of matrices.
int A_width, int B_width)
{
// Local storage for a block of input matrices A and B
__local float A_local[BLOCK_SIZE][BLOCK_SIZE];
__local float B_local[BLOCK_SIZE][BLOCK_SIZE];
// Block index
int block_x = get_group_id(0);
int block_y = get_group_id(1);
// Local ID index (offset within a block)
int local_x = get_local_id(0);
int local_y = get_local_id(1);
// Compute loop bounds
int a_start = A_width * BLOCK_SIZE * block_y;
int a_end = a_start + A_width - 1;
int b_start = BLOCK_SIZE * block_x;
float running_sum = 0.0f;
for (int a = a_start, b = b_start; a <= a_end; a += BLOCK_SIZE, b += (BLOCK_SIZE * B_width))
{
A_local[local_y][local_x] = A[a + A_width * local_y + local_x];
B_local[local_x][local_y] = B[b + B_width * local_y + local_x];
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
running_sum += A_local[local_y][k] * B_local[local_x][k];
}
}
// Store result in matrix C
C[get_global_id(1) * get_global_size(0) + get_global_id(0)] = running_sum;
}
Assume the block size is 2. Then block_x and block_y are both 0, and local_x and local_y are both 0.
Then A_local[0][0] would be A[0] and B_local[0][0] would be B[0].
Sizes of A_local and B_local are 4 elements each.
In that case, how would A_local and B_local access other elements of the block in that iteration?
Also would separate threads/cores be assigned for each local_x and local_y?
There is definitely a barrier missing in your code sample. The outer for loop as you have it will only produce correct results if all work items are executing instructions in lockstep fashion, thus guaranteeing the local memory is populated before the for k loop.
Maybe this is the case for Altera and other FPGAs, but this is not correct for CPUs and GPUs.
You should add barrier(CLK_LOCAL_MEM_FENCE); if you are getting unexpected results, or want to be compatible with other type of hardware.
float running_sum = 0.0f;
for (int a = a_start, b = b_start; a <= a_end; a += BLOCK_SIZE, b += (BLOCK_SIZE * B_width))
{
A_local[local_y][local_x] = A[a + A_width * local_y + local_x];
B_local[local_x][local_y] = B[b + B_width * local_y + local_x];
barrier(CLK_LOCAL_MEM_FENCE);
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
running_sum += A_local[local_y][k] * B_local[local_x][k];
}
}
A_local and B_local are both shared by all work items of the work group, so all their elements are loaded in parallel (by all work items of the work group) at each step of the encompassing for loop.
Then each work item uses some of the loaded values (not necessarily the values the work item loaded itself) to do its share of the computation.
And finally, the work item stores its individual result into the global output matrix.
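For the block size 2 case asked about above: yes, one work item is assigned per (local_x, local_y) pair, and the four of them together fill the tile. A worked trace of the same load line:
// Each work item executes A_local[local_y][local_x] = A[a + A_width*local_y + local_x]:
//   (local_x, local_y) = (0,0) -> A_local[0][0] = A[a]
//   (local_x, local_y) = (1,0) -> A_local[0][1] = A[a + 1]
//   (local_x, local_y) = (0,1) -> A_local[1][0] = A[a + A_width]
//   (local_x, local_y) = (1,1) -> A_local[1][1] = A[a + A_width + 1]
// After a barrier, any work item may then read any of the four elements.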
It is a classical tiled implementation of a matrix-matrix multiplication. However, I'm really surprised not to see any sort of call to a memory synchronisation function, such as work_group_barrier(CLK_LOCAL_MEM_FENCE) between the load of A_local and B_local and their use in the k loop... But I might very well have overlooked something here.
I'm trying to implement the Hough transform for circles in OpenCL, but I've encountered a really weird problem. Every time I run the Hough kernel, I end up with a slightly different accumulator, even though the parameters are the same and the accumulator is always a freshly zeroed table (ex. http://imgur.com/a/VcIw1). My kernel code is as below:
#define BLOCK_LEN 256
__kernel void HoughCirclesKernel(
__global int* A,
__global int* imgData,
__global int* _width,
__global int* _height,
__global int* r
)
{
__local int imgBuff[BLOCK_LEN];
int localThreadIndex = get_local_id(0); //threadIdx.x
int globalThreadIndex = get_local_id(0) + get_group_id(0) * BLOCK_LEN; //threadIdx.x + blockIdx.x * Block_Len
int width = *_width; int height = *_height;
int radius = *r;
A[globalThreadIndex] = 0;
barrier(CLK_GLOBAL_MEM_FENCE);
if(globalThreadIndex < width*height)
{
imgBuff[localThreadIndex] = imgData[globalThreadIndex];
barrier(CLK_LOCAL_MEM_FENCE);
if(imgBuff[localThreadIndex] > 0)
{
float s1, c1;
for(int i = 0; i<180; i++)
{
s1 = sincos(i, &c1);
int centerX = globalThreadIndex % width + radius * c1;
int centerY = ((globalThreadIndex - centerX) / height) + radius * s1;
if(centerX < width && centerY < height)
atomic_inc(A + centerX + centerY * width);
}
}
}
barrier(CLK_GLOBAL_MEM_FENCE);
}
Could this be the fault of how I am incrementing the accumulator?
if(globalThreadIndex < width*height)
{
imgBuff[localThreadIndex] = imgData[globalThreadIndex];
barrier(CLK_LOCAL_MEM_FENCE);
...
}
This is undefined behaviour, since there is a barrier inside a branch.
All streaming units in a compute unit must enter the same memory fence.
Try this:
if(globalThreadIndex < width*height)
{
imgBuff[localThreadIndex] = imgData[globalThreadIndex];
...
}
barrier(CLK_LOCAL_MEM_FENCE);
Also, there could be another issue if you are using multiple devices:
get_local_id(0) + get_group_id(0)
Here get_group_id(0) returns the group id per device, and it starts from 0 on every device, just as get_global_id starts from zero too; so you should add proper offsets in the "ndrange" instruction when using multiple devices. Even though different devices can satisfy the same floating-point accuracy requirements, one of them may give better accuracy than the other and produce slightly different results. If it is a single device, then you should try lowering the GPU frequency, as the chip may have defects or be suffering side effects of an overclock.
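A hedged host-side sketch of such offsets (queue0, queue1, kernel and totalWork are illustrative): each device gets half the index range, shifted by a global work offset. Note that the offset shifts get_global_id() but not get_group_id(), so a kernel split this way should index by get_global_id(0):
size_t half = totalWork / 2;
size_t offset0 = 0, offset1 = half;
// First half of the range on device 0, second half on device 1.
clEnqueueNDRangeKernel(queue0, kernel, 1, &offset0, &half, NULL, 0, NULL, NULL);
clEnqueueNDRangeKernel(queue1, kernel, 1, &offset1, &half, NULL, 0, NULL, NULL);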
I have managed to solve my problem by finding and correcting three issues.
First of all the kernel code, the line:
int centerY = ((globalThreadIndex - centerX) / height) + radius * s1;
should be:
int centerY = (globalThreadIndex / width) + radius * s1;
The main change here was dividing by width, not height. This caused inaccuracy problems.
if(centerX < width && centerY < height)
The above condition was changed to:
if(x < width && x >= 0)
if(y < height && y >=0)
As for the accumulator problem, first I will post the code I used to create clBuffer (I am using OpenCL.net library for C#):
int[] a = new int[width*height]; //image size
ErrorCode error;
Mem cl_accumulator = (Mem)Cl.CreateBuffer(cl_context, MemFlags.ReadWrite, (IntPtr)(a.Length * sizeof(int)), out error);
CheckErr(error, "Cl.CreateBuffer");
The fix here was simple and pretty much self-explanatory:
int[] a = Enumerable.Repeat(0, width * height).ToArray();
ErrorCode error;
GCHandle accHandle = GCHandle.Alloc(a, GCHandleType.Pinned);
IntPtr accPtr = accHandle.AddrOfPinnedObject();
Mem cl_accumulator = (Mem)Cl.CreateBuffer(cl_context, MemFlags.ReadWrite | MemFlags.CopyHostPtr, (IntPtr)(a.Length * sizeof(int)), accPtr, out error);
CheckErr(error, "Cl.CreateBuffer");
I filled the accumulator table with zeros and then copied it to the device buffer each time I executed the kernel.
The above errors caused the accumulator to look different, and a bit malformed, on each kernel execution.
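A hedged alternative to recreating the buffer on every run (shown with the C API; OpenCL.net exposes an equivalent): re-zero the existing buffer before each launch with OpenCL 1.2's clEnqueueFillBuffer.
cl_int zero = 0;
// Fill the whole accumulator with the 4-byte pattern `zero` before the launch.
clEnqueueFillBuffer(queue, cl_accumulator, &zero, sizeof(zero),
                    0, width * height * sizeof(cl_int), 0, NULL, NULL);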
I am running the following kernel on Windows 7, 64-bit, with an Intel CPU and HD Graphics.
I get very strange error reporting from clGetProgramBuildInfo for the following code:
#define BLOCK_SIZE 256
__kernel void reduce4(__global uint* input, __global uint* output, __local uint* sdata)
{
unsigned int tid = get_local_id(0);
unsigned int bid = get_group_id(0);
unsigned int gid = get_global_id(0);
unsigned int blockSize = get_local_size(0);
unsigned int index = bid*(BLOCK_SIZE*2) + tid;
sdata[tid] = input[index] + input[index+BLOCK_SIZE];
barrier(CLK_LOCAL_MEM_FENCE);
for(unsigned int s = BLOCK_SIZE/2; s > 64 ; s >>= 1) {
// Unrolling the last wavefront and we cut 7 iterations of this
// for-loop while we practice wavefront-programming
if(tid < s)
{
sdata[tid] += sdata[tid + s];
}
barrier(CLK_LOCAL_MEM_FENCE);
}
if (tid < 64) {
if (blockSize >= 128) sdata[tid] += sdata[tid + 64];
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
// write result for this block to global mem
if(tid == 0)
{
output[bid] = sdata[0];
}
}
It always says:
Compilation started
:38:2: error: expected identifier or '('
Compilation failed
This is for the last line, where I have put }. What is wrong here?
Update:
This is how I am reading the kernel file:
int offset = 0;
for(int i = 0; i < numOfDevices; ++i, ++offset ) {
/* Load the two source files into temporary datastores */
const char *file_names[] = {"SimpleOptimizations.cl"};
const int NUMBER_OF_FILES = 1;
char* buffer[NUMBER_OF_FILES];
size_t sizes[NUMBER_OF_FILES];
loadProgramSource(file_names, NUMBER_OF_FILES, buffer, sizes);
/* Create the OpenCL program object */
program = clCreateProgramWithSource(context, NUMBER_OF_FILES, (const char**)buffer, sizes, &error);
if(error != CL_SUCCESS) {
perror("Can't create the OpenCL program object");
exit(1);
}
Definition of loadProgramSource:
void loadProgramSource(const char** files,
size_t length,
char** buffer,
size_t* sizes) {
/* Read each source file (*.cl) and store the contents into a temporary datastore */
for(size_t i=0; i < length; i++) {
FILE* file = fopen(files[i], "r");
if(file == NULL) {
perror("Couldn't read the program file");
exit(1);
}
fseek(file, 0, SEEK_END);
sizes[i] = ftell(file);
rewind(file); // reset the file pointer so that 'fread' reads from the front
buffer[i] = (char*)malloc(sizes[i]+1);
buffer[i][sizes[i]] = '\0';
fread(buffer[i], sizeof(char), sizes[i], file);
fclose(file);
}
}
I believe this is an issue with the way Windows deals with text files opened with fopen(). If you take a look at the MSDN page for fopen(), it indicates that if you open a file with just "r" as the mode string, some translations happen with regard to line endings. This means that the size of the file you query may not match the amount of data read by fread().
To solve this, simply change the mode string to indicate that you wish to read the file as binary data (i.e. without any pesky translations):
FILE* file = fopen(files[i], "rb");
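If you'd rather keep text mode, a hedged alternative is to size the buffer from ftell() as before but take the final length from what fread() actually returns, so the string passed to clCreateProgramWithSource always matches the bytes read:
size_t got = fread(buffer[i], sizeof(char), sizes[i], file);
sizes[i] = got;       /* may be smaller than ftell() due to CRLF translation */
buffer[i][got] = '\0';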