OpenCL clEnqueueMapBuffer doesn't work properly?

As far as I know, you can use clEnqueueMapBuffer to access memory objects: instead of using read/write operations, you map a memory object on a device to a memory region on the host.
I wrote a very simple program to test it. This code sends the character 'X' to the GPU and the kernel adds 1 to it, so we should get 'Y' back, but we don't.
It seems that clEnqueueUnmapMemObject doesn't copy the result stored in GPU memory back to the buffer on the host!
This is my code:
#include <iostream>
#include <cstring>
#include <CL/cl.h>
using namespace std;
#pragma warning(disable : 4996)
#define PROGRAM "__kernel void hello(__global char* string )\
{\
string[0] = string[0] + 1;\
}"
int main() {
cl_platform_id platform; cl_device_id device; cl_context context;
cl_program program; cl_int error; cl_build_status status;
const char *programBuffer = PROGRAM;
// make context
clGetPlatformIDs(1, &platform, NULL);
clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device, NULL);
context = clCreateContext(NULL, 1, &device, NULL, NULL, NULL);
// build program
program = clCreateProgramWithSource(context, 1, (const char**)&programBuffer, nullptr, NULL);
const char options[] = "-cl-std=CL1.1 -cl-mad-enable -Werror";
error = clBuildProgram(program, 1, &device, options, NULL, NULL);
// create kernel
cl_command_queue command_queue;
command_queue = clCreateCommandQueue(context, device, 0, nullptr);
cl_kernel kernels, found_kernel;
cl_uint num_kernels;
error = clCreateKernelsInProgram(program, 0, nullptr, &num_kernels);
kernels = clCreateKernel(program, "hello", nullptr);
//make buffers
cl_mem memobj = clCreateBuffer(context, CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, 2 * sizeof(char), nullptr, &error); // if we don't pass nullptr here, it will return a null pointer
error = clSetKernelArg(kernels, 0, sizeof(cl_mem), (void *)&memobj);
// I am going to send this data to the GPU
char *CPU_2_GPU_Data = new char[2]{ 'X', '\0' };
void *mapbuffer = clEnqueueMapBuffer(command_queue, memobj, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, 2 * sizeof(char), 0, nullptr, nullptr, &error);
memcpy(mapbuffer, CPU_2_GPU_Data, 2 * sizeof(char));
cout << "I am sending this data to the GPU: " << (char*)mapbuffer << endl;
error = clEnqueueTask(command_queue, kernels, 0, nullptr, nullptr);
clEnqueueUnmapMemObject(command_queue, memobj, mapbuffer, 0, nullptr, nullptr);
cout << "I am getiing this data from GPU:" << (char*)(mapbuffer) << endl;
clReleaseContext(context);
return 0;
}
Actually, I can send data to the GPU using mapped memory objects, but I can't read the result back. To make the code work I have to explicitly ask the GPU to send me the data, like this:
char* newbuffer = new char[2];
clEnqueueReadBuffer(command_queue, memobj, CL_TRUE, 0, 2 * sizeof(char), newbuffer, 0, nullptr, nullptr);
cout << "the result is :" << newbuffer << endl;
Why does that happen? Why can I send data to the GPU through a mapped memory object but not get the result back the same way?

The intent is that:
1) You map the buffer to write your input to it from the host.
2) You then unmap it so the GPU can use it.
3) After the kernel has run, you map it again to read the result from the host.
4) Then you unmap it to clean up.
You seem to be mapping, launching a task, and then unmapping. So at the point where you try to read the data, the host actually can't read it any more, because you just unmapped it!
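A minimal sketch of that sequence, reusing the question's command_queue, memobj, and kernels (error checks omitted):
// 1) Map to write the input on the host.
char *p = (char*)clEnqueueMapBuffer(command_queue, memobj, CL_TRUE, CL_MAP_WRITE,
                                    0, 2 * sizeof(char), 0, nullptr, nullptr, &error);
p[0] = 'X';
p[1] = '\0';
// 2) Unmap so the GPU can use the buffer, then launch the kernel.
clEnqueueUnmapMemObject(command_queue, memobj, p, 0, nullptr, nullptr);
error = clEnqueueTask(command_queue, kernels, 0, nullptr, nullptr);
// 3) Map again (blocking) to read the result back on the host.
p = (char*)clEnqueueMapBuffer(command_queue, memobj, CL_TRUE, CL_MAP_READ,
                              0, 2 * sizeof(char), 0, nullptr, nullptr, &error);
cout << "Result from GPU: " << p << endl; // prints "Y"
// 4) Unmap to clean up.
clEnqueueUnmapMemObject(command_queue, memobj, p, 0, nullptr, nullptr);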

Related

OpenCL: Basic example not working. clSetKernelArg -38 Error

I am attempting a very simple OpenCL example. I have developed the code below: it compiles a simple kernel, and then I create a simple float* buffer and wrap it in a cl::Buffer. However, when I attempt to call the kernel.setArg() function, it crashes with error -38. This error states that my cl::Buffer is invalid. I have no idea why this is happening:
#define CL_HPP_ENABLE_EXCEPTIONS
#define CL_HPP_TARGET_OPENCL_VERSION 200
#include <CL/cl2.hpp>
#define MULTI_LINE_STRING(ARG) #ARG
namespace op
{
const char *resizeAndMergeKernel = MULTI_LINE_STRING(
__kernel void testKernel(__global float* image)
{
}
);
}
void testCL(){
cl::Device device;
cl::Context context;
cl::CommandQueue queue;
int deviceId = 0;
// Load Device
std::vector<cl::Platform> platforms;
std::vector<cl::Device> devices;
std::string deviceName;
cl_uint i, type;
cl::Platform::get(&platforms);
type = platforms[0].getDevices(CL_DEVICE_TYPE_GPU, &devices);
if( type == CL_SUCCESS)
{
// Get only the relevant device
cl::Context allContext(devices);
std::vector<cl::Device> gpuDevices;
gpuDevices = allContext.getInfo<CL_CONTEXT_DEVICES>();
bool deviceFound = false;
for(int i=0; i<gpuDevices.size(); i++){
if(i == deviceId){
device = gpuDevices[i];
context = cl::Context(device);
queue = cl::CommandQueue(context, device, CL_QUEUE_PROFILING_ENABLE);
deviceFound = true;
cout << "Made new GPU Instance: " << deviceId << endl;
break;
}
}
if(!deviceFound)
{
throw std::runtime_error("Error: Invalid GPU ID");
}
}
// Create Kernel
cl::Program program = cl::Program(context, op::resizeAndMergeKernel, true);
cl::Kernel kernel = cl::Kernel(program, "testKernel");
// Simple Buffer
cl_int err;
float* test = new float[3*224*224];
cl::Buffer x = cl::Buffer(context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR, sizeof(float) * 3 * 224 * 224, (void*)test, &err);
cout << err << endl;
kernel.setArg(0,x); // CRASHES WITH cl::Error -38
}
As you can see the last line kernel.setArg(0,x) crashes with error -38.
It's not a "crash", it's an error code. OpenCL error -38 is CL_INVALID_MEM_OBJECT. It means the cl_mem_obj is not valid. It is because you are passing a cl::Buffer object to setArg, but you need to instead pass the cl_mem handle which represents that buffer. The cl::Buffer operator() method returns that. So use kernel.setArg(0,x()). Note the () are the added part (yes, it's subtle).

OpenCL trouble saving compiled binaries for CPU and GPU simultaneously

So I'm writing an OpenCL program that runs on both the CPU and GPU, and I'm currently trying to save/cache the binaries after creating my program with clCreateProgramWithSource(). I create my cl_context and cl_program with CL_DEVICE_TYPE_ALL and build the source with those specifications.
I then take the binaries and store them to disk (one binary file per device) so that on subsequent starts my program automatically calls clCreateProgramWithBinary.
The problem is that if I save the binaries to disk that were created with CL_DEVICE_TYPE_ALL, the binary for the CPU gets corrupted and clCreateProgramWithBinary throws an error.
To get all the binary files saved to disk properly, I've had to first run with CL_DEVICE_TYPE_CPU and save the CPU binary on its own, then edit my code to run with CL_DEVICE_TYPE_GPU and save the GPU binaries, and finally switch back to CL_DEVICE_TYPE_ALL. If I do this, clCreateProgramWithBinary is able to build the binary for each device type and execute my program.
So is this just a quirk of OpenCL, that I can't build binaries for GPUs and CPUs together? Or am I just doing this incorrectly?
I'm basing my code on the implementation of binary saving found here: https://code.google.com/p/opencl-book-samples/source/browse/trunk/src/Chapter_6/HelloBinaryWorld/HelloBinaryWorld.cpp?r=42 with modifications in place to handle multiple devices.
Here are some portions of my code below:
/*----Initial setup of platform, context and devices---*/
cl_int err;
cl_uint deviceCount;
cl_device_id *devices;
cl_platform_id platform;
cl_context context;
cl_program program;
err = clGetPlatformIDs(1, &platform, NULL);
err = clGetDeviceIDs(platform, CL_DEVICE_TYPE_ALL, 0, NULL, &deviceCount);
devices = new cl_device_id[deviceCount];
err = clGetDeviceIDs(platform, CL_DEVICE_TYPE_ALL, deviceCount, devices, NULL);
context = clCreateContext(NULL, deviceCount, devices, NULL, NULL, &err);
/*---Build Program---*/
const int numFiles = 2;
const char *sourceFiles[] =
{
"File1.cl",
"File2.cl",
};
char *sourceStrings[numFiles];
for(int i = 0; i < numFiles; i++)
{
sourceStrings[i] = ReadFile(sourceFiles[i]);
}
/*---Create the compute program from the source buffer---*/
program = clCreateProgramWithSource(context, numFiles, (const char **)sourceStrings, NULL, &err);
/*---Build the program executable---*/
err = clBuildProgram(program, deviceCount, devices, NULL, NULL, NULL);
/*----Save binary to disk---*/
//Determine the size of each program binary
size_t *programBinarySizes = new size_t[deviceCount];
err = clGetProgramInfo(program, CL_PROGRAM_BINARY_SIZES, sizeof(size_t) * deviceCount, programBinarySizes, NULL);
if(err != CL_SUCCESS)
{
delete [] devices;
delete [] programBinarySizes;
return false;
}
unsigned char **programBinaries = new unsigned char*[deviceCount];
for(cl_uint i = 0; i < deviceCount; i++)
{
programBinaries[i] = new unsigned char[programBinarySizes[i]];
}
//Get all of the program binaries
err = clGetProgramInfo(program, CL_PROGRAM_BINARIES, sizeof(unsigned char *) * deviceCount, programBinaries, NULL);
if (err != CL_SUCCESS)
{
delete [] devices;
delete [] programBinarySizes;
for (cl_uint i = 0; i < deviceCount; i++)
{
delete [] programBinaries[i];
}
delete [] programBinaries;
}
//Store the binaries
for(cl_uint i = 0; i < deviceCount; i++)
{
// Store the binary for all devices
std::string currFile = binaryFile + to_string(i) + ".txt";
FILE *fp = fopen(currFile.c_str(), "wb");
fwrite(programBinaries[i], 1, programBinarySizes[i], fp);
fclose(fp);
}
// Cleanup
delete [] programBinarySizes;
for (cl_uint i = 0; i < deviceCount; i++)
{
delete [] programBinaries[i];
}
delete [] programBinaries;
Then, on the next run, my code calls this function to create the program from the binaries:
unsigned char **programBinaries = new unsigned char *[deviceCount];
size_t sizes[deviceCount];
for(int i = 0; i < deviceCount; i++)
{
string currFile = binaryFile + to_string(i) + ".txt";
FILE *fp = fopen(currFile.c_str(), "rb");
if(!fp) return NULL;
size_t binarySize;
fseek(fp, 0, SEEK_END);
binarySize = ftell(fp);
sizes[i] = binarySize;
rewind(fp);
programBinaries[i] = new unsigned char[binarySize];
fread(programBinaries[i], 1, binarySize, fp);
fclose(fp);
}
cl_int errNum = 0;
cl_program program;
cl_int binaryStatus;
program = clCreateProgramWithBinary(context,
deviceCount,
devices,
sizes,
(const unsigned char **)programBinaries,
&binaryStatus,
&errNum);
delete [] programBinaries;
errNum = clBuildProgram(program, 0, NULL, NULL, NULL, NULL);
I have a Retina MacBook Pro with three devices on its single Apple platform. I ran your code on it and encountered the same problem. I don't actually know the solution, but I can give you some hints for debugging.
Do not use ftell to compute the size of a regular file (see the reason here).
I modified your snippet as follows:
#include <sys/stat.h>
#include <fcntl.h>
unsigned char **programBinaries = new unsigned char *[deviceCount];
size_t sizes[deviceCount];
int fd;
struct stat st;
for(cl_uint i = 0; i < deviceCount; i++)
{
string currFile = binaryFile + to_string(i) + ".txt";
fd = open(currFile.c_str(), O_RDONLY);
if (fd == -1) {
return -1;
}
if ((fstat(fd, &st) != 0) || (!S_ISREG(st.st_mode))) {
return -2;
}
size_t binarySize;
FILE *fp = fdopen(fd, "rb");
if (fseeko(fp, 0 , SEEK_END) != 0) {
return -3;
}
binarySize = ftello(fp);
cout << "device " << i << ": " << binarySize << endl;
sizes[i] = binarySize;
rewind(fp);
programBinaries[i] = new unsigned char[binarySize];
fread(programBinaries[i], 1, binarySize, fp);
fclose(fp);
close(fd);
}
On my system, however, I got the same result as with your original code.
According to
cl_program clCreateProgramWithBinary ( cl_context context,
cl_uint num_devices,
const cl_device_id *device_list,
const size_t *lengths,
const unsigned char **binaries,
cl_int *binary_status,
cl_int *errcode_ret)
binary_status: Returns whether the program binary for each device specified in device_list was loaded successfully or not. It is an array of num_devices entries and returns CL_SUCCESS in binary_status[i] if binary was successfully loaded for device specified by device_list[i]; otherwise returns CL_INVALID_VALUE if lengths[i] is zero or if binaries[i] is a NULL value or CL_INVALID_BINARY in binary_status[i] if program binary is not a valid binary for the specified device. If binary_status is NULL, it is ignored.
If you modify your code like this:
cl_int binaryStatus[deviceCount];
program = clCreateProgramWithBinary(context,
deviceCount,
devices,
sizes,
(const unsigned char **)programBinaries,
binaryStatus,
&errNum);
for (cl_uint i = 0; i < deviceCount; ++i)
{
cout << "device: " << i << ": " << binaryStatus[i] << endl;
}
you will normally get results like the following:
device: 0: 0
device: 1: -42
The first line means that the first program binary (for the CPU) was loaded successfully. The -42 in the second line corresponds to CL_INVALID_BINARY, which means that binary failed to load.
I also tried to retrieve the build options from the program, but got nothing.
//set device_id to 0,1,3...
cl_uint device_id = 0;
cl_build_status status;
// Determine the reason for the error
char buildOptions[16384];
char buildLog[16384];
clGetProgramBuildInfo(program, devices[device_id], CL_PROGRAM_BUILD_STATUS,
sizeof(cl_build_status), &status, NULL);
std::cout << "status: " << status << endl;
clGetProgramBuildInfo(program, devices[device_id], CL_PROGRAM_BUILD_OPTIONS,
sizeof(buildOptions), buildOptions, NULL);
std::cout << "build options: " << endl;
std::cout << buildOptions;
clGetProgramBuildInfo(program, devices[device_id], CL_PROGRAM_BUILD_LOG,
sizeof(buildLog), buildLog, NULL);
std::cout << "build log: " << endl;
std::cout << buildLog;
I guess it is a bug in the OpenCL driver. I hope the above is helpful.
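One more thing worth ruling out (this is my assumption, not something I verified on your machine): the binaries returned by CL_PROGRAM_BINARIES are ordered to match CL_PROGRAM_DEVICES, and that order is not guaranteed to be the same as the order of the devices array you passed to clCreateContext. If they differ, binary i on disk may later be handed to the wrong device. A sketch of saving per device name instead of per index:
// Binaries in CL_PROGRAM_BINARIES follow the order of CL_PROGRAM_DEVICES,
// which may differ from the order of the `devices` array used above.
cl_uint numProgDevices = 0;
err = clGetProgramInfo(program, CL_PROGRAM_NUM_DEVICES,
                       sizeof(cl_uint), &numProgDevices, NULL);
cl_device_id *progDevices = new cl_device_id[numProgDevices];
err = clGetProgramInfo(program, CL_PROGRAM_DEVICES,
                       sizeof(cl_device_id) * numProgDevices, progDevices, NULL);
for (cl_uint i = 0; i < numProgDevices; i++)
{
    char name[256];
    clGetDeviceInfo(progDevices[i], CL_DEVICE_NAME, sizeof(name), name, NULL);
    // e.g. save programBinaries[i] under a file named after `name`, and use
    // the same device-name lookup when loading with clCreateProgramWithBinary.
}
delete [] progDevices;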

How to use clCreateImage

I am trying to create a cl_mem using clCreateImage, but the program keeps crashing. I am following my book as closely as possible, but it's been a pretty bumpy road so far.
#include "stdafx.h"
#include <iostream>
#include <CL/cl.h>
using namespace std;
int _tmain(int argc, _TCHAR* argv[])
{
cl_int status;
cl_platform_id platform;
status = clGetPlatformIDs(1, &platform, NULL);
cl_device_id device;
clGetDeviceIDs(platform, CL_DEVICE_TYPE_ALL, 1, &device, NULL);
cl_context_properties props[3] = { CL_CONTEXT_PLATFORM, (cl_context_properties) (platform), 0 };
cl_context context = clCreateContext(props, 1, &device, NULL, NULL, &status);
cl_image_desc desc;
desc.image_type = CL_MEM_OBJECT_IMAGE2D;
desc.image_width = 100;
desc.image_height = 100;
desc.image_depth = 0;
desc.image_array_size = 0;
desc.image_row_pitch = 0;
desc.image_slice_pitch = 0;
desc.num_mip_levels = 0;
desc.num_samples = 0;
desc.buffer = NULL;
cl_image_format format;
format.image_channel_order = CL_R;
format.image_channel_data_type = CL_FLOAT;
// crashes on the next line with -- Unhandled exception at 0x72BCC9F1 in Convolution.exe: 0xC0000005: Access violation executing location 0x00000000.
cl_mem d_inputImage = clCreateImage(context, CL_MEM_READ_ONLY, &format, &desc, NULL, &status);
// never gets here
cout << "--->"; int exit; cin >> exit;
return 0;
}
clCreateImage has the following parameters:
cl_mem clCreateImage ( cl_context context,
cl_mem_flags flags,
const cl_image_format *image_format,
const cl_image_desc *image_desc,
void *host_ptr,
cl_int *errcode_ret)
The doc page makes no mention that "host_ptr" may be NULL, so try it with a valid pointer there. This is different from clCreateBuffer, where a NULL pointer is allowed. (The CreateBuffer docs don't mention that case either, but I know it works, so this may be a driver/library bug.)
Since it is fairly clear that the OpenCL library is trying to access a NULL pointer location, as the error states (Access violation executing location 0x00000000), I recommend trying that first.
I think it's easier to write the code ignoring the useless parameters, as follows:
cl_image_desc desc;
memset(&desc, '\0', sizeof(cl_image_desc));
desc.image_type = CL_MEM_OBJECT_IMAGE2D;
desc.image_width = width;
desc.image_height = height;
desc.mem_object= NULL; // or someBuf;
Additionally, the "host_ptr" can be NULL.
The common errors are usually the wrong image format which isn't supported by device, the wrong size and wrong version.
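Since an unsupported format is the most common of those, it can be ruled out quickly with clGetSupportedImageFormats (a sketch, reusing the question's context):
// List the 2D image formats this context supports for read-only images,
// and check whether CL_R / CL_FLOAT is among them.
cl_uint numFormats = 0;
clGetSupportedImageFormats(context, CL_MEM_READ_ONLY, CL_MEM_OBJECT_IMAGE2D,
                           0, NULL, &numFormats);
cl_image_format *formats = new cl_image_format[numFormats];
clGetSupportedImageFormats(context, CL_MEM_READ_ONLY, CL_MEM_OBJECT_IMAGE2D,
                           numFormats, formats, NULL);
bool supported = false;
for (cl_uint i = 0; i < numFormats; i++)
    if (formats[i].image_channel_order == CL_R &&
        formats[i].image_channel_data_type == CL_FLOAT)
        supported = true;
delete [] formats;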

OpenCL: GPU Execution Time is always Zero

I am trying to print the execution time for some functions on the GPU, but the timing always comes out as 0. When I choose CL_DEVICE_TYPE_CPU in the following, it works fine.
errcode = clGetDeviceIDs( platform_id, CL_DEVICE_TYPE_CPU, 1, &device_id, &ret_num_devices);
This works fine and shows a non-zero execution time, but if I choose CL_DEVICE_TYPE_GPU it always shows 0, irrespective of the total number of data points and threads. Note that in both cases (CL_DEVICE_TYPE_CPU and CL_DEVICE_TYPE_GPU) I am printing the execution time the same way, and the host code and kernel code are identical in both cases (that's what OpenCL is!). Here are some sections of the code:
// openCL code to get platform and device ids
errcode = clGetPlatformIDs(1, &platform_id, &ret_num_platforms);
errcode = clGetDeviceIDs( platform_id, CL_DEVICE_TYPE_GPU, 1, &device_id, &ret_num_devices);
// to create context
clGPUContext = clCreateContext( NULL, 1, &device_id, NULL, NULL, &errcode);
//Create a command-queue
clCommandQue = clCreateCommandQueue(clGPUContext, device_id,
                                    CL_QUEUE_PROFILING_ENABLE, &errcode);
// Setup device memory
d_instances = clCreateBuffer(clGPUContext, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                             mem_size_i, instances->data, &errcode);
d_centroids = clCreateBuffer(clGPUContext, CL_MEM_READ_WRITE, mem_size_c, NULL, &errcode);
d_distance = clCreateBuffer(clGPUContext, CL_MEM_READ_WRITE, mem_size_d, NULL, &errcode);
//d_dist_X = clCreateBuffer(clGPUContext, CL_MEM_READ_WRITE, mem_size4, NULL, &errcode);
//d_dist_Y = clCreateBuffer(clGPUContext, CL_MEM_READ_WRITE, mem_size4, NULL, &errcode);
// to build program
clProgram = clCreateProgramWithSource(clGPUContext, 1, (const char **)&source_str,
                                      (const size_t *)&source_size, &errcode);
errcode = clBuildProgram(clProgram, 0,NULL, NULL, NULL, NULL);
if (errcode == CL_BUILD_PROGRAM_FAILURE)
{
// Determine the size of the log
size_t log_size;
clGetProgramBuildInfo(clProgram, device_id, CL_PROGRAM_BUILD_LOG, 0, NULL, &log_size);
// Allocate memory for the log
char *log = (char *) malloc(log_size);
// Get the log
clGetProgramBuildInfo(clProgram, device_id, CL_PROGRAM_BUILD_LOG, log_size, log, NULL);
// Print the log
printf("%s\n", log);
}
clKernel = clCreateKernel(clProgram,"distance_finding", &errcode);
// Launch OpenCL kernel
size_t localWorkSize[1], globalWorkSize[1];
if(num_instances >= 500)
{
localWorkSize[0] = 500;
float block1=num_instances/localWorkSize[0];
int block= (int)(ceil(block1));
globalWorkSize[0] = block*localWorkSize[0];
}
else
{
localWorkSize[0]=num_instances;
globalWorkSize[0]=num_instances;
}
int iteration=1;
while(iteration < MAX_ITERATIONS)
{
errcode = clEnqueueWriteBuffer(clCommandQue, d_centroids, CL_TRUE, 0,
                               mem_size_c, (void*)centroids->data, 0, NULL, NULL);
errcode = clEnqueueWriteBuffer(clCommandQue, d_distance, CL_TRUE, 0,
                               mem_size_d, (void*)distance->data, 0, NULL, NULL);
//set kernel arguments
errcode = clSetKernelArg(clKernel, 0,sizeof(cl_mem), (void *)&d_instances);
errcode = clSetKernelArg(clKernel, 1,sizeof(cl_mem), (void *)&d_centroids);
errcode = clSetKernelArg(clKernel, 2,sizeof(cl_mem), (void *)&d_distance);
errcode = clSetKernelArg(clKernel, 3, sizeof(unsigned int), (void *)&num_instances);
errcode = clSetKernelArg(clKernel,4,sizeof(unsigned int),(void *)&clusters);
errcode = clSetKernelArg(clKernel,5,sizeof(unsigned int),(void *)&dimensions);
errcode = clEnqueueNDRangeKernel(clCommandQue,clKernel, 1, NULL,
globalWorkSize,localWorkSize, 0, NULL, &myEvent);
clFinish(clCommandQue); // wait for all events to finish
clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_START,sizeof(cl_ulong),
&startTime, NULL);
clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_END,sizeof(cl_ulong),
&endTime, NULL);
kernelExecTimeNs = endTime-startTime;
gpu_time+= kernelExecTimeNs;
// Retrieve result from device
errcode = clEnqueueReadBuffer(clCommandQue,d_distance, CL_TRUE, 0,
mem_size_d,distance->data, 0, NULL, NULL);
Printing the time in ms:
printf("\n\n Time taken by GPU is %llu ms",gpu_time/1000000);
If the way I am calculating the GPU timing is wrong, why would it work on a CPU (by changing to CL_DEVICE_TYPE_CPU)? What is wrong here?
Edited:
System Information
AMD APP SDK 2.4
AMD ATI FirePro GL 3D, having 800 cores
Kernel
#pragma OPENCL EXTENSION cl_khr_fp64:enable
double distance_cal(__local float* cent,float* data,int dimensions)
{
float dist1=0.00;
for(int i=0;i<dimensions;i++)
dist1 += ((data[i]-cent[i]) * (data[i]-cent[i]));
double sq_dist=sqrt(dist1);
return sq_dist;
}
void fetch_col(float* data,__constant float* x,int col,int dimension,int len)
{
//hari[i]=8;
for(int i=0;i<dimension;i++)
{
data[i]=x[col];
col=col+len;
}
}
void fetch_col_cen(__local float* data,__global float* x,int col,int dimension,int len)
{
//hari[i]=8;
for(int i=0;i<dimension;i++)
{
data[i]=x[col];
col=col+len;
}
}
__kernel void distance_finding(__constant float* data, __global float* cen,
                               __global float* dist, int inst, int clus, const int dimensions)
{
int idx=get_global_id(0);
float data_col[4];
fetch_col( data_col,data,idx,dimensions,inst);
for(int i=0;i<clus;i++)
{
int k=i*inst; // take each dimension value for each cluster data
__local float cent[4];
barrier(CLK_LOCAL_MEM_FENCE | CLK_GLOBAL_MEM_FENCE);
fetch_col_cen(cent,cen,i,dimensions,clus);
dist[idx+k] = distance_cal(cent, data_col, dimensions); // calculate distance wrt each data point and each centroid
}
}
clEnqueueNDRangeKernel() is asynchronous when it targets a GPU, so you may only be seeing the time it took to enqueue the request, not the time to execute it.
That said, I could be wrong, but I usually write C++ code to do the timing: put the start_time before the enqueue and the end_time after the clFinish(cmd_queue). Just like your existing C++ timing code, that would be a good test, if you're sure your GPU shouldn't be finishing in 0 seconds.
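For example, a host-side wall-clock measurement around the enqueue and clFinish() (a sketch; clCommandQue, clKernel, and the work sizes are the question's variables):
#include <chrono>
#include <cstdio>

auto t0 = std::chrono::high_resolution_clock::now();
errcode = clEnqueueNDRangeKernel(clCommandQue, clKernel, 1, NULL,
                                 globalWorkSize, localWorkSize, 0, NULL, &myEvent);
clFinish(clCommandQue); // block until the kernel has really finished
auto t1 = std::chrono::high_resolution_clock::now();
double host_ms = std::chrono::duration<double, std::milli>(t1 - t0).count();
printf("host-measured kernel time: %0.3f ms\n", host_ms);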
An easy way to check would be to introduce an abnormally long operation inside the kernel. If THAT shows up as zero while there is a perceptible lag in actual execution, then you have your answer.
That said, I believe you might need to install the instrumented drivers to even have the system write to the performance counters (even though the indicated thread is for Linux, it probably holds on Windows too). You can also use the CUDA profiler on NVIDIA's OpenCL implementation, because it sits on top of CUDA.
Change it to:
clFinish(clCommandQue); // wait for all enqueued commands to finish
// add this after clFinish() to ensure kernel execution is finished
clWaitForEvents(1, &myEvent);
...
double gpu_time = endTime - startTime;
...
printf("\n\n Time taken by GPU is %0.3f ms", gpu_time / 1000000.0);

OpenCL enqueueNDRangeKernel causes Access Violation error

I am continuously getting an Access Violation error with all the kernels I try to build. Other kernels, which I take from books, seem to work fine.
https://github.com/ssarangi/VideoCL - This is where the code is.
Something seems to be missing here. Could someone help me with this?
Thanks so much.
[James] - Thanks for the suggestion, and you are right. I am on Windows 7 with an AMD Redwood card, Catalyst 11.7 drivers, and AMD APP SDK 2.5. I am posting the code below.
#include <iostream>
#include "bmpfuncs.h"
#include "CLManager.h"
int main()
{
float theta = 3.14159f/6.0f;
int W ;
int H ;
const char* inputFile = "input.bmp";
const char* outputFile = "output.bmp";
float* ip = readImage(inputFile, &W, &H);
float *op = new float[W*H];
//We assume that the input image is the array “ip”
//and the angle of rotation is theta
float cos_theta = cos(theta);
float sin_theta = sin(theta);
try
{
CLManager* clMgr = new CLManager();
// Build the Source
unsigned int pgmID = clMgr->buildSource("rotation.cl");
// Create the kernel
cl::Kernel* kernel = clMgr->makeKernel(pgmID, "img_rotate");
// Create the memory Buffers
cl::Buffer* clIp = clMgr->createBuffer(CL_MEM_READ_ONLY, W*H*sizeof(float));
cl::Buffer* clOp = clMgr->createBuffer(CL_MEM_READ_WRITE, W*H*sizeof(float));
// Get the command Queue
cl::CommandQueue* queue = clMgr->getCmdQueue();
queue->enqueueWriteBuffer(*clIp, CL_TRUE, 0, W*H*sizeof(float), ip);
// Set the arguments to the kernel
kernel->setArg(0, clOp);
kernel->setArg(1, clIp);
kernel->setArg(2, W);
kernel->setArg(3, H);
kernel->setArg(4, sin_theta);
kernel->setArg(5, cos_theta);
// Run the kernel on specific NDRange
cl::NDRange globalws(W, H);
queue->enqueueNDRangeKernel(*kernel, cl::NullRange, globalws, cl::NullRange);
queue->enqueueReadBuffer(*clOp, CL_TRUE, 0, W*H*sizeof(float), op);
storeImage(op, outputFile, H, W, inputFile);
}
catch(cl::Error error)
{
std::cout << error.what() << "(" << error.err() << ")" << std::endl;
}
}
I am getting the error at the queue->enqueueNDRangeKernel line.
I have the queue and the kernel stored in a class.
CLManager::CLManager()
: m_programIDs(-1)
{
// Initialize the Platform
cl::Platform::get(&m_platforms);
// Create a Context
cl_context_properties cps[3] = {
CL_CONTEXT_PLATFORM,
(cl_context_properties)(m_platforms[0])(),
0
};
m_context = cl::Context(CL_DEVICE_TYPE_GPU, cps);
// Get a list of devices on this platform
m_devices = m_context.getInfo<CL_CONTEXT_DEVICES>();
cl_int err;
m_queue = new cl::CommandQueue(m_context, m_devices[0], 0, &err);
}
cl::Kernel* CLManager::makeKernel(unsigned int programID, std::string kernelName)
{
cl::CommandQueue queue = cl::CommandQueue(m_context, m_devices[0]);
cl::Kernel* kernel = new cl::Kernel(*(m_programs[programID]), kernelName.c_str());
m_kernels.push_back(kernel);
return kernel;
}
I checked your code. I'm on Linux though. At runtime I'm getting Error -38, which means CL_INVALID_MEM_OBJECT. So I went and checked your buffers.
cl::Buffer* clIp = clMgr->createBuffer(CL_MEM_READ_ONLY, W*H*sizeof(float));
cl::Buffer* clOp = clMgr->createBuffer(CL_MEM_READ_WRITE, W*H*sizeof(float));
Then you pass the buffers as pointers:
kernel->setArg(0, clOp);
kernel->setArg(1, clIp);
But setArg is expecting a value: the templated setArg overload passes the raw bytes of whatever you hand it, so passing a pointer compiles, but the kernel receives the pointer value itself rather than a cl_mem handle, hence CL_INVALID_MEM_OBJECT. The buffer pointers should be dereferenced:
kernel->setArg(0, *clOp);
kernel->setArg(1, *clIp);
After those changes the cat rotates ;)
