I want to do a sub-matrix multiplication. Say I have a function:
void MatMul(cl_mem A, cl_mem B, cl_mem C, int M, int K, int N)
where A is M*K, B is K*N, and C is M*N, and A, B, C are all row-major one-dimensional arrays filled from host memory float *h_A, *h_B, *h_C with the following function:
void ocl_push_array(cl_mem d_x, float *h_x, int n){
    size_t data_size = sizeof(float) * n;
    // queue and err are assumed to be declared elsewhere in the host code
    err = clEnqueueWriteBuffer(queue, d_x, CL_TRUE, 0, data_size, h_x, 0, NULL, NULL);
}
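For reference, a hypothetical call pairing this with buffer creation (context is an assumed host variable):

// hypothetical usage: create the device buffer, then push the host data
cl_mem d_A = clCreateBuffer(context, CL_MEM_READ_ONLY,
                            sizeof(float) * M * K, NULL, &err);
ocl_push_array(d_A, h_A, M * K);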
I want to ask:
if I want to do sub-matrix multiplication, say slicing A by row:
// cl_mem A, B, C;
for(int x = 0; x < M; x += 16)
{
    cl_mem A_sub = (cl_mem)((float *)A + x*K);
    cl_mem C_sub = (cl_mem)((float *)C + x*N);
    if((M - x + 1) >= 16)
        MatMul(A_sub, B, C_sub, 16, K, N);
    else
        MatMul(A_sub, B, C_sub, M - x + 1, K, N);
}
Is this the right way to do this operation? I get a runtime error saying "CL_INVALID_MEM_OBJECT" (-38) when the arguments are assigned to the OpenCL kernel (clSetKernelArg).
The reason I want to do this is that the matrix multiplication produces wrong answers when my input matrices A and B become big.
My OpenCL kernel is:
#define BLOCK_SIZE 16
#define AS(i, j) As[j + i * BLOCK_SIZE]
#define BS(i, j) Bs[j + i * BLOCK_SIZE]
__kernel void
matrixMul(__global float* A, __global float* B, __global float* C,
          __local float* As, __local float* Bs, int uiWA, int uiWB)
{
    int bx = get_group_id(0);
    int by = get_group_id(1);
    int tx = get_local_id(0);
    int ty = get_local_id(1);

    int aBegin = uiWA * BLOCK_SIZE * by;   // first sub-matrix of A for this block
    int aEnd   = aBegin + uiWA - 1;        // last sub-matrix of A
    int aStep  = BLOCK_SIZE;               // step through the sub-matrices of A
    int bBegin = BLOCK_SIZE * bx;          // first sub-matrix of B for this block
    int bStep  = BLOCK_SIZE * uiWB;        // step through the sub-matrices of B

    float Csub = 0.0f;

    for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
        // load one tile of A and one tile of B into local memory
        AS(ty, tx) = A[a + uiWA * ty + tx];
        BS(ty, tx) = B[b + uiWB * ty + tx];
        barrier(CLK_LOCAL_MEM_FENCE);

        #pragma unroll
        for (int k = 0; k < BLOCK_SIZE; ++k)
            Csub += AS(ty, k) * BS(k, tx);
        barrier(CLK_LOCAL_MEM_FENCE);
    }

    C[get_global_id(1) * get_global_size(0) + get_global_id(0)] = Csub;
}
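For reference, the two __local arguments of this kernel are passed by size only, with a NULL data pointer. A minimal sketch, assuming the kernel object is named kernel and the argument order follows the signature above (A, B, C, As, Bs, uiWA, uiWB):

// __local arguments reserve per-work-group memory by size only
clSetKernelArg(kernel, 3, sizeof(float) * BLOCK_SIZE * BLOCK_SIZE, NULL); // As
clSetKernelArg(kernel, 4, sizeof(float) * BLOCK_SIZE * BLOCK_SIZE, NULL); // Bs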
and the sizes are:
#define BLOCK_SIZE 16
size_t localWorkSize[] = {BLOCK_SIZE, BLOCK_SIZE};
size_t globalWorkSize[] = {shrRoundUp(BLOCK_SIZE, N), shrRoundUp(BLOCK_SIZE, M)};
size_t shrRoundUp(int group_size, int global_size)
{
    int r = global_size % group_size;
    if(r == 0)
    {
        return global_size;
    }
    else
    {
        return global_size + group_size - r;
    }
}
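For example, shrRoundUp(16, 100) returns 112, so the global work size is always rounded up to the next multiple of BLOCK_SIZE.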
The code is adapted from the NVIDIA OpenCL matrix multiplication sample. My GPU is an Intel(R) HD Graphics 4600.
Thanks!
I don't think you can do this:
cl_mem A_sub = (cl_mem)((float *)A+x*K);
cl_mem is an opaque object in OpenCL: a complex data structure rather than just a data pointer. It maintains information such as the pointer to the actual memory, the reference count of the object, memory properties, and so on. Different runtimes may even implement the cl_mem object differently. That's why you got the CL_INVALID_MEM_OBJECT error.
What you can do to get the wanted data for the sub-matrix is one of the following:

1) Define two new cl_mem objects and use a separate kernel to do the copy work.

2) Use the clEnqueueCopyBuffer function to copy the data from the host code; the copy itself happens on the device (sketched below).

3) Create the buffer with CL_MEM_ALLOC_HOST_PTR, use clEnqueueMapBuffer to map the GPU memory to a host memory pointer, modify the memory contents through the mapped pointer, and, when you finish, unmap the pointer to return the memory to the device domain.
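A minimal sketch of option 2, assuming the host variables context, queue, and err from the question, and a slice height of 16 rows:

// copy a 16-row slice of A into its own buffer before calling MatMul
int rows = 16;
size_t slice_bytes = sizeof(float) * rows * K;
size_t src_offset  = sizeof(float) * x * K;   // byte offset of row x in A

cl_mem A_sub = clCreateBuffer(context, CL_MEM_READ_WRITE, slice_bytes, NULL, &err);
err = clEnqueueCopyBuffer(queue, A, A_sub, src_offset, 0, slice_bytes,
                          0, NULL, NULL);
// create C_sub the same way, call MatMul(A_sub, B, C_sub, rows, K, N),
// then copy C_sub back into C at byte offset sizeof(float) * x * N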
Related
I know atomic functions with OpenCL-1.x are not recommended but I just want to understand an atomic example.
The following kernel code is not working well; it produces random final values for the computation of the sum of all array values (a sum reduction):
#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable

void atom_add_double(volatile __local double *val, double delta)
{
    union {
        double f;
        ulong  i;
    } old, new;

    do
    {
        old.f = *val;
        new.f = old.f + delta;
    }
    // retry until no other work item modified *val between the read and the swap
    while (atom_cmpxchg((volatile __local ulong *)val, old.i, new.i) != old.i);
}
__kernel void sumGPU ( __global const double *input,
                       __local double *localInput,
                       __global double *finalSum
                     )
{
    uint lid = get_local_id(0);
    uint gid = get_global_id(0);
    uint localSize = get_local_size(0);
    uint groupid = get_group_id(0);

    local double partialSum;
    local double finalSumTemp;

    // Initialize sums
    if (lid==0)
    {
        partialSum = 0.0;
        finalSumTemp = 0.0;
    }
    barrier(CLK_LOCAL_MEM_FENCE);

    // Set in local memory
    int idx = groupid * localSize + lid;
    localInput[lid] = input[idx];

    // Compute atom_add into each workGroup
    barrier(CLK_LOCAL_MEM_FENCE);
    atom_add_double(&partialSum, localInput[lid]);
    // See and check if the barrier below is necessary
    barrier(CLK_LOCAL_MEM_FENCE);

    // Final sum of partialSums
    if (lid==0)
    {
        atom_add_double(&finalSumTemp, partialSum);
        *finalSum = finalSumTemp;
    }
}
The version with the global-id strategy works well, but the version above, which uses local memory (shared memory), doesn't give the expected results (the value of *finalSum is random on each execution).
Here are the buffers and kernel args that I have set in my host code:
// Write to buffers
ret = clEnqueueWriteBuffer(command_queue, inputBuffer, CL_TRUE, 0,
nWorkItems * sizeof(double), xInput, 0, NULL, NULL);
ret = clEnqueueWriteBuffer(command_queue, finalSumBuffer, CL_TRUE, 0,
sizeof(double), finalSumGPU, 0, NULL, NULL);
// Set the arguments of the kernel
clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&inputBuffer);
clSetKernelArg(kernel, 1, local_item_size*sizeof(double), NULL);
clSetKernelArg(kernel, 2, sizeof(cl_mem), (void *)&finalSumBuffer);
Finally, I read finalSumBuffer to get the sum value.

I think my issue comes from the kernel code, but I can't find where the error is.

If anyone can see what's wrong, please let me know.
Thanks
UPDATE 1:

I have nearly managed to perform this reduction. Following the suggestions from huseyin tugrul buyukisik, I have modified the kernel code like this:
#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable

void atom_add_double(volatile __local double *val, double delta)
{
    union {
        double d;
        ulong  i;
    } old, new;

    do
    {
        old.d = *val;
        new.d = old.d + delta;
    }
    while (atom_cmpxchg((volatile __local ulong *)val, old.i, new.i) != old.i);
}
__kernel void sumGPU ( __global const double *input,
                       __local double *localInput,
                       __local double *partialSum,
                       __global double *finalSum
                     )
{
    uint lid = get_local_id(0);
    uint gid = get_global_id(0);
    uint localSize = get_local_size(0);
    uint groupid = get_group_id(0);

    // Initialize partial sums
    if (lid==0)
        partialSum[groupid] = 0.0;
    barrier(CLK_LOCAL_MEM_FENCE);

    // Set in local memory
    int idx = groupid * localSize + lid;
    localInput[lid] = input[idx];

    // Compute atom_add into each workGroup
    barrier(CLK_LOCAL_MEM_FENCE);
    atom_add_double(&partialSum[groupid], localInput[lid]);
    // See and check if the barrier below is necessary
    barrier(CLK_LOCAL_MEM_FENCE);

    // Compute final sum
    if (lid==0)
        *finalSum += partialSum[groupid];
}
As huseyin said, I don't need to use atomic functions for the final sum of all partial sums. So at the end I did:
// Compute final sum
if (lid==0)
*finalSum += partialSum[groupid];
But unfortunately, the final sum doesn't give the expected value, and the value is random (for example, with nWorkItems = 1024 and workgroup size = 16, I get random values on the order of [1e+3, 1e+4] instead of the expected 5.248e+05).
Here is the setting of the arguments in the host code:
// Set the arguments of the kernel
clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&inputBuffer);
clSetKernelArg(kernel, 1, local_item_size*sizeof(double), NULL);
clSetKernelArg(kernel, 2, nWorkGroups*sizeof(double), NULL);
clSetKernelArg(kernel, 3, sizeof(cl_mem), (void *)&finalSumBuffer);
Can you see where my error is in the kernel code?
Thanks
Not an error, but a logic issue:

atom_add_double(&finalSumTemp, partialSum);

executes only once per group (by the zero-local-indexed work item). So you are effectively just doing

finalSumTemp = partialSum;

and atomics are not needed here.
There is a race condition on

*finalSum = finalSumTemp;

between workgroups: every group's zero-indexed local work item writes to the same address. So this should either be an atomic addition (for learning purposes) or be written to different cells to be added on the host side, such as sum_group1 + sum_group2 + ... = total sum (see the sketch below).
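A minimal sketch of the different-cells idea, assuming a hypothetical __global double *partialSums kernel argument with one slot per workgroup, and partialSum holding this group's sum:

// kernel side: each group's zero-indexed work item writes its partial
// sum to its own global slot -- no atomics, no race between groups
if (lid == 0)
    partialSums[groupid] = partialSum;

The host then reads the buffer back and adds the slots (sum_group1 + sum_group2 + ...) to get the total.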
int idx = groupid * localSize + lid;
localInput[lid] = input[idx];
Here, using groupid is suspicious for multi-device summation, because each device has its own global range and workgroup-id indexing, so two devices could have the same group-id values for two different groups. Some device-related offset should be used when multiple devices are involved, such as:

idx = get_global_id(0) + deviceOffset[deviceId];
Also, if an atomic operation is unavoidable and is performed exactly N times, it can be moved to a single work item (such as the 0-indexed one) and looped N times (probably faster) in a second kernel, unless that atomic operation's latency can be hidden by other means. A sketch of such a second kernel follows.
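A minimal sketch of that second-kernel idea, assuming the per-group partial sums were first written to a __global buffer partialSums of nWorkGroups elements:

#pragma OPENCL EXTENSION cl_khr_fp64 : enable   // double support, as in the question

// a single work item loops over the per-group partial sums,
// so no atomics are needed at all
__kernel void finalSumKernel(__global const double *partialSums,
                             __global double *finalSum,
                             int nWorkGroups)
{
    if (get_global_id(0) == 0)
    {
        double total = 0.0;
        for (int g = 0; g < nWorkGroups; ++g)
            total += partialSums[g];
        *finalSum = total;
    }
}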
I have a __local int* pointer into which I want to copy data from a __global int*. To make the copy faster, I cast both to long16*. I know all the arrays (input, output, and local memory) are 16 * 1024 bytes in size. The code is as follows:
__kernel void test_kernel(
    __global int* a,
    __global int* b,
    __local int* localbuf
){
    int thread_idx = get_global_id(0);
    int local_idx = get_local_id(0);

    __global long16* input  = (__global long16*)a;
    __global long16* output = (__global long16*)b;
    __local long16* local_buf = (__local long16*)localbuf;

    local_buf[local_idx * 4 + 0] = input[0];
    local_buf[local_idx * 4 + 1] = input[1];
    local_buf[local_idx * 4 + 2] = input[2];
    local_buf[local_idx * 4 + 3] = input[3];
    barrier(CLK_LOCAL_MEM_FENCE);

    output[thread_idx] = local_buf[thread_idx];
}
The result is not what I expect; the output is filled with zeros. Now, if I simply replace local_buf in the last line with input, I still get all zeros in the output:
__kernel void test_kernel(
    __global int* a,
    __global int* b,
    __local int* localbuf
){
    int thread_idx = get_global_id(0);
    int local_idx = get_local_id(0);

    __global long16* input  = (__global long16*)a;
    __global long16* output = (__global long16*)b;
    __local long16* local_buf = (__local long16*)localbuf;

    local_buf[local_idx * 4 + 0] = input[0];
    local_buf[local_idx * 4 + 1] = input[1];
    local_buf[local_idx * 4 + 2] = input[2];
    local_buf[local_idx * 4 + 3] = input[3];
    barrier(CLK_LOCAL_MEM_FENCE);

    output[thread_idx] = input[thread_idx];
}
But if I remove the lines that assign to the local buffer, as follows:
__kernel void test_kernel(
    __global int* a,
    __global int* b,
    __local int* localbuf
){
    int thread_idx = get_global_id(0);
    int local_idx = get_local_id(0);

    __global long16* input  = (__global long16*)a;
    __global long16* output = (__global long16*)b;
    __local long16* local_buf = (__local long16*)localbuf;

    barrier(CLK_LOCAL_MEM_FENCE);

    output[thread_idx] = input[thread_idx];
}
then I get completely correct values in the output array. Also, if I simply do not cast localbuf to long16 and copy it as ints, everything works fine.

I really do not know what the problem can be. I am using an NVIDIA GTX 560 Ti.

Update 1: I noticed that this problem does not exist on an AMD R9 280X or an NVIDIA GTX 280... so it may be architecture dependent.

Update 2: source code:
size_t buffer_size = 16 * 1024 / 4 ;
size_t global_ws = buffer_size;
size_t local_ws = 32;
std::vector<int> host_data (buffer_size);
std::vector<int> output_data(buffer_size);
for(size_t i = 0; i < buffer_size; i++){
    host_data[i] = static_cast<int>(i);
    output_data[i] = 0;
}
cl_mem input = clCreateBuffer(cl->devices[0].ctx, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, buffer_size * sizeof(int), host_data.data(), &err); CL_ERROR(err);
cl_mem output = clCreateBuffer(cl->devices[0].ctx, CL_MEM_WRITE_ONLY, buffer_size * sizeof(int), nullptr, &err); CL_ERROR(err);
auto start_frame_time = hrc::now();
clSetKernelArg(kernel, 0, sizeof(cl_mem), &input);
clSetKernelArg(kernel, 1, sizeof(cl_mem), &output);
clSetKernelArg(kernel, 2, buffer_size*sizeof(cl_int), NULL);
err = clEnqueueNDRangeKernel(cl->devices[0].cmd_queue, kernel, 1, nullptr, &global_ws, &local_ws, 0, nullptr, nullptr); CL_ERROR(err);
clFinish(cl->devices[0].cmd_queue);
err = clEnqueueReadBuffer(cl->devices[0].cmd_queue, output, CL_TRUE, 0, buffer_size * sizeof(int), output_data.data(), 0, nullptr, nullptr); CL_ERROR(err);
for (size_t i = 0; i < buffer_size; i++){
    std::cout << i << ". " << output_data[i] << std::endl;
    if (i % 512 == 0) getchar();
}
std::cout << "Elapsed Time: " << hrc::now() - start_frame_time << std::endl;
long -> 8 bytes
int -> 4 bytes
long16 -> 128 bytes
long16 is 32 times bigger than int.
So if you have 16 kB of input/local/output, you can only have (according to your kernel code) 512 work items. How many are you launching? Could you post that code as well?

On the NVIDIA platform, if you go over these limits, you will get an error not at kernel launch time but at the enqueueRead(), and the output will not even be read (leaving the output array full of zeros). Check the errors there.

Also, on AMD and other platforms it might look like it works, but then half of the results will be wrong.
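A back-of-the-envelope check of those sizes, based on the Update 2 source (a host-side C sketch; cl_long is OpenCL's guaranteed 8-byte long, and the exact limit depends on how each buffer is indexed):

size_t bytes      = 16 * 1024;                      // total size of each buffer
size_t num_ints   = bytes / sizeof(cl_int);         // 4096 int elements
size_t num_long16 = bytes / (16 * sizeof(cl_long)); // 128 long16 elements
// global_ws is 4096 work items, but output[thread_idx] indexed as
// long16 stays in bounds only for thread_idx < 128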
I am using the following kernel for sum reduction.
__kernel void reduce(__global float* input, __global float* output, __local float* sdata)
{
    // load shared mem
    unsigned int tid = get_local_id(0);
    unsigned int bid = get_group_id(0);
    unsigned int gid = get_global_id(0);
    unsigned int localSize = get_local_size(0);
    unsigned int stride = gid * 2;

    sdata[tid] = input[stride] + input[stride + 1];
    barrier(CLK_LOCAL_MEM_FENCE);

    // do reduction in shared mem
    for(unsigned int s = localSize >> 2; s > 0; s >>= 1)
    {
        if(tid < s)
        {
            sdata[tid] += sdata[tid + s];
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }

    // write result for this block to global mem
    if(tid == 0) output[bid] = sdata[0];
}
It works fine, but I don't know how to choose the optimal workgroup size or number of workgroups if I need more than one workgroup (for example if I want to calculate the sum of 1048576 elements). As far as I understand, the more workgroups I use, the more subresults I will get, which also means that I will need more global reductions at the end.
I've seen the answers to the general workgroup size question here. Are there any recommendations that concern reduction operations specifically?
This question is a possible duplicate of one I answered a while back:
What is the algorithm to determine optimal work group size and number of workgroup.
Experimentation will be the best way to know for sure for any given device.
Update:
I think you can safely stick to 1-dimensional work groups, as you have done in your sample code. On the host, you can experiment to find the best values.
For each device:
1) Query for CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE.
2) Loop over a few multiples and run the kernel with each group size; save the execution time for each test.
3) When you think you have an optimal value, hard-code it into a new kernel for use with that specific device. This will give a further boost to performance. You can also eliminate the sdata parameter in the device-specific kernel.
//define your own context, kernel, queue here
cl_int err;
size_t global_size; //set this somewhere to match your test data size
size_t preferred_size;
size_t max_group_size;

err = clGetKernelWorkGroupInfo(kernel, device_id, CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, sizeof(size_t), &preferred_size, NULL);
//check err
err = clGetKernelWorkGroupInfo(kernel, device_id, CL_KERNEL_WORK_GROUP_SIZE, sizeof(size_t), &max_group_size, NULL);
//check err

size_t test_size;
//your vars for hi-res timer go here
for (unsigned int i = preferred_size; i <= max_group_size; i += preferred_size){
    //reset timer
    test_size = (size_t)i;
    err = clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &global_size, &test_size, 0, NULL, NULL);
    if(err){
        fail("Unable to enqueue kernel"); //implement your own fail function somewhere..
    }else{
        clFinish(queue);
        //stop timer, save value
        //output timer value and test_size
    }
}
The device-specific kernel can look like this, except the first line should have your optimal value substituted:
#define LOCAL_SIZE 32

__kernel void reduce(__global float* input, __global float* output)
{
    unsigned int tid = get_local_id(0);
    unsigned int stride = get_global_id(0) * 2;
    __local float sdata[LOCAL_SIZE];

    sdata[tid] = input[stride] + input[stride + 1];
    barrier(CLK_LOCAL_MEM_FENCE);

    for(unsigned int s = LOCAL_SIZE >> 2; s > 0; s >>= 1){
        if(tid < s){
            sdata[tid] += sdata[tid + s];
        }
        barrier(CLK_LOCAL_MEM_FENCE);
    }

    if(tid == 0) output[get_group_id(0)] = sdata[0];
}
I am new to OpenCL and I am trying to write kernel code for the following matrix operation:

A is a 2x2 matrix:

A = [1 2]  ----> row1
    [3 4]  ----> row2

I need to compute:

1) s1 = transpose(row1) X row1
2) s2 = transpose(row2) X row2
3) Sum = s1 + s2

I wrote kernel code at the row level (i.e., I can do transpose(row1) X row1), but this serves the purpose for the first row only. How do I use parallelism to compute this for each row and find the final sum within the kernel function?
private static String programSource1 =
    "__kernel" +
    " void matrixMul(__global float* A, __global float* C, int rowLength)" +
    "{" +
    "    int row = get_global_id(1);" +
    "    int col = get_global_id(0);" +
    "    C[row*rowLength+col] = A[col] * A[row];" +
    "}";
#define MAX_ROW_LENGTH 2 // or more

__kernel void matrixMul(__global float* A, __global float* C,
                        int rowLength)
{
    __local float buffer[MAX_ROW_LENGTH * MAX_ROW_LENGTH];
    __local float s1[MAX_ROW_LENGTH * MAX_ROW_LENGTH];

    int col  = get_global_id(0);
    int row  = get_global_id(1);
    int rows = get_global_size(1);

    // read the matrix from global to local memory
    buffer[row * rowLength + col] = A[row * rowLength + col];
    s1[row * rowLength + col] = 0.0f;
    barrier(CLK_LOCAL_MEM_FENCE);

    for (int i = 0; i < rows; ++i)
    {
        s1[row * rowLength + col] +=
            buffer[i * rowLength + col] * buffer[i * rowLength + row];
    }
    C[row * rowLength + col] = s1[row * rowLength + col];
}
Here is some kernel code that does what you want for small matrices. The kernel uses local memory to reduce global memory accesses. For such small problems (a 2x2 matrix) this won't achieve anything, but if you are computing bigger matrices it can speed things up a little. However, this is a short example and not optimized. It comes with some limitations:

- this code only supports local workgroup sizes equal to the global workgroup size (no chunks);
- if your matrices get too big, the shared memory will limit the utilization of your GPU;
- if your matrices get really big, there will not be enough shared memory.

If you don't want local memory, replace the references to buffer within the for loop with A and write directly to C instead of s1, as sketched below.
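A minimal sketch of that variant, with the substitutions applied (reads go straight from A, the accumulation happens in a private variable, and the result is written directly to C):

__kernel void matrixMul(__global float* A, __global float* C,
                        int rowLength)
{
    int col  = get_global_id(0);
    int row  = get_global_id(1);
    int rows = get_global_size(1);

    float acc = 0.0f;   // private accumulator replaces the s1 scratch array
    for (int i = 0; i < rows; ++i)
        acc += A[i * rowLength + col] * A[i * rowLength + row];
    C[row * rowLength + col] = acc;
}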
I am working on a piece of OpenCL code for a specialized matrix function: for a Dx1 vector v, two DxD matrices A and B, and a constant c, return the 1xD vector r where r[i] = c * sum_over_j (v[j] * A[i][j] * B[i][j]).

Below is what I have so far, but it runs freakishly slowly. A version without the summing, which returns a DxD matrix, is about ten times faster. It's called from PyOpenCL, if that makes any difference.

Am I doing anything wrong? Can it be optimized?
#define D 1000
...
__kernel void element_mult(
    __global float *result,
    __global const float *vector,
    __global const float *matrix,
    __global const float *matrix2,
    const float factor)
{
    int y = get_global_id(1);
    float sum = 0;
    for(int k = 0; k < D; k++)
    {
        sum += vector[k] * matrix[(y*D) + k]
             * matrix2[(y*D) + k];
    }
    result[y] = sum * factor;
}
Cheers!
Optimization #1: make vector __local.
My first pass at this got a decent improvement in performance. I noticed that each vector[k] is read a total of D times, so I copied it to a __local buffer. This is only possible because D is small enough to allow it. The kernel as you have it above suffers from a terrible ALU:fetch ratio of 0.08 on both the 5870 and 6970 GPUs; even the slower GPUs are still waiting on the memory accesses.
#define D 1000

__kernel void element_mult(
    __global float *result,
    __global const float *vector,
    __global const float *matrix,
    __global const float *matrix2,
    const float factor)
{
    int y = get_global_id(0);
    float sum = 0;

    // copy the vector to local memory once per work group;
    // each vector[k] is read D times, so this saves global fetches
    __local float vectCopy[D];
    int ls  = get_local_size(0);
    int lid = get_local_id(0);
    for(int i = 0; i < D; i += ls){
        if(i + lid < D)              // guard: D need not be a multiple of ls
            vectCopy[i+lid] = vector[i+lid];
    }
    barrier(CLK_LOCAL_MEM_FENCE);    // all copies must finish before any reads

    for(int k = 0; k < D; k++)
    {
        sum += vectCopy[k] * matrix[(y*D) + k] * matrix2[(y*D) + k];
    }
    result[y] = sum * factor;
}
With this change, APP profiler shows a new ALU:fetch ratio of 0.20 for the 5870 and 6970 GPUs. Average times changed from 1513 --> 1034 and 1261 --> 861 on the same cards. The low-end GPUs are now bound by ALU instead of fetch (greater than a 4:1 ratio).

Optimization #2: calculate each result[y] using an entire work group.

You would have to do this if D were much larger (100k+). The idea is to get the best memory access pattern by using the work group to compute a single element of the result at a time. I defined ls (local size) to be 64 here because it works on my hardware, as well as most vendors'. The workgroup size you use from the host side will have to be 64 unless you change that definition. It needs to be defined to create the sum[ls] storage as __local, and I don't like passing variable-sized __local vars into my kernels.

Results: 5870 ALU:fetch = 0.59:1, avg = 708. 6970 ALU:fetch = 0.72, avg = 590. According to APP profiler, this is about twice as fast as your original listing.
#define D 1000
#define ls 64

__kernel void element_mult(
    __global float *result,
    __global const float *vector,
    __global const float *matrix,
    __global const float *matrix2,
    const float factor)
{
    // copy the vector to local memory, as in optimization #1
    __local float vectCopy[D];
    int lid = get_local_id(0);
    for(int i = 0; i < D; i += ls){
        if(i + lid < D)              // guard: D need not be a multiple of ls
            vectCopy[i+lid] = vector[i+lid];
    }
    barrier(CLK_LOCAL_MEM_FENCE);

    int ng  = get_num_groups(0);
    int gid = get_group_id(0);
    int y, k;
    __local float sum[ls];

    // each work group computes whole elements of the result, one at a time
    for(y = gid; y < D; y += ng){
        sum[lid] = 0.0f;             // reset this element's partial sums
        for(k = lid; k < D; k += ls)
        {
            sum[lid] += vectCopy[k] * matrix[(y*D) + k] * matrix2[(y*D) + k];
        }
        barrier(CLK_LOCAL_MEM_FENCE);    // partial sums must be visible to lid 0
        if(lid == 0){
            result[y] = sum[0];
            for(k = 1; k < ls; k++){
                result[y] += sum[k];
            }
            result[y] *= factor;
        }
        barrier(CLK_LOCAL_MEM_FENCE);    // finish the read before the next reset
    }
}
EDIT: APP profiler = AMD APP KernelAnalyzer