I need to find the average of thousands (20,000+) of images represented as unsigned short arrays. Could you please check my code? It looks to me like it is not optimal:
my kernel:
__global__ void VecAdd(unsigned short *A, float *B, unsigned int Size, float div){
register float divider = div;
register int idx = threadIdx.x + blockIdx.x * blockDim.x;
if ( idx < Size) {
B[ idx ] = (float) A[idx] / divider + B[idx];
}
//__syncthreads();
}
kernel wrapper:
void kernel_wrapper(unsigned short* pixels1, float* pixels2, unsigned int length, float div)
{
unsigned short* deviceData1;
float* deviceData2;
cudaMalloc((void**)&deviceData1, length * sizeof(unsigned short));
cudaMalloc((void**)&deviceData2, length * sizeof(float));
cudaMemcpy(deviceData1, pixels1, length * sizeof(unsigned short), cudaMemcpyHostToDevice);
cudaMemcpy(deviceData2, pixels2, length * sizeof(float), cudaMemcpyHostToDevice);
int threads = 1024; //my maximum
int blocks = (length / threads); // length = 1280*960 -> blocks = 1200
VecAdd<<< blocks, threads >>>( deviceData1, deviceData2, length, div );
cudaMemcpy(pixels2, deviceData2, length * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree( deviceData1 );
cudaFree( deviceData2 );
}
and I do
float* avrg2f = (float*)malloc( width * height * sizeof(float));
memset( avrg2f, 0.0, sizeof(float) * width * height);
for (int k = 0; k < count; k++) {
imageObjectList.at( curObj )->getImage( k );
kernel_wrapper( avrg1, avrg2f, height * width, (float)count);
}
As a result, my averaged image will be in avrg2f.
Thank you.
If the images are all the same size, then your wrapper function need not do cudaMalloc and cudaFree operations on every call.
Pre-allocate the storage needed, and don't allocate and free it on every call to the wrapper.
In addition you may see something like a ~2x speedup (for the cudaMemcpy operations) if you use pinned allocations (cudaHostAlloc) on the host side for your image storage.
Finally, for the duration of your loop, there's no need to copy the results back to the host. Do this after you're done computing the average. This will save 2 out of the 3 cudaMemcpy operations you are doing in the wrapper.
While we're at it, in my opinion using memset to initialize a float array is questionable. It works for a zero value, but essentially no other value. Furthermore, I would expect passing 0.0 as the second parameter to memset to at least produce a compiler warning.
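If you want to keep the host-side initialization explicit, a plain loop (which is what my test code below uses) or std::fill avoids that pitfall entirely; a minimal sketch, reusing width, height and avrg2f from your code:
#include <algorithm>   // std::fill
float* avrg2f = (float*)malloc( width * height * sizeof(float));
std::fill(avrg2f, avrg2f + width * height, 0.0f);   // works for any value, not just zero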
The following code shows the above optimizations (note that the second kernel, VecAdd2, also multiplies by the reciprocal 1/count instead of dividing, which is slightly cheaper), and demonstrates about an 8x speedup over your code in my test case:
#include <stdio.h>
#include <sys/time.h>
#include <time.h>
#include <math.h>   // for fabs() used in the results check
__global__ void VecAdd(unsigned short *A, float *B, unsigned int Size, float div){
register float divider = div;
register int idx = threadIdx.x + blockIdx.x * blockDim.x;
if ( idx < Size) {
B[ idx ] = (float) A[idx] / divider + B[idx];
}
//__syncthreads();
}
__global__ void VecAdd2(unsigned short *A, float *B, unsigned int Size, float mult){
register int idx = threadIdx.x + blockIdx.x * blockDim.x;
if ( idx < Size) {
B[ idx ] = (float) A[idx] * mult + B[idx];
}
}
void kernel_wrapper(unsigned short* pixels1, float* pixels2, unsigned int length, float div)
{
unsigned short* deviceData1;
float* deviceData2;
cudaMalloc((void**)&deviceData1, length * sizeof(unsigned short));
cudaMalloc((void**)&deviceData2, length * sizeof(float));
cudaMemcpy(deviceData1, pixels1, length * sizeof(unsigned short), cudaMemcpyHostToDevice);
cudaMemcpy(deviceData2, pixels2, length * sizeof(float), cudaMemcpyHostToDevice);
int threads = 1024; //my maximum
int blocks = (length / threads); // length = 1280*960 -> blocks = 1200
VecAdd<<< blocks, threads >>>( deviceData1, deviceData2, length, div );
cudaMemcpy(pixels2, deviceData2, length * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree( deviceData1 );
cudaFree( deviceData2 );
}
void kernel_wrapper2(unsigned short* h_pixels1, unsigned short* d_pixels1, float* d_pixels2, unsigned int length, float my_mult)
{
cudaMemcpy(d_pixels1, h_pixels1, length * sizeof(unsigned short), cudaMemcpyHostToDevice);
int threads = 1024; //my maximum
int blocks = (length / threads); // length = 1280*960 -> blocks = 1200
VecAdd2<<< blocks, threads >>>( d_pixels1, d_pixels2, length, my_mult );
}
int main(){
const int count = 2000;
const int width = 1280;
const int height = 960;
timeval t1, t2;
unsigned long et;
unsigned short *h1_image;
h1_image = (unsigned short *)malloc(height*width*sizeof(unsigned short));
float* avrg2f = (float*)malloc( width * height * sizeof(float));
for (int i = 0; i<height*width; i++){
h1_image[i] = (i%256);
avrg2f[i] = 0.0f;
}
gettimeofday(&t1,NULL);
for (int k = 0; k < count; k++) {
kernel_wrapper( h1_image, avrg2f, height * width, (float)count);
}
gettimeofday(&t2,NULL);
et = ((t2.tv_sec * 1000000)+t2.tv_usec) - ((t1.tv_sec * 1000000) + t1.tv_usec);
printf("time 1 = %ld us\n", et);
unsigned short *h2_image;
float* avrg3f = (float*)malloc( width * height * sizeof(float));
cudaHostAlloc((void **)&h2_image, height*width*sizeof(unsigned short), cudaHostAllocDefault);
for (int i = 0; i<height*width; i++){
h2_image[i] = (i%256);
avrg3f[i] = 0.0f;
}
gettimeofday(&t1,NULL);
unsigned short *d_image;
float *d_result;
cudaMalloc((void **)&d_image, height*width*sizeof(unsigned short));
cudaMalloc((void **)&d_result, height*width*sizeof(float));
cudaMemcpy(d_result, avrg3f, height*width*sizeof(float), cudaMemcpyHostToDevice);
for (int k = 0; k < count; k++) {
kernel_wrapper2( h2_image, d_image, d_result, height * width, (float)(1/(float)count));
}
cudaMemcpy(avrg3f, d_result, height*width*sizeof(float), cudaMemcpyDeviceToHost);
gettimeofday(&t2,NULL);
et = ((t2.tv_sec * 1000000)+t2.tv_usec) - ((t1.tv_sec * 1000000) + t1.tv_usec);
printf("time 2 = %ld us\n", et);
for (int i = 0; i < (height*width); i++)
if (fabs(avrg2f[i] - avrg3f[i]) > 0.0001) {printf("mismatch at %d, 1 = %f, 2 = %f\n", i, avrg2f[i], avrg3f[i]); return 1;}
return 0;
}
I'm trying to allocate a matrix on the device, fill it with some number in a kernel, and then copy it back to the host. The problem is that on the host only one row seems to be filled.
I got something like this:
9 9 9 9
-1 -1 -1 -1
-1 -1 -1 -1
-1 -1 -1 -1
Here is my code:
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
void check(cudaError x) {
fprintf(stderr, "%s\n", cudaGetErrorString(x));
}
void showMatrix2(int* v1, int width, int height) {
printf("---------------------\n");
for (int i = 0; i < width; i++) {
for (int j = 0; j < height; j++) {
printf("%d ", v1[i * width + j]);
}
printf("\n");
}
}
__global__ void kernel(int* tab,int width, int height, int pitch) {
int row = threadIdx.x + blockIdx.x * blockDim.x;
int col = threadIdx.y + blockIdx.y * blockDim.y;
if (row < width && col < height) {
tab[col * pitch + row] = 9;
}
}
int main()
{
int width = 4;
int height = 4;
int* d_tab;
int* h_tab;
int realSize = width * height* sizeof(int);
size_t pitch;
check( cudaMallocPitch(&d_tab, &pitch, width * sizeof(int), height) );
h_tab = (int*)malloc(realSize);
check( cudaMemset(d_tab, 0, realSize) );
dim3 grid(4, 4);
dim3 block(4, 4);
kernel <<<grid, block>>>(d_tab, width, height, pitch);
check( cudaMemcpy2D(h_tab, width*sizeof(int), d_tab, pitch, width*sizeof(int), height, cudaMemcpyDeviceToHost) );
showMatrix2(h_tab, width, height);
printf("\nPitch size: %d \n", pitch);
getchar();
return 0;
}
Any time you are having trouble with a CUDA code, in addition to doing proper error checking, run your code with cuda-memcheck. If you had done so, you would have gotten at least a hint as to what is going on, and you could then use standard debugging techniques to continue your own debugging. Even if you can't figure it out, the cuda-memcheck output will be useful to others trying to help you.
You have invalid writes in your kernel. There are multiple errors here. To properly access a pitched allocation in kernel code, I strongly recommend studying the example given in the documentation for cudaMallocPitch. In a nutshell, this kind of index generation is just broken:
tab[col * pitch + row]
First, the pitch returned by cudaMallocPitch is a width in bytes. You cannot use it as an adjustment to an element index for quantities like int or float (study the documentation). Second, the pitch value should ultimately multiply a row index, not a column index.
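In a nutshell, the usual access pattern (a sketch using the names from your kernel) is to advance the base pointer by row * pitch bytes and only then index the resulting row by column:
// Step down `row` rows of `pitch` BYTES from the base pointer, then index
// that row by column, in elements.
int *rowStart = (int *)((char *)tab + row * pitch);
rowStart[col] = 9;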
Not related to your problem, but your final printf statement has an incorrect format specifier. If you are on a 64-bit platform, it should be %ld (or better, %lu).
Here is a code that has the indexing issue fixed, it seems to work correctly for me:
$ cat t109.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
void check(cudaError x) {
fprintf(stderr, "%s\n", cudaGetErrorString(x));
}
void showMatrix2(int* v1, int width, int height) {
printf("---------------------\n");
for (int i = 0; i < width; i++) {
for (int j = 0; j < height; j++) {
printf("%d ", v1[i * width + j]);
}
printf("\n");
}
}
__global__ void kernel(int* tab,int width, int height, int pitch) {
int row = threadIdx.x + blockIdx.x * blockDim.x;
int col = threadIdx.y + blockIdx.y * blockDim.y;
if (row < width && col < height) {
*( ((int *)(((char *)tab) + (row * pitch))) + col) = 9;
}
}
int main()
{
int width = 4;
int height = 4;
int* d_tab;
int* h_tab;
int realSize = width * height* sizeof(int);
size_t pitch;
check( cudaMallocPitch(&d_tab, &pitch, width * sizeof(int), height) );
h_tab = (int*)malloc(realSize);
check( cudaMemset(d_tab, 0, realSize) );
dim3 grid(4, 4);
dim3 block(4, 4);
kernel <<<grid, block>>>(d_tab, width, height, pitch);
check( cudaMemcpy2D(h_tab, width*sizeof(int), d_tab, pitch, width*sizeof(int), height, cudaMemcpyDeviceToHost) );
showMatrix2(h_tab, width, height);
printf("\nPitch size: %ld \n", pitch);
return 0;
}
$ nvcc -arch=sm_61 -o t109 t109.cu
$ cuda-memcheck ./t109
========= CUDA-MEMCHECK
no error
no error
no error
---------------------
9 9 9 9
9 9 9 9
9 9 9 9
9 9 9 9
Pitch size: 512
========= ERROR SUMMARY: 0 errors
$
I want to do a sub-matrix multiplication. Say I have a function:
void MatMul(cl_mem A, cl_mem B, cl_mem C, int M, int K, int N)
where A is M*K, B is K*N, C is M*N, and A, B, C are all row-major, one-dimensional arrays passed from host memory float *h_A, *h_B, *h_C with the following function:
void ocl_push_array(cl_mem d_x, float *h_x, int n){
size_t data_size = sizeof(float)*n;
err = clEnqueueWriteBuffer(queue, d_x, CL_TRUE, 0, data_size, h_x, 0, NULL, NULL);
}
I want to ask:
if I want to do sub-matrix multiplication, say slicing A by row:
// cl_mem A, B, C;
for(int x=0; x<M; x+=16)
{
cl_mem A_sub = (cl_mem)((float *)A+x*K);
cl_mem C_sub = (cl_mem)((float *)C+x*N);
if((M-x+1)>=16)
MatMul(A_sub, B, C_sub, 16, K, N);
else
MatMul(A_sub, B, C_sub, M-x+1, K, N);
}
Is this the right way to do this operation? I got a runtime error saying "CL_INVALID_MEM_OBJECT" (-38) when it assigns arguments to the OpenCL kernel (clSetKernelArg).
The reason I want to do this operation is that I found the matrix multiplication gives wrong answers when my input matrices A and B become big.
My OpenCL kernel is:
#define BLOCK_SIZE 16
#define AS(i, j) As[j + i * BLOCK_SIZE]
#define BS(i, j) Bs[j + i * BLOCK_SIZE]
__kernel void
matrixMul(__global float* A, __global float* B, __global float* C,
__local float* As, __local float* Bs, int uiWA, int uiWB)
{
int bx = get_group_id(0);
int by = get_group_id(1);
int tx = get_local_id(0);
int ty = get_local_id(1);
int aBegin = uiWA * BLOCK_SIZE * by;
int aEnd = aBegin + uiWA - 1;
int aStep = BLOCK_SIZE;
int bBegin = BLOCK_SIZE * bx;
int bStep = BLOCK_SIZE * uiWB;
float Csub = 0.0f;
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
AS(ty, tx) = A[a + uiWA * ty + tx];
BS(ty, tx) = B[b + uiWB * ty + tx];
barrier(CLK_LOCAL_MEM_FENCE);
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub += AS(ty, k) * BS(k, tx);
barrier(CLK_LOCAL_MEM_FENCE);
}
C[get_global_id(1) * get_global_size(0) + get_global_id(0)] = Csub;
}
and the size is:
#define BLOCK_SIZE 16
size_t localWorkSize[] = {BLOCK_SIZE, BLOCK_SIZE};
size_t globalWorkSize[] = {shrRoundUp(BLOCK_SIZE, N), shrRoundUp(BLOCK_SIZE, M)};
size_t shrRoundUp(int group_size, int global_size)
{
int r = global_size % group_size;
if(r == 0)
{
return global_size;
} else
{
return global_size + group_size - r;
}
}
The code is adapted from the Nvidia OpenCL matrix multiplication sample. My GPU is an Intel(R) HD Graphics 4600.
Thanks!
I don't think you can do this:
cl_mem A_sub = (cl_mem)((float *)A+x*K);
Because cl_mem is an object in OpenCL; it is actually a complex data structure rather than just a data pointer. It maintains information such as the pointer to the actual memory, a reference count, memory properties, and so on. Different run-times may even have different implementations of the cl_mem object. That's why you got the CL_INVALID_MEM_OBJECT error message.
What you can do to get the wanted data for the sub-matrix is one of the following:
1. Define two new cl_mem objects and use a separate kernel to do the copy work.
2. Use the clEnqueueCopyBuffer function, enqueued from the host code, to copy the data (see the sketch after this list).
3. Create the memory buffer with CL_MEM_ALLOC_HOST_PTR, then use clEnqueueMapBuffer to map the GPU memory to a host memory pointer, modify the memory content through the mapped pointer, and when you finish, unmap the pointer to return the memory to the device memory domain.
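For example, a minimal sketch of option 2, copying a 16-row slice of A that starts at row x into a separate buffer A_sub (queue and err are the same globals your ocl_push_array uses; A_sub is an illustrative name for a buffer you would create beforehand with clCreateBuffer, at least 16*K floats in size):
// Copy rows [x, x+16) of A (an M*K row-major float matrix) into A_sub.
size_t src_offset = (size_t)x * K * sizeof(float); // byte offset of row x in A
size_t bytes = 16 * K * sizeof(float);             // size of the 16-row slice
err = clEnqueueCopyBuffer(queue, A, A_sub,
                          src_offset, 0, bytes,
                          0, NULL, NULL);
// A_sub can now be passed to MatMul(A_sub, B, C_sub, 16, K, N).
C_sub would need the same treatment, plus a copy back into C after the multiplication.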
I implemented a reduce kernel in OpenCL to sum up all entries in the input vector of size N. For easier testing I initialize the input vector with 1.0f, so the result should be N. But it is not!
Here is my reduce-kernel:
kernel void reduce(global float* input, global float* output, const unsigned int N, local float* cache)
{
const uint local_id = get_local_id(0);
const uint global_id = get_global_id(0);
const uint local_size = get_local_size(0);
cache[local_id] = (global_id < N) ? input[global_id] : 0.0f;
barrier(CLK_LOCAL_MEM_FENCE);
for (unsigned int s = local_size >> 1; s > 0; s >>= 1) {
if (local_id < s) {
cache[local_id] += cache[local_id + s];
}
barrier(CLK_LOCAL_MEM_FENCE);
}
if (local_id == 0) output[local_size] = cache[0];
}
And here is the setting for OpenCL:
const uint N = 8196;
cl_float a[N];
cl_float b[N];
for (uint i=0; i<N; i++) {
a[i] = 1.0f;
b[i] = 0.0f;
}
cl::Buffer inputBuffer(context, CL_MEM_WRITE_ONLY, sizeof(cl_float)*N);
cl::Buffer resultBuffer(context, CL_MEM_READ_ONLY, sizeof(cl_float)*N);
queue.enqueueWriteBuffer(inputBuffer, CL_TRUE, 0, sizeof(cl_float)*N, a);
queue.enqueueWriteBuffer(resultBuffer, CL_TRUE, 0, sizeof(cl_float)*N, b);
cl::Kernel addVectorKernel = cl::Kernel(program, "reduce");
size_t localSize = addVectorKernel.getWorkGroupInfo<CL_KERNEL_WORK_GROUP_SIZE>(device); // e.g. => 512
size_t globalSize = roundUp(localSize, N); // rounds up to a multiple of localSize
addVectorKernel.setArg(0, inputBuffer);
addVectorKernel.setArg(1, resultBuffer);
addVectorKernel.setArg(2, N);
addVectorKernel.setArg(3, (sizeof(cl_float) * localSize), NULL);
queue.enqueueNDRangeKernel(
addVectorKernel,
cl::NullRange,
cl::NDRange(globalSize),
cl::NDRange(localSize)
);
queue.finish(); // wait for ending
queue.enqueueReadBuffer(resultBuffer, CL_TRUE, 0, sizeof(cl_float)*N, b); // e.g. => 1024
The result depends on the workgroup size. What am I doing wrong? Is it the kernel itself or is it the settings for OpenCL?
You should be using the group's id when writing the sum back to global memory.
if (local_id == 0) output[local_size] = cache[0];
That line will write to output[512] repeatedly. You need each work group to write to a dedicated location in the output.
kernel void reduce(global float* input, global float* output, const unsigned int N, local float* cache)
{
const uint local_id = get_local_id(0);
const uint global_id = get_global_id(0);
const uint group_id = get_group_id(0);
const uint local_size = get_local_size(0);
cache[local_id] = (global_id < N) ? input[global_id] : 0.0f;
barrier(CLK_LOCAL_MEM_FENCE);
for (unsigned int s = local_size >> 1; s > 0; s >>= 1) {
if (local_id < s) {
cache[local_id] += cache[local_id + s];
}
barrier(CLK_LOCAL_MEM_FENCE);
}
if (local_id == 0) output[group_id] = cache[0];
}
Then you need to sum the values from the output on the host. Note that 'b' in the host code does not need to hold N elements. Only one element for each work group will be used.
//replace (globalSize/localSize) with the pre-calculated/known number of work groups
for (i=1; i<(globalSize/localSize); i++) {
b[0] += b[i];
}
Now b[0] is your grand total.
In the reduction for loop, you need this:
for(unsigned int s = localSize >> 1; s > 0; s >>= 1)
You are shifting one more bit than you should when initializing s.
After that's fixed, let's look at what your kernel is doing. The host code executes it with a globalSize of 8192 and a localSize of 512, which results in 16 work groups. Inside the kernel you first sum the data from the two consecutive memory locations at index 2*global_id. For the work group with id 15, work item 0, those will be at indices 15*512*2 = 15,360 and 15,361, which are outside the boundaries of your input array. I am surprised you don't get a crash. At the same time, this explains why you have double the values that you expect.
To fix it, you can do this:
cache[localID] = input[globalID];
Or specify a global size that is half of the current one.
In OpenCL, if I want to add two N-dimensional vectors, the global work size (globalSize) should satisfy globalSize = ceil(N/localSize) * localSize, where localSize is the local work group size. Is this correct? If N = 1000 and localSize = 128, globalSize should be 1024? Can we always set globalSize to some multiple of localSize that is larger than needed?
I tried many times and it worked well for 1-dimension problems.
However, when it comes to 2d problems, for example, multiply two matrices of dimension m*n and n*p, the result matrix is of order m*p, things get more complicated.
The max work group size on my device is 128, so I set localSize [2] = {16,8} and
globalSize [2] = {ceil(m/16)*16,ceil(p/8)*8}.
It is similar to the 1-dimension case but the result is wrong!
If I set localSize [2] = {1,128} and change the globalSize accordingly, I can get the correct result. So where is the problem? Can anyone tell me why?
In addition, I found the indices where the matrix elements are wrong.
It seems that the result is wrong at (i,j) where i*p + j = n * some constant (n = 1, 2, 3, ...).
Why?
Here is my kernel function:
kernel void mmult(const int Mdim, const int Ndim, const int Pdim,
global float *A, global float *B, global float *C)
{
int i = get_global_id(1);
int j = get_global_id(0);
if(i < 0 || j < 0 || i > Mdim || j > Pdim) return;
else
{
float tmp = 0;
for(int k = 0; k < Ndim; k++)
tmp += A[i*Ndim+k] * B[k*Pdim+j];
C[i*Pdim + j] = tmp;
}
}
And then it is the host program:
#define __NO_STD_VECTOR // Use cl::vector instead of STL version
#define __CL_ENABLE_EXCEPTIONS
#include <CL/cl.hpp>
#include <utility>
#include <iostream>
#include <fstream>
#include <string>
#include <cmath>
using namespace cl;
int main()
{
// Create the two input matrices
int m = 1000;
int n = 1000;
int p = 1000;
float *A = new float[m*n];
float *B = new float[n*p];
for(int i = 0; i < m*n; i++)
{
A[i] = i;
}
for(int i = 0; i < n*p; i++)
{
B[i] = i;
}
try
{
// Get available platforms
vector<Platform> platforms;
Platform::get(&platforms);
// Select the default platform and create a context using this platform and the GPU
cl_context_properties cps[3] =
{
CL_CONTEXT_PLATFORM,
(cl_context_properties)(platforms[0])(),
0
};
Context context( CL_DEVICE_TYPE_GPU, cps);
// Get a list of devices on this platform
vector<Device> devices = context.getInfo<CL_CONTEXT_DEVICES>();
// Create a command queue and use the first device
CommandQueue queue = CommandQueue(context, devices[0]);
// Read source file
std::ifstream sourceFile("mmul.cl");
std::string sourceCode(
std::istreambuf_iterator<char>(sourceFile),
(std::istreambuf_iterator<char>()));
Program::Sources source(1, std::make_pair(sourceCode.c_str(), sourceCode.length()+1));
// Make program of the source code in the context
Program program = Program(context, source);
// Build program for these specific devices
program.build(devices);
// Make kernel
Kernel kernel(program, "mmult");
// Create memory buffers
Buffer bufferA = Buffer(context, CL_MEM_READ_ONLY, m*n * sizeof(float));
Buffer bufferB = Buffer(context, CL_MEM_READ_ONLY, p*n * sizeof(float));
Buffer bufferC = Buffer(context, CL_MEM_WRITE_ONLY, m*p * sizeof(float));
// Copy lists A and B to the memory buffers
queue.enqueueWriteBuffer(bufferA, CL_TRUE, 0, m * n * sizeof(float), A);
queue.enqueueWriteBuffer(bufferB, CL_TRUE, 0, p * n * sizeof(float), B);
// Set arguments to kernel
kernel.setArg(0, m);
kernel.setArg(1, n);
kernel.setArg(2, p);
kernel.setArg(3, bufferA);
kernel.setArg(4, bufferB);
kernel.setArg(5, bufferC);
// Run the kernel on specific ND range
NDRange global((ceil((float)(p)/16))*16,(ceil((float)(m)/8))*8);
NDRange local(16,8);
queue.enqueueNDRangeKernel(kernel, NullRange, global, local);
// Read buffer C into a local list
float *C = new float[m*p];
queue.enqueueReadBuffer(bufferC, CL_TRUE, 0, m*p * sizeof(float), C);
// check the correctness of the result
float *c = new float[m*p];
for(int i = 0; i < m; i++)
for(int j = 0; j < p; j++)
{
float z = 0.0;
for(int k = 0; k < n; k++)
{
z += A[i*n+k] * B[k*p+j];
}
c[i*p+j] = z;
}
for(int i = 0; i < m*p; i++)
{
if(fabs(c[i]-C[i])>0.001)
std::cout<<i<<" "<<c[i]<<" "<<C[i]<<std::endl;
}
delete []A;
delete []B;
delete []C;
}
catch(Error error)
{
std::cout << error.what() << "(" << error.err() << ")" << std::endl;
}
return 0;
}
Your bounds checking code inside your OpenCL kernel is incorrect. Instead of this:
if(i < 0 || j < 0 || i > Mdim || j > Pdim) return;
You should have this:
if(i < 0 || j < 0 || i >= Mdim || j >= Pdim) return;
With your rounded-up global size, the extra work items at j == Pdim slip through the original check and write C[i*Pdim + Pdim], which is the first element of row i+1; that is exactly why the wrong values show up at indices that are multiples of p.
Let's assume that you have a float matrix of size 1000x1000:
const int size = 1000;
// Whatever
float* myMatrix = (float*)calloc(size * size, sizeof(*myMatrix));
Determine the size of the Local Group first:
size_t localSize[] = {16, 8};
Then determine how many Local Groups you need (integer ceiling division, so the last group covers the remainder):
size_t numLocalGroups[] = {(size + localSize[0] - 1) / localSize[0], (size + localSize[1] - 1) / localSize[1]};
Finally, determine NDRange size:
size_t globalSize[] = {localSize[0] * numLocalGroups[0], localSize[1] * numLocalGroups[1]};
Don't forget to handle out-of-bounds accesses in the right-most Local Groups, as shown in the sketch below.
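A minimal kernel-side sketch of that guard (the kernel name fill and the stored value are just illustrative):
// Work items in the padded right-most/bottom-most groups fall outside the
// real matrix extents and must not touch memory.
kernel void fill(global float* m, const int size)
{
    int row = get_global_id(1);
    int col = get_global_id(0);
    if (row >= size || col >= size) return;   // out-of-bounds guard
    m[row * size + col] = 1.0f;
}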
I am having trouble with this piece of CUDA code I have written. It is supposed to be a CUDA implementation of Dijkstra's algorithm. The code is as follows:
__global__ void cuda_dijkstra_kernel_1(float* Va, int* Ea, int* Sa, float* Ca, float* Ua, char* Ma, unsigned int* lock){
int tid = blockIdx.x;
if(Ma[tid]=='1'){
Ma[tid] = '0';
int ind_Ea = Sa[tid * 2];
int num_edges = Sa[(tid * 2) + 1];
int v;
float wt = 0;
unsigned int leaveloop;
leaveloop = 0u;
while(leaveloop==0u){
if(atomicExch(lock, 1u) == 0u){
for(v = 0; v < num_edges; v++){
wt = (Va[tid * 3] - Va[Ea[ind_Ea + v] * 3]) * (Va[tid * 3] - Va[Ea[ind_Ea + v] * 3]) +
(Va[(tid * 3) + 1] - Va[(Ea[ind_Ea + v] * 3) + 1]) * (Va[(tid * 3) + 1] - Va[(Ea[ind_Ea + v] * 3) + 1]) +
(Va[(tid * 3) + 2] - Va[(Ea[ind_Ea + v] * 3) + 2]) * (Va[(tid * 3) + 2] - Va[(Ea[ind_Ea + v] * 3) + 2]) ;
wt = sqrt(wt);
if(Ca[Ea[ind_Ea + v]] > (Ca[tid] + wt)){
Ca[Ea[ind_Ea + v]] = Ca[tid] + wt;
Ma[Ea[ind_Ea + v]] = '1';
}
__threadfence();
leaveloop = 1u;
atomicExch(lock, 0u);
}
}
}
}
}
The problem is in the relaxation phase of Dijkstra's algorithm. I have implemented that phase as a critical section. If there is a vertex (let's say a) which is a neighbor of more than one vertex (i.e., connected to other vertices by edges), then all of the threads for those vertices will try to write to the location of vertex a in the cost array Ca. My goal is to have the smallest value written to that location. To do that, I am trying to serialize the process, and I am applying __threadfence() as well so that a value written by one thread is visible to the others, and eventually the smallest value is retained in the location of vertex a. But the problem is that this logic is not working. The location of vertex a does not get the smallest value of all the threads trying to write to that location, and I don't understand why. Any help will be highly appreciated.
There is a "classical" (at least, mostly referenced) implementation of Dijkstra's Single-Source Shortest Path (SSSP) algorithm on the GPU contained in the paper
Accelerating large graph algorithms on the GPU using CUDA by Parwan Harish and P.J. Narayanan
However, the implementation in that paper has been recognized to be bugged, see
CUDA Solutions for the SSSP Problem by Pedro J. MartÃn, Roberto Torres, and Antonio Gavilanes
I'm reporting below the implementation suggested in the first paper fixed according to the remark of the second. The code also contains a C++ version.
#include <sstream>
#include <vector>
#include <iostream>
#include <stdio.h>
#include <float.h>
#include "Utilities.cuh"
#define NUM_ASYNCHRONOUS_ITERATIONS 20 // Number of async loop iterations before attempting to read results back
#define BLOCK_SIZE 16
/***********************/
/* GRAPHDATA STRUCTURE */
/***********************/
// --- The graph data structure is an adjacency list.
typedef struct {
// --- Contains the integer offset to point to the edge list for each vertex
int *vertexArray;
// --- Overall number of vertices
int numVertices;
// --- Contains the "destination" vertices each edge is attached to
int *edgeArray;
// --- Overall number of edges
int numEdges;
// --- Contains the weight of each edge
float *weightArray;
} GraphData;
/**********************************/
/* GENERATE RANDOM GRAPH FUNCTION */
/**********************************/
void generateRandomGraph(GraphData *graph, int numVertices, int neighborsPerVertex) {
graph -> numVertices = numVertices;
graph -> vertexArray = (int *)malloc(graph -> numVertices * sizeof(int));
graph -> numEdges = numVertices * neighborsPerVertex;
graph -> edgeArray = (int *)malloc(graph -> numEdges * sizeof(int));
graph -> weightArray = (float *)malloc(graph -> numEdges * sizeof(float));
for (int i = 0; i < graph -> numVertices; i++) graph -> vertexArray[i] = i * neighborsPerVertex;
int *tempArray = (int *)malloc(neighborsPerVertex * sizeof(int));
for (int k = 0; k < numVertices; k++) {
for (int l = 0; l < neighborsPerVertex; l++) tempArray[l] = INT_MAX;
for (int l = 0; l < neighborsPerVertex; l++) {
bool goOn = false;
int temp;
while (goOn == false) {
goOn = true;
temp = (rand() % graph->numVertices);
for (int t = 0; t < neighborsPerVertex; t++)
if (temp == tempArray[t]) goOn = false;
if (temp == k) goOn = false;
if (goOn == true) tempArray[l] = temp;
}
graph -> edgeArray [k * neighborsPerVertex + l] = temp;
graph -> weightArray[k * neighborsPerVertex + l] = (float)(rand() % 1000) / 1000.0f;
}
}
}
/************************/
/* minDistance FUNCTION */
/************************/
// --- Finds the vertex with minimum distance value, from the set of vertices not yet included in shortest path tree
int minDistance(float *shortestDistances, bool *finalizedVertices, const int sourceVertex, const int N) {
// --- Initialize minimum value
int minIndex = sourceVertex;
float min = FLT_MAX;
for (int v = 0; v < N; v++)
if (finalizedVertices[v] == false && shortestDistances[v] <= min) min = shortestDistances[v], minIndex = v;
return minIndex;
}
/************************/
/* dijkstraCPU FUNCTION */
/************************/
void dijkstraCPU(float *graph, float *h_shortestDistances, int sourceVertex, const int N) {
// --- h_finalizedVertices[i] is true if vertex i is included in the shortest path tree
// or the shortest distance from the source node to i is finalized
bool *h_finalizedVertices = (bool *)malloc(N * sizeof(bool));
// --- Initialize h_shortestDistances as infinite and h_finalizedVertices as false
for (int i = 0; i < N; i++) h_shortestDistances[i] = FLT_MAX, h_finalizedVertices[i] = false;
// --- The shortest distance of the source vertex from itself is always 0
h_shortestDistances[sourceVertex] = 0.f;
// --- Dijkstra iterations
for (int iterCount = 0; iterCount < N - 1; iterCount++) {
// --- Selecting the minimum distance vertex from the set of vertices not yet
// processed. currentVertex is always equal to sourceVertex in the first iteration.
int currentVertex = minDistance(h_shortestDistances, h_finalizedVertices, sourceVertex, N);
// --- Mark the current vertex as processed
h_finalizedVertices[currentVertex] = true;
// --- Relaxation loop
for (int v = 0; v < N; v++) {
// --- Update dist[v] only if it is not in h_finalizedVertices, there is an edge
// from u to v, and the cost of the path from the source vertex to v through
// currentVertex is smaller than the current value of h_shortestDistances[v]
if (!h_finalizedVertices[v] &&
graph[currentVertex * N + v] &&
h_shortestDistances[currentVertex] != FLT_MAX &&
h_shortestDistances[currentVertex] + graph[currentVertex * N + v] < h_shortestDistances[v])
h_shortestDistances[v] = h_shortestDistances[currentVertex] + graph[currentVertex * N + v];
}
}
}
/***************************/
/* MASKARRAYEMPTY FUNCTION */
/***************************/
// --- Check whether all the vertices have been finalized. This tells the algorithm whether it needs to continue running or not.
bool allFinalizedVertices(bool *finalizedVertices, int numVertices) {
for (int i = 0; i < numVertices; i++) if (finalizedVertices[i] == true) { return false; }
return true;
}
/*************************/
/* ARRAY INITIALIZATIONS */
/*************************/
__global__ void initializeArrays(bool * __restrict__ d_finalizedVertices, float* __restrict__ d_shortestDistances, float* __restrict__ d_updatingShortestDistances,
const int sourceVertex, const int numVertices) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < numVertices) {
if (sourceVertex == tid) {
d_finalizedVertices[tid] = true;
d_shortestDistances[tid] = 0.f;
d_updatingShortestDistances[tid] = 0.f; }
else {
d_finalizedVertices[tid] = false;
d_shortestDistances[tid] = FLT_MAX;
d_updatingShortestDistances[tid] = FLT_MAX;
}
}
}
/**************************/
/* DIJKSTRA GPU KERNEL #1 */
/**************************/
__global__ void Kernel1(const int * __restrict__ vertexArray, const int* __restrict__ edgeArray,
const float * __restrict__ weightArray, bool * __restrict__ finalizedVertices, float* __restrict__ shortestDistances,
float * __restrict__ updatingShortestDistances, const int numVertices, const int numEdges) {
int tid = blockIdx.x*blockDim.x + threadIdx.x;
if (tid < numVertices) {
if (finalizedVertices[tid] == true) {
finalizedVertices[tid] = false;
int edgeStart = vertexArray[tid], edgeEnd;
if (tid + 1 < (numVertices)) edgeEnd = vertexArray[tid + 1];
else edgeEnd = numEdges;
for (int edge = edgeStart; edge < edgeEnd; edge++) {
int nid = edgeArray[edge];
atomicMin(&updatingShortestDistances[nid], shortestDistances[tid] + weightArray[edge]);
}
}
}
}
/**************************/
/* DIJKSTRA GPU KERNEL #2 */
/**************************/
__global__ void Kernel2(const int * __restrict__ vertexArray, const int * __restrict__ edgeArray, const float* __restrict__ weightArray,
bool * __restrict__ finalizedVertices, float* __restrict__ shortestDistances, float* __restrict__ updatingShortestDistances,
const int numVertices) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < numVertices) {
if (shortestDistances[tid] > updatingShortestDistances[tid]) {
shortestDistances[tid] = updatingShortestDistances[tid];
finalizedVertices[tid] = true; }
updatingShortestDistances[tid] = shortestDistances[tid];
}
}
/************************/
/* dijkstraGPU FUNCTION */
/************************/
void dijkstraGPU(GraphData *graph, const int sourceVertex, float * __restrict__ h_shortestDistances) {
// --- Create device-side adjacency-list, namely, vertex array Va, edge array Ea and weight array Wa from G(V,E,W)
int *d_vertexArray; gpuErrchk(cudaMalloc(&d_vertexArray, sizeof(int) * graph -> numVertices));
int *d_edgeArray; gpuErrchk(cudaMalloc(&d_edgeArray, sizeof(int) * graph -> numEdges));
float *d_weightArray; gpuErrchk(cudaMalloc(&d_weightArray, sizeof(float) * graph -> numEdges));
// --- Copy adjacency-list to the device
gpuErrchk(cudaMemcpy(d_vertexArray, graph -> vertexArray, sizeof(int) * graph -> numVertices, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_edgeArray, graph -> edgeArray, sizeof(int) * graph -> numEdges, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_weightArray, graph -> weightArray, sizeof(float) * graph -> numEdges, cudaMemcpyHostToDevice));
// --- Create mask array Ma, cost array Ca and updating cost array Ua of size V
bool *d_finalizedVertices; gpuErrchk(cudaMalloc(&d_finalizedVertices, sizeof(bool) * graph->numVertices));
float *d_shortestDistances; gpuErrchk(cudaMalloc(&d_shortestDistances, sizeof(float) * graph->numVertices));
float *d_updatingShortestDistances; gpuErrchk(cudaMalloc(&d_updatingShortestDistances, sizeof(float) * graph->numVertices));
bool *h_finalizedVertices = (bool *)malloc(sizeof(bool) * graph->numVertices);
// --- Initialize mask Ma to false, cost array Ca and updating cost array Ua to ∞
initializeArrays <<<iDivUp(graph->numVertices, BLOCK_SIZE), BLOCK_SIZE >>>(d_finalizedVertices, d_shortestDistances,
d_updatingShortestDistances, sourceVertex, graph -> numVertices);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// --- Read mask array from device -> host
gpuErrchk(cudaMemcpy(h_finalizedVertices, d_finalizedVertices, sizeof(bool) * graph->numVertices, cudaMemcpyDeviceToHost));
while (!allFinalizedVertices(h_finalizedVertices, graph->numVertices)) {
// --- In order to improve performance, we run some number of iterations without reading the results. This might result
// in running more iterations than necessary at times, but it will in most cases be faster because we are doing less
// stalling of the GPU waiting for results.
for (int asyncIter = 0; asyncIter < NUM_ASYNCHRONOUS_ITERATIONS; asyncIter++) {
Kernel1 <<<iDivUp(graph->numVertices, BLOCK_SIZE), BLOCK_SIZE >>>(d_vertexArray, d_edgeArray, d_weightArray, d_finalizedVertices, d_shortestDistances,
d_updatingShortestDistances, graph->numVertices, graph->numEdges);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
Kernel2 <<<iDivUp(graph->numVertices, BLOCK_SIZE), BLOCK_SIZE >>>(d_vertexArray, d_edgeArray, d_weightArray, d_finalizedVertices, d_shortestDistances, d_updatingShortestDistances,
graph->numVertices);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
}
gpuErrchk(cudaMemcpy(h_finalizedVertices, d_finalizedVertices, sizeof(bool) * graph->numVertices, cudaMemcpyDeviceToHost));
}
// --- Copy the result to host
gpuErrchk(cudaMemcpy(h_shortestDistances, d_shortestDistances, sizeof(float) * graph->numVertices, cudaMemcpyDeviceToHost));
free(h_finalizedVertices);
gpuErrchk(cudaFree(d_vertexArray));
gpuErrchk(cudaFree(d_edgeArray));
gpuErrchk(cudaFree(d_weightArray));
gpuErrchk(cudaFree(d_finalizedVertices));
gpuErrchk(cudaFree(d_shortestDistances));
gpuErrchk(cudaFree(d_updatingShortestDistances));
}
/****************/
/* MAIN PROGRAM */
/****************/
int main() {
// --- Number of graph vertices
int numVertices = 8;
// --- Number of edges per graph vertex
int neighborsPerVertex = 6;
// --- Source vertex
int sourceVertex = 0;
// --- Allocate memory for arrays
GraphData graph;
generateRandomGraph(&graph, numVertices, neighborsPerVertex);
// --- From adjacency list to adjacency matrix.
// Initializing the adjacency matrix
float *weightMatrix = (float *)malloc(numVertices * numVertices * sizeof(float));
for (int k = 0; k < numVertices * numVertices; k++) weightMatrix[k] = FLT_MAX;
// --- Displaying the adjacency list and constructing the adjacency matrix
printf("Adjacency list\n");
for (int k = 0; k < numVertices; k++) weightMatrix[k * numVertices + k] = 0.f;
for (int k = 0; k < numVertices; k++)
for (int l = 0; l < neighborsPerVertex; l++) {
weightMatrix[k * numVertices + graph.edgeArray[graph.vertexArray[k] + l]] = graph.weightArray[graph.vertexArray[k] + l];
printf("Vertex nr. %i; Edge nr. %i; Weight = %f\n", k, graph.edgeArray[graph.vertexArray[k] + l],
graph.weightArray[graph.vertexArray[k] + l]);
}
for (int k = 0; k < numVertices * neighborsPerVertex; k++)
printf("%i %i %f\n", k, graph.edgeArray[k], graph.weightArray[k]);
// --- Displaying the adjacency matrix
printf("\nAdjacency matrix\n");
for (int k = 0; k < numVertices; k++) {
for (int l = 0; l < numVertices; l++)
if (weightMatrix[k * numVertices + l] < FLT_MAX)
printf("%1.3f\t", weightMatrix[k * numVertices + l]);
else
printf("--\t");
printf("\n");
}
// --- Running Dijkstra on the CPU
float *h_shortestDistancesCPU = (float *)malloc(numVertices * sizeof(float));
dijkstraCPU(weightMatrix, h_shortestDistancesCPU, sourceVertex, numVertices);
printf("\nCPU results\n");
for (int k = 0; k < numVertices; k++) printf("From vertex %i to vertex %i = %f\n", sourceVertex, k, h_shortestDistancesCPU[k]);
// --- Allocate space for the h_shortestDistancesGPU
float *h_shortestDistancesGPU = (float*)malloc(sizeof(float) * graph.numVertices);
dijkstraGPU(&graph, sourceVertex, h_shortestDistancesGPU);
printf("\nGPU results\n");
for (int k = 0; k < numVertices; k++) printf("From vertex %i to vertex %i = %f\n", sourceVertex, k, h_shortestDistancesGPU[k]);
free(h_shortestDistancesCPU);
free(h_shortestDistancesGPU);
return 0;
}
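One remark on Kernel1: atomicMin has no built-in float overload in CUDA (it is only defined for integer types), so the call on updatingShortestDistances assumes a float overload supplied elsewhere, presumably by Utilities.cuh, which also provides gpuErrchk and iDivUp. A common sketch of such an overload, built on atomicCAS (an assumption on my part, not necessarily what Utilities.cuh actually contains):
// Float atomicMin built on atomicCAS: retry until either the stored value is
// already <= value or our candidate value has been installed.
__device__ float atomicMin(float *addr, float value) {
    float old = *addr;
    while (old > value) {
        float assumed = old;
        old = __int_as_float(atomicCAS((int *)addr,
                                       __float_as_int(assumed),
                                       __float_as_int(value)));
        if (old == assumed) break;
    }
    return old;
}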