Two forcings in compiled code - R package deSolve - r

I am using the package deSolve to run some models that include an external forcing. To gain speed, I produced compiled code following the package vignette (see section 6.2 of https://cran.r-project.org/web/packages/deSolve/vignettes/compiledCode.pdf). My problem is that I now want to introduce two external forcings into the compiled code. Does anyone have a working example, or know how to do it?
#include <R.h>
static double parms[6];
static double forc[1];
/* A trick to keep up with the parameters and forcings */
#define b parms[0]
#define c parms[1]
#define d parms[2]
#define e parms[3]
#define f parms[4]
#define g parms[5]
#define import forc[0]
/* initializers */
void odec(void (* odeparms)(int *, double *))
{
    int N = 6;
    odeparms(&N, parms);
}
void forcc(void (* odeforcs)(int *, double *))
{
    int N = 1;
    odeforcs(&N, forc);
}
/* derivative function */
void derivsc(int *neq, double *t, double *y, double *ydot, double *yout, int *ip)
{
    if (ip[0] < 2) error("nout should be at least 2");
    ydot[0] = import - b*y[0]*y[1] + g*y[2];
    ydot[1] = c*y[0]*y[1] - d*y[2]*y[1];
    ydot[2] = e*y[1]*y[2] - f*y[2];
    yout[0] = y[0] + y[1] + y[2];
    yout[1] = import;
}
Thanks

Oh, it was easy to solve. I just defined a second forcing and updated the counters. I modified the previous example as follows:
#include <R.h>
static double parms[6];
static double forc[2];
/* A trick to keep up with the parameters and forcings */
#define b parms[0]
#define c parms[1]
#define d parms[2]
#define e parms[3]
#define f parms[4]
#define g parms[5]
#define import forc[0]
#define import2 forc[1]
/* initializers */
void odec(void (* odeparms)(int *, double *))
{
    int N = 6;
    odeparms(&N, parms);
}
void forcc(void (* odeforcs)(int *, double *))
{
    int N = 2;
    odeforcs(&N, forc);
}
/* derivative function */
void derivsc(int *neq, double *t, double *y, double *ydot, double *yout, int *ip)
{
    if (ip[0] < 2) error("nout should be at least 2");
    ydot[0] = import2 - b*y[0]*y[1] + g*y[2];
    ydot[1] = c*y[0]*y[1] - d*y[2]*y[1];
    ydot[2] = e*y[1]*y[2] - f*y[2];
    yout[0] = y[0] + y[1] + y[2];
    yout[1] = import;
}
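For completeness, this is roughly how the R side looks for me; the dll/file name "twoforc" and the parameter values are placeholders for my setup. Each forcing is a two-column (time, value) data set, and the list order must match the forc[] indices:
# compile and load (sketch; "twoforc.c" is a placeholder name)
system("R CMD SHLIB twoforc.c")
dyn.load(paste0("twoforc", .Platform$dynlib.ext))

library(deSolve)
times <- 0:100
forc1 <- cbind(times, sin(times / 10))  # drives forc[0] (import)
forc2 <- cbind(times, cos(times / 10))  # drives forc[1] (import2)

out <- ode(y = c(y1 = 1, y2 = 1, y3 = 1), times = times,
           func = "derivsc",
           parms = c(b = 0.1, c = 0.1, d = 0.1, e = 0.1, f = 0.1, g = 0.1),
           dllname = "twoforc", initfunc = "odec", initforc = "forcc",
           forcings = list(forc1, forc2), nout = 2)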

Explicit FDM with CUDA

I am working on implementing CUDA for the following code. The first version is written serially, and I am sure its results are correct. I expect the second version, to which I have added CUDA functionality, to give the same result, but it seems that the kernel function does not do anything: it gives me back the initial values of u and v. I know that, due to my lack of experience, the bug may be obvious, but I cannot figure it out. Also, please do not recommend using a flattened array, because the indexing is harder for me to understand in the code.
First version:
#include <fstream>
#include <iostream>
#include <math.h>
#include <vector>
#include <chrono>
#include <omp.h>
using namespace std;
const int M = 1024;
const int N = 1024;
const double A = 1;
const double B = 3;
const double Du = 5 * pow(10, -5);
const double Dv = 5 * pow(10, -6);
const int Max_Itr = 1000;
const double h = 1.0 / static_cast<double>(M - 1);
const double delta_t = 0.0025;
const double s1 = (Du * delta_t) / pow(h, 2);
const double s2 = (Dv * delta_t) / pow(h, 2);
int main() {
    double **u = new double *[M];
    double **v = new double *[M];
    for (int i = 0; i < M; i++) {
        u[i] = new double[N];
        v[i] = new double[N];
    }
    for (int j = 0; j < M; j++) {
        for (int i = 0; i < N; i++) {
            u[i][j] = 0.02;
            v[i][j] = 0.02;
        }
    }
    for (int k = 1; k < Max_Itr; k++) {
        for (int i = 1; i < N - 1; i++) {
            for (int j = 1; j < M - 1; j++) {
                u[i][j] = ((1 - (4 * s1)) * u[i][j]) + (s1 * (u[i + 1][j] + u[i - 1][j] + u[i][j + 1] + u[i][j - 1])) +
                          (A * delta_t) + (delta_t * pow(u[i][j], 2) * v[i][j]) - (delta_t * (B + 1) * u[i][j]);
                v[i][j] = ((1 - (4 * s2)) * v[i][j]) + (s2 * (v[i + 1][j] + v[i - 1][j] + v[i][j + 1] + v[i][j - 1])) +
                          (B * delta_t * u[i][j]) - (delta_t * pow(u[i][j], 2) * v[i][j]);
            }
        }
    }
    cout << "u: " << u[512][512] << " v: " << v[512][512] << endl;
    return 0;
}
Second version:
#include <fstream>
#include <iostream>
#include <math.h>
#include <vector>
using namespace std;
#define M 1024
#define N 1024
__global__ void my_kernel(double **v, double **u){
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    double A = 1;
    double B = 3;
    int Max_Itr = 1000;
    double delta_t = 0.0025;
    double Du = 5 * powf(10, -5);
    double Dv = 5 * powf(10, -6);
    double h = 1.0 / (M - 1);
    double s1 = (Du * delta_t) / powf(h, 2);
    double s2 = (Dv * delta_t) / powf(h, 2);
    for (int k = 1; k < Max_Itr; k++) {
        u[i][j] = ((1 - (4 * s1)) * u[i][j]) + (s1 * (u[i + 1][j] + u[i - 1][j] + u[i][j + 1] + u[i][j - 1])) +
                  (A * delta_t) + (delta_t * pow(u[i][j], 2) * v[i][j]) - (delta_t * (B + 1) * u[i][j]);
        v[i][j] = ((1 - (4 * s2)) * v[i][j]) + (s2 * (v[i + 1][j] + v[i - 1][j] + v[i][j + 1] + v[i][j - 1])) +
                  (B * delta_t * u[i][j]) - (delta_t * pow(u[i][j], 2) * v[i][j]);
        __syncthreads();
    }
}
int main() {
    double **u = new double *[M];
    double **v = new double *[M];
    for (int i = 0; i < M; i++) {
        u[i] = new double[N];
        v[i] = new double[N];
    }
    dim3 blocks(32, 32);
    dim3 grids(M/32 + 1, N/32 + 1);
    for (int j = 0; j < M; j++) {
        for (int i = 0; i < N; i++) {
            u[i][j] = 0.02;
            v[i][j] = 0.02;
        }
    }
    double **u_d, **v_d;
    int d_size = N * M * sizeof(double);
    cudaMalloc(&u_d, d_size);
    cudaMalloc(&v_d, d_size);
    cudaMemcpy(u_d, u, d_size, cudaMemcpyHostToDevice);
    cudaMemcpy(v_d, v, d_size, cudaMemcpyHostToDevice);
    my_kernel<<<grids, blocks>>> (v_d, u_d);
    cudaDeviceSynchronize();
    cudaMemcpy(v, v_d, d_size, cudaMemcpyDeviceToHost);
    cudaMemcpy(u, u_d, d_size, cudaMemcpyDeviceToHost);
    cout << "u: " << u[512][512] << " v: " << v[512][512] << endl;
    return 0;
}
What I expect from the second version is:
u: 0.2815 v: 1.7581
Your two-dimensional array - in the first version of the program - is implemented using an array of pointers, each of which points to a separately-allocated array of double values.
In your second version, you are using the same pointer-to-pointer-to-double type, but - you're not allocating any space for the actual data, just for the array of pointers (and not copying any of the data to the GPU - just the pointers; which are useless to copy anyway, since they're pointers to host-side memory.)
What is most likely happening is that your kernel attempts to access memory at an invalid address, and its execution is aborted.
If you were to properly check for errors, as @njuffa noted, you would know that this is what happened.
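For example, you could wrap every runtime call in a checking macro. This is a common pattern, sketched here around the calls from your posted main (not drop-in code):
#include <cstdio>
#include <cstdlib>

// Report and abort on any CUDA runtime error.
#define CUDA_CHECK(call) do { \
    cudaError_t err_ = (call); \
    if (err_ != cudaSuccess) { \
        std::fprintf(stderr, "CUDA error '%s' at %s:%d\n", \
                     cudaGetErrorString(err_), __FILE__, __LINE__); \
        std::exit(EXIT_FAILURE); \
    } \
} while (0)

// In main(), after the allocations (sketch; u_d, grids, blocks are from your code):
CUDA_CHECK(cudaMemcpy(u_d, u, d_size, cudaMemcpyHostToDevice));
my_kernel<<<grids, blocks>>>(v_d, u_d);
CUDA_CHECK(cudaGetLastError());        // catches launch-configuration errors
CUDA_CHECK(cudaDeviceSynchronize());   // catches faults during kernel execution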
Now, you could avoid having to make multiple memory allocations if you were to use a single data area instead of separate allocations for each second-dimension 1D array; and that is true both for the first and the second version of your program. That would not quite be array flattening. See an explanation of how to do this (C-language-style) on this page.
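Sketched in CUDA terms, with my own names (u_data, u_rows), under the assumption that the kernel keeps its double** parameter:
#include <vector>

// One contiguous M*N block for the values, plus M device row pointers into it.
double *u_data;
double **u_rows;
cudaMalloc(&u_data, M * N * sizeof(double));
cudaMalloc(&u_rows, M * sizeof(double *));

// Compute the device row addresses on the host and copy them over once.
// (Arithmetic on a device pointer is fine on the host; only dereferencing it is not.)
std::vector<double *> h_rows(M);
for (int i = 0; i < M; ++i)
    h_rows[i] = u_data + i * N;
cudaMemcpy(u_rows, h_rows.data(), M * sizeof(double *), cudaMemcpyHostToDevice);

// Keep the host data in one contiguous block too, so a single copy suffices.
std::vector<double> h_u(M * N, 0.02);
cudaMemcpy(u_data, h_u.data(), M * N * sizeof(double), cudaMemcpyHostToDevice);

// my_kernel can then be launched with u_rows and keep using u[i][j] inside.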
Note, however, that double-dereferencing, which you insist on performing in your kernel, is likely slowing it down significantly.

Passing data to nlopt in Rcpp?

This is a rather simple question, but I haven't been able to quite find the answer on the web yet.
Regarding my latest attempt, here is the latest compiler output:
note: candidate function not viable: no known conversion from 'double (unsigned int, const double *, void *, void *)' to 'nlopt_func' (aka 'double (*)(unsigned int, const double *, double *, void *)') for 2nd argument
From this error I surmise that I am now wrapping, or 'type casting', the data argument correctly, and the parameter vector as well. The discrepancy in the third input, the gradient, confuses me, as I am calling a gradient-free optimization routine.
Here is a simple linear regression with a constant and a variable:
#include "RcppArmadillo.h"
// [[Rcpp::depends(RcppArmadillo)]]
// [[Rcpp::depends(nloptr)]]
//#include <vector>
#include <nloptrAPI.h>
using namespace arma;
using namespace Rcpp;
typedef struct {
    arma::mat data_in;
} *my_func_data;
typedef struct {
    double a, b;
} my_theta;
double myfunc(unsigned n, const double *theta, void *grad, void *data){
    my_func_data &temp = (my_func_data &) data;
    arma::mat data_in = temp->data_in;
    my_theta *theta_temp = (my_theta *) theta;
    double a = theta_temp->a, b = theta_temp->b;
    int Len = arma::size(data_in)[0];
    arma::vec Y1 = data_in(span(0, Len-1), 1);
    arma::vec Y2 = data_in(span(0, Len-1), 2);
    arma::vec res = data_in(span(0, Len-1), 0) - a*Y1 - b*Y2;
    return sum(res);
}
// [[Rcpp::export]]
void test_nlopt_c() {
    arma::mat data_in(10, 3);
    data_in(span(0,9), 0) = arma::regspace(40, 49);
    data_in(span(0,9), 1) = arma::ones(10);
    data_in(span(0,9), 2) = arma::regspace(10, 19);
    my_func_data &temp = (my_func_data &) data_in;
    double lb[2] = { 0, 0 };                        /* lower bounds */
    nlopt_opt opt;
    opt = nlopt_create(NLOPT_LN_NELDERMEAD, 2);     /* algorithm and dimensionality */
    nlopt_set_lower_bounds(opt, lb);
    nlopt_set_min_objective(opt, myfunc, &data_in);
    nlopt_set_xtol_rel(opt, 1e-4);
    double minf;                                    /* the minimum objective value, upon return */
    double x[2] = {0.5, 0.5};                       /* some initial guess */
    nlopt_result result = nlopt_optimize(opt, x, &minf);
    Rcpp::Rcout << "result:" << result;
    return;
}
Got it figured out: the stupid-looking answer turns out to be correct. Just change 'void' to 'double' in the third argument of myfunc, so the signature matches nlopt_func, which expects a double * gradient parameter even when you use a gradient-free algorithm. The example code still needs some improving, but it works.
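For reference, this is the type the compiler note was comparing against, as nlopt itself declares it; the gradient slot exists even for derivative-free algorithms, which simply pass NULL:
/* declared in nlopt.h */
typedef double (*nlopt_func)(unsigned n, const double *x,
                             double *gradient, /* NULL if not needed */
                             void *func_data);
With that signature fixed, the full example: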
#include "RcppArmadillo.h"
// [[Rcpp::depends(RcppArmadillo)]]
// [[Rcpp::depends(nloptr)]]
//#include <vector>
#include <nloptrAPI.h>
using namespace arma;
using namespace Rcpp;
typedef struct {
    arma::mat data_in;
} *my_func_data;
typedef struct {
    double a, b;
} my_theta;
double myfunc(unsigned n, const double *theta, double *grad, void *data){
    my_func_data &temp = (my_func_data &) data;
    arma::mat data_in = temp->data_in;
    my_theta *theta_temp = (my_theta *) theta;
    double a = theta_temp->a, b = theta_temp->b;
    int Len = arma::size(data_in)[0];
    arma::vec Y1 = data_in(span(0, Len-1), 1);
    arma::vec Y2 = data_in(span(0, Len-1), 2);
    arma::vec res = data_in(span(0, Len-1), 0) - a*Y1 - b*Y2;
    return sum(res);
}
// [[Rcpp::export]]
void test_nlopt_c() {
    arma::mat data_in(10, 3);
    data_in(span(0,9), 0) = arma::regspace(40, 49);
    data_in(span(0,9), 1) = arma::ones(10);
    data_in(span(0,9), 2) = arma::regspace(10, 19);
    my_func_data &temp = (my_func_data &) data_in;
    double lb[2] = { 0, 0 };                        /* lower bounds */
    nlopt_opt opt;
    opt = nlopt_create(NLOPT_LN_NELDERMEAD, 2);     /* algorithm and dimensionality */
    nlopt_set_lower_bounds(opt, lb);
    nlopt_set_min_objective(opt, myfunc, &data_in);
    nlopt_set_xtol_rel(opt, 1e-4);
    double minf;                                    /* the minimum objective value, upon return */
    double x[2] = {0.5, 0.5};                       /* some initial guess */
    nlopt_result result = nlopt_optimize(opt, x, &minf);
    Rcpp::Rcout << "result:" << result;
    return;
}

C code translated to Qt crashes while executing

I'm a beginner at programming and Qt, but as I liked the framework, I'm trying to improve my skills and write my C++ code in it. I was given the task of writing a Ricker wavelet code and then plotting it.
I divided it into two tasks: first make the Ricker code work, and once it is running, implement a way to plot it; I will use QCustomPlot for that.
I got a code in C and I'm trying to adapt it to Qt. Although it doesn't give any errors during compilation, it crashes when executing, with the following message:
Invalid parameter passed to C runtime function. C:/Users/Flavio/Documents/qtTest/build-ricker2-Desktop_Qt_5_11_0_MinGW_32bit-Debug/debug/ricker2.exe
exited with code 255
The code I'm supposed to translate is:
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
float *rickerwavelet(float fpeak, float dt, int *nwricker);
int main(int argc, char **argv)
{
    int i;
    float dt;
    float fpeak;
    float *wricker = NULL;
    int nwricker;
    fpeak = atof(argv[1]);
    dt = atof(argv[2]);
    wricker = rickerwavelet(fpeak, dt, &nwricker);
    /* show values of the ricker wavelet */
    for (i = 0; i < nwricker; i++)
        printf("%i. %3.5f \n", i, wricker[i]);
    free(wricker);
    return(1);
}
/* ricker wavelet function, returns an array of ricker wavelet values */
float *rickerwavelet(float fpeak, float dt, int *nwricker)
{
    int i, k;
    int nw;
    int nc;
    float pi;
    float nw1, alpha, beta;
    float *wricker = NULL;
    pi = 3.141592653589793;
    nw1 = 2.2/fpeak/dt;
    nw = 2*floor(nw1/2)+1;
    nc = floor(nw/2);
    wricker = (float*) calloc(nw, sizeof(float));
    for (i = 0; i < nw; i++)
    {
        k = i+1;
        alpha = (nc-k+1)*fpeak*dt*pi;
        beta = pow(alpha, 2.0);
        wricker[i] = (1 - (beta*2)) * exp(-beta);
    }
    (*nwricker) = nw;
    return(wricker);
}
The code I wrote in Qt is:
#include <QCoreApplication>
#include <qmath.h>
#include <stdio.h>
#include <stdlib.h>
#include <QDebug>
int main(int argc, char *argv[])
{
    QCoreApplication a(argc, argv);
    int i, k, nw, nc;
    double *wricker = NULL;
    int nwricker = 60;
    int wavelet_freq = 30;
    int polarity = 1;
    int sampling_rate = 0.004;
    float nw1, alpha, beta;
    const double pi = 3.141592653589793238460;
    nw1 = 2.2/wavelet_freq/sampling_rate;
    nw = 2*floor(nw1/2)+1;
    nc = floor(nw/2);
    wricker = (double*) calloc(nw, sizeof(double));
    for (i = 0; i < nw; i++)
    {
        k = i+1;
        alpha = (nc-k+1)*wavelet_freq*sampling_rate*pi;
        beta = pow(alpha, 2.0);
        wricker[i] = polarity*((1 - (beta*2)) * exp(-beta));
    }
    /* show values of the ricker wavelet */
    for (i = 0; i < nwricker; i++)
    {
        qDebug() << i << wricker[i];
    }
    free(wricker);
    return a.exec();
}
Analytic expression
The amplitude A of the Ricker wavelet with peak frequency f at time t is computed like so:
A = (1 - 2*pi^2*f^2*t^2) * exp(-pi^2*f^2*t^2)
Python code for it would be:
import numpy as np
import matplotlib.pyplot as plt
def ricker(f, length=0.128, dt=0.001):
    t = np.arange(-length/2, (length-dt)/2, dt)
    y = (1.0 - 2.0*(np.pi**2)*(f**2)*(t**2)) * np.exp(-(np.pi**2)*(f**2)*(t**2))
    return t, y
f = 25 # A low frequency of 25 Hz
t, w = ricker(f)
Which seems quite simple.
Does anyone have any idea what is wrong in my code?
Doing a bit of debugging, I found the problem occurs when passing the values to qDebug; it gives the message:
THE INFERIOR STOPPED BECAUSE IT RECEIVED A SIGNAL FROM THE OPERATING SYSTEM. SIGNAL NAME: SIGSEGV. SIGNAL MEANING: SEGMENTATION FAULT
I'll search for more information on the meaning of this signal. I used qDebug only with the intention of showing the data in a terminal, to make sure the calculation was working.
Thanks in advance.
C++ is much more like Python than C. Even though your C code was particularly convoluted, it still isn't as nice as C++ can be.
A complete example that even plots the data can be very, very simple. If that doesn't show the combined power of C++ and Qt, I hardly know what will :)
wavelet-plot-50690312.pro file
QT = charts
SOURCES = main.cpp
main.cpp
// https://github.com/KubaO/stackoverflown/tree/master/questions/wavelet-plot-50690312
#include <QtCharts>
#include <cmath>
const double pi = 3.14159265358979323846;
QVector<QPointF> ricker(double f, double length = 2.0, double dt = 0.001) {
    size_t N = (length - dt/2.0)/dt;
    QVector<QPointF> w(N);
    for (size_t i = 0; i < N; ++i) {
        double t = -length/2 + i*dt;
        w[i].setX(t);
        w[i].setY((1.0 - 2*pi*pi*f*f*t*t) * exp(-pi*pi*f*f*t*t));
    }
    return w;
}
QLineSeries *rickerSeries(double f) {
    auto *series = new QLineSeries;
    series->setName(QStringLiteral("Ricker Wavelet for f=%1").arg(f, 2));
    series->replace(ricker(f));
    return series;
}
int main(int argc, char *argv[]) {
    QApplication app(argc, argv);
    QChartView view;
    view.chart()->addSeries(rickerSeries(1.0));
    view.chart()->addSeries(rickerSeries(2.0));
    view.chart()->createDefaultAxes();
    view.setMinimumSize(800, 600);
    view.show();
    return app.exec();
}
Of course, this looks nice in C++. How about C? Let's pretend we had some C binding for Qt. Then it might look as follows:
// https://github.com/KubaO/stackoverflown/tree/master/questions/wavelet-plot-50690312/c-binding
#include "qc_binding.h"
#include <math.h>
#include <stddef.h>
#include <stdio.h>
const double pi = 3.14159265358979323846;
CQVector *ricker(double f, double length, double dt) {
    scope_enter();
    size_t N = (length - dt/2.0)/dt;
    CQVector *vector = CQVector_size_(CQ_, CQPointF_type(), N);
    CQPointF *const points = CQPointF_(CQVector_data_at(vector, 0));
    for (size_t i = 0; i < N; ++i) {
        double t = -length/2 + i*dt;
        points[i].x = t;
        points[i].y = (1.0 - 2*pi*pi*f*f*t*t) * exp(-pi*pi*f*f*t*t);
    }
    return scope_leave_ptr(vector);
}
CQLineSeries *rickerSeries(double f) {
    scope_enter();
    CQLineSeries *series = CQLineSeries_(CQ_);
    CQLineSeries_setName(series, CQString_asprintf(CQ_, "Ricker Wavelet for f=%.2f", f));
    CQLineSeries_replaceVector(series, ricker(f, 2.0, 0.001));
    return scope_leave_ptr(series);
}
int main(int argc, char *argv[]) {
    scope_enter();
    CQApplication *app = CQApplication_(CQ_, &argc, argv);
    CQChartView *view = CQChartView_(CQ_);
    CQChart *chart = CQChartView_getChart(view);
    CQChart_addLineSeries(chart, rickerSeries(1.0));
    CQChart_addLineSeries(chart, rickerSeries(2.0));
    CQChart_createDefaultAxes(chart);
    CQWidget *view_ = CQWidget_cast(view);
    CQWidget_setMinimumSize(view_, 800, 600);
    CQWidget_show(view_);
    return scope_leave_int(CQApplication_exec(app));
}
With a little bit of work, a C binding can be made that is about as easy to use as C++: scopes are tracked, RAII works, destructors get called when needed, values about to be returned are not destructed, etc.
Such a minimum binding, implementing all that's needed to run the code above, is available at https://github.com/KubaO/stackoverflown/tree/master/questions/wavelet-plot-50690312/c-binding.

Using devtools to build an R package that imports cuda code

I'm trying to utilize GPU machines in order to improve the performance of a matrix multiplication operation.
I tried to make sense of this post, utilize CUDA code from this repo, and build it all into an R package using devtools.
What I did was write a CUDA file named matrixMultiplication.cu:
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#define BLOCK_SIZE 16
__global__ void runGpuMatrixMult(double *a, double *b, double *c, int m, int n, int k)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    double sum = 0;
    if (col < k && row < m)
    {
        for (int i = 0; i < n; i++)
        {
            sum += a[row * n + i] * b[i * k + col];
        }
        c[row * k + col] = sum;
    }
}
extern "C"
void gpuMatrixMult(double const &A, double const &B, double &C, int &m, int &n, int &k) {
    // allocate memory in host RAM
    double *h_A, *h_B, *h_C;
    cudaMallocHost((void **) &h_A, sizeof(double) * m * n);
    cudaMallocHost((void **) &h_B, sizeof(double) * n * k);
    cudaMallocHost((void **) &h_C, sizeof(double) * m * k);
    // allocate memory space on the device
    double *d_A, *d_B, *d_C;
    cudaMalloc((void **) &d_A, sizeof(double) * m * n);
    cudaMalloc((void **) &d_B, sizeof(double) * n * k);
    cudaMalloc((void **) &d_C, sizeof(double) * m * k);
    // copy matrices A and B from host to device memory
    cudaMemcpy(d_A, h_A, sizeof(double) * m * n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, sizeof(double) * n * k, cudaMemcpyHostToDevice);
    unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
    unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    // launch kernel
    runGpuMatrixMult<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, m, n, k);
    // transfer results from device to host
    cudaMemcpy(h_C, d_C, sizeof(double) * m * k, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    // free memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cudaFreeHost(h_A);
    cudaFreeHost(h_B);
    cudaFreeHost(h_C);
}
Then a cpp file named matrixUtils.cpp:
// [[Rcpp::depends(RcppArmadillo)]]
#include <RcppArmadillo.h>
using namespace Rcpp;
extern "C"
void gpuMatrixMult(double const&A, double const&B, double const& C, int& m, int& n, int& k);
//' gpuMatrixMultCaller calls matrixMultiplication.cu::gpuMatrixMult
//'
//' #export
//[[Rcpp::export]]
SEXP gpuMatrixMultCaller(double const& A, double const& B, double& C, int m, int n, int k) {
gpuMatrixMult(A, B, C, m, n, k);
return R_NilValue;
}
Finally, I have an R file named utils.R which has a wrapper function that calls gpuMatrixMultCaller:
#' gpuMatrixMultWrapper calls matrixUtils.cpp::gpuMatrixMultCaller which runs a GPU matrix multiplication
#' Returns the product of the input matrices
gpuMatrixMultWrapper <- function(A, B)
{
    m <- nrow(A)
    n <- ncol(A)
    k <- ncol(B)
    C <- bigmemory::deepcopy(A)
    gpuMatrixMultCaller(A, B, C, m, n, k)
    return(C)
}
When I run devtools::document I get this error:
Error in dyn.load(dllfile) :
unable to load shared object '/home/code/packages/utils/src/utils.so':
/home/code/packages/utils/src/utils.so: undefined symbol: gpuMatrixMult
The NAMESPACE file does have useDynLib(utils) as its bottom line, and in the DESCRIPTION file I specify: LinkingTo: Rcpp, RcppArmadillo
So my questions are:
Is it even possible to build an R package that imports CUDA code, using devtools? If not, should the CUDA part simply be coded in the cpp file?
If so, what am I missing? I tried adding #include <cuda.h> in matrixUtils.cpp but got: fatal error: cuda.h: No such file or directory
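For context, the direction I am experimenting with is a src/Makevars that hands the .cu file to nvcc and links the CUDA runtime, so R CMD SHLIB knows how to build the extra object. This is only a sketch; CUDA_HOME, the lib path, and the flags are assumptions for my machine:
# src/Makevars (sketch; paths and flags are assumptions)
CUDA_HOME = /usr/local/cuda
NVCC = $(CUDA_HOME)/bin/nvcc
PKG_CPPFLAGS = -I$(CUDA_HOME)/include
PKG_LIBS = -L$(CUDA_HOME)/lib64 -lcudart
OBJECTS = matrixUtils.o matrixMultiplication.o

# explicit rule for the .cu object (the recipe line must start with a tab)
matrixMultiplication.o: matrixMultiplication.cu
	$(NVCC) -O2 -Xcompiler -fPIC -c $< -o $@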
Thanks a lot

Higher radix (or better) formulation for Stockham FFT

Background
I've implemented this algorithm from Microsoft Research for a radix-2 FFT (Stockham auto sort) using OpenCL.
I use floating-point textures (256 cols × N rows) for input and output in the kernel, because I will need to sample at non-integral points and I thought it better to delegate that to the texture sampling hardware. Note that my FFTs are always of 256-point sequences (every row in my texture). At this point, my N is 16384 or 32768, depending on the GPU I'm using and the max 2D texture size allowed.
I also need to perform the FFT of 4 real-valued sequences at once, so the kernel performs the FFT(a, b, c, d) as FFT(a + ib, c + id) from which I can extract the 4 complex sequences out later using an O(n) algorithm. I can elaborate on this if someone wishes - but I don't believe it falls in the scope of this question.
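For reference, the O(n) extraction is the standard conjugate-symmetry split. A host-side sketch in plain C (my own naming, not my actual device code): Z is the N-point DFT of a + i*b, and A, B recover DFT(a) and DFT(b).
/* A[k] = (Z[k] + conj(Z[N-k]))/2,  B[k] = (Z[k] - conj(Z[N-k]))/(2i) */
typedef struct { float re, im; } cpx;

void split_real_ffts(const cpx *Z, cpx *A, cpx *B, int N)
{
    for (int k = 0; k < N; ++k) {
        cpx Zk = Z[k];
        cpx Zm = Z[(N - k) % N];   /* Z[N-k], wrapping Z[N] around to Z[0] */
        A[k].re = 0.5f * (Zk.re + Zm.re);
        A[k].im = 0.5f * (Zk.im - Zm.im);
        B[k].re = 0.5f * (Zk.im + Zm.im);
        B[k].im = 0.5f * (Zm.re - Zk.re);
    }
}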
Kernel Source
const sampler_t fftSampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_NEAREST;
__kernel void FFT_Stockham(read_only image2d_t input, write_only image2d_t output, int fftSize, int size)
{
int x = get_global_id(0);
int y = get_global_id(1);
int b = floor(x / convert_float(fftSize)) * (fftSize / 2);
int offset = x % (fftSize / 2);
int x0 = b + offset;
int x1 = x0 + (size / 2);
float4 val0 = read_imagef(input, fftSampler, (int2)(x0, y));
float4 val1 = read_imagef(input, fftSampler, (int2)(x1, y));
float angle = -6.283185f * (convert_float(x) / convert_float(fftSize));
// TODO: Convert the two calculations below into lookups from a __constant buffer
float tA = native_cos(angle);
float tB = native_sin(angle);
float4 coeffs1 = (float4)(tA, tB, tA, tB);
float4 coeffs2 = (float4)(-tB, tA, -tB, tA);
float4 result = val0 + coeffs1 * val1.xxzz + coeffs2 * val1.yyww;
write_imagef(output, (int2)(x, y), result);
}
The host code simply invokes this kernel log2(256) = 8 times, ping-ponging the input and output textures.
Note: I tried removing the native_cos and native_sin to see if that impacted timing, but it doesn't seem to change things by very much. Not the factor I'm looking for, in any case.
Access pattern
Knowing that I am probably memory-bandwidth bound, here is the memory access pattern (per-row) for my radix-2 FFT.
X0 - element 1 to combine (read)
X1 - element 2 to combine (read)
X - element to write to (write)
Question
So my question is - can someone help me with/point me toward a higher-radix formulation for this algorithm? I ask because most FFTs are optimized for large cases and single real/complex valued sequences. Their kernel generators are also very case dependent and break down quickly when I try to muck with their internals.
Are there other options better than simply going to a radix-8 or 16 kernel?
Some of my constraints are - I have to use OpenCL (no cuFFT). I also cannot use clAmdFft from ACML for this purpose. It would be nice to also talk about CPU optimizations (this kernel SUCKS big time on the CPU) - but getting it to run in fewer iterations on the GPU is my main use-case.
Thanks in advance for reading through all this and trying to help!
I tried several versions, but the one with the best performance on CPU and GPU was a radix-16 kernel for my specific case.
Here is the kernel for reference. It was taken from Eric Bainville's (most excellent) website and used with full attribution.
// #define M_PI 3.14159265358979f
//Global size is x.Length/2, Scale = 1 for direct, 1/N to inverse (iFFT)
__kernel void ConjugateAndScale(__global float4 *x, const float Scale)
{
    int i = get_global_id(0);
    float temp = Scale;
    float4 t = (float4)(temp, -temp, temp, -temp);
    x[i] *= t;
}
// Return a*EXP(-I*PI*1/2) = a*(-I)
float2 mul_p1q2(float2 a) { return (float2)(a.y,-a.x); }
// Return a^2
float2 sqr_1(float2 a)
{ return (float2)(a.x*a.x-a.y*a.y,2.0f*a.x*a.y); }
// Return the 2x DFT2 of the four complex numbers in A
// If A=(a,b,c,d) then return (a',b',c',d') where (a',c')=DFT2(a,c)
// and (b',d')=DFT2(b,d).
float8 dft2_4(float8 a) { return (float8)(a.lo+a.hi,a.lo-a.hi); }
// Return the DFT of 4 complex numbers in A
float8 dft4_4(float8 a)
{
// 2x DFT2
float8 x = dft2_4(a);
// Shuffle, twiddle, and 2x DFT2
return dft2_4((float8)(x.lo.lo,x.hi.lo,x.lo.hi,mul_p1q2(x.hi.hi)));
}
// Complex product, multiply vectors of complex numbers
#define MUL_RE(a,b) (a.even*b.even - a.odd*b.odd)
#define MUL_IM(a,b) (a.even*b.odd + a.odd*b.even)
float2 mul_1(float2 a, float2 b)
{ float2 x; x.even = MUL_RE(a,b); x.odd = MUL_IM(a,b); return x; }
float4 mul_1_F4(float4 a, float4 b)
{ float4 x; x.even = MUL_RE(a,b); x.odd = MUL_IM(a,b); return x; }
float4 mul_2(float4 a, float4 b)
{ float4 x; x.even = MUL_RE(a,b); x.odd = MUL_IM(a,b); return x; }
// Return the DFT2 of the two complex numbers in vector A
float4 dft2_2(float4 a) { return (float4)(a.lo+a.hi,a.lo-a.hi); }
// Return cos(alpha)+I*sin(alpha) (3 variants)
float2 exp_alpha_1(float alpha)
{
float cs,sn;
// sn = sincos(alpha,&cs); // sincos
//cs = native_cos(alpha); sn = native_sin(alpha); // native sin+cos
cs = cos(alpha); sn = sin(alpha); // sin+cos
return (float2)(cs,sn);
}
// Return cos(alpha)+I*sin(alpha) (3 variants)
float4 exp_alpha_1_F4(float alpha)
{
float cs,sn;
// sn = sincos(alpha,&cs); // sincos
// cs = native_cos(alpha); sn = native_sin(alpha); // native sin+cos
cs = cos(alpha); sn = sin(alpha); // sin+cos
return (float4)(cs,sn,cs,sn);
}
// mul_p*q*(a) returns a*EXP(-I*PI*P/Q)
#define mul_p0q1(a) (a)
#define mul_p0q2 mul_p0q1
//float2 mul_p1q2(float2 a) { return (float2)(a.y,-a.x); }
__constant float SQRT_1_2 = 0.707106781186548; // cos(Pi/4)
#define mul_p0q4 mul_p0q2
float2 mul_p1q4(float2 a) { return (float2)(SQRT_1_2)*(float2)(a.x+a.y,-a.x+a.y); }
#define mul_p2q4 mul_p1q2
float2 mul_p3q4(float2 a) { return (float2)(SQRT_1_2)*(float2)(-a.x+a.y,-a.x-a.y); }
__constant float COS_8 = 0.923879532511287; // cos(Pi/8)
__constant float SIN_8 = 0.382683432365089; // sin(Pi/8)
#define mul_p0q8 mul_p0q4
float2 mul_p1q8(float2 a) { return mul_1((float2)(COS_8,-SIN_8),a); }
#define mul_p2q8 mul_p1q4
float2 mul_p3q8(float2 a) { return mul_1((float2)(SIN_8,-COS_8),a); }
#define mul_p4q8 mul_p2q4
float2 mul_p5q8(float2 a) { return mul_1((float2)(-SIN_8,-COS_8),a); }
#define mul_p6q8 mul_p3q4
float2 mul_p7q8(float2 a) { return mul_1((float2)(-COS_8,-SIN_8),a); }
// Compute in-place DFT2 and twiddle
#define DFT2_TWIDDLE(a,b,t) { float2 tmp = t(a-b); a += b; b = tmp; }
// T = N/16 = number of threads.
// P is the length of input sub-sequences, 1,16,256,...,N/16.
__kernel void FFT_Radix16(__global const float4 * x, __global float4 * y, int pp)
{
int p = pp;
int t = get_global_size(0); // number of threads
int i = get_global_id(0); // current thread
////// y[i] = 2*x[i];
////// return;
int k = i & (p-1); // index in input sequence, in 0..P-1
// Inputs indices are I+{0,..,15}*T
x += i;
// Output indices are J+{0,..,15}*P, where
// J is I with four 0 bits inserted at bit log2(P)
y += ((i-k)<<4) + k;
// Load
float4 u[16];
for (int m=0;m<16;m++) u[m] = x[m*t];
// Twiddle, twiddling factors are exp(_I*PI*{0,..,15}*K/4P)
float alpha = -M_PI*(float)k/(float)(8*p);
for (int m=1;m<16;m++) u[m] = mul_1_F4(exp_alpha_1_F4(m * alpha), u[m]);
// 8x in-place DFT2 and twiddle (1)
DFT2_TWIDDLE(u[0].lo,u[8].lo,mul_p0q8);
DFT2_TWIDDLE(u[0].hi,u[8].hi,mul_p0q8);
DFT2_TWIDDLE(u[1].lo,u[9].lo,mul_p1q8);
DFT2_TWIDDLE(u[1].hi,u[9].hi,mul_p1q8);
DFT2_TWIDDLE(u[2].lo,u[10].lo,mul_p2q8);
DFT2_TWIDDLE(u[2].hi,u[10].hi,mul_p2q8);
DFT2_TWIDDLE(u[3].lo,u[11].lo,mul_p3q8);
DFT2_TWIDDLE(u[3].hi,u[11].hi,mul_p3q8);
DFT2_TWIDDLE(u[4].lo,u[12].lo,mul_p4q8);
DFT2_TWIDDLE(u[4].hi,u[12].hi,mul_p4q8);
DFT2_TWIDDLE(u[5].lo,u[13].lo,mul_p5q8);
DFT2_TWIDDLE(u[5].hi,u[13].hi,mul_p5q8);
DFT2_TWIDDLE(u[6].lo,u[14].lo,mul_p6q8);
DFT2_TWIDDLE(u[6].hi,u[14].hi,mul_p6q8);
DFT2_TWIDDLE(u[7].lo,u[15].lo,mul_p7q8);
DFT2_TWIDDLE(u[7].hi,u[15].hi,mul_p7q8);
// 8x in-place DFT2 and twiddle (2)
DFT2_TWIDDLE(u[0].lo,u[4].lo,mul_p0q4);
DFT2_TWIDDLE(u[0].hi,u[4].hi,mul_p0q4);
DFT2_TWIDDLE(u[1].lo,u[5].lo,mul_p1q4);
DFT2_TWIDDLE(u[1].hi,u[5].hi,mul_p1q4);
DFT2_TWIDDLE(u[2].lo,u[6].lo,mul_p2q4);
DFT2_TWIDDLE(u[2].hi,u[6].hi,mul_p2q4);
DFT2_TWIDDLE(u[3].lo,u[7].lo,mul_p3q4);
DFT2_TWIDDLE(u[3].hi,u[7].hi,mul_p3q4);
DFT2_TWIDDLE(u[8].lo,u[12].lo,mul_p0q4);
DFT2_TWIDDLE(u[8].hi,u[12].hi,mul_p0q4);
DFT2_TWIDDLE(u[9].lo,u[13].lo,mul_p1q4);
DFT2_TWIDDLE(u[9].hi,u[13].hi,mul_p1q4);
DFT2_TWIDDLE(u[10].lo,u[14].lo,mul_p2q4);
DFT2_TWIDDLE(u[10].hi,u[14].hi,mul_p2q4);
DFT2_TWIDDLE(u[11].lo,u[15].lo,mul_p3q4);
DFT2_TWIDDLE(u[11].hi,u[15].hi,mul_p3q4);
// 8x in-place DFT2 and twiddle (3)
DFT2_TWIDDLE(u[0].lo,u[2].lo,mul_p0q2);
DFT2_TWIDDLE(u[0].hi,u[2].hi,mul_p0q2);
DFT2_TWIDDLE(u[1].lo,u[3].lo,mul_p1q2);
DFT2_TWIDDLE(u[1].hi,u[3].hi,mul_p1q2);
DFT2_TWIDDLE(u[4].lo,u[6].lo,mul_p0q2);
DFT2_TWIDDLE(u[4].hi,u[6].hi,mul_p0q2);
DFT2_TWIDDLE(u[5].lo,u[7].lo,mul_p1q2);
DFT2_TWIDDLE(u[5].hi,u[7].hi,mul_p1q2);
DFT2_TWIDDLE(u[8].lo,u[10].lo,mul_p0q2);
DFT2_TWIDDLE(u[8].hi,u[10].hi,mul_p0q2);
DFT2_TWIDDLE(u[9].lo,u[11].lo,mul_p1q2);
DFT2_TWIDDLE(u[9].hi,u[11].hi,mul_p1q2);
DFT2_TWIDDLE(u[12].lo,u[14].lo,mul_p0q2);
DFT2_TWIDDLE(u[12].hi,u[14].hi,mul_p0q2);
DFT2_TWIDDLE(u[13].lo,u[15].lo,mul_p1q2);
DFT2_TWIDDLE(u[13].hi,u[15].hi,mul_p1q2);
// 8x DFT2 and store (reverse binary permutation)
y[0] = u[0] + u[1];
y[p] = u[8] + u[9];
y[2*p] = u[4] + u[5];
y[3*p] = u[12] + u[13];
y[4*p] = u[2] + u[3];
y[5*p] = u[10] + u[11];
y[6*p] = u[6] + u[7];
y[7*p] = u[14] + u[15];
y[8*p] = u[0] - u[1];
y[9*p] = u[8] - u[9];
y[10*p] = u[4] - u[5];
y[11*p] = u[12] - u[13];
y[12*p] = u[2] - u[3];
y[13*p] = u[10] - u[11];
y[14*p] = u[6] - u[7];
y[15*p] = u[14] - u[15];
}
Note that I have modified the kernel to perform the FFT of 2 complex-valued sequences at once instead of one. Also, since I only need the FFT of 256 elements at a time in a much larger sequence, I perform only 2 runs of this kernel, which leaves me with 256-length DFTs in the larger array.
Here's some of the relevant host code as well.
var ev = new[] { new Cl.Event() };
var pEv = new[] { new Cl.Event() };
int fftSize = 1;
int iter = 0;
int n = distributionSize >> 5;
while (fftSize <= n)
{
    Cl.SetKernelArg(fftKernel, 0, memA);
    Cl.SetKernelArg(fftKernel, 1, memB);
    Cl.SetKernelArg(fftKernel, 2, fftSize);
    Cl.EnqueueNDRangeKernel(commandQueue, fftKernel, 1, null, globalWorkgroupSize, localWorkgroupSize,
                            (uint)(iter == 0 ? 0 : 1),
                            iter == 0 ? null : pEv,
                            out ev[0]).Check();
    if (iter > 0)
        pEv[0].Dispose();
    Swap(ref ev, ref pEv);
    Swap(ref memA, ref memB); // ping-pong
    fftSize = fftSize << 4;
    iter++;
    Cl.Finish(commandQueue);
}
Swap(ref memA, ref memB);
Hope this helps someone!
