Cube to Sphere mapping (inverse function wanted) - math

I've come across a cube-to-sphere mapping function that gives a more uniform result than simply normalizing the coordinates or other mapping methods. Unfortunately, no unwrapping (inverse) function is given.
Source: http://mathproofs.blogspot.com/2005/07/mapping-cube-to-sphere.html
vec3 spherify(vec3 v) {
    float x2 = v.x * v.x;
    float y2 = v.y * v.y;
    float z2 = v.z * v.z;
    vec3 s;
    s.x = v.x * sqrt(1.0 - y2 / 2.0 - z2 / 2.0 + y2 * z2 / 3.0);
    s.y = v.y * sqrt(1.0 - x2 / 2.0 - z2 / 2.0 + x2 * z2 / 3.0);
    s.z = v.z * sqrt(1.0 - x2 / 2.0 - y2 / 2.0 + x2 * y2 / 3.0);
    return s;
}
How could this be unwrapped back to a cube face? To wrap back to a square I use the following, though it doesn't undo the special squeezing of the coordinates.
vec3 cubify(vec3 s) {
    s.x = s.x / s.z;
    s.y = s.y / s.z;
    return s;
}
Which looks like the following:
(image: mapping)

How about something like this (C++/VCL):
//---------------------------------------------------------------------------
#include <vcl.h>
#pragma hdrstop
#include "Unit1.h"
#include "gl_simple.h"
#include "glsl_math.h"
//---------------------------------------------------------------------------
#pragma package(smart_init)
#pragma resource "*.dfm"
TForm1 *Form1;
const int n=10*10*6;
vec3 col[n];
vec3 cube[n];
vec3 sphere[n];
vec3 cube2[n];
//---------------------------------------------------------------------------
vec3 spherify(vec3 v)
{
float x2 = v.x * v.x;
float y2 = v.y * v.y;
float z2 = v.z * v.z;
vec3 s;
s.x = v.x * sqrt(1.0 - y2 / 2.0 - z2 / 2.0 + y2 * z2 / 3.0);
s.y = v.y * sqrt(1.0 - x2 / 2.0 - z2 / 2.0 + x2 * z2 / 3.0);
s.z = v.z * sqrt(1.0 - x2 / 2.0 - y2 / 2.0 + x2 * y2 / 3.0);
return s;
}
//---------------------------------------------------------------------------
vec3 cubify(vec3 v)
{
int i;
float r,a;
// major axis and size
a=fabs(v.x); { r=a; i=0; }
a=fabs(v.y); if (r<a){ r=a; i=1; }
a=fabs(v.z); if (r<a){ r=a; i=2; }
v/=r; r*=1.75; a=4.0*r/M_PI; // the 1.75 slightly narrows the atan range to lessen edge nonlinearity
// conversion to cube + linearization
if (i==0){ v.y=a*atan(v.y/r); v.z=a*atan(v.z/r); }
else if (i==1){ v.x=a*atan(v.x/r); v.z=a*atan(v.z/r); }
else { v.x=a*atan(v.x/r); v.y=a*atan(v.y/r); }
// just remedy boundaries after linearization
if (v.x<-1.0) v.x=-1.0;
if (v.x>+1.0) v.x=+1.0;
if (v.y<-1.0) v.y=-1.0;
if (v.y>+1.0) v.y=+1.0;
if (v.z<-1.0) v.z=-1.0;
if (v.z>+1.0) v.z=+1.0;
return v;
}
//---------------------------------------------------------------------------
void set_cube()
{
float u,v,d;
int m=sqrt(n/6),i,j,k;
k=0; d=2.0/float(m-1);
for (u=-1.0,i=0;i<m;i++,u+=d)
for (v=-1.0,j=0;j<m;j++,v+=d)
{
col[k]=vec3(0.5,0.0,0.0); cube[k]=vec3(u,v,-1.0); k++;
col[k]=vec3(1.0,0.0,0.0); cube[k]=vec3(u,v,+1.0); k++;
col[k]=vec3(0.0,0.5,0.0); cube[k]=vec3(u,-1.0,v); k++;
col[k]=vec3(0.0,1.0,0.0); cube[k]=vec3(u,+1.0,v); k++;
col[k]=vec3(0.0,0.0,0.5); cube[k]=vec3(-1.0,u,v); k++;
col[k]=vec3(0.0,0.0,1.0); cube[k]=vec3(+1.0,u,v); k++;
}
}
//---------------------------------------------------------------------------
void gl_draw()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
float aspect=float(xs)/float(ys);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60.0/aspect,aspect,0.1,100.0);
glMatrixMode(GL_TEXTURE);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.0,0.0,-10.5);
glRotatef(-10.0,1.0,0.0,0.0);
glRotatef(-20.0,0.0,1.0,0.0);
glEnable(GL_DEPTH_TEST);
glDisable(GL_TEXTURE_2D);
int i;
glPointSize(2);
glMatrixMode(GL_MODELVIEW);
glTranslatef(-3.0,0.0,0.0); glBegin(GL_POINTS); for (i=0;i<n;i++){ glColor3fv(col[i].dat); glVertex3fv(cube[i].dat); } glEnd(); // set_cube
glTranslatef(+3.0,0.0,0.0); glBegin(GL_POINTS); for (i=0;i<n;i++){ glColor3fv(col[i].dat); glVertex3fv(sphere[i].dat); } glEnd(); // spherify
glTranslatef(+3.0,0.0,0.0); glBegin(GL_POINTS); for (i=0;i<n;i++){ glColor3fv(col[i].dat); glVertex3fv(cube2[i].dat); } glEnd(); // cubify
glPointSize(1);
glFlush();
SwapBuffers(hdc);
}
//---------------------------------------------------------------------------
__fastcall TForm1::TForm1(TComponent* Owner):TForm(Owner)
{
gl_init(Handle);
int i;
set_cube();
for (i=0;i<n;i++) sphere[i]=spherify(cube[i]);
for (i=0;i<n;i++) cube2[i]=cubify(sphere[i]);
}
//---------------------------------------------------------------------------
void __fastcall TForm1::FormDestroy(TObject *Sender)
{
gl_exit();
}
//---------------------------------------------------------------------------
void __fastcall TForm1::FormPaint(TObject *Sender)
{
gl_draw();
}
//---------------------------------------------------------------------------
void __fastcall TForm1::Timer1Timer(TObject *Sender)
{
gl_draw();
}
//---------------------------------------------------------------------------
void __fastcall TForm1::FormResize(TObject *Sender)
{
gl_resize(ClientWidth,ClientHeight);
gl_draw();
}
//---------------------------------------------------------------------------
Just ignore the VCL stuff. The code creates a uniform grid of cube points using set_cube, which is converted into a sphere using your spherify, and that is finally converted into cube2 using my cubify.
Here is a preview:
(image: from left cube, sphere, cube2). The colors are stored in col to better show the mapping between points...
The idea behind cubify is to leave the biggest coordinate as is and convert the other two into spherical angles, which are then used as coordinates. Basically it is a reverse of this. It is a bit nonlinear near the edges, hence the slight shrinking of the 45-degree range to a smaller one (the 1.75 factor)... Also, to avoid points crossing above the surface after linearization, another check is involved (the six ifs at the end).
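For the record, the squeeze of the original mapping can also be inverted exactly per face. On the +Z face (v.z == 1) spherify reduces to s.x = x*sqrt(1/2 - y*y/6) and s.y = y*sqrt(1/2 - x*x/6); squaring and eliminating one unknown leaves a quadratic in x*x, and it turns out both axes share one discriminant. A minimal C++ sketch of that algebra (my own derivation, not part of the answer above; face selection would reuse the major-axis logic from cubify):
#include <cmath>
#include <algorithm>

// Exact inverse of the +Z face squeeze: given (sx, sy) = spherify(x, y, 1).xy,
// recover (x, y). x*x solves t*t - (3 + a - b)*t + 3*a = 0 with a = 2*sx*sx,
// b = 2*sy*sy; the smaller root is taken so the face center maps to itself.
void unspherify_face(float sx, float sy, float &x, float &y)
{
    float a = 2.0f * sx * sx;
    float b = 2.0f * sy * sy;
    // both axes share the same discriminant
    float d = std::sqrt(std::max(0.0f, (3.0f + a - b) * (3.0f + a - b) - 12.0f * a));
    x = std::copysign(std::sqrt(std::max(0.0f, 0.5f * (3.0f + a - b - d))), sx);
    y = std::copysign(std::sqrt(std::max(0.0f, 0.5f * (3.0f - a + b - d))), sy);
}
Quick check: the corner (1,1,1) spherifies to (1/sqrt(3), 1/sqrt(3), 1/sqrt(3)); feeding sx = sy = 1/sqrt(3) gives a = b = 2/3, d = 1, and the function recovers x = y = 1 as expected.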

Related

pyglet gives error regarding GLSL version

I am trying to run a program that has a GUI built with pyglet, but it gives the error below. I have searched and found that I need to directly set the GLSL version used by the code, but I don't know how. I would be happy if you helped me out with it.
b"0:20(27): error: cannot initialize uniform weight in GLSL 1.10 (GLSL 1.20 required)\n0:20(27): error: array constructors forbidden in GLSL 1.10 (GLSL 1.20 or GLSL ES 3.00 required)\n0:20(27): error: initializer of uniform variable `weight' must be a constant expression\n0:79(17): error: could not implicitly convert operands to arithmetic operator\n0:79(16): error: operands to arithmetic operators must be numeric\n0:89(7): warning: `coeff' used uninitialized\n"
this is the shader.py file:
Update
Added the GLSL file with the uniform weight in it:
#ifdef GL_ES
precision highp float;
#endif
uniform sampler2D inlet;
uniform sampler2D disp0;
uniform float sigma;
uniform bool xmirror;
uniform vec3 colors[9];
uniform float streams[9];
/*mat4 gaussm = mat4(0.00000067, 0.00002292, 0.00019117, 0.00038771,
0.00002292, 0.00078634, 0.00655965, 0.01330373,
0.00019117, 0.00655965, 0.05472157, 0.11098164,
0.00038771, 0.01330373, 0.11098164, 0.22508352);*/
uniform float weight[5] = float[](0.2270270270, 0.1945946, 0.1216216216, 0.0540540541, 0.0162162162);
vec4 sample(vec2 p)
{
vec4 col;
if(streams[0] >= 0.)
{
int stream = 0;
for(int i=0;streams[stream] < min(p.x, 1.) && stream < 8;stream++) { }
col = vec4(colors[stream], 1.);
}
else {
col = texture2D(inlet, p);
}
return col;
}
float gaussian(float d, float s)
{
float pi = 3.141592653;
//return exp(- d*d / (4.0 * pi * s * s));
//return pow(4*pi*s*s, 0.5)*exp(- d*d / (4.0 * pi * s * s));
return exp(- d*d / (2.*s*s));
}
float gaussian2(float d, float s)
{
float pi = 3.141592653;
float c = pow(1.0 / (4.0 * pi * s), 0.5);
float e = -1.0*(d * d) / (4.0 * s);
return c * exp(e);
}
float gaussf(int i, int j,float nf, float s)
{
//return gaussm[i][j];
float fi = float(i)/nf;
float jf = float(j)/nf;
return gaussian2(sqrt(fi*fi+jf*jf), s);
}
float cosh(float x)
{
return (exp(x)+exp(-x))/2.;
}
float rect_calc(vec2 d)
{
float pi = 3.141592653;
float AR = 0.25;
float offset = 0.125;
float m = 155.;
float n = 155.;
vec3 xyz = vec3(0., (d.x/1.), (d.y/8. + offset));
float u = 0.;
float coeff = (16 * pow(AR, 2.)) / pow(pi, 4.);
float num;
float den;
for(float i = 1.; i <= n; i += 2.)
{
for(float j = 1.; j <= m; j += 2.)
{
num = sin(i * pi * xyz.y) * sin(j * pi * ((xyz.z)/AR));
den = i * j * (pow(AR, 2.) * pow(AR, 2.) + pow(j, 2.));
u += coeff * (num / den);
}
}
// Convert velocity to time-of-flight
float L = 2.0;
float u_mean = 0.0043;
float u_norm = u/u_mean;
return L / u_norm;
}
void main()
{
vec2 uv = gl_TexCoord[0].st;
if(xmirror)
{
uv.x = 1.-uv.x;
}
vec2 d = texture2D(disp0, uv).yz * vec2(1.,8.);
if(xmirror)
{
d.x = -d.x;
uv.x = 1.-uv.x;
}
vec2 p = uv + d;
if(sigma <= 0.)
{
gl_FragColor = sample(p);
} else {
//Sample
vec4 c = vec4(0.);
float Dt = sigma*rect_calc(uv.xy);
float s = pow(Dt, 0.5);
float s2 = 1.0;
float t = 0.;
int ni = 8;
float n = 8.;
for(int ii = 0; ii < ni-1; ii++)
{
float i = float(ii);
for(int jj = 0; jj < ni-1; jj++)
{
float j = float(jj);
t += gaussf(ii,jj,n-1.,s2)*4.;
c += gaussf(ii,jj,n-1.,s2) * (sample(p + vec2((n-1.-i)*s, (n-1.-j)*s)) + sample(p + vec2(-(n-1.-i)*s, (n-1.-j)*s)) + sample(p + vec2(-(n-1.-i)*s, -(n-1.-j)*s)) + sample(p + vec2((n-1.-i)*s, -(n-1.-j)*s)));
}
t += gaussf(ii,ni-1,n-1.,s2)*4.;
c += gaussf(ii,ni-1,n-1.,s2) * (sample(p + vec2((n-1.-i)*s, 0.)) + sample(p + vec2(-(n-1.-i)*s, 0.))+ sample(p + vec2(0., (n-1.-i)*s))+ sample(p + vec2(0., -(n-1.-i)*s)));
}
t += gaussf(ni-1,ni-1,n-1.,s2);
c += gaussf(ni-1,ni-1,n-1.,s2) * sample(p);
//gl_FragColor = c;
gl_FragColor = c/t;
//gl_FragColor = (sigma*rect_calcu(uv.xy))*c/t;
}
}
Well, it got solved!
I just needed to add the directive #version 120 at the beginning of the shader (uniform initializers and array constructors require at least GLSL 1.20, as the error messages say), like this:
#version 120
#ifdef GL_ES
precision highp float;
#endif
// ... the rest of the shader is exactly the same as above ...

QPainter how draw texture with special color

I have some patterns which are black with alpha, and I have some points along which I want to draw lines with those patterns.
I found that QBrush can be constructed from a texture, but I don't know how to draw it with different colors.
This answer shows a way to do it in C# code, but I don't know how to change the pattern color with a ColorMatrix.
The modification of RGBA values of an image using a 5×5 color matrix reminds me of the transformation of homogeneous coordinates as it is often used in computer graphics. If you imagine the RGBA values as a 4-dimensional color/alpha space, the transformation of colors using transformation matrices doesn't sound that revolutionary. (Don't get me wrong – this impressed me much, and I couldn't resist trying it out immediately.) Hence, I didn't wonder why a 5×5 matrix is needed though there are only 4 color components. (E.g., if a translation of color values is intended, the 5th dimension comes into play.)
I must admit that I first applied my knowledge from computer animation to this problem and compared my approach with the one described on MSDN in Using a Color Matrix to Transform a Single Color afterwards. Then I realized that the original paper uses transposed vectors and matrices compared to mine. This is just mathematics, as
(v^T M^T)^T = v' = M v
if I remember right.
Practically, it means I have to use matrix rows as columns when I try to reproduce the samples of e.g. the ColorMatrix Guide. (This feels somehow right to me, as it is exactly how we describe transformations in 3D space, i.e. translation is the last column of the transformation matrix.) For example, the guide's grayscale matrix carries the luminance weights 0.33/0.59/0.11 down its first three columns; transposed, they appear across each of the first three rows, which is what the Grayscale button below sets.
The sample code:
colorMatrix.h:
#ifndef COLOR_MATRIX_H
#define COLOR_MATRIX_H
#include <algorithm>
struct ColorMatrix {
float values[5][5];
ColorMatrix() { }
ColorMatrix(const float(&values)[25])
{
std::copy(std::begin(values), std::end(values), (float*)this->values);
}
float (&operator[](unsigned i))[5] { return values[i]; }
const float(&operator[](unsigned i) const)[5] { return values[i]; }
};
struct ColorVector {
float values[5];
ColorVector(const float(&values)[5])
{
std::copy(std::begin(values), std::end(values), (float*)this->values);
}
float& operator[](size_t i) { return values[i]; }
const float& operator[](size_t i) const { return values[i]; }
};
#endif // COLOR_MATRIX_H
colorMatrix.cc:
#include <algorithm>
#include <QtWidgets>
#include "colorMatrix.h"
#include "QColorMatrixView.h"
ColorVector operator*(const ColorMatrix &m, const ColorVector &v)
{
return ColorVector({
m[0][0] * v[0] + m[0][1] * v[1] + m[0][2] * v[2] + m[0][3] * v[3] + m[0][4] * v[4],
m[1][0] * v[0] + m[1][1] * v[1] + m[1][2] * v[2] + m[1][3] * v[3] + m[1][4] * v[4],
m[2][0] * v[0] + m[2][1] * v[1] + m[2][2] * v[2] + m[2][3] * v[3] + m[2][4] * v[4],
m[3][0] * v[0] + m[3][1] * v[1] + m[3][2] * v[2] + m[3][3] * v[3] + m[3][4] * v[4],
m[4][0] * v[0] + m[4][1] * v[1] + m[4][2] * v[2] + m[4][3] * v[3] + m[4][4] * v[4]
});
}
const ColorMatrix Identity({
1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 0.0f, 1.0f
});
template <typename T>
T clamp(T value, T min, T max)
{
return value < min ? min
: value > max ? max
: value;
}
QRgb transform(const ColorMatrix &mat, const QRgb &color)
{
ColorVector vec({
qRed(color) / 255.0f, qGreen(color) / 255.0f, qBlue(color) / 255.0f, qAlpha(color) / 255.0f, 1.0f });
vec = mat * vec;
if (vec[4] != 0.0f) {
vec[0] /= vec[4]; vec[1] /= vec[4]; vec[2] /= vec[4]; vec[3] /= vec[4]; // vec[4] = 1.0f;
}
return qRgba(
clamp<int>(255 * vec[0], 0, 255),
clamp<int>(255 * vec[1], 0, 255),
clamp<int>(255 * vec[2], 0, 255),
clamp<int>(255 * vec[3], 0, 255));
}
QImage transform(const ColorMatrix &mat, const QImage &qImg)
{
const int w = qImg.width(), h = qImg.height();
QImage qImgDst(w, h, qImg.format());
for (int y = 0; y < h; ++y) for (int x = 0; x < w; ++x) {
qImgDst.setPixel(x, y, transform(mat, qImg.pixel(x, y)));
}
return qImgDst;
}
QImage open(QWidget *pQParent)
{
return QImage(
QFileDialog::getOpenFileName(pQParent,
QString::fromUtf8("Open Image File"),
QString()));
}
void update(
QLabel &qLblViewResult,
const QColorMatrixView &qEditColMat, const QLabel &qLblViewOrig)
{
ColorMatrix colMat = qEditColMat.values();
const QPixmap *pQPixmap = qLblViewOrig.pixmap();
const QImage qImg = pQPixmap ? pQPixmap->toImage() : QImage();
qLblViewResult.setPixmap(
QPixmap::fromImage(transform(colMat, qImg)));
}
int main(int argc, char **argv)
{
QApplication app(argc, argv);
// setup GUI
QWidget qWin;
qWin.setWindowTitle(QString::fromUtf8("Qt Color Matrix Demo"));
QGridLayout qGrid;
QVBoxLayout qVBoxColMat;
QLabel qLblColMat(QString::fromUtf8("Color Matrix:"));
qVBoxColMat.addWidget(&qLblColMat, 0);
QColorMatrixView qEditColMat;
qEditColMat.setValues(Identity);
qVBoxColMat.addWidget(&qEditColMat);
QPushButton qBtnReset(QString::fromUtf8("Identity"));
qVBoxColMat.addWidget(&qBtnReset);
QPushButton qBtnGray(QString::fromUtf8("Grayscale"));
qVBoxColMat.addWidget(&qBtnGray);
qVBoxColMat.addStretch(1);
qGrid.addLayout(&qVBoxColMat, 0, 0, 2, 1);
QLabel qLblX(QString::fromUtf8(" \xc3\x97 "));
qGrid.addWidget(&qLblX, 0, 1);
QLabel qLblViewOrig;
qGrid.addWidget(&qLblViewOrig, 0, 2);
QPushButton qBtnLoad(QString::fromUtf8("Open..."));
qGrid.addWidget(&qBtnLoad, 1, 2);
QLabel qLblEq(QString::fromUtf8(" = "));
qGrid.addWidget(&qLblEq, 0, 3);
QLabel qLblViewResult;
qGrid.addWidget(&qLblViewResult, 0, 4);
qWin.setLayout(&qGrid);
qWin.show();
// install signal handlers
QObject::connect(&qEditColMat, &QColorMatrixView::editingFinished,
[&]() { update(qLblViewResult, qEditColMat, qLblViewOrig); });
QObject::connect(&qBtnReset, &QPushButton::clicked,
[&]() {
qEditColMat.setValues(Identity);
update(qLblViewResult, qEditColMat, qLblViewOrig);
});
QObject::connect(&qBtnGray, &QPushButton::clicked,
[&]() {
qEditColMat.setValues(ColorMatrix({
0.33f, 0.59f, 0.11f, 0.0f, 0.0f,
0.33f, 0.59f, 0.11f, 0.0f, 0.0f,
0.33f, 0.59f, 0.11f, 0.0f, 0.0f,
0.00f, 0.00f, 0.00f, 1.0f, 0.0f,
0.00f, 0.00f, 0.00f, 0.0f, 1.0f
}));
update(qLblViewResult, qEditColMat, qLblViewOrig);
});
QObject::connect(&qBtnLoad, &QPushButton::clicked,
[&]() {
qLblViewOrig.setPixmap(QPixmap::fromImage(open(&qBtnLoad)));
update(qLblViewResult, qEditColMat, qLblViewOrig);
});
// initial contents
{
QImage qImg("colorMatrixDefault.jpg");
qLblViewOrig.setPixmap(QPixmap::fromImage(qImg));
update(qLblViewResult, qEditColMat, qLblViewOrig);
}
// runtime loop
return app.exec();
}
QColorMatrixView.h:
#ifndef Q_COLOR_MATRIX_VIEW_H
#define Q_COLOR_MATRIX_VIEW_H
#include <QLineEdit>
#include <QGridLayout>
#include <QWidget>
#include "colorMatrix.h"
class QColorMatrixView: public QWidget {
Q_OBJECT
private:
QGridLayout _qGrid;
QLineEdit _qEdit[5][5];
signals:
void editingFinished();
public:
QColorMatrixView(QWidget *pQParent = nullptr);
virtual ~QColorMatrixView() = default;
QColorMatrixView(const QColorMatrixView&) = delete;
QColorMatrixView& operator=(const QColorMatrixView&) = delete;
ColorMatrix values() const;
void setValues(const ColorMatrix &mat);
};
#endif // Q_COLOR_MATRIX_VIEW_H
QColorMatrixView.cc:
#include "QColorMatrixView.h"
QColorMatrixView::QColorMatrixView(QWidget *pQParent):
QWidget(pQParent)
{
QFontMetrics qFontMetrics(font());
const int w = qFontMetrics.boundingRect(QString("-000.000")).width() + 10;
for (int r = 0; r < 5; ++r) {
for (int c = 0; c < 5; ++c) {
QLineEdit &qEdit = _qEdit[r][c];
_qGrid.addWidget(&qEdit, r, c);
qEdit.setFixedWidth(w);
QObject::connect(&qEdit, &QLineEdit::editingFinished,
[this, r, c]() {
_qEdit[r][c].setText(
QString::number(_qEdit[r][c].text().toFloat(), 'f', 3));
editingFinished();
});
}
}
setLayout(&_qGrid);
}
ColorMatrix QColorMatrixView::values() const
{
ColorMatrix mat;
for (int r = 0; r < 5; ++r) for (int c = 0; c < 5; ++c) {
mat[r][c] = _qEdit[r][c].text().toFloat();
}
return mat;
}
void QColorMatrixView::setValues(const ColorMatrix &mat)
{
for (int r = 0; r < 5; ++r) for (int c = 0; c < 5; ++c) {
_qEdit[r][c].setText(QString::number(mat[r][c], 'f', 3));
}
}
moc_colorMatrix.cc (to consider moc generated sources):
#include "moc_QColorMatrixView.cpp"
colorMatrix.pro (the qmake project file):
SOURCES = colorMatrix.cc QColorMatrixView.cc
HEADERS = colorMatrix.h QColorMatrixView.h
SOURCES += moc_colorMatrix.cc
MOC_DIR = .
QT += widgets
and the default sample image colorMatrixDefault.jpg in case no (cat) photo file is at hand:
(image: colorMatrixDefault.jpg)
Although I developed and tested the application in VS2013, I also built and tested it on Cygwin to ensure that the qmake project is complete and self-contained:
$ qmake-qt5 colorMatrix.pro
$ make
$ ./colorMatrix
An enhanced version of this sample code can be found on github Qt Color Matrix Demo.
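Coming back to the concrete case of the question (black-with-alpha patterns that should be drawn in various colors): in the M·v convention used here, a pure color translation does the job. A small sketch on top of the types above (the tint helper itself is my illustration, not part of the demo):
// Tint matrix: ignores the source RGB (which is black anyway), keeps the
// source alpha, and translates RGB to the target color (r, g, b in [0, 1]).
// The translation sits in the last column, as described above.
ColorMatrix tint(float r, float g, float b)
{
    return ColorMatrix({
        0.0f, 0.0f, 0.0f, 0.0f, r,
        0.0f, 0.0f, 0.0f, 0.0f, g,
        0.0f, 0.0f, 0.0f, 0.0f, b,
        0.0f, 0.0f, 0.0f, 1.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 1.0f
    });
}
Usage would be e.g. QImage colored = transform(tint(0.2f, 0.6f, 1.0f), patternImage); — every pixel becomes the target color while the pattern's alpha channel is preserved.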

qt opengl shader texture coordinates

I use an OpenGL shader to apply a median filter to an image. I copy the input image to the in_fbo buffer. All works fine.
QGLFramebufferObject *in_fbo, *out_fbo;
painter.begin(in_fbo); //Copy QImage to QGLFramebufferObject
painter.drawImage(0,0,image_in,0,0,width,height);
painter.end();
out_fbo->bind();
glViewport( 0, 0, nWidth, nHeight );
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
glOrtho( 0.0, nWidth, 0.0, nHeight, -1.0, 1.0 );
glMatrixMode( GL_MODELVIEW );
glLoadIdentity( );
glEnable( GL_TEXTURE_2D );
out_fbo->drawTexture( QPointF(0.0,0.0), in_fbo->texture( ), GL_TEXTURE_2D );
But in the shader code I need to divide the vertex position by the width and height of the image, because texture coordinates are normalized to the range between 0 and 1.
How do I correctly calculate the texture coordinates?
//vertex shader
varying vec2 pos;
void main( void )
{
pos = gl_Vertex.xy;
gl_Position = ftransform( );
}
//fragment shader
#extension GL_ARB_texture_rectangle : enable
uniform sampler2D texture0;
uniform int imgWidth;
uniform int imgHeight;
uniform int len;
varying vec2 pos;
#define MAX_LEN (100)
void main(){
float v[ MAX_LEN ];
for (int i = 0; i < len; i++) {
vec2 posi = pos + float(i);
posi.x = posi.x / float( imgWidth );
posi.y = posi.y / float( imgHeight );
v[i] = texture2D(texture0, posi).r;
}
//
//.... Calculating new value
//
gl_FragColor = vec4( m, m, m, 1.0 );
}
I did this in openFrameworks before, but the shader that works for a texture in OF does not work for a texture in Qt. I suppose this is because OF creates textures with textureTarget = GL_TEXTURE_RECTANGLE_ARB. Now the result of applying the shader above isn't correct: it isn't identical to the result of the old shader (there are a few pixels with different colors). I don't know how to modify the shader above :(.
Old shaders:
//vertex
#version 120
#extension GL_ARB_texture_rectangle : enable
void main() {
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
gl_TexCoord[0] = gl_MultiTexCoord0;
gl_FrontColor = gl_Color;
}
//fragment
#version 120
#extension GL_ARB_texture_rectangle : enable
uniform sampler2D texture0;
uniform int len;
void main(){
vec2 pos = gl_TexCoord[0].xy;
pos.x = int( pos.x );
pos.y = int( pos.y );
float v[ MAX_LEN ];
for (int i=0; i<len; i++) {
vec2 posi = pos + i;
posi.x = int( posi.x + 0.5 ) + 0.5;
posi.y = int( posi.y + 0.5 ) + 0.5;
v[i] = texture2D(texture0, posi).r;
}
//
//.... Calculating new value
//
gl_FragColor = vec4( m, m, m, 1.0 );
}
OpenGL code from OpenFrameworks lib
texData.width = w;
texData.height = h;
texData.tex_w = w;
texData.tex_h = h;
texData.textureTarget = GL_TEXTURE_RECTANGLE_ARB;
texData.bFlipTexture = true;
texData.glType = GL_RGBA;
// create & setup FBO
glGenFramebuffersEXT(1, &fbo);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);
// Create the render buffer for depth
glGenRenderbuffersEXT(1, &depthBuffer);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, depthBuffer);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT, texData.tex_w, texData.tex_h);
// create & setup texture
glGenTextures(1, (GLuint *)(&texData.textureID)); // could be more then one, but for now, just one
glBindTexture(texData.textureTarget, (GLuint)(texData.textureID));
glTexParameterf(texData.textureTarget, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(texData.textureTarget, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameterf(texData.textureTarget, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(texData.textureTarget, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(texData.textureTarget, 0, texData.glType, texData.tex_w, texData.tex_h, 0, texData.glType, GL_UNSIGNED_BYTE, 0);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
// attach it to the FBO so we can render to it
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, texData.textureTarget, (GLuint)texData.textureID, 0);
I do not think you actually want to use the texture's dimensions to do this. From the sounds of things this is a simple fullscreen image filter and you really just want fragment coordinates mapped into the range [0.0, 1.0]. If this is the case, then gl_FragCoord.xy / viewport.xy, where viewport is a 2D uniform that defines the width and height of your viewport, ought to work for your texture coordinates (in the fragment shader).
vec2 texCoord = vec2(transformed_pos.x, transformed_pos.y) / transformed_pos.w * vec2(0.5, 0.5) + vec2(0.5, 0.5) may also work, using the same principle -- clip-space coordinates transformed into NDC and then mapped to texture space. This approach will not properly account for texel centers ((0.5, 0.5) rather than (0.0, 0.0)), however, and can present problems when texture filtering is enabled and the wrap mode is not GL_CLAMP_TO_EDGE.
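To make the first suggestion concrete in the Qt setup from the question, a minimal sketch (the uniform name viewport and the use of QGLShaderProgram::setUniformValue are my assumptions about your host code; the GLSL is kept in a raw string):
// Fragment shader normalizing gl_FragCoord by an explicit viewport uniform;
// the resulting [0,1] texture coordinates need no per-vertex math at all.
static const char *fragmentSrc = R"(
    uniform sampler2D texture0;
    uniform vec2 viewport; // viewport width/height in pixels
    void main()
    {
        // gl_FragCoord is in pixels; dividing by the viewport size yields [0,1]
        vec2 texCoord = gl_FragCoord.xy / viewport;
        gl_FragColor = texture2D(texture0, texCoord);
    }
)";

// Host side, once the program is linked and bound:
// program.setUniformValue("viewport", QVector2D(nWidth, nHeight));
The median-filter taps would then be offsets of vec2(1.0, 1.0) / viewport around texCoord instead of integer pixel steps.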

OpenCL traversal kernel - further optimization

Currently, I have an OpenCL kernel for ray traversal as below. I'd be glad if someone had some pointers on optimizing this quite large kernel.
The thing is, I'm running this code with a SAH BVH and I'd like to get performance similar to Timo Aila's traversals in his paper (Understanding the Efficiency of Ray Traversal on GPUs). Of course his code uses a SplitBVH (which I might consider using instead of the SAH BVH, but in my opinion it has really slow build times). But I'm asking about traversal, not the BVH (also I've so far worked only with scenes where a SplitBVH won't give you much advantage over a SAH BVH).
First of all, here is what I have so far (standard while-while traversal kernel).
__constant sampler_t sampler = CLK_FILTER_NEAREST;
// Inline definition of horizontal max
inline float max4(float a, float b, float c, float d)
{
return max(max(max(a, b), c), d);
}
// Inline definition of horizontal min
inline float min4(float a, float b, float c, float d)
{
return min(min(min(a, b), c), d);
}
// Traversal kernel
__kernel void traverse( __read_only image2d_t nodes,
__global const float4* triangles,
__global const float4* rays,
__global float4* result,
const int num,
const int w,
const int h)
{
// Ray index
int idx = get_global_id(0);
if(idx < num)
{
// Stack
int todo[32];
int todoOffset = 0;
// Current node
int nodeNum = 0;
float tmin = 0.0f;
float depth = 2e30f;
// Fetch ray origin, direction and compute invdirection
float4 origin = rays[2 * idx + 0];
float4 direction = rays[2 * idx + 1];
float4 invdir = native_recip(direction);
float4 temp = (float4)(0.0f, 0.0f, 0.0f, 1.0f);
// Traversal loop
while(true)
{
// Fetch node information
int2 nodeCoord = (int2)((nodeNum << 2) % w, (nodeNum << 2) / w);
int4 specs = read_imagei(nodes, sampler, nodeCoord + (int2)(3, 0));
// While node isn't leaf
while(specs.z == 0)
{
// Fetch child bounding boxes
float4 n0xy = read_imagef(nodes, sampler, nodeCoord);
float4 n1xy = read_imagef(nodes, sampler, nodeCoord + (int2)(1, 0));
float4 nz = read_imagef(nodes, sampler, nodeCoord + (int2)(2, 0));
// Test ray against child bounding boxes
float oodx = origin.x * invdir.x;
float oody = origin.y * invdir.y;
float oodz = origin.z * invdir.z;
float c0lox = n0xy.x * invdir.x - oodx;
float c0hix = n0xy.y * invdir.x - oodx;
float c0loy = n0xy.z * invdir.y - oody;
float c0hiy = n0xy.w * invdir.y - oody;
float c0loz = nz.x * invdir.z - oodz;
float c0hiz = nz.y * invdir.z - oodz;
float c1loz = nz.z * invdir.z - oodz;
float c1hiz = nz.w * invdir.z - oodz;
float c0min = max4(min(c0lox, c0hix), min(c0loy, c0hiy), min(c0loz, c0hiz), tmin);
float c0max = min4(max(c0lox, c0hix), max(c0loy, c0hiy), max(c0loz, c0hiz), depth);
float c1lox = n1xy.x * invdir.x - oodx;
float c1hix = n1xy.y * invdir.x - oodx;
float c1loy = n1xy.z * invdir.y - oody;
float c1hiy = n1xy.w * invdir.y - oody;
float c1min = max4(min(c1lox, c1hix), min(c1loy, c1hiy), min(c1loz, c1hiz), tmin);
float c1max = min4(max(c1lox, c1hix), max(c1loy, c1hiy), max(c1loz, c1hiz), depth);
bool traverseChild0 = (c0max >= c0min);
bool traverseChild1 = (c1max >= c1min);
nodeNum = specs.x;
int nodeAbove = specs.y;
// We hit just one out of 2 childs
if(traverseChild0 != traverseChild1)
{
if(traverseChild1)
{
nodeNum = nodeAbove;
}
}
// We hit either both or none
else
{
// If we hit none, pop node from stack (or exit traversal, if stack is empty)
if (!traverseChild0)
{
if(todoOffset == 0)
{
break;
}
nodeNum = todo[--todoOffset];
}
// If we hit both
else
{
// Sort them (so nearest goes 1st, further 2nd)
if(c1min < c0min)
{
unsigned int tmp = nodeNum;
nodeNum = nodeAbove;
nodeAbove = tmp;
}
// Push further on stack
todo[todoOffset++] = nodeAbove;
}
}
// Fetch next node information
nodeCoord = (int2)((nodeNum << 2) % w, (nodeNum << 2) / w);
specs = read_imagei(nodes, sampler, nodeCoord + (int2)(3, 0));
}
// If node is leaf & has some primitives
if(specs.z > 0)
{
// Loop through primitives & perform intersection with them (Woop triangles)
for(int i = specs.x; i < specs.y; i++)
{
// Fetch first point from global memory
float4 v0 = triangles[i * 4 + 0];
float o_z = v0.w - origin.x * v0.x - origin.y * v0.y - origin.z * v0.z;
float i_z = 1.0f / (direction.x * v0.x + direction.y * v0.y + direction.z * v0.z);
float t = o_z * i_z;
if(t > 0.0f && t < depth)
{
// Fetch second point from global memory
float4 v1 = triangles[i * 4 + 1];
float o_x = v1.w + origin.x * v1.x + origin.y * v1.y + origin.z * v1.z;
float d_x = direction.x * v1.x + direction.y * v1.y + direction.z * v1.z;
float u = o_x + t * d_x;
if(u >= 0.0f && u <= 1.0f)
{
// Fetch third point from global memory
float4 v2 = triangles[i * 4 + 2];
float o_y = v2.w + origin.x * v2.x + origin.y * v2.y + origin.z * v2.z;
float d_y = direction.x * v2.x + direction.y * v2.y + direction.z * v2.z;
float v = o_y + t * d_y;
if(v >= 0.0f && u + v <= 1.0f)
{
// We got successful hit, store the information
depth = t;
temp.x = u;
temp.y = v;
temp.z = t;
temp.w = as_float(i);
}
}
}
}
}
// Pop node from stack (if empty, finish traversal)
if(todoOffset == 0)
{
break;
}
nodeNum = todo[--todoOffset];
}
// Store the ray traversal result in global memory
result[idx] = temp;
}
}
The first question of the day is: how could one write the persistent while-while and speculative while-while kernels in OpenCL?
Regarding persistent while-while: do I get it right that I actually just start the kernel with a global work size equal to the local work size, and that both these numbers should be equal to the warp/wavefront size of the GPU?
I get that with CUDA the persistent thread implementation looks like this:
do
{
volatile int& jobIndexBase = nextJobArray[threadIndex.y];
if(threadIndex.x == 0)
{
jobIndexBase = atomicAdd(&warpCounter, WARP_SIZE);
}
index = jobIndexBase + threadIndex.x;
if(index >= totalJobs)
return;
/* Perform work for task numbered 'index' */
}
while(true);
What could the equivalent look like in OpenCL? I know I'll have to put some barriers in there; I also know that one should come right after the spot where I atomically add WARP_SIZE to warpCounter.
Regarding speculative traversal: well, I don't really have any idea how this should be implemented in OpenCL, so any hints are welcome. I also have no idea where to put the barriers (because putting them around a simulated __any results in a driver crash).
If you made it here, thanks for reading & any hints, answers, etc. are welcome!
An optimization you can do is use vector variables and the fused multiply-add function to speed up your setup math. As for the rest of the kernel, it is slow because it is branchy. If you can make assumptions about the signal data, you might be able to reduce the execution time by reducing the code branches. I have not checked the float4 swizzles (the .xxyy and .x .y .z .w after the float4 variables), so just check that.
float4 n0xy = read_imagef(nodes, sampler, nodeCoord);
float4 n1xy = read_imagef(nodes, sampler, nodeCoord + (int2)(1, 0));
float4 nz = read_imagef(nodes, sampler, nodeCoord + (int2)(2, 0));
float4 oodf4 = -origin * invdir;
float4 c0xyf4 = fma(n0xy,invdir.xxyy,oodf4);
float4 c0zc1z = fma(nz,(float4)(invdir.z),oodf4);
float c0min = max4(min(c0xyf4.x, c0xyf4.y), min(c0xyf4.z, c0xyf4.w), min(c0zc1z.z, c0zc1z.w), tmin);
float c0max = min4(max(c0xyf4.x, c0xyf4.y), max(c0xyf4.z, c0xyf4.w), max(c0zc1z.z, c0zc1z.w), depth);
float4 c1xy = fma(n1xy,invdir.xxyy,oodf4);
float c1min = max4(min(c1xy.x, c1xy.y), min(c1xy.z, c1xy.w), min(c0zc1z.z, c0zc1z.w), tmin);
float c1max = min4(max(c1xy.x, c1xy.y), max(c1xy.z, c1xy.w), max(c0zc1z.z, c0zc1z.w), depth);
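As for the persistent while-while question itself: a rough, untested OpenCL sketch of the CUDA loop quoted above could look like the following (WAVEFRONT_SIZE, the zero-initialized warpCounter, and the local-size == wavefront-size assumption are all mine). The kernel source is kept in a C++ raw string, the way it would be passed to clCreateProgramWithSource:
static const char *persistentSrc = R"(
    #define WAVEFRONT_SIZE 64
    __kernel void traverse_persistent(volatile __global int *warpCounter,
                                      const int totalJobs)
    {
        __local volatile int jobBase;
        int lid = get_local_id(0);
        while(true)
        {
            // one work-item fetches a batch of jobs for the whole wavefront
            if(lid == 0)
                jobBase = atomic_add(warpCounter, WAVEFRONT_SIZE);
            barrier(CLK_LOCAL_MEM_FENCE); // everyone sees the new jobBase
            // jobBase is uniform over the work-group, so this exit is not divergent
            if(jobBase >= totalJobs)
                return;
            int index = jobBase + lid;
            if(index < totalJobs)
            {
                // ... perform the original traversal for ray 'index' ...
            }
            barrier(CLK_LOCAL_MEM_FENCE); // finish the batch before fetching the next
        }
    }
)";
The two barriers are exactly the ones asked about: one right after the atomic batch fetch so every work-item sees the new jobBase, and one at the end of the loop body so work-item 0 cannot overwrite jobBase while others are still using it.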

openGL (Qt) bind properly + Two rotation at the same time

I'm trying to get some experience in OpenGL, but now I'm facing "1.5" problems ;).
The first problem/question is: how can I get a rotation in two directions "simultaneously"?
I want to draw a coordinate system which can be rotated around the x- and y-axis. But I'm only able to rotate around the x-axis or the y-axis; I can't figure out how to do both at the same time.
My other half problem is not really a problem, but as you can see I'm re-binding my shaders anew every time I move the mouse. Is there a better way this could be done?
void GLWidget::mouseMoveEvent(QMouseEvent *event)
{
differencePostition.setX(event->x() - lastPosition.x());
differencePostition.setY(event->y() - lastPosition.y());
shaderProgram.removeAllShaders();
shaderProgram.addShaderFromSourceFile(QGLShader::Vertex, "../Vector/yRotation.vert");
shaderProgram.addShaderFromSourceFile(QGLShader::Fragment, "../Vector/CoordinateSystemLines.frag");
shaderProgram.link();
shaderProgram.bind();
shaderProgram.setAttributeValue("angle", differencePostition.x());
//shaderProgram.release();
//shaderProgram.addShaderFromSourceFile(QGLShader::Vertex, "../Vector/xRotation.vert");
//shaderProgram.addShaderFromSourceFile(QGLShader::Fragment, "../Vector/CoordinateSystemLines.frag");
//shaderProgram.link();
//shaderProgram.bind();
//shaderProgram.setAttributeValue("angle", differencePostition.y());
updateGL();
}
void GLWidget::mousePressEvent(QMouseEvent *event)
{
lastPosition = event->posF();
}
xRotation.vert
#version 330
in float angle;
const float PI = 3.14159265358979323846264;
void main(void)
{
float rad_angle = angle * PI / 180.0;
vec4 oldPosition = gl_Vertex;
vec4 newPosition = oldPosition;
newPosition.y = oldPosition.y * cos(rad_angle) - oldPosition.z * sin(rad_angle);
newPosition.z = oldPosition.y * sin(rad_angle) + oldPosition.z * cos(rad_angle);
gl_Position = gl_ModelViewProjectionMatrix * newPosition;
}
yRotation.vert
#version 330
in float angle;
const float PI = 3.14159265358979323846264;
void main(void)
{
float rad_angle = angle * PI / 180.0;
vec4 oldPosition = gl_Vertex;
vec4 newPosition = oldPosition;
newPosition.x = oldPosition.x * cos(rad_angle) + oldPosition.z * sin(rad_angle);
newPosition.z = oldPosition.z * cos(rad_angle) - oldPosition.x * sin(rad_angle);
gl_Position = gl_ModelViewProjectionMatrix * newPosition;
}
Rotation in more than one direction at the same time requires a combination of matrices (commonly called a general rotation matrix).
There are several sites that show how this matrix is generated if you are interested.
As to your second problem: the shaders are usually initialized in the init section.
Example: http://doc-snapshot.qt-project.org/5.0/qtopengl/cube-mainwidget-cpp.html
You only need to call shaderProgram.bind() every time before you want to draw an object with your shader. Loading and linking are usually done only once in the initialization of your program. Only call shaderProgram.setAttributeValue in your mouseMoveEvent method.
EDIT
A quick way to solve your rotation problem is to write a shader that does both rotations one after the other. Add a second in variable and set both using the setAttributeValue method.
#version 330
in float angleX;
in float angleY;
const float PI = 3.14159265358979323846264;
void main(void)
{
float rad_angle_x = angleX * PI / 180.0;
vec4 oldPosition = gl_Vertex;
vec4 newPositionX = oldPosition;
newPositionX.y = oldPosition.y * cos(rad_angle_x) - oldPosition.z * sin(rad_angle_x);
newPositionX.z = oldPosition.y * sin(rad_angle_x) + oldPosition.z * cos(rad_angle_x);
float rad_angle_y = angleY * PI / 180.0;
vec4 newPositionXY = newPositionX;
newPositionXY.x = newPositionX.x * cos(rad_angle_y) + newPositionX.z * sin(rad_angle_y);
newPositionXY.z = newPositionX.z * cos(rad_angle_y) - newPositionX.x * sin(rad_angle_y);
gl_Position = gl_ModelViewProjectionMatrix * newPositionXY;
}
This way you don't need to know matrix multiplications.
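Alternatively, the same combination can be built on the host with Qt's QMatrix4x4 and uploaded as a single uniform (declare uniform mat4 rotation; in the vertex shader). A hedged sketch; the method name applyRotation and the uniform name rotation are illustrative:
#include <QMatrix4x4>

// Build the combined x/y rotation once on the CPU; the vertex shader then
// just computes gl_Position = gl_ModelViewProjectionMatrix * rotation * gl_Vertex;
void GLWidget::applyRotation(float angleX, float angleY)
{
    QMatrix4x4 rotation;
    rotation.rotate(angleX, 1.0f, 0.0f, 0.0f); // about the x-axis first
    rotation.rotate(angleY, 0.0f, 1.0f, 0.0f); // then about the y-axis
    shaderProgram.bind();                      // compiled and linked once in init
    shaderProgram.setUniformValue("rotation", rotation);
    updateGL();
}
This also keeps the order of the two rotations explicit, which matters because rotations do not commute.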
