Qt OpenGL shader texture coordinates

I use an OpenGL shader to apply a median filter to an image. I copy the input image into the in_fbo buffer. That part works fine.
QGLFramebufferObject *in_fbo, *out_fbo;
painter.begin(in_fbo); //Copy QImage to QGLFramebufferObject
painter.drawImage(0,0,image_in,0,0,width,height);
painter.end();
out_fbo->bind();
glViewport( 0, 0, nWidth, nHeight );
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
glOrtho( 0.0, nWidth, 0.0, nHeight, -1.0, 1.0 );
glMatrixMode( GL_MODELVIEW );
glLoadIdentity( );
glEnable( GL_TEXTURE_2D );
out_fbo->drawTexture( QPointF(0.0,0.0), in_fbo->texture( ), GL_TEXTURE_2D );
But in the shader code I need to divide the vertex position by the width and height of the image, because texture coordinates are normalized to the range 0 to 1.
How do I correctly calculate the texture coordinates?
//vertex shader
varying vec2 pos;
void main( void )
{
pos = gl_Vertex.xy;
gl_Position = ftransform( );
}
//fragment shader
#extension GL_ARB_texture_rectangle : enable
uniform sampler2D texture0;
uniform int imgWidth;
uniform int imgHeight;
uniform int len;
varying vec2 pos;
#define MAX_LEN (100)
void main(){
float v[ MAX_LEN ];
for (int i = 0; i < len; i++) {
vec2 posi = pos + float(i);
posi.x = posi.x / float( imgWidth );
posi.y = posi.y / float( imgHeight );
v[i] = texture2D(texture0, posi).r;
}
//
//.... Calculating new value
//
gl_FragColor = vec4( m, m, m, 1.0 );
}
Previously I did this in openFrameworks, but the shader that worked for an openFrameworks texture does not work for the Qt texture. I suppose that is because openFrameworks creates textures with textureTarget = GL_TEXTURE_RECTANGLE_ARB. Now the result of applying the shader above isn't correct: it isn't identical to the result of the old shader (a few pixels have different colors). I don't know how to modify the shader above.
Old shaders:
//vertex
#version 120
#extension GL_ARB_texture_rectangle : enable
void main() {
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
gl_TexCoord[0] = gl_MultiTexCoord0;
gl_FrontColor = gl_Color;
}
//fragment
#version 120
#extension GL_ARB_texture_rectangle : enable
uniform sampler2D texture0;
uniform int len;
void main(){
vec2 pos = gl_TexCoord[0].xy;
pos.x = int( pos.x );
pos.y = int( pos.y );
float v[ MAX_LEN ];
for (int i=0; i<len; i++) {
vec2 posi = pos + i;
posi.x = int( posi.x + 0.5 ) + 0.5;
posi.y = int( posi.y + 0.5 ) + 0.5;
v[i] = texture2D(texture0, posi).r;
}
//
//.... Calculating new value
//
gl_FragColor = vec4( m, m, m, 1.0 );
}
OpenGL code from OpenFrameworks lib
texData.width = w;
texData.height = h;
texData.tex_w = w;
texData.tex_h = h;
texData.textureTarget = GL_TEXTURE_RECTANGLE_ARB;
texData.bFlipTexture = true;
texData.glType = GL_RGBA;
// create & setup FBO
glGenFramebuffersEXT(1, &fbo);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);
// Create the render buffer for depth
glGenRenderbuffersEXT(1, &depthBuffer);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, depthBuffer);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT, texData.tex_w, texData.tex_h);
// create & setup texture
glGenTextures(1, (GLuint *)(&texData.textureID)); // could be more then one, but for now, just one
glBindTexture(texData.textureTarget, (GLuint)(texData.textureID));
glTexParameterf(texData.textureTarget, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(texData.textureTarget, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameterf(texData.textureTarget, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(texData.textureTarget, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(texData.textureTarget, 0, texData.glType, texData.tex_w, texData.tex_h, 0, texData.glType, GL_UNSIGNED_BYTE, 0);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
// attach it to the FBO so we can render to it
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, texData.textureTarget, (GLuint)texData.textureID, 0);

I do not think you actually want to use the texture's dimensions to do this. From the sounds of things this is a simple fullscreen image filter, and you really just want the fragment coordinates mapped into the range [0.0, 1.0]. If this is the case, then gl_FragCoord.xy / viewport.xy, where viewport is a 2D uniform that holds the width and height of your viewport, ought to work for your texture coordinates (in the fragment shader).
vec2 texCoord = vec2(transformed_pos.x, transformed_pos.y) / transformed_pos.w * vec2(0.5, 0.5) + vec2(0.5, 0.5) may also work using the same principle -- clip-space coordinates transformed into NDC and then mapped to texture space. This approach will not properly account for texel centers ((0.5, 0.5) rather than (0.0, 0.0)), however, and can present problems when texture filtering is enabled and the wrap mode is not GL_CLAMP_TO_EDGE.
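If it helps, here is a minimal sketch of the gl_FragCoord approach applied to the sampling loop from the question, assuming a vec2 uniform named viewport that the application sets to the framebuffer size (the uniform name and the placeholder median value are illustrative, not part of the original code):
#version 120
uniform sampler2D texture0;
uniform vec2 viewport; // assumed uniform: framebuffer width/height in pixels
uniform int len;
#define MAX_LEN (100)
void main() {
    float v[ MAX_LEN ];
    // gl_FragCoord.xy is already in pixels (at x.5, y.5 pixel centers), so
    // dividing by the viewport size yields normalized coordinates on texel centers.
    for (int i = 0; i < len; i++) {
        vec2 posi = (gl_FragCoord.xy + vec2(float(i))) / viewport;
        v[i] = texture2D(texture0, posi).r;
    }
    //
    //.... Calculating new value (median) as before, producing m
    //
    float m = v[0]; // placeholder so the sketch compiles
    gl_FragColor = vec4(m, m, m, 1.0);
}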

Related

WebGL2 not writing second output of `out int[2]` result

When I read output from the fragment shader:
#version 300 es
precision highp float;
precision highp int;
out int outColor[2];
void main() {
outColor[0] = 5;
outColor[1] = 2;
}
rendered into a 32 bit integer RG texture, I find that only the 5s have been written but not the 2s. Presumably I've got some format specifier wrong somewhere. Or I might be attaching the framebuffer to the wrong thing (gl.COLOR_ATTACHMENT0). I've tried varying various arguments but most changes that I make result in nothing coming out due to formats not lining up. It might be that I need to change 3 constants in tandem.
Here's my self-contained source. The output I want is an array alternating between 5 and 2. Instead, I get an array alternating between 5 and semi-random large constants and 0.
let canvas /** @type {HTMLCanvasElement} */ = document.createElement('canvas');
let gl = canvas.getContext("webgl2");
let vertexShader = gl.createShader(gl.VERTEX_SHADER);
gl.shaderSource(vertexShader, `#version 300 es
in vec4 a_position;
void main() {
gl_Position = a_position;
}
`);
gl.compileShader(vertexShader);
console.assert(gl.getShaderParameter(vertexShader, gl.COMPILE_STATUS), "Vertex shader compile failed.");
let fragmentShader = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(fragmentShader, `#version 300 es
precision highp float;
precision highp int;
out int outColor[2];
void main() {
outColor[0] = 5;
outColor[1] = 2;
}
`);
gl.compileShader(fragmentShader);
let program = gl.createProgram();
gl.attachShader(program, vertexShader);
gl.attachShader(program, fragmentShader);
gl.linkProgram(program);
let positionBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([-3, -1, 1, 3, 1, -1]), gl.STATIC_DRAW);
let positionAttributeLocation = gl.getAttribLocation(program, "a_position");
let vao = gl.createVertexArray();
gl.bindVertexArray(vao);
gl.enableVertexAttribArray(positionAttributeLocation);
gl.vertexAttribPointer(positionAttributeLocation, 2, gl.FLOAT, false, 0, 0);
let w = 4;
let h = 4;
let texture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RG32I, w, h, 0, gl.RG_INTEGER, gl.INT, null);
let frameBuffer = gl.createFramebuffer();
gl.bindFramebuffer(gl.FRAMEBUFFER, frameBuffer);
gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture, 0);
gl.useProgram(program);
gl.viewport(0, 0, w, h);
gl.drawArrays(gl.TRIANGLES, 0, 3);
let outputBuffer = new Int32Array(w*h*2);
gl.readPixels(0, 0, w, h, gl.RG_INTEGER, gl.INT, outputBuffer);
console.log(outputBuffer);
Arrayed outputs like out int outColor[2]; are used for outputting to multiple render targets. In your case, two render targets with one channel each, because you've used a scalar type.
To express a single render target with two channels, try out ivec2 outColor;.
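For reference, a minimal sketch of the fragment shader with that change; the rest of the posted JavaScript (the RG32I texture, the COLOR_ATTACHMENT0 attachment, and the readPixels call) should be able to stay as it is:
#version 300 es
precision highp float;
precision highp int;
// one render target (COLOR_ATTACHMENT0) with two integer channels,
// matching the RG32I / RG_INTEGER texture it renders into
out ivec2 outColor;
void main() {
    outColor = ivec2(5, 2);
}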

Vulkan 2D geometry shader generated geometry lines exhibiting parallelogram shape

I am trying to add support for geometry shaders for a Vulkan project, so I am just starting with something simple for now.
The goal is, given a line segment (a pair of vertices), to generate a perfect rectangle encompassing that line.
For that effect I made this geometry shader:
#version 450
#extension GL_ARB_separate_shader_objects : enable
layout(lines) in;
layout(triangle_strip, max_vertices = 6) out;
layout(location = 0) in vec2 fragCoord[];
layout(location = 0) out vec2 fragTexCoord;
void main() {
vec2 p1 = gl_in[0].gl_Position.xy;
vec2 p2 = gl_in[1].gl_Position.xy;
vec2 tangent = normalize(p2 - p1);
vec2 normal = vec2(tangent.y, -tangent.x) * 0.05;
vec2 quad[4] = vec2[](p1 + normal, p1 - normal, p2 + normal, p2 - normal);
// Create first triangle
gl_Position = vec4(quad[0], 0, 1);
EmitVertex();
gl_Position = vec4(quad[1], 0, 1);
EmitVertex();
gl_Position = vec4(quad[2], 0, 1);
EmitVertex();
EndPrimitive();
// Create second triangle
gl_Position = vec4(quad[1], 0, 1);
EmitVertex();
gl_Position = vec4(quad[2], 0, 1);
EmitVertex();
gl_Position = vec4(quad[3], 0, 1);
EmitVertex();
EndPrimitive();
}
Which outputs:
The vertex shader is:
#version 450
#extension GL_ARB_separate_shader_objects : enable
layout(location = 0) in vec3 inPosition;
layout(location = 1) in vec2 inTexCoord;
layout(location = 0) out vec2 fragTexCoord;
void main() {
gl_Position = vec4(inPosition, 1.0);
fragTexCoord = inTexCoord;
}
I am not sure why the lines are parallelograms instead of rectangles. Adding the normal to the line (the orthogonal direction) to both vertices in the line should make a rectangle, by definition.
Edit:
Even hard coding the vertices in the vertex shader seems to produce the same result:
vec4 verts[2] = vec4[](vec4(-0.5,-0.5,0,1), vec4(0.5,0.5,0,1));
void main() {
gl_Position = verts[gl_VertexID];//vec4(inPosition, 1.0);
fragTexCoord = inTexCoord;
}
I made a silly mistake: the coordinates are calculated in the NORMALIZED GL space, but the window is not square, so my space is stretched, deforming the topology. This is the result in a perfectly square image:
To correct this error I must pass the aspect ratio information to the shader and correct the vertex positions accordingly.
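A minimal sketch of that correction, assuming the aspect ratio (width / height) is delivered through a push constant; the push-constant block is an assumption, and the fragTexCoord passthrough is omitted for brevity:
#version 450
layout(lines) in;
layout(triangle_strip, max_vertices = 4) out;
// assumed push constant: window aspect ratio (width / height)
layout(push_constant) uniform PushConstants { float aspect; } pc;
void main() {
    vec2 p1 = gl_in[0].gl_Position.xy;
    vec2 p2 = gl_in[1].gl_Position.xy;
    // Work in a space where x and y have equal on-screen scale:
    // stretch x by the aspect ratio before computing the perpendicular.
    vec2 tangent = normalize(vec2((p2.x - p1.x) * pc.aspect, p2.y - p1.y));
    vec2 normal = vec2(tangent.y, -tangent.x) * 0.05;
    // Squeeze the offset's x back into NDC so the quad stays rectangular on screen.
    normal.x /= pc.aspect;
    // A triangle strip needs only four vertices for the quad.
    gl_Position = vec4(p1 + normal, 0.0, 1.0); EmitVertex();
    gl_Position = vec4(p1 - normal, 0.0, 1.0); EmitVertex();
    gl_Position = vec4(p2 + normal, 0.0, 1.0); EmitVertex();
    gl_Position = vec4(p2 - normal, 0.0, 1.0); EmitVertex();
    EndPrimitive();
}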

Receiving denormalized output texture coordinates in Frag shader

Update
See rationale at the end of my question below
Using WebGL2 I can access a texel by its denormalized coordinates (sorry, I don't know the right lingo for this). That means I don't have to scale them down to 0-1 like I do in texture2D().
However, the input to the fragment shader is still a vec2/vec3 in normalized values.
Is there a way to declare in/out variables in the Vertex and Frag shaders so that I don't have to scale the coordinates?
somewhere in vertex shader:
...
out vec2 TextureCoordinates;
somewhere in frag shader:
...
in vec2 TextureCoordinates;
I would like for TextureCoordinates to be ivec2 and already scaled.
This question and all my other questions on WebGL relate to general-purpose computing using WebGL. We are trying to do tensor (multi-D matrix) operations using WebGL.
We map our data in a few ways to a Texture. The simplest approach we follow is -- assuming we can access our data as a flat array -- to lay it out along the texture's width and go up the texture's height until we're done.
Since our thinking, logic, and calculations are all based on tensor/matrix indices -- inside the fragment shader -- we'd have to map back to/from the X-Y texture coordinates to indices. The intermediate step here is to calculate an offset for a given position of a texel. Then from that offset we can calculate the matrix indices from its strides.
Calculating an offset in WebGL 1 for very large textures seems to take much longer than in WebGL 2 using integer coordinates. See below:
WebGL 1 offset calculation
int coordsToOffset(vec2 coords, int width, int height) {
float s = coords.s * float(width);
float t = coords.t * float(height);
int offset = int(t) * width + int(s);
return offset;
}
vec2 offsetToCoords(int offset, int width, int height) {
int t = offset / width;
int s = offset - t*width;
vec2 coords = (vec2(s,t) + vec2(0.5,0.5)) / vec2(width, height);
return coords;
}
WebGL 2 offset calculation in the presence of int coords
int coordsToOffset(ivec2 coords, int width) {
return coords.t * width + coords.s;
}
ivec2 offsetToCoords(int offset, int width) {
int t = offset / width;
int s = offset - t*width;
return ivec2(s,t);
}
It should be clear that for a series of large texture operations we're saving hundreds of thousands of operations just on the offset/coords calculation.
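For illustration, here is a minimal sketch of the stride-based mapping described above, written for WebGL 2 and assuming a 3-D tensor laid out row-major with its strides passed in as a uniform (the strides uniform and the fixed rank are hypothetical):
// Recover tensor indices from a flat offset, given row-major strides,
// e.g. for shape (D0, D1, D2): strides = ivec3(D1*D2, D2, 1).
uniform ivec3 strides; // assumed uniform, not part of the code above
ivec3 offsetToIndices(int offset) {
    int i0 = offset / strides.x;
    int rem = offset - i0 * strides.x;
    int i1 = rem / strides.y;
    int i2 = rem - i1 * strides.y;
    return ivec3(i0, i1, i2);
}
int indicesToOffset(ivec3 idx) {
    return idx.x * strides.x + idx.y * strides.y + idx.z * strides.z;
}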
It's not clear why you want to do what you're trying to do. It would be better to ask something like "I'm trying to draw an image/implement post-processing glow/do ray tracing/... and to do that I want to use un-normalized texture coordinates because ..." and then we can tell you if your solution is going to work and how to solve it.
In any case, passing int or unsigned int or ivec2/3/4 or uvec2/3/4 as a varying is supported, but interpolation of them is not: you have to declare them as flat.
Still, you can pass un-normalized values as float or vec2/3/4 and then convert them to int or ivec2/3/4 in the fragment shader.
The other issue is that you get no filtering with texelFetch, the function that takes texel coordinates instead of normalized texture coordinates. It just returns the exact value of a single pixel. It does not support filtering like the normal texture function does.
Example:
function main() {
const gl = document.querySelector('canvas').getContext('webgl2');
if (!gl) {
return alert("need webgl2");
}
const vs = `
#version 300 es
in vec4 position;
in ivec2 texelcoord;
out vec2 v_texcoord;
void main() {
v_texcoord = vec2(texelcoord);
gl_Position = position;
}
`;
const fs = `
#version 300 es
precision mediump float;
in vec2 v_texcoord;
out vec4 outColor;
uniform sampler2D tex;
void main() {
outColor = texelFetch(tex, ivec2(v_texcoord), 0);
}
`;
// compile shaders, link program, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// create buffers via gl.createBuffer, gl.bindBuffer, gl.bufferData)
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: {
numComponents: 2,
data: [
-.5, -.5,
.5, -.5,
0, .5,
],
},
texelcoord: {
numComponents: 2,
data: new Int32Array([
0, 0,
15, 0,
8, 15,
]),
}
});
// make a 16x16 texture
const ctx = document.createElement('canvas').getContext('2d');
ctx.canvas.width = 16;
ctx.canvas.height = 16;
for (let i = 23; i > 0; --i) {
ctx.fillStyle = `hsl(${i / 23 * 360 | 0}, 100%, ${i % 2 ? 25 : 75}%)`;
ctx.beginPath();
ctx.arc(8, 15, i, 0, Math.PI * 2, false);
ctx.fill();
}
const tex = twgl.createTexture(gl, { src: ctx.canvas });
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// no need to set uniforms since they default to 0
// and only one texture which is already on texture unit 0
gl.drawArrays(gl.TRIANGLES, 0, 3);
}
main();
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
So in response to your updated question it's still not clear what you want to do. Why do you want to pass varyings to the fragment shader? Can't you just do whatever math you want in the fragment shader itself?
Example:
uniform sampler2D tex;
out float result;
void main() {
    // sum all the values in the texture
    vec4 sum4 = vec4(0);
    ivec2 texDim = textureSize(tex, 0);
    for (int y = 0; y < texDim.y; ++y) {
        for (int x = 0; x < texDim.x; ++x) {
            sum4 += texelFetch(tex, ivec2(x, y), 0);
        }
    }
    result = sum4.x + sum4.y + sum4.z + sum4.w;
}
Example2
uniform isampler2D indices;
uniform sampler2D data;
out float result;
void main() {
    // sum only the values in data pointed to by indices
    vec4 sum4 = vec4(0);
    ivec2 texDim = textureSize(indices, 0);
    for (int y = 0; y < texDim.y; ++y) {
        for (int x = 0; x < texDim.x; ++x) {
            ivec2 index = texelFetch(indices, ivec2(x, y), 0).xy;
            sum4 += texelFetch(data, index, 0);
        }
    }
    result = sum4.x + sum4.y + sum4.z + sum4.w;
}
Note that I'm also not an expert in GPGPU, but I have a hunch the code above is not the fastest way, because I believe parallelization happens based on output. The code above has only one output, so there is no parallelization. It would be easy to change it so that it takes a block ID, tile ID, or area ID as input and computes just the sum for that area. Then you'd write out a larger texture with the sum of each block and finally sum the block sums.
Also, dependent and non-uniform texture reads are a known performance issue. The first example reads the texture in order, which is cache friendly. The second example reads the texture in a random order (specified by indices), which is not cache friendly.
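A minimal sketch of that blocked approach, assuming each pixel of a smaller floating-point render target sums one tileSize x tileSize block of the input (tileSize and the output layout are assumptions); a second, much smaller pass would then sum the block sums:
#version 300 es
precision highp float;
uniform sampler2D tex;
uniform int tileSize; // assumed: edge length of the block each output pixel sums
out vec4 blockSum;
void main() {
    // gl_FragCoord identifies the block this invocation is responsible for,
    // so the work parallelizes across the smaller output texture.
    ivec2 blockOrigin = ivec2(gl_FragCoord.xy) * tileSize;
    vec4 sum4 = vec4(0.0);
    for (int y = 0; y < tileSize; ++y) {
        for (int x = 0; x < tileSize; ++x) {
            sum4 += texelFetch(tex, blockOrigin + ivec2(x, y), 0);
        }
    }
    blockSum = sum4;
}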

QPainter: how to draw a texture with a specific color

I have some patterns which are black with alpha, and I have some points along which I want to draw a line using those patterns.
I found that a QBrush can be constructed from a texture, but I don't know how to draw it with different colors.
This answer shows a way to do it in C# code, but I don't know how to change the pattern's color with a ColorMatrix.
The modification of the RGBA values of an image using a 5×5 color matrix reminds me of the transformation of homogeneous coordinates as it is often used in computer graphics. If you imagine the RGBA values as a 4-dimensional color/alpha space, the transformation of colors using transformation matrices doesn't sound that revolutionary. (Don't get me wrong: this impressed me a lot, and I couldn't resist trying it out immediately.) Hence, I didn't wonder why a 5×5 matrix is needed although there are only 4 color components. (E.g. if a translation of color values is intended, the 5th dimension comes into play.)
I must admit that I first applied my knowledge from computer animation to this problem and only afterwards compared my approach to the one described on MSDN in Using a Color Matrix to Transform a Single Color. Then I realized that the original paper uses transposed vectors and matrices compared to mine. This is just mathematics, as
(vᵀ Mᵀ)ᵀ = v' = M v
if I remember right.
Practically, it means I have to use matrix rows as columns when I try to reproduce the samples of e.g. the ColorMatrix Guide. (This feels somehow right to me, as it is exactly how we describe transformations in 3D space, i.e. translation is the last column of the transformation matrix.)
The sample code:
colorMatrix.h:
#ifndef COLOR_MATRIX_H
#define COLOR_MATRIX_H
#include <algorithm>
struct ColorMatrix {
float values[5][5];
ColorMatrix() { }
ColorMatrix(const float(&values)[25])
{
std::copy(std::begin(values), std::end(values), (float*)this->values);
}
float (&operator[](unsigned i))[5] { return values[i]; }
const float(&operator[](unsigned i) const)[5] { return values[i]; }
};
struct ColorVector {
float values[5];
ColorVector(const float(&values)[5])
{
std::copy(std::begin(values), std::end(values), (float*)this->values);
}
float& operator[](size_t i) { return values[i]; }
const float& operator[](size_t i) const { return values[i]; }
};
#endif // COLOR_MATRIX_H
colorMatrix.cc:
#include <algorithm>
#include <QtWidgets>
#include "colorMatrix.h"
#include "QColorMatrixView.h"
ColorVector operator*(const ColorMatrix &m, const ColorVector &v)
{
return ColorVector({
m[0][0] * v[0] + m[0][1] * v[1] + m[0][2] * v[2] + m[0][3] * v[3] + m[0][4] * v[4],
m[1][0] * v[0] + m[1][1] * v[1] + m[1][2] * v[2] + m[1][3] * v[3] + m[1][4] * v[4],
m[2][0] * v[0] + m[2][1] * v[1] + m[2][2] * v[2] + m[2][3] * v[3] + m[2][4] * v[4],
m[3][0] * v[0] + m[3][1] * v[1] + m[3][2] * v[2] + m[3][3] * v[3] + m[3][4] * v[4],
m[4][0] * v[0] + m[4][1] * v[1] + m[4][2] * v[2] + m[4][3] * v[3] + m[4][4] * v[4]
});
}
const ColorMatrix Identity({
1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 0.0f, 1.0f
});
template <typename T>
T clamp(T value, T min, T max)
{
return value < min ? min
: value > max ? max
: value;
}
QRgb transform(const ColorMatrix &mat, const QRgb &color)
{
ColorVector vec({
qRed(color) / 255.0f, qGreen(color) / 255.0f, qBlue(color) / 255.0f, qAlpha(color) / 255.0f, 1.0f });
vec = mat * vec;
if (vec[4] != 0.0f) {
vec[0] /= vec[4]; vec[1] /= vec[4]; vec[2] /= vec[4]; vec[3] /= vec[4]; // vec[4] = 1.0f;
}
return qRgba(
clamp<int>(255 * vec[0], 0, 255),
clamp<int>(255 * vec[1], 0, 255),
clamp<int>(255 * vec[2], 0, 255),
clamp<int>(255 * vec[3], 0, 255));
}
QImage transform(const ColorMatrix &mat, const QImage &qImg)
{
const int w = qImg.width(), h = qImg.height();
QImage qImgDst(w, h, qImg.format());
for (int y = 0; y < h; ++y) for (int x = 0; x < w; ++x) {
qImgDst.setPixel(x, y, transform(mat, qImg.pixel(x, y)));
}
return qImgDst;
}
QImage open(QWidget *pQParent)
{
return QImage(
QFileDialog::getOpenFileName(pQParent,
QString::fromUtf8("Open Image File"),
QString()));
}
void update(
QLabel &qLblViewResult,
const QColorMatrixView &qEditColMat, const QLabel &qLblViewOrig)
{
ColorMatrix colMat = qEditColMat.values();
const QPixmap *pQPixmap = qLblViewOrig.pixmap();
const QImage qImg = pQPixmap ? pQPixmap->toImage() : QImage();
qLblViewResult.setPixmap(
QPixmap::fromImage(transform(colMat, qImg)));
}
int main(int argc, char **argv)
{
QApplication app(argc, argv);
// setup GUI
QWidget qWin;
qWin.setWindowTitle(QString::fromUtf8("Qt Color Matrix Demo"));
QGridLayout qGrid;
QVBoxLayout qVBoxColMat;
QLabel qLblColMat(QString::fromUtf8("Color Matrix:"));
qVBoxColMat.addWidget(&qLblColMat, 0);
QColorMatrixView qEditColMat;
qEditColMat.setValues(Identity);
qVBoxColMat.addWidget(&qEditColMat);
QPushButton qBtnReset(QString::fromUtf8("Identity"));
qVBoxColMat.addWidget(&qBtnReset);
QPushButton qBtnGray(QString::fromUtf8("Grayscale"));
qVBoxColMat.addWidget(&qBtnGray);
qVBoxColMat.addStretch(1);
qGrid.addLayout(&qVBoxColMat, 0, 0, 2, 1);
QLabel qLblX(QString::fromUtf8(" \xc3\x97 "));
qGrid.addWidget(&qLblX, 0, 1);
QLabel qLblViewOrig;
qGrid.addWidget(&qLblViewOrig, 0, 2);
QPushButton qBtnLoad(QString::fromUtf8("Open..."));
qGrid.addWidget(&qBtnLoad, 1, 2);
QLabel qLblEq(QString::fromUtf8(" = "));
qGrid.addWidget(&qLblEq, 0, 3);
QLabel qLblViewResult;
qGrid.addWidget(&qLblViewResult, 0, 4);
qWin.setLayout(&qGrid);
qWin.show();
// install signal handlers
QObject::connect(&qEditColMat, &QColorMatrixView::editingFinished,
[&]() { update(qLblViewResult, qEditColMat, qLblViewOrig); });
QObject::connect(&qBtnReset, &QPushButton::clicked,
[&]() {
qEditColMat.setValues(Identity);
update(qLblViewResult, qEditColMat, qLblViewOrig);
});
QObject::connect(&qBtnGray, &QPushButton::clicked,
[&]() {
qEditColMat.setValues(ColorMatrix({
0.33f, 0.59f, 0.11f, 0.0f, 0.0f,
0.33f, 0.59f, 0.11f, 0.0f, 0.0f,
0.33f, 0.59f, 0.11f, 0.0f, 0.0f,
0.00f, 0.00f, 0.00f, 1.0f, 0.0f,
0.00f, 0.00f, 0.00f, 0.0f, 1.0f
}));
update(qLblViewResult, qEditColMat, qLblViewOrig);
});
QObject::connect(&qBtnLoad, &QPushButton::clicked,
[&]() {
qLblViewOrig.setPixmap(QPixmap::fromImage(open(&qBtnLoad)));
update(qLblViewResult, qEditColMat, qLblViewOrig);
});
// initial contents
{
QImage qImg("colorMatrixDefault.jpg");
qLblViewOrig.setPixmap(QPixmap::fromImage(qImg));
update(qLblViewResult, qEditColMat, qLblViewOrig);
}
// runtime loop
return app.exec();
}
QColorMatrixView.h:
#ifndef Q_COLOR_MATRIX_VIEW_H
#define Q_COLOR_MATRIX_VIEW_H
#include <QLineEdit>
#include <QGridLayout>
#include <QWidget>
#include "colorMatrix.h"
class QColorMatrixView: public QWidget {
Q_OBJECT
private:
QGridLayout _qGrid;
QLineEdit _qEdit[5][5];
signals:
void editingFinished();
public:
QColorMatrixView(QWidget *pQParent = nullptr);
virtual ~QColorMatrixView() = default;
QColorMatrixView(const QColorMatrixView&) = delete;
QColorMatrixView& operator=(const QColorMatrixView&) = delete;
ColorMatrix values() const;
void setValues(const ColorMatrix &mat);
};
#endif // Q_COLOR_MATRIX_VIEW_H
QColorMatrixView.cc:
#include "QColorMatrixView.h"
QColorMatrixView::QColorMatrixView(QWidget *pQParent):
QWidget(pQParent)
{
QFontMetrics qFontMetrics(font());
const int w = qFontMetrics.boundingRect(QString("-000.000")).width() + 10;
for (int r = 0; r < 5; ++r) {
for (int c = 0; c < 5; ++c) {
QLineEdit &qEdit = _qEdit[r][c];
_qGrid.addWidget(&qEdit, r, c);
qEdit.setFixedWidth(w);
QObject::connect(&qEdit, &QLineEdit::editingFinished,
[this, r, c]() {
_qEdit[r][c].setText(
QString::number(_qEdit[r][c].text().toFloat(), 'f', 3));
editingFinished();
});
}
}
setLayout(&_qGrid);
}
ColorMatrix QColorMatrixView::values() const
{
ColorMatrix mat;
for (int r = 0; r < 5; ++r) for (int c = 0; c < 5; ++c) {
mat[r][c] = _qEdit[r][c].text().toFloat();
}
return mat;
}
void QColorMatrixView::setValues(const ColorMatrix &mat)
{
for (int r = 0; r < 5; ++r) for (int c = 0; c < 5; ++c) {
_qEdit[r][c].setText(QString::number(mat[r][c], 'f', 3));
}
}
moc_colorMatrix.cc (to include the moc-generated sources):
#include "moc_QColorMatrixView.cpp"
colorMatrix.pro (the qmake project file):
SOURCES = colorMatrix.cc QColorMatrixView.cc
HEADERS = colorMatrix.h QColorMatrixView.h
SOURCES += moc_colorMatrix.cc
MOC_DIR = .
QT += widgets
and the default sample image colorMatrixDefault.jpg if no (cat) photo file is at hand:
Although I developed and tested the application in VS2013, I also built and tested it on cygwin to ensure that the qmake project is complete and self-contained:
$ qmake-qt5 colorMatrix.pro
$ make
$ ./colorMatrix
An enhanced version of this sample code can be found on github Qt Color Matrix Demo.

QGLWidget - distortion occurred

I would like to display sample6 of the OptixSDK in a QGLWidget.
My application has only three QSliders for the rotation around the X, Y, Z axes, plus the QGLWidget.
As I understand it, paintGL() gets called whenever updateGL() is triggered by my QSliders or mouse events. Then I initialize a rotation matrix and apply it to the PinholeCamera in order to trace the scene with the newly transformed camera coordinates, right?
When tracing is finished I get the output buffer and use it to draw the pixels with glDrawPixels(), just like in the GLUTdisplay.cpp provided with the OptiX framework.
But my issue is that the image is skewed/distorted. For example, I wanted to display a ball, but the ball is extremely flattened; the rotation itself works fine, though.
When I am zooming out, it seems that the image scales much more slowly horizontally than vertically.
I am almost sure (and hope) that it has something to do with the gl...() functions not being used properly. What am I missing? Can someone help me out?
For completeness I post my initializeGL() and paintGL() code.
void MyGLWidget::initializeGL()
{
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
m_scene = new MeshViewer();
m_scene->setMesh( (std::string( sutilSamplesDir() ) + "/ball.obj").c_str());
int buffer_width, buffer_height;
// Set up scene
SampleScene::InitialCameraData initial_camera_data;
m_scene->setUseVBOBuffer( false );
m_scene->initScene( initial_camera_data );
int m_initial_window_width = 400;
int m_initial_window_height = 400;
if( m_initial_window_width > 0 && m_initial_window_height > 0)
m_scene->resize( m_initial_window_width, m_initial_window_height );
// Initialize camera according to scene params
m_camera = new PinholeCamera( initial_camera_data.eye,
initial_camera_data.lookat,
initial_camera_data.up,
-1.0f, // hfov is ignored when using keep vertical
initial_camera_data.vfov,
PinholeCamera::KeepVertical );
Buffer buffer = m_scene->getOutputBuffer();
RTsize buffer_width_rts, buffer_height_rts;
buffer->getSize( buffer_width_rts, buffer_height_rts );
buffer_width = static_cast<int>(buffer_width_rts);
buffer_height = static_cast<int>(buffer_height_rts);
float3 eye, U, V, W;
m_camera->getEyeUVW( eye, U, V, W );
SampleScene::RayGenCameraData camera_data( eye, U, V, W );
// Initial compilation
m_scene->getContext()->compile();
// Accel build
m_scene->trace( camera_data );
m_scene->getContext()->launch( 0, 0 );
// Initialize state
glMatrixMode(GL_PROJECTION);glLoadIdentity();glOrtho(0, 1, 0, 1, -1, 1 );
glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glViewport(0, 0, buffer_width, buffer_height);
}
And here is paintGL()
void MyGLWidget::paintGL()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
float3 eye, U, V, W;
m_camera->getEyeUVW( eye, U, V, W );
SampleScene::RayGenCameraData camera_data( eye, U, V, W );
{
nvtx::ScopedRange r( "trace" );
m_scene->trace( camera_data );
}
// Draw the resulting image
Buffer buffer = m_scene->getOutputBuffer();
RTsize buffer_width_rts, buffer_height_rts;
buffer->getSize( buffer_width_rts, buffer_height_rts );
int buffer_width = static_cast<int>(buffer_width_rts);
int buffer_height = static_cast<int>(buffer_height_rts);
RTformat buffer_format = buffer.get()->getFormat();
GLvoid* imageData = buffer->map();
assert( imageData );
switch (buffer_format) {
/*... set gl_data_type and gl_format ...*/
}
RTsize elementSize = buffer->getElementSize();
int align = 1;
if ((elementSize % 8) == 0) align = 8;
else if ((elementSize % 4) == 0) align = 4;
else if ((elementSize % 2) == 0) align = 2;
glPixelStorei(GL_UNPACK_ALIGNMENT, align);
gldata = QGLWidget::convertToGLFormat(image_data);
NVTX_RangePushA("glDrawPixels");
glDrawPixels( static_cast<GLsizei>( buffer_width ), static_cast<GLsizei>( buffer_height ),gl_format, gl_data_type, imageData);
// glDraw
NVTX_RangePop();
buffer->unmap();
}
After hours of debugging, I found out that I had forgotten to set the camera parameters correctly; it had nothing to do with the OpenGL code.
My U coordinate, the horizontal axis of the view plane, was messed up, but the V, W and eye coordinates were right.
After I added these lines in initializeGL()
m_camera->setParameters(initial_camera_data.eye,
initial_camera_data.lookat,
initial_camera_data.up,
initial_camera_data.vfov,
initial_camera_data.vfov,
PinholeCamera::KeepVertical );
everything was right.
