OpenGL and Qt 5.5: glm::perspective doesn't work

To isolate the problem to the glm::perspective function, I have made the following perspective matrix by hand:
QMatrix4x4 proj(1.f, 0.f, 0.f,  0.f,
                0.f, 1.f, 0.f,  0.f,
                0.f, 0.f, 1.f,  0.f,
                0.f, 0.f, 1.1f, 1.f);
This works and produces an image. However, when trying to use glm to construct the perspective matrix like so:
glm::mat4 proj;
proj = glm::perspective(
    glm::radians(80.0f), // vertical field of view
    1.0f,                // aspect ratio
    0.0f,                // near plane
    2.0f                 // far plane
);
Nothing comes up.
I was under the impression that passing 0.0f and 2.0f as the near- and far-plane arguments would map any vertex depth in the range 0.0f to 2.0f linearly into the -1.0f to 1.0f range used for normalized device coordinates. However, no matter which pair of values I put here, nothing is rendered.
Here are the coordinates I'm trying to draw:
rawverts = {
     0.0f,  0.0f, 1.0f,
     0.0f,  0.7f, 1.0f,
     0.4f,  0.0f, 1.0f,
     0.0f, -0.7f, 1.0f,
    -0.4f,  0.0f, 1.0f
};
and when passing the projection matrix to the vertex shader:
int projIndex = shaders->uniformLocation("proj");
...
shaders->setUniformValue(projIndex, QMatrix4x4(glm::value_ptr(proj)) );
The vertex shader itself:
#version 330 core

in vec3 vertex;

uniform mat4 translate;
uniform mat4 view;
uniform mat4 proj;
uniform float time;
uniform float aspect;
uniform vec2 resolution;

void main() {
    gl_Position = proj * view * translate * vec4(vertex, 1);
}

You must pass QMatrix4x4 the transposed glm matrix. The QMatrix4x4 constructor that takes a raw float pointer expects row-major data, whereas glm stores its matrices in column-major order, so constructing a QMatrix4x4 directly from glm::value_ptr(proj) gives you the transpose of the matrix you wanted.
So instead of using
shaders->setUniformValue(projIndex, QMatrix4x4(glm::value_ptr(proj)) );
you must use:
shaders->setUniformValue(projIndex, QMatrix4x4(glm::value_ptr(proj)).transposed());
Every time you combine QMatrix4x4 and glm::mat4, send the transposed matrix; then it should work.
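If you do this in more than one place, a small helper keeps the transpose from being forgotten. A minimal sketch (the name toQMatrix is mine, not part of Qt or glm):

#include <QMatrix4x4>
#include <glm/glm.hpp>
#include <glm/gtc/type_ptr.hpp>

// Convert a column-major glm::mat4 into a QMatrix4x4.
// QMatrix4x4's float* constructor reads the values as row-major,
// so transposing afterwards undoes that reinterpretation.
static QMatrix4x4 toQMatrix(const glm::mat4 &m)
{
    return QMatrix4x4(glm::value_ptr(m)).transposed();
}

// Usage:
// shaders->setUniformValue(projIndex, toQMatrix(proj));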
JC
PS: for the record, I also struggled for two days on this :(

Related

Rendering a texture in OpenGL with Qt5 and GLEW

I am trying to render a textured quad in OpenGL using Qt5 and GLEW, specifically using a QOpenGLWidget.
The code I am using is modified from https://open.gl/textures;
the main changes are using a triangle strip instead of an element buffer
and using my own pixel array instead of loading an image.
I have tested these changes and they work using glfw3.
In Qt all I can see is a black quad with no colour.
This only occurs when I try to use textures; it renders colours fine if I remove the texture part of the shader.
My shaders compile fine with no errors, although there is no error checking in the code below (a minimal status-check sketch is included after the code).
texture(tex, Texcoord)
just seems to output zero.
So my question is: why is it rendering just a black quad?
GLfloat tmpdata[] = {
    //pos         //colour          //texCoord
    -0.5f,  0.5f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, // Top-left
     0.5f,  0.5f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, // Top-right
    -0.5f, -0.5f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, // Bottom-left
     0.5f, -0.5f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f  // Bottom-right
};
GLRenderer::GLRenderer(QWidget *parent)
    : QOpenGLWidget(parent)
{
    // Shader sources
    VertSource =
        "#version 150 core\n"
        "in vec2 position;"
        "in vec3 color;"
        "in vec2 texcoord;"
        "out vec3 Color;"
        "out vec2 Texcoord;"
        "void main() {"
        "   Color = color;"
        "   Texcoord = texcoord;"
        "   gl_Position = vec4(position, 0.0, 1.0);"
        "}";
    FragSource =
        "#version 150 core\n"
        "in vec3 Color;"
        "in vec2 Texcoord;"
        "out vec4 outColor;"
        "uniform sampler2D tex;"
        "void main() {"
        "   outColor = texture(tex, Texcoord) * vec4(Color, 1.0);"
        // "   outColor = vec4(Color, 1.0);"
        // "   outColor = texture(tex, Texcoord);"
        "}";
}

GLRenderer::~GLRenderer()
{
    glDeleteProgram(shaderProgram);
    glDeleteShader(fragmentShader);
    glDeleteShader(vertexShader);
    glDeleteBuffers(1, &vbo);
    glDeleteVertexArrays(1, &vao);
}

void GLRenderer::initializeGL()
{
    glewExperimental = GL_TRUE;
    GLenum err = glewInit();
    if (err != GLEW_OK)
    {
        printf("%s", glewGetErrorString(err));
    }

    // Create Vertex Array Object
    GLuint vao;
    glGenVertexArrays(1, &vao);
    glBindVertexArray(vao);

    // Create a Vertex Buffer Object and copy the vertex data to it
    GLuint vbo;
    glGenBuffers(1, &vbo);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBufferData(GL_ARRAY_BUFFER, sizeof(tmpdata), tmpdata, GL_STATIC_DRAW);

    // Create and compile the vertex shader
    GLuint vertexShader = glCreateShader(GL_VERTEX_SHADER);
    glShaderSource(vertexShader, 1, &VertSource, NULL);
    glCompileShader(vertexShader);

    // Create and compile the fragment shader
    GLuint fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
    glShaderSource(fragmentShader, 1, &FragSource, NULL);
    glCompileShader(fragmentShader);

    // Link the vertex and fragment shader into a shader program
    GLuint shaderProgram = glCreateProgram();
    glAttachShader(shaderProgram, vertexShader);
    glAttachShader(shaderProgram, fragmentShader);
    glBindFragDataLocation(shaderProgram, 0, "outColor");
    glLinkProgram(shaderProgram);
    glUseProgram(shaderProgram);

    // Specify the layout of the vertex data
    GLint posAttrib = glGetAttribLocation(shaderProgram, "position");
    glEnableVertexAttribArray(posAttrib);
    glVertexAttribPointer(posAttrib, 2, GL_FLOAT, GL_FALSE, 7 * sizeof(GLfloat), 0);

    GLint colAttrib = glGetAttribLocation(shaderProgram, "color");
    glEnableVertexAttribArray(colAttrib);
    glVertexAttribPointer(colAttrib, 3, GL_FLOAT, GL_FALSE, 7 * sizeof(GLfloat), (void*)(2 * sizeof(GLfloat)));

    GLint texAttrib = glGetAttribLocation(shaderProgram, "texcoord");
    glEnableVertexAttribArray(texAttrib);
    glVertexAttribPointer(texAttrib, 2, GL_FLOAT, GL_FALSE, 7 * sizeof(GLfloat), (void*)(5 * sizeof(GLfloat)));

    // Load texture
    GLuint tex;
    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);

    // Black/white checkerboard
    float pixels[] = {
        1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f,
        0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 0.0f
    };
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 2, 2, 0, GL_RGB, GL_FLOAT, pixels);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
}

void GLRenderer::paintGL()
{
    // Clear the screen to black
    glClearColor(0.0f, 0.0f, 1.0f, 1.0f);
    glClear(GL_COLOR_BUFFER_BIT);
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
}
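Since the question mentions that there is no error checking, here is a generic compile/link status sketch that could be dropped in after glCompileShader and glLinkProgram. It is not specific to the black-quad problem, just the missing check:

// Generic helpers: print the info log when compilation or linking fails.
static void checkShaderCompile(GLuint shader)
{
    GLint status = GL_FALSE;
    glGetShaderiv(shader, GL_COMPILE_STATUS, &status);
    if (status != GL_TRUE)
    {
        char log[1024];
        glGetShaderInfoLog(shader, sizeof(log), NULL, log);
        printf("shader compile error: %s\n", log);
    }
}

static void checkProgramLink(GLuint program)
{
    GLint status = GL_FALSE;
    glGetProgramiv(program, GL_LINK_STATUS, &status);
    if (status != GL_TRUE)
    {
        char log[1024];
        glGetProgramInfoLog(program, sizeof(log), NULL, log);
        printf("program link error: %s\n", log);
    }
}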

Reflection Mapping in OpenGL ES

I am trying to implement reflection mapping in OpenGL ES 2.0 for a sphere.
I have the skybox done.
For the sphere rendering, the reflection shaders I have used are:
Environment mapping (Sphere) vertex shader:
precision highp float;

uniform mat4 u_mvMatrix;  // ModelView Matrix
uniform mat4 u_mvpMatrix; // ModelViewProjection Matrix

attribute vec4 a_position;
attribute vec3 a_envmapNormal;

varying vec3 v_eyecoordEyeReflection;

vec3 v_eyecoordPosition;
vec3 v_eyecoordNormal;

void main()
{
    // position and normal in model coordinates
    vec4 modelCoordPosition = a_position;
    vec3 modelCoordNormal = a_envmapNormal;

    // Calculate position in eye space
    v_eyecoordPosition = vec3(u_mvMatrix * modelCoordPosition);

    // Calculate and normalize eye space normal
    vec3 eyecoordNormal = vec3(u_mvMatrix * vec4(modelCoordNormal, 0.0));
    v_eyecoordNormal = normalize(eyecoordNormal);

    // Calculate reflection vector
    v_eyecoordEyeReflection = reflect(v_eyecoordPosition, v_eyecoordNormal);

    gl_Position = u_mvpMatrix * a_position;
}
Environment mapping (Sphere) fragment shader:
precision highp float;

uniform lowp samplerCube baseCubeMapTexture;

varying vec3 v_eyecoordEyeReflection;

void main()
{
    gl_FragColor = textureCube(baseCubeMapTexture, v_eyecoordEyeReflection);
}
But I am not getting the correct output: when the sphere is rotated, the texture does not change.
What is the error in the shader?
Thanks Andon... I used your shader code, but I am getting a white sphere.
The sphere normals are calculated using:
#define ANGLE_STEP ((2.0f * OGLES_PI) / ((float) NUM_OF_SLICES))

for (iCnti = 0; iCnti < NUM_OF_PARALLELS + 1; iCnti++) {
    for (iCntj = 0; iCntj < NUM_OF_SLICES + 1; iCntj++) {
        pSphereNormals[iNormalIndex + 0] = sin(ANGLE_STEP * (FLOAT) iCnti) * sin(ANGLE_STEP * (FLOAT) iCntj);
        pSphereNormals[iNormalIndex + 1] = cos(ANGLE_STEP * (FLOAT) iCnti);
        pSphereNormals[iNormalIndex + 2] = sin(ANGLE_STEP * (FLOAT) iCnti) * cos(ANGLE_STEP * (FLOAT) iCntj);
        iNormalIndex += 3;
    }
}
My view matrix matViewMatrix is built with mat4x4_look_at() from http://www.learnopengles.com/tag/linmath-h/:
MyCameraLookAt(matViewMatrix, 0.0f, 0.0f, -2.0f, 0.0f, 0.0f, -1.0f, 0.0f, 1.0f, 0.0f);
The inverse matrix InvViewMat is below (the inverse() function is taken from http://www.opensource.apple.com/source/WebCore/WebCore-514/platform/graphics/transforms/TransformationMatrix.cpp):
InvViewMat[0][0] = -1.000000 InvViewMat[1][0] = -0.000000 InvViewMat[2][0] = 0.000000 InvViewMat[3][0] = -0.000000
InvViewMat[0][1] = -0.000000 InvViewMat[1][1] = 1.000000 InvViewMat[2][1] = -0.000000 InvViewMat[3][1] = -0.000000
InvViewMat[0][2] = 0.000000 InvViewMat[1][2] = -0.000000 InvViewMat[2][2] = -1.000000 InvViewMat[3][2] = -2.000000
InvViewMat[0][3] = -0.000000 InvViewMat[1][3] = 0.000000 InvViewMat[2][3] = 0.000000 InvViewMat[3][3] = 1.000000
Is there any problem with my matrix values or any of my calculations?
If you have a sphere centered at the camera's origin (eye-space), then no matter how you rotate it the position and normals in eye-space are always going to be the same at any location on screen. That is the definition of a sphere - every vertex is the same distance (radius) from the center.
You actually need to do this in world-space (that position will vary as you rotate the sphere).
Now, this brings up an issue - you only have a ModelView matrix (which transforms from object-space to eye-space). You are going to need to split your Model and View matrices to do this and for convenience you should pass the inverse of the View matrix to GLSL.
Below is a modified Vertex Shader that does what you want:
precision highp float;

uniform mat4 u_vInvMatrix; // Inverse View Matrix -- NEW
uniform mat4 u_mvMatrix;   // ModelView Matrix
uniform mat4 u_mvpMatrix;  // ModelViewProjection Matrix

attribute vec4 a_position;
attribute vec3 a_envmapNormal;

//varying vec3 v_eyecoordEyeReflection; // YOU DO NOT WANT EYE-SPACE
varying vec3 v_worldReflection;         // Use world-space instead -- MODIFIED

vec3 v_eyecoordPosition;
vec3 v_eyecoordNormal;

void main()
{
    // position and normal in model coordinates
    vec4 modelCoordPosition = a_position;
    vec3 modelCoordNormal = a_envmapNormal;

    // Calculate position in eye space
    v_eyecoordPosition = vec3(u_mvMatrix * modelCoordPosition);

    // Calculate and normalize eye space normal
    vec3 eyecoordNormal = vec3(u_mvMatrix * vec4(modelCoordNormal, 0.0));
    v_eyecoordNormal = normalize(eyecoordNormal);

    // Calculate reflection vector (eye-space)
    vec3 eyeReflection = reflect(v_eyecoordPosition, v_eyecoordNormal);

    // Transform the reflection into world-space -- NEW
    v_worldReflection = vec3(u_vInvMatrix * vec4(eyeReflection, 0.0));

    gl_Position = u_mvpMatrix * a_position;
}
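On the application side, the only new uniform is the inverse of the view matrix. A minimal sketch of computing and uploading it with glm (the question uses a linmath-style matrix library, so treat the types and the location variables here as placeholders):

#include <glm/glm.hpp>
#include <glm/gtc/type_ptr.hpp>

// viewMatrix is the camera (look-at) matrix, modelMatrix the sphere's transform.
glm::mat4 modelView   = viewMatrix * modelMatrix;
glm::mat4 viewInverse = glm::inverse(viewMatrix);

glUniformMatrix4fv(u_mvMatrixLocation,   1, GL_FALSE, glm::value_ptr(modelView));
glUniformMatrix4fv(u_vInvMatrixLocation, 1, GL_FALSE, glm::value_ptr(viewInverse));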

How to calculate a circle (of texels) around a point in OpenGL

I'm calculating a circle around a point in the fragment shader. The problem is that the part of the texture that is changed is not a circle, it's an oval. The shape actually depends on the texture's shape: if the texture were a perfect square I would get a perfect circle, but when it's a rectangle I get an oval. This is the current fragment shader:
varying highp vec2 textureCoordinate;

uniform sampler2D inputImageTexture;
uniform highp vec2 center;
uniform highp float radius;
uniform highp float scale;

void main()
{
    highp vec2 textureCoordinateToUse = textureCoordinate;
    highp float dist = distance(center, textureCoordinate);
    textureCoordinateToUse -= center;

    if (dist < radius)
    {
        highp float percent = 1.0 - ((radius - dist) / radius) * scale;
        percent = percent * percent;
        textureCoordinateToUse = textureCoordinateToUse * percent;
        textureCoordinateToUse += center;
        gl_FragColor = texture2D(inputImageTexture, textureCoordinateToUse);
    }

    textureCoordinateToUse += center;
    gl_FragColor = texture2D(inputImageTexture, textureCoordinate);
}
UPDATE SHADER CODE:
highp float aspectRatio = 854.0 / 480.0;
//highp vec2 textureCoordinateToUse = textureCoordinate;
highp vec2 textureCoordinateToUse = vec2(textureCoordinate.x, (textureCoordinate.y * aspectRatio + 0.5 - 0.5 * aspectRatio));
highp float dist = distance(center, textureCoordinateToUse);
textureCoordinateToUse -= center;

if (dist < radius)
{
    highp float percent = 1.0 - ((radius - dist) / radius) * scale;
    percent = percent * percent;
    textureCoordinateToUse = textureCoordinateToUse * percent;
    textureCoordinateToUse += center;
    gl_FragColor = texture2D(inputImageTexture, textureCoordinateToUse);
    return;
}

textureCoordinateToUse += center;
gl_FragColor = texture2D(inputImageTexture, textureCoordinate);
I see you're trying to use my bulge distortion fragment shader. While you haven't actually asked a question, I think I might know what you want here.
If you provide your texture coordinates in normalized 0.0 - 1.0 ranges for a rectangular input texture, the above will operate over an elliptical area rather than a circular one. That's because the above calculations work in texture coordinate space, not the image coordinate space.
To correct for this, you can do one of two things. First, you could provide texture coordinates that account for the aspect ratio of the image (have one of them not max out at 1.0).
Second, you could do what I do in this answer and feed in the aspect ratio of the image as a uniform and use that to correct for the rectangular nature of the image. If you provided an aspectRatio uniform to the shader with the ratio between the width and height of the image, you could replace the first line in the body of your shader with
highp vec2 textureCoordinateToUse = vec2(textureCoordinate.x, (textureCoordinate.y * aspectRatio + 0.5 - 0.5 * aspectRatio));
and it would operate over a circular area.
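On the CPU side, the uniform is simply the ratio between the image's width and height (the 854.0 / 480.0 hard-coded in the updated shader above). A minimal sketch of setting it, where program, imageWidth and imageHeight are placeholders for the existing setup:

// Pass the image's aspect ratio so the shader can correct the distance calculation.
GLfloat aspect = (GLfloat)imageWidth / (GLfloat)imageHeight; // e.g. 854.0f / 480.0f
GLint aspectRatioLocation = glGetUniformLocation(program, "aspectRatio");
glUniform1f(aspectRatioLocation, aspect);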

GLSL: Move transformation from tessellation-evaluation shader to vertex shader

I am working in GLSL with tessellation shaders and I am trying to do displacement mapping. It works, but I want to move the matrix transformation code from the tessellation evaluation shader to the vertex shader. The reason is that I do not want to do this calculation for every subdivided triangle's vertices, and I want the vertices to be in screen space in the vertex shader so I can decide in the tessellation control shader how much every triangle should be subdivided.
The version that does not work is "almost" working; there are some issues when the triangles are rendered.
I would really appreciate even the smallest hint of what may be wrong.
This (bad) version works (position and normal are transformed in the tessellation evaluation shader):
// vertex shader
void main_(void)
{
    gl_Position = VertexPosition;
    VertexTexCoord1 = VertexTexCoord;
    VertexNormal1 = VertexNormal;
}

// tessellation evaluation shader
void main_()
{
    VertexTexCoord3 = interpolate(VertexTexCoord2);
    vec3 normal = interpolate(VertexNormal2);
    vec4 pos = interpolate(gl_in[0].gl_Position, gl_in[1].gl_Position, gl_in[2].gl_Position);

    vec4 movement = vec4(normal * (texture2D(heigthMap, VertexTexCoord3).r), 0.0);

    gl_Position = mvpMatrix * (pos + movement);
}
This version does not work (position and normal are transformed in the vertex shader):
// vertex shader
void main(void)
{
    gl_Position = mvpMatrix * VertexPosition;
    VertexTexCoord1 = VertexTexCoord;
    VertexNormal1 = mat3(mvpMatrix) * VertexNormal;
}

// tessellation evaluation shader
void main()
{
    VertexTexCoord3 = interpolate(VertexTexCoord2);
    vec3 normal = interpolate(VertexNormal2);
    vec4 pos = interpolate(gl_in[0].gl_Position, gl_in[1].gl_Position, gl_in[2].gl_Position);

    vec4 movement = vec4(normal * (texture2D(heigthMap, VertexTexCoord3).r), 0.0);

    gl_Position = (pos + movement);
}
In the "non-working" version the last line in tesselation shader seems to be incorrect. You're forgetting that in the source variant you had 'movement' multiplied by the mvpMatrix.
I would have tried to use this:
// tessellation evaluation shader
void main()
{
    VertexTexCoord3 = interpolate(VertexTexCoord2);
    vec3 normal = interpolate(VertexNormal2);
    vec4 pos = interpolate(gl_in[0].gl_Position, gl_in[1].gl_Position, gl_in[2].gl_Position);

    vec4 movement = vec4(normal * (texture2D(heigthMap, VertexTexCoord3).r), 0.0);

    /// This multiplication by mvpMatrix is inevitable
    gl_Position = (pos + mvpMatrix * movement);
}
Sorry if I mixed up the order of the stages, but the two versions of the code above are definitely not equivalent.

Light calculations in model space

I have working per-fragment lighting, but I wonder what I can do to keep the lighting calculation in model space so that I don't have to multiply the normals by normalModelMatrix in the fragment shader, as below.
In the shaders, ViewMatrix is the camera transformation and ModelMatrix is the object's transformation.
Light position: glm::vec4 lightPos(3.0f, 2.0f, -30.0f, 1.0f)
Render loop:
glUseProgram(ProgramId);
glUniformMatrix4fv(ViewMatrixUniformLocation, 1, GL_FALSE, glm::value_ptr(ViewMatrix));
glUniform4f(lightIntensityUniformLocation, 0.8f, 0.8f, 0.8f, 1.0f);
glUniform4f(ambientIntensityUniformLocation, 0.2f, 0.2f, 0.2f, 0.2f);
glUniform3fv(dirToLightUniformLocation, 1, glm::value_ptr(lightPos));

ModelMatrixStack.push(ModelMatrix);
ModelMatrix = glm::translate(ModelMatrix, glm::vec3(0, 0, -30));
ModelMatrix = glm::rotate(ModelMatrix, 75.0f, glm::vec3(0, 0, 1));
normMatrix = glm::mat3(ModelMatrix);
glUniformMatrix4fv(ModelMatrixUniformLocation, 1, GL_FALSE, glm::value_ptr(ModelMatrix));
glUniformMatrix3fv(normalModelMatrixUniformLocation, 1, GL_FALSE, glm::value_ptr(normMatrix));
drawTeapot();

ModelMatrix = ModelMatrixStack.top();
normMatrix = glm::mat3(ModelMatrix);
glUniformMatrix4fv(ModelMatrixUniformLocation, 1, GL_FALSE, glm::value_ptr(ModelMatrix));
glUniformMatrix3fv(normalModelMatrixUniformLocation, 1, GL_FALSE, glm::value_ptr(normMatrix));
myground.draw();

glUseProgram(0);
Vertex shader:
#version 400

layout(location=0) in vec4 in_position;
layout(location=1) in vec3 in_normal;

out vec3 normal;
out vec4 position;

uniform mat4 ModelMatrix;
uniform mat4 ViewMatrix;
uniform mat4 ProjectionMatrix;

void main(void)
{
    vec4 vertexPosition = ModelMatrix * in_position;
    gl_Position = ProjectionMatrix * ViewMatrix * vertexPosition;
    normal = in_normal;
    position = vertexPosition;
}
Fragment shader:
#version 400

in vec3 normal;
in vec4 position;

out vec4 outputColor;

uniform vec3 lightPos;
uniform vec4 lightIntensity;
uniform vec4 ambientIntensity;
uniform mat3 normalModelMatrix;

void main(void)
{
    vec3 normCamSpace = normalize(normalModelMatrix * normalize(normal));
    vec3 dirToLight = normalize(lightPos - vec3(position));
    float cosAngIncidence = dot(normCamSpace, dirToLight);
    cosAngIncidence = clamp(cosAngIncidence, 0, 1);
    outputColor = (lightIntensity * cosAngIncidence) + ambientIntensity;
}
The computational cost of doing illumination in model space is actually higher than doing it in eye space, as you have to transform the light position and directions individually for each model. Those transformations usually happen on the CPU side. And even then you still have to perform a transformation of the normals.

"model space where I don't have to multiply normals by normalModelMatrix as below in the fragment shader"

That calculation works just as well in the vertex shader. Just move it there.
Update/EDIT: for clarification, here is the modified shader code:
Vertex shader:
#version 400

layout(location=0) in vec4 in_position;
layout(location=1) in vec3 in_normal;

out vec3 eyespaceNormal;
out vec4 eyespacePosition;

uniform mat4 ModelviewMatrix;
uniform mat3 NormalMatrix; // == inverse(transpose(ModelviewMatrix))
uniform mat4 ProjectionMatrix;

void main(void)
{
    eyespacePosition = ModelviewMatrix * in_position;
    eyespaceNormal = normalize(NormalMatrix * in_normal);
    gl_Position = ProjectionMatrix * eyespacePosition;
}
Fragment shader:
#version 400

in vec3 eyespaceNormal;
in vec4 eyespacePosition;

out vec4 outputColor;

uniform vec3 lightPos;
uniform vec4 lightIntensity;
uniform vec4 ambientIntensity;

void main(void)
{
    vec3 dirToLight = normalize(lightPos - vec3(eyespacePosition));
    float cosAngIncidence = dot(eyespaceNormal, dirToLight);
    cosAngIncidence = clamp(cosAngIncidence, 0, 1);
    outputColor = (lightIntensity * cosAngIncidence) + ambientIntensity;
}
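Note that with these shaders the light position has to be supplied in eye space as well. A minimal sketch of that on the CPU side, reusing lightPos, ViewMatrix and dirToLightUniformLocation from the render loop in the question:

// Transform the world-space light position into eye space before uploading,
// since the modified fragment shader compares it against eyespacePosition.
glm::vec3 eyeSpaceLightPos = glm::vec3(ViewMatrix * lightPos);
glUniform3fv(dirToLightUniformLocation, 1, glm::value_ptr(eyeSpaceLightPos));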
BTW: your normal transformation matrix was wrong. You must use the inverse of the transposed modelview matrix for this.
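With glm, that matrix can be computed on the CPU like this (a sketch; NormalMatrixUniformLocation is a placeholder for however the uniform location is stored):

#include <glm/glm.hpp>
#include <glm/gtc/type_ptr.hpp>

glm::mat4 modelView = ViewMatrix * ModelMatrix;
// Inverse-transpose of the upper-left 3x3 of the modelview matrix;
// glm::inverseTranspose from <glm/gtc/matrix_inverse.hpp> does the same thing.
glm::mat3 normalMatrix = glm::transpose(glm::inverse(glm::mat3(modelView)));
glUniformMatrix3fv(NormalMatrixUniformLocation, 1, GL_FALSE, glm::value_ptr(normalMatrix));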
