I am trying to add support for geometry shaders for a Vulkan project, so I am just starting with something simple for now.
The goal is, given a pair of vertices defining a line segment, to generate a perfect rectangle encompassing that line.
For that effect I made this geometry shader:
#version 450
#extension GL_ARB_separate_shader_objects : enable
layout(lines) in;
layout(triangle_strip, max_vertices = 6) out;
layout(location = 0) in vec2 fragCoord[];
layout(location = 0) out vec2 fragTexCoord;
void main() {
    vec2 p1 = gl_in[0].gl_Position.xy;
    vec2 p2 = gl_in[1].gl_Position.xy;
    vec2 tangent = normalize(p2 - p1);
    vec2 normal = vec2(tangent.y, -tangent.x) * 0.05;
    vec2 quad[4] = vec2[](p1 + normal, p1 - normal, p2 + normal, p2 - normal);
    // Create first triangle
    gl_Position = vec4(quad[0], 0, 1);
    EmitVertex();
    gl_Position = vec4(quad[1], 0, 1);
    EmitVertex();
    gl_Position = vec4(quad[2], 0, 1);
    EmitVertex();
    EndPrimitive();
    // Create second triangle
    gl_Position = vec4(quad[1], 0, 1);
    EmitVertex();
    gl_Position = vec4(quad[2], 0, 1);
    EmitVertex();
    gl_Position = vec4(quad[3], 0, 1);
    EmitVertex();
    EndPrimitive();
}
Which outputs parallelograms instead of the expected rectangles (screenshot omitted).
The vertex shader is:
#version 450
#extension GL_ARB_separate_shader_objects : enable
layout(location = 0) in vec3 inPosition;
layout(location = 1) in vec2 inTexCoord;
layout(location = 0) out vec2 fragTexCoord;
void main() {
    gl_Position = vec4(inPosition, 1.0);
    fragTexCoord = inTexCoord;
}
I am not sure why the lines come out as parallelograms instead of rectangles. Adding the normal (the direction orthogonal to the line) to both of the line's vertices should produce a rectangle, by definition.
Edit:
Even hard coding the vertices in the vertex shader seems to produce the same result:
vec4 verts[2] = vec4[](vec4(-0.5,-0.5,0,1), vec4(0.5,0.5,0,1));
void main() {
    gl_Position = verts[gl_VertexID]; //vec4(inPosition, 1.0);
    fragTexCoord = inTexCoord;
}
I made a silly mistake: the coordinates are computed in normalized device coordinates, but since the window is not square, the space is stretched, deforming the geometry. In a perfectly square image the quads come out as proper rectangles (screenshot omitted).
To correct this error I must pass the aspect ratio to the shader and correct the vertex positions accordingly.
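A minimal sketch of that correction in the geometry shader is below, assuming the aspect ratio (width / height) is supplied through a hypothetical push constant named pc.aspect (a uniform buffer would work the same way); the line direction is measured in an aspect-corrected space and the offset is scaled back before it is written as NDC, and the texture-coordinate pass-through is omitted for brevity:
#version 450
#extension GL_ARB_separate_shader_objects : enable
layout(lines) in;
layout(triangle_strip, max_vertices = 4) out;
// Hypothetical push constant carrying the swapchain aspect ratio (width / height).
layout(push_constant) uniform PushConstants { float aspect; } pc;
void main() {
    vec2 p1 = gl_in[0].gl_Position.xy;
    vec2 p2 = gl_in[1].gl_Position.xy;
    // Measure the direction in a space where x is scaled by the aspect ratio,
    // so "perpendicular" means perpendicular on screen rather than in NDC.
    vec2 tangent = normalize((p2 - p1) * vec2(pc.aspect, 1.0));
    vec2 normal = vec2(tangent.y, -tangent.x) * 0.05;
    // Undo the scaling when converting the offset back to NDC.
    vec2 offset = normal / vec2(pc.aspect, 1.0);
    // A single 4-vertex strip is enough for the quad.
    gl_Position = vec4(p1 + offset, 0.0, 1.0); EmitVertex();
    gl_Position = vec4(p1 - offset, 0.0, 1.0); EmitVertex();
    gl_Position = vec4(p2 + offset, 0.0, 1.0); EmitVertex();
    gl_Position = vec4(p2 - offset, 0.0, 1.0); EmitVertex();
    EndPrimitive();
}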
I use an OpenGL shader to apply a median filter to an image. I copy the input image to the in_fbo buffer. That all works fine.
QGLFramebufferObject *in_fbo, *out_fbo;
painter.begin(in_fbo); //Copy QImage to QGLFramebufferObject
painter.drawImage(0,0,image_in,0,0,width,height);
painter.end();
out_fbo->bind();
glViewport( 0, 0, nWidth, nHeight );
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
glOrtho( 0.0, nWidth, 0.0, nHeight, -1.0, 1.0 );
glMatrixMode( GL_MODELVIEW );
glLoadIdentity( );
glEnable( GL_TEXTURE_2D );
out_fbo->drawTexture( QPointF(0.0,0.0), in_fbo->texture( ), GL_TEXTURE_2D );
But in the shader code I need to divide the vertex position by the width and height of the image, because texture coordinates are normalized to the range 0 to 1.
How do I correctly calculate the texture coordinates?
//vertex shader
varying vec2 pos;
void main( void )
{
    pos = gl_Vertex.xy;
    gl_Position = ftransform( );
}
//fragment shader
#extension GL_ARB_texture_rectangle : enable
uniform sampler2D texture0;
uniform int imgWidth;
uniform int imgHeight;
uniform int len;
varying vec2 pos;
#define MAX_LEN (100)
void main(){
    float v[ MAX_LEN ];
    for (int i = 0; i < len; i++) {
        vec2 posi = pos + float(i);
        posi.x = posi.x / float( imgWidth );
        posi.y = posi.y / float( imgHeight );
        v[i] = texture2D(texture0, posi).r;
    }
    //
    //.... Calculating new value
    //
    gl_FragColor = vec4( m, m, m, 1.0 );
}
I did this before in OpenFrameworks, but the shader that worked for the texture in OF does not work for the texture in Qt. I suppose this is because OF creates textures with textureTarget = GL_TEXTURE_RECTANGLE_ARB. Now the result of applying the shader above isn't correct: it isn't identical to the result of the old shader (there are a few pixels with different colors). I don't know how to modify the shader above :(.
Old shaders:
//vertex
#version 120
#extension GL_ARB_texture_rectangle : enable
void main() {
    gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
    gl_TexCoord[0] = gl_MultiTexCoord0;
    gl_FrontColor = gl_Color;
}
//fragment
#version 120
#extension GL_ARB_texture_rectangle : enable
uniform sampler2D texture0;
uniform int len;
void main(){
    vec2 pos = gl_TexCoord[0].xy;
    pos.x = int( pos.x );
    pos.y = int( pos.y );
    float v[ MAX_LEN ];
    for (int i=0; i<len; i++) {
        vec2 posi = pos + i;
        posi.x = int( posi.x + 0.5 ) + 0.5;
        posi.y = int( posi.y + 0.5 ) + 0.5;
        v[i] = texture2D(texture0, posi).r;
    }
    //
    //.... Calculating new value
    //
    gl_FragColor = vec4( m, m, m, 1.0 );
}
OpenGL code from OpenFrameworks lib
texData.width = w;
texData.height = h;
texData.tex_w = w;
texData.tex_h = h;
texData.textureTarget = GL_TEXTURE_RECTANGLE_ARB;
texData.bFlipTexture = true;
texData.glType = GL_RGBA;
// create & setup FBO
glGenFramebuffersEXT(1, &fbo);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);
// Create the render buffer for depth
glGenRenderbuffersEXT(1, &depthBuffer);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, depthBuffer);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT, texData.tex_w, texData.tex_h);
// create & setup texture
glGenTextures(1, (GLuint *)(&texData.textureID)); // could be more than one, but for now, just one
glBindTexture(texData.textureTarget, (GLuint)(texData.textureID));
glTexParameterf(texData.textureTarget, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(texData.textureTarget, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameterf(texData.textureTarget, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(texData.textureTarget, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(texData.textureTarget, 0, texData.glType, texData.tex_w, texData.tex_h, 0, texData.glType, GL_UNSIGNED_BYTE, 0);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
// attach it to the FBO so we can render to it
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, texData.textureTarget, (GLuint)texData.textureID, 0);
I do not think you actually want to use the texture's dimensions to do this. From the sounds of things this is a simple fullscreen image filter, and you really just want fragment coordinates mapped into the range [0.0, 1.0]. If this is the case, then gl_FragCoord.xy / viewport.xy, where viewport is a 2D uniform that defines the width and height of your viewport, ought to work for your texture coordinates (in the fragment shader).
vec2 texCoord = vec2 (transformed_pos.x, transformed_pos.y) / transformed_pos.w * vec2 (0.5, 0.5) + vec2 (0.5, 0.5) may also work using the same principle -- clip-space coordinates transformed into NDC and then mapped to texture-space. This approach will not properly account for texel centers ((0.5, 0.5) rather than (0.0, 0.0)), however, and can present problems when texture filtering is enabled and the wrap mode is not GL_CLAMP_TO_EDGE.
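To make that concrete, here is a minimal sketch of the fragment shader rewritten around gl_FragCoord, assuming a hypothetical vec2 uniform named viewport that the application fills with the viewport width and height (the median computation itself is elided, as in the original):
//fragment shader
#version 120
uniform sampler2D texture0;
uniform vec2 viewport; // hypothetical uniform: viewport width and height in pixels
uniform int len;
#define MAX_LEN (100)
void main() {
    float v[ MAX_LEN ];
    for (int i = 0; i < len; i++) {
        // gl_FragCoord.xy is in window space (pixel centers at .5), so dividing
        // by the viewport size gives normalized texture coordinates directly.
        vec2 posi = (gl_FragCoord.xy + vec2(float(i))) / viewport;
        v[i] = texture2D(texture0, posi).r;
    }
    //
    //.... Calculating new value m, as before
    //
    float m = v[0]; // placeholder so the sketch compiles
    gl_FragColor = vec4( m, m, m, 1.0 );
}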
I have tried creating a shader that has an arc rotating around a circle; you can see an example here:
https://www.shadertoy.com/view/MljGDK
#define center vec2(0.5)
#define pi 3.1415926535897932384626433832795
#define resolution 250.0
#define arcColor vec4(0.1, 0.2, 0.9, 1.0)
vec4 arc(vec2 uv, vec2 pos, float radius, float angle, vec4 color) {
    vec2 b = (pos * resolution - uv * resolution);
    float d = 1.0 - clamp(length(b) - radius * resolution, 0.0, 1.0);
    float a1 = atan(-b.x, -b.y);
    float a2 = atan(b.x, b.y);
    //return color * smoothstep(0.0, d, angle - a);
    return color * (a2 >= radians(angle) - pi / 8.0 && a2 <= radians(angle) + pi / 8.0 ? d : 0.0);
}
vec4 circle(vec2 uv, vec2 pos, float radius, vec4 color) {
    float d = length(pos * resolution - uv * resolution) - radius * resolution;
    float t = clamp(d, 0.0, 1.0);
    return color * (1.0 - t);
}
void mainImage ( out vec4 fragColor, in vec2 fragCoord ) {
    vec2 uv = fragCoord.xy / iResolution.xy;
    vec4 arcSection = arc(uv, center, 0.5, mod(iGlobalTime*100.0, 360.0), arcColor);
    vec4 hole = circle(uv, center, 0.45, vec4(1.0));
    fragColor = arcSection - hole;
}
However, I do not know why atan is returning values which cut off the arc at the poles of the circle. I was under the impression that atan(x, y) in GLSL is implemented as atan2.
Any help in either improving the arc rotation or making the algorithm cleaner would be greatly appreciated.
Your problem is a failure to allow for the circular nature of the results. In the abstract: how far is an angle of 0.0 degrees from an angle of 359.0? According to your code, it's 359 degrees away rather than 1 degree away.
Suggested alternative:
float a2Diff = mod(radians(angle) - a2, pi * 2.0);
return color * ((a2Diff >= pi * 15.0 / 8.0 || a2Diff <= pi / 8.0) ? d : 0.0);
So you're computing the difference between the two angles, creating a fixed centre, and then allowing for potential wraparound with the mod.
You need to be careful with the wraparound in order to treat angles close to 2*pi as being close to 0. Here's a working code example:
#define center vec2(0.5, 0.5)
#define pi 3.1415926535897932384626433832795
#define resolution 250.0
#define arcColor vec4(0.1, 0.2, 0.9, 1.0)
vec4 arc(vec2 uv, vec2 pos, float radius1, float radius2, float angle, vec4 color) {
    vec2 b = (pos * resolution - uv * resolution);
    float angdist = mod(atan(b.x, b.y) - angle, 2.0*pi);
    return color * ((angdist < pi/8.0)
                    && (length(b) >= radius1 * resolution)
                    && (length(b) <= radius2 * resolution) ? 1.0 : 0.0);
}
void mainImage ( out vec4 fragColor, in vec2 fragCoord ) {
    vec2 uv = fragCoord.xy / iResolution.xy;
    vec4 arcSection = arc(uv, center, 0.45, 0.5, radians(mod(iGlobalTime*100.0, 360.0)), arcColor);
    fragColor = arcSection;
}
I am trying to implement reflection mapping in OpenGL ES 2.0 for a sphere.
I have done the skybox.
For rendering the sphere, the reflection shaders I have used are:
Environment mapping (Sphere) vertex shader::
precision highp float;
uniform mat4 u_mvMatrix; // ModelView Matrix
uniform mat4 u_mvpMatrix; // ModelViewProjection Matrix
attribute vec4 a_position;
attribute vec3 a_envmapNormal;
varying vec3 v_eyecoordEyeReflection;
vec3 v_eyecoordPosition;
vec3 v_eyecoordNormal;
void main()
{
    // position and normal in model coordinates
    vec4 modelCoordPosition = a_position;
    vec3 modelCoordNormal = a_envmapNormal;
    // Calculate position in eye space
    v_eyecoordPosition = vec3(u_mvMatrix * modelCoordPosition);
    // Calculate and normalize eye space normal
    vec3 eyecoordNormal = vec3(u_mvMatrix * vec4(modelCoordNormal, 0.0));
    v_eyecoordNormal = normalize(eyecoordNormal);
    // Calculate reflection vector
    v_eyecoordEyeReflection = reflect(v_eyecoordPosition, v_eyecoordNormal);
    gl_Position = u_mvpMatrix * a_position;
}
Environment mapping (Sphere) Fragment shader
precision highp float;
uniform lowp samplerCube baseCubeMapTexture;
varying vec3 v_eyecoordEyeReflection;
void main()
{
    gl_FragColor = textureCube(baseCubeMapTexture, v_eyecoordEyeReflection);
}
But I am not getting the correct output.
When the sphere is rotated, the texture does not change.
What is the error in the shader?
Thanks Andon... I used your shader code, but I am getting a white sphere.
Sphere Normals are calculated using:
#define ANGLE_STEP ((2.0f * OGLES_PI) / ((float) NUM_OF_SLICES))
for ( iCnti = 0; iCnti < NUM_OF_PARALLELS + 1; iCnti++ ) {
    for ( iCntj = 0; iCntj < NUM_OF_SLICES + 1; iCntj++ ) {
        pSphereNormals[iNormalIndex + 0] = sin(ANGLE_STEP * (FLOAT) iCnti )* sin (ANGLE_STEP *(FLOAT)iCntj);
        pSphereNormals[iNormalIndex + 1] = cos(ANGLE_STEP * (FLOAT) iCnti );
        pSphereNormals[iNormalIndex + 2] = sin(ANGLE_STEP * (FLOAT) iCnti )* cos (ANGLE_STEP *(FLOAT)iCntj);
        iNormalIndex += 3;
    }
}
My view matrix matViewMatrix is derived from mat4x4_look_at() (http://www.learnopengles.com/tag/linmath-h/):
MyCameraLookAt(matViewMatrix, 0.0f , 0.0f, -2.0f, 0.0f , 0.0f, -1.0f, 0.0f , 1.0f, 0.0f);
The inverse matrix InvViewMat (the inverse() function is taken from http://www.opensource.apple.com/source/WebCore/WebCore-514/platform/graphics/transforms/TransformationMatrix.cpp) is:
InvViewMat[0][0] = -1.000000 InvViewMat[1][0] = -0.000000 InvViewMat[2][0] = 0.000000 InvViewMat[3][0] = -0.000000
InvViewMat[0][1] = -0.000000 InvViewMat[1][1] = 1.000000 InvViewMat[2][1] = -0.000000 InvViewMat[3][1] = -0.000000
InvViewMat[0][2] = 0.000000 InvViewMat[1][2] = -0.000000 InvViewMat[2][2] = -1.000000 InvViewMat[3][2] = -2.000000
InvViewMat[0][3] = -0.000000 InvViewMat[1][3] = 0.000000 InvViewMat[2][3] = 0.000000 InvViewMat[3][3] = 1.000000
Is there any problem with my matrix values or any of my calculations?
If you have a sphere centered at the camera's origin (eye-space), then no matter how you rotate it the position and normals in eye-space are always going to be the same at any location on screen. That is the definition of a sphere - every vertex is the same distance (radius) from the center.
You actually need to do this in world-space (that position will vary as you rotate the sphere).
Now, this brings up an issue - you only have a ModelView matrix (which transforms from object-space to eye-space). You are going to need to split your Model and View matrices to do this and for convenience you should pass the inverse of the View matrix to GLSL.
Below is a modified Vertex Shader that does what you want:
precision highp float;
uniform mat4 u_vInvMatrix; // Inverse View Matrix -- NEW
uniform mat4 u_mvMatrix; // ModelView Matrix
uniform mat4 u_mvpMatrix; // ModelViewProjection Matrix
attribute vec4 a_position;
attribute vec3 a_envmapNormal;
//varying vec3 v_eyecoordEyeReflection; // YOU DO NOT WANT EYE-SPACE
varying vec3 v_worldReflection; // Use world-space instead -- MODIFIED
vec3 v_eyecoordPosition;
vec3 v_eyecoordNormal;
void main()
{
    // position and normal in model coordinates
    vec4 modelCoordPosition = a_position;
    vec3 modelCoordNormal = a_envmapNormal;
    // Calculate position in eye space
    v_eyecoordPosition = vec3(u_mvMatrix * modelCoordPosition);
    // Calculate and normalize eye space normal
    vec3 eyecoordNormal = vec3(u_mvMatrix * vec4(modelCoordNormal, 0.0));
    v_eyecoordNormal = normalize(eyecoordNormal);
    // Calculate reflection vector (eye-space)
    vec3 eyeReflection = reflect(v_eyecoordPosition, v_eyecoordNormal);
    // Transform the reflection into world-space -- NEW
    v_worldReflection = vec3 (u_vInvMatrix * vec4 (eyeReflection, 0.0)); // 0.0, not 0.0f: GLSL ES 1.00 has no float suffix
    gl_Position = u_mvpMatrix * a_position;
}
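The fragment shader from the question then only needs to read the new world-space varying instead of the eye-space one; a minimal sketch of the matching change:
precision highp float;
uniform lowp samplerCube baseCubeMapTexture;
varying vec3 v_worldReflection; // world-space reflection from the modified vertex shader
void main()
{
    // Sample the cube map with the world-space reflection vector.
    gl_FragColor = textureCube(baseCubeMapTexture, v_worldReflection);
}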
I am working in GLSL with tessellation shaders and I am trying to do displacement mapping. It's working, but I want to move the matrix transformation code from the tessellation evaluation shader to the vertex shader. The reason I want this in the vertex shader is that I do not want to do this calculation for every subdivided triangle's vertices, and I want the vertices to be in screen space in the vertex shader so I can decide how much every triangle should be subdivided in the tessellation control shader.
The version that does not work is "almost" working; there are some issues when the triangles are rendered.
I would really appreciate even the smallest hint of what may be wrong.
This (bad) version works (position and normal are transformed in tessellation evaluation shader)
// vertex shader
void main_(void)
{
    gl_Position = VertexPosition;
    VertexTexCoord1 = VertexTexCoord;
    VertexNormal1 = VertexNormal;
}
// tessellation evaluation shader
void main_()
{
    VertexTexCoord3 = interpolate(VertexTexCoord2);
    vec3 normal = interpolate(VertexNormal2);
    vec4 pos = interpolate(gl_in[0].gl_Position, gl_in[1].gl_Position, gl_in[2].gl_Position);
    vec4 movement = vec4(normal * (texture2D(heigthMap,VertexTexCoord3).r), 0.0);
    gl_Position = mvpMatrix * (pos + movement);
}
This version does not work (position and normal are transformed in vertex shader)
// vertex shader
void main(void)
{
    gl_Position = mvpMatrix * VertexPosition;
    VertexTexCoord1 = VertexTexCoord;
    VertexNormal1 = mat3(mvpMatrix) * VertexNormal;
}
// tessellation evaluation shader
void main()
{
    VertexTexCoord3 = interpolate(VertexTexCoord2);
    vec3 normal = interpolate(VertexNormal2);
    vec4 pos = interpolate(gl_in[0].gl_Position, gl_in[1].gl_Position, gl_in[2].gl_Position);
    vec4 movement = vec4(normal * (texture2D(heigthMap,VertexTexCoord3).r), 0.0);
    gl_Position = (pos + movement);
}
In the "non-working" version the last line in tesselation shader seems to be incorrect. You're forgetting that in the source variant you had 'movement' multiplied by the mvpMatrix.
I would have tried to use this:
// tessellation evaluation shader
void main()
{
    VertexTexCoord3 = interpolate(VertexTexCoord2);
    vec3 normal = interpolate(VertexNormal2);
    vec4 pos = interpolate(gl_in[0].gl_Position, gl_in[1].gl_Position, gl_in[2].gl_Position);
    vec4 movement = vec4(normal * (texture2D(heigthMap,VertexTexCoord3).r), 0.0);
    /// This multiplication by mvpMatrix is inevitable
    gl_Position = (pos + mvpMatrix * movement);
}
Sorry if I mixed up the order of the stages, but the two versions of the code above are definitely not equivalent.
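For completeness, the interpolate() helper used by both evaluation shaders above is not shown in the question; a plausible minimal sketch, assuming a triangles tessellation domain and plain barycentric weighting with gl_TessCoord (written here in an explicit three-argument form; the single-array calls in the question presumably weight the per-vertex inputs the same way):
// Presumed barycentric interpolation inside the tessellation evaluation shader.
vec2 interpolate(vec2 v0, vec2 v1, vec2 v2)
{
    return gl_TessCoord.x * v0 + gl_TessCoord.y * v1 + gl_TessCoord.z * v2;
}
vec3 interpolate(vec3 v0, vec3 v1, vec3 v2)
{
    return gl_TessCoord.x * v0 + gl_TessCoord.y * v1 + gl_TessCoord.z * v2;
}
vec4 interpolate(vec4 v0, vec4 v1, vec4 v2)
{
    return gl_TessCoord.x * v0 + gl_TessCoord.y * v1 + gl_TessCoord.z * v2;
}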
I have working per-fragment lighting, but I wonder what I can do to keep the lighting calculation in model space so that I don't have to multiply the normals by normalModelMatrix in the fragment shader, as below.
Shaders: ViewMatrix - camera transformation, ModelMatrix - object transformation.
Light position: glm::vec4 lightPos(3.0f, 2.0f, -30.0f, 1.0f)
Render loop:
glUseProgram(ProgramId);
glUniformMatrix4fv(ViewMatrixUniformLocation, 1, GL_FALSE, glm::value_ptr(ViewMatrix));
glUniform4f(lightIntensityUniformLocation, 0.8f, 0.8f, 0.8f, 1.0f);
glUniform4f(ambientIntensityUniformLocation, 0.2f, 0.2f, 0.2f, 0.2f);
glUniform3fv(dirToLightUniformLocation, 1, glm::value_ptr( lightPos));
ModelMatrixStack.push(ModelMatrix);
ModelMatrix = glm::translate(ModelMatrix, glm::vec3(0, 0, -30));
ModelMatrix = glm::rotate(ModelMatrix, 75.0f, glm::vec3(0,0,1));
normMatrix = glm::mat3(ModelMatrix);
glUniformMatrix4fv(ModelMatrixUniformLocation, 1, GL_FALSE,
glm::value_ptr(ModelMatrix));
glUniformMatrix3fv(normalModelMatrixUniformLocation, 1, GL_FALSE,
glm::value_ptr(normMatrix));
drawTeapot();
ModelMatrix = ModelMatrixStack.top();
normMatrix = glm::mat3(ModelMatrix);
glUniformMatrix4fv(ModelMatrixUniformLocation, 1, GL_FALSE,
glm::value_ptr(ModelMatrix));
glUniformMatrix3fv(normalModelMatrixUniformLocation, 1, GL_FALSE,
glm::value_ptr(normMatrix));
myground.draw();
glUseProgram(0);
Vertex shader:
#version 400
layout(location=0) in vec4 in_position;
layout(location=1) in vec3 in_normal;
out vec3 normal;
out vec4 position;
uniform mat4 ModelMatrix;
uniform mat4 ViewMatrix;
uniform mat4 ProjectionMatrix;
void main(void)
{
    vec4 vertexPosition = ModelMatrix * in_position;
    gl_Position = ProjectionMatrix * ViewMatrix * vertexPosition;
    normal = in_normal;
    position = vertexPosition;
}
Fragment shader:
#version 400
in vec3 normal;
in vec4 position;
out vec4 outputColor;
uniform vec3 lightPos;
uniform vec4 lightIntensity;
uniform vec4 ambientIntensity;
uniform mat3 normalModelMatrix;
void main(void)
{
    vec3 normCamSpace = normalize(normalModelMatrix * normalize(normal));
    vec3 dirToLight = normalize(lightPos - vec3(position));
    float cosAngIncidence = dot(normCamSpace, dirToLight);
    cosAngIncidence = clamp(cosAngIncidence, 0, 1);
    outputColor = (lightIntensity * cosAngIncidence) + ambientIntensity;
}
The computational cost of doing illumination in model space is actually higher than doing it in eye space, because you have to transform the light position and direction individually for each model; that usually happens on the CPU side. Yet you would still have to perform a transformation of the normals.
"... modelspace where I don't have to multiply normals by normalModelMatrix as below in the fragment shader."
That calculation works as well in the vertex shader. Just move it there.
Update/EDIT: for clarification, here is the modified shader code:
Vertex shader:
#version 400
layout(location=0) in vec4 in_position;
layout(location=1) in vec3 in_normal;
out vec3 eyespaceNormal;
out vec4 eyespacePosition;
uniform mat4 ModelviewMatrix;
uniform mat3 NormalMatrix; // == inverse(transpose(ModelviewMatrix))
uniform mat4 ProjectionMatrix;
void main(void)
{
    eyespacePosition = ModelviewMatrix * in_position;
    eyespaceNormal = normalize(NormalMatrix * in_normal);
    gl_Position = ProjectionMatrix * eyespacePosition;
}
Fragment shader:
#version 400
in vec3 eyespaceNormal;
in vec4 eyespacePosition;
out vec4 outputColor;
uniform vec3 lightPos;
uniform vec4 lightIntensity;
uniform vec4 ambientIntensity;
void main(void)
{
    vec3 dirToLight = normalize(lightPos - vec3(eyespacePosition));
    float cosAngIncidence = dot(eyespaceNormal, dirToLight);
    cosAngIncidence = clamp(cosAngIncidence, 0, 1);
    outputColor = (lightIntensity * cosAngIncidence) + ambientIntensity;
}
BTW: Your normal transformation matrix was wrong. You must use the inverse of the transposed modelview matrix for this.
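As a sketch of what that means for the shaders above (assuming GLSL 4.00, where inverse() is available in the shader), the NormalMatrix uniform can either be computed on the CPU as the inverse-transpose of the modelview matrix, or, at some per-vertex cost, derived directly in the vertex shader:
#version 400
layout(location=0) in vec4 in_position;
layout(location=1) in vec3 in_normal;
out vec3 eyespaceNormal;
out vec4 eyespacePosition;
uniform mat4 ModelviewMatrix;
uniform mat4 ProjectionMatrix;
void main(void)
{
    eyespacePosition = ModelviewMatrix * in_position;
    // Inverse-transpose of the upper-left 3x3 of the modelview matrix;
    // uploading it precomputed as a uniform (as above) is cheaper.
    mat3 normalMatrix = transpose(inverse(mat3(ModelviewMatrix)));
    eyespaceNormal = normalize(normalMatrix * in_normal);
    gl_Position = ProjectionMatrix * eyespacePosition;
}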