I have working per-fragment lighting, but I wonder what I can do to keep the lighting calculation in model space, so that I don't have to multiply the normals by normalModelMatrix in the fragment shader, as below.
Shaders: ViewMatrix - camera transformation, ModelMatrix - object transformation.
Light position - glm::vec4 lightPos(3.0f, 2.0f, -30.0f, 1.0f)
Render loop:
glUseProgram(ProgramId);
glUniformMatrix4fv(ViewMatrixUniformLocation, 1, GL_FALSE, glm::value_ptr(ViewMatrix));
glUniform4f(lightIntensityUniformLocation, 0.8f, 0.8f, 0.8f, 1.0f);
glUniform4f(ambientIntensityUniformLocation, 0.2f, 0.2f, 0.2f, 0.2f);
glUniform3fv(dirToLightUniformLocation, 1, glm::value_ptr( lightPos));
ModelMatrixStack.push(ModelMatrix);
ModelMatrix = glm::translate(ModelMatrix, glm::vec3(0, 0, -30));
ModelMatrix = glm::rotate(ModelMatrix, 75.0f, glm::vec3(0,0,1));
normMatrix = glm::mat3(ModelMatrix);
glUniformMatrix4fv(ModelMatrixUniformLocation, 1, GL_FALSE,
glm::value_ptr(ModelMatrix));
glUniformMatrix3fv(normalModelMatrixUniformLocation, 1, GL_FALSE,
glm::value_ptr(normMatrix));
drawTeapot();
ModelMatrix = ModelMatrixStack.top();
normMatrix = glm::mat3(ModelMatrix);
glUniformMatrix4fv(ModelMatrixUniformLocation, 1, GL_FALSE,
glm::value_ptr(ModelMatrix));
glUniformMatrix3fv(normalModelMatrixUniformLocation, 1, GL_FALSE,
glm::value_ptr(normMatrix));
myground.draw();
glUseProgram(0);
Vertex shader:
#version 400
layout(location=0) in vec4 in_position;
layout(location=1) in vec3 in_normal;
out vec3 normal;
out vec4 position;
uniform mat4 ModelMatrix;
uniform mat4 ViewMatrix;
uniform mat4 ProjectionMatrix;
void main(void)
{
vec4 vertexPosition = ModelMatrix * in_position;
gl_Position = ProjectionMatrix * ViewMatrix * vertexPosition;
normal = in_normal;
position = vertexPosition;
}
Fragment shader:
#version 400
in vec3 normal;
in vec4 position;
out vec4 outputColor;
uniform vec3 lightPos;
uniform vec4 lightIntensity;
uniform vec4 ambientIntensity;
uniform mat3 normalModelMatrix;
void main(void)
{
vec3 normCamSpace = normalize(normalModelMatrix * normalize(normal));
vec3 dirToLight = normalize(lightPos - vec3(position));
float cosAngIncidence = dot(normCamSpace, dirToLight);
cosAngIncidence = clamp(cosAngIncidence, 0, 1);
outputColor = (lightIntensity * cosAngIncidence) + ambientIntensity;
}
The computational cost of doing illumination in model space is actually higher than doing it in eye space, because you have to transform the light position and directions individually for each model. Those transformations usually happen on the CPU side. And even then you still have to transform the normals.
"...model space where I don't have to multiply normals by normalModelMatrix as below in the fragment shader."
That calculation works just as well in the vertex shader. Just move it there.
Update/EDIT
For clarification, here is the modified shader code:
Vertex shader:
#version 400
layout(location=0) in vec4 in_position;
layout(location=1) in vec3 in_normal;
out vec3 eyespaceNormal;
out vec4 eyespacePosition;
uniform mat4 ModelviewMatrix;
uniform mat3 NormalMatrix; // == inverse(transpose(ModelviewMatrix))
uniform mat4 ProjectionMatrix;
void main(void)
{
eyespacePosition = ModelviewMatrix * in_position;
eyespaceNormal = normalize(NormalMatrix * in_normal);
gl_Position = ProjectionMatrix * eyespacePosition;
}
Fragment shader:
#version 400
in vec3 eyespaceNormal;
in vec4 eyespacePosition;
out vec4 outputColor;
uniform vec3 lightPos;
uniform vec4 lightIntensity;
uniform vec4 ambientIntensity;
void main(void)
{
vec3 dirToLight = normalize(lightPos - vec3(eyespacePosition));
float cosAngIncidence = dot(eyespaceNormal, dirToLight);
cosAngIncidence = clamp(cosAngIncidence, 0, 1);
outputColor = (lightIntensity * cosAngIncidence) + ambientIntensity;
}
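Note that with these shaders lightPos must now be supplied in eye space, i.e. pre-multiplied by the view matrix on the CPU. A minimal sketch of the equivalent conversion, written in GLSL and assuming a world-space lightPosWorld uniform (not part of the original code):
uniform mat4 ViewMatrix;
uniform vec4 lightPosWorld; // world-space light position, w = 1.0
// ...
vec3 lightPosEye = vec3(ViewMatrix * lightPosWorld);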
BTW: Your normal transformation matrix was wrong. You must use the inverse of the transposed modelview matrix for this.
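If you would rather not precompute that matrix on the CPU, GLSL 4.00 can also derive it in the vertex shader. A sketch (computing an inverse per vertex is wasteful compared to uploading the matrix once as a uniform, so treat this as illustration only):
// transpose(inverse(M)) is the correct normal transform for any invertible M
mat3 normalMatrix = transpose(inverse(mat3(ModelviewMatrix)));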
Related
I switched my shaders to OpenGL ES 3.0 GLSL.
The shaders do not compile in 3.0...
There is no error info; I just know that this check does not pass:
if (gl.getProgramParameter(shaderProgram, gl.LINK_STATUS)) {
Vertex shader:
#version 300 es
in vec3 aVertexPosition;
in vec3 aVertexNormal;
in vec2 aTextureCoord;
uniform mat4 uMVMatrix;
uniform mat4 uPMatrix;
uniform mat3 uNMatrix;
uniform vec3 uAmbientColor;
uniform vec3 uLightingDirection;
uniform vec3 uDirectionalColor;
uniform bool uUseLighting;
out vec2 vTextureCoord;
out vec3 vLightWeighting;
// Spot
uniform vec3 u_lightWorldPosition;
out vec3 v_normal;
out vec3 v_surfaceToLight;
out vec3 v_surfaceToView;
// spot-Shadow
uniform mat4 u_textureMatrix;
out vec2 v_texcoord;
out vec4 v_projectedTexcoord;
void main(void) {
// SPOT
v_normal = mat3(uNMatrix) * aVertexNormal;
vec3 surfaceWorldPosition = (uNMatrix * aVertexPosition).xyz;
v_surfaceToLight = u_lightWorldPosition - surfaceWorldPosition;
v_surfaceToView = (uPMatrix * uMVMatrix * vec4(aVertexPosition, 1.0)).xyz - surfaceWorldPosition;
// spot shadow
// Multiply the position by the matrix.
// vec4 worldPosition = u_world * a_position;
vec4 worldPosition = vec4(0,0,0,0) * vec4( aVertexPosition, 1.0);
v_texcoord = aTextureCoord;
v_projectedTexcoord = u_textureMatrix * worldPosition;
gl_Position = uPMatrix * uMVMatrix * vec4(aVertexPosition, 1.0);
vTextureCoord = aTextureCoord;
if (!uUseLighting) {
vLightWeighting = vec3(1.0, 1.0, 1.0);
}
else {
vec3 transformedNormal = uNMatrix * aVertexNormal;
float directionalLightWeighting = max(dot(transformedNormal, uLightingDirection), 0.0);
vLightWeighting = uAmbientColor + uDirectionalColor * directionalLightWeighting;
}
}
Fragment shader:
#version 300 es
precision mediump float;
precision highp float;
in vec2 vTextureCoord;
in vec3 vLightWeighting;
uniform float textureSamplerAmount[1];
int MixOperandString = 0;
uniform sampler2D uSampler;
uniform sampler2D uSampler1;
uniform sampler2D uSampler2;
uniform sampler2D uSampler3;
uniform sampler2D uSampler4;
uniform sampler2D uSampler5;
uniform sampler2D uSampler6;
uniform sampler2D uSampler7;
in vec3 v_normal;
in vec3 v_surfaceToLight;
in vec3 v_surfaceToView;
uniform vec4 u_color;
uniform float u_shininess;
uniform vec3 u_lightDirection;
uniform float u_innerLimit; // in dot space
uniform float u_outerLimit; // in dot space
uniform mat4 u_projection;
uniform mat4 u_view;
uniform mat4 u_world;
uniform mat4 u_textureMatrix;
out vec4 outColor;
void main(void) {
vec4 textureColor = texture2D(uSampler, vec2(vTextureCoord.s, vTextureCoord.t));
vec4 textureColor1 = texture2D(uSampler1, vec2(vTextureCoord.s, vTextureCoord.t));
vec4 textureColor2 = texture2D(uSampler2, vec2(vTextureCoord.s, vTextureCoord.t));
vec4 textureColor3 = texture2D(uSampler3, vec2(vTextureCoord.s, vTextureCoord.t));
vec4 textureColor4 = texture2D(uSampler4, vec2(vTextureCoord.s, vTextureCoord.t));
if (1 == 1) {
outColor = vec4(textureColor.rgb * vLightWeighting, textureColor.a);
}
vec3 normal = normalize(v_normal);
vec3 surfaceToLightDirection = normalize(v_surfaceToLight);
vec3 surfaceToViewDirection = normalize(v_surfaceToView);
vec3 halfVector = normalize(surfaceToLightDirection + surfaceToViewDirection);
float dotFromDirection = dot(surfaceToLightDirection,
-u_lightDirection);
float limitRange = u_innerLimit - u_outerLimit;
float inLight = clamp((dotFromDirection - u_outerLimit) / limitRange, 0.0, 1.0);
float light = inLight * dot(normal, surfaceToLightDirection);
float specular = inLight * pow(dot(normal, halfVector), u_shininess);
outColor.rgb *= light;
// Just add in the specular
// outColor.rgb += specular;
}
Trying to catch the error log:
var shaderProgram = gl.createProgram();
// console.log("Creating Shader fragment");
gl.attachShader(shaderProgram, vertexShader);
gl.attachShader(shaderProgram, fragmentShader);
gl.linkProgram(shaderProgram);
console.info('TEST ERROR => ' + gl.getShaderInfoLog(vertexShader));
console.info('TEST ERROR => ' + gl.getShaderInfoLog(fragmentShader));
This does not return anything; it is empty ('').
I also tried this:
var compiled = gl.getShaderParameter(vertexShader, gl.COMPILE_STATUS);
console.log('Shader compiled successfully: ' + compiled);
var compiled = gl.getShaderParameter(fragmentShader, gl.COMPILE_STATUS);
console.log('Shader fragmentShader compiled successfully: ' + compiled);
The output is:
Shader compiled successfully: true
app.js:810 Shader fragmentShader compiled successfully: true
OK, I got the error log:
Shader Program compile failed:ERROR: 0:41: 'texture2D' : no matching overloaded function found
ERROR: 0:41: '=' : dimension mismatch
ERROR: 0:41: '=' : cannot convert from 'const mediump float' to 'highp 4-component vector of float'
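For reference, that error matches the texture2D calls above: GLSL ES 3.00 (#version 300 es) removed the type-named lookup functions in favor of the overloaded texture() function. A minimal sketch of the fix, using the same uniforms as the fragment shader above:
#version 300 es
precision highp float;
in vec2 vTextureCoord;
uniform sampler2D uSampler;
out vec4 outColor;
void main(void) {
// texture() replaces texture2D in GLSL ES 3.00
outColor = texture(uSampler, vTextureCoord);
}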
I am trying to add support for geometry shaders to a Vulkan project, so I am starting with something simple for now.
The goal is, given two vertices forming a line, to generate a rectangle encompassing that line.
For that effect I made this geometry shader:
#version 450
#extension GL_ARB_separate_shader_objects : enable
layout(lines) in;
layout(triangle_strip, max_vertices = 6) out;
layout(location = 0) in vec2 fragCoord[];
layout(location = 0) out vec2 fragTexCoord;
void main() {
vec2 p1 = gl_in[0].gl_Position.xy;
vec2 p2 = gl_in[1].gl_Position.xy;
vec2 tangent = normalize(p2 - p1);
vec2 normal = vec2(tangent.y, -tangent.x) * 0.05;
vec2 quad[4] = vec2[](p1 + normal, p1 - normal, p2 + normal, p2 - normal);
// Create first triangle
gl_Position = vec4(quad[0], 0, 1);
EmitVertex();
gl_Position = vec4(quad[1], 0, 1);
EmitVertex();
gl_Position = vec4(quad[2], 0, 1);
EmitVertex();
EndPrimitive();
// Create second triangle
gl_Position = vec4(quad[1], 0, 1);
EmitVertex();
gl_Position = vec4(quad[2], 0, 1);
EmitVertex();
gl_Position = vec4(quad[3], 0, 1);
EmitVertex();
EndPrimitive();
}
Which outputs parallelograms rather than rectangles.
The vertex shader is:
#version 450
#extension GL_ARB_separate_shader_objects : enable
layout(location = 0) in vec3 inPosition;
layout(location = 1) in vec2 inTexCoord;
layout(location = 0) out vec2 fragTexCoord;
void main() {
gl_Position = vec4(inPosition, 1.0);
fragTexCoord = inTexCoord;
}
I am not sure why the lines are parallelograms instead of rectangles. Adding the normal of the line (the orthogonal direction) to both vertices of the line should, by definition, produce a rectangle.
Edit:
Even hard-coding the vertices in the vertex shader produces the same result:
vec4 verts[2] = vec4[](vec4(-0.5,-0.5,0,1), vec4(0.5,0.5,0,1));
void main() {
gl_Position = verts[gl_VertexID];//vec4(inPosition, 1.0);
fragTexCoord = inTexCoord;
}
I made a silly mistake: the coordinates are computed in normalized device coordinates, but since the window is not square, that space is stretched, deforming the geometry. Rendering to a perfectly square image gives the correct result.
To correct this error I must pass the aspect-ratio information to the shader and adjust the vertex positions accordingly.
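A sketch of that correction in the geometry shader, assuming the aspect ratio (framebuffer width divided by height) is passed in as a uniform or push constant named uAspect (not part of the original code):
// Work in a space where x and y have equal on-screen units,
// then map the offset back to NDC before emitting the vertices.
vec2 aspect = vec2(uAspect, 1.0);
vec2 tangent = normalize((p2 - p1) * aspect);
vec2 normal = vec2(tangent.y, -tangent.x) * 0.05;
normal /= aspect; // undo the stretch so the quad stays rectangular on screen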
I'm using Qt 5.4, and I want to draw several 3D shapes in a QOpenGLWidget. I tried creating two VBOs to hold the vertex data, but only one shape is rendered, and the debug output shows "QOpenGLVertexArrayObject::create() VAO is already created".
Can anyone tell me what to do?
My implementation of QOpenGLWidget is below:
static const char *vertexShaderSourceCore =
"#version 150\n"
"in vec4 vertex;\n"
"in vec3 normal;\n"
"in vec3 color;\n"
"out vec3 vert;\n"
"out vec3 vertNormal;\n"
"out vec3 vertColor;\n"
"uniform mat4 projMatrix;\n"
"uniform mat4 mvMatrix;\n"
"uniform mat3 normalMatrix;\n"
"void main() {\n"
" vert = vertex.xyz;\n"
" vertNormal = normalMatrix * normal;\n"
" vertColor = color;\n"
" gl_Position = projMatrix * mvMatrix * vertex;\n"
"}\n";
static const char *fragmentShaderSourceCore =
"#version 150\n"
"in highp vec3 vert;\n"
"in highp vec3 vertNormal;\n"
"in highp vec3 vertColor;\n"
"out highp vec4 fragColor;\n"
"uniform highp vec3 lightPos;\n"
"void main() {\n"
" highp vec3 L = normalize(lightPos - vert);\n"
" highp float NL = max(dot(normalize(vertNormal), L), 0.0);\n"
" highp vec3 color = vec3(0.5, 0.5, 0);\n"
" highp vec3 col = clamp(vertColor * 0.2 + vertColor * 0.8 * NL, 0.0, 1.0);\n"
" fragColor = vec4(col, 1.0);\n"
"}\n";
static const char *vertexShaderSource =
"attribute vec4 vertex;\n"
"attribute vec3 normal;\n"
"attribute vec3 color;\n"
"varying vec3 vert;\n"
"varying vec3 vertNormal;\n"
"varying vec3 vertColor;\n"
"uniform mat4 projMatrix;\n"
"uniform mat4 mvMatrix;\n"
"uniform mat3 normalMatrix;\n"
"void main() {\n"
" vert = vertex.xyz;\n"
" vertColor = color;\n"
" vertNormal = normalMatrix * normal;\n"
" gl_Position = projMatrix * mvMatrix * vertex;\n"
"}\n";
static const char *fragmentShaderSource =
"varying highp vec3 vert;\n"
"varying highp vec3 vertNormal;\n"
"varying highp vec3 vertColor;\n"
"uniform highp vec3 lightPos;\n"
"void main() {\n"
" highp vec3 L = normalize(lightPos - vert);\n"
" highp float NL = max(dot(normalize(vertNormal), L), 0.0);\n"
" highp vec3 color = vec3(0.39, 1.0, 0.0);\n"
" highp vec3 col = clamp(vertColor * 0.2 + vertColor * 0.8 * NL, 0.0, 1.0);\n"
" gl_FragColor = vec4(col, 1.0);\n"
"}\n";
void DisplayGLWidget::initializeGL()
{
// In this example the widget's corresponding top-level window can change
// several times during the widget's lifetime. Whenever this happens, the
// QOpenGLWidget's associated context is destroyed and a new one is created.
// Therefore we have to be prepared to clean up the resources on the
// aboutToBeDestroyed() signal, instead of the destructor. The emission of
// the signal will be followed by an invocation of initializeGL() where we
// can recreate all resources.
initializeOpenGLFunctions();
glClearColor(255, 255, 255, m_transparent ? 0 : 1);
m_program = new QOpenGLShaderProgram;
m_program->addShaderFromSourceCode(QOpenGLShader::Vertex, m_core ? vertexShaderSourceCore : vertexShaderSource);
m_program->addShaderFromSourceCode(QOpenGLShader::Fragment, m_core ? fragmentShaderSourceCore : fragmentShaderSource);
m_program->bindAttributeLocation("vertex", 0);
m_program->bindAttributeLocation("normal", 1);
m_program->bindAttributeLocation("color", 2);
m_program->link();
m_program->bind();
m_projMatrixLoc = m_program->uniformLocation("projMatrix");
m_mvMatrixLoc = m_program->uniformLocation("mvMatrix");
m_normalMatrixLoc = m_program->uniformLocation("normalMatrix");
m_lightPosLoc = m_program->uniformLocation("lightPos");
m_camera.setToIdentity();
QVector3D eye(0, 0, 8.0);
QVector3D up(0, 1.0, 0);
QVector3D center(0, 0, 0.0);
m_camera.lookAt(eye, center, up);
// Store the vertex attribute bindings for the program.
setupVertexAttribs();
// Light position is fixed.
m_program->setUniformValue(m_lightPosLoc, QVector3D(0, 0, 70));
m_program->release();
}
void DisplayGLWidget::setupVertexAttribs()
{
// Create a vertex array object. In OpenGL ES 2.0 and OpenGL 2.x
// implementations this is optional and support may not be present
// at all. Nonetheless the below code works in all cases and makes
// sure there is a VAO when one is needed.
m_vao.create();
QOpenGLVertexArrayObject::Binder vaoBinder(&m_vao);
// Setup our vertex buffer object.
m_meshModelVbo.create();
m_meshModelVbo.bind();
m_meshModelVbo.allocate(m_model->constData(), m_model->count() * sizeof(GLfloat));
m_meshModelVbo.bind();
QOpenGLFunctions *f = QOpenGLContext::currentContext()->functions();
f->glEnableVertexAttribArray(0);
f->glEnableVertexAttribArray(1);
f->glEnableVertexAttribArray(2);
f->glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 9 * sizeof(GLfloat), 0);
f->glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 9 * sizeof(GLfloat), reinterpret_cast<void *>(3 * sizeof(GLfloat)));
f->glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 9 * sizeof(GLfloat), reinterpret_cast<void *>(6 * sizeof(GLfloat)));
m_meshModelVbo.release();
m_pcModelVbo.create();
m_pcModelVbo.bind();
m_pcModelVbo.allocate(m_model2->constData(), m_model2->count() * sizeof(GLfloat));
m_pcModelVbo.bind();
f->glEnableVertexAttribArray(0);
f->glEnableVertexAttribArray(1);
f->glEnableVertexAttribArray(2);
f->glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 9 * sizeof(GLfloat), 0);
f->glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 9 * sizeof(GLfloat), reinterpret_cast<void *>(3 * sizeof(GLfloat)));
f->glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 9 * sizeof(GLfloat), reinterpret_cast<void *>(6 * sizeof(GLfloat)));
m_pcModelVbo.release();
m_vao.release();
}
void DisplayGLWidget::paintGL()
{
/* paint 1st object */
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
m_vao.bind();
m_meshModelVbo.bind();
glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);
m_world.setToIdentity();
m_world.translate(m_model->getCenter().x(), m_model->getCenter().y(), m_model->getCenter().z());
m_world.rotate(m_xRot / 16.0f, 1, 0, 0);
m_world.rotate(m_yRot / 16.0f, 0, 1, 0);
m_world.rotate(m_zRot / 16.0f, 0, 0, 1);
m_world.translate(-m_model->getCenter().x(), -m_model->getCenter().y(), -m_model->getCenter().z());
QOpenGLVertexArrayObject::Binder vaoBinder(&m_vao);
m_program->bind();
m_program->setUniformValue(m_projMatrixLoc, m_proj);
m_program->setUniformValue(m_mvMatrixLoc, m_camera * m_world);
QMatrix3x3 normalMatrix = m_world.normalMatrix();
m_program->setUniformValue(m_normalMatrixLoc, normalMatrix);
glPointSize(2.0);
glDrawArrays(GL_POINTS, 0, m_model->vertexCount());
glFinish();
m_program->release();
/* paint 2nd object */
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
m_pcModelVbo.bind();
glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);
m_world.setToIdentity();
m_world.translate(m_model2->getCenter().x(), m_model2->getCenter().y(), m_model2->getCenter().z());
m_world.rotate(m_xRot / 16.0f, 1, 0, 0);
m_world.rotate(m_yRot / 16.0f, 0, 1, 0);
m_world.rotate(m_zRot / 16.0f, 0, 0, 1);
m_world.translate(-m_model2->getCenter().x(), -m_model2->getCenter().y(), -m_model2->getCenter().z());
m_program->bind();
m_program->setUniformValue(m_projMatrixLoc, m_proj);
m_program->setUniformValue(m_mvMatrixLoc, m_camera * m_world);
m_program->setUniformValue(m_normalMatrixLoc, normalMatrix);
glPointSize(2.0);
glDrawArrays(GL_POINTS, 0, m_model2->vertexCount());
glFinish();
m_program->release();
m_vao.release();
}
void DisplayGLWidget::resizeGL(int w, int h)
{
if (width != w)
width = w;
if (height != h)
height = h;
m_proj.setToIdentity();
m_proj.perspective(30.0f, GLfloat(w) / h, 2.0f, 20.0f);
}
You should not call glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) before drawing your second shape: it clears the current framebuffer (probably the screen), erasing the shape you just drew.
As for the "QOpenGLVertexArrayObject::create() VAO is already created" warning, I don't know; maybe your setupVertexAttribs() method is called twice?
I am trying to implement reflection mapping in OpenGL ES 2.0 for a sphere.
I have the skybox working.
For the sphere rendering, the reflection shaders I have used are:
Environment mapping (Sphere) vertex shader:
precision highp float;
uniform mat4 u_mvMatrix; // ModelView Matrix
uniform mat4 u_mvpMatrix; // ModelViewProjection Matrix
attribute vec4 a_position;
attribute vec3 a_envmapNormal;
varying vec3 v_eyecoordEyeReflection;
vec3 v_eyecoordPosition;
vec3 v_eyecoordNormal;
void main()
{
// position and normal in model coordinates
vec4 modelCoordPosition = a_position;
vec3 modelCoordNormal = a_envmapNormal;
// Calculate position in eye space
v_eyecoordPosition = vec3(u_mvMatrix * modelCoordPosition);
// Calculate and normalize eye space normal
vec3 eyecoordNormal = vec3(u_mvMatrix * vec4(modelCoordNormal, 0.0));
v_eyecoordNormal = normalize(eyecoordNormal);
// Calculate reflection vector
v_eyecoordEyeReflection = reflect(v_eyecoordPosition, v_eyecoordNormal);
gl_Position = u_mvpMatrix * a_position;
}
Environment mapping (Sphere) Fragment shader
precision highp float;
uniform lowp samplerCube baseCubeMapTexture;
varying vec3 v_eyecoordEyeReflection;
void main()
{
gl_FragColor = textureCube(baseCubeMapTexture, v_eyecoordEyeReflection);
}
But I am not getting correct output: when the sphere is rotated, the texture does not change.
What is the error in the shader?
Thanks Andon...
I used your shader code, but I am getting a white sphere.
The sphere normals are calculated using:
#define ANGLE_STEP ((2.0f * OGLES_PI) / ((float) NUM_OF_SLICES))
for ( iCnti = 0; iCnti < NUM_OF_PARALLELS + 1; iCnti++ ) {
for ( iCntj = 0; iCntj < NUM_OF_SLICES + 1; iCntj++ ) {
pSphereNormals[iNormalIndex + 0] = sin(ANGLE_STEP * (FLOAT) iCnti )* sin (ANGLE_STEP *(FLOAT)iCntj);
pSphereNormals[iNormalIndex + 1] = cos(ANGLE_STEP * (FLOAT) iCnti );
pSphereNormals[iNormalIndex + 2] = sin(ANGLE_STEP * (FLOAT) iCnti )* cos (ANGLE_STEP *(FLOAT)iCntj);
iNormalIndex += 3;
}
}
My view matrix matViewMatrix is derived from mat4x4_look_at() (http://www.learnopengles.com/tag/linmath-h/):
MyCameraLookAt(matViewMatrix, 0.0f, 0.0f, -2.0f, 0.0f, 0.0f, -1.0f, 0.0f, 1.0f, 0.0f);
The inverse matrix InvViewMat (the inverse() function is taken from http://www.opensource.apple.com/source/WebCore/WebCore-514/platform/graphics/transforms/TransformationMatrix.cpp) is:
InvViewMat[0][0] = -1.000000 InvViewMat[1][0] = -0.000000 InvViewMat[2][0] = 0.000000 InvViewMat[3][0] = -0.000000
InvViewMat[0][1] = -0.000000 InvViewMat[1][1] = 1.000000 InvViewMat[2][1] = -0.000000 InvViewMat[3][1] = -0.000000
InvViewMat[0][2] = 0.000000 InvViewMat[1][2] = -0.000000 InvViewMat[2][2] = -1.000000 InvViewMat[3][2] = -2.000000
InvViewMat[0][3] = -0.000000 InvViewMat[1][3] = 0.000000 InvViewMat[2][3] = 0.000000 InvViewMat[3][3] = 1.000000
Is there any problem with my matrix values or any of my calculations?
If you have a sphere centered at the camera's origin (eye-space), then no matter how you rotate it the position and normals in eye-space are always going to be the same at any location on screen. That is the definition of a sphere - every vertex is the same distance (radius) from the center.
You actually need to do this in world-space (that position will vary as you rotate the sphere).
Now, this brings up an issue - you only have a ModelView matrix (which transforms from object-space to eye-space). You are going to need to split your Model and View matrices to do this and for convenience you should pass the inverse of the View matrix to GLSL.
Below is a modified Vertex Shader that does what you want:
precision highp float;
uniform mat4 u_vInvMatrix; // Inverse View Matrix -- NEW
uniform mat4 u_mvMatrix; // ModelView Matrix
uniform mat4 u_mvpMatrix; // ModelViewProjection Matrix
attribute vec4 a_position;
attribute vec3 a_envmapNormal;
//varying vec3 v_eyecoordEyeReflection; // YOU DO NOT WANT EYE-SPACE
varying vec3 v_worldReflection; // Use world-space instead -- MODIFIED
vec3 v_eyecoordPosition;
vec3 v_eyecoordNormal;
void main()
{
// position and normal in model coordinates
vec4 modelCoordPosition = a_position;
vec3 modelCoordNormal = a_envmapNormal;
// Calculate position in eye space
v_eyecoordPosition = vec3(u_mvMatrix * modelCoordPosition);
// Calculate and normalize eye space normal
vec3 eyecoordNormal = vec3(u_mvMatrix * vec4(modelCoordNormal, 0.0));
v_eyecoordNormal = normalize(eyecoordNormal);
// Calculate reflection vector (eye-space)
vec3 eyeReflection = reflect(v_eyecoordPosition, v_eyecoordNormal);
// Transform the reflection into world-space -- NEW
v_worldReflection = vec3(u_vInvMatrix * vec4(eyeReflection, 0.0)); // no 'f' suffix in GLSL ES
gl_Position = u_mvpMatrix * a_position;
}
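The fragment shader then just samples with the renamed varying; a minimal counterpart to the vertex shader above:
precision highp float;
uniform lowp samplerCube baseCubeMapTexture;
varying vec3 v_worldReflection; // matches the modified vertex shader
void main()
{
gl_FragColor = textureCube(baseCubeMapTexture, v_worldReflection);
}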
I'm calculating a circle around a point in the fragment shader. The problem is that the affected part of the texture is not a circle but an oval; the shape actually depends on the texture's dimensions. If the texture were a perfect square I would get a perfect circle, but when it is a rectangle I get an oval. This is the current fragment shader:
varying highp vec2 textureCoordinate;
uniform sampler2D inputImageTexture;
uniform highp vec2 center;
uniform highp float radius;
uniform highp float scale;
void main()
{
highp vec2 textureCoordinateToUse = textureCoordinate;
highp float dist = distance(center, textureCoordinate);
textureCoordinateToUse -= center;
if (dist < radius)
{
highp float percent = 1.0 - ((radius - dist) / radius) * scale;
percent = percent * percent;
textureCoordinateToUse = textureCoordinateToUse * percent;
textureCoordinateToUse += center;
gl_FragColor = texture2D(inputImageTexture, textureCoordinateToUse );
}
textureCoordinateToUse += center;
gl_FragColor = texture2D(inputImageTexture,textureCoordinate);
}
UPDATE SHADER CODE:
highp float aspectRatio = 854.0 / 480.0;
//highp vec2 textureCoordinateToUse = textureCoordinate;
highp vec2 textureCoordinateToUse = vec2(textureCoordinate.x, (textureCoordinate.y * aspectRatio + 0.5 - 0.5 * aspectRatio));
highp float dist = distance(center, textureCoordinateToUse);
textureCoordinateToUse -= center;
if (dist < radius)
{
highp float percent = 1.0 - ((radius - dist) / radius) * scale;
percent = percent * percent;
textureCoordinateToUse = textureCoordinateToUse * percent;
textureCoordinateToUse += center;
gl_FragColor = texture2D(inputImageTexture, textureCoordinateToUse );
return;
}
textureCoordinateToUse += center;
gl_FragColor = texture2D(inputImageTexture,textureCoordinate);
I see you're trying to use my bulge distortion fragment shader. While you haven't actually asked a question, I think I might know what you want here.
If you provide your texture coordinates in normalized 0.0 - 1.0 ranges for a rectangular input texture, the above will operate over an elliptical area rather than a circular one. That's because the above calculations work in texture coordinate space, not the image coordinate space.
To correct for this, you can do one of two things. First, you could provide texture coordinates that account for the aspect ratio of the image (have one of them not max out at 1.0).
Second, you could do what I do in this answer and feed in the aspect ratio of the image as a uniform and use that to correct for the rectangular nature of the image. If you provided an aspectRatio uniform to the shader with the ratio between the width and height of the image, you could replace the first line in the body of your shader with
highp vec2 textureCoordinateToUse = vec2(textureCoordinate.x, (textureCoordinate.y * aspectRatio + 0.5 - 0.5 * aspectRatio));
and it would operate over a circular area.
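One subtlety: the distance test belongs in the aspect-corrected space, but the final fetch still has to happen in real texture space, so the displaced coordinate must be mapped back before sampling. A sketch of the whole body under that assumption (center expressed in the corrected space; this is an illustration, not necessarily the original filter's exact code):
highp vec2 corrected = vec2(textureCoordinate.x, (textureCoordinate.y * aspectRatio + 0.5 - 0.5 * aspectRatio));
highp float dist = distance(center, corrected);
highp vec2 coordToUse = textureCoordinate;
if (dist < radius)
{
highp float percent = 1.0 - ((radius - dist) / radius) * scale;
percent = percent * percent;
corrected = (corrected - center) * percent + center;
// invert the aspect transform so we sample in real texture space
coordToUse = vec2(corrected.x, (corrected.y - 0.5 + 0.5 * aspectRatio) / aspectRatio);
}
gl_FragColor = texture2D(inputImageTexture, coordToUse);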