Need help doing simple rendering with Qt5 Qml + OpenGL

I need part of my Qml view to be “taken over” by some non-Qt OpenGL rendering. I was having issues getting a texture to display properly, so I thought I would just draw a line and get that working before moving on to more complicated code.
For those not familiar with Qt5, the entire window is drawn using OpenGL, and I'm hooking into Qt's OpenGL drawing mechanism using their QQuickWindow::beforeRendering() signal, which means my painting code is executed on every redraw (every vertical sync).
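A minimal sketch of that hook-up (the names follow the Squircle example; they are not copied from my actual code):
connect(window(), &QQuickWindow::beforeRendering,
        m_renderer, &CtRenderer::paint, Qt::DirectConnection);
// The scene graph clears the window itself right before rendering by default;
// turn that off so an underlay drawn in beforeRendering() is not wiped out.
window()->setClearBeforeRendering(false);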
I took the Squircle sample code (http://qt-project.org/doc/qt-5/qtquick-scenegraph-openglunderqml-example.html) and modified it slightly so it would draw in a specified portion of the screen (instead of the entire screen), and that is working perfectly. Then I modified just the renderer's paint() function so that it initially draws three green lines and, after 2 seconds, draws one blue line instead:
void CtRenderer::paint()
{
    static int n = 0;
    if (n == 0)
    {
        glViewport(m_rect.x() + 10, m_rect.y() + 10, m_rect.width() - 20, m_rect.height() - 20);
        glEnable(GL_SCISSOR_TEST);
        glScissor(m_rect.x() + 10, m_rect.y() + 10, m_rect.width() - 20, m_rect.height() - 20);
        glDisable(GL_DEPTH_TEST);
        glDisable(GL_LIGHTING);
        // glClearColor(1, 0, 0, 1);
        // glClear(GL_COLOR_BUFFER_BIT);
        glLineWidth(10);
        glColor4f(0.0, 1.0, 0.0, 1);
        glBegin(GL_LINES);
        glVertex3f(0.0, 0.0, 1); glVertex3f(1, 0.5, 1);
        glVertex3f(0.0, 0.0, 1); glVertex3f(0.5, 0.5, 1);
        glVertex3f(0.0, 0.0, 1); glVertex3f(0.5, 1, 1);
        glEnd();
    }
    else if (n == 120)
    {
        glViewport(m_rect.x() + 10, m_rect.y() + 10, m_rect.width() - 20, m_rect.height() - 20);
        glEnable(GL_SCISSOR_TEST);
        glScissor(m_rect.x() + 10, m_rect.y() + 10, m_rect.width() - 20, m_rect.height() - 20);
        glDisable(GL_DEPTH_TEST);
        glDisable(GL_LIGHTING);
        // glClearColor(1, 0, 0, 1);
        // glClear(GL_COLOR_BUFFER_BIT);
        glLineWidth(10);
        glColor4f(0.0, 0.0, 1.0, 1);
        glBegin(GL_LINES);
        glVertex3f(0.0, 0.0, 1); glVertex3f(0.4, 0.8, 1);
        glEnd();
    }
    n++;
    return;
}
What I get instead are three gray lines that flicker continuously and never change to a single line. After some online research, I thought that maybe I should not use glBegin()/glEnd(), so I changed the code:
void CtRenderer::paint()
{
    static bool bOnce = true;
    if (bOnce)
    {
        glViewport(m_rect.x() + 10, m_rect.y() + 10, m_rect.width() - 20, m_rect.height() - 20);
        glEnable(GL_SCISSOR_TEST);
        glScissor(m_rect.x() + 10, m_rect.y() + 10, m_rect.width() - 20, m_rect.height() - 20);
        float vertices[] = {-0.5f, -0.5f, 0.5f, 0.5f};
        glLineWidth(10);
        glColor4f(0.0, 1.0, 0.0, 1);
        glEnableClientState(GL_VERTEX_ARRAY);
        glVertexPointer(2, GL_FLOAT, 0, vertices);
        glDrawArrays(GL_LINES, 0, 2);
        glDisableClientState(GL_VERTEX_ARRAY);
        bOnce = false;
    }
    return;
}
This still gives me a flickering gray line.
When I try this code in a simple GLUT application, outside of Qt, it works just fine, so it seems to be some interaction between OpenGL and Qt5 Qml. What can I try next?
p.s. I'm using Qt version 5.3 on a Linux Ubuntu box
p.p.s. In response to some comments, I updated my code to look like this:
void dumpGlErrors(int iLine)
{
    for (;;)
    {
        GLenum err = glGetError();
        if (err == GL_NO_ERROR)
            return;
        std::cout << "GL error " << err << " detected in line " << iLine << std::endl;
    }
}

void CtRenderer::paint()
{
    glPushMatrix();
    glLoadIdentity();
    glViewport(m_rect.x() + 10, m_rect.y() + 10, m_rect.width() - 20, m_rect.height() - 20);
    glEnable(GL_SCISSOR_TEST);
    glScissor(m_rect.x() + 10, m_rect.y() + 10, m_rect.width() - 20, m_rect.height() - 20);
    dumpGlErrors(__LINE__);
    glClearColor(1, 0, 1, 1); // Magenta
    glClear(GL_COLOR_BUFFER_BIT);
    bool bDepth = glIsEnabled(GL_DEPTH_TEST);
    glDisable(GL_DEPTH_TEST);
    bool bLighting = glIsEnabled(GL_LIGHTING);
    glDisable(GL_LIGHTING);
    bool bTexture = glIsEnabled(GL_TEXTURE_2D);
    glDisable(GL_TEXTURE_2D);
    dumpGlErrors(__LINE__);
    float vertices[] = { -0.5f, -0.5f, 0.1f, 0.5f, 0.5f, 0.1f, 0.5f, 0.5f, 1.0f, 0.5f, -0.5f, 1.0f };
    float colors[] = { 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f };
    glLineWidth(10);
    glColor4f(0.0, 0.0, 1.0, 1);
    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(3, GL_FLOAT, 0, vertices);
    glEnableClientState(GL_COLOR_ARRAY);
    glColorPointer(4, GL_FLOAT, 0, colors);
    dumpGlErrors(__LINE__);
    glDrawArrays(GL_LINES, 0, 4); // 4, not 2.
    dumpGlErrors(__LINE__);
    // glFlush();
    glDisableClientState(GL_COLOR_ARRAY);
    glDisableClientState(GL_VERTEX_ARRAY);
    if (bTexture)
        glEnable(GL_TEXTURE_2D);
    if (bLighting)
        glEnable(GL_LIGHTING);
    if (bDepth)
        glEnable(GL_DEPTH_TEST);
    glPopMatrix();
    dumpGlErrors(__LINE__);
    // In case somebody else calls glClear()
    glClearColor(0, 1, 1, 1); // Cyan
}
Now I get a nice magenta background and I see my line flash once and then it goes away and I'm left with only magenta.
p.p.p.s. I tried using VBO type functions:
class Point
{
public:
    float m_vertex[3];
    float m_color[4];
};

void SquircleRenderer::drawBuffer()
{
    QOpenGLFunctions glFuncs(QOpenGLContext::currentContext());
    if (!m_bBufInit)
    {
        std::cout << "SquircleRenderer::drawBuffer()" << std::endl;
        // Adding these two lines doesn't change anything
        /*
        glFuncs.initializeOpenGLFunctions();
        glFuncs.glUseProgram(0);
        */
        // Create a new VBO
        glFuncs.glGenBuffers(1, &m_buf);
        // Make the new VBO active
        glFuncs.glBindBuffer(GL_ARRAY_BUFFER, m_buf);
        static const Point points[4] = {
            { { -0.5f, -0.5f, 0.1f }, { 0.0f, 1.0f, 0.0f, 1.0f } },
            { {  0.5f,  0.5f, 0.1f }, { 1.0f, 0.0f, 0.0f, 1.0f } },
            { {  0.5f,  0.5f, 1.0f }, { 0.0f, 0.0f, 1.0f, 1.0f } },
            { {  0.5f, -0.5f, 1.0f }, { 1.0f, 1.0f, 0.0f, 1.0f } }
        };
        // Upload vertex data to the video device
        glFuncs.glBufferData(GL_ARRAY_BUFFER, 4 * sizeof(Point), points, GL_STATIC_DRAW);
        dumpGlErrors(__LINE__);
        m_bBufInit = true;
    }
    glPushMatrix();
    glLoadIdentity();
    glViewport(m_rect.x() + 10, m_rect.y() + 10, m_rect.width() - 20, m_rect.height() - 20);
    glEnable(GL_SCISSOR_TEST);
    glScissor(m_rect.x() + 10, m_rect.y() + 10, m_rect.width() - 20, m_rect.height() - 20);
    glClearColor(1, 1, 0, 1); // Yellow
    glClear(GL_COLOR_BUFFER_BIT);
    glFuncs.glBindBuffer(GL_ARRAY_BUFFER, m_buf);
    dumpGlErrors(__LINE__);
    /*
     * "If a non-zero named buffer object is bound to the GL_ARRAY_BUFFER target
     * (see glBindBuffer) while a vertex array is specified, pointer is treated
     * as a byte offset into the buffer object's data store"
     */
    glLineWidth(10);
    glColor4f(0.0, 0.0, 1.0, 1);
    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(3, GL_FLOAT, sizeof(Point), 0);
    glEnableClientState(GL_COLOR_ARRAY);
    glColorPointer(4, GL_FLOAT, sizeof(Point), (void*)offsetof(Point, m_color));
    dumpGlErrors(__LINE__);
    glDrawArrays(GL_LINES, 0, 4); // 4, not 2.
    dumpGlErrors(__LINE__);
    glDisableClientState(GL_COLOR_ARRAY);
    glDisableClientState(GL_VERTEX_ARRAY);
    // In case somebody else calls glClear()
    glClearColor(0, 1, 1, 1); // Cyan
    glPopMatrix();
    dumpGlErrors(__LINE__);
}
As with all my other efforts, I get a yellow background and my lines appear for what I assume is 1/60 second before the lines disappear and all I have is my yellow background.

I discovered the answer: I have to tell Qt that I'm using the old fixed-function pipeline. Making one function call before doing any drawing does the trick:
QOpenGLFunctions glFuncs(QOpenGLContext::currentContext());
glFuncs.glUseProgram(0);
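To make the placement explicit, here is a sketch of where the call goes (the body is just the paint() routine from above, abbreviated):
void CtRenderer::paint()
{
    // Qt Quick leaves its own shader program bound after drawing the scene graph;
    // release it so the fixed-function calls below actually take effect.
    QOpenGLFunctions glFuncs(QOpenGLContext::currentContext());
    glFuncs.glUseProgram(0);
    // ... glViewport()/glScissor()/glDrawArrays() exactly as in the snippets above ...
}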

With Qt Quick there are three ways you can mix in your own OpenGL:
1. You can draw under the (entire) QML scene (see http://qt-project.org/doc/qt-5/qtquick-scenegraph-openglunderqml-example.html).
2. You can draw over the (entire) QML scene.
3. You can supply an OpenGL texture that is drawn on a QML element (see http://qt-project.org/doc/qt-5/qsgsimpletexturenode.html); a rough sketch of this approach follows the list.
Alternatively, you can build a QWidget application and use OpenGL in one QWidget window and QML in another (a QML widget window).
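A rough sketch of option 3, assuming a custom QQuickItem subclass whose updatePaintNode() wraps an externally rendered GL texture (the textureId member and the 512x512 size are placeholders, not from my code):
#include <QQuickWindow>
#include <QSGSimpleTextureNode>

QSGNode *MyItem::updatePaintNode(QSGNode *oldNode, UpdatePaintNodeData *)
{
    QSGSimpleTextureNode *node = static_cast<QSGSimpleTextureNode *>(oldNode);
    if (!node) {
        node = new QSGSimpleTextureNode();
        // Wrap the GL texture produced by the non-Qt rendering code
        node->setTexture(window()->createTextureFromId(textureId, QSize(512, 512)));
    }
    node->setRect(boundingRect());
    return node;
}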

void SquircleRenderer::paint()
{
    if (!m_program) {
        m_program = new QOpenGLShaderProgram();
        m_program->addShaderFromSourceCode(QOpenGLShader::Vertex,
            "attribute highp vec4 aVertices;"
            "attribute highp vec4 aColors;"
            "varying highp vec4 vColors;"
            "void main() {"
            "    gl_Position = aVertices;"
            "    vColors = aColors;"
            "}");
        m_program->addShaderFromSourceCode(QOpenGLShader::Fragment,
            "varying highp vec4 vColors;"
            "void main() {"
            "    gl_FragColor = vColors;"
            "}");
        m_program->bindAttributeLocation("aVertices", 0);
        m_program->bindAttributeLocation("aColors", 1);
        m_program->link();
    }
    m_program->bind();
    m_program->enableAttributeArray(0);
    m_program->enableAttributeArray(1);
    float vertices[] = {
        -1, -1, // Diagonal from bottom left to top right
         1,  1,
        -1,  1, // Diagonal from top left to bottom right
         1, -1,
        -1,  0, // Horizontal line
         1,  0
    };
    float colors[] = {
        1, 1, 0, 1,
        1, 0, 1, 1,
        0, 1, 1, 1,
        1, 0, 0, 1,
        0, 0, 1, 1,
        0, 1, 0, 1
    };
    m_program->setAttributeArray(0, GL_FLOAT, vertices, 2); // 3rd to 0, 4th to 1 by default
    m_program->setAttributeArray(1, GL_FLOAT, colors, 4);
    glViewport(0, 0, m_viewportSize.width(), m_viewportSize.height());
    glDisable(GL_DEPTH_TEST);
    glClearColor(0, 0, 0, 1);
    glClear(GL_COLOR_BUFFER_BIT);
    // Here I draw 3 lines; reduce 6 to 2 to draw only one.
    // Change the second parameter to choose which line to draw.
    glDrawArrays(GL_LINES, 0, 6);
    m_program->disableAttributeArray(0);
    m_program->disableAttributeArray(1);
    m_program->release();
}
My conclusion would be to leave the legacy version of OpenGL in the history books. Even for dummy shaders like these, it saves you the trouble of managing any matrices and you know exactly what is happening.
PS: The code hasn't been tested; it's just a tweak of the Squircle code. Let me know if there is trouble, but I'd rather continue the troubleshooting session from this piece of code.

Related

Render to texture mipmap level

I am trying to understand the correct approach to render to a specific texture mipmap level.
In the example below, I attempt to render the color cyan to mipmap level 1 of the texture. If I change the level from 1 to 0 in the framebufferTexture2D call, the canvas displays cyan as expected. However, I don't understand why only level 0 works here, since non-zero levels are supported by the WebGL 2 / OpenGL ES 3 specification.
I've also tried explicitly detaching level 0 (binding to null) and various other combinations (i.e. using texImage2D instead of texStorage2D), but none of the combinations seem to render to the mipmap level.
const
canvas = document.createElement('canvas'),
gl = canvas.getContext('webgl2'),
triangle = new Float32Array([ 0, 0, 2, 0, 0, 2 ]),
texture = gl.createTexture(),
framebuffer = gl.createFramebuffer(),
size = 100,
vertex = createShader(gl.VERTEX_SHADER, `#version 300 es
precision mediump float;
uniform sampler2D sampler;
layout(location = 0) in vec2 position;
out vec4 color;
void main() {
color = textureLod(sampler, position, 0.5);
gl_Position = vec4(position * 2. - 1., 0, 1);
}`
),
fragment = createShader(gl.FRAGMENT_SHADER, `#version 300 es
precision mediump float;
in vec4 color;
out vec4 fragColor;
void main() {
fragColor = color;
}`
),
program = gl.createProgram();
canvas.width = canvas.height = size;
document.body.appendChild(canvas);
gl.viewport(0, 0, size, size);
gl.attachShader(program, vertex);
gl.attachShader(program, fragment);
gl.linkProgram(program);
if (!gl.getProgramParameter(program, gl.LINK_STATUS)) {
console.error('program');
}
gl.useProgram(program);
// Create a big triangle
gl.bindBuffer(gl.ARRAY_BUFFER, gl.createBuffer());
gl.bufferData(gl.ARRAY_BUFFER, triangle, gl.STATIC_DRAW);
gl.vertexAttribPointer(0, 2, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(0);
// Create a texture with mipmap levels 0 (base) and 1
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texStorage2D(gl.TEXTURE_2D, 2, gl.RGB8, 2, 2);
// Setup framebuffer to render to texture level 1, clear to cyan
gl.bindFramebuffer(gl.FRAMEBUFFER, framebuffer);
gl.framebufferTexture2D(
gl.FRAMEBUFFER,
gl.COLOR_ATTACHMENT0,
gl.TEXTURE_2D,
texture,
1 // Switching this to `0` will work fine
);
const status = gl.checkFramebufferStatus(gl.FRAMEBUFFER);
if (status !== gl.FRAMEBUFFER_COMPLETE) {
console.error(status);
}
gl.clearColor(0, 1, 1, 1);
gl.clear(gl.COLOR_BUFFER_BIT);
// Detach framebuffer, clear to red
gl.bindFramebuffer(gl.FRAMEBUFFER, null);
gl.clearColor(1, 0, 0, 1);
gl.clear(gl.COLOR_BUFFER_BIT);
// Draw the triangle
gl.drawArrays(gl.TRIANGLES, 0, 3);
// Some utility functions to cleanup the above code
function createShader(type, source) {
const shader = gl.createShader(type);
gl.shaderSource(shader, source);
gl.compileShader(shader);
if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
console.log(gl.getShaderInfoLog(shader));
}
return shader;
}
I expect that I'm doing something wrong in the setup, but I haven't been able to find many examples of this.
Don't you want either
color = textureLod(sampler, position, 0.0); // lod 0
or
color = textureLod(sampler, position, 1.0); // lod 1
?
The code didn't set filtering in a way that lets you actually access the other lods.
It had the min filter set to gl.NEAREST, which means only lod 0 is ever used.
const canvas = document.createElement('canvas');
const gl = canvas.getContext('webgl2');
const triangle = new Float32Array([0, -1, 1, -1, 1, 1]);
const texture = gl.createTexture();
const framebuffers = [];
canvas.width = canvas.height = 100;
document.body.appendChild(canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
const vertex = createShader(gl.VERTEX_SHADER, `#version 300 es
precision mediump float;
uniform sampler2D sampler;
uniform float lod;
uniform vec4 offset;
layout(location = 0) in vec4 position;
out vec4 color;
void main() {
color = textureLod(sampler, vec2(.5), lod);
gl_Position = position + offset;
}`
);
const fragment = createShader(gl.FRAGMENT_SHADER, `#version 300 es
precision mediump float;
in vec4 color;
out vec4 fragColor;
void main() {
fragColor = color;
}`
);
const program = createProgram(vertex, fragment);
const lodLocation = gl.getUniformLocation(program, "lod");
const offsetLocation = gl.getUniformLocation(program, "offset");
// Create a big triangle
gl.bindBuffer(gl.ARRAY_BUFFER, gl.createBuffer());
gl.bufferData(gl.ARRAY_BUFFER, triangle, gl.STATIC_DRAW);
gl.vertexAttribPointer(0, 2, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(0);
// Create a texture with mipmap levels 0 (base) and 1
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST_MIPMAP_NEAREST);
gl.texStorage2D(gl.TEXTURE_2D, 2, gl.RGB8, 2, 2);
// Setup framebuffers for each level
for (let i = 0; i < 2; ++i) {
const framebuffer = gl.createFramebuffer();
gl.bindFramebuffer(gl.FRAMEBUFFER, framebuffer);
gl.framebufferTexture2D(
gl.FRAMEBUFFER,
gl.COLOR_ATTACHMENT0,
gl.TEXTURE_2D,
texture,
i);
let status = gl.checkFramebufferStatus(gl.FRAMEBUFFER);
if (status !== gl.FRAMEBUFFER_COMPLETE) {
console.error(glErrToString(gl, status));
}
const r = (i === 0) ? 1 : 0;
const g = (i === 1) ? 1 : 0;
gl.clearColor(r, g, 0, 1);
gl.clear(gl.COLOR_BUFFER_BIT);
framebuffers.push(framebuffer);
};
// Detach framebuffer, clear to red
gl.bindFramebuffer(gl.FRAMEBUFFER, null);
gl.clearColor(0, 0, 0, 1);
gl.clear(gl.COLOR_BUFFER_BIT);
// Draw the triangle
gl.uniform1f(lodLocation, 0);
gl.uniform4fv(offsetLocation, [0, 0, 0, 0]);
gl.drawArrays(gl.TRIANGLES, 0, 3);
gl.uniform1f(lodLocation, 1.);
gl.uniform4fv(offsetLocation, [-1, 0, 0, 0]);
gl.drawArrays(gl.TRIANGLES, 0, 3);
// Some utility functions to cleanup the above code
function createShader(shaderType, source) {
const shader = gl.createShader(shaderType);
gl.shaderSource(shader, source);
gl.compileShader(shader);
if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
console.log(gl.getShaderInfoLog(shader));
}
return shader;
}
function createProgram(vertex, fragment) {
const program = gl.createProgram();
gl.attachShader(program, vertex);
gl.attachShader(program, fragment);
gl.linkProgram(program);
if (!gl.getProgramParameter(program, gl.LINK_STATUS)) {
console.error('program');
}
gl.useProgram(program);
return program;
}
function glErrToString(gl, error) {
for (var key in gl) {
if (gl[key] === error) {
return key;
}
}
return "0x" + error.toString(16);
}

How to use QPainter for overlaying in QOpenGLWidget [closed]

I've run into a problem using QPainter for overlaying in a QOpenGLWidget in Qt.
I created a CameraSurface_GL class that draws YUV images received from an IP camera.
Here's my code:
#include "camerasurface_gl.h"
#include <QtWidgets>
#include <QtGui>
#include <QOpenGLFunctions_3_0>
const char* YUV420P_VS = ""
"#version 330\n"
""
"uniform mat4 u_pm;"
"uniform vec4 draw_pos;"
""
"const vec2 verts[4] = vec2[] ("
" vec2(-0.5, 0.5), "
" vec2(-0.5, -0.5), "
" vec2( 0.5, 0.5), "
" vec2( 0.5, -0.5) "
");"
""
"const vec2 texcoords[4] = vec2[] ("
" vec2(0.0, 1.0), "
" vec2(0.0, 0.0), "
" vec2(1.0, 1.0), "
" vec2(1.0, 0.0) "
"); "
""
"out vec2 v_coord; "
""
"void main() {"
" vec2 vert = verts[gl_VertexID];"
" vec4 p = vec4((0.5 * draw_pos.z) + draw_pos.x + (vert.x * draw_pos.z), "
" (0.5 * draw_pos.w) + draw_pos.y + (vert.y * draw_pos.w), "
" 0, 1);"
" gl_Position = u_pm * p;"
" v_coord = texcoords[gl_VertexID];"
"}"
"";
const char* YUV420P_FS = ""
"#version 330\n"
"uniform sampler2D y_tex;"
"uniform sampler2D u_tex;"
"uniform sampler2D v_tex;"
"in vec2 v_coord;"
"layout( location = 0 ) out vec4 fragcolor;"
""
"const vec3 R_cf = vec3(1, 0.000000, 1.13983);"
"const vec3 G_cf = vec3(1, -0.39465, -0.58060);"
"const vec3 B_cf = vec3(1, 2.03211, 0.000000);"
"const vec3 offset = vec3(-0.0625, -0.5, -0.5);"
""
"void main() {"
" float y = texture(y_tex, v_coord).r;"
" float u = texture(u_tex, v_coord).r;"
" float v = texture(v_tex, v_coord).r;"
" vec3 yuv = vec3(y,u,v);"
" yuv += offset;"
" fragcolor = vec4(0.0, 0.0, 0.0, 1.0);"
" fragcolor.r = dot(yuv, R_cf);"
" fragcolor.g = dot(yuv, G_cf);"
" fragcolor.b = dot(yuv, B_cf);"
"}"
"";
CameraSurface_GL::CameraSurface_GL(QWidget *parent) :
QOpenGLWidget(parent)
,vid_w(0)
,vid_h(0)
,win_w(0)
,win_h(0)
,vao(0)
,vao1(0)
,y_tex(0)
,u_tex(0)
,v_tex(0)
,vert(0)
,frag(0)
,prog(0)
,u_pos(-1)
,textures_created(false)
,shader_created(false)
{
qRegisterMetaType<YUVFrame>("YUVFrame");
m_ipCamera = new IpCamera;
m_yuvWidth = 0;
m_yuvHeight = 0;
connect(m_ipCamera, SIGNAL(frameChanged(YUVFrame)), this, SLOT(slotFrameChanged(YUVFrame)));
m_frameCount = 0;
m_setup = 0;
}
CameraSurface_GL::~CameraSurface_GL()
{
glDeleteTextures(1, &y_tex);
glDeleteTextures(1, &u_tex);
glDeleteTextures(1, &v_tex);
glDeleteProgram(prog);
}
void CameraSurface_GL::openCamera(QString ipAddress, QString userName, QString password)
{
if(m_ipCamera->isRunning())
m_ipCamera->close();
qDebug() << "open camera";
bool opend = m_ipCamera->open(ipAddress, userName, password);
if(opend == false)
return;
}
void CameraSurface_GL::closeCamera()
{
m_ipCamera->close();
}
void CameraSurface_GL::slotFrameChanged(YUVFrame frame)
{
m_yuvData = QByteArray((char*)frame.yuvData, frame.width * frame.height * 3 / 2);
m_yuvWidth = frame.width;
m_yuvHeight = frame.height;
if(m_setup == 0)
{
setup_gl(frame.width, frame.height);
resize_gl(rect().width(), rect().height());
m_setup = 1;
}
update();
m_frameCount ++;
}
void CameraSurface_GL::setup_gl(int width, int height)
{
vid_w = width;
vid_h = height;
if(!vid_w || !vid_h) {
printf("Invalid texture size.\n");
return;
}
if(!setupTextures()) {
return;
}
if(!setupShader()) {
return;
}
return;
}
void CameraSurface_GL::resize_gl(int width, int height)
{
win_w = width;
win_h = height;
pm.setToIdentity();
pm.ortho(0, win_w, win_h, 0, 0.0, 100.0f);
glUseProgram(prog);
glUniformMatrix4fv(glGetUniformLocation(prog, "u_pm"), 1, GL_FALSE, pm.data());
}
void CameraSurface_GL::resizeGL(int width, int height)
{
glViewport(0, 0, width, height);
if(m_setup)
{
resize_gl(width, height);
}
}
bool CameraSurface_GL::setupTextures()
{
if(textures_created) {
printf("Textures already created.\n");
return false;
}
glGenTextures(1, &y_tex);
glBindTexture(GL_TEXTURE_2D, y_tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, vid_w, vid_h, 0, GL_RED, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glGenTextures(1, &u_tex);
glBindTexture(GL_TEXTURE_2D, u_tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, vid_w/2, vid_h/2, 0, GL_RED, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glGenTextures(1, &v_tex);
glBindTexture(GL_TEXTURE_2D, v_tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, vid_w/2, vid_h/2, 0, GL_RED, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
textures_created = true;
return true;
}
bool CameraSurface_GL::setupShader()
{
if(shader_created) {
printf("Already creatd the shader.\n");
return false;
}
vert = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vert, 1, &YUV420P_VS, 0);
glCompileShader(vert);
frag = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(frag, 1, &YUV420P_FS, 0);
glCompileShader(frag);
prog = glCreateProgram();
glAttachShader(prog, vert);
glAttachShader(prog, frag);
glLinkProgram(prog);
glUseProgram(prog);
glUniform1i(glGetUniformLocation(prog, "y_tex"), 0);
glUniform1i(glGetUniformLocation(prog, "u_tex"), 1);
glUniform1i(glGetUniformLocation(prog, "v_tex"), 2);
u_pos = glGetUniformLocation(prog, "draw_pos");
glUseProgram(0);
return true;
}
void CameraSurface_GL::setYPixels(uint8_t* pixels, int stride)
{
glBindTexture(GL_TEXTURE_2D, y_tex);
glPixelStorei(GL_UNPACK_ROW_LENGTH, stride);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, vid_w, vid_h, GL_RED, GL_UNSIGNED_BYTE, pixels);
}
void CameraSurface_GL::setUPixels(uint8_t* pixels, int stride)
{
glBindTexture(GL_TEXTURE_2D, u_tex);
glPixelStorei(GL_UNPACK_ROW_LENGTH, stride);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, vid_w/2, vid_h/2, GL_RED, GL_UNSIGNED_BYTE, pixels);
}
void CameraSurface_GL::setVPixels(uint8_t* pixels, int stride)
{
glBindTexture(GL_TEXTURE_2D, v_tex);
glPixelStorei(GL_UNPACK_ROW_LENGTH, stride);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, vid_w/2, vid_h/2, GL_RED, GL_UNSIGNED_BYTE, pixels);
}
void CameraSurface_GL::initializeGL()
{
initializeOpenGLFunctions();
glClearColor(0, 0, 0, 1);
}
void CameraSurface_GL::paintEvent(QPaintEvent* e)
{
QPainter painter;
painter.begin(this);
painter.beginNativePainting();
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
if(m_yuvData.size())
{
setYPixels((unsigned char*)m_yuvData.data(), m_yuvWidth);
setUPixels((unsigned char*)m_yuvData.data() + m_yuvWidth * m_yuvHeight, m_yuvWidth / 2);
setVPixels((unsigned char*)m_yuvData.data() + m_yuvWidth * m_yuvHeight * 5 / 4, m_yuvWidth / 2);
glBindVertexArray(vao);
glUseProgram(prog);
glUniform4f(u_pos, rect().left(), rect().top(), rect().width(), rect().height());
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, y_tex);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, u_tex);
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, v_tex);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glUseProgram(0);
}
painter.endNativePainting();
painter.end();
}
I want to draw content on top of the YUV layer, so I appended the following code to the paintEvent(QPaintEvent* e) function:
void CameraSurface_GL::paintEvent(QPaintEvent* e)
{
QPainter painter;
painter.begin(this);
painter.beginNativePainting();
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
if(m_yuvData.size())
{
setYPixels((unsigned char*)m_yuvData.data(), m_yuvWidth);
setUPixels((unsigned char*)m_yuvData.data() + m_yuvWidth * m_yuvHeight, m_yuvWidth / 2);
setVPixels((unsigned char*)m_yuvData.data() + m_yuvWidth * m_yuvHeight * 5 / 4, m_yuvWidth / 2);
glBindVertexArray(vao);
glUseProgram(prog);
glUniform4f(u_pos, rect().left(), rect().top(), rect().width(), rect().height());
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, y_tex);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, u_tex);
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, v_tex);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glUseProgram(0);
}
painter.endNativePainting();
//////new appended code//////
QPixmap memPix(rect().width(), rect().height());
QPainter memDC;
memDC.setPen(Qt::red);
memDC.drawRect(QRect(0, 0, 100, 100));
painter.drawPixmap(rect(), memPix);
/////end/////
painter.end();
}
At this point my program crashes.
When I changed it to the following, I got the same crash:
void CameraSurface_GL::paintEvent(QPaintEvent* e)
{
QPainter painter;
painter.begin(this);
painter.beginNativePainting();
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
if(m_yuvData.size())
{
setYPixels((unsigned char*)m_yuvData.data(), m_yuvWidth);
setUPixels((unsigned char*)m_yuvData.data() + m_yuvWidth * m_yuvHeight, m_yuvWidth / 2);
setVPixels((unsigned char*)m_yuvData.data() + m_yuvWidth * m_yuvHeight * 5 / 4, m_yuvWidth / 2);
}
painter.endNativePainting();
QPixmap memPix(rect().width(), rect().height());
QPainter memDC;
memDC.setPen(Qt::red);
memDC.drawRect(QRect(0, 0, 100, 100));
painter.drawPixmap(rect(), memPix);
painter.end();
}
When I remove the calls to setYPixels, setUPixels and setVPixels, the program does not crash.
I can't figure out why I'm getting this error; I'd like to know the cause of the issue.
You should unbind any textures, buffers and so on before handing the context back to QPainter. I had quite a similar problem with an unbound VBO.
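For illustration, a sketch of what that could look like at the end of the native-painting block (untested; it assumes the paintEvent() shown above):
// Reset everything the native block changed before giving control back to QPainter
glActiveTexture(GL_TEXTURE2); glBindTexture(GL_TEXTURE_2D, 0);
glActiveTexture(GL_TEXTURE1); glBindTexture(GL_TEXTURE_2D, 0);
glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, 0);
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glUseProgram(0);
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); // setYPixels() and friends change this too
painter.endNativePainting();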

Why should I generate the texture in initializeGL (Qt)?

I'm trying to draw a YUV image using Qt OpenGL.
I've created a 'CameraSurface_GL' class, inherited from QGLWidget, that draws YUV images received from an IP camera.
Here is my code:
#include "camerasurface_gl.h"
#include <QtWidgets>
#include <QtGui>
#include <QOpenGLFunctions_3_0>
const char* YUV420P_VS = ""
"#version 330\n"
""
"uniform mat4 u_pm;"
"uniform vec4 draw_pos;"
""
"const vec2 verts[4] = vec2[] ("
" vec2(-0.5, 0.5), "
" vec2(-0.5, -0.5), "
" vec2( 0.5, 0.5), "
" vec2( 0.5, -0.5) "
");"
""
"const vec2 texcoords[4] = vec2[] ("
" vec2(0.0, 1.0), "
" vec2(0.0, 0.0), "
" vec2(1.0, 1.0), "
" vec2(1.0, 0.0) "
"); "
""
"out vec2 v_coord; "
""
"void main() {"
" vec2 vert = verts[gl_VertexID];"
" vec4 p = vec4((0.5 * draw_pos.z) + draw_pos.x + (vert.x * draw_pos.z), "
" (0.5 * draw_pos.w) + draw_pos.y + (vert.y * draw_pos.w), "
" 0, 1);"
" gl_Position = u_pm * p;"
" v_coord = texcoords[gl_VertexID];"
"}"
"";
const char* YUV420P_FS = ""
"#version 330\n"
"uniform sampler2D y_tex;"
"uniform sampler2D u_tex;"
"uniform sampler2D v_tex;"
"in vec2 v_coord;"
"layout( location = 0 ) out vec4 fragcolor;"
""
"const vec3 R_cf = vec3(1, 0.000000, 1.13983);"
"const vec3 G_cf = vec3(1, -0.39465, -0.58060);"
"const vec3 B_cf = vec3(1, 2.03211, 0.000000);"
"const vec3 offset = vec3(-0.0625, -0.5, -0.5);"
""
"void main() {"
" float y = texture(y_tex, v_coord).r;"
" float u = texture(u_tex, v_coord).r;"
" float v = texture(v_tex, v_coord).r;"
" vec3 yuv = vec3(y,u,v);"
" yuv += offset;"
" fragcolor = vec4(0.0, 0.0, 0.0, 1.0);"
" fragcolor.r = dot(yuv, R_cf);"
" fragcolor.g = dot(yuv, G_cf);"
" fragcolor.b = dot(yuv, B_cf);"
"}"
"";
CameraSurface_GL::CameraSurface_GL(QWidget *parent) :
QGLWidget(parent)
,vid_w(0)
,vid_h(0)
,win_w(0)
,win_h(0)
,vao(0)
,y_tex(0)
,u_tex(0)
,v_tex(0)
,vert(0)
,frag(0)
,prog(0)
,u_pos(-1)
,textures_created(false)
,shader_created(false)
{
qRegisterMetaType<YUVFrame>("YUVFrame");
m_ipCamera = new IpCamera;
connect(m_ipCamera, SIGNAL(frameChanged(YUVFrame)), this, SLOT(slotFrameChanged(YUVFrame)));
m_frameCount = 0;
m_setup = 0;
}
CameraSurface_GL::~CameraSurface_GL()
{
glDeleteTextures(1, &y_tex);
glDeleteTextures(1, &u_tex);
glDeleteTextures(1, &v_tex);
glDeleteProgram(prog);
}
void CameraSurface_GL::openCamera(QString ipAddress, QString userName, QString password)
{
if(m_ipCamera->isRunning())
m_ipCamera->close();
bool opend = m_ipCamera->open(ipAddress, userName, password);
if(opend == false)
return;
}
void CameraSurface_GL::closeCamera()
{
m_ipCamera->close();
}
void CameraSurface_GL::slotFrameChanged(YUVFrame frame)
{
m_yuvFrame = frame;
update();
}
void CameraSurface_GL::setup_gl(int width, int height)
{
vid_w = width;
vid_h = height;
if(!vid_w || !vid_h) {
printf("Invalid texture size.\n");
return;
}
if(!setupTextures()) {
return;
}
if(!setupShader()) {
return;
}
glGenVertexArrays(1, &vao);
return;
}
void CameraSurface_GL::resize_gl(int width, int height)
{
win_w = width;
win_h = height;
pm.setToIdentity();
pm.ortho(0, win_w, win_h, 0, 0.0, 100.0f);
glUseProgram(prog);
glUniformMatrix4fv(glGetUniformLocation(prog, "u_pm"), 1, GL_FALSE, pm.data());
}
void CameraSurface_GL::initializeGL()
{
initializeOpenGLFunctions();
glClearColor(0, 0, 0, 1);
setup_gl(1920, 1080);
}
void CameraSurface_GL::paintGL()
{
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
if(m_yuvFrame.yuvData.size())
{
setYPixels((unsigned char*)m_yuvFrame.yuvData.data(), m_yuvFrame.width);
setUPixels((unsigned char*)m_yuvFrame.yuvData.data() + m_yuvFrame.width * m_yuvFrame.height, m_yuvFrame.width / 2);
setVPixels((unsigned char*)m_yuvFrame.yuvData.data() + m_yuvFrame.width * m_yuvFrame.height * 5 / 4, m_yuvFrame.width / 2);
glBindVertexArray(vao);
glUseProgram(prog);
QRect frameRect = rect();
glUniform4f(u_pos, frameRect.left(), frameRect.top(), frameRect.width(), frameRect.height());
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, y_tex);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, u_tex);
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, v_tex);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
}
}
void CameraSurface_GL::resizeGL(int width, int height)
{
glViewport(0, 0, width, height);
resize_gl(width, height);
}
bool CameraSurface_GL::setupTextures()
{
if(textures_created) {
printf("Textures already created.\n");
return false;
}
glGenTextures(1, &y_tex);
glBindTexture(GL_TEXTURE_2D, y_tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, vid_w, vid_h, 0, GL_RED, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glGenTextures(1, &u_tex);
glBindTexture(GL_TEXTURE_2D, u_tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, vid_w/2, vid_h/2, 0, GL_RED, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glGenTextures(1, &v_tex);
glBindTexture(GL_TEXTURE_2D, v_tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, vid_w/2, vid_h/2, 0, GL_RED, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
textures_created = true;
return true;
}
bool CameraSurface_GL::setupShader()
{
if(shader_created) {
printf("Already creatd the shader.\n");
return false;
}
vert = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vert, 1, &YUV420P_VS, 0);
glCompileShader(vert);
frag = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(frag, 1, &YUV420P_FS, 0);
glCompileShader(frag);
prog = glCreateProgram();
glAttachShader(prog, vert);
glAttachShader(prog, frag);
glLinkProgram(prog);
glUseProgram(prog);
glUniform1i(glGetUniformLocation(prog, "y_tex"), 0);
glUniform1i(glGetUniformLocation(prog, "u_tex"), 1);
glUniform1i(glGetUniformLocation(prog, "v_tex"), 2);
u_pos = glGetUniformLocation(prog, "draw_pos");
return true;
}
void CameraSurface_GL::setYPixels(uint8_t* pixels, int stride)
{
glBindTexture(GL_TEXTURE_2D, y_tex);
glPixelStorei(GL_UNPACK_ROW_LENGTH, stride);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, vid_w, vid_h, GL_RED, GL_UNSIGNED_BYTE, pixels);
}
void CameraSurface_GL::setUPixels(uint8_t* pixels, int stride)
{
glBindTexture(GL_TEXTURE_2D, u_tex);
glPixelStorei(GL_UNPACK_ROW_LENGTH, stride);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, vid_w/2, vid_h/2, GL_RED, GL_UNSIGNED_BYTE, pixels);
}
void CameraSurface_GL::setVPixels(uint8_t* pixels, int stride)
{
glBindTexture(GL_TEXTURE_2D, v_tex);
glPixelStorei(GL_UNPACK_ROW_LENGTH, stride);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, vid_w/2, vid_h/2, GL_RED, GL_UNSIGNED_BYTE, pixels);
}
I call the setup_gl function in initializeGL with the hard-coded parameters 1920 and 1080.
That is not what I want. I want to call setup_gl when a YUV frame is received from the IP camera, so I changed my code to the following:
void CameraSurface_GL::initializeGL()
{
initializeOpenGLFunctions();
glClearColor(0, 0, 0, 1);
}
void CameraSurface_GL::slotFrameChanged(YUVFrame frame)
{
if(m_setup == 0)
{
setup_gl(frame.width, frame.height);
resize_gl(frame.width, frame.height);
m_setup = 1;
}
m_yuvFrame = frame;
update();
}
Now nothing is drawn in the view.
Why does this happen?
Can't I call the setup function somewhere other than initializeGL?

GLES 2.0 draws GL_POINTS as squares on Android?

Below is a simple Qt program using a VBO to draw 6 points. The points appear as squares on Android and as single pixels on the desktop. When I change gl_PointSize in the vertex shader, the squares change to the appropriate size on Android but remain single pixels on the desktop.
example.pro
QT += core gui widgets opengl
TARGET = example
TEMPLATE = app
SOURCES = main.cpp
HEADERS = main.h
main.h
#include <QGLWidget>
#include <QGLFunctions>
#include <QGLShader>
class glview : public QGLWidget, protected QGLFunctions
{
Q_OBJECT
public:
explicit glview(QWidget *parent = 0);
protected:
void initializeGL();
void resizeGL(int w, int h);
void paintGL();
private:
quint32 vbo_id[1];
QGLShaderProgram *program;
};
main.cpp
#include <QApplication>
#include "main.h"
struct vrtx {
GLfloat x;
GLfloat y;
GLfloat z;
GLfloat r;
GLfloat g;
GLfloat b;
}__attribute__((packed)) geomrtry[] = {
// x, y, z r, g, b
{1, 1, 0, 1, 0, 0},
{1.5, 2, 0, 0, 1, 0},
{2, 1, 0, 0, 0, 1},
{3, 1, 0, 1, 0, 1},
{3.5, 2, 0, 1, 1, 0},
{4, 1, 0, 0, 1, 1},
};
int main(int argc, char *argv[])
{
QApplication app(argc, argv);
glview widget;
widget.show();
return app.exec();
}
glview::glview(QWidget *parent) : QGLWidget(parent)
{
}
void glview::initializeGL()
{
initializeGLFunctions();
qglClearColor(Qt::white);
QGLShader *vshader = new QGLShader(QGLShader::Vertex, this);
const char *vsrc =
"attribute highp vec4 vertex;\n"
"attribute mediump vec4 colour;\n"
"varying mediump vec4 f_colour;\n"
"uniform mediump mat4 matrix;\n"
"void main(void)\n"
"{\n"
" gl_Position = matrix * vertex;\n"
" f_colour = colour;\n"
" gl_PointSize = 12.0;\n"
"}\n";
vshader->compileSourceCode(vsrc);
QGLShader *fshader = new QGLShader(QGLShader::Fragment, this);
const char *fsrc =
"varying mediump vec4 f_colour;\n"
"void main(void)\n"
"{\n"
" gl_FragColor = f_colour;\n"
"}\n";
fshader->compileSourceCode(fsrc);
program = new QGLShaderProgram(this);
program->addShader(vshader);
program->addShader(fshader);
program->link();
glGenBuffers(1, vbo_id);
glBindBuffer(GL_ARRAY_BUFFER, vbo_id[0]);
glBufferData(GL_ARRAY_BUFFER, sizeof(geomrtry), geomrtry, GL_STATIC_DRAW);
glEnable(GL_DEPTH_TEST);
}
void glview::resizeGL(int w, int h)
{
glViewport(0, 0, w, h);
}
void glview::paintGL()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
QMatrix4x4 matrix;
matrix.ortho(0, 5, 0, 3, -1, 1);
program->bind();
program->setUniformValue("matrix", matrix);
glBindBuffer(GL_ARRAY_BUFFER, vbo_id[0]);
int vertexLocation = program->attributeLocation("vertex");
program->enableAttributeArray(vertexLocation);
glVertexAttribPointer(vertexLocation, 3, GL_FLOAT, GL_FALSE, sizeof(struct vrtx), 0);
int colourLocation = program->attributeLocation("colour");
program->enableAttributeArray(colourLocation);
glVertexAttribPointer(colourLocation, 3, GL_FLOAT, GL_FALSE, sizeof(struct vrtx), ((char*)NULL + 12));
//glDrawArrays(GL_TRIANGLES, 0, sizeof(geomrtry) / sizeof(struct vrtx));
glDrawArrays(GL_POINTS, 0, sizeof(geomrtry) / sizeof(struct vrtx));
glFlush();
}
You're using functionality where the size of rendered points is taken from a built-in gl_PointSize variable set in the vertex shader. This functionality is the default in ES 2.0.
The same functionality is available in desktop OpenGL as well, but it is disabled by default. It can be enabled by calling:
glEnable(GL_PROGRAM_POINT_SIZE);
If this setting is not enabled, desktop OpenGL uses the point size set with the glPointSize() API call.
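For example, you could enable it in initializeGL() and guard it so the same code still compiles against ES (the QT_OPENGL_ES_2 guard and the placement are suggestions, not something the original code needs on Android):
void glview::initializeGL()
{
    initializeGLFunctions();
#ifndef QT_OPENGL_ES_2
    // Desktop GL ignores gl_PointSize unless this is enabled; ES 2.0 always
    // honours it and does not define this enum, hence the guard.
    glEnable(GL_PROGRAM_POINT_SIZE);
#endif
    // ... rest of the setup as before ...
}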

OpenGL VBO's not drawing anything

It's been a while since I last worked with VBOs, and now I'm trying to get them working once again. However, whatever I do, I can't get anything to render, apart from the glClearColor background.
This is my code:
Initialize GL
...
vmml::vec3f eye = vmml::vec3f(0.0, 0.0, 0.0);
vmml::vec3f tar = vmml::vec3f(0.0, 0.0, 1.0);
vmml::vec3f up = vmml::vec3f(0.0, 1.0, 0.0);
viewMatrix = lookAt(eye, tar, up);
Creating the box
glGenBuffers(1, &vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
glBufferData(GL_ARRAY_BUFFER, 72 * sizeof(GLfloat), &vertices[0], GL_STATIC_DRAW);
glGenBuffers(1, &uvBuffer);
glBindBuffer(GL_ARRAY_BUFFER, uvBuffer);
glBufferData(GL_ARRAY_BUFFER, 48 * sizeof(GLfloat), &uvs[0], GL_STATIC_DRAW);
glGenBuffers(1, &normalBuffer);
glBindBuffer(GL_ARRAY_BUFFER, normalBuffer);
glBufferData(GL_ARRAY_BUFFER, 72 * sizeof(GLfloat), &normals[0], GL_STATIC_DRAW);
[code for adding material and texture to box...]
PaintGL
[code for shadows...]
glViewport(0, 0, width, height);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
shadeManager.bindShader(SHADOWSHADER);
vmml::vec4f b1(0.5, 0.0, 0.0, 0.5);
vmml::vec4f b2(0.0, 0.5, 0.0, 0.5);
vmml::vec4f b3(0.0, 0.0, 0.5, 0.5);
vmml::vec4f b4(0.0, 0.0, 0.0, 1.0);
vmml::mat4f bias = vmml::mat4f::ZERO;
bias.set_column(0, b1);
bias.set_column(1, b2);
bias.set_column(2, b3);
bias.set_column(3, b4);
vmml::mat4f depthBiasVP = bias * depthVP;
GLuint depthBiasID = glGetUniformLocation(shadowShaderID, "depthBiasVP");
GLuint lightDirID = glGetUniformLocation(shadowShaderID, "lightInvDir");
GLuint shadowMapID = glGetUniformLocation(shadowShaderID, "shadowMap");
GLuint viewID = glGetUniformLocation(shadowShaderID, "V");
GLuint projectionID = glGetUniformLocation(shadowShaderID, "P");
glUniformMatrix4fv(depthBiasID, 1, GL_FALSE, &depthBiasVP[0][0]);
glUniform3fv(lightDirID, 1, &lightPos[0]);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, depthTextureID);
glUniform1i(shadowMapID, 1);
[calculate eye, target and up...]
viewMatrix = lookAt(eye, target, up);
glUniformMatrix4fv(viewID, 1, GL_FALSE, &viewMatrix[0][0]);
glUniformMatrix4fv(projectionID, 1, GL_FALSE, &projectionMatrix[0][0]);
currentFrustum->setActive(true);
currentFrustum->extractFrustum(projectionMatrix, viewMatrix);
scenegraph.render(false);
And rendering the box
GLuint id = ShaderManager::getInstance().getShader(SHADOWSHADER);
if (depthPass)
id = ShaderManager::getInstance().getShader(DEPTHSHADER);
GLuint mID = glGetUniformLocation(id, "M");
GLuint texID = glGetUniformLocation(id, "tex");
GLuint diffID = glGetUniformLocation(id, "diffMaterial");
GLuint ambiID = glGetUniformLocation(id, "ambiMaterial");
GLuint specID = glGetUniformLocation(id, "specMaterial");
GLuint shinID = glGetUniformLocation(id, "shininess");
glUniformMatrix4fv(mID, 1, GL_FALSE, &mod[0][0]);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture);
glUniform1i(texID, 0);
glUniform3fv(diffID, 1, &values.diffuseMaterial[0]);
glUniform3fv(ambiID, 1, &values.ambientMaterial[0]);
glUniform3fv(specID, 1, &values.specularMaterial[0]);
glUniform1f(shinID, values.shinyMaterial[0]);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
glVertexAttribPointer(
0,
3,
GL_FLOAT,
GL_FALSE,
0,
(void*)0
);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, uvBuffer);
glVertexAttribPointer(
1,
2,
GL_FLOAT,
GL_FALSE,
0,
(void*)0
);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, normalBuffer);
glVertexAttribPointer(
2,
3,
GL_FLOAT,
GL_FALSE,
0,
(void*)0
);
glEnableVertexAttribArray(2);
glDrawArrays(GL_QUADS, 0, 6 * 4 );
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
Shaders..
#version 420 core
layout(location = 0) in vec3 vertexPos;
layout(location = 1) in vec2 vertexUV;
layout(location = 2) in vec3 vertexNorm;
out vec2 UV;
out vec3 position;
out vec3 normal;
out vec3 viewDirection;
out vec3 lightDirection;
out vec4 shadow;
uniform mat4 M;
uniform mat4 V;
uniform mat4 P;
uniform vec3 lightInvDir;
uniform mat4 depthBiasVP;
void main() {
gl_Position = P * V * M * vec4(vertexPos, 1);
UV = vertexUV;
position = (M * vec4(vertexPos, 1)).xyz;
normal = (V * M * vec4(vertexNorm, 0)).xyz;
viewDirection = vec3(0,0,0) - (V * M * vec4(vertexPos, 1)).xyz;
lightDirection = (V * vec4(lightInvDir, 0)).xyz;
shadow = depthBiasVP * M * vec4(vertexPos, 1);
}
#version 420 core
in vec2 UV;
in vec3 position;
in vec3 normal;
in vec3 viewDirection;
in vec3 lightDirection;
in vec4 shadow;
layout(location = 0) out vec3 color;
uniform sampler2D tex;
uniform sampler2D shadowMap;
uniform vec3 diffLight;
uniform vec3 ambiLight;
uniform vec3 specLight;
uniform vec3 diffMaterial;
uniform vec3 ambiMaterial;
uniform vec3 specMaterial;
uniform float shininess;
vec2 disk[16] = vec2[] (
vec2( -0.94201624, -0.39906216 ),
vec2( 0.94558609, -0.76890725 ),
vec2( -0.094184101, -0.92938870 ),
vec2( 0.34495938, 0.29387760 ),
vec2( -0.91588581, 0.45771432 ),
vec2( -0.81544232, -0.87912464 ),
vec2( -0.38277543, 0.27676845 ),
vec2( 0.97484398, 0.75648379 ),
vec2( 0.44323325, -0.97511554 ),
vec2( 0.53742981, -0.47373420 ),
vec2( -0.26496911, -0.41893023 ),
vec2( 0.79197514, 0.19090188 ),
vec2( -0.24188840, 0.99706507 ),
vec2( -0.81409955, 0.91437590 ),
vec2( 0.19984126, 0.78641367 ),
vec2( 0.14383161, -0.14100790 )
);
float random(vec3 seed, int i) {
vec4 s = vec4(seed, i);
float dotProduct = dot(s, vec4(12.9898, 78.233, 45.164, 94.673));
return fract(sin(dotProduct) * 43758.5453);
}
void main() {
vec3 materialDiffuseColor = diffMaterial * texture2D( tex, UV ).rgb;
vec3 materialAmbientColor = ambiMaterial * materialDiffuseColor;
vec3 materialSpecularColor = specMaterial;
vec3 l = normalize(lightDirection);
vec3 n = normalize(normal);
vec3 v = normalize(viewDirection);
vec3 ref = reflect(-l, n);
float diff = clamp(dot(n, l), 0,1);
float spec = clamp(dot(l, ref), 0,1);
float visibility = 1.0;
float bias = 0.005*tan(acos(diff));
bias = clamp(bias, 0.0, 0.01);
vec4 shadowCoordDivide = shadow / shadow.w;
if (texture2D( shadowMap, shadowCoordDivide.xy ).z < shadowCoordDivide.z-bias) {
//visibility = 0.5;
diff = 0;
spec = 0;
}
color = 2 * ambiLight * materialAmbientColor;
if (diff != 0) {
color += visibility * diffLight * materialDiffuseColor * diff;
float iSpec = pow(spec, shininess);
color += visibility * specLight * materialSpecularColor * iSpec;
}
}
I've made sure that the shaders load properly and that the model/view/projection matrices are correct, but still nothing renders. Can anyone point me in the right direction?
Edit 1: Added a code section I forgot and removed some unimportant code.
Edit 2: Stripped the code down to the possible error areas.
Just from looking at your rendering code, it doesn't look like you are passing the view and projection matrices to your vertex shader. What happens when you reference a uniform that has not been passed in depends on how nice your GPU drivers are: some will crash, some will happily feed in a default fallback matrix just to keep the shader running. It may be that your shader is seeing this default matrix and is not transforming your box correctly.
If you are passing in your matrices correctly elsewhere in your code, then please disregard this answer; it's just based off the code you pasted.
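If they are indeed missing on that path, the fix would be a couple of extra uniform uploads in the per-object render code, something like this sketch (the uniform names match the vertex shader in the question; viewMatrix and projectionMatrix are assumed to be reachable there):
GLuint vID = glGetUniformLocation(id, "V");
GLuint pID = glGetUniformLocation(id, "P");
glUniformMatrix4fv(vID, 1, GL_FALSE, &viewMatrix[0][0]);
glUniformMatrix4fv(pID, 1, GL_FALSE, &projectionMatrix[0][0]);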
