What is the magic inside Qt's bindTexture?

I am using Qt5 and the QGLWidget class to render a live stream of pixels. I am having some performance problems and want to set up a pixel buffer to perform asynchronous data transfers.
I am trying to bind a texture the old-fashioned way, but it renders as blank. When I go back to my Qt5 code, everything works.
Does anybody know how to get the standard OpenGL version to work?
Working
void glStream::reserveTextures()
{
    displayBuff = (GLubyte*) calloc(numGLFrames*widthGL*heightGL, sizeof(GLubyte));
    QImage mySurface(&displayBuff[displayStart], widthGL, heightGL,
                     QImage::Format_Indexed8);
    textures[0] = bindTexture(mySurface, GL_TEXTURE_2D, GL_LUMINANCE);
    glBindTexture(GL_TEXTURE_2D, 0);
    glECheck(); // no errors
}
Not Working
void glStream::reserveTextures()
{
    displayBuff = (GLubyte*) calloc(numGLFrames*widthGL*heightGL, sizeof(GLubyte));
    glGenTextures(1, &textures[0]);
    glBindTexture(GL_TEXTURE_2D, textures[0]);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, widthGL, heightGL, 0, GL_LUMINANCE,
                 GL_UNSIGNED_BYTE, &displayBuff[displayStart]);
    glBindTexture(GL_TEXTURE_2D, 0);
    glECheck(); // no errors
}
Update Function
glECheck();
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
program1.bind();
program1.setUniformValue("texture", 0);
program1.enableAttributeArray(vertexAttr1);
program1.enableAttributeArray(vertexTexr1);
program1.setAttributeArray(vertexAttr1, vertices.constData());
program1.setAttributeArray(vertexTexr1, texCoords.constData());
glBindTexture(GL_TEXTURE_2D, textures[0]);
glECheck();
glDrawArrays(GL_TRIANGLES, 0, vertices.size());
glBindTexture(GL_TEXTURE_2D,0);
program1.disableAttributeArray(vertexTexr1);
program1.disableAttributeArray(vertexAttr1);
program1.release();
glECheck();
Shader
QGLShader *fshader1 = new QGLShader(QGLShader::Fragment, this);
const char *fsrc1 =
"uniform sampler2D texture;\n"
"varying mediump vec4 texc;\n"
"void main(void)\n"
"{\n"
" gl_FragColor = texture2D(texture, texc.st);\n"
"}\n";
fshader1->compileSourceCode(fsrc1);
program1.addShader(fshader1);

It would appear that you need to set a few texture parameters before the texture will display: the default GL_TEXTURE_MIN_FILTER is GL_NEAREST_MIPMAP_LINEAR, so a texture without mipmaps is incomplete and samples as black until you switch to a non-mipmap filter.
void glStream::reserveTextures()
{
    displayBuff = (GLubyte*) calloc(numGLFrames*widthGL*heightGL, sizeof(GLubyte));
    glGenTextures(1, &textures[0]);
    glBindTexture(GL_TEXTURE_2D, textures[0]);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, widthGL, heightGL, 0, GL_LUMINANCE,
                 GL_UNSIGNED_BYTE, &displayBuff[displayStart]);
    // This is the magic inside Qt5's implementation
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glBindTexture(GL_TEXTURE_2D, 0);
    glECheck(); // no errors
}

Related

Save OpenGLES texture to PNG file

I was able to create an OpenGL texture from a QImage with code like this:
QImage qImage(file_name);
qImage = qImage.mirrored().convertToFormat(QImage::Format_ARGB32);
GLuint id;
glGenTextures(1, &id);
glBindTexture(GL_TEXTURE_2D, id);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, qImage.width(), qImage.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, qImage.bits());
where file_name is a JPG or PNG file.
And now I am looking for a way to create a QImage from a texture, so I can save it to a PNG file with QImage::save.
Theoretically I can create a QImage with its constructor
QImage::QImage(const uchar *data, int width, int height, QImage::Format format, QImageCleanupFunction cleanupFunction = nullptr, void *cleanupInfo = nullptr)
but it is not clear how to access the texture data.
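One possible approach (a minimal sketch, assuming a desktop GL context and an RGBA texture; the helper name is made up for illustration): read the texture back with glGetTexImage directly into the QImage's buffer, then save it. On OpenGL ES, where glGetTexImage does not exist, you would instead attach the texture to a framebuffer object and use glReadPixels.
QImage textureToImage(GLuint id, int width, int height)
{
    QImage image(width, height, QImage::Format_RGBA8888);
    glBindTexture(GL_TEXTURE_2D, id);
    // Desktop GL only: copy mip level 0 of the bound texture into the QImage buffer.
    glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE, image.bits());
    glBindTexture(GL_TEXTURE_2D, 0);
    // The upload above used a mirrored QImage, so mirror back before saving.
    return image.mirrored();
}
// e.g. textureToImage(id, qImage.width(), qImage.height()).save("texture.png");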

OpenGL - failed to display an image

Learning to display images using QOpenGLWidget. However, I've run into some problems.
How can I pass the GLuint texture variable (the actual texture loaded from the image) into the shader scripts? Like how to bind GLuint texture to uniform sampler2D texture? Maybe I am just not realising I already did that.
What's the difference between attribute vec4 vertexColorIn and uniform sampler2D texture? I think the color comes from the texture.
Can I use glTexCoord2f() and glVertex2f() instead of glVertexAttribPointer()? It's because they seem better to me.
I am still not clear on how OpenGL displays an image, although I've done a lot of research. I'm not quite sure what I'm doing wrong. The image is NOT showing up.
MyGLWiget.cpp
shader scripts:
#define STR(x) #x
#define VS_LOCATION 0
#define FS_LOCATION 1
const char* vertextShader = STR(
    attribute vec4 position;
    attribute vec4 vertexColorIn;
    varying vec4 vertexColorOut;
    void main(void)
    {
        gl_Position = position;
        vertexColorOut = vertexColorIn;
    }
);
const char* fragmentShader = STR(
    varying vec4 vertexColorOut;
    uniform sampler2D texture;
    void main(void)
    {
        ??? = texture2D(???, textureOut).r // no clue how to use it
        gl_FragColor = vertexColorOut;
    }
);
loading an Image texture:
void MyGLWiget::loadTexture(const char* file_path)
{
    img_data = SOIL_load_image(file_path, &width, &height, &channels, SOIL_LOAD_RGB);
    glEnable(GL_TEXTURE_2D);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glGenTextures(1, &texture);
    glBindTexture(GL_TEXTURE_2D, texture);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, img_data);
    SOIL_free_image_data(img_data);
}
initialization:
void MyGLWiget::initializeGL()
{
    initializeOpenGLFunctions();
    program.addShaderFromSourceCode(QGLShader::Vertex, vertextShader);
    program.bindAttributeLocation("position", VS_LOCATION);
    program.addShaderFromSourceCode(QGLShader::Fragment, fragmentShader);
    program.bindAttributeLocation("vertexColorIn", FS_LOCATION);
    program.link();
    program.bind();
    static const GLfloat ver[] = {
        -1.0f, -1.0f, 1.0f, -1.0f, -1.0f, 1.0f, 1.0f, 1.0f
    };
    static const GLfloat tex[] = {
        0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f
    };
    glVertexAttribPointer(VS_LOCATION, 2, GL_FLOAT, 0, 0, ver);
    glEnableVertexAttribArray(VS_LOCATION);
    glVertexAttribPointer(FS_LOCATION, 2, GL_FLOAT, 0, 0, tex);
    glEnableVertexAttribArray(FS_LOCATION);
    program.setUniformValue("texture", texture);
    //texture = program.uniformLocation("texture");
}
paintGL:
I'm really confused about this part. I have no idea what I should use to make it draw an image.
void MyGLWiget::paintGL()
{
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, texture);
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, img_data);
    glUniform1i(texture, 0);
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 1);
}
How can I pass the GLuint texture variable (the actual texture loaded from the image) into the shader scripts? Like how to bind GLuint texture to uniform sampler2D texture? Maybe I am just not realising I already did that.
This binds the texture to texture unit 0:
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture);
This is invalid because texture is not a uniform location, so remove this line:
glUniform1i(texture, 0); // <-- invalid
This is invalid too, because the uniform texture should be set to the number of the texture unit:
program.setUniformValue("texture", texture); // <-- invalid
So replace it with:
program.setUniformValue("texture", 0); // <-- sampler2D texture uses GL_TEXTURE0
Note: I'm assuming here that setUniformValue works correctly.
What's the difference between attribute vec4 vertexColorIn and uniform sampler2D texture? I think the color comes from the texture.
vertexColorIn comes from the VAO and is different for each vertex. texture is the sampler that samples from the texture that's bound to the texture unit that you set above.
In your code you don't need a vertex color, but you do need texture coordinates. So your shaders should look like:
const char* vertextShader = STR(
    attribute vec4 position;
    attribute vec4 texcoordIn;
    varying vec4 texcoordOut;
    void main(void)
    {
        gl_Position = position;
        texcoordOut = texcoordIn;
    }
);
const char* fragmentShader = STR(
    varying vec4 texcoordOut;
    uniform sampler2D texture;
    void main(void)
    {
        gl_FragColor = texture2D(texture, texcoordOut.st);
    }
);
Can I use glTexCoord2f() and glVertex2f() instead of glVertexAttribPointer()? It's because they seem better to me.
glTexCoord2f and glVertex2f are legacy functions that were removed in OpenGL 3, and are available only in the compatibility profile. You shall not use them.
These lines are in the wrong place:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
They shall go after you have bound the texture:
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, img_data);
// sets the filtering for the bound texture:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
Since the question is tagged opengl-4: you don't need to set any uniforms in this case. You can specify the locations and the bindings directly in the shaders:
const char* vertextShader =
    "#version 450 core\n" STR(
    layout(location = 0) in vec4 position;
    layout(location = 1) in vec4 texcoordIn;
    layout(location = 0) out vec4 texcoordOut;
    void main(void)
    {
        gl_Position = position;
        texcoordOut = texcoordIn;
    }
);
const char* fragmentShader =
    "#version 450 core\n" STR(
    layout(location = 0) in vec4 texcoord;
    layout(binding = 0) uniform sampler2D TEX;
    layout(location = 0) out vec4 OUT;
    void main(void)
    {
        OUT = texture(TEX, texcoord.st);
    }
);
A few edits:
const char* vertextShader = STR(
    attribute vec4 position;
    attribute vec4 vertexColorIn;
    varying vec4 vertexColorOut;
    varying vec2 TexCoord; // ---> add
    void main(void)
    {
        gl_Position = position;
        vertexColorOut = vertexColorIn;
        // A hack; ideally you need to pass the UV coordinates for proper texture mapping,
        // as an attribute (or a uniform, depending on preference).
        TexCoord = vec2(position.x/2.0 + 0.5, 0.5 - position.y/2.0);
    }
);
const char* fragmentShader = STR(
    varying vec4 vertexColorOut;
    uniform sampler2D texture;
    varying vec2 TexCoord; // ----> add
    void main(void)
    {
        gl_FragColor = texture2D(texture, TexCoord); // --> here is the change
        //gl_FragColor = vertexColorOut;
    }
);

Qt signal slot cv::Mat unable to read memory access violation

I have a Microsoft Visual Studio application that grabs frames from cameras, and I am trying to display those frames in a Qt application. I do some processing on the frames with OpenCV, so the frames are Mat objects. I use QThreads to parallelize the application. I get an access violation (reading location) when I try to emit a Mat signal from my CameraThread class.
main.cpp
int main(int argc, char *argv[])
{
    QApplication app(argc, argv);
    MainWindow window;
    window.show();
    return app.exec();
}
mainwindow.cpp
#include "main_window.h"
MainWindow::MainWindow()
{
    // create a horizontal widget
    main_layout = new QVBoxLayout;
    QHBoxLayout* row1 = new QHBoxLayout;
    QHBoxLayout* row2 = new QHBoxLayout;
    for (int i = 0; i < 1; i++) {
        camera_array[i] = new CameraWidget(i);
        if (i < 4)
            row1->addWidget(camera_array[i]);
        else
            row2->addWidget(camera_array[i]);
    }
    main_layout->addLayout(row1);
    main_layout->addLayout(row2);
    // make the central widget the main layout window
    central = new QWidget();
    central->setLayout(main_layout);
    setCentralWidget(central);
}
camerawidget.cpp
#include "stdafx.h"
#include "camera_widget.h"
CameraWidget::CameraWidget(int id)
{
    camera_id = id;
    qRegisterMetaType<cv::Mat>("cv::Mat");
    current_frame = cv::imread("camera_1.png");
    thread = new CameraThread(camera_id);
    QObject::connect(thread, SIGNAL(renderFrame(cv::Mat)), this, SLOT(updateFrame(cv::Mat)));
    thread->start();
}
CameraWidget::~CameraWidget()
{
    qDebug("camera widget destructor");
    thread->wait(5000);
}
// initializeGL() function is called just once, before paintGL() is called.
void CameraWidget::initializeGL()
{
    qglClearColor(Qt::black);
    glDisable(GL_DEPTH_TEST);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, 480.0f, 640.0f, 0.0f, 0.0f, 1.0f);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glEnable(GL_TEXTURE_2D);
    glGenTextures(3, &texture);
    glBindTexture(GL_TEXTURE_2D, texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glBindTexture(GL_TEXTURE_2D, texture);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 0, 480.0f, 640.0f, GL_BGR, GL_UNSIGNED_BYTE, NULL);
    glDisable(GL_TEXTURE_2D);
}
void CameraWidget::paintGL()
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glDisable(GL_DEPTH_TEST);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, 480.0f, 640.0f, 0.0f, 0.0f, 1.0f);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glEnable(GL_TEXTURE_2D);
    current_frame_i = QImage(current_frame.data, current_frame.cols, current_frame.rows, current_frame.cols * 3, QImage::Format_RGB888);
    glBindTexture(GL_TEXTURE_2D, texture);
    // ******************************
    // getting access violation here
    // ******************************
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 480.0f, 640.0f, 0.0f, GL_BGR, GL_UNSIGNED_BYTE, current_frame.ptr());
    glBegin(GL_QUADS);
    glTexCoord2i(0, 1); glVertex2i(0, 640.0f);
    glTexCoord2i(0, 0); glVertex2i(0, 0);
    glTexCoord2i(1, 0); glVertex2i(480.0f, 0);
    glTexCoord2i(1, 1); glVertex2i(480.0f, 640.0f);
    glEnd();
    glFlush();
}
void CameraWidget::resizeGL(int w, int h)
{
    // setup viewport, projection etc.
    glViewport(0, 0, w, h);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, 480.0f, 640.0f, 0.0f, 0.0f, 1.0f);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
}
void CameraWidget::updateFrame(cv::Mat image)
{
    current_frame = image;
    update();
}
camerathread.cpp
CameraThread::CameraThread(int id)
{
    camera_q = new bounded_frame_queue(50);
}
void CameraThread::run()
{
    cv::Mat image;
    while (true) {
        if (!camera_q->empty()) {
            image = camera_q->pop();
            if (!image.empty())
                emit renderFrame(image);
        }
        else {
            msleep(1);
        }
    }
}
When I emit renderFrame from camerathread.cpp, I get an access violation (reading location); I cannot read the current_frame.ptr() value in camerawidget.cpp.
Can someone direct me on how to fix this issue?
What I see happening:
1. You get an image from the queue. As per the OpenCV docs:
Mat& cv::Mat::operator= ( const Mat & m )
Assigned, right-hand-side matrix. Matrix assignment is an O(1)
operation. This means that no data is copied but the data is shared
and the reference counter, if any, is incremented. Before assigning
new data, the old data is de-referenced via Mat::release .
2. Then you pass it as cv::Mat image (by value) when emitting the signal. The copy constructor again doesn't copy any data:
Array that (as a whole or partly) is assigned to the constructed
matrix. No data is copied by these constructors. Instead, the header
pointing to m data or its sub-array is constructed and associated with
it. The reference counter, if any, is incremented. So, when you modify
the matrix formed using such a constructor, you also modify the
corresponding elements of m . If you want to have an independent copy
of the sub-array, use Mat::clone() .
3. Your data pointers are queued on the UI thread.
4. You get (or try to get) a new frame, triggering the release from point 1.
5. Your queued slot is executed and crashes...
Suggestion: I haven't worked much with it, but it seems that something like cv::Mat::clone() to make a deep copy is what you need, to prevent the memory from being released before the UI thread uses it.
Or possibly it would be enough to define image right when you pop it from the queue:
cv::Mat image = camera_q->pop();
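A minimal sketch of the worker loop with that change (assuming the deep copy is enough to keep the pixel data alive until the queued slot runs on the UI thread):
void CameraThread::run()
{
    while (true) {
        if (!camera_q->empty()) {
            cv::Mat image = camera_q->pop();
            if (!image.empty())
                emit renderFrame(image.clone()); // deep copy: the emitted Mat owns its own pixel buffer
        }
        else {
            msleep(1);
        }
    }
}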

OpenGL texture not rendering if it's currently active

I was using QOpenGLWidget to render a textured triangle. The code looked fine, but the triangle always rendered black. I had a problem with it for two days until I accidentally found out what the title says.
This is the code. The texture gets loaded to the default location of GL_TEXTURE0, and the code will not work unless I call glActiveTexture(GL_TEXTURE1) at the end. GL_TEXTURE1 is just an example; it can be any other texture unit except the one where the texture actually is. Without that call the object will be black.
QImage ready;
QImage image("C:/Users/Gamer/Desktop/New folder/ring.jpg");
ready = image.convertToFormat(QImage::Format_RGBA8888);
glGenTextures(1, &texture);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture);
glUniform1i(glGetUniformLocation(program.programId(), "samp"), 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, ready.width(), ready.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, ready.constBits());
glGenerateMipmap(GL_TEXTURE_2D);
glActiveTexture(GL_TEXTURE1);
I've tried some tests, creating multiple textures and displaying them all at once; the last active texture was always black unless I activated some other unoccupied unit.
I don't know what to make of this. I'm a beginner in OpenGL and Qt, but this doesn't seem right.
EDIT:
Main function
#include "mainwindow.h"
#include <QApplication>
#include <QSurfaceFormat>
int main(int argc, char *argv[])
{
    QApplication a(argc, argv);
    QSurfaceFormat format;
    format.setVersion(3, 3);
    format.setProfile(QSurfaceFormat::CoreProfile);
    format.setDepthBufferSize(24);
    format.setStencilBufferSize(8);
    format.setSamples(4);
    format.setSwapInterval(0);
    QSurfaceFormat::setDefaultFormat(format);
    MainWindow w;
    w.show();
    return a.exec();
}
Widget code
#include "openglwidget.h"
#include <QOpenGLShaderProgram>
#include <QImage>
#include <QDebug>
OpenGLWidget::OpenGLWidget(QWidget *parent) :
    QOpenGLWidget(parent)
{
}
OpenGLWidget::~OpenGLWidget()
{
    glDeleteBuffers(1, &vbo);
    glDeleteVertexArrays(1, &vao);
    glDeleteTextures(1, &texture);
}
void OpenGLWidget::initializeGL()
{
    QOpenGLFunctions_3_3_Core::initializeOpenGLFunctions();
    GLfloat vertices[] = {
        0.0f, 0.75f, 0.0f,
        -0.75f, -0.75f, 0.0f,
        0.75f, -0.75f, 0.0f,
        0.5f, 0.0f,
        0.0f, 1.0f,
        1.0f, 1.0f
    };
    glGenVertexArrays(1, &vao);
    glBindVertexArray(vao);
    program.addShaderFromSourceFile(QOpenGLShader::Vertex, "C:/Users/Gamer/Desktop/New folder/vertex.vert");
    program.addShaderFromSourceFile(QOpenGLShader::Fragment, "C:/Users/Gamer/Desktop/New folder/fragment.frag");
    program.link();
    program.bind();
    glGenBuffers(1, &vbo);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, (void*)36);
    glEnableVertexAttribArray(1);
    QImage ready;
    QImage image("C:/Users/Gamer/Desktop/New folder/ring.jpg");
    ready = image.convertToFormat(QImage::Format_RGBA8888);
    glGenTextures(1, &texture);
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, texture);
    glUniform1i(glGetUniformLocation(program.programId(), "samp"), 0);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, ready.width(), ready.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, ready.constBits());
    glGenerateMipmap(GL_TEXTURE_2D);
    // glActiveTexture(GL_TEXTURE1);
}
void OpenGLWidget::paintGL()
{
    GLfloat yellow[] = {1.0, 1.0, 0.0, 0.0};
    glClearBufferfv(GL_COLOR, 0, yellow);
    glDrawArrays(GL_TRIANGLES, 0, 3);
}
void OpenGLWidget::resizeGL(int w, int h)
{
    glViewport(0, 0, w, h);
}
And shaders
#version 330 core
layout(location = 0) in vec3 pos;
layout(location = 1) in vec2 coord;
out vec2 tc;
void main(void)
{
    tc = coord;
    gl_Position = vec4(pos, 1.0);
}

#version 330 core
uniform sampler2D samp;
in vec2 tc;
out vec4 color;
void main(void)
{
    color = texture(samp, tc);
}
QOpenGLWidget is a rather complex abstraction which has some side effects which you might not expect. Quoting from the Qt5 docs:
All rendering happens into an OpenGL framebuffer object. makeCurrent() ensure that it is bound in the context. Keep this in mind when creating and binding additional framebuffer objects in the rendering code in paintGL(). Never re-bind the framebuffer with ID 0. Instead, call defaultFramebufferObject() to get the ID that should be bound.
Now, this in itself isn't an issue. However, looking at the description for the initializeGL() method (my emphasis):
There is no need to call makeCurrent() because this has already been done when this function is called. Note however that the framebuffer is not yet available at this stage, so avoid issuing draw calls from here. Defer such calls to paintGL() instead.
Now, this in itself still is not the issue. But it means that Qt will create the FBO between initializeGL and the first paintGL. Since Qt creates a texture as the color buffer for the FBO, it will re-use the currently active texture unit and change the texture binding you established in initializeGL.
If you, on the other hand, set glActiveTexture to something other than unit 0, Qt will screw up the binding of that unit instead, but since you only use unit 0, it will not have any negative effects in your example.
You need to bind the texture to the texture unit before drawing. Texture unit state is not part of program state, unlike uniforms. It is unusual to try to set texture unit state during program startup; that would require allocating a different texture unit to each program (not out of the question, it's just not the way things are normally done).
Add the following line to paintGL, before the draw call:
glBindTexture(GL_TEXTURE_2D, texture);
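With that change, a sketch of the paintGL above would look like this (the unit/binding comments are the only additions):
void OpenGLWidget::paintGL()
{
    GLfloat yellow[] = {1.0f, 1.0f, 0.0f, 0.0f};
    glClearBufferfv(GL_COLOR, 0, yellow);
    glActiveTexture(GL_TEXTURE0);          // the unit the "samp" sampler reads from
    glBindTexture(GL_TEXTURE_2D, texture); // re-bind every frame, in case Qt changed the binding
    glDrawArrays(GL_TRIANGLES, 0, 3);
}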

OpenGL Texture Rate too slow?

I have a 5-megapixel texture that I am having trouble updating. The texture is displayed on a rectangle, not dissimilar to a video stream.
The OpenGL commands execute quickly, but the real texture update rate is suboptimal, perhaps only 3 actual frames per second. There is some improvement, but not much, when using a smaller texture (500x500).
The machine has an NVIDIA GTX 570.
My initial efforts used glTexSubImage2D and glBufferSubData, but these performed slightly worse than the memory-mapped scheme shown below.
Is there any way to force the graphics card to update the texture? How is video streaming software written?
Render Loop
void glStream::paintGL()
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glLoadIdentity();
    program1.bind();
    program1.setUniformValue("texture", 0);
    program1.enableAttributeArray(vertexAttr1);
    program1.enableAttributeArray(vertexTexr1);
    program1.setAttributeArray(vertexAttr1, vertices.constData());
    program1.setAttributeArray(vertexTexr1, texCoords.constData());
    //
    glECheck();
    glBindTexture(GL_TEXTURE_2D, textures[0]);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbos[0]);
    void* memory = glMapBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, GL_WRITE_ONLY);
    device->fillBuffer((unsigned char *)memory, heightGL, widthGL); // takes 2ms (not long)
    glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER_ARB);
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, widthGL, heightGL, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);
    glDrawArrays(GL_TRIANGLES, 0, vertices.size());
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
    glBindTexture(GL_TEXTURE_2D, 0);
    //
    program1.disableAttributeArray(vertexTexr1);
    program1.disableAttributeArray(vertexAttr1);
    program1.release();
    glECheck(); // no errors
}
Texture Reservation
void glStream::reserveTextures()
{
    assert(numGLFrames > 0);
    assert(glGenBuffers);
    displayBuff = (GLubyte*) calloc(numGLFrames*widthGL*heightGL, sizeof(GLubyte)); // GL_RGB8
    memset(displayBuff, 100, numGLFrames*widthGL*heightGL*sizeof(GLubyte));
    glGenBuffers(1, &pbos[0]);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbos[0]);
    glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB,
                 numGLFrames*widthGL*heightGL*sizeof(GLubyte),
                 &displayBuff[0], GL_STREAM_DRAW);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
    glGenTextures(1, &textures[0]);
    glBindTexture(GL_TEXTURE_2D, textures[0]);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE,
                 widthGL, heightGL, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_FALSE);
    glBindTexture(GL_TEXTURE_2D, 0);
}
Initialization
void glStream::initializeGL()
{
    GLenum err = glewInit();
    if (GLEW_OK != err)
    {
        const char * emssage = (const char*)glewGetErrorString(err);
        QMessageBox::information(0, "OpenGL 3.x Context Example",
                                 emssage);
        exit(20);
    }
    glDisable(GL_DEPTH_TEST);
    QGLShader *vshader1 = new QGLShader(QGLShader::Vertex, this);
    const char *vsrc1 =
        "attribute vec2 coord2d;\n"
        "attribute mediump vec4 texCoord;\n"
        "varying mediump vec4 texc;\n"
        "void main()\n"
        "{\n"
        "    gl_Position = vec4(coord2d, 0.0, 1.0);\n"
        "    texc = texCoord;\n"
        "}\n";
    vshader1->compileSourceCode(vsrc1);
    QGLShader *fshader1 = new QGLShader(QGLShader::Fragment, this);
    const char *fsrc1 =
        "uniform sampler2D texture;\n"
        "varying mediump vec4 texc;\n"
        "void main(void)\n"
        "{\n"
        "    gl_FragColor = texture2D(texture, texc.st);\n"
        "}\n";
    fshader1->compileSourceCode(fsrc1);
    program1.addShader(vshader1);
    program1.addShader(fshader1);
    program1.link();
    vertexAttr1 = program1.attributeLocation("coord2d");
    vertexTexr1 = program1.attributeLocation("texCoord");
    // Create the vertex buffer.
    vertices.clear();
    float u = 1;
#define AVEC -u,u
#define BVEC -u,-u
#define CVEC u,u
#define DVEC u,-u
    vertices << QVector2D(AVEC); vertices << QVector2D(BVEC);
    vertices << QVector2D(CVEC); vertices << QVector2D(BVEC);
    vertices << QVector2D(DVEC); vertices << QVector2D(CVEC);
    // Create the texture coordinate buffer
#define TAVEC 0,1
#define TBVEC 0,0
#define TCVEC 1,1
#define TDVEC 1,0
    texCoords << QVector2D(TAVEC); texCoords << QVector2D(TBVEC);
    texCoords << QVector2D(TCVEC); texCoords << QVector2D(TBVEC);
    texCoords << QVector2D(TDVEC); texCoords << QVector2D(TCVEC);
    glECheck();
    reserveTextures();
}
Edit 1
I am confident that fillBuffer comes up with a new frame, because in another part of the code I write this texture to a file and it is indeed different each time. It's a sad day when my file I/O is faster than an OpenGL texture update.
Edit 2
I tried out FRAPS and I verified that the render loop is going at ~18 FPS, but the visible updates are slower (maybe 3 FPS). What could account for such a discrepancy?
The PBO is much larger than the texture, in the call to glBufferData:
numGLFrames*widthGL*heightGL*sizeof(GLubyte)
You're allocating a PBO large enough for multiple textures (frames), but only ever reading/writing one frame's worth of data.
If you make the PBO the same size as the texture and use glTexImage2D instead of glTexSubImage2D, is it much faster?
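For reference, a sketch of the buffer allocation sized to a single frame (assuming only one widthGL x heightGL luminance frame is uploaded per update, as in the paintGL above):
glGenBuffers(1, &pbos[0]);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbos[0]);
glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB,
             widthGL*heightGL*sizeof(GLubyte), // one frame, not numGLFrames of them
             NULL, GL_STREAM_DRAW);            // no initial data; it is rewritten via glMapBuffer each frame
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);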
