RENDER WARNING: texture bound to texture unit 0 is not renderable. It maybe non-power-of-2 and have incompatible texture filtering - webgl2

So I'm trying to use WebGL to offload some of the data processing needed for an image.
I have two stages: in the first, I 'render' unsigned ints to a texture.
On the second pass, I read from this texture and render to the canvas.
If I define the texture as RGBA, I have no problems. But when I change the internal format to RGBA32UI, I keep getting:
RENDER WARNING: texture bound to texture unit 0 is not renderable. It maybe non-power-of-2 and have incompatible texture filtering
I reduced my shaders to a single pixel and I'm still getting the same error.
The texture is initialized like this:
var texture = gl.createTexture();
gl.activeTexture(gl.TEXTURE0 + 0);
gl.bindTexture(gl.TEXTURE_2D, texture);
{
var level = 0;
var internalFormat = gl.RGBA32UI;
var border = 0;
var format = gl.RGBA_INTEGER;
var type = gl.UNSIGNED_INT;
var data = null;
gl.texImage2D(gl.TEXTURE_2D, level, internalFormat,
1, 1, border, format, type, data);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
}
var fb = gl.createFramebuffer();
gl.bindFramebuffer(gl.FRAMEBUFFER, fb);
var level = 0;
var attachmentPoint = gl.COLOR_ATTACHMENT1;
gl.framebufferTexture2D(gl.FRAMEBUFFER, attachmentPoint, gl.TEXTURE_2D, texture, level);
and inside the fragment shader I have two color outputs:
layout(location = FLOAT_COLOR_LOCATION) out vec4 float_color;
layout(location = UINT_COLOR_LOCATION) out uvec4 uint_color;
Thanks for the help!

The issue is that you're using COLOR_ATTACHMENT1 and skipping COLOR_ATTACHMENT0.
IIRC you need to start at attachment 0 and work your way up.
You should also be checking that the framebuffer is complete with gl.checkFramebufferStatus.
Also, integer textures are not filterable, so you need to change gl.LINEAR to gl.NEAREST.
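Applied to your setup (a minimal sketch, keeping your variable names), the fix amounts to:
// integer textures are not filterable, so both filters must be NEAREST
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
// attach starting at COLOR_ATTACHMENT0, not COLOR_ATTACHMENT1
var attachmentPoint = gl.COLOR_ATTACHMENT0;
gl.framebufferTexture2D(gl.FRAMEBUFFER, attachmentPoint, gl.TEXTURE_2D, texture, level);
// verify the attachment combination is actually renderable
if (gl.checkFramebufferStatus(gl.FRAMEBUFFER) !== gl.FRAMEBUFFER_COMPLETE) {
console.error('framebuffer is not complete');
}
The snippet below tests an RGBA32UI texture on both COLOR_ATTACHMENT1 and COLOR_ATTACHMENT0: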
const gl = document.createElement("canvas").getContext("webgl2");
testAttachment(gl.COLOR_ATTACHMENT1);
testAttachment(gl.COLOR_ATTACHMENT0);
function testAttachment(attachmentPoint) {
function createTexture() {
var texture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture);
var level = 0;
var internalFormat = gl.RGBA32UI;
var border = 0;
var format = gl.RGBA_INTEGER;
var type = gl.UNSIGNED_INT;
var data = null;
gl.texImage2D(gl.TEXTURE_2D, level, internalFormat,
1, 1, border, format, type, data);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
return texture;
}
var texture = createTexture();
var fb = gl.createFramebuffer();
gl.bindFramebuffer(gl.FRAMEBUFFER, fb);
var level = 0;
gl.framebufferTexture2D(gl.FRAMEBUFFER, attachmentPoint, gl.TEXTURE_2D, texture, level);
const status = gl.checkFramebufferStatus(gl.FRAMEBUFFER);
console.log(glEnumToString(gl, attachmentPoint), glEnumToString(gl, status));
if (status !== gl.FRAMEBUFFER_COMPLETE) {
return;
}
const vs = `#version 300 es
void main() {
gl_Position = vec4(0,0,0,1);
gl_PointSize = 100.0;
}
`;
const fs = `#version 300 es
uniform highp usampler2D color;
out uvec4 outColor;
void main() {
outColor = texture(color, gl_PointCoord);
}
`;
const prg = twgl.createProgram(gl, [vs, fs]);
gl.useProgram(prg);
// need a different input texture than output texture
const inTex = createTexture();
// no need to set uniforms since they default to 0
// so using texture unit 0
gl.drawArrays(gl.POINTS, 0, 1);
// check that it rendered without error
console.log(glEnumToString(gl, gl.getError()));
}
function glEnumToString(gl, value) {
if (value === 0) {
return "NONE";
}
for (let key in gl) {
if (gl[key] === value) {
return key;
}
}
return "0x" + value.toString(16);
}
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>

Related

How to properly use QOpenGLBuffer.PixelPackBuffer with PyQt5

I am trying to read the color buffer content of the default framebuffer in PyQt5 using a pixel buffer object provided by the Qt OpenGL framework.
It looks like the read is unsuccessful, because the resulting image always contains all zeros. There are very few examples of pixel buffers with PyQt5, so I was mostly relying on this C++ tutorial explaining pixel buffers, specifically the section Example: Asynchronous Read-back.
My code goes something like this:
class GLCanvas(QtWidgets.QOpenGLWidget):
# ...
def screenDump(self):
"""
Takes a screenshot and returns a pixmap.
:returns: A pixmap with the rendered content.
:rtype: QPixmap
"""
self.makeCurrent()
w = self.size().width()
h = self.size().height()
ppo = QtGui.QOpenGLBuffer(QtGui.QOpenGLBuffer.PixelPackBuffer)
ppo.setUsagePattern(QOpenGLBuffer.StaticRead)
ppo.create()
success = ppo.bind()
if success:
ppo.allocate(w * h * 4)
# Render the stuff
# ...
# Read the color buffer.
glReadBuffer(GL_FRONT)
glReadPixels(0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, 0)
# TRY1: Create an image with pixel buffer data - Doesn't work, image contains all zeros.
pixel_buffer_mapped = ppo.map(QOpenGLBuffer.ReadOnly)
image = QtGui.QImage(sip.voidptr(pixel_buffer_mapped), w, h, QtGui.QImage.Format_ARGB32)
ppo.unmap()
# TRY2: Create an image with pixel buffer data - Doesn't work, image contains all zeros.
# image = QtGui.QImage(w, h, QtGui.QImage.Format_ARGB32)
# bits = image.constBits()
# ppo.read(0, bits, w * h * 4)
ppo.release()
pixmap = QtGui.QPixmap.fromImage(image)
return pixmap
Any help would be greatly appreciated.
I didn't have any success after a couple of days, so I decided to implement the color buffer fetching with a pixel buffer object in C++, and then use SWIG to pass the data to Python.
I'm posting relevant code, maybe it will help somebody.
C++ side
// renderer.cpp
class Renderer{
// ...
void resize(int width, int height) {
// Set the viewport
glViewport(0, 0, width, height);
// Store width and height
width_ = width;
height_ = height;
// ...
}
// -------------------------------------------------------------------------- //
// Returns the color buffer data in GL_RGBA format.
GLubyte* screenDumpCpp(){
// Check if pixel buffer objects are available.
if (!GLInfo::pixelBufferSupported()){
return 0;
}
// Get the color buffer size in bytes.
int channels = 4;
int data_size = width_ * height_ * channels;
GLuint pbo_id;
// Generate pixel buffer for reading.
glGenBuffers(1, &pbo_id);
glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo_id);
glBufferData(GL_PIXEL_PACK_BUFFER, data_size, 0, GL_STREAM_READ);
// Set the framebuffer to read from.
glReadBuffer(GL_FRONT);
// Read the framebuffer and store data in the pixel buffer.
glReadPixels(0, 0, width_, height_, GL_RGBA, GL_UNSIGNED_BYTE, 0);
// Map the pixel buffer and copy its contents out. The mapped pointer is
// only valid until glUnmapBuffer, so the data must be copied before unmapping.
GLubyte* pixel_buffer = (GLubyte*)glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
GLubyte* pixels = new GLubyte[data_size];
if (pixel_buffer) {
memcpy(pixels, pixel_buffer, data_size);
}
// Cleanup.
glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
glDeleteBuffers(1, &pbo_id);
return pixels;
}
// Returns the color buffer data in RGBA format as a numpy array.
PyObject* screenDump(){
// Get screen dump.
GLubyte* cpp_image = screenDumpCpp();
int channels = 4;
int image_size = width_* height_ * channels;
// Setup dimensions for numpy vector.
PyObject * python_image = NULL;
int ndim = 1;
npy_intp dims[1] = {image_size};
// Set up numpy vector.
python_image = PyArray_SimpleNew(ndim, dims, NPY_UINT8);
GLubyte * data = static_cast<GLubyte *>(PyArray_DATA(toPyArrayObject(python_image)));
// Copy screen dump to python space, then release the temporary buffer.
memcpy(data, cpp_image, image_size);
delete[] cpp_image;
// return screen dump to python.
return python_image;
}
};
// glinfo.cpp
const GLint GLInfo::glVersionInt(){ ... }
GLint GLInfo::GLV(int major, int minor){ ... }
bool GLInfo::pixelBufferSupported(){
const GLint version = GLInfo::glVersionInt();
bool supported = false;
if (version >= GLInfo::GLV(1, 5) && version < GLInfo::GLV(3, 0)){
supported = true;
}
else if (version >= GLInfo::GLV(3, 0)){
GLint extensions_number;
glGetIntegerv(GL_NUM_EXTENSIONS, &extensions_number);
std::string pixel_buffer_extension("GL_ARB_pixel_buffer_object");
while (extensions_number--) {
const auto extension_name = reinterpret_cast<const char *>(glGetStringi(GL_EXTENSIONS, extensions_number));
std::string extension_name_str(extension_name);
if (pixel_buffer_extension == extension_name_str) {
supported = true;
break;
}
}
}
return supported;
}
Python side
# ...
class MyCanvas(QOpenGLWidget):
def __init__(self):
# Get renderer from c++
self._renderer = Renderer()
def resizeGL(self, width, height):
self._renderer.resize(width, height)
# ...
if __name__ == '__main__':
# ...
canvas = MyCanvas()
canvas.show()
width = canvas.width()
height = canvas.height()
data = canvas._renderer.screenDump()
image = QtGui.QImage(data.data, width, height, QtGui.QImage.Format_RGBA8888)
new_image = image.mirrored()
pixmap = QtGui.QPixmap.fromImage(new_image)
pixmap.save(path)
sys.exit(app.exec_())

PCL visualizer: What's the meaning of getViewerPose

I don't quite understand what pose is returned by pcl::visualization::PCLVisualizer::getViewerPose. Is this pose relative to the viewer, or something else? If so, how is the initial pose of the viewer defined?
Based on the code, getViewerPose just retrieves the pose of the active camera in world coordinates: the translation is the camera position, and the columns of the rotation are the camera's x (right), y (view-up), and z (viewing direction) axes.
Eigen::Affine3f pcl::visualization::PCLVisualizer::getViewerPose (int viewport)
{
Eigen::Affine3f ret (Eigen::Affine3f::Identity ());
rens_->InitTraversal ();
vtkRenderer* renderer = nullptr;
if (viewport == 0)
viewport = 1;
int viewport_i = 1;
while ((renderer = rens_->GetNextItem ()))
{
if (viewport_i == viewport)
{
vtkCamera& camera = *renderer->GetActiveCamera ();
Eigen::Vector3d pos, x_axis, y_axis, z_axis;
camera.GetPosition (pos[0], pos[1], pos[2]);
camera.GetViewUp (y_axis[0], y_axis[1], y_axis[2]);
camera.GetFocalPoint (z_axis[0], z_axis[1], z_axis[2]);
z_axis = (z_axis - pos).normalized ();
x_axis = y_axis.cross (z_axis).normalized ();
ret.translation () = pos.cast<float> ();
ret.linear ().col (0) << x_axis.cast<float> ();
ret.linear ().col (1) << y_axis.cast<float> ();
ret.linear ().col (2) << z_axis.cast<float> ();
return ret;
}
viewport_i ++;
}
return ret;
}

pass data between shader programs

OK, I'm going to keep this as simple as possible: I want to pass data between shader programs. I'm currently using readPixels to do that, but I feel it may be slowing operations down, and I'm exploring faster options.
What my program does:
program1 does my rendering to the canvas.
program2 does some wonderful operations in its shaders that I want to pass to program1.
MY QUESTIONS:
Is it possible to use the VBO from program2 and pass that to program1 for rendering? From what it sounds like in the link I give below, you can't share data across contexts, meaning the data from one buffer can't be used for another. But maybe I'm missing something.
I believe the method mentioned in this article would do what I'm looking for by rendering to a canvas and then using texImage2D to update program1 (Copy framebuffer data from one WebGLRenderingContext to another?). Am I correct? If so, would this be faster than using readPixels? (I ask because if texImage2D is about the same, I won't bother.)
Thanks in advance to anyone who answers.
The normal way to pass data from one shader to the next is to render to a texture (by attaching that texture to a framebuffer). Then pass that texture to the second shader.
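In outline, the pattern looks like this (a minimal sketch; tex, prg1 and prg2 are placeholders for your texture and programs):
// at init time: attach the texture to a framebuffer
const fb = gl.createFramebuffer();
gl.bindFramebuffer(gl.FRAMEBUFFER, fb);
gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, tex, 0);
// pass 1: render with the first program into the texture
gl.bindFramebuffer(gl.FRAMEBUFFER, fb);
gl.useProgram(prg1);
// ...draw...
// pass 2: render to the canvas with the second program, sampling the texture
gl.bindFramebuffer(gl.FRAMEBUFFER, null);
gl.useProgram(prg2);
gl.bindTexture(gl.TEXTURE_2D, tex);
// ...draw...
Here's a complete working example: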
function main() {
const gl = document.querySelector('canvas').getContext('webgl2');
if (!gl) {
return alert('need webgl2');
}
const vs1 = `#version 300 es
void main () {
gl_Position = vec4(0, 0, 0, 1);
gl_PointSize = 64.0;
}
`;
const fs1 = `#version 300 es
precision highp float;
out vec4 myOutColor;
void main() {
myOutColor = vec4(fract(gl_PointCoord * 4.), 0, 1);
}
`;
const vs2 = `#version 300 es
in vec4 position;
void main () {
gl_Position = position;
gl_PointSize = 32.0;
}
`;
const fs2 = `#version 300 es
precision highp float;
uniform sampler2D tex;
out vec4 myOutColor;
void main() {
myOutColor = texture(tex, gl_PointCoord);
}
`;
// make 2 programs
const prg1 = twgl.createProgram(gl, [vs1, fs1]);
const prg2 = twgl.createProgram(gl, [vs2, fs2]);
// make a texture
const tex = gl.createTexture();
const texWidth = 64;
const texHeight = 64;
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA8, texWidth, texHeight, 0,
gl.RGBA, gl.UNSIGNED_BYTE, null);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
// attach texture to framebuffer
const fb = gl.createFramebuffer();
gl.bindFramebuffer(gl.FRAMEBUFFER, fb);
gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0,
gl.TEXTURE_2D, tex, 0);
// render to texture
gl.viewport(0, 0, texWidth, texHeight);
gl.useProgram(prg1);
gl.drawArrays(gl.POINTS, 0, 1);
// render texture (output of prg1) to canvas using prg2
gl.bindFramebuffer(gl.FRAMEBUFFER, null);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.useProgram(prg2);
// note: the texture is already bound to texture unit 0
// and uniforms default to 0 so the texture is already setup
const posLoc = gl.getAttribLocation(prg2, 'position')
const numDraws = 12
for (let i = 0; i < numDraws; ++i) {
const a = i / numDraws * Math.PI * 2;
gl.vertexAttrib2f(posLoc, Math.sin(a) * .7, Math.cos(a) * .7);
gl.drawArrays(gl.POINTS, 0, 1);
}
}
main();
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
<canvas></canvas>
You can also use "transform feedback" to store the outputs of a vertex shader to one or more buffers and of course those buffers can be used as input to another shader.
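The key calls, in a minimal sketch (program and buffer names are placeholders; a complete example follows):
// before linking, declare which vertex shader outputs to capture
gl.transformFeedbackVaryings(prg, ['position', 'color'], gl.SEPARATE_ATTRIBS);
gl.linkProgram(prg);
// bind the buffers that will receive the captured outputs
const tf = gl.createTransformFeedback();
gl.bindTransformFeedback(gl.TRANSFORM_FEEDBACK, tf);
gl.bindBufferBase(gl.TRANSFORM_FEEDBACK_BUFFER, 0, positionBuffer);
gl.bindBufferBase(gl.TRANSFORM_FEEDBACK_BUFFER, 1, colorBuffer);
// wrap the draw call whose vertex shader outputs you want captured
gl.beginTransformFeedback(gl.POINTS);
gl.drawArrays(gl.POINTS, 0, numPoints);
gl.endTransformFeedback();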
// this example from
// https://webgl2fundamentals.org/webgl/lessons/resources/webgl-state-diagram.html?exampleId=transform-feedback
const canvas = document.querySelector('canvas');
const gl = canvas.getContext('webgl2');
const genPointsVSGLSL = `#version 300 es
uniform int numPoints;
out vec2 position;
out vec4 color;
#define PI radians(180.0)
void main() {
float u = float(gl_VertexID) / float(numPoints);
float a = u * PI * 2.0;
position = vec2(cos(a), sin(a)) * 0.8;
color = vec4(u, 0, 1.0 - u, 1);
}
`;
const genPointsFSGLSL = `#version 300 es
void main() {
discard;
}
`;
const drawVSGLSL = `#version 300 es
in vec4 position;
in vec4 color;
out vec4 v_color;
void main() {
gl_PointSize = 20.0;
gl_Position = position;
v_color = color;
}
`;
const drawFSGLSL = `#version 300 es
precision highp float;
in vec4 v_color;
out vec4 outColor;
void main() {
outColor = v_color;
}
`;
const createShader = function(gl, type, glsl) {
const shader = gl.createShader(type)
gl.shaderSource(shader, glsl)
gl.compileShader(shader)
if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
throw new Error(gl.getShaderInfoLog(shader))
}
return shader
};
const createProgram = function(gl, vsGLSL, fsGLSL, outVaryings) {
const vs = createShader(gl, gl.VERTEX_SHADER, vsGLSL)
const fs = createShader(gl, gl.FRAGMENT_SHADER, fsGLSL)
const prg = gl.createProgram()
gl.attachShader(prg, vs)
gl.attachShader(prg, fs)
if (outVaryings) {
gl.transformFeedbackVaryings(prg, outVaryings, gl.SEPARATE_ATTRIBS)
}
gl.linkProgram(prg)
if (!gl.getProgramParameter(prg, gl.LINK_STATUS)) {
throw new Error(gl.getProgramInfoLog(prg))
}
return prg
};
const genProg = createProgram(gl, genPointsVSGLSL, genPointsFSGLSL, ['position', 'color']);
const drawProg = createProgram(gl, drawVSGLSL, drawFSGLSL);
const numPointsLoc = gl.getUniformLocation(genProg, 'numPoints');
const posLoc = gl.getAttribLocation(drawProg, 'position');
const colorLoc = gl.getAttribLocation(drawProg, 'color');
const numPoints = 24;
// make a vertex array and attach 2 buffers
// one for 2D positions, 1 for colors.
const dotVertexArray = gl.createVertexArray();
gl.bindVertexArray(dotVertexArray);
const positionBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);
gl.bufferData(gl.ARRAY_BUFFER, numPoints * 2 * 4, gl.DYNAMIC_DRAW);
gl.enableVertexAttribArray(posLoc);
gl.vertexAttribPointer(
posLoc, // location
2, // size (components per iteration)
gl.FLOAT, // type of data to get from buffer
false, // normalize
0, // stride (bytes to advance each iteration)
0, // offset (bytes from start of buffer)
);
const colorBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, colorBuffer);
gl.bufferData(gl.ARRAY_BUFFER, numPoints * 4 * 4, gl.DYNAMIC_DRAW);
gl.enableVertexAttribArray(colorLoc);
gl.vertexAttribPointer(
colorLoc, // location
4, // size (components per iteration)
gl.FLOAT, // type of data to get from buffer
false, // normalize
0, // stride (bytes to advance each iteration)
0, // offset (bytes from start of buffer)
);
// This is not really needed but if we end up binding anything
// to ELEMENT_ARRAY_BUFFER, say we are generating indexed geometry,
// we'll change dotVertexArray's ELEMENT_ARRAY_BUFFER. By binding
// null here that won't happen.
gl.bindVertexArray(null);
// setup a transform feedback object to write to
// the position and color buffers
const tf = gl.createTransformFeedback();
gl.bindTransformFeedback(gl.TRANSFORM_FEEDBACK, tf);
gl.bindBufferBase(gl.TRANSFORM_FEEDBACK_BUFFER, 0, positionBuffer);
gl.bindBufferBase(gl.TRANSFORM_FEEDBACK_BUFFER, 1, colorBuffer);
gl.bindTransformFeedback(gl.TRANSFORM_FEEDBACK, null);
// above this line is initialization code
// --------------------------------------
// below is rendering code.
// --------------------------------------
// First compute points into buffers
// no need to call the fragment shader
gl.enable(gl.RASTERIZER_DISCARD);
// unbind the buffers so we don't get errors.
gl.bindBuffer(gl.TRANSFORM_FEEDBACK_BUFFER, null);
gl.bindBuffer(gl.ARRAY_BUFFER, null);
gl.useProgram(genProg);
// generate numPoints of positions and colors
// into the buffers
gl.bindTransformFeedback(gl.TRANSFORM_FEEDBACK, tf);
gl.beginTransformFeedback(gl.POINTS);
gl.uniform1i(numPointsLoc, numPoints);
gl.drawArrays(gl.POINTS, 0, numPoints);
gl.endTransformFeedback();
gl.bindTransformFeedback(gl.TRANSFORM_FEEDBACK, null);
// turn on using fragment shaders again
gl.disable(gl.RASTERIZER_DISCARD);
// --------------------------------------
// Now draw using the buffers we just computed
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.bindVertexArray(dotVertexArray);
gl.useProgram(drawProg);
gl.drawArrays(gl.POINTS, 0, numPoints);
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
<canvas></canvas>
Also this answer might be useful.
OK, so what I was trying to do is something like the following (hopefully this helps someone else in the future). Basically, I want one shader program doing the movement calculations (program #2) for another shader program that does the rendering (program #1); I want to avoid any vector calculations in JS. This example combines gman's transform feedback sample and the sample I provided above:
const canvas = document.querySelector('canvas');
var gl = canvas.getContext('webgl2', {preserveDrawingBuffer: true});
// ___________shaders
// ___________vs and fs #1
const genPointsVSGLSL = `#version 300 es
in vec4 aPos;
void main(void) {
gl_PointSize = 20.0;
gl_Position = vec4( -0.01 + aPos.x , -0.01+aPos.y , aPos.zw);
}
`;
const genPointsFSGLSL = `#version 300 es
precision highp float;
out vec4 color;
void main() {
discard;
//color = vec4(0.5,0.5,0.0,1.0);
}
`;
// ___________vs and fs #2
const drawVSGLSL = `#version 300 es
in vec4 position;
void main() {
gl_PointSize = 20.0;
gl_Position = position;
}
`;
const drawFSGLSL = `#version 300 es
precision highp float;
out vec4 outColor;
void main() {
outColor = vec4(1.0, 0.0, 0.0, 1.0); // red (color components are clamped to [0, 1])
}
`;
// create shaders and programs code
const createShader = function(gl, type, glsl) {
const shader = gl.createShader(type)
gl.shaderSource(shader, glsl)
gl.compileShader(shader)
if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
throw new Error(gl.getShaderInfoLog(shader))
}
return shader
};
const createProgram = function(gl, vsGLSL, fsGLSL, outVaryings) {
const vs = createShader(gl, gl.VERTEX_SHADER, vsGLSL)
const fs = createShader(gl, gl.FRAGMENT_SHADER, fsGLSL)
const prg = gl.createProgram()
gl.attachShader(prg, vs)
gl.attachShader(prg, fs)
if (outVaryings) {
gl.transformFeedbackVaryings(prg, outVaryings, gl.SEPARATE_ATTRIBS)
}
gl.linkProgram(prg)
if (!gl.getProgramParameter(prg, gl.LINK_STATUS)) {
throw new Error(gl.getProgramInfoLog(prg))
}
return prg
};
const genProg = createProgram(gl, genPointsVSGLSL, genPointsFSGLSL, ['gl_Position']);
const drawProg = createProgram(gl, drawVSGLSL, drawFSGLSL, ['gl_Position']);
// program1 location attribute
const positionLoc = gl.getAttribLocation( drawProg , 'position');
// program2 location attribute
const aPosLoc = gl.getAttribLocation( genProg , 'aPos');
var vertizes = [0.8,0,0,1, 0.8,0.5,0,1];
var indizes = vertizes.length/4;
// create buffers and transform feedback
var bufA = gl.createBuffer()
gl.bindBuffer(gl.ARRAY_BUFFER, bufA)
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array( vertizes ), gl.DYNAMIC_COPY)
var bufB = gl.createBuffer()
gl.bindBuffer(gl.ARRAY_BUFFER, bufB)
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array( vertizes ) , gl.DYNAMIC_COPY)
var transformFeedback = gl.createTransformFeedback()
gl.bindTransformFeedback(gl.TRANSFORM_FEEDBACK, transformFeedback)
// draw
function draw(){
gl.useProgram( genProg );
gl.clear(gl.COLOR_BUFFER_BIT);
// bind bufA to output of program#2
gl.bindBuffer(gl.ARRAY_BUFFER, bufA);
gl.enableVertexAttribArray( aPosLoc );
gl.vertexAttribPointer(aPosLoc, 4, gl.FLOAT, false, 0, 0)
// run movement calculation code, aka program#2 (calculate movement location and hide the results using RASTERIZER_DISCARD )
gl.enable(gl.RASTERIZER_DISCARD);
gl.drawArrays(gl.POINTS, 0, indizes);
gl.disable(gl.RASTERIZER_DISCARD);
gl.bindBufferBase(gl.TRANSFORM_FEEDBACK_BUFFER, 0, bufB);
// move dot using rendering code and the position calculated previously which is still stored in bufA
gl.useProgram( drawProg );
gl.bindBuffer( gl.ARRAY_BUFFER, bufA );
gl.enableVertexAttribArray( positionLoc );
gl.vertexAttribPointer( positionLoc , 4, gl.FLOAT, false, 0, 0);
gl.drawArrays(gl.POINTS, 0, indizes);
gl.useProgram( genProg );
// run transform feedback
gl.beginTransformFeedback(gl.POINTS);
gl.drawArrays(gl.POINTS, 0, indizes);
gl.endTransformFeedback();
gl.bindBufferBase(gl.TRANSFORM_FEEDBACK_BUFFER, 0, null);
// switch bufA and bufB in preparation for the next draw call
var t = bufA;
bufA = bufB;
bufB = t;
}
setInterval( draw , 100 );
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
<canvas></canvas>

In Qt, how can a function xyz() return both a QImage and a QString?

In the function xyz(), I am calculating a string value and an image, and I need to return both of them. What return type should I use so that the function can return both values?
<Return-Type> MainWindow::xyz(QString m_ImgPath, int i)
{
try
{
m_ImgPath = Array[i];
QByteArray m_path = m_ImgPath.toLocal8Bit();
char* ImagePath = m_path.data();
obj *m_ThumpDCMReader = obj::New();
TReader->SetFileName(ImagePath);
TReader->Update();
//const QString string = NULL;
const char *str_uchar = TReader->GetMetaData()->GetAttributeValue(DC::string).GetCharData();
string = QString::fromUtf8((char *)str_uchar);
SPointer<ImageData> imageData = TReader->GetOutput();
if (!imageData) { return QImage(); }
/// \todo retrieve just the UpdateExtent
int width = imageData->GetDimensions()[0];
int height = imageData->GetDimensions()[1];
QImage image(width, height, QImage::Format_RGB32);
QRgb *rgbPtr = reinterpret_cast<QRgb *>(image.bits()) + width * (height - 1);
unsigned char *colorsPtr = reinterpret_cast<unsigned char *>(imageData->GetScalarPointer());
for (int row = 0; row < height; row++)
{
for (int col = 0; col < width; col++)
{
*(rgbPtr++) = QColor(colorsPtr[0], colorsPtr[1], colorsPtr[2]).rgb();
colorsPtr += imageData->GetNumberOfScalarComponents();
}
rgbPtr -= width * 2;
}
return (Image,string)
}
catch (...) { return QImage(); }
}
So what do I need to use as the return type so that it can return multiple values?
You can use a QPair<QString, QImage> for that, and use qMakePair to build the values:
QPair<QString, QImage> MainWindow::xyz(QString m_ImgPath, int i) {
try {
// ...
return qMakePair(string, Image);
} catch (...) {
return qMakePair(QString(), QImage());
}
}
The caller can then use .first and .second to access the string and image, respectively:
auto values = xyz("",0); // or QPair<QString, QImage> values = xyz("",0);
auto str = values.first;
auto img = values.second;
If you need to extend to more than 2 items, I suggest using a custom struct, e.g.:
struct StringWithImage {
QString string;
QImage image;
};
// In your return:
return StringWithImage{ string, Image };
// Usage:
auto values = xyz("", 0);
auto str = values.string;
auto img = values.image;

OpenGL Texture Mapping memory leak

I am writing a video content analysis application which analyses recorded and live videos.
I use OpenGL to display the videos on a Qt interface (using QGLWidget). I am using texture mapping with pixel buffer objects, if the graphics card supports them (here's the reference: http://www.songho.ca/opengl/gl_pbo.html), to display the video (loaded from OpenCV's IplImage).
The problem is that the application's memory usage keeps increasing over time, by approx. 4-8 KB per second. I am using the task manager to verify this.
I have narrowed the issue down to the rendering of the video, because I saw a lot of posts about textures not being freed, which leads to growing memory usage, but I haven't been able to find a solution for my problem.
I am only calling glGenTextures in initializeGL(), so the texture is generated only once and reused.
Here's the code wherein the problem lies:
void paintGL(){
static int index = 0;
int nextIndex = 0; // pbo index used for next frame
if(paintFlag){
if(pboMode > 0) {
// "index" is used to copy pixels from a PBO to a texture object "nextIndex" is used to update pixels in a PBO
if(pboMode == 1){
// In single PBO mode, the index and nextIndex are set to 0
index = nextIndex = 0;
}
else if(pboMode == 2)
{
// In dual PBO mode, increment current index first then get the next index
index = (index + 1) % 2;
nextIndex = (index + 1) % 2;
}
// start to copy from PBO to texture object ///////
// bind the texture and PBO
glBindTexture(GL_TEXTURE_2D, texture);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pboIds[index]);
// copy pixels from PBO to texture object
// Use offset instead of pointer.
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, WIDTH, HEIGHT, GL_BGR, GL_UNSIGNED_BYTE, 0);
// measure the time copying data from PBO to texture object
//t1.stop();
//copyTime = t1.getElapsedTimeInMilliSec();
///////////////////////////////////////////////////
// start to modify pixel values ///////////////////
// t1.start();
// bind PBO to update pixel values
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pboIds[nextIndex]);
// map the buffer object into client's memory
// Note that glMapBufferARB() causes sync issue.
// If GPU is working with this buffer, glMapBufferARB() will wait(stall)
// for GPU to finish its job. To avoid waiting (stall), you can call
// first glBufferDataARB() with NULL pointer before glMapBufferARB().
// If you do that, the previous data in PBO will be discarded and
// glMapBufferARB() returns a new allocated pointer immediately
// even if GPU is still working with the previous data.
glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB, DATA_SIZE, 0, GL_STREAM_DRAW_ARB);
GLubyte* ptr = (GLubyte*)glMapBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, GL_WRITE_ONLY_ARB);
if(ptr)
{
// update data directly on the mapped buffer
//updatePixels(ptr, DATA_SIZE);
memcpy(ptr,original->imageData,DATA_SIZE);
glUnmapBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB); // release pointer to mapping buffer
}
// measure the time modifying the mapped buffer
//t1.stop();
//updateTime = t1.getElapsedTimeInMilliSec();
///////////////////////////////////////////////////
// it is good idea to release PBOs with ID 0 after use.
// Once bound with 0, all pixel operations behave normal ways.
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
}
else
{
///////////////////////////////////////////////////
// start to copy pixels from system memory to texture object
//t1.start();
glBindTexture(GL_TEXTURE_2D, texture);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, WIDTH, HEIGHT, GL_BGR, GL_UNSIGNED_BYTE, (GLvoid*)original->imageData);
//t1.stop();
//copyTime = t1.getElapsedTimeInMilliSec();
}
paintFlag=false;
}
// clear buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glBegin(GL_QUADS);
glTexCoord2i(0,1); glVertex2i(0,HEIGHT);
glTexCoord2i(0,0); glVertex2i(0,0);
glTexCoord2i(1,0); glVertex2i(WIDTH,0);
glTexCoord2i(1,1); glVertex2i(WIDTH,HEIGHT);
glEnd();
glFlush();
glBindTexture(GL_TEXTURE_2D, 0);
swapBuffers();
glDeleteBuffers(1,&texture);
updateGL();
}
The code is pretty much the same as in the tutorial. However, my texture data comes from an IplImage structure which is continuously updated by a separate thread. I am also using boost's lock_guard for synchronization purposes.
Is there anything wrong that I am doing here?
EDIT: I am adding the remaining code:
//Constructor, this is where all the allocation happens
const int DATA_SIZE = WIDTH * HEIGHT * 3;
QGLCanvas::QGLCanvas(QWidget* parent,QString caption)
: QGLWidget(parent)
{
imageFormat=QImage::Format_RGB888;
this->name=caption;
original=cvCreateImage(cvSize(WIDTH,HEIGHT),IPL_DEPTH_8U,3);
if(this->name=="Background")
bgFrameBackup=cvCreateImage(cvSize(WIDTH,HEIGHT),IPL_DEPTH_8U,3);
cvZero(original);
//cvShowImage("w",original);
//cvWaitKey(0);
switch(original->nChannels) {
case 1:
format = GL_LUMINANCE;
break;
case 2:
format = GL_LUMINANCE_ALPHA;
break;
case 3:
format = GL_BGR;
break;
default:
return;
}
drawing=false;
setMouseTracking(true);
mouseX=0;mouseY=0;
startX=0; endX=0;
startY=0; endY=0;
dialog=new EntryExitRuleDialog();
makeCurrent();
GLenum result=glewInit();
if(result){
qDebug()<<(const char*)(glewGetErrorString(result));
}
//qDebug()<<"Open GL Version: "<<(const char*)glGetString(GL_VERSION);
bgColor=QColor::fromRgb(100,100,100);
initializeGL();
qglClearColor(bgColor);
glInfo glInfo;
glInfo.getInfo();
#ifdef _WIN32
// check PBO is supported by your video card
if(glInfo.isExtensionSupported("GL_ARB_pixel_buffer_object"))
{
// get pointers to GL functions
glGenBuffersARB = (PFNGLGENBUFFERSARBPROC)wglGetProcAddress("glGenBuffersARB");
glBindBufferARB = (PFNGLBINDBUFFERARBPROC)wglGetProcAddress("glBindBufferARB");
glBufferDataARB = (PFNGLBUFFERDATAARBPROC)wglGetProcAddress("glBufferDataARB");
glBufferSubDataARB = (PFNGLBUFFERSUBDATAARBPROC)wglGetProcAddress("glBufferSubDataARB");
glDeleteBuffersARB = (PFNGLDELETEBUFFERSARBPROC)wglGetProcAddress("glDeleteBuffersARB");
glGetBufferParameterivARB = (PFNGLGETBUFFERPARAMETERIVARBPROC)wglGetProcAddress("glGetBufferParameterivARB");
glMapBufferARB = (PFNGLMAPBUFFERARBPROC)wglGetProcAddress("glMapBufferARB");
glUnmapBufferARB = (PFNGLUNMAPBUFFERARBPROC)wglGetProcAddress("glUnmapBufferARB");
// check once again PBO extension
if(glGenBuffersARB && glBindBufferARB && glBufferDataARB && glBufferSubDataARB &&
glMapBufferARB && glUnmapBufferARB && glDeleteBuffersARB && glGetBufferParameterivARB)
{
pboSupported = true;
cout << "Video card supports GL_ARB_pixel_buffer_object." << endl;
}
else
{
pboSupported = false;
cout << "Video card does NOT support GL_ARB_pixel_buffer_object." << endl;
}
}
#else // for linux, do not need to get function pointers, it is up-to-date
if(glInfo.isExtensionSupported("GL_ARB_pixel_buffer_object"))
{
pboSupported = pboUsed = true;
cout << "Video card supports GL_ARB_pixel_buffer_object." << endl;
}
else
{
pboSupported = pboUsed = false;
cout << "Video card does NOT support GL_ARB_pixel_buffer_object." << endl;
}
#endif
if(pboSupported){
glGenBuffersARB(2, pboIds);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pboIds[0]);
glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB, DATA_SIZE, 0, GL_STREAM_DRAW_ARB);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pboIds[1]);
glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB, DATA_SIZE, 0, GL_STREAM_DRAW_ARB);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
//Note: pboMode=2 somehow does not work during calibration. Fix this later.
pboMode=1;
}
else{
pboMode=0;
}
paintFlag=false;
}
void QGLCanvas::setImage(IplImage image){
if(QString(this->name)=="Background"){
cvCopyImage(&image,bgFrameBackup);
}
//cvShowImage(name,&image);
// Display a rectangle between startX ,startY and endX,endY if we are in calibration mode
//and drawing flag is set.(typically, by a mouse click)
if(QString(this->name)=="Calibrate" && calibrating ){
if(drawing)
cvRectangle(&image,cvPoint(startX,startY),cvPoint(endX,endY),cvScalarAll(0xee));
if(select_object) //During calibration
cvRectangle(&image,cvPoint(selection.x,selection.y),cvPoint(selection.x+selection.width,selection.y+selection.height),cvScalarAll(0xee));
//Draw existing calibration rectangles
for (list<CvRect>::iterator it=calibration_rect_list->begin(); it!=calibration_rect_list->end(); ++it)
{
cvRectangle(&image, cvPoint((*it).x, (*it).y), cvPoint((*it).x + (*it).width, (*it).y + (*it).height), CV_RGB(100,255,0), 2, 8, 0);
}
}
//Only draw on the video widget with the name "Final"
if(QString(this->name)=="Final")
{
if(calibrating && drawing)
cvRectangle(&image,cvPoint(startX,startY),cvPoint(endX,endY),cvScalarAll(0xee));
//If we are adding a rule, the corresponding rule shape must be drawn on the widget.
if(addingRule && drawing){
if(currentShape==RULE_SHAPE_RECT){
cvRectangle(&image,cvPoint(startX,startY),cvPoint(endX,endY),cvScalarAll(0xee));
}
else if(currentShape==RULE_SHAPE_POLY){
int linecolor=0xee;
if(points.count()>0){
//Draw polygon...
for(int i=1;i<points.count();i++){
cvLine(&image,cvPoint(points[i-1]->x(),points[i-1]->y()),cvPoint(points[i]->x(),points[i]->y()),cvScalarAll(linecolor));
}
cvLine(&image,cvPoint(startX,startY),cvPoint(endX,endY),cvScalarAll(0xee));
cvLine(&image,cvPoint(endX,endY),cvPoint(points[0]->x(),points[0]->y()),cvScalarAll(linecolor));
}
}
else if(currentShape==RULE_SHAPE_TRIPLINE){
for(int i=1;i<points.count();i++){
cvLine(&image,cvPoint(points[i-1]->x(),points[i-1]->y()),cvPoint(points[i]->x(),points[i]->y()),cvScalarAll(0xee));
}
cvLine(&image,cvPoint(startX,startY),cvPoint(endX,endY),cvScalarAll(0xee));
}
}
if(entryExitRuleCreated && currentZoneType==RULE_ZONE_TYPE_ENTRY_EXIT ){
//Highlight appropriate sides of the currentRule to mark them as Entry/Exit Zone
for(int i=0;i<currentRule->points.count();i++){
QPoint* P1=currentRule->points[i];
QPoint* P2;
//Implement cyclic nature of polygon
if(i<currentRule->points.count()-1)
P2=currentRule->points[i+1];
else P2=currentRule->points[0];
int deltax=mouseX-P1->x();
int deltax1=P2->x()-P1->x();
float m,m1;
if(deltax!=0)
m= (float)(mouseY-P1->y())/deltax;
if(deltax1!=0 && deltax!=0){
m1=(float)(P2->y()-P1->y())/deltax1;
if(round(m,1)==round(m1,1))//Mouse pointer lies on the line whose slope is same as the polygon edge
{
//Mouse pointer is on the edge of a polygon, highlight the edge
if(abs(P1->y()-P2->y()) >= abs(mouseY-P2->y()) && abs(P1->y()-P2->y()) >= abs(mouseY-P1->y())
&& abs(P1->x()-P2->x()) >= abs(mouseX-P2->x()) && abs(P1->x()-P2->x()) >= abs(mouseX-P1->x())
){
edgeHighlighted=true;
highlightedEdge[0]=P1;
highlightedEdge[1]=P2;
currentEdgeNumber=i;
break;
}
}
else{
edgeHighlighted=false;
}
}
else{
//Vertical edge of a polygon.
if(abs(mouseX-P1->x())<4) { //Same vertical line
if(abs(P1->y()-P2->y()) > abs(mouseY-P2->y()) && abs(P1->y()-P2->y()) > abs(mouseY-P1->y())){
//Current y lies between the two vertices of an edge
//Mouse pointer is on the edge of polygon,highlight the edge
//qDebug()<<"P1="<<P1->x()<<","<<P1->y()<<", P2="<<P2->x()<<","<<P2->y();
edgeHighlighted=true;
highlightedEdge[0]=P1;
highlightedEdge[1]=P2;
currentEdgeNumber=i;
break;
}
else
edgeHighlighted=false;
}
}
}
if(edgeHighlighted || edgeHighlightedFromButton){
cvLine(&image,cvPoint(highlightedEdge[0]->x(),highlightedEdge[0]->y()),cvPoint(highlightedEdge[1]->x(),highlightedEdge[1]->y()),cvScalar(0xff,0x00,0x00),3);
}
}
}
{
//qDebug()<<name<<":Saving original image";
ExclusiveLock xlock(globalXMutex);
this->original=ℑ
paintFlag=true;
}
updateGL();
/*if(this->name=="Final"){
cvShowImage("Final",original);
cvWaitKey(1);
}*/
}
//Texture is generated here
void QGLCanvas::initializeGL(){
glDisable(GL_LIGHTING);
glEnable(GL_TEXTURE_2D);
glClearColor(0, 0, 0, 0); // background color
glClearStencil(0); // clear stencil buffer
glClearDepth(1.0f); // 0 is near, 1 is far
glDepthFunc(GL_LEQUAL);
glEnable(GL_TEXTURE_2D);
glGenTextures(1,&texture);
glBindTexture(GL_TEXTURE_2D,texture);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glBindTexture(GL_TEXTURE_2D,texture);
glTexImage2D(GL_TEXTURE_2D,0,GL_RGB,WIDTH,HEIGHT,0,GL_BGR,GL_UNSIGNED_BYTE,NULL);
glBindTexture(GL_TEXTURE_2D, 0);
glClearStencil(0); // clear stencil buffer
glClearDepth(1.0f); // 0 is near, 1 is far
glDepthFunc(GL_LEQUAL);
setAutoBufferSwap(false);
}
void QGLCanvas::resizeGL(int width,int height){
if (height==0) // Prevent A Divide By Zero By
{
height=1; // Making Height Equal One
}
glViewport(0,0,WIDTH,HEIGHT); // Reset The Current Viewport
glMatrixMode(GL_PROJECTION); // Select The Projection Matrix
glLoadIdentity(); // Reset The Projection Matrix
glOrtho(0.0f,WIDTH,HEIGHT,0.0f,0.0f,1.0f);
glEnable(GL_TEXTURE_2D);
glMatrixMode(GL_MODELVIEW); // Select The Modelview Matrix
glLoadIdentity(); // Reset The Modelview Matrix
}
You're calling glDeleteBuffers() on a texture object (it should be a buffer object), or rather, it should not be here at all, I think. Like other GL objects, call glDelete*() only once for every glGen*() call.
You're also calling glFlush() and swapBuffers() yourself; I believe Qt takes care of that for you.
The OpenGL driver could have a memory leak. Try it without PBO.
Try glGetError() after each GL call to see if you've made a mistake elsewhere.
