GLSL, reading wrong value inside a fragment shader for a bounded depth texture - jogl

I am applying a slightly modified version of the classic depth peeling algorithm: basically, I render all the opaque objects first and then use their depth as the minimum depth, because, since they are opaque, any transparent fragment deeper than them can be discarded.
I first tested it on a small test case and it works flawlessly.
Now I am applying this algorithm to my main application, but for some unknown reason it doesn't work and I am going crazy. The main problem is that I keep reading the value 0 from the opaque depth texture bound in the fragment shader of the next stage.
To sum up, this is the FBO for the opaque stuff:
// Generate the two textures and the FBO for the opaque pass.
opaqueDepthTexture = new int[1];
opaqueColorTexture = new int[1];
opaqueFbo = new int[1];
gl3.glGenTextures(1, opaqueDepthTexture, 0);
gl3.glGenTextures(1, opaqueColorTexture, 0);
gl3.glGenFramebuffers(1, opaqueFbo, 0);
// Depth texture: rectangle target, 32-bit float depth, no mipmaps.
gl3.glBindTexture(GL3.GL_TEXTURE_RECTANGLE, opaqueDepthTexture[0]);
gl3.glTexImage2D(GL3.GL_TEXTURE_RECTANGLE, 0, GL3.GL_DEPTH_COMPONENT32F, width, height, 0,
        GL3.GL_DEPTH_COMPONENT, GL3.GL_FLOAT, null);
gl3.glTexParameteri(GL3.GL_TEXTURE_RECTANGLE, GL3.GL_TEXTURE_BASE_LEVEL, 0);
gl3.glTexParameteri(GL3.GL_TEXTURE_RECTANGLE, GL3.GL_TEXTURE_MAX_LEVEL, 0);
// Color texture: rectangle target, RGBA.
gl3.glBindTexture(GL3.GL_TEXTURE_RECTANGLE, opaqueColorTexture[0]);
gl3.glTexImage2D(GL3.GL_TEXTURE_RECTANGLE, 0, GL3.GL_RGBA, width, height, 0,
        GL3.GL_RGBA, GL3.GL_FLOAT, null);
gl3.glTexParameteri(GL3.GL_TEXTURE_RECTANGLE, GL3.GL_TEXTURE_BASE_LEVEL, 0);
gl3.glTexParameteri(GL3.GL_TEXTURE_RECTANGLE, GL3.GL_TEXTURE_MAX_LEVEL, 0);
// Attach both textures to the FBO and verify completeness.
gl3.glBindFramebuffer(GL3.GL_FRAMEBUFFER, opaqueFbo[0]);
gl3.glFramebufferTexture2D(GL3.GL_FRAMEBUFFER, GL3.GL_DEPTH_ATTACHMENT, GL3.GL_TEXTURE_RECTANGLE,
        opaqueDepthTexture[0], 0);
gl3.glFramebufferTexture2D(GL3.GL_FRAMEBUFFER, GL3.GL_COLOR_ATTACHMENT0, GL3.GL_TEXTURE_RECTANGLE,
        opaqueColorTexture[0], 0);
checkBindedFrameBuffer(gl3);
Here I just clear the depth (defaults to 1); I even commented out the opaque rendering:
/**
* (1) Initialize Opaque FBO.
*/
gl3.glBindFramebuffer(GL3.GL_FRAMEBUFFER, opaqueFbo[0]);
gl3.glDrawBuffer(GL3.GL_COLOR_ATTACHMENT0);
gl3.glClearColor(1, 1, 1, 1);
gl3.glClear(GL3.GL_COLOR_BUFFER_BIT | GL3.GL_DEPTH_BUFFER_BIT);
gl3.glEnable(GL3.GL_DEPTH_TEST);
dpOpaque.bind(gl3);
{
// EC_Graph.instance.getRoot().renderDpOpaque(gl3, dpOpaque, new MatrixStack(), properties);
}
dpOpaque.unbind(gl3);
And I have double confirmation from this readback:
FloatBuffer fb = FloatBuffer.allocate(1 * GLBuffers.SIZEOF_FLOAT);
gl3.glReadPixels(width / 2, height / 2, 1, 1, GL3.GL_DEPTH_COMPONENT, GL3.GL_FLOAT, fb);
System.out.println("opaque fb.get(0) " + fb.get(0));
If I change the clearDepth to 0.9 for example, I get 0.9, so this is ok.
Now I initialize the minimum depth buffer by rendering all the geometry having alpha < 1 and binding the depth texture from the opaque pass to this sampler uniform:
uniform sampler2D opaqueDepthTexture;
I temporarily switched the rendering of this pass to the default framebuffer:
/**
* (2) Initialize Min Depth Buffer.
*/
gl3.glBindFramebuffer(GL3.GL_FRAMEBUFFER, 0);
gl3.glDrawBuffer(GL3.GL_BACK);
// gl3.glBindFramebuffer(GL3.GL_FRAMEBUFFER, blendFbo[0]);
// gl3.glDrawBuffer(GL3.GL_COLOR_ATTACHMENT0);
gl3.glClearColor(0, 0, 0, 1);
gl3.glClear(GL3.GL_COLOR_BUFFER_BIT | GL3.GL_DEPTH_BUFFER_BIT);
gl3.glEnable(GL3.GL_DEPTH_TEST);
if (cullFace) {
gl3.glEnable(GL3.GL_CULL_FACE);
}
dpInit.bind(gl3);
{
gl3.glActiveTexture(GL3.GL_TEXTURE1);
gl3.glBindTexture(GL3.GL_TEXTURE_RECTANGLE, opaqueDepthTexture[0]);
gl3.glUniform1i(dpInit.getOpaqueDepthTextureUL(), 1);
gl3.glBindSampler(1, sampler[0]);
{
EC_Graph.instance.getRoot().renderDpTransparent(gl3, dpInit, new MatrixStack(), properties);
}
gl3.glBindTexture(GL3.GL_TEXTURE_RECTANGLE, 0);
gl3.glBindSampler(1, 0);
}
dpInit.unbind(gl3);
This is the dpInit Fragment Shader:
#version 330
out vec4 outputColor;
uniform sampler2D texture0;
in vec2 oUV;
uniform sampler2D opaqueDepthTexture;
/*
* Layout {lighting, normal orientation, active, selected}
*/
uniform ivec4 settings;
const vec3 selectionColor = vec3(1, .5, 0);
const vec4 inactiveColor = vec4(.5, .5, .5, .2);
vec4 CalculateLight();
void main()
{
float opaqueDepth = texture(opaqueDepthTexture, gl_FragCoord.xy).r;
if(gl_FragCoord.z > opaqueDepth) {
//discard;
}
vec4 color = (1 - settings.x) * texture(texture0, oUV) + settings.x * CalculateLight();
if(settings.w == 1) {
if(settings.z == 1) {
color = vec4(selectionColor, color.q);
} else {
color = vec4(selectionColor, inactiveColor.w);
}
} else {
if(settings.z == 0) {
color = inactiveColor;
}
}
outputColor = vec4(color.rgb * color.a, 1.0 - color.a);
outputColor = vec4(.5, 1, 1, 1.0 - color.a);
if(opaqueDepth == 0)
outputColor = vec4(1, 0, 0, 1);
else
outputColor = vec4(0, 1, 0, 1);
}
Ignore the middle; the important part is just at the beginning, where I read the red component of the previous depth texture, and at the end, where I compare against it. The geometry I obtain is red, which means the value I read in the opaqueDepthTexture is 0...
The question is: why?
After the dpInit rendering, if I bind the opaqueFbo again and read the depth, it is always the clearDepth, so 1 by default or .9 if I cleared it with .9; that part works.
The problem is really that I read the wrong value in the dpInit FS from a bound depth texture... why?
For clarification, this is the sampler:
private void initSampler(GL3 gl3) {
sampler = new int[1];
gl3.glGenSamplers(1, sampler, 0);
gl3.glSamplerParameteri(sampler[0], GL3.GL_TEXTURE_WRAP_S, GL3.GL_CLAMP_TO_EDGE);
gl3.glSamplerParameteri(sampler[0], GL3.GL_TEXTURE_WRAP_T, GL3.GL_CLAMP_TO_EDGE);
gl3.glSamplerParameteri(sampler[0], GL3.GL_TEXTURE_MIN_FILTER, GL3.GL_NEAREST);
gl3.glSamplerParameteri(sampler[0], GL3.GL_TEXTURE_MAG_FILTER, GL3.GL_NEAREST);
}
PS: checking all the components, I see that opaqueDepthTexture always returns (0, 0, 0, 1).

Oh god, I found it: in the init FS,
uniform sampler2D opaqueDepthTexture;
should be
uniform sampler2DRect opaqueDepthTexture;
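This explains all the symptoms: the texture was created with the GL_TEXTURE_RECTANGLE target, while a sampler2D on that unit reads the (empty, incomplete) GL_TEXTURE_2D binding instead, which returns (0, 0, 0, 1) on every lookup. A rectangle sampler also takes unnormalized texel coordinates, so gl_FragCoord.xy can be used directly. A minimal sketch of the corrected lookup (same identifiers as the shader above):
#version 330
// sampler2DRect matches the GL_TEXTURE_RECTANGLE texture, and its
// texture() overload takes unnormalized texel coordinates.
uniform sampler2DRect opaqueDepthTexture;
out vec4 outputColor;
void main()
{
    float opaqueDepth = texture(opaqueDepthTexture, gl_FragCoord.xy).r;
    // Transparent fragments behind opaque geometry can be discarded.
    if (gl_FragCoord.z > opaqueDepth)
        discard;
    outputColor = vec4(0, 1, 0, 1); // placeholder shading
}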

Related

Why OpenGL 3D texture sampling to wrong color?

I have some trouble using a 3D LUT in a GLSL shader, so I'm trying to test a minimal 3D LUT of size 2x2x2, 8 colors in total. My pixel data is
static const unsigned char lutData222[] = {
0, 0, 0, 255, 0, 0,
0,255, 0, 255,255, 0,
0, 0,255, 255, 0,255,
0,255,255, 255,255,255,
};
The 3D-texture settings are
m_lut.create();
m_lut.bind();
m_lut.setFormat(QOpenGLTexture::TextureFormat::RGB8_UNorm);
m_lut.setMinificationFilter(QOpenGLTexture::Filter::Nearest);
m_lut.setMagnificationFilter(QOpenGLTexture::Filter::Nearest);
m_lut.setWrapMode(QOpenGLTexture::WrapMode::ClampToEdge);
m_lut.setSize(LUT_EXTENT,LUT_EXTENT,LUT_EXTENT);
m_lut.allocateStorage(QOpenGLTexture::PixelFormat::RGB,
QOpenGLTexture::PixelType::UInt8);
m_lut.setData(QOpenGLTexture::PixelFormat::RGB,
QOpenGLTexture::PixelType::UInt8,
lutData222);
m_lut.release();
where m_lut is of type QOpenGLTexture (yes, this is a Qt project). Below is my fragment shader; it just uses the texture coordinates plus a third dimension to sample the LUT, so I can change the third dimension to 0 or 1 to see whether the color is as expected.
#version 330 core
in vec2 uv;
uniform sampler3D lut;
out vec4 FragColor;
void main() {
vec4 col = texture(lut, vec3(uv, 0));
col.a = 1;
FragColor = col;
};
But it is not. When the third dimension is 0, the result picture is [screenshot omitted], and when the third dimension is 1, the result picture is [screenshot omitted].
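Two asides, not from the original thread, that are worth checking with a tiny LUT like this. First, each row of a 2-wide RGB8 texture is 6 bytes, which is not a multiple of the default GL_UNPACK_ALIGNMENT of 4, so tightly packed data can be uploaded skewed; passing a QOpenGLPixelTransferOptions with setAlignment(1) to setData avoids that. Second, texture coordinates address texel centers: for a size-2 axis the centers lie at 0.25 and 0.75, so sampling at exactly 0 or 1 sits on the clamped edge. A minimal sketch of a center-correct lookup (the size 2.0 matches LUT_EXTENT above):
#version 330 core
in vec2 uv;
uniform sampler3D lut;
out vec4 FragColor;

// Remap [0,1] so 0 hits the first texel center and 1 the last one.
vec3 lutCoord(vec3 c, float size) {
    return (c * (size - 1.0) + 0.5) / size;
}

void main() {
    vec4 col = texture(lut, lutCoord(vec3(uv, 0.0), 2.0));
    col.a = 1.0;
    FragColor = col;
}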

Receiving denormalized output texture coordinates in Frag shader

Update
See rationale at the end of my question below
Using WebGL2 I can access a texel by its denormalized coordinates (sorry, I don't know the right lingo for this). That means I don't have to scale them down to 0-1 like I do with texture2D().
However the input to the fragment shader is still the vec2/3 in normalized values.
Is there a way to declare in/out variables in the Vertex and Frag shaders so that I don't have to scale the coordinates?
somewhere in vertex shader:
...
out vec2 TextureCoordinates;
somewhere in frag shader:
...
in vec2 TextureCoordinates;
I would like for TextureCoordinates to be ivec2 and already scaled.
This question and all my other questions on webgl related to general computing using WebGL. We are trying to do tensor (multi-D matrix) operations using WebGL.
We map our data in a few ways to a Texture. The simplest approach we follow is -- assuming we can access our data as a flat array -- to lay it out along the texture's width and go up the texture's height until we're done.
Since our thinking, logic, and calculations are all based on tensor/matrix indices -- inside the fragment shader -- we'd have to map back to/from the X-Y texture coordinates to indices. The intermediate step here is to calculate an offset for a given position of a texel. Then from that offset we can calculate the matrix indices from its strides.
Calculating an offset in WebGL 1 for very large textures seems to take much longer than in WebGL 2 using integer coordinates. See below:
WebGL 1 offset calculation
int coordsToOffset(vec2 coords, int width, int height) {
float s = coords.s * float(width);
float t = coords.t * float(height);
int offset = int(t) * width + int(s);
return offset;
}
vec2 offsetToCoords(int offset, int width, int height) {
int t = offset / width;
int s = offset - t*width;
vec2 coords = (vec2(s,t) + vec2(0.5,0.5)) / vec2(width, height);
return coords;
}
WebGL 2 offset calculation in the presence of int coords
int coordsToOffset(ivec2 coords, int width) {
return coords.t * width + coords.s;
}
ivec2 offsetToCoords(int offset, int width) {
int t = offset / width;
int s = offset - t*width;
return ivec2(s,t);
}
It should be clear that for a series of large texture operations we're saving hundreds of thousands of operations just on the offset/coords calculation.
It's not clear why you want to do what you're trying to do. It would be better to ask something like "I'm trying to draw an image / implement post-processing glow / do ray tracing / ... and to do that I want to use un-normalized texture coordinates because ..." and then we can tell you if your solution is going to work and how to solve it.
In any case, passing int or unsigned int or ivec2/3/4 or uvec2/3/4 as a varying is supported but not interpolation. You have to declare them as flat.
Still, you can pass un-normalized values as float or vec2/3/4 and then convert to int or ivec2/3/4 in the fragment shader.
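For completeness, a minimal sketch of the flat-qualified integer varying route (GLSL ES 3.00; the integer attribute would need gl.vertexAttribIPointer on the JavaScript side). The caveat is that flat means no interpolation at all: every fragment of a triangle gets the provoking vertex's value, which is usually wrong for per-fragment addressing, and is why the example below passes a float varying instead.
// vertex shader
#version 300 es
in vec4 position;
in ivec2 texelcoord;    // integer attribute, needs vertexAttribIPointer
flat out ivec2 v_texel; // flat: not interpolated
void main() {
    v_texel = texelcoord;
    gl_Position = position;
}

// fragment shader
#version 300 es
precision mediump float;
flat in ivec2 v_texel;
uniform sampler2D tex;
out vec4 outColor;
void main() {
    // v_texel is already in texel units; no conversion needed.
    outColor = texelFetch(tex, v_texel, 0);
}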
The other issue is that you get no filtering with texelFetch, the function that takes texel coordinates instead of normalized texture coordinates. It just returns the exact value of a single pixel; it does not support filtering like the normal texture function.
Example:
function main() {
const gl = document.querySelector('canvas').getContext('webgl2');
if (!gl) {
return alert("need webgl2");
}
const vs = `
#version 300 es
in vec4 position;
in ivec2 texelcoord;
out vec2 v_texcoord;
void main() {
v_texcoord = vec2(texelcoord);
gl_Position = position;
}
`;
const fs = `
#version 300 es
precision mediump float;
in vec2 v_texcoord;
out vec4 outColor;
uniform sampler2D tex;
void main() {
outColor = texelFetch(tex, ivec2(v_texcoord), 0);
}
`;
// compile shaders, link program, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// create buffers via gl.createBuffer, gl.bindBuffer, gl.bufferData)
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: {
numComponents: 2,
data: [
-.5, -.5,
.5, -.5,
0, .5,
],
},
texelcoord: {
numComponents: 2,
data: new Int32Array([
0, 0,
15, 0,
8, 15,
]),
}
});
// make a 16x16 texture
const ctx = document.createElement('canvas').getContext('2d');
ctx.canvas.width = 16;
ctx.canvas.height = 16;
for (let i = 23; i > 0; --i) {
ctx.fillStyle = `hsl(${i / 23 * 360 | 0}, 100%, ${i % 2 ? 25 : 75}%)`;
ctx.beginPath();
ctx.arc(8, 15, i, 0, Math.PI * 2, false);
ctx.fill();
}
const tex = twgl.createTexture(gl, { src: ctx.canvas });
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// no need to set uniforms since they default to 0
// and only one texture which is already on texture unit 0
gl.drawArrays(gl.TRIANGLES, 0, 3);
}
main();
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
So, in response to your updated question: it's still not clear what you want to do. Why do you want to pass varyings to the fragment shader? Can't you just do whatever math you want in the fragment shader itself?
Example:
uniform sampler2D tex;
out float result;
// sum all the values in the texture
vec4 sum4 = vec4(0);
ivec2 texDim = textureSize(tex, 0);
for (int y = 0; y < texDim.y; ++y) {
for (int x = 0; x < texDim.x; ++x) {
sum4 += texelFetch(tex, ivec2(x, y), 0);
}
}
result = sum4.x + sum4.y + sum4.z + sum4.w;
Example 2
uniform isampler2D indices;
uniform sampler2D data;
out float result;
// sum only the values in data pointed to by indices
vec4 sum4 = vec4(0);
ivec2 texDim = textureSize(indices, 0);
for (int y = 0; y < texDim.y; ++y) {
for (int x = 0; x < texDim.x; ++x) {
ivec2 index = texelFetch(indices, ivec2(x, y), 0).xy;
sum4 += texelFetch(data, index, 0);
}
}
result = sum4.x + sum4.y + sum4.z + sum4.w;
Note that I'm also not an expert in GPGPU, but I have a hunch the code above is not the fastest way, because I believe parallelization happens based on output. The code above has only 1 output, so no parallelization? It would be easy to change it so that it takes a block ID / tile ID / area ID as input and computes just the sum for that area. Then you'd write out a larger texture with the sum of each block, and finally sum the block sums.
Also, dependent and non-uniform texture reads are a known perf issue. The first example reads the texture in order, which is cache friendly. The second example reads the texture in a random order (specified by indices), which is not cache friendly.
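A minimal sketch of that block-sum idea (hypothetical, not from the answer above): each output fragment sums one BLOCK x BLOCK tile of the input, so the number of outputs, and with it the available parallelism, grows with the input size. It assumes the input dimensions are multiples of BLOCK and that you render one fragment per tile into a (width/BLOCK, height/BLOCK) target; a second, much smaller pass then sums the partial sums.
#version 300 es
precision highp float;
uniform sampler2D tex;
out vec4 outColor;

const int BLOCK = 16; // tile size; one output fragment per tile

void main() {
    // gl_FragCoord identifies which tile this fragment is responsible for.
    ivec2 tile = ivec2(gl_FragCoord.xy);
    vec4 sum4 = vec4(0);
    for (int y = 0; y < BLOCK; ++y) {
        for (int x = 0; x < BLOCK; ++x) {
            sum4 += texelFetch(tex, tile * BLOCK + ivec2(x, y), 0);
        }
    }
    outColor = sum4; // partial sum for this tile
}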

Moving rotated arrow in straight line

I'm trying to move an arrow, which could be rotated, in a straight line. I'm having some difficulty coming up with the correct formula to use. I know it should probably involve sine and cosine, but I've tried various configurations and haven't been able to get something that works.
Here's a picture of my scene with the arrow and bow: [screenshot omitted]
rotateNumber is an integer like -1 (for 1 left rotation), 0 (no rotation), 1 (1 right rotation), etc.
rotateAngle is 10 degrees by default.
Here's the code to move the arrow:
if (arrowMoving) {
var rAngle = rotateAngle * rotateNumber;
var angleInRad = rAngle * (Math.PI/180);
var stepSize = 1/20;
arrowX += stepSize * Math.cos(angleInRad);
arrowY += stepSize * Math.sin(angleInRad);
DrawArrowTranslate(arrowX, arrowY);
requestAnimFrame(render);
} else {
DrawArrow();
arrowX = 0;
arrowY = 0;
}
Here's the code to draw and translate the arrow:
function DrawArrowTranslate(tx, ty) {
modelViewStack.push(modelViewMatrix);
/*
var s = scale4(0.3, -0.7, 1);
var t = translate(0, -4, 0);
*/
var s = scale4(0.3, -0.7, 1);
var t = translate(0, -5, 0);
var t2 = translate(0 + tx, 1 + ty, 0)
// rotate takes angle in degrees
var rAngle = rotateAngle;
var r = rotate(rAngle, 0, 0, 1);
var m = mult(t, r);
var m = mult(m, t2);
modelViewMatrix = mat4();
modelViewMatrix = mult(modelViewMatrix, m);
modelViewMatrix = mult(modelViewMatrix, s);
/*
// update bounding box
arrowBoundingBox.translate(0, -5);
arrowBoundingBox.rotate(rAngle);
arrowBoundingBox.translate(0, 1);
arrowBoundingBox.scale(0.3, -0.7);
*/
gl.uniformMatrix4fv(modelViewMatrixLoc, false, flatten(modelViewMatrix));
gl.drawArrays( gl.LINE_STRIP, 1833, 4);
gl.drawArrays( gl.LINE_STRIP, 1837, 4);
modelViewMatrix = modelViewStack.pop();
}
Your code looks quite correct, but you should eliminate the use of rotateNumber. You can just use positive and negative angles for rotation instead, eliminating what I imagine is the cause of the error here.
Sin and cos can certainly handle angles of any magnitude: positive, negative, or zero.
Good luck!
I figured out the problem. I was translating after rotating when I needed to translate before rotating; the rotation was messing up the translation.

Nothing gets drawn as soon as GL_DEPTH_TEST is enabled

As soon as I set glEnable(GL_DEPTH_TEST) in the following code, nothing except for the clear color gets drawn on screen.
window.cpp
void Window::initializeGL() {
makeCurrent();
initializeOpenGLFunctions();
glClearColor(0.0f, 0.03f, 0.2f, 1.0f);
Shaders::initShaders();
glEnable(GL_DEPTH_TEST);
currentLevel.addTiles();
viewMatrix.setToIdentity();
viewMatrix.translate(0.0f, 0.0f, -8.0f);
viewMatrix.rotate(45.0f, -1.0f, 0.0f, 0.0f);
}
void Window::resizeGL(int width, int height) {
const float fov = 45.0f,
zNear = 0.0f,
zFar = 1000.0f;
projectionMatrix.setToIdentity();
projectionMatrix.perspective(fov, width / float(height), zNear, zFar);
}
void Window::paintGL() {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
currentLevel.setProjectionMatrix(projectionMatrix);
currentLevel.setViewMatrix(viewMatrix);
currentLevel.draw();
}
tile.cpp:
// Constructor with custom shader
Tile::Tile(QVector3D v1, QVector3D v2, QVector3D v3, QVector3D v4, QOpenGLShaderProgram* shaderProgram) :
vbo(QOpenGLBuffer::VertexBuffer),
cbo(QOpenGLBuffer::VertexBuffer),
ibo(QOpenGLBuffer::IndexBuffer) {
// Calculate surface normal & second set of vertices
QVector3D surfaceNormal = QVector3D::normal(v1, v2, v3);
QVector3D v1_n = v1 - surfaceNormal;
QVector3D v2_n = v2 - surfaceNormal;
QVector3D v3_n = v3 - surfaceNormal;
QVector3D v4_n = v4 - surfaceNormal;
// Set up rectangular mesh from given corner vectors
vertices = {
v1, v2, v3, v4,
v1_n, v2_n, v3_n, v4_n
};
colors = {
color1, color1, color2, color2,
color1, color1, color2, color2
};
indices = {
// Face 1
0, 1, 2,
2, 3, 0,
// Face 2
0, 4, 5,
5, 1, 0,
// Face 3
4, 5, 6,
6, 7, 4
};
this->shaderProgram = shaderProgram;
cacheShaderLocations();
createBuffers();
}
Tile::~Tile() {
vbo.destroy();
vao.destroy();
ibo.destroy();
}
// Cache Uniform Locations for shaders
void Tile::cacheShaderLocations() {
positionLocation = shaderProgram->attributeLocation("position");
colorLocation = shaderProgram->attributeLocation("color");
modelLocation = shaderProgram->uniformLocation("model");
viewLocation = shaderProgram->uniformLocation("view");
projectionLocation = shaderProgram->uniformLocation("projection");
}
// Create buffers
void Tile::createBuffers() {
// Vertex Buffer Object
vbo.create();
vbo.bind();
vbo.setUsagePattern(QOpenGLBuffer::StaticDraw);
vbo.allocate(vertices.constData(), vertices.size() * sizeof(QVector3D));
vbo.release();
// Color Buffer Object
cbo.create();
cbo.bind();
cbo.setUsagePattern(QOpenGLBuffer::StaticDraw);
cbo.allocate(colors.constData(), colors.size() * sizeof(QVector3D));
cbo.release();
// Index Buffer Object
ibo.create();
ibo.bind();
ibo.setUsagePattern(QOpenGLBuffer::StaticDraw);
ibo.allocate(indices.constData(), indices.size() * sizeof(GLushort));
ibo.release();
// Vertex Array Object
vao.create();
// Setup buffer attributes
shaderProgram->bind();
vao.bind();
vbo.bind();
shaderProgram->enableAttributeArray(positionLocation);
shaderProgram->setAttributeBuffer(positionLocation, GL_FLOAT, 0, 3, 0);
cbo.bind();
shaderProgram->enableAttributeArray(colorLocation);
shaderProgram->setAttributeBuffer(colorLocation, GL_FLOAT, 0, 3, 0);
ibo.bind();
vao.release();
// Release buffers & shader program
vbo.release();
cbo.release();
ibo.release();
shaderProgram->release();
}
void Tile::draw() {
shaderProgram->bind();
// Send uniforms to shader
shaderProgram->setUniformValue(projectionLocation, projectionMatrix);
shaderProgram->setUniformValue(viewLocation, viewMatrix);
shaderProgram->setUniformValue(modelLocation, modelMatrix);
// Draw vertices
vao.bind();
glDrawElements(GL_TRIANGLES, indices.size(), GL_UNSIGNED_SHORT, 0);
vao.release();
shaderProgram->release();
}
As for the depth buffer itself, according to the Qt documentation it is enabled by default. In main.cpp I set the surface format/context like this:
// Set OpenGL Version information
QSurfaceFormat format;
format.setDepthBufferSize(24);
format.setRenderableType(QSurfaceFormat::OpenGL);
format.setProfile(QSurfaceFormat::CoreProfile);
format.setVersion(3,3);
I really have no clue why nothing gets drawn when I try to use depth testing so I would greatly appreciate any help.

GUI update in Qt

I'm working on a project where I need to get some data (points) from a device and show them on a widget. I'm using Qt 5 and C++. The data arrive 12 times a second, and each batch has 895 points; it is effectively a simulation of a radar PPI display. I have a list that I put the points on, and then I try to paint them on the widget. The problem is that when I draw points on the widget, it connects the points and draws lines that are obviously unwanted.
void FormPPI::PPIUpdate()
{
QPainter* pdc = new QPainter();
ObservationList* list = monitoring->getObservationList();
ObservationList::iterator it = list->begin();
// qDebug() << list->size();
while (it != list->end())
{
Observation *o = &(*it);
int xp, yp;
float angle = calcPhi(o->pos().bearing());
//Find the pixel position on the screen
getPixelPos(o->pos().range() * cos(angle * DEG2RAD),
o->pos().range() * sin(angle * DEG2RAD), xp, yp);
//Draw observations here
//o->lifeTime() gets object lifetime which is initialized by 12
//used to reduce the intensity of color like antique radar system displays
pdc->fillRect(xp - 1, yp - 1, 2, 2, QColor(255, 255, 0, o->lifeTime() * 21));
//Decrease the observation life time by 1
o->decLifeTime();
if(o->lifeTime() <= 1)
list->erase(it);
it++;
}
qDebug() << list->size();
update();
list->release();
}
I am not able to explain why
pdc->fillRect(xp - 1, yp - 1, 2, 2, QColor(255, 255, 0, o->lifeTime() * 21));
fills rectangles between two consecutive points (xp1, yp1) and (xp2, yp2).
But you have an alternative: you can draw points directly with a specific diameter (it seems you want a diameter of approximately 2 pixels).
int diameter = 2;
QPen pen = pdc->pen();
pen.setWidth(diameter); // point size in pixels
pdc->setPen(pen);
while (...)
{
    ...
    pen.setColor(QColor(255, 255, 0, o->lifeTime() * 21));
    pdc->setPen(pen);
    pdc->drawPoint(xp, yp);
    ...
}
