I've come across a cube-to-sphere mapping function that gives a more uniform result than simply normalizing the coordinates or using other mapping methods. Unfortunately, there is no corresponding unwrapping function.
Source: http://mathproofs.blogspot.com/2005/07/mapping-cube-to-sphere.html
vec3 spherify ( vec3 v ) {
    float x2 = v.x * v.x;
    float y2 = v.y * v.y;
    float z2 = v.z * v.z;
    vec3 s;
    s.x = v.x * sqrt(1.0 - y2 / 2.0 - z2 / 2.0 + y2 * z2 / 3.0);
    s.y = v.y * sqrt(1.0 - x2 / 2.0 - z2 / 2.0 + x2 * z2 / 3.0);
    s.z = v.z * sqrt(1.0 - x2 / 2.0 - y2 / 2.0 + x2 * y2 / 3.0);
    return s;
}
How could this be unwrapped back to a cube face? To map back to a square I use the following, though it doesn't undo the special squeezing of the coordinates.
vec3 cubify ( vec3 s ) {
    s.x = ( s.x / s.z );
    s.y = ( s.y / s.z );
    return s;
}
Which looks like the following:

[Image: Mapping]
How about something like this (C++/VCL):
//---------------------------------------------------------------------------
#include <vcl.h>
#pragma hdrstop
#include "Unit1.h"
#include "gl_simple.h"
#include "glsl_math.h"
//---------------------------------------------------------------------------
#pragma package(smart_init)
#pragma resource "*.dfm"
TForm1 *Form1;
const int n=10*10*6;
vec3 col[n];
vec3 cube[n];
vec3 sphere[n];
vec3 cube2[n];
//---------------------------------------------------------------------------
vec3 spherify(vec3 v)
{
float x2 = v.x * v.x;
float y2 = v.y * v.y;
float z2 = v.z * v.z;
vec3 s;
s.x = v.x * sqrt(1.0 - y2 / 2.0 - z2 / 2.0 + y2 * z2 / 3.0);
s.y = v.y * sqrt(1.0 - x2 / 2.0 - z2 / 2.0 + x2 * z2 / 3.0);
s.z = v.z * sqrt(1.0 - x2 / 2.0 - y2 / 2.0 + x2 * y2 / 3.0);
return s;
}
//---------------------------------------------------------------------------
vec3 cubify(vec3 v)
{
int i;
float r,a;
// major axis and size
a=fabs(v.x); { r=a; i=0; }
a=fabs(v.y); if (r<a){ r=a; i=1; }
a=fabs(v.z); if (r<a){ r=a; i=2; }
v/=r; r*=1.75; a=4.0*r/M_PI;
// convert to cube + linearization
if (i==0){ v.y=a*atan(v.y/r); v.z=a*atan(v.z/r); }
else if (i==1){ v.x=a*atan(v.x/r); v.z=a*atan(v.z/r); }
else { v.x=a*atan(v.x/r); v.y=a*atan(v.y/r); }
// just remedy boundaries after linearization
if (v.x<-1.0) v.x=-1.0;
if (v.x>+1.0) v.x=+1.0;
if (v.y<-1.0) v.y=-1.0;
if (v.y>+1.0) v.y=+1.0;
if (v.z<-1.0) v.z=-1.0;
if (v.z>+1.0) v.z=+1.0;
return v;
}
//---------------------------------------------------------------------------
void set_cube()
{
float u,v,d;
int m=sqrt(n/6),i,j,k;
k=0; d=2.0/float(m-1);
for (u=-1.0,i=0;i<m;i++,u+=d)
for (v=-1.0,j=0;j<m;j++,v+=d)
{
col[k]=vec3(0.5,0.0,0.0); cube[k]=vec3(u,v,-1.0); k++;
col[k]=vec3(1.0,0.0,0.0); cube[k]=vec3(u,v,+1.0); k++;
col[k]=vec3(0.0,0.5,0.0); cube[k]=vec3(u,-1.0,v); k++;
col[k]=vec3(0.0,1.0,0.0); cube[k]=vec3(u,+1.0,v); k++;
col[k]=vec3(0.0,0.0,0.5); cube[k]=vec3(-1.0,u,v); k++;
col[k]=vec3(0.0,0.0,1.0); cube[k]=vec3(+1.0,u,v); k++;
}
}
//---------------------------------------------------------------------------
void gl_draw()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
float aspect=float(xs)/float(ys);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60.0/aspect,aspect,0.1,100.0);
glMatrixMode(GL_TEXTURE);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.0,0.0,-10.5);
glRotatef(-10.0,1.0,0.0,0.0);
glRotatef(-20.0,0.0,1.0,0.0);
glEnable(GL_DEPTH_TEST);
glDisable(GL_TEXTURE_2D);
int i;
glPointSize(2);
glMatrixMode(GL_MODELVIEW);
glTranslatef(-3.0,0.0,0.0); glBegin(GL_POINTS); for (i=0;i<n;i++){ glColor3fv(col[i].dat); glVertex3fv(cube[i].dat); } glEnd(); // set_cube
glTranslatef(+3.0,0.0,0.0); glBegin(GL_POINTS); for (i=0;i<n;i++){ glColor3fv(col[i].dat); glVertex3fv(sphere[i].dat); } glEnd(); // spherify
glTranslatef(+3.0,0.0,0.0); glBegin(GL_POINTS); for (i=0;i<n;i++){ glColor3fv(col[i].dat); glVertex3fv(cube2[i].dat); } glEnd(); // cubify
glPointSize(1);
glFlush();
SwapBuffers(hdc);
}
//---------------------------------------------------------------------------
__fastcall TForm1::TForm1(TComponent* Owner):TForm(Owner)
{
gl_init(Handle);
int i;
set_cube();
for (i=0;i<n;i++) sphere[i]=spherify(cube[i]);
for (i=0;i<n;i++) cube2[i]=cubify(sphere[i]);
}
//---------------------------------------------------------------------------
void __fastcall TForm1::FormDestroy(TObject *Sender)
{
gl_exit();
}
//---------------------------------------------------------------------------
void __fastcall TForm1::FormPaint(TObject *Sender)
{
gl_draw();
}
//---------------------------------------------------------------------------
void __fastcall TForm1::Timer1Timer(TObject *Sender)
{
gl_draw();
}
//---------------------------------------------------------------------------
void __fastcall TForm1::FormResize(TObject *Sender)
{
gl_resize(ClientWidth,ClientHeight);
gl_draw();
}
//---------------------------------------------------------------------------
Just ignore the VCL stuff. The code creates uniform-grid cube points using set_cube, which are converted into sphere using your spherify and finally converted to cube2 using my cubify.
Here is a preview:
From left to right: cube, sphere, cube2. The colors are stored in col to better show the mapping between points...
The idea behind cubify is to leave the biggest coordinate as is and convert the other two into spherical angles, then use those angles as coordinates. Basically it's the reverse of this. It's a bit nonlinear near the edges, hence the slight shrinking of the 45 deg range to a smaller one (the r*=1.75)... Also, to avoid points crossing above the surface after linearization, another check is involved (the six ifs at the end).
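For completeness, here is a direct GLSL translation of the cubify above, as an untested sketch (it assumes the input lies on the unit sphere produced by spherify from the [-1,+1] cube, just like the C++ version):

vec3 cubify ( vec3 v ) {
    // pick the major axis (same tie-breaking as the C++ version: x wins over y, then over z)
    vec3 a = abs(v);
    float r;
    if      (a.x >= a.y && a.x >= a.z) r = a.x;
    else if (a.y >= a.z)               r = a.y;
    else                               r = a.z;
    v /= r;                            // project onto the cube surface
    r *= 1.75;                         // shrink the angular range slightly, as described above
    float k = 4.0 * r / 3.1415926535;  // scale the atan output back to [-1,+1]
    if      (a.x >= a.y && a.x >= a.z) { v.y = k * atan(v.y / r); v.z = k * atan(v.z / r); }
    else if (a.y >= a.z)               { v.x = k * atan(v.x / r); v.z = k * atan(v.z / r); }
    else                               { v.x = k * atan(v.x / r); v.y = k * atan(v.y / r); }
    return clamp(v, -1.0, 1.0);        // remedy boundaries after linearization
}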
I have written a small program that renders a 175x175 heightmap. The rendering is done using Qt3D, which is basically a set of wrappers around OpenGL. The program loads fine and runs fine on a powerful desktop. However, when I run it on a lower-power GPU, image updates are very choppy as soon as I start moving the camera around. Rendering a 3D terrain mesh really shouldn't be that difficult for even a small GPU, so I assume I am doing something very wrong. Are there some obvious ways to optimize this code, or am I just expecting too much from a small GPU?
Fragment shader:
https://github.com/qt/qt3d/blob/5.12/src/extras/shaders/es2/phong.inc.frag
Vertex shader:
https://github.com/qt/qt3d/blob/5.12/src/extras/shaders/es2/morphphong.vert
int main(int argc, char* argv[])
{
QGuiApplication app(argc, argv);
Qt3DExtras::Qt3DWindow view;
// Scene Root
Qt3DCore::QEntity *sceneRoot = new Qt3DCore::QEntity();
// Scene Camera
Qt3DRender::QCamera *basicCamera = view.camera();
basicCamera->setProjectionType(Qt3DRender::QCameraLens::PerspectiveProjection);
basicCamera->setUpVector(QVector3D(0.0f, 1.0f, 0.0f));
basicCamera->setViewCenter(QVector3D(60.0f, 15.0f, -60.0f));
basicCamera->setPosition(QVector3D(60.0f, 26.0f, 0.0f));
// For camera controls
Qt3DExtras::QFirstPersonCameraController *camController = new Qt3DExtras::QFirstPersonCameraController(sceneRoot);
camController->setCamera(basicCamera);
// Material
Qt3DRender::QMaterial *material= new Qt3DExtras::QPhongMaterial(sceneRoot);
Qt3DCore::QEntity *customMeshEntity = new Qt3DCore::QEntity(sceneRoot);
// Transform
Qt3DCore::QTransform *transform = new Qt3DCore::QTransform;
Qt3DRender::QGeometryRenderer *customMeshRenderer = new Qt3DRender::QGeometryRenderer;
Qt3DRender::QGeometry *customGeometry = new Qt3DRender::QGeometry(customMeshRenderer);
Qt3DRender::QBuffer *vertexDataBuffer = new Qt3DRender::QBuffer(Qt3DRender::QBuffer::VertexBuffer, customGeometry);
Qt3DRender::QBuffer *indexDataBuffer = new Qt3DRender::QBuffer(Qt3DRender::QBuffer::IndexBuffer, customGeometry);
QImage heightmap("../assets/heightmap.png");
QByteArray vertexBufferData;
vertexBufferData.resize(heightmap.width() * heightmap.height() * (3 + 3 + 3) * sizeof(float));
QVector<QVector3D> vertexPositions;
for (int row = 0; row < heightmap.height(); row++) {
for (int column = 0; column < heightmap.width(); column++) {
vertexPositions.append(QVector3D(row, heightmap.pixelColor(row, column).red()/8.0, -column));
}
}
QVector<QVector3D> vertexNormals;
for (int row = 0; row < heightmap.height(); row++) {
for (int column = 0; column < heightmap.width(); column++) {
int center = (row * heightmap.width()) + column;
int upper = center - heightmap.width();
int lower = center + heightmap.width();
int right = center + 1;
int left = center -1;
int lowerLeft = center - 1 + heightmap.width();
int upperRight = center + 1 - heightmap.width();
int rightEdge = heightmap.width() - 1;
int bottomEdge = heightmap.height() -1;
// Calculate normals for each adjacent face and sum
// Check for edge conditions
QVector3D vertexNormal(0, 0, 0);
if (column != 0 && row != 0 ) {
vertexNormal += QVector3D::normal(vertexPositions[center], vertexPositions[upper], vertexPositions[left]);
}
if (column != rightEdge && row != 0) {
vertexNormal += QVector3D::normal(vertexPositions[center], vertexPositions[upperRight], vertexPositions[upper]);
vertexNormal += QVector3D::normal(vertexPositions[center], vertexPositions[right], vertexPositions[upperRight]);
}
if (column != rightEdge && row != bottomEdge) {
vertexNormal += QVector3D::normal(vertexPositions[center], vertexPositions[lower], vertexPositions[right]);
}
if (column != 0 && row != bottomEdge) {
vertexNormal += QVector3D::normal(vertexPositions[center], vertexPositions[lowerLeft], vertexPositions[lower]);
vertexNormal += QVector3D::normal(vertexPositions[center], vertexPositions[left], vertexPositions[lowerLeft]);
}
vertexNormals.append(vertexNormal.normalized());
}
}
// Colors
QVector3D red(1.0f, 0.0f, 0.0f);
QVector3D yellow(1.0f, 1.0f, 0.0f);
QVector3D green(0.0f, 1.0f, 0.0f);
QVector3D blue(0.0f, 0.0f, 1.0f);
QVector3D white(1.0f, 1.0f, 1.0f);
QVector<QVector3D> vertices;
for (int i = 0; i < vertexPositions.count(); i ++) {
vertices.append(vertexPositions[i]);
vertices.append(vertexNormals[i]);
if (vertexPositions[i].y() > 20.0) {
vertices.append(red);
}
else if (vertexPositions[i].y() > 18.0) {
vertices.append(yellow);
}
else {
vertices.append(green);
}
}
float *rawVertexArray = reinterpret_cast<float *>(vertexBufferData.data());
int idx = 0;
Q_FOREACH (const QVector3D &v, vertices) {
rawVertexArray[idx++] = v.x();
rawVertexArray[idx++] = v.y();
rawVertexArray[idx++] = v.z();
}
// Indices
QByteArray indexBufferData;
int indicesCount = (heightmap.height() - 1) * (heightmap.width() - 1) * 2 * 3;
indexBufferData.resize( indicesCount * sizeof(uint));
uint *rawIndexArray = reinterpret_cast<uint *>(indexBufferData.data());
int index = 0;
for (int row = 0; row < heightmap.height()-1; row++) {
for (int column = 0; column < heightmap.width()-1; column++) {
// 1 <- 3
// | /
// | /
// v /
// 2
int vertexBufferIndex = (row * heightmap.width()) + column;
rawIndexArray[index++] = vertexBufferIndex;
rawIndexArray[index++] = vertexBufferIndex + heightmap.width(); // down one row
rawIndexArray[index++] = vertexBufferIndex + 1; // right one column
// 1
// / ^
// / |
// / |
// 2 -> 3
rawIndexArray[index++] = vertexBufferIndex + 1; // right one column
rawIndexArray[index++] = vertexBufferIndex + heightmap.width(); // down one row
rawIndexArray[index++] = vertexBufferIndex + heightmap.width() + 1; // down one row and right one column
}
}
vertexDataBuffer->setData(vertexBufferData);
indexDataBuffer->setData(indexBufferData);
// Attributes
Qt3DRender::QAttribute *positionAttribute = new Qt3DRender::QAttribute();
positionAttribute->setAttributeType( Qt3DRender::QAttribute::VertexAttribute);
positionAttribute->setBuffer(vertexDataBuffer);
positionAttribute->setDataType( Qt3DRender::QAttribute::Float);
positionAttribute->setDataSize(3);
positionAttribute->setByteOffset(0);
positionAttribute->setByteStride(9 * sizeof(float));
positionAttribute->setCount(vertexPositions.count());
positionAttribute->setName( Qt3DRender::QAttribute::defaultPositionAttributeName());
Qt3DRender::QAttribute *normalAttribute = new Qt3DRender::QAttribute();
normalAttribute->setAttributeType( Qt3DRender::QAttribute::VertexAttribute);
normalAttribute->setBuffer(vertexDataBuffer);
normalAttribute->setDataType( Qt3DRender::QAttribute::Float);
normalAttribute->setDataSize(3);
normalAttribute->setByteOffset(3 * sizeof(float));
normalAttribute->setByteStride(9 * sizeof(float));
normalAttribute->setCount(vertexPositions.count());
normalAttribute->setName( Qt3DRender::QAttribute::defaultNormalAttributeName());
Qt3DRender::QAttribute *colorAttribute = new Qt3DRender::QAttribute();
colorAttribute->setAttributeType( Qt3DRender::QAttribute::VertexAttribute);
colorAttribute->setBuffer(vertexDataBuffer);
colorAttribute->setDataType( Qt3DRender::QAttribute::Float);
colorAttribute->setDataSize(3);
colorAttribute->setByteOffset(6 * sizeof(float));
colorAttribute->setByteStride(9 * sizeof(float));
colorAttribute->setCount(vertexPositions.count());
colorAttribute->setName( Qt3DRender::QAttribute::defaultColorAttributeName());
Qt3DRender::QAttribute *indexAttribute = new Qt3DRender::QAttribute();
indexAttribute->setAttributeType( Qt3DRender::QAttribute::IndexAttribute);
indexAttribute->setBuffer(indexDataBuffer);
indexAttribute->setDataType( Qt3DRender::QAttribute::UnsignedInt);
indexAttribute->setDataSize(1);
indexAttribute->setByteOffset(0);
indexAttribute->setByteStride(0);
indexAttribute->setCount(indicesCount);
customGeometry->addAttribute(positionAttribute);
customGeometry->addAttribute(normalAttribute);
customGeometry->addAttribute(colorAttribute);
customGeometry->addAttribute(indexAttribute);
customMeshRenderer->setInstanceCount(1);
customMeshRenderer->setFirstVertex(0);
customMeshRenderer->setFirstInstance(0);
customMeshRenderer->setPrimitiveType(Qt3DRender::QGeometryRenderer::Triangles);
customMeshRenderer->setGeometry(customGeometry);
customMeshEntity->addComponent(customMeshRenderer);
customMeshEntity->addComponent(transform);
customMeshEntity->addComponent(material);
view.setRootEntity(sceneRoot);
view.show();
return app.exec();
}
The Qt blog has a very good write-up on optimizing Qt3D applications for low-end hardware.
https://blog.qt.io/blog/2019/04/02/optimizing-real-time-3d-entry-level-hardware/
I use an OpenGL shader to apply a median filter to an image. I copy the input image to the in_fbo buffer. This all works fine.
QGLFramebufferObject *in_fbo, *out_fbo;
painter.begin(in_fbo); //Copy QImage to QGLFramebufferObject
painter.drawImage(0,0,image_in,0,0,width,height);
painter.end();
out_fbo->bind();
glViewport( 0, 0, nWidth, nHeight );
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
glOrtho( 0.0, nWidth, 0.0, nHeight, -1.0, 1.0 );
glMatrixMode( GL_MODELVIEW );
glLoadIdentity( );
glEnable( GL_TEXTURE_2D );
out_fbo->drawTexture( QPointF(0.0,0.0), in_fbo->texture( ), GL_TEXTURE_2D );
But in the shader code I need to divide the vertex position by the width and height of the image, because texture coordinates are normalized to the range between 0 and 1.
How do I calculate the texture coordinates correctly?
//vertex shader
varying vec2 pos;
void main( void )
{
pos = gl_Vertex.xy;
gl_Position = ftransform( );
}
//fragment shader
#extension GL_ARB_texture_rectangle : enable
uniform sampler2D texture0;
uniform int imgWidth;
uniform int imgHeight;
uniform int len;
varying vec2 pos;
#define MAX_LEN (100)
void main(){
float v[ MAX_LEN ];
for (int i = 0; i < len; i++) {
vec2 posi = pos + float(i);
posi.x = posi.x / float( imgWidth );
posi.y = posi.y / float( imgHeight );
v[i] = texture2D(texture0, posi).r;
}
//
//.... Calculating new value
//
gl_FragColor = vec4( m, m, m, 1.0 );
}
I did this in OpenFrameworks before, but the shader that works for a texture in OF does not work for a texture in Qt. I suppose this is because OF creates textures with textureTarget = GL_TEXTURE_RECTANGLE_ARB. Now the result of applying the shader above isn't correct: it isn't identical to the result of the old shader (there are a few pixels with different colors). I don't know how to modify the shader above :(.
Old shaders:
//vertex
#version 120
#extension GL_ARB_texture_rectangle : enable
void main() {
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
gl_TexCoord[0] = gl_MultiTexCoord0;
gl_FrontColor = gl_Color;
}
//fragment
#version 120
#extension GL_ARB_texture_rectangle : enable
uniform sampler2D texture0;
uniform int len;
void main(){
vec2 pos = gl_TexCoord[0].xy;
pos.x = int( pos.x );
pos.y = int( pos.y );
float v[ MAX_LEN ];
for (int i=0; i<len; i++) {
vec2 posi = pos + i;
posi.x = int( posi.x + 0.5 ) + 0.5;
posi.y = int( posi.y + 0.5 ) + 0.5;
v[i] = texture2D(texture0, posi).r;
}
//
//.... Calculating new value
//
gl_FragColor = vec4( m, m, m, 1.0 );
}
OpenGL code from the OpenFrameworks lib:
texData.width = w;
texData.height = h;
texData.tex_w = w;
texData.tex_h = h;
texData.textureTarget = GL_TEXTURE_RECTANGLE_ARB;
texData.bFlipTexture = true;
texData.glType = GL_RGBA;
// create & setup FBO
glGenFramebuffersEXT(1, &fbo);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);
// Create the render buffer for depth
glGenRenderbuffersEXT(1, &depthBuffer);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, depthBuffer);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT, texData.tex_w, texData.tex_h);
// create & setup texture
glGenTextures(1, (GLuint *)(&texData.textureID)); // could be more then one, but for now, just one
glBindTexture(texData.textureTarget, (GLuint)(texData.textureID));
glTexParameterf(texData.textureTarget, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(texData.textureTarget, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameterf(texData.textureTarget, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(texData.textureTarget, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(texData.textureTarget, 0, texData.glType, texData.tex_w, texData.tex_h, 0, texData.glType, GL_UNSIGNED_BYTE, 0);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
// attach it to the FBO so we can render to it
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, texData.textureTarget, (GLuint)texData.textureID, 0);
I do not think you actually want to use the texture's dimensions to do this. From the sounds of things, this is a simple fullscreen image filter and you really just want fragment coordinates mapped into the range [0.0, 1.0]. If this is the case, then gl_FragCoord.xy / viewport.xy, where viewport is a 2D uniform that defines the width and height of your viewport, ought to work for your texture coordinates (in the fragment shader).
vec2 texCoord = vec2 (transformed_pos.x, transformed_pos.y) / transformed_pos.w * vec2 (0.5, 0.5) + vec2 (0.5, 0.5) may also work using the same principle -- clip-space coordinates transformed into NDC and then mapped to texture space. However, this approach will not properly account for texel centers ((0.5, 0.5) rather than (0.0, 0.0)) and can present problems when texture filtering is enabled and the wrap mode is not GL_CLAMP_TO_EDGE.
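To make the first suggestion concrete, here is a sketch of your fragment shader rewritten around gl_FragCoord (untested; viewport is an assumed uniform that your application must set to the image/FBO size, and the median computation is elided just as in the original):

//fragment shader
#version 120
uniform sampler2D texture0;
uniform vec2 viewport;   // assumed uniform: set to (imgWidth, imgHeight) by the application
uniform int len;
#define MAX_LEN (100)
void main( void )
{
    float v[ MAX_LEN ];
    for (int i = 0; i < len; i++) {
        // gl_FragCoord.xy is in pixels (sitting at texel centers, i.e. x.5),
        // so dividing by the viewport size gives normalized texture coordinates.
        vec2 posi = ( gl_FragCoord.xy + vec2( float(i) ) ) / viewport;
        v[i] = texture2D( texture0, posi ).r;
    }
    //
    //.... Calculating new value m, as before
    //
    float m = v[0]; // placeholder for the elided median computation
    gl_FragColor = vec4( m, m, m, 1.0 );
}

With this approach the varying pos and the vertex-shader change are no longer needed; the stock pass-through vertex shader is enough.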
I'm trying to wrap my head around how to use VAOs appropriately for instanced rendering (specifically in Qt 5.2, using OpenGL 3.3). My understanding is that VAOs save the state of the VBOs and associated attributes, so that you don't need to worry about binding and enabling everything at drawing time; you just bind the VAO. But with instancing, you often have multiple VBOs. How do you get around needing to bind them all? Or do I just need to use a single VBO for both my per-vertex data and my per-instance data?
I've been looking at a couple tutorials, for example: http://ogldev.atspace.co.uk/www/tutorial33/tutorial33.html
It looks to me like what he does is use a VAO for his per-vertex data and NOT for his per-instance data. I've tried doing the same thing with my Qt-based code, and it's not working for me (probably because I don't entirely understand how that works... shouldn't his instance data still need to be bound when drawing happens?).
Some dummy code... this is a bit silly; I'm just drawing a single instance of two triangles, with a perspective matrix as a per-instance attribute.
glwindow.cpp:
#include "glwindow.h"
#include <QColor>
#include <QMatrix4x4>
#include <QVector>
#include <QVector3D>
#include <QVector4D>
#include <QDebug>
GLWindow::GLWindow(QWindow *parent)
: QWindow(parent)
, _vbo(QOpenGLBuffer::VertexBuffer)
, _matbo(QOpenGLBuffer::VertexBuffer)
, _context(0)
{
setSurfaceType(QWindow::OpenGLSurface);
}
GLWindow::~GLWindow()
{}
void GLWindow::initGL()
{
setupShaders();
_program->bind();
_positionAttr = _program->attributeLocation("position");
_colourAttr = _program->attributeLocation("colour");
_matrixAttr = _program->attributeLocation("matrix");
QVector<QVector3D> triangles;
triangles << QVector3D(-0.5, 0.5, 1) << QVector3D(-0.5, -0.5, 1) << QVector3D(0.5, -0.5, 1);
triangles << QVector3D(0.5, 0.5, 0.5) << QVector3D(-0.5, -0.5, 0.5) << QVector3D(0.5, -0.5, 0.5);
QVector<QVector3D> colours;
colours << QVector3D(1, 0, 0) << QVector3D(0, 1, 0) << QVector3D(0, 0, 1);
colours << QVector3D(1, 1, 1) << QVector3D(1, 1, 1) << QVector3D(1, 1, 1);
_vao.create();
_vao.bind();
_vbo.create();
_vbo.setUsagePattern(QOpenGLBuffer::StaticDraw);
_vbo.bind();
size_t positionSize = triangles.size() * sizeof(QVector3D);
size_t colourSize = colours.size() * sizeof(QVector3D);
_vbo.allocate(positionSize + colourSize);
_vbo.bind();
_vbo.write(0, triangles.constData(), positionSize);
_vbo.write(positionSize, colours.constData(), colourSize);
_colourOffset = positionSize;
_program->setAttributeBuffer(_positionAttr, GL_FLOAT, 0, 3, 0);
_program->setAttributeBuffer(_colourAttr, GL_FLOAT, _colourOffset, 3, 0);
_program->enableAttributeArray(_positionAttr);
_program->enableAttributeArray(_colourAttr);
_vao.release();
_matbo.create();
_matbo.setUsagePattern(QOpenGLBuffer::StaticDraw);
_matbo.bind();
_matbo.allocate(4 * sizeof(QVector4D));
_program->setAttributeBuffer(_matrixAttr, GL_FLOAT, 0, 4, 4 * sizeof(QVector4D));
_program->enableAttributeArray(_matrixAttr);
_func330->glVertexAttribDivisor(_matrixAttr, 1);
_matbo.release();
_program->release();
resizeGL(width(), height());
}
void GLWindow::resizeGL(int w, int h)
{
glViewport(0, 0, w, h);
}
void GLWindow::paintGL()
{
if (! _context) // not yet initialized
return;
_context->makeCurrent(this);
QColor background(Qt::black);
glClearColor(background.redF(), background.greenF(), background.blueF(), 1.0f);
glClearDepth(1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
QMatrix4x4 matrix;
matrix.perspective(60, 4.0/3.0, 0.1, 100.0);
matrix.translate(0, 0, -2);
_program->bind();
_matbo.bind();
_matbo.write(0, matrix.constData(), 4 * sizeof(QVector4D));
_vao.bind();
glEnable(GL_DEPTH_TEST);
_func330->glDrawArraysInstanced(GL_TRIANGLES, 0, 6, 1);
_vao.release();
_program->release();
_context->swapBuffers(this);
_context->doneCurrent();
}
void GLWindow::setupShaders()
{
QString vShaderSrc("#version 330\n"
"layout(location = 0) in vec4 position;\n"
"layout(location = 1) in vec4 colour;\n"
"layout(location = 2) in mat4 matrix;\n"
"smooth out vec4 col;\n"
"void main() {\n"
" col = colour;\n"
" gl_Position = matrix * position;\n"
"}\n");
QString fShaderSrc("#version 330\n"
"smooth in vec4 col;\n"
"void main() {\n"
" gl_FragColor = col;\n"
"}\n");
_program = new QOpenGLShaderProgram(this);
_program->addShaderFromSourceCode(QOpenGLShader::Vertex, vShaderSrc);
_program->addShaderFromSourceCode(QOpenGLShader::Fragment, fShaderSrc);
_program->link();
}
void GLWindow::exposeEvent(QExposeEvent *event)
{
Q_UNUSED(event);
if (isExposed())
{
if (! _context)
{
_context = new QOpenGLContext(this);
QSurfaceFormat format(requestedFormat());
format.setVersion(3,3);
format.setDepthBufferSize(24);
_context->setFormat(format);
_context->create();
_context->makeCurrent(this);
initializeOpenGLFunctions();
_func330 = _context->versionFunctions<QOpenGLFunctions_3_3_Core>();
if (_func330)
_func330->initializeOpenGLFunctions();
else
{
qWarning() << "Could not obtain required OpenGL context version";
exit(1);
}
initGL();
}
paintGL();
}
}
glwindow.h:
#ifndef GL_WINDOW_H
#define GL_WINDOW_H
#include <QExposeEvent>
#include <QSurfaceFormat>
#include <QWindow>
#include <QOpenGLBuffer>
#include <QOpenGLContext>
#include <QOpenGLFunctions>
#include <QOpenGLFunctions_3_3_Core>
#include <QOpenGLShaderProgram>
#include <QOpenGLVertexArrayObject>
class GLWindow : public QWindow, protected QOpenGLFunctions
{
Q_OBJECT
public:
GLWindow(QWindow * = 0);
virtual ~GLWindow();
void initGL();
void paintGL();
void resizeGL(int, int);
protected:
virtual void exposeEvent(QExposeEvent *);
private:
void setupShaders();
QOpenGLBuffer _vbo;
QOpenGLBuffer _matbo;
QOpenGLContext *_context;
QOpenGLShaderProgram *_program;
QOpenGLVertexArrayObject _vao;
QOpenGLFunctions_3_3_Core *_func330;
GLuint _positionAttr;
GLuint _colourAttr;
GLuint _matrixAttr;
size_t _colourOffset;
} ;
#endif
glbuffertest.cpp:
#include <QGuiApplication>
#include <QSurfaceFormat>
#include "glwindow.h"
int main(int argc, char **argv)
{
QGuiApplication app(argc, argv);
GLWindow window;
window.resize(400, 400);
window.show();
return app.exec();
}
glbuffertest.pro:
######################################################################
# Automatically generated by qmake (3.0) Fri May 16 09:49:41 2014
######################################################################
TEMPLATE = app
TARGET = glbuffertest
INCLUDEPATH += .
CONFIG += qt debug
# Input
SOURCES += glbuffertest.cpp glwindow.cpp
HEADERS += glwindow.h
UPDATE:
I've tried getting rid of my _matbo buffer and instead putting the matrix data into the same VBO as the position and colour attributes, but it's not working for me. My initGL function now looks like:
void GLWindow::initGL()
{
setupShaders();
_program->bind();
_positionAttr = _program->attributeLocation("position");
_colourAttr = _program->attributeLocation("colour");
_matrixAttr = _program->attributeLocation("matrix");
QVector<QVector3D> triangles;
triangles << QVector3D(-0.5, 0.5, 1) << QVector3D(-0.5, -0.5, 1) << QVector3D(0.5, -0.5, 1);
triangles << QVector3D(0.5, 0.5, 0.5) << QVector3D(-0.5, -0.5, 0.5) << QVector3D(0.5, -0.5, 0.5);
QVector<QVector3D> colours;
colours << QVector3D(1, 0, 0) << QVector3D(0, 1, 0) << QVector3D(0, 0, 1);
colours << QVector3D(1, 1, 1) << QVector3D(1, 1, 1) << QVector3D(1, 1, 1);
_vao.create();
_vao.bind();
_vbo.create();
_vbo.setUsagePattern(QOpenGLBuffer::StaticDraw);
_vbo.bind();
size_t positionSize = triangles.size() * sizeof(QVector3D);
size_t colourSize = colours.size() * sizeof(QVector3D);
size_t matrixSize = 4 * sizeof(QVector4D);
_vbo.allocate(positionSize + colourSize + matrixSize);
_vbo.bind();
_vbo.write(0, triangles.constData(), positionSize);
_vbo.write(positionSize, colours.constData(), colourSize);
_colourOffset = positionSize;
_matrixOffset = positionSize + colourSize;
_program->setAttributeBuffer(_positionAttr, GL_FLOAT, 0, 3, 0);
_program->setAttributeBuffer(_colourAttr, GL_FLOAT, _colourOffset, 3, 0);
_program->setAttributeBuffer(_matrixAttr, GL_FLOAT, _matrixOffset, 4, 4 * sizeof(QVector4D));
_program->enableAttributeArray(_positionAttr);
_program->enableAttributeArray(_colourAttr);
_program->enableAttributeArray(_matrixAttr);
_func330->glVertexAttribDivisor(_matrixAttr, 1);
_vao.release();
_program->release();
resizeGL(width(), height());
}
and paintGL:
void GLWindow::paintGL()
{
if (! _context) // not yet initialized
return;
_context->makeCurrent(this);
QColor background(Qt::black);
glClearColor(background.redF(), background.greenF(), background.blueF(), 1.0f);
glClearDepth(1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
QMatrix4x4 matrix;
matrix.perspective(60, 4.0/3.0, 0.1, 100.0);
matrix.translate(0, 0, -2);
_program->bind();
_vao.bind();
_vbo.write(_matrixOffset, matrix.constData(), 4 * sizeof(QVector4D));
/* I tried replacing the three preceding lines with the following, without success: */
/*
_vao.bind();
_vbo.bind();
_vbo.write(_matrixOffset, matrix.constData(), 4 * sizeof(QVector4D));
_program->bind();
_program->enableAttributeArray(_matrixAttr);
_func330->glVertexAttribDivisor(_matrixAttr, 1); */
glEnable(GL_DEPTH_TEST);
_func330->glDrawArraysInstanced(GL_TRIANGLES, 0, 6, 1);
_vao.release();
_program->release();
_context->swapBuffers(this);
_context->doneCurrent();
}
So it seems my instancing problems are bigger than just having the wrong buffer bound at the wrong time. What else am I doing wrong?
I think you must create one VBO for positions and one VBO for colors (or use interleaved data with a stride). A VAO allows you to use multiple VBOs, one per attribute.
vao.create();
vao.bind();
// prepare your shader program
// ...
// prepare your VBOs : one VBO for pos, one VBO for colors, one for normals,...
// example for position
vertexPositionBuffer.create();
vertexPositionBuffer.setUsagePattern(QOpenGLBuffer::StaticDraw);
vertexPositionBuffer.bind();
// if you store the points using QVector<QVector3D>
vertexPositionBuffer.allocate(vertices.constData(), vertices.size() * sizeof(QVector3D));
vertexPositionBuffer.release();
// do the same for colors or other attributes
// ...
// after all buffers are created
shaderProgram.bind();
// Bind the position buffer
vertexPositionBuffer.bind();
shaderProgram.enableAttributeArray("vertexPosition");
shaderProgram.setAttributeBuffer("vertexPosition", GL_FLOAT, 0, 3);
vertexPositionBuffer.release();
// do the same for all other buffers
// ...
shaderProgram.release();
// release vao
vao.release();
and in your paintGL function:
// update your matrices
// bind your shader program
// set your uniform variables
// then
vao.bind();
glDrawArrays(GL_TRIANGLES, 0, vertices.size());
vao.release();
// release your shader program
I've got it. The main problems were that:
I had to loop through all four columns of my mat4 attribute, setting and enabling each of them, and calling glVertexAttribDivisor() on each.
I had completely messed up the call to QOpenGLShaderProgram::setAttributeBuffer() for my mat4 attribute.
Essentially, you have to treat a mat4 as four separate vec4 attributes (one for each column). This doesn't affect how you copy QMatrix4x4 data to a QOpenGLBuffer object in the slightest, just how you tell the shader program to deal with the data. This is well described both in the tutorial I linked to in my original question and in The OpenGL Programming Guide's instancing tutorial; I just didn't get it. So, going back to the first attempt at glwindow.cpp above, I've changed very little and things now work:
#include "glwindow.h"
#include <QColor>
#include <QMatrix4x4>
#include <QVector>
#include <QVector3D>
#include <QVector4D>
#include <QDebug>
GLWindow::GLWindow(QWindow *parent)
: QWindow(parent)
, _vbo(QOpenGLBuffer::VertexBuffer)
, _matbo(QOpenGLBuffer::VertexBuffer)
, _context(0)
{
setSurfaceType(QWindow::OpenGLSurface);
}
GLWindow::~GLWindow()
{}
void GLWindow::initGL()
{
setupShaders();
_program->bind();
_positionAttr = _program->attributeLocation("position");
_colourAttr = _program->attributeLocation("colour");
_matrixAttr = _program->attributeLocation("matrix");
QVector<QVector3D> triangles;
triangles << QVector3D(-0.5, 0.5, 1) << QVector3D(-0.5, -0.5, 1) << QVector3D(0.5, -0.5, 1);
triangles << QVector3D(0.5, 0.5, 0.5) << QVector3D(-0.5, -0.5, 0.5) << QVector3D(0.5, -0.5, 0.5);
QVector<QVector3D> colours;
colours << QVector3D(1, 0, 0) << QVector3D(0, 1, 0) << QVector3D(0, 0, 1);
colours << QVector3D(1, 1, 1) << QVector3D(1, 1, 1) << QVector3D(1, 1, 1);
_vao.create();
_vao.bind();
_vbo.create();
_vbo.setUsagePattern(QOpenGLBuffer::StaticDraw);
_vbo.bind();
size_t positionSize = triangles.size() * sizeof(QVector3D);
size_t colourSize = colours.size() * sizeof(QVector3D);
_vbo.allocate(positionSize + colourSize);
_vbo.bind();
_vbo.write(0, triangles.constData(), positionSize);
_vbo.write(positionSize, colours.constData(), colourSize);
_colourOffset = positionSize;
_program->setAttributeBuffer(_positionAttr, GL_FLOAT, 0, 3, 0);
_program->setAttributeBuffer(_colourAttr, GL_FLOAT, _colourOffset, 3, 0);
_program->enableAttributeArray(_positionAttr);
_program->enableAttributeArray(_colourAttr);
_matbo.create();
_matbo.setUsagePattern(QOpenGLBuffer::StaticDraw);
_matbo.bind();
_matbo.allocate(4 * sizeof(QVector4D));
// This is completely wrong
/*_program->setAttributeBuffer(_matrixAttr, GL_FLOAT, 0, 4, 4 * sizeof(QVector4D));
_program->enableAttributeArray(_matrixAttr);
_func330->glVertexAttribDivisor(_matrixAttr, 1);
*/
// The right way to set up a mat4 attribute for instancing
for (unsigned i = 0; i < 4; i++)
{
_program->setAttributeBuffer(_matrixAttr + i, GL_FLOAT, i * sizeof(QVector4D), 4, 4 * sizeof(QVector4D));
_program->enableAttributeArray(_matrixAttr + i);
_func330->glVertexAttribDivisor(_matrixAttr + i, 1);
}
_matbo.release();
_vao.release();
_program->release();
resizeGL(width(), height());
}
void GLWindow::resizeGL(int w, int h)
{
glViewport(0, 0, w, h);
}
void GLWindow::paintGL()
{
if (! _context) // not yet initialized
return;
_context->makeCurrent(this);
QColor background(Qt::black);
glClearColor(background.redF(), background.greenF(), background.blueF(), 1.0f);
glClearDepth(1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
QMatrix4x4 matrix;
matrix.perspective(60, 4.0/3.0, 0.1, 100.0);
matrix.translate(0, 0, -2);
_program->bind();
_vao.bind();
_matbo.bind();
_matbo.write(0, matrix.constData(), 4 * sizeof(QVector4D));
glEnable(GL_DEPTH_TEST);
_func330->glDrawArraysInstanced(GL_TRIANGLES, 0, 6, 1);
_vao.release();
_program->release();
_context->swapBuffers(this);
_context->doneCurrent();
}
void GLWindow::setupShaders()
{
QString vShaderSrc("#version 330\n"
"layout(location = 0) in vec4 position;\n"
"layout(location = 1) in vec4 colour;\n"
"layout(location = 2) in mat4 matrix;\n"
"smooth out vec4 col;\n"
"void main() {\n"
" col = colour;\n"
" gl_Position = matrix * position;\n"
"}\n");
QString fShaderSrc("#version 330\n"
"smooth in vec4 col;\n"
"void main() {\n"
" gl_FragColor = col;\n"
"}\n");
_program = new QOpenGLShaderProgram(this);
_program->addShaderFromSourceCode(QOpenGLShader::Vertex, vShaderSrc);
_program->addShaderFromSourceCode(QOpenGLShader::Fragment, fShaderSrc);
_program->link();
}
void GLWindow::exposeEvent(QExposeEvent *event)
{
Q_UNUSED(event);
if (isExposed())
{
if (! _context)
{
_context = new QOpenGLContext(this);
QSurfaceFormat format(requestedFormat());
format.setVersion(3,3);
format.setDepthBufferSize(24);
_context->setFormat(format);
_context->create();
_context->makeCurrent(this);
initializeOpenGLFunctions();
_func330 = _context->versionFunctions<QOpenGLFunctions_3_3_Core>();
if (_func330)
_func330->initializeOpenGLFunctions();
else
{
qWarning() << "Could not obtain required OpenGL context version";
exit(1);
}
initGL();
}
paintGL();
}
}
Note that I also moved the binding of _matbo and setting up of the mat4 attribute so that it's all done before releasing the VAO. I was initially very confused over how many VBOs were allowed and when they needed to be bound. There's no problem having multiple VBOs inside a single VAO, it's just that the right one needs to be bound to be written to, and the right one needs to be bound before calling QOpenGLShaderProgram::setAttributeBuffer(). It doesn't matter which buffer is bound when glDraw*() is called (I trust someone will comment if I'm wrong about that).
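For reference, here is the setup order that ended up working, condensed into a short sketch (this just restates the calls from the code above in one place; the buffer and attribute names are the ones from my code):

_vao.bind();                       // record attribute state into the VAO
_vbo.bind();                       // per-vertex buffer must be bound while its attributes are set up
_program->setAttributeBuffer(_positionAttr, GL_FLOAT, 0, 3, 0);
_program->setAttributeBuffer(_colourAttr, GL_FLOAT, _colourOffset, 3, 0);
_program->enableAttributeArray(_positionAttr);
_program->enableAttributeArray(_colourAttr);
_matbo.bind();                     // per-instance buffer must be bound before its attributes are set up
for (unsigned i = 0; i < 4; i++)   // a mat4 occupies four consecutive vec4 attribute slots
{
    _program->setAttributeBuffer(_matrixAttr + i, GL_FLOAT, i * sizeof(QVector4D), 4, 4 * sizeof(QVector4D));
    _program->enableAttributeArray(_matrixAttr + i);
    _func330->glVertexAttribDivisor(_matrixAttr + i, 1);
}
_matbo.release();
_vao.release();                    // the VAO now remembers which buffer feeds which attribute
// At draw time: bind the program and the VAO, bind _matbo only to write new matrix data,
// then call glDrawArraysInstanced(); no other buffer binding is needed.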