I have written a small program that renders a 175x175 heightmap. The rendering is done with Qt3D, which is essentially a set of wrappers around OpenGL. The program loads and runs fine on a powerful desktop. However, when I run it on a lower-powered GPU, image updates are very choppy as soon as I start moving the camera around. Rendering a 3D terrain mesh really shouldn't be that difficult for even a small GPU, so I assume I am doing something very wrong. Are there some obvious ways to optimize this code, or am I just expecting too much from a small GPU?
Fragment shader:
https://github.com/qt/qt3d/blob/5.12/src/extras/shaders/es2/phong.inc.frag
Vertex shader:
https://github.com/qt/qt3d/blob/5.12/src/extras/shaders/es2/morphphong.vert
int main(int argc, char* argv[])
{
QGuiApplication app(argc, argv);
Qt3DExtras::Qt3DWindow view;
// Scene Root
Qt3DCore::QEntity *sceneRoot = new Qt3DCore::QEntity();
// Scene Camera
Qt3DRender::QCamera *basicCamera = view.camera();
basicCamera->setProjectionType(Qt3DRender::QCameraLens::PerspectiveProjection);
basicCamera->setUpVector(QVector3D(0.0f, 1.0f, 0.0f));
basicCamera->setViewCenter(QVector3D(60.0f, 15.0f, -60.0f));
basicCamera->setPosition(QVector3D(60.0f, 26.0f, 0.0f));
// For camera controls
Qt3DExtras::QFirstPersonCameraController *camController = new Qt3DExtras::QFirstPersonCameraController(sceneRoot);
camController->setCamera(basicCamera);
// Material
Qt3DRender::QMaterial *material = new Qt3DExtras::QPhongMaterial(sceneRoot);
Qt3DCore::QEntity *customMeshEntity = new Qt3DCore::QEntity(sceneRoot);
// Transform
Qt3DCore::QTransform *transform = new Qt3DCore::QTransform;
Qt3DRender::QGeometryRenderer *customMeshRenderer = new Qt3DRender::QGeometryRenderer;
Qt3DRender::QGeometry *customGeometry = new Qt3DRender::QGeometry(customMeshRenderer);
Qt3DRender::QBuffer *vertexDataBuffer = new Qt3DRender::QBuffer(Qt3DRender::QBuffer::VertexBuffer, customGeometry);
Qt3DRender::QBuffer *indexDataBuffer = new Qt3DRender::QBuffer(Qt3DRender::QBuffer::IndexBuffer, customGeometry);
QImage heightmap("../assets/heightmap.png");
QByteArray vertexBufferData;
vertexBufferData.resize(heightmap.width() * heightmap.height() * (3 + 3 + 3) * sizeof(float));
QVector<QVector3D> vertexPositions;
for (int row = 0; row < heightmap.height(); row++) {
for (int column = 0; column < heightmap.width(); column++) {
vertexPositions.append(QVector3D(row, heightmap.pixelColor(row, column).red() / 8.0, -column)); // note: QImage::pixelColor() takes (x, y), so (row, column) transposes the map; per-pixel calls are also slow
}
}
QVector<QVector3D> vertexNormals;
for (int row = 0; row < heightmap.height(); row++) {
for (int column = 0; column < heightmap.width(); column++) {
int center = (row * heightmap.width()) + column;
int upper = center - heightmap.width();
int lower = center + heightmap.width();
int right = center + 1;
int left = center - 1;
int lowerLeft = center - 1 + heightmap.width();
int upperRight = center + 1 - heightmap.width();
int rightEdge = heightmap.width() - 1;
int bottomEdge = heightmap.height() - 1;
// Calculate normals for each adjacent face and sum
// Check for edge conditions
QVector3D vertexNormal(0, 0, 0);
if (column != 0 && row != 0 ) {
vertexNormal += QVector3D::normal(vertexPositions[center], vertexPositions[upper], vertexPositions[left]);
}
if (column != rightEdge && row != 0) {
vertexNormal += QVector3D::normal(vertexPositions[center], vertexPositions[upperRight], vertexPositions[upper]);
vertexNormal += QVector3D::normal(vertexPositions[center], vertexPositions[right], vertexPositions[upperRight]);
}
if (column != rightEdge && row != bottomEdge) {
vertexNormal += QVector3D::normal(vertexPositions[center], vertexPositions[lower], vertexPositions[right]);
}
if (column != 0 && row != bottomEdge) {
vertexNormal += QVector3D::normal(vertexPositions[center], vertexPositions[lowerLeft], vertexPositions[lower]);
vertexNormal += QVector3D::normal(vertexPositions[center], vertexPositions[left], vertexPositions[lowerLeft]);
}
vertexNormals.append(vertexNormal.normalized());
}
}
// Colors
QVector3D red(1.0f, 0.0f, 0.0f);
QVector3D yellow(1.0f, 1.0f, 0.0f);
QVector3D green(0.0f, 1.0f, 0.0f);
QVector3D blue(0.0f, 0.0f, 1.0f);
QVector3D white(1.0f, 1.0f, 1.0f);
QVector<QVector3D> vertices;
for (int i = 0; i < vertexPositions.count(); i ++) {
vertices.append(vertexPositions[i]);
vertices.append(vertexNormals[i]);
if (vertexPositions[i].y() > 20.0) {
vertices.append(red);
}
else if (vertexPositions[i].y() > 18.0) {
vertices.append(yellow);
}
else {
vertices.append(green);
}
}
float *rawVertexArray = reinterpret_cast<float *>(vertexBufferData.data());
int idx = 0;
for (const QVector3D &v : qAsConst(vertices)) {
rawVertexArray[idx++] = v.x();
rawVertexArray[idx++] = v.y();
rawVertexArray[idx++] = v.z();
}
// Indices
QByteArray indexBufferData;
int indicesCount = (heightmap.height() - 1) * (heightmap.width() - 1) * 2 * 3;
indexBufferData.resize( indicesCount * sizeof(uint));
uint *rawIndexArray = reinterpret_cast<uint *>(indexBufferData.data());
int index = 0;
for (int row = 0; row < heightmap.height()-1; row++) {
for (int column = 0; column < heightmap.width()-1; column++) {
// 1 <- 3
// | /
// | /
// v /
// 2
int vertexBufferIndex = (row * heightmap.width()) + column;
rawIndexArray[index++] = vertexBufferIndex;
rawIndexArray[index++] = vertexBufferIndex + heightmap.width(); // down one row
rawIndexArray[index++] = vertexBufferIndex + 1; // right one column
// 1
// / ^
// / |
// / |
// 2 -> 3
rawIndexArray[index++] = vertexBufferIndex + 1; // right one column
rawIndexArray[index++] = vertexBufferIndex + heightmap.width(); // down one row
rawIndexArray[index++] = vertexBufferIndex + heightmap.width() + 1; // down one row and right one column
}
}
vertexDataBuffer->setData(vertexBufferData);
indexDataBuffer->setData(indexBufferData);
// Attributes
Qt3DRender::QAttribute *positionAttribute = new Qt3DRender::QAttribute();
positionAttribute->setAttributeType( Qt3DRender::QAttribute::VertexAttribute);
positionAttribute->setBuffer(vertexDataBuffer);
positionAttribute->setDataType( Qt3DRender::QAttribute::Float);
positionAttribute->setDataSize(3);
positionAttribute->setByteOffset(0);
positionAttribute->setByteStride(9 * sizeof(float));
positionAttribute->setCount(vertexPositions.count());
positionAttribute->setName( Qt3DRender::QAttribute::defaultPositionAttributeName());
Qt3DRender::QAttribute *normalAttribute = new Qt3DRender::QAttribute();
normalAttribute->setAttributeType( Qt3DRender::QAttribute::VertexAttribute);
normalAttribute->setBuffer(vertexDataBuffer);
normalAttribute->setDataType( Qt3DRender::QAttribute::Float);
normalAttribute->setDataSize(3);
normalAttribute->setByteOffset(3 * sizeof(float));
normalAttribute->setByteStride(9 * sizeof(float));
normalAttribute->setCount(vertexPositions.count());
normalAttribute->setName( Qt3DRender::QAttribute::defaultNormalAttributeName());
Qt3DRender::QAttribute *colorAttribute = new Qt3DRender::QAttribute();
colorAttribute->setAttributeType( Qt3DRender::QAttribute::VertexAttribute);
colorAttribute->setBuffer(vertexDataBuffer);
colorAttribute->setDataType( Qt3DRender::QAttribute::Float);
colorAttribute->setDataSize(3);
colorAttribute->setByteOffset(6 * sizeof(float));
colorAttribute->setByteStride(9 * sizeof(float));
colorAttribute->setCount(vertexPositions.count());
colorAttribute->setName( Qt3DRender::QAttribute::defaultColorAttributeName());
Qt3DRender::QAttribute *indexAttribute = new Qt3DRender::QAttribute();
indexAttribute->setAttributeType( Qt3DRender::QAttribute::IndexAttribute);
indexAttribute->setBuffer(indexDataBuffer);
indexAttribute->setDataType( Qt3DRender::QAttribute::UnsignedInt);
indexAttribute->setDataSize(1);
indexAttribute->setByteOffset(0);
indexAttribute->setByteStride(0);
indexAttribute->setCount(indicesCount);
customGeometry->addAttribute(positionAttribute);
customGeometry->addAttribute(normalAttribute);
customGeometry->addAttribute(colorAttribute);
customGeometry->addAttribute(indexAttribute);
customMeshRenderer->setInstanceCount(1);
customMeshRenderer->setFirstVertex(0);
customMeshRenderer->setFirstInstance(0);
customMeshRenderer->setPrimitiveType(Qt3DRender::QGeometryRenderer::Triangles);
customMeshRenderer->setGeometry(customGeometry);
customMeshEntity->addComponent(customMeshRenderer);
customMeshEntity->addComponent(transform);
customMeshEntity->addComponent(material);
view.setRootEntity(sceneRoot);
view.show();
return app.exec();
}
The Qt blog has a very good write-up on optimizing Qt3D applications for low-end hardware.
https://blog.qt.io/blog/2019/04/02/optimizing-real-time-3d-entry-level-hardware/
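One cheap experiment in the same spirit: switch Qt3D from its default render-every-vsync policy to on-demand rendering, so the scene is only redrawn when something actually changes. A minimal sketch (assuming Qt3DWindow::renderSettings() and the OnDemand policy are available in your Qt version; it would go in main() before view.show()):
// Only redraw when the scene graph or the camera changes,
// instead of re-rendering a static scene on every vsync.
view.renderSettings()->setRenderPolicy(Qt3DRender::QRenderSettings::OnDemand);
This won't make camera motion itself cheaper, but it stops a low-end GPU from burning time on identical frames.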
Related
For the last week or so I have been struggling to find a resource that will allow me to make something like the 2D Petrie polygon diagrams in this article.
My main trouble is finding out what the rules are for the edge and node connections.
I.e., in this plot, is there a simple way to make the image from scratch (even if it is not fully representative of the bigger theory behind it)?
Any help is massively appreciated!
Here is how I solved this problem for E8!
// to run
// clink -c Ex8
// ./Ex8
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "dislin.h"
// method to generate all permutations of a set with repeated elements,
// used to build the E8 root system
float root_sys[240][8];
int count = 0;
/// checks elements in root system to see if they should be permuted
int shouldSwap(float base[], int start, int curr)
{
for (int i = start; i < curr; i++)
if (base[i] == base[curr])
return 0;
return 1;
}
/// performs permutations of root system
void permutations(float base[], int index, int n)
{
if (index >= n) {
for(int i = 0; i < n; i++){
root_sys[count][i] = base[i];
}
count++;
return;
}
for (int i = index; i < n; i++) {
int check = shouldSwap(base, index, i);
if (check) {
float temp_0 = base[index];
float temp_1 = base[i];
base[index] = temp_1;
base[i] = temp_0;
permutations(base, index + 1, n);
float temp_2 = base[index];
float temp_3 = base[i];
base[index] = temp_3;
base[i] = temp_2;
}
}
}
// squared Euclidean distance between two roots (used for node connections)
float inner_product(float * vect_0, float * vect_1){
float sum = 0;
for(int i = 0; i < 8; i++){
sum = sum + ((vect_0[i] - vect_1[i]) * (vect_0[i] - vect_1[i]));
}return sum;
}
/// inner (dot) product function
float inner_product_plus(float * vect_0, float * vect_1){
float sum = 0;
for(int i = 0; i < 8; i++){
sum = sum + (vect_0[i] * vect_1[i]);
}return sum;
}
int main(void){
// base vector permutations of E8 root system
float base_sys[8][8] = {
{1,1,0,0,0,0,0,0},
{1,-1,0,0,0,0,0,0},
{-1,-1,0,0,0,0,0,0},
{0.5,0.5,-0.5,-0.5,-0.5,-0.5,-0.5,-0.5},
{0.5,0.5,0.5,0.5,-0.5,-0.5,-0.5,-0.5},
{0.5,0.5,0.5,0.5,0.5,0.5,-0.5,-0.5},
{0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5},
{-0.5,-0.5,-0.5,-0.5,-0.5,-0.5,-0.5,-0.5}
};
//permute the base vectors
for(int i = 0; i < 8; i++){
permutations(base_sys[i],0,8);
}
//calculating distances between all roots, outputting correspondence matrix
int distance_matrix[240][240];
for(int i = 0; i < 240; i++){
    for(int ii = 0; ii < 240; ii++){
        float dist = inner_product(root_sys[i], root_sys[ii]);
        if(dist == 2){ //connecting distance in E8
            distance_matrix[i][ii] = 1;
        }else{distance_matrix[i][ii] = 0;} // was '==', a no-op comparison
    }
}
//use another program to calculate eigenvectors of the root system... after some fiddling, these vectors appear
float re[8] = {0.438217070641, 0.205187681291,
0.36459828198, 0.0124511903657,
-0.0124511903657, -0.36459828198,
-0.205187681291, -0.67645247517};
float im[8] = {-0.118465163028, 0.404927414852,
0.581970822973, 0.264896157496,
0.501826483552, 0.345040496917,
0.167997088796, 0.118465163028};
//define co-ordinate system for relevant points
float rings_x[240];
float rings_y[240];
//decide on which points belong to the system
for(int i = 0; i < 240; i++){
float current_point[8];
for(int ii = 0; ii < 8; ii++){
current_point[ii] = root_sys[i][ii];
}
rings_x[i] = inner_product_plus(current_point, re);
rings_y[i] = inner_product_plus(current_point, im);
}
//graph the system using DISLIN library
scrmod("revers");
setpag("da4l");
metafl("cons");
disini();
graf(-1.2, 1.2, -1.2, 1.2, -1.2, 1.2, -1.2, 1);
// a connection appears depending on the previously calculated distance matrix
for(int i = 0; i < 240; i++){
for(int ii = 0; ii < 240; ii++){
int connect = distance_matrix[i][ii];
if(connect == 1){
rline(rings_x[i], rings_y[i], rings_x[ii], rings_y[ii]);
distance_matrix[ii][i] = 0;
}else{continue;}
}
}
// More DISLIN functions
titlin("E8", 1);
name("R-axis", "x");
name("I-axis", "y");
marker(21);
hsymbl(15);
qplsca(rings_x, rings_y, 240);
return 0;
}
Extra points to anyone who can explain how to rotate the 2D plot to create a 3D animation of this object!
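One hedged route to that animation, reusing the post's inner_product_plus helper: project each root onto a third vector w to get a depth coordinate, then rotate the (x, z) pair about the y axis by a growing angle before every redraw. The vector w is an assumption here; it would need to be chosen orthogonal to re and im, e.g. a further eigenvector from the same external calculation.
#include <math.h>
/* Hedged sketch: w[8] is hypothetical (a third projection vector orthogonal
   to re and im). base_x holds the unrotated projections onto re, i.e. the
   rings_x computed above. Call once per frame with an increasing angle t,
   then redraw the edges with the rotated coordinates. */
void spin_frame(float w[8], float t, float root_sys[240][8],
                float base_x[240], float rings_x[240], float rings_z[240])
{
    float c = cosf(t), s = sinf(t);
    for (int i = 0; i < 240; i++) {
        float z = inner_product_plus(root_sys[i], w);
        rings_x[i] = c * base_x[i] + s * z;   /* rotated screen x */
        rings_z[i] = -s * base_x[i] + c * z;  /* depth; y is the spin axis */
    }
}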
I am using an LIS3MDL magnetometer in my quadcopter project to compensate for gyroscope drift. Unfortunately, I'm having problems, probably with calibration.
I've captured max and min values (oddly, they span 14 bits instead of 16) and calculated the biases like this:
biases[i] = (MAX_VALUES[i]+MIN_VALUES[i])/2;
(where i represents each of the 3 axes).
I've subtracted the biases from the raw values, x = (double)new_x-biases[0]; (etc.), and then wanted to calculate the heading like this:
heading = atan2(x,y);
heading += declinationAngle;
where the declination angle is calculated.
The outcome is an angle (converted from radians via heading*(180/M_PI)), and it changes when I rotate the quad about the yaw axis, BUT when I rotate it about the roll or pitch axis the value changes as well. I want a stable yaw value that does not change when I rotate the object about the other axes. Maybe some kind of fusion with the accelerometer?
I am not sure where I've made a mistake in my calculations...
Whole class:
class Magnetometer {
int x=0,y=0,z=0;
LIS3MDL mag;
int running_min[3] = {32767, 32767, 32767}, running_max[3] = {-32768, -32768, -32768};
double mag_norm = 0.0;
double declinationAngle = 0.0;
double heading=0.0;
const int MAX_VALUES[3] = {3014,3439,10246};
const int MIN_VALUES[3] = {-4746, -4110, 492};
double biases[3] = {0.0};
double scales[3] = {0.0};
double avg_scale = 0.0;
ButterworthDLPF xyz_filter[3];
bool DLPF_ON = true;
const float sample_rate = MAG_SAMPLE_RATE;
const float cutoff_freq = 4.0;
public:
Magnetometer() {}
void Init() {
declinationAngle = (6.0 + (8.0 / 60.0)) / (180.0 / M_PI);
for(int i=0; i<3; i++) {
biases[i] = (MAX_VALUES[i]+MIN_VALUES[i])/2;
scales[i] = (MAX_VALUES[i]-MIN_VALUES[i])/2;
}
avg_scale = (scales[0]+scales[1]+scales[2])/3.0;
for(int i=0; i<3; i++) scales[i] = avg_scale / scales[i];
Serial.println("Turning on magnetometer. . .");
if(!mag.init()) {
Serial.println("Failed to detect magnetometer!");
ESP.restart();
}
mag.enableDefault();
//Calibrate();
for(int i=0; i<3; i++) xyz_filter[i].Init(sample_rate, cutoff_freq);
Serial.println("9DOF readdy!");
}
void Calibrate() {
delay(100);
while(true) {
mag.read();
if(running_max[0]<mag.m.x) running_max[0] = mag.m.x;
if(running_max[1]<mag.m.y) running_max[1] = mag.m.y;
if(running_max[2]<mag.m.z) running_max[2] = mag.m.z;
if(running_min[0]>mag.m.x) running_min[0] = mag.m.x;
if(running_min[1]>mag.m.y) running_min[1] = mag.m.y;
if(running_min[2]>mag.m.z) running_min[2] = mag.m.z;
Serial.println((String)running_max[0]+" "+(String)running_max[1]+" "+(String)running_max[2]+ " "+(String)running_min[0] +" "+(String)running_min[1]+" "+(String)running_min[2]);
delay(20);
}
}
void Update(){
mag.read();
xyz_filter[0].Update(mag.m.x);
xyz_filter[1].Update(mag.m.y);
xyz_filter[2].Update(mag.m.z);
//Serial.println(xyz_filter[0].getData());
/*x = ((double)xyz_filter[0].getData()-biases[0])*scales[0];
y = ((double)xyz_filter[1].getData()-biases[1])*scales[1];
z = ((double)xyz_filter[2].getData()-biases[2])*scales[2];*/
x = ((double)mag.m.x-biases[0])*scales[0];
y = ((double)mag.m.y-biases[1])*scales[1];
z = ((double)mag.m.z-biases[2])*scales[2];
CalculateHeading();
}
void CalculateHeading() {
heading = atan2(y,x);
heading += declinationAngle;
//if(heading<0) heading += 2*PI;
//else if(heading>2*PI) heading -= 2*PI;
heading=MOD(heading*(180/M_PI));
}
double GetHeading() {return heading;}
void ShowRawValues(bool names=false) {
if(names) Serial.print("X: "+(String)x+" Y: "+ (String)y+ " Z: " + (String)z);
else Serial.print((String)x+" "+ (String)y+ " " + (String)z);
}
};
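On the fusion idea in the question above: the usual cause of a heading that moves with roll and pitch is that atan2 needs the horizontal components of the magnetic field, which requires tilt compensation from accelerometer angles. A minimal sketch, not from the original post (the axis conventions and signs depend on how the sensor is mounted, so treat them as assumptions):
#include <math.h>
// Hedged sketch: mx,my,mz = calibrated magnetometer readings; ax,ay,az =
// accelerometer readings (any scale, only the ratios matter).
// Returns the heading in degrees, 0..360.
double TiltCompensatedHeading(double mx, double my, double mz,
                              double ax, double ay, double az,
                              double declinationAngle)
{
    double roll  = atan2(ay, az);
    double pitch = atan2(-ax, sqrt(ay * ay + az * az));
    // Rotate the magnetic vector back onto the horizontal plane
    double xh = mx * cos(pitch) + mz * sin(pitch);
    double yh = mx * sin(roll) * sin(pitch) + my * cos(roll)
              - mz * sin(roll) * cos(pitch);
    double heading = atan2(yh, xh) + declinationAngle;
    if (heading < 0) heading += 2.0 * M_PI;
    if (heading >= 2.0 * M_PI) heading -= 2.0 * M_PI;
    return heading * 180.0 / M_PI;
}
This only holds while the quad is not accelerating hard; under thrust the accelerometer no longer measures pure gravity, which is where a proper complementary or Kalman filter with the gyroscope comes in.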
I have data from a camera in mono 8-bit.
This is converted into an int vector using:
std::vector<int> grayVector(size);
// convert / copy pointer data into vector: 8 bit
if (static_cast<XI_IMG_FORMAT>(format) == XI_MONO8)
{
quint8* imageIterator = reinterpret_cast<quint8*> (pMemVoid);
for (size_t count = 0; count < size; ++count)
{
grayVector[count] = static_cast<int>(*imageIterator);
imageIterator++;
}
}
Next, I need to convert this into a QImage. If I set the image format to QImage::Format_Mono the app crashes. With QImage::Format_RGB16 I get stripes, and with QImage::Format_RGB32 everything is black.
What is the best, most efficient, and correct way to do this?
// convert gray values into QImage data
QImage image = QImage(static_cast<int>(sizeX), static_cast<int>(sizeY), QImage::Format_RGB16);
for ( int y = 0; y < sizeY; ++y )
{
int yoffset = sizeY*y; // bug: the row stride should be sizeX (the width), not sizeY
QRgb *line = reinterpret_cast<QRgb *>(image.scanLine(y)); // bug: Format_RGB16 rows hold 16-bit pixels, not 32-bit QRgb values
for ( int x = 0; x < sizeX ; ++x )
{
int pos = x + yoffset;
int color = grayVector[static_cast<size_t>(pos)];
*line++ = qRgb(color, color, color);
}
}
The conversion to int is unnecessary, and you do it in a very inefficient way; all you need is QImage::Format_Grayscale8, available since Qt 5.5 (mid-2015).
Anyway, what you really want is a way to go from XI_IMG to QImage. The default BP_UNSAFE buffering policy should be adequate: QImage does its own format conversion, so taking the data straight from xiAPI's internal buffer is OK. Thus the following; all of the conversions are implemented in Qt and are quite efficient, much better than most naive code.
I didn't check whether some Xi formats may need a BGR swap. If one does, the swap flag can be set to true in the format-selection code and the rest will happen automatically.
See also: xiAPI manual.
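Before the general converter below, here is the Grayscale8 point applied directly to the asker's XI_MONO8 case, as a minimal sketch (pMemVoid, sizeX and sizeY are the question's variables; it assumes rows are tightly packed, i.e. padding_x == 0):
QImage view(static_cast<const uchar*>(pMemVoid),
            static_cast<int>(sizeX), static_cast<int>(sizeY),
            static_cast<int>(sizeX) /*bytesPerLine*/,
            QImage::Format_Grayscale8);
QImage frame = view.copy(); // deep copy; detaches from the camera buffer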
static QVector<QRgb> grayScaleColorTable() {
static QVector<QRgb> table;
if (table.isEmpty()) {
table.resize(256);
auto *data = table.data();
for (int i = 0; i < table.size(); ++i)
data[i] = qRgb(i, i, i);
}
return table;
}
constexpr QImage::Format grayScaleFormat() {
return (QT_VERSION >= QT_VERSION_CHECK(5,5,0))
? QImage::Format_Grayscale8
: QImage::Format_Indexed8;
}
QImage convertToImage(const XI_IMG *src, QImage::Format f) {
Q_ASSERT(src->fmt == XI_MONO16);
Q_ASSERT((src->padding_x % 2) == 0);
if (src->fmt != XI_MONO16) return {};
const quint16 *s = static_cast<const quint16*>(src->bp);
const int s_pad = src->padding_x/2;
if (f == QImage::Format_BGR30 ||
f == QImage::Format_A2BGR30_Premultiplied ||
f == QImage::Format_RGB30 ||
f == QImage::Format_A2RGB30_Premultiplied)
{
QImage ret{src->width, src->height, f};
Q_ASSERT((ret.bytesPerLine() % 4) == 0); // ret is an object, not a pointer
const int d_pad = ret.bytesPerLine()/4 - ret.width();
quint32 *d = (quint32*)ret.bits();
if (s_pad == d_pad) {
const int N = (src->width + s_pad) * src->height - s_pad;
for (int i = 0; i < N; ++i) {
quint32 const v = (*s++) >> (16-10);
*d++ = 0xC0000000 | v << 20 | v << 10 | v;
}
} else {
for (int j = 0; j < src->height; ++j) {
for (int i = 0; i < src->width; ++i) {
quint32 const v = (*s++) >> (16-10);
*d++ = 0xC0000000u | v << 20 | v << 10 | v;
}
s += s_pad;
d += d_pad;
}
}
return ret;
}
QImage ret{src->width, src->height, grayScaleFormat()};
const int d_pad = ret.bytesPerLine() - ret.width();
auto *d = ret.bits();
if (s_pad == d_pad) {
    const int N = (src->width + s_pad) * src->height - s_pad;
    for (int i = 0; i < N; ++i)
        *d++ = (*s++) >> 8;
} else {
    for (int j = 0; j < src->height; ++j) {
        for (int i = 0; i < src->width; ++i)
            *d++ = (*s++) >> 8;
        s += s_pad;
        d += d_pad;
    }
}
return ret;
}
QImage fromXiImg(const XI_IMG *src, QImage::Format dstFormat = QImage::Format_ARGB32Premultiplied) {
Q_ASSERT(src->width > 0 && src->height > 0 && src->padding_x >= 0 && src->bp_size > 0);
Q_ASSERT(dstFormat != QImage::Format_Invalid);
bool swap = false;
int srcPixelBytes = 0;
bool externalConvert = false;
QImage::Format srcFormat = QImage::Format_Invalid;
switch (src->fmt) {
case XI_MONO8:
srcPixelBytes = 1;
srcFormat = grayScaleFormat();
break;
case XI_MONO16:
srcPixelBytes = 2;
externalConvert = true;
break;
case XI_RGB24:
srcPixelBytes = 3;
srcFormat = QImage::Format_RGB888;
break;
case XI_RGB32:
srcPixelBytes = 4;
srcFormat = QImage::Format_RGB32;
break;
default:
    break;
}
if (srcFormat == QImage::Format_Invalid && !externalConvert) {
qWarning("Unhandled XI_IMG image format");
return {};
}
Q_ASSERT(srcPixelBytes > 0 && srcPixelBytes <= 4);
int bytesPerLine = src->width * srcPixelBytes + src->padding_x;
if ((bytesPerLine * src->height - src->padding_x) > src->bp_size) {
qWarning("Inconsistent XI_IMG data");
return {};
}
QImage ret;
if (!externalConvert)
ret = QImage{static_cast<const uchar*>(src->bp), src->width, src->height,
bytesPerLine, srcFormat};
else
ret = convertToImage(src, dstFormat);
if (ret.format() == QImage::Format_Indexed8)
ret.setColorTable(grayScaleColorTable());
if (ret.format() != dstFormat)
ret = std::move(ret).convertToFormat(dstFormat);
if (swap)
ret = std::move(ret).rgbSwapped();
if (!ret.isDetached()) // ensure that we don't share XI_IMG's data buffer
ret.detach();
return ret;
}
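A hedged usage sketch for completeness (handle is an assumed open xiAPI device handle and label a QLabel; the img.size initialization follows the xiAPI manual linked above):
XI_IMG img;
memset(&img, 0, sizeof img);
img.size = sizeof(XI_IMG);                  // required by xiAPI
if (xiGetImage(handle, 5000 /*ms timeout*/, &img) == XI_OK) {
    QImage frame = fromXiImg(&img);         // deep-copied; the buffer can be reused
    label.setPixmap(QPixmap::fromImage(frame));
}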
I would like to display an image in a Qt window, so I used QLabel->setPixmap,
but how can I convert from an IplImage to a QImage to display it in the label?
I found the following function to convert it, but I did not know how to use it in the call statement:
QImage *IplImageToQImage(const IplImage * iplImage, uchar **data, double mini, double maxi)
{
uchar *qImageBuffer = NULL;
int width = iplImage->width;
int widthStep = iplImage->widthStep;
int height = iplImage->height;
switch (iplImage->depth)
{
case IPL_DEPTH_8U:
if (iplImage->nChannels == 1)
{
// OpenCV image is stored with one byte grey pixel. We convert it
// to an 8 bit depth QImage.
//
qImageBuffer = (uchar *) malloc(width*height*sizeof(uchar));
uchar *QImagePtr = qImageBuffer;
const uchar *iplImagePtr = (const uchar *) iplImage->imageData;
for (int y = 0; y < height; y++)
{
// Copy line by line
memcpy(QImagePtr, iplImagePtr, width);
QImagePtr += width;
iplImagePtr += widthStep;
}
}
else if (iplImage->nChannels == 3)
{
/* OpenCV image is stored with 3 byte color pixels (3 channels).
We convert it to a 32 bit depth QImage.
*/
qImageBuffer = (uchar *) malloc(width*height*4*sizeof(uchar));
uchar *QImagePtr = qImageBuffer;
const uchar *iplImagePtr = (const uchar *) iplImage->imageData;
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
// We cannot help but copy manually.
QImagePtr[0] = iplImagePtr[0];
QImagePtr[1] = iplImagePtr[1];
QImagePtr[2] = iplImagePtr[2];
QImagePtr[3] = 0;
QImagePtr += 4;
iplImagePtr += 3;
}
iplImagePtr += widthStep-3*width;
}
}
else
{
qDebug("IplImageToQImage: image format is not supported : depth=8U and %d channels\n", iplImage->nChannels);
}
break;
case IPL_DEPTH_16U:
if (iplImage->nChannels == 1)
{
/* OpenCV image is stored with 2 bytes grey pixel. We convert it
to an 8 bit depth QImage.
*/
qImageBuffer = (uchar *) malloc(width*height*sizeof(uchar));
uchar *QImagePtr = qImageBuffer;
//const uint16_t *iplImagePtr = (const uint16_t *);
const unsigned int *iplImagePtr = (const unsigned int *)iplImage->imageData;
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
// We take only the highest part of the 16 bit value. It is
//similar to dividing by 256.
*QImagePtr++ = ((*iplImagePtr++) >> 8);
}
iplImagePtr += widthStep/sizeof(unsigned int)-width;
}
}
else
{
qDebug("IplImageToQImage: image format is not supported : depth=16U and %d channels\n", iplImage->nChannels);
}
break;
case IPL_DEPTH_32F:
if (iplImage->nChannels == 1)
{
/* OpenCV image is stored with float (4 bytes) grey pixel. We
convert it to an 8 bit depth QImage.
*/
qImageBuffer = (uchar *) malloc(width*height*sizeof(uchar));
uchar *QImagePtr = qImageBuffer;
const float *iplImagePtr = (const float *) iplImage->imageData;
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
uchar p;
float pf = 255 * ((*iplImagePtr++) - mini) / (maxi - mini);
if (pf < 0) p = 0;
else if (pf > 255) p = 255;
else p = (uchar) pf;
*QImagePtr++ = p;
}
iplImagePtr += widthStep/sizeof(float)-width;
}
}
else
{
qDebug("IplImageToQImage: image format is not supported : depth=32F and %d channels\n", iplImage->nChannels);
}
break;
case IPL_DEPTH_64F:
if (iplImage->nChannels == 1)
{
/* OpenCV image is stored with double (8 bytes) grey pixel. We
convert it to an 8 bit depth QImage.
*/
qImageBuffer = (uchar *) malloc(width*height*sizeof(uchar));
uchar *QImagePtr = qImageBuffer;
const double *iplImagePtr = (const double *) iplImage->imageData;
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
uchar p;
double pf = 255 * ((*iplImagePtr++) - mini) / (maxi - mini);
if (pf < 0) p = 0;
else if (pf > 255) p = 255;
else p = (uchar) pf;
*QImagePtr++ = p;
}
}
}
else
{
qDebug("IplImageToQImage: image format is not supported : depth=64F and %d channels\n", iplImage->nChannels);
}
break;
default:
qDebug("IplImageToQImage: image format is not supported : depth=%d and %d channels\n", iplImage->depth, iplImage->nChannels);
}
QImage *qImage;
QVector<QRgb> vcolorTable;
if (iplImage->nChannels == 1)
{
// We should check who is going to destroy this allocation.
QRgb *colorTable = new QRgb[256];
for (int i = 0; i < 256; i++)
{
colorTable[i] = qRgb(i, i, i);
vcolorTable[i] = colorTable[i]; // bug: vcolorTable was never resized, so this writes out of bounds (fixed in the answer below)
}
qImage = new QImage(qImageBuffer, width, height, QImage::Format_Indexed8);
qImage->setColorTable(vcolorTable);
}
else
{
qImage = new QImage(qImageBuffer, width, height, QImage::Format_RGB32);
}
*data = qImageBuffer;
return qImage;
}
The parameters were:
const IplImage * iplImage, uchar **data, double mini, double maxi
What are data, mini, and maxi? How can I get them from my IplImage to use in the call statement?
Thanks a lot :)
Looks like data is only written by the code (it receives the allocated buffer pointer at the end), and mini and maxi are used for converting the floating-point values that certain image formats use to integer values in the range 0-255.
Pass the address of a uchar* variable for data; passing NULL would crash, since the function writes through it. mini and maxi really depend on the image data, and I don't know what reasonable ranges are, but if your IplImage is not stored as floating-point values then these values shouldn't make any difference.
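Put concretely, a hedged call-site sketch (the 0.0/255.0 range is arbitrary and only matters for the 32F/64F paths):
uchar *buffer = NULL;                        // receives the pixel buffer
QImage *qimg = IplImageToQImage(iplImage, &buffer, 0.0, 255.0);
label->setPixmap(QPixmap::fromImage(*qimg));
// The caller appears to own both allocations:
// delete qimg; free(buffer);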
You can simply create a QImage where the data is owned by something else (e.g. the IplImage) using the QImage(data, width, height, format) constructor, where data is the IplImage data pointer, as long as the format is the same in both QImage and IplImage (e.g. Format_RGB888 = 8U_C3).
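A minimal sketch of that zero-copy route, assuming an 8-bit, 3-channel IplImage (OpenCV stores BGR, so a channel swap is usually needed, and the wrapping QImage is only valid while the IplImage stays alive):
QImage view(reinterpret_cast<const uchar*>(iplImage->imageData),
            iplImage->width, iplImage->height,
            iplImage->widthStep,             // row stride, padding included
            QImage::Format_RGB888);
QImage copy = view.rgbSwapped();             // BGR -> RGB; also deep-copies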
I have found some bugs in the code... maybe there are still more, but for now it looks fine to me. A QImage with Format_Indexed8 sometimes (depending on the image resolution) has 2 extra bytes added on the right side of each row; this is because QImage scanlines are padded to 32-bit boundaries.
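A quick way to see that padding (a sketch; it shows up for any width that is not a multiple of 4, because of the 32-bit scanline alignment):
QImage img(174, 174, QImage::Format_Indexed8);
// img.width() == 174, but img.bytesPerLine() == 176: 2 padding bytes per row.
// Moral: copy row by row via scanLine(), never with a single memcpy.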
Here is the new adapted code
QImage *IplImageToQImage(const IplImage * iplImage, uchar **data, double mini, double maxi)
{
uchar *qImageBuffer = NULL;
int width = iplImage->width;
int widthStep = iplImage->widthStep;
int height = iplImage->height;
QImage *qImage = NULL; // initialized so an unsupported format fails predictably
switch (iplImage->depth)
{
case IPL_DEPTH_8U:
if (iplImage->nChannels == 1)
{
// OpenCV image is stored with one byte grey pixel. We convert it
// to an 8 bit depth QImage.
qImage = new QImage(width,height,QImage::Format_Indexed8);
uchar *QImagePtr = qImage->scanLine(0);
qImageBuffer = qImage->scanLine(0);
const uchar *iplImagePtr = (const uchar *) iplImage->imageData;
for (int y = 0; y < height; y++)
{
// Copy line by line
QImagePtr = qImage->scanLine(y);
memcpy(QImagePtr, iplImagePtr, width);
iplImagePtr += widthStep;
}
/*
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
// We take only the highest part of the 16 bit value. It is
//similar to dividing by 256.
//*QImagePtr++ = ((*iplImagePtr++) >> 8);
*QImagePtr = *iplImagePtr;
QImagePtr++;
iplImagePtr++;
}
iplImagePtr += widthStep/sizeof(uchar)-width;
}*/
}
else if (iplImage->nChannels == 3)
{
/* OpenCV image is stored with 3 byte color pixels (3 channels).
We convert it to a 32 bit depth QImage.
*/
qImageBuffer = (uchar *) malloc(width*height*4*sizeof(uchar));
uchar *QImagePtr = qImageBuffer;
const uchar *iplImagePtr = (const uchar *) iplImage->imageData;
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
// We cannot help but copy manually.
QImagePtr[0] = iplImagePtr[0];
QImagePtr[1] = iplImagePtr[1];
QImagePtr[2] = iplImagePtr[2];
QImagePtr[3] = 0;
QImagePtr += 4;
iplImagePtr += 3;
}
iplImagePtr += widthStep-3*width;
}
}
else
{
qDebug("IplImageToQImage: image format is not supported : depth=8U and %d channels\n", iplImage->nChannels);
}
break;
case IPL_DEPTH_16U:
if (iplImage->nChannels == 1)
{
/* OpenCV image is stored with 2 bytes grey pixel. We convert it
to an 8 bit depth QImage.
*/
qImage = new QImage(width,height,QImage::Format_Indexed8);
uchar *QImagePtr = qImage->scanLine(0);
qImageBuffer = qImage->scanLine(0);
//const uint16_t *iplImagePtr = (const uint16_t *);
const unsigned short *iplImagePtr = (const unsigned short *)iplImage->imageData;
for (int y = 0; y < height; y++)
{
QImagePtr = qImage->scanLine(y);
for (int x = 0; x < width; x++)
{
// We take only the highest part of the 16 bit value. It is
//similar to dividing by 256.
//*QImagePtr++ = ((*iplImagePtr++) >> 8);
//change here 16 bit could be everything !! set max min to your desire
*QImagePtr = 255*(((*iplImagePtr) - mini) / (maxi - mini));
QImagePtr++;
iplImagePtr++;
}
iplImagePtr += widthStep/sizeof(unsigned short)-width;
}
}
else
{
qDebug("IplImageToQImage: image format is not supported : depth=16U and %d channels\n", iplImage->nChannels);
}
break;
case IPL_DEPTH_32F:
if (iplImage->nChannels == 1)
{
/* OpenCV image is stored with float (4 bytes) grey pixel. We
convert it to an 8 bit depth QImage.
*/
qImage = new QImage(width,height,QImage::Format_Indexed8);
uchar *QImagePtr = qImage->scanLine(0);
qImageBuffer = qImage->scanLine(0);
const float *iplImagePtr = (const float *) iplImage->imageData;
for (int y = 0; y < height; y++)
{
QImagePtr = qImage->scanLine(y);
for (int x = 0; x < width; x++)
{
uchar p;
float pf = 255 * ((*iplImagePtr++) - mini) / (maxi - mini);
if (pf < 0) p = 0;
else if (pf > 255) p = 255;
else p = (uchar) pf;
*QImagePtr++ = p;
}
iplImagePtr += widthStep/sizeof(float)-width;
}
}
else
{
qDebug("IplImageToQImage: image format is not supported : depth=32F and %d channels\n", iplImage->nChannels);
}
break;
case IPL_DEPTH_64F:
if (iplImage->nChannels == 1)
{
/* OpenCV image is stored with double (8 bytes) grey pixel. We
convert it to an 8 bit depth QImage.
*/
qImage = new QImage(width,height,QImage::Format_Indexed8);
uchar *QImagePtr = qImage->scanLine(0);
qImageBuffer = qImage->scanLine(0);
const double *iplImagePtr = (const double *) iplImage->imageData;
for (int y = 0; y < height; y++)
{
QImagePtr = qImage->scanLine(y);
for (int x = 0; x < width; x++)
{
uchar p;
double pf = 255 * ((*iplImagePtr++) - mini) / (maxi - mini);
if (pf < 0) p = 0;
else if (pf > 255) p = 255;
else p = (uchar) pf;
*QImagePtr++ = p;
}
}
}
else
{
qDebug("IplImageToQImage: image format is not supported : depth=64F and %d channels\n", iplImage->nChannels);
}
break;
default:
qDebug("IplImageToQImage: image format is not supported : depth=%d and %d channels\n", iplImage->depth, iplImage->nChannels);
}
QVector<QRgb> vcolorTable;
if (iplImage->nChannels == 1)
{
// We should check who is going to destroy this allocation.
vcolorTable.resize(256);
for (int i = 0; i < 256; i++)
{
vcolorTable[i] = qRgb(i, i, i);
}
//The QImage was already allocated with the Qt constructor in the switch above.
//(Format_Indexed8 scanlines are padded to 32-bit boundaries, which is where
//the occasional extra bytes on the right side come from.)
qImage->setColorTable(vcolorTable);
}
else
{
qImage = new QImage(qImageBuffer, width, height, QImage::Format_RGB32);
}
*data = qImageBuffer;
return qImage;
}
I don't know if the 3-channel path has the same bug, but I hope not.
Qt 4: How to blur a QPixmap image?
I am looking for something like one of the following:
Blur(pixmap);
painter.Blur();
painter.Blur(rect);
What is the best way to do this?
1st) Declare the Qt-internal blur routine (private API, but exported):
QT_BEGIN_NAMESPACE
extern Q_WIDGETS_EXPORT void qt_blurImage( QPainter *p, QImage &blurImage, qreal radius, bool quality, bool alphaOnly, int transposed = 0 );
QT_END_NAMESPACE
2nd) Use:
extern QImage srcImg;//source image
QPixmap pxDst( srcImg.size() );//blurred destination
pxDst.fill( Qt::transparent );
{
QPainter painter( &pxDst );
qt_blurImage( &painter, srcImg, 2, true, false );//blur radius: 2px
}
Let's contribute to this topic. As of Qt 5.3, the following function will help you a lot with applying a QGraphicsEffect to a QImage (without losing the alpha):
QImage applyEffectToImage(QImage src, QGraphicsEffect *effect, int extent = 0)
{
if(src.isNull()) return QImage(); //No need to do anything else!
if(!effect) return src; //No need to do anything else!
QGraphicsScene scene;
QGraphicsPixmapItem item;
item.setPixmap(QPixmap::fromImage(src));
item.setGraphicsEffect(effect);
scene.addItem(&item);
QImage res(src.size()+QSize(extent*2, extent*2), QImage::Format_ARGB32);
res.fill(Qt::transparent);
QPainter ptr(&res);
scene.render(&ptr, QRectF(), QRectF( -extent, -extent, src.width()+extent*2, src.height()+extent*2 ) );
return res;
}
Then, using this function to blur your image is straightforward:
QGraphicsBlurEffect *blur = new QGraphicsBlurEffect;
blur->setBlurRadius(8);
QImage source("://img1.png");
QImage result = applyEffectToImage(source, blur);
result.save("final.png");
Of course, you don't need to save it, this was just an example of usefulness.
You can even drop a shadow:
QGraphicsDropShadowEffect *e = new QGraphicsDropShadowEffect;
e->setColor(QColor(40,40,40,245));
e->setOffset(0,10);
e->setBlurRadius(50);
QImage p("://img3.png");
QImage res = applyEffectToImage(p, e, 40);
And note the extent parameter: it adds extent pixels to each side of the original image, which is especially useful for shadows and blurs so they are not cut off.
Check out this:
#include <QtGui/QApplication>
#include <QImage>
#include <QPixmap>
#include <QLabel>
QImage blurred(const QImage& image, const QRect& rect, int radius, bool alphaOnly = false)
{
int tab[] = { 14, 10, 8, 6, 5, 5, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2 };
int alpha = (radius < 1) ? 16 : (radius > 17) ? 1 : tab[radius-1];
QImage result = image.convertToFormat(QImage::Format_ARGB32_Premultiplied);
int r1 = rect.top();
int r2 = rect.bottom();
int c1 = rect.left();
int c2 = rect.right();
int bpl = result.bytesPerLine();
int rgba[4];
unsigned char* p;
int i1 = 0;
int i2 = 3;
if (alphaOnly)
i1 = i2 = (QSysInfo::ByteOrder == QSysInfo::BigEndian ? 0 : 3);
for (int col = c1; col <= c2; col++) {
p = result.scanLine(r1) + col * 4;
for (int i = i1; i <= i2; i++)
rgba[i] = p[i] << 4;
p += bpl;
for (int j = r1; j < r2; j++, p += bpl)
for (int i = i1; i <= i2; i++)
p[i] = (rgba[i] += ((p[i] << 4) - rgba[i]) * alpha / 16) >> 4;
}
for (int row = r1; row <= r2; row++) {
p = result.scanLine(row) + c1 * 4;
for (int i = i1; i <= i2; i++)
rgba[i] = p[i] << 4;
p += 4;
for (int j = c1; j < c2; j++, p += 4)
for (int i = i1; i <= i2; i++)
p[i] = (rgba[i] += ((p[i] << 4) - rgba[i]) * alpha / 16) >> 4;
}
for (int col = c1; col <= c2; col++) {
p = result.scanLine(r2) + col * 4;
for (int i = i1; i <= i2; i++)
rgba[i] = p[i] << 4;
p -= bpl;
for (int j = r1; j < r2; j++, p -= bpl)
for (int i = i1; i <= i2; i++)
p[i] = (rgba[i] += ((p[i] << 4) - rgba[i]) * alpha / 16) >> 4;
}
for (int row = r1; row <= r2; row++) {
p = result.scanLine(row) + c2 * 4;
for (int i = i1; i <= i2; i++)
rgba[i] = p[i] << 4;
p -= 4;
for (int j = c1; j < c2; j++, p -= 4)
for (int i = i1; i <= i2; i++)
p[i] = (rgba[i] += ((p[i] << 4) - rgba[i]) * alpha / 16) >> 4;
}
return result;
}
int main(int argc, char *argv[])
{
QApplication a(argc, argv);
QLabel label;
QImage image("image.png");
image = blurred(image,image.rect(),10,false);
label.setPixmap(QPixmap::fromImage(image));
label.show();
return a.exec();
}
Method 1a: grab the raw bits and do it yourself. You'll need to be sufficiently familiar with bitmaps and blurring algorithms to implement the blur; if you want that sort of precision, this is the way to go.
QImage image = pixmap.toImage();
if (image.format() != QImage::Format_RGB32)
image = image.convertToFormat(QImage::Format_RGB32);
uchar* bits = image.bits();
int rowBytes = image.bytesPerLine();
DoMyOwnBlurAlgorithm(bits, image.width(), image.height(), rowBytes);
return QPixmap::fromImage(image);
Method 1b: who needs raw bits? You can use image.pixel(x,y) and image.setPixel(x,y,color) instead. This won't be as fast as 1a, but it should be a bit easier to understand and code.
QImage image = pixmap.toImage();
QImage output(image.width(), image.height(), image.format());
for (int y=0; y<image.height(); ++y)
for (int x=0; x<image.width(); ++x)
output.setPixel(x, y, getBlurredColor(image, x, y));
return output;
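getBlurredColor is a hypothetical helper in the snippet above; a minimal box-blur version of it (an assumption, not the author's implementation) might look like:
// Naive 3x3 box blur; a real implementation would use a Gaussian kernel
// and support larger radii.
QRgb getBlurredColor(const QImage& image, int x, int y)
{
    int r = 0, g = 0, b = 0, n = 0;
    for (int dy = -1; dy <= 1; ++dy)
        for (int dx = -1; dx <= 1; ++dx) {
            int sx = qBound(0, x + dx, image.width() - 1);
            int sy = qBound(0, y + dy, image.height() - 1);
            QRgb c = image.pixel(sx, sy);
            r += qRed(c); g += qGreen(c); b += qBlue(c); ++n;
        }
    return qRgb(r / n, g / n, b / n);
}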
Method 2: use a QGraphicsBlurEffect, through a widget or scene. The code here uses a label widget:
QPixmap BlurAPixmap(const QPixmap& inPixmap)
{
QLabel* label = new QLabel();
label->setPixmap(inPixmap);
label->setGraphicsEffect(new QGraphicsBlurEffect());
QPixmap output(inPixmap.width(), inPixmap.height());
QPainter painter(&output);
label->render(&painter);
return output;
}
Tweak as needed. For example, I'm presuming the default graphics blur effect is acceptable. I'm using Method 2 in my project.
A Gaussian blur is a simple way to create this kind of effect.
Edit: And lo, I came across Qt's QGraphicsBlurEffect. Introduced in Qt 4.6, it seems to do exactly what you want.
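If a live blur on a widget is all you need (rather than a blurred pixmap), the effect can simply be installed on the widget directly; a minimal sketch, where someWidget stands for any QWidget you own:
QGraphicsBlurEffect *blur = new QGraphicsBlurEffect;
blur->setBlurRadius(4);
someWidget->setGraphicsEffect(blur); // the widget takes ownership of the effect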
Python code based on Петър Петров's answer above, for Qt 5:
from PyQt5.QtCore import Qt, QRectF
from PyQt5.QtGui import QImage, QPainter, QPixmap
from PyQt5.QtWidgets import (QApplication, QGraphicsBlurEffect,
                             QGraphicsPixmapItem, QGraphicsScene)

def applyEffectToImage(src, effect):
    scene = QGraphicsScene()
    item = QGraphicsPixmapItem()
    item.setPixmap(QPixmap.fromImage(src))
    item.setGraphicsEffect(effect)
    scene.addItem(item)
    res = QImage(src.size(), QImage.Format_ARGB32)
    res.fill(Qt.transparent)
    ptr = QPainter(res)
    scene.render(ptr, QRectF(), QRectF(0, 0, src.width(), src.height()))
    ptr.end()  # finish painting before the image is used
    return res
app = QApplication([])  # scenes, effects and pixmaps need a QApplication
blur = QGraphicsBlurEffect()
blur.setBlurRadius(8)
source = QImage(r"C:\Users\fran\Desktop\test.png")
result = applyEffectToImage(source, blur)
result.save(r"C:\Users\fran\Desktop\result.png")