I need to fill a QML ChartView with a LineSeries. I am doing it by invoking a method from C++:
QMetaObject::invokeMethod(m_chartview, "createSeries", Qt::DirectConnection,
                          Q_RETURN_ARG(QAbstractSeries *, serie),
                          Q_ARG(int, type),
                          Q_ARG(QString, ("Chart " + QString::number(index + 1))),
                          Q_ARG(QAbstractAxis *, axis_x),
                          Q_ARG(QAbstractAxis *, axis_y));
and then appending points to the created series:
if(QLineSeries *line_serie = qobject_cast<QLineSeries *>(serie)){
    static std::default_random_engine e;
    static std::uniform_real_distribution<> dis(0, 3);
    for(int i = 0; i < 10; i++){
        line_serie->append(i, dis(e));
    }
}
This works fine with a small amount of data, but it is extremely slow with a large amount of data.
The problem is that each append() triggers a redraw. So my question is: is there any way to add the series to the ChartView only after it has been populated with points?
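One direction worth trying (a minimal sketch of my own, not from the question; it assumes a series obtained from the createSeries call above, and the fillSeries helper name is hypothetical) is to build all points in a container first and hand them to the series in a single QXYSeries::replace() call, so the series data is swapped in one go instead of point by point; setUseOpenGL() may also help with very large series:

#include <QtCharts/QAbstractSeries>
#include <QtCharts/QLineSeries>
#include <QList>
#include <QPointF>
#include <random>

QT_CHARTS_USE_NAMESPACE

// Hypothetical helper: populate an already-created series in one shot instead
// of appending point by point. 'serie' is assumed to be the QAbstractSeries*
// returned by the createSeries invocation shown in the question.
void fillSeries(QAbstractSeries *serie, int pointCount)
{
    if (QLineSeries *line_serie = qobject_cast<QLineSeries *>(serie)) {
        static std::default_random_engine e;
        static std::uniform_real_distribution<> dis(0, 3);

        QList<QPointF> points;
        points.reserve(pointCount);
        for (int i = 0; i < pointCount; i++)
            points.append(QPointF(i, dis(e)));

        // replace() swaps in all points at once, rather than updating the
        // series after every single append().
        line_serie->replace(points);

        // Optional: OpenGL-accelerated drawing for very large series.
        line_serie->setUseOpenGL(true);
    }
}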
I have a question that comes up in many threads, but none of them explicitly answer it.
I am trying to have a multidimensional array inside a GPU kernel using thrust. Flattening would be difficult, as the dimensions are non-homogeneous and I go up to 4D. Now I know I cannot have device_vectors of device_vectors, for whichever underlying reason (explanation would be welcome), so I tried going the route of raw pointers.
My reasoning is: a raw pointer points to memory on the GPU, otherwise I would not be able to access it from within the kernel. So I should technically be able to have a device_vector that holds raw pointers, all of which should be accessible from within the GPU. With that in mind I constructed the following code:
thrust::device_vector<Vector3r*> d_fluidmodelParticlePositions(nModels);
thrust::device_vector<unsigned int***> d_allFluidNeighborParticles(nModels);
thrust::device_vector<unsigned int**> d_nFluidNeighborsCrossFluids(nModels);

for(unsigned int fluidModelIndex = 0; fluidModelIndex < nModels; fluidModelIndex++)
{
    FluidModel *model = sim->getFluidModelFromPointSet(fluidModelIndex);
    const unsigned int numParticles = model->numActiveParticles();

    thrust::device_vector<Vector3r> d_neighborPositions(model->getPositions().begin(), model->getPositions().end());
    d_fluidmodelParticlePositions[fluidModelIndex] = CudaHelper::GetPointer(d_neighborPositions);

    thrust::device_vector<unsigned int**> d_fluidNeighborIndexes(nModels);
    thrust::device_vector<unsigned int*> d_nNeighborsFluid(nModels);

    for(unsigned int pid = 0; pid < nModels; pid++)
    {
        FluidModel *fm_neighbor = sim->getFluidModelFromPointSet(pid);

        thrust::device_vector<unsigned int> d_nNeighbors(numParticles);
        thrust::device_vector<unsigned int*> d_neighborIndexesArray(numParticles);

        for(unsigned int i = 0; i < numParticles; i++)
        {
            const unsigned int nNeighbors = sim->numberOfNeighbors(fluidModelIndex, pid, i);
            d_nNeighbors[i] = nNeighbors;

            thrust::device_vector<unsigned int> d_neighborIndexes(nNeighbors);
            for(unsigned int j = 0; j < nNeighbors; j++)
            {
                d_neighborIndexes[j] = sim->getNeighbor(fluidModelIndex, pid, i, j);
            }
            d_neighborIndexesArray[i] = CudaHelper::GetPointer(d_neighborIndexes);
        }
        d_fluidNeighborIndexes[pid] = CudaHelper::GetPointer(d_neighborIndexesArray);
        d_nNeighborsFluid[pid] = CudaHelper::GetPointer(d_nNeighbors);
    }
    d_allFluidNeighborParticles[fluidModelIndex] = CudaHelper::GetPointer(d_fluidNeighborIndexes);
    d_nFluidNeighborsCrossFluids[fluidModelIndex] = CudaHelper::GetPointer(d_nNeighborsFluid);
}
Now the compiler won't complain, and accessing, for example, d_nFluidNeighborsCrossFluids from within the kernel works, but it returns wrong values. I access it like this (again, from within a kernel):
d_nFluidNeighborsCrossFluids[iterator1][iterator2][iterator3];
// Note: out of bounds indexing guaranteed to not happen, indexing is definitely right
The question is, why does it return wrong values? The logic behind it should work in my opinion, since my indexing is correct and the pointers should be valid addresses from within the kernel.
Thank you already for your time and have a great day.
EDIT:
Here is a minimal reproducible example. For some reason the values appear right despite having the same structure as my code, but cuda-memcheck reveals some errors. Uncommenting the two commented lines leads to the main problem I am trying to solve. What does cuda-memcheck tell me here?
/* Part of this example has been taken from code of Robert Crovella
in a comment below */
#include <thrust/device_vector.h>
#include <stdio.h>

template<typename T>
static T* GetPointer(thrust::device_vector<T> &vector)
{
    return thrust::raw_pointer_cast(vector.data());
}

__global__
void k(unsigned int ***nFluidNeighborsCrossFluids, unsigned int ****allFluidNeighborParticles){
    const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    if(i > 49)
        return;
    printf("i: %d nNeighbors: %d\n", i, nFluidNeighborsCrossFluids[0][0][i]);
    //for(int j = 0; j < nFluidNeighborsCrossFluids[0][0][i]; j++)
    //    printf("i: %d j: %d neighbors: %d\n", i, j, allFluidNeighborParticles[0][0][i][j]);
}

int main(){
    const unsigned int nModels = 2;
    const int numParticles = 50;

    thrust::device_vector<unsigned int**> d_nFluidNeighborsCrossFluids(nModels);
    thrust::device_vector<unsigned int***> d_allFluidNeighborParticles(nModels);

    for(unsigned int fluidModelIndex = 0; fluidModelIndex < nModels; fluidModelIndex++)
    {
        thrust::device_vector<unsigned int*> d_nNeighborsFluid(nModels);
        thrust::device_vector<unsigned int**> d_fluidNeighborIndexes(nModels);

        for(unsigned int pid = 0; pid < nModels; pid++)
        {
            thrust::device_vector<unsigned int> d_nNeighbors(numParticles);
            thrust::device_vector<unsigned int*> d_neighborIndexesArray(numParticles);

            for(unsigned int i = 0; i < numParticles; i++)
            {
                const unsigned int nNeighbors = i;
                d_nNeighbors[i] = nNeighbors;

                thrust::device_vector<unsigned int> d_neighborIndexes(nNeighbors);
                for(unsigned int j = 0; j < nNeighbors; j++)
                {
                    d_neighborIndexes[j] = i + j;
                }
                d_neighborIndexesArray[i] = GetPointer(d_neighborIndexes);
            }
            d_nNeighborsFluid[pid] = GetPointer(d_nNeighbors);
            d_fluidNeighborIndexes[pid] = GetPointer(d_neighborIndexesArray);
        }
        d_nFluidNeighborsCrossFluids[fluidModelIndex] = GetPointer(d_nNeighborsFluid);
        d_allFluidNeighborParticles[fluidModelIndex] = GetPointer(d_fluidNeighborIndexes);
    }

    k<<<256, 256>>>(GetPointer(d_nFluidNeighborsCrossFluids), GetPointer(d_allFluidNeighborParticles));

    if (cudaGetLastError() != cudaSuccess)
        printf("Sync kernel error: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaDeviceSynchronize();
}
A device_vector is a class definition. That class has various methods and operators associated with it. The thing that allows you to do this:
d_nFluidNeighborsCrossFluids[...]...;
is the square-bracket operator. That operator is a host-only operator; it is not usable in device code. Issues like this give rise to the general statement that "thrust::device_vector is not usable in device code." The device_vector object itself is generally not usable in device code. However, the data it contains is usable in device code, if you access it via a raw pointer.
Here is an example of a thrust device vector that contains an array of pointers to the data contained in other device vectors. That data is usable in device code, as long as you don't attempt to make use of the thrust::device_vector object itself:
$ cat t1509.cu
#include <thrust/device_vector.h>
#include <stdio.h>
template <typename T>
__global__ void k(T **data){
    printf("the first element of vector 1 is: %d\n", (int)(data[0][0]));
    printf("the first element of vector 2 is: %d\n", (int)(data[1][0]));
    printf("the first element of vector 3 is: %d\n", (int)(data[2][0]));
}

int main(){
    thrust::device_vector<int> vector_1(1,1);
    thrust::device_vector<int> vector_2(1,2);
    thrust::device_vector<int> vector_3(1,3);
    thrust::device_vector<int *> pointer_vector(3);
    pointer_vector[0] = thrust::raw_pointer_cast(vector_1.data());
    pointer_vector[1] = thrust::raw_pointer_cast(vector_2.data());
    pointer_vector[2] = thrust::raw_pointer_cast(vector_3.data());
    k<<<1,1>>>(thrust::raw_pointer_cast(pointer_vector.data()));
    cudaDeviceSynchronize();
}
$ nvcc -o t1509 t1509.cu
$ cuda-memcheck ./t1509
========= CUDA-MEMCHECK
the first element of vector 1 is: 1
the first element of vector 2 is: 2
the first element of vector 3 is: 3
========= ERROR SUMMARY: 0 errors
$
EDIT: In the MCVE you have now posted, you point out that an ordinary run of the code appears to give correct results, but when you run it under cuda-memcheck, errors are reported. You have a general design problem that causes this.
In C++, when an object is defined within a curly-braces region:
{
    {
        Object A;
        // object A is in-scope here
    }
    // object A is out-of-scope here
}
// object A is out of scope here
k<<<...>>>(anything that points to something in object A); // is illegal
and you exit that region, the object defined within the region is now out of scope. For objects with constructors/destructors, this usually means the destructor of the object will be called when it goes out-of-scope. For a thrust::device_vector (or std::vector) this will deallocate any underlying storage associated with that vector. That does not necessarily "erase" any data, but attempts to use that data are illegal and would be considered UB (undefined behavior) in C++.
When you establish pointers to such data inside an in-scope region, and then go out-of-scope, those pointers no longer point to anything that would be legal to access, so attempts to dereference the pointer would be illegal/UB. Your code is doing this. Yes, it does appear to give the correct answer, because nothing is actually erased on deallocation, but the code design is illegal, and cuda-memcheck will highlight that.
I suppose one fix would be to pull all this stuff out of the inner curly-braces, and put it at main scope, just like the d_nFluidNeighborsCrossFluids device_vector is. But you might also want to rethink your general data organization strategy and flatten your data.
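Building on the suggestion to pull everything out to main scope, here is one possible shape for that fix (a sketch of my own, not code from the answer, with assumed container names ownedCounts/ownedCountPtrs and a simplified two-level layout): keep every intermediate device_vector in an owning container that outlives the kernel launch, and only then store raw pointers to their data:

#include <thrust/device_vector.h>
#include <deque>
#include <cstdio>

template<typename T>
static T* GetPointer(thrust::device_vector<T> &v)
{
    return thrust::raw_pointer_cast(v.data());
}

__global__ void k(unsigned int ***nFluidNeighborsCrossFluids)
{
    const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i > 49)
        return;
    printf("i: %u nNeighbors: %u\n", i, nFluidNeighborsCrossFluids[0][0][i]);
}

int main()
{
    const unsigned int nModels = 2;
    const int numParticles = 50;

    // Owning storage declared at main scope. std::deque never relocates its
    // elements, so the raw device pointers taken from them stay valid.
    std::deque<thrust::device_vector<unsigned int>> ownedCounts;
    std::deque<thrust::device_vector<unsigned int*>> ownedCountPtrs;

    thrust::device_vector<unsigned int**> d_nFluidNeighborsCrossFluids(nModels);

    for (unsigned int fluidModelIndex = 0; fluidModelIndex < nModels; fluidModelIndex++)
    {
        ownedCountPtrs.emplace_back(nModels);
        thrust::device_vector<unsigned int*> &d_nNeighborsFluid = ownedCountPtrs.back();

        for (unsigned int pid = 0; pid < nModels; pid++)
        {
            ownedCounts.emplace_back(numParticles);
            thrust::device_vector<unsigned int> &d_nNeighbors = ownedCounts.back();
            for (int i = 0; i < numParticles; i++)
                d_nNeighbors[i] = i; // same dummy data as the MCVE

            d_nNeighborsFluid[pid] = GetPointer(d_nNeighbors);
        }
        d_nFluidNeighborsCrossFluids[fluidModelIndex] = GetPointer(d_nNeighborsFluid);
    }

    // Everything the pointer table refers to is still alive here.
    k<<<1, 64>>>(GetPointer(d_nFluidNeighborsCrossFluids));
    cudaDeviceSynchronize();
}

The same idea extends to the deeper levels of the 4D case: as long as the owning containers outlive the kernel launch, the destructor-driven deallocation that cuda-memcheck flags never happens before the pointers are dereferenced.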
You should really provide a minimal, complete, verifiable/reproducible example; yours is neither minimal, nor complete, nor verifiable.
I will, however, answer your side-question:
I know I cannot have device_vectors of device_vectors, for whichever underlying reason (explanation would be welcome)
While a device_vector refers to a bunch of data on the GPU, it's a host-side data structure - otherwise you would not be able to use it in host-side code. On the host side, what it holds should be something like: the capacity, the size in elements, the device-side pointer to the actual data, and maybe more information. This is similar to how an std::vector variable may refer to data that's on the heap, but if you create the variable locally, the fields I mentioned above will exist on the stack.
Now, those fields of the device vector that are located in host memory are not generally accessible from the device side. In device-side code you would typically use the raw pointer to the device-side data the device_vector manages.
Also, note that if you have a thrust::device_vector<T> v, each use of operator[] means a bunch of separate CUDA calls to copy data to or from the device (unless there's some caching going on under the hood). So you really want to avoid using square brackets with this structure.
Finally, remember that pointer-chasing can be a performance killer, especially on a GPU. You might want to consider massaging your data structure somewhat in order to make it amenable to flattening.
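To make the flattening suggestion concrete, here is a CSR-style sketch (my own illustration with assumed names, not code from the answer): all neighbor indices live in one flat device_vector, and a second offsets vector records where each particle's range begins, so the kernel only needs two raw pointers and no pointer chasing:

#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <cstdio>

// Flattened, CSR-style neighbor lists (assumed names):
//   neighborIndices - all neighbor ids of all particles, back to back
//   neighborOffsets - offsets[i]..offsets[i+1] is the range belonging to particle i
__global__ void countNeighbors(const unsigned int *neighborIndices,
                               const unsigned int *neighborOffsets,
                               unsigned int numParticles)
{
    const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= numParticles)
        return;

    const unsigned int begin = neighborOffsets[i];
    const unsigned int end   = neighborOffsets[i + 1];
    const unsigned int first = (end > begin) ? neighborIndices[begin] : 0;
    printf("particle %u has %u neighbors, first is %u\n", i, end - begin, first);
}

int main()
{
    const unsigned int numParticles = 50;

    // Build the flattened structure on the host (dummy data: particle i has i neighbors).
    thrust::host_vector<unsigned int> h_offsets(numParticles + 1);
    h_offsets[0] = 0;
    for (unsigned int i = 0; i < numParticles; i++)
        h_offsets[i + 1] = h_offsets[i] + i;

    thrust::host_vector<unsigned int> h_indices(h_offsets[numParticles]);
    for (unsigned int i = 0; i < numParticles; i++)
        for (unsigned int j = 0; j < i; j++)
            h_indices[h_offsets[i] + j] = i + j;

    // One bulk copy per array; no nested device allocations, no pointer chasing.
    thrust::device_vector<unsigned int> d_indices = h_indices;
    thrust::device_vector<unsigned int> d_offsets = h_offsets;

    countNeighbors<<<(numParticles + 63) / 64, 64>>>(
        thrust::raw_pointer_cast(d_indices.data()),
        thrust::raw_pointer_cast(d_offsets.data()),
        numParticles);
    cudaDeviceSynchronize();
}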
For learning purposes, I would like to plot data stored in a text file using QCustomPlot within Qt. QCustomPlot takes a QVector<double>, and when I create the vector by hand (initializing the vector with values), the plotting works. But when I read in the values from the text file, nothing happens: I get no compiler errors and there is no plotting.
I used the code snippets from a code example; you can see the code I am using below. I spent a few hours reading through the web, but to no avail.
QVector<double> v;
QVector<double> x(20), y(20);

QFile textFile("Data3.txt");
if(textFile.open(QIODevice::ReadOnly))
{
    double d;
    QTextStream textStream(&textFile);
    while (!textStream.atEnd()) {
        textStream >> d;
        if(textStream.status() == QTextStream::Ok){
            v.append(d);
        }
        else
            break;
    }
    for(int i=0; i<=20; ++i)
    {
        x[i] = i;
        y[i] = v[i];
    }
}

QVector<double> myVectorx({0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19});
QVector<double> myVectory({0.5,1,3,5,7,10});

ui->custom2Widget->addGraph();
ui->custom2Widget->graph()->setData(myVectorx, y);
//ui->custom2Widget->graph()->addData(x,y);
ui->custom2Widget->graph()->setPen(QPen(Qt::blue));
ui->custom2Widget->xAxis->setRange(0,25);
ui->custom2Widget->yAxis->setRange(0, 100);
ui->custom2Widget->xAxis->setLabel("Time");
ui->custom2Widget->yAxis->setLabel("CO2 values");
ui->custom2Widget->replot();
The data file looks like this (the values are in a single column, not one after another in the same row):
315.71
317.45
317.5
317.1
315.86
314.93
313.2
312.66
313.33
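For reference, here is a minimal sketch of one way such a column of values could be read and plotted (my own sketch, not code from the question; it reuses the same Data3.txt file and custom2Widget names, and sizes the x vector to however many values were actually read):

QVector<double> x, y;

QFile textFile("Data3.txt");
if (textFile.open(QIODevice::ReadOnly | QIODevice::Text))
{
    QTextStream textStream(&textFile);
    double d;
    while (!textStream.atEnd()) {
        textStream >> d;
        if (textStream.status() != QTextStream::Ok)
            break;
        x.append(x.size());   // 0, 1, 2, ... one key per sample read
        y.append(d);
    }
}

ui->custom2Widget->addGraph();
ui->custom2Widget->graph()->setData(x, y);         // x and y always have the same length
ui->custom2Widget->graph()->setPen(QPen(Qt::blue));
ui->custom2Widget->xAxis->setRange(0, x.size());
ui->custom2Widget->yAxis->setRange(300, 320);       // roughly fits the sample CO2 values above
ui->custom2Widget->xAxis->setLabel("Time");
ui->custom2Widget->yAxis->setLabel("CO2 values");
ui->custom2Widget->replot();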
I am trying to create a kind of metaball: nice curves between two circles.
Something like the image; the lines are drawn straight here but could also be more curved. I need them as a vector in Processing. Can anyone help me?
Thanks in advance!
Example in paperjs:
http://paperjs.org/examples/meta-balls/
image:
http://www.smeulders.biz/tmp/metaballs.png
void setup() {
  size(500, 500);
  ellipse(100, 250, 100, 100);
  ellipse(350, 250, 200, 200);
}

void draw() {}
With a bit of math (to work out the distance between circles) and a bit of pixel manipulation to set pixel colours based on those calculated distances, you can render 2D metaballs, and there are plenty of examples.
For fun, however, I decided to take a stab at a very hacky version of the example you shared by simply rendering ellipses into an image, then filtering the image at the end:
PGraphics pg; //a separate layer to render into
int dilateAmt = 3;
PImage grid;  //pixels of the grid alone, minus the 'cursor'

void setup(){
  size(400,400);
  //create a new layer
  pg = createGraphics(width,height);
  pg.beginDraw();
  //draw a di-grid inside
  pg.background(255);
  pg.noStroke();pg.fill(0);
  for(int y = 0 ; y < 5; y++)
    for(int x = 0 ; x < 5; x++)
      pg.ellipse((y%2==0?40:0)+(x * 80),40+(y * 80), 40, 40);
  pg.endDraw();
  //grab a snapshot for later re-use
  grid = pg.get();
}

void draw(){
  pg.beginDraw();
  //draw the cached grid (no need to loop and re-render circles)
  pg.image(grid,0,0);
  //and the cursor into the layer
  pg.ellipse(mouseX,mouseY,60,60);
  pg.endDraw();
  //since PGraphics extends PImage, you can filter, so we dilate
  for(int i = 0; i < dilateAmt; i++) pg.filter(DILATE);
  //finally render the result
  image(pg,0,0);
}

void keyPressed(){
  if(keyCode == UP) dilateAmt++;
  if(keyCode == DOWN) dilateAmt--;
  if(dilateAmt < 1) dilateAmt = 1;
  println(dilateAmt);
}
Note that the end result is raster, not vector.
If you want to achieve the exact effect you will need to port your example from JavaScript to Java. The source code is available.
If you like Processing, you could also do the above example in plain JavaScript using p5.js. You'll find most of the familiar Processing functions there, and you can also use the paper.js library directly.
I've been trying for a while to get support for soft bodies in my project.
I have already added all primitives, including static triangle meshes, as you can see below:
I've now been trying to implement the softbodies.
I do have triangle shapes as I mentioned, and I thought I could re-use the triangulation code to create softbody objects with the function:
btSoftBody* psb = btSoftBodyHelpers::CreateFromTriMesh(.....);
I successfully did this with the hardcoded bunny mesh, but now I want to pass any triangulated mesh into this function.
But I'm a bit lost figuring out exactly what parameters to send in (how to get the right parameters from my triangulated mesh).
Does anyone have an example of this? (Not a hardcoded one, but one starting from a
btTriangleMesh *mTriMesh = new btTriangleMesh();
type of object?)
It does work with the predefined type shapes that bullet has, so my update loop and all that works fine.
This is for version 2.81 (assuming vertices are stored as PHY_FLOAT and indices as PHY_INTEGER):
btTriangleMesh *mTriMesh = new btTriangleMesh();
// ...
const btVector3 meshScaling = mTriMesh->getScaling();

btAlignedObjectArray<btScalar> vertices;
btAlignedObjectArray<int> triangles;

for (int part = 0; part < mTriMesh->getNumSubParts(); part++)
{
    const unsigned char *vertexbase;
    const unsigned char *indexbase;
    int indexstride;
    int stride, numverts, numtriangles;
    PHY_ScalarType type, gfxindextype;

    mTriMesh->getLockedReadOnlyVertexIndexBase(&vertexbase, numverts, type, stride, &indexbase, indexstride, numtriangles, gfxindextype, part);

    for (int gfxindex = 0; gfxindex < numverts; gfxindex++)
    {
        float *graphicsbase = (float *)(vertexbase + gfxindex * stride);
        vertices.push_back(graphicsbase[0] * meshScaling.getX());
        vertices.push_back(graphicsbase[1] * meshScaling.getY());
        vertices.push_back(graphicsbase[2] * meshScaling.getZ());
    }
    for (int gfxindex = 0; gfxindex < numtriangles; gfxindex++)
    {
        unsigned int *tri_indices = (unsigned int *)(indexbase + gfxindex * indexstride);
        triangles.push_back(tri_indices[0]);
        triangles.push_back(tri_indices[1]);
        triangles.push_back(tri_indices[2]);
    }
}

btSoftBodyWorldInfo worldInfo;
// Setup worldInfo...
// ....

btSoftBodyHelpers::CreateFromTriMesh(worldInfo, &vertices[0], &triangles[0], triangles.size()/3 /*, randomizeConstraints = true*/);
A slower, more general approach is to iterate the mesh using mTriMesh->InternalProcessAllTriangles() but that will make your mesh a soup.
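For completeness, here is a rough sketch of that second approach (my own hedged illustration; the TriangleCollector callback and createSoftBodyFromTriMesh helper are names I made up, not part of Bullet): iterate the mesh with InternalProcessAllTriangles() and emit each triangle's vertices independently, which is exactly what turns the mesh into a triangle soup:

#include <btBulletCollisionCommon.h>
#include <BulletSoftBody/btSoftBody.h>
#include <BulletSoftBody/btSoftBodyHelpers.h>

// Collects every processed triangle as three independent vertices plus indices.
// Because vertices are never shared, the resulting soft body is a triangle soup.
struct TriangleCollector : public btInternalTriangleIndexCallback
{
    btAlignedObjectArray<btScalar> vertices;
    btAlignedObjectArray<int> triangles;

    virtual void internalProcessTriangleIndex(btVector3 *triangle, int /*partId*/, int /*triangleIndex*/)
    {
        for (int v = 0; v < 3; v++)
        {
            triangles.push_back(vertices.size() / 3);
            vertices.push_back(triangle[v].getX());
            vertices.push_back(triangle[v].getY());
            vertices.push_back(triangle[v].getZ());
        }
    }
};

btSoftBody *createSoftBodyFromTriMesh(btSoftBodyWorldInfo &worldInfo, btTriangleMesh *mTriMesh)
{
    TriangleCollector collector;
    // Use a huge AABB so that every triangle in the mesh is visited.
    const btVector3 aabbMin(-BT_LARGE_FLOAT, -BT_LARGE_FLOAT, -BT_LARGE_FLOAT);
    const btVector3 aabbMax(BT_LARGE_FLOAT, BT_LARGE_FLOAT, BT_LARGE_FLOAT);
    mTriMesh->InternalProcessAllTriangles(&collector, aabbMin, aabbMax);

    return btSoftBodyHelpers::CreateFromTriMesh(worldInfo,
                                                &collector.vertices[0],
                                                &collector.triangles[0],
                                                collector.triangles.size() / 3);
}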
Suppose I have a 2D array full of data, say 10 x 10. The contents, as well as the number of rows, can change at any time.
Now I want to display this data in a QTableWidget.
I use a timer with a 1-second timeout to refresh the table contents. In the timeout slot I use something like:
void slot_timeOut()
{
    // Iterate over the rows
    // and for each cell do something like
    ui->tw_data->setItem(row, 0, new QTableWidgetItem(data[row][0]));
    ui->tw_data->setItem(row, 1, new QTableWidgetItem(data[row][1]));
    //...
    ui->tw_data->setItem(row, 9, new QTableWidgetItem(data[row][9]));
}
The use of new QTableWidgetItem worries me: I keep no reference to it and I never delete it.
Over a period of time, is this a memory leak, or is it managed by Qt? Please help.
There is no leak, as
The table takes ownership of the item.
(From QTableWidget::setItem()).
Ownership here means that the QTableWidget will take care of deleting the item when it's no longer needed, or when the QTableWidget itself is destroyed. Ownership is usually documented in the Qt documentation (if not, I'd consider that a Qt bug).
Once setItem() is called again on the same cell, the previously set QTableWidgetItem will be deleted:
int main(int argc, char** argv)
{
    QApplication app(argc, argv);
    QTableWidget widget(2, 1);
    QTableWidgetItem* foo = new QTableWidgetItem("Foo");
    widget.setItem(0, 0, foo);
    qDebug() << foo->text(); // works
    widget.setItem(0, 0, new QTableWidgetItem("Bar")); // replaces foo with bar and deletes foo
    qDebug() << foo->text(); // undefined (usually, a crash)
    widget.show();
    return app.exec();
}
If you're on Linux, you can also verify the behavior by running the above code (without the second qDebug()) in valgrind with --leak-check=full.
From the Qt documentation:
void QTableWidget::setItem(int row, int column, QTableWidgetItem *item)
Sets the item for the given row and column to item. The table takes ownership of the item.
This indicates that Qt will manage the memory for the object as necessary.
It will delete properly when you add Qt objects to the cells.
I was pushing thousands of C-strings into cells and watched my memory blow up.
I made my "raw text data" a QString and my problem was solved.
Such a problem does occur: Qt does not really clear the memory allocated through QTableWidgetItem.
for(int row = 0; row < 35; row++)
{
    for(int i = 0; i < 9; i++)
    {
        QTableWidgetItem* item = new QTableWidgetItem();
        item->setText(QString::number(i));
        ui->tableWidget->setItem(row, i, item);
    }
}
This code gives 116 bytes of leakage.
We found this solution: Instead of QTableWidgetItem, you can use QLineEdit:
QLineEdit* tableline = new QLineEdit();
m_qtablewidget->setCellWidget(row, column, tableline);
P.S. We used Dr. Memory to search for leaks.