The code I lost in a hard drive failure however over a year later it still bugs me about it and I really want an answer so I can sleep at night without this popping into my head.
So I was writing a defragmentation software and it displayed the clusters. The problem with the drawing code is floating point math. I'm not experienced enough to solve this math problem.
I could draw the clusters no problem however due to floating point math the last pixel were fighting with the first pixel of a new cluster when I scaled it down to fit a window. I can't figure out how to prevent this. I tried rounding down and up but that left a pixel either empty or overwriting a pixel f the last cluster when it shouldn't be, etc.
It should look like this. Can't figure out how this JkDefrag (open source) did it and I have the source code for it! This is what keeps me up at night. Please help me get a good nights sleep.
Drawing code from a backup:
void DrawTESTClusters( HWND hWnd, const CMemoryDC &memoryDC, LONGLONG ClusterStart, LONGLONG ClusterCount, int color )
{
LONG Width;
LONG Height;
LONGLONG x1;
LONGLONG y1;
//LONGLONG x2;
//LONGLONG y2;
HDC hDC;
LONG MaxLength;
//DebugPrint(_T("%I64d - %I64d, %I64d\n"), ClusterStart, ClusterEnd, ClusterEnd - ClusterStart);
// The usual checks
if ( TotalClusters <= 0 ) // Can this happen ??
return;
//_ASSERT( ClusterStart != ClusterEnd );
//if ( ClusterStart == ClusterEnd ) // Can this happen ??
// return;
_ASSERT( ClusterStart >= 0 && ClusterStart <= TotalClusters );
//if ( ClusterStart < 0 || ClusterStart > TotalClusters ) // Can this happen ??
// return;
//_ASSERT( ClusterEnd >= 0 && ClusterEnd <= TotalClusters );
//if ( ClusterEnd < 0 || ClusterEnd > TotalClusters ) // Can this happen ??
// return;
_ASSERT( ClusterCount >= 0 && (ClusterStart + ClusterCount) <= TotalClusters );
hDC = memoryDC.GetMemoryDC();
Width = memoryDC.GetWidth();
Height = memoryDC.GetHeight();
MaxLength = memoryDC.GetMaxLength();
// Calculate some stuff
float Scale = static_cast<float>(MaxLength) / TotalClusters;
// TODO: Test this drawing code with scale above 1.0.
// The current code should work in theory (to scale up) but if it doesn't the proposed fix-code below, commented:
//if ( Scale > 1.0 )
// Scale = 1.0;
LONG Length = static_cast<LONG>( ceil( static_cast<float>(ClusterCount) * Scale ) );
if ( Length <= 0 )
return;
//LONG ScaleA = (ClusterStart * Scale / TotalClusters);
//LONG ScaleB = (ClusterEnd * Scale / TotalClusters);
//x1 = (double)(ClusterStart * Scale) / (double)Width;
//x1 = (double)((double)ClusterStart / (double)Width) * Scale;
//y1 = (ClusterStart * Scale) % Width;
//y1 = fmod( (ClusterStart * Scale), Width );
//y1 = fmod( (double)ClusterStart / (double)Width), Scale );
//x2 = ScaleB % Width;
//y2 = ScaleB / Width;
x1 = static_cast<LONGLONG>( fmod( static_cast<float>(ClusterStart * Scale), static_cast<float>(Width) ) );
y1 = static_cast<LONGLONG>( static_cast<float>(ClusterStart * Scale) / static_cast<float>(Width) );
// Calculation done, now check if there is any point in drawing it
//if ( x1 <= 0 && x2 <= 0 )
// return;
// Save original object.
HGDIOBJ oldPen = SelectObject( hDC, GetStockObject(DC_PEN) );
// Change the DC pen color
SetDCPenColor( hDC, ClusterMapColors[ color ] );
//if ( InUse == 1 )
// SetDCPenColor( hDC, RGB(160, 160, 160) ); // 160 == 0xA0
//else if ( InUse == 0 )
// SetDCPenColor( hDC, RGB(0xFF, 0xFF, 0xFF) );
// //SetDCPenColor( hDC, RGB(0x00, 0x00, 0x00) );
//else if ( InUse == 2 )
// //SetDCPenColor( hDC, RGB(0x00, 0xFF, 0x00) );
// SetDCPenColor( hDC, RGB(0x00, 0x00, 0xFF) ); // Blue
//else if ( InUse == 3 )
// //SetDCPenColor( hDC, RGB(0x00, 160, 0xFF) );
// SetDCPenColor( hDC, RGB(0x00, 0xFF, 0x00) ); // Green
//else if ( InUse == 4 )
// //SetDCPenColor( hDC, RGB(0x00, 160, 0xFF) );
// SetDCPenColor( hDC, RGB(0xFF, 0x00, 0x00) ); // Red
//else if ( InUse == 5 ) // Debug
// SetDCPenColor( hDC, RGB(0xFF, 0xFF, 0x00) ); // Yellow
//----------------------------------------------------
// Only Draw code in here
LONG step;
LONG line = static_cast<LONG>( y1 );
while ( Length > 0 )
{
step = Min( Length, static_cast<LONG>(Width - x1) );
//step = Min( Length, Width );
if ( MoveToEx( hDC, static_cast<int>(x1), static_cast<int>(line), NULL ) )
LineTo( hDC, static_cast<int>(step + x1), static_cast<int>(line) );
Length -= step;
x1 = 0;
line++;
}
#if 0
Width = clientRect.right;
Height = clientRect.bottom;
int Scale = (Width * Height) / TotalClusters;
//x2 = (ClusterEnd * Scale ) % Width;
//y2 = (ClusterEnd * Scale ) / Width;
// Start
int startX = (ClusterStart * Width * Height / TotalClusters ) % Width;
int startY = (ClusterStart * Width * Height / TotalClusters ) / Width;
int endX = ;
int endY;
LONG Length = ClusterEnd - ClusterStart;
while ( Length > 0 )
{
LONG len = Min( Length, Width );
len -= startX;
MoveToEx( hDC, startX, startY, NULL );
LineTo( hDC, startX + len, endY );
Length -= len;
startY++; // Next line
startX = 0;
}
#endif
//----------------------------------------------------
// Restore original object.
SelectObject( hDC, oldPen );
}
... due to floating point math the last pixel were fighting with the first pixel of a new cluster when I scaled it down to fit a window
Rather than low precision floating point math, use integer math. Easier to control edge cases exactly.
Form the scale as a numerator, denominator pair:
// float Scale = static_cast<float>(MaxLength) / TotalClusters;
long ScaleNumerator = MaxLength;
long ScaleDenominator = TotalClusters;
Replace FP with integer code. Use wider integer math when needed. Example:
// x1 = static_cast<LONGLONG>( fmod( static_cast<float>(ClusterStart * Scale),
// static_cast<float>(Width) ) );
long long m = 1LL * ClusterStart * ScaleNumerator / ScaleDenominator;
x1 = m % Width;
y1 = m / Width;
OP's commented code may have suffered with overflow issues. Alternative:
// int startX = (ClusterStart * Width * Height / TotalClusters ) % Width;
int startX = (1LL * ClusterStart * Width * Height / TotalClusters ) % Width;
Should this still result in edge case issues, adjusting integer math code is easier to remedy than FP code.
Related
Function in pcl/1.13.0_2/include/pcl-1.13/pcl/surface/3rdparty/poisson4/multi_grid_octree_data.hpp:
template<int Degree>
int Octree<Degree>::NonLinearUpdateWeightContribution( TreeOctNode* node , const Point3D<Real>& position , Real weight )
{
TreeOctNode::Neighbors3& neighbors = neighborKey.setNeighbors( node );
double x,dxdy,dx[DIMENSION][3];
double width;
Point3D<Real> center;
Real w;
node->centerAndWidth( center , w );
width=w;
const double SAMPLE_SCALE = 1. / ( 0.125 * 0.125 + 0.75 * 0.75 + 0.125 * 0.125 );
for( int i=0 ; i<DIMENSION ; i++ )
{
x = ( center[i] - position[i] - width ) / width;
dx[i][0] = 1.125 + 1.500*x + 0.500*x*x;
x = ( center[i] - position[i] ) / width;
dx[i][1] = 0.750 - x*x;
dx[i][2] = 1. - dx[i][1] - dx[i][0];
// Note that we are splatting along a co-dimension one manifold, so uniform point samples
// do not generate a unit sample weight.
dx[i][0] *= SAMPLE_SCALE;
}
for( int i=0 ; i<3 ; i++ ) for( int j=0 ; j<3 ; j++ )
{
dxdy = dx[0][i] * dx[1][j] * weight;
for( int k=0 ; k<3 ; k++ ) if( neighbors.neighbors[i][j][k] )
neighbors.neighbors[i][j][k]->nodeData.centerWeightContribution += Real( dxdy * dx[2][k] );
}
return 0;
}
My confusions are:
why is the PCL version adding the additional SAMPLE_SCALE, and only onto the first spline coefficient?
since the implicit indicator function is defined on R^3 (ignoring discreteness), where do the co-dimension one and SAMPLE_SCALE come from?
I found the version of Poisson reconstruction shipped with MeshLab does not have this issue in meshlab/unsupported/plugins_unsupported/filter_poisson/src/MultiGridOctreeData.inl:
template<int Degree>
int Octree<Degree>::NonLinearUpdateWeightContribution(TreeOctNode* node,const Point3D<Real>& position,const Real& weight){
int i,j,k;
TreeOctNode::Neighbors& neighbors=neighborKey.setNeighbors(node);
double x,dxdy,dx[DIMENSION][3];
double width;
Point3D<Real> center;
Real w;
node->centerAndWidth(center,w);
width=w;
for(i=0;i<DIMENSION;i++){
x=(center.coords[i]-position.coords[i]-width)/width;
dx[i][0]=1.125+1.500*x+0.500*x*x;
x=(center.coords[i]-position.coords[i])/width;
dx[i][1]=0.750 - x*x;
dx[i][2]=1.0-dx[i][1]-dx[i][0];
}
for(i=0;i<3;i++){
for(j=0;j<3;j++){
dxdy=dx[0][i]*dx[1][j]*weight;
for(k=0;k<3;k++){
if(neighbors.neighbors[i][j][k]){neighbors.neighbors[i][j][k]->nodeData.centerWeightContribution+=Real(dxdy*dx[2][k]);}
}
}
}
return 0;
}
I use fitInView() function to view the image pixels, but image pixels exists offset, how to solve this problem?
void QtGraphicsView::setViewRect(const QRectF &rect)
{
if (m_viewRect == rect)
return;
m_viewRect = rect;
fitInView(rect);
}
void QtGraphicsView::scaleView(qreal factor)
{
const QSizeF &size = m_viewRect.size() / factor;
if (size.width() <= 0 || size.height() <= 0
|| size.width() >= sceneRect().width()
|| size.height() >= sceneRect().height())
return;
const QSizeF &dsize = m_viewRect.size() - size;
const QPointF &topLeft = m_viewRect.topLeft()
+ QPointF(dsize.width() / 2, dsize.height() / 2);
setViewRect(QRectF(topLeft, size).toRect());
}
I would like to display sample6 of the OptixSDK in a QGLWidget.
My application has only 3 QSlider for the rotation around the X,Y,Z axis and the QGLWidget.
For my understanding, paintGL() gets called whenever updateGL() is called by my QSlider or Mouseevents. Then I initialize a rotation matrix and apply this matrix to the PinholeCamera in order to trace the scene with new transformed cameracoordinates, right?
When tracing is finished i get the outputbuffer and use it draw the pixels with glDrawPixels(), just like in GLUTdisplay.cpp given in the OptiX framework.
But my issue is that the image is skewed/distorted. For example I wanted to display a ball, but the ball is extremley flatened, but the rotation works fine.
When I am zooming out, it seems that the Image scales much slower horizontally than vertically.
I am almost sure/hope that it has to do something with the gl...() functions that are not used properly. What am I missing? Can someone help me out?
For the completeness i post my paintGL() and updateGL() code.
void MyGLWidget::initializeGL()
{
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
m_scene = new MeshViewer();
m_scene->setMesh( (std::string( sutilSamplesDir() ) + "/ball.obj").c_str());
int buffer_width, buffer_height;
// Set up scene
SampleScene::InitialCameraData initial_camera_data;
m_scene->setUseVBOBuffer( false );
m_scene->initScene( initial_camera_data );
int m_initial_window_width = 400;
int m_initial_window_height = 400;
if( m_initial_window_width > 0 && m_initial_window_height > 0)
m_scene->resize( m_initial_window_width, m_initial_window_height );
// Initialize camera according to scene params
m_camera = new PinholeCamera( initial_camera_data.eye,
initial_camera_data.lookat,
initial_camera_data.up,
-1.0f, // hfov is ignored when using keep vertical
initial_camera_data.vfov,
PinholeCamera::KeepVertical );
Buffer buffer = m_scene->getOutputBuffer();
RTsize buffer_width_rts, buffer_height_rts;
buffer->getSize( buffer_width_rts, buffer_height_rts );
buffer_width = static_cast<int>(buffer_width_rts);
buffer_height = static_cast<int>(buffer_height_rts);
float3 eye, U, V, W;
m_camera->getEyeUVW( eye, U, V, W );
SampleScene::RayGenCameraData camera_data( eye, U, V, W );
// Initial compilation
m_scene->getContext()->compile();
// Accel build
m_scene->trace( camera_data );
m_scene->getContext()->launch( 0, 0 );
// Initialize state
glMatrixMode(GL_PROJECTION);glLoadIdentity();glOrtho(0, 1, 0, 1, -1, 1 );
glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glViewport(0, 0, buffer_width, buffer_height);
}
And here is paintGL()
void MyGLWidget::paintGL()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
float3 eye, U, V, W;
m_camera->getEyeUVW( eye, U, V, W );
SampleScene::RayGenCameraData camera_data( eye, U, V, W );
{
nvtx::ScopedRange r( "trace" );
m_scene->trace( camera_data );
}
// Draw the resulting image
Buffer buffer = m_scene->getOutputBuffer();
RTsize buffer_width_rts, buffer_height_rts;
buffer->getSize( buffer_width_rts, buffer_height_rts );
int buffer_width = static_cast<int>(buffer_width_rts);
int buffer_height = static_cast<int>(buffer_height_rts);
RTformat buffer_format = buffer.get()->getFormat();
GLvoid* imageData = buffer->map();
assert( imageData );
switch (buffer_format) {
/*... set gl_data_type and gl_format ...*/
}
RTsize elementSize = buffer->getElementSize();
int align = 1;
if ((elementSize % 8) == 0) align = 8;
else if ((elementSize % 4) == 0) align = 4;
else if ((elementSize % 2) == 0) align = 2;
glPixelStorei(GL_UNPACK_ALIGNMENT, align);
gldata = QGLWidget::convertToGLFormat(image_data);
NVTX_RangePushA("glDrawPixels");
glDrawPixels( static_cast<GLsizei>( buffer_width ), static_cast<GLsizei>( buffer_height ),gl_format, gl_data_type, imageData);
// glDraw
NVTX_RangePop();
buffer->unmap();
}
After hours of debugging, I found out that I forgot to set the Camera-parameters right, it had nothing to go to with the OpenGL stuff.
My U-coordinate, the horizontal axis of view plane was messed up, but the V,W and eye coordinates were right.
After I added these lines in initializeGL()
m_camera->setParameters(initial_camera_data.eye,
initial_camera_data.lookat,
initial_camera_data.up,
initial_camera_data.vfov,
initial_camera_data.vfov,
PinholeCamera::KeepVertical );
everything was right.
Closed. This question needs details or clarity. It is not currently accepting answers.
Want to improve this question? Add details and clarify the problem by editing this post.
Closed 8 years ago.
Improve this question
I am taking data from socket in which some paramaters are changing after every second. I am projecting the data on my gui. I want to update my gui after every second.
This is my sockettest class where i am receving the data and i want to plot the data in tactical widget.
void socketTest::readyRead()
{
//QByteArray buffer;
int numRead = 0, numReadTotal = 0;
char buffer[180];
socketTest *a = new socketTest();
tacticalwidget *b = new tacticalwidget();
forever {
numRead = socket->read(buffer, 180);
// do whatever with array
numReadTotal += numRead;
memcpy (detailed_track_data, buffer, sizeof(buffer));
//qDebug() << sizeof(buffer);
amp = qToBigEndian(detailed_track_data[0].amplitude);
theta = qToBigEndian(detailed_track_data[0].doa)*0.1;
emit SIGNAL(valuechanged());
connect(a,SIGNAL(valuechanged()), b , SLOT(tacticalwidget::paintEvent()));
int track_state = qToBigEndian(detailed_track_data[0].track_status);
int bit1 = 2^5;
int bit2 = 2^4;
//check hostility level for hostile
if(((track_state) & (bit1)) == 0 && ((track_state) & (bit2)) == 1)
tracktype = 0;// Hostile
else
//check hostility level for unknown
if(((track_state) & (bit1)) == 0 && ((track_state) & (bit2)) == 0)
tracktype = 1;//unknown
else
tracktype = 2;//friendly
if (numRead == 0 && !socket->waitForReadyRead())
break;
}
TacticalWidget code:
void tacticalwidget::paintEvent(QPaintEvent */*e*/)
{
QSize sz = size();
int radius;
QPainter painter(this);
painter.setRenderHint(QPainter::Antialiasing,true);
QBrush br;
br.setColor(Qt::black);
painter.setBrush(br);
painter.fillRect(0,0,sz.width(),sz.height(),Qt::SolidPattern);
int centrex = sz.width()/2 ;
int centrey = sz.height()/2;
if (centrex < centrey )
radius = centrex -4;
else
radius = centrey -4;
painter.setPen(QPen(Qt::green,2,Qt::SolidLine,Qt::RoundCap));
painter.drawEllipse(centrex - radius,centrey - radius,2*radius,2*radius);
painter.setPen(QPen(Qt::yellow,2,Qt::SolidLine,Qt::RoundCap));
painter.drawEllipse(centrex - (radius*2/3),centrey-radius*2/3),(radius*4 /3),(radius*4/3));
painter.setPen(QPen(Qt::red,2,Qt::SolidLine,Qt::RoundCap));
painter.drawEllipse(centrex - radius/3,centrey - radius/3,radius*2/3,radius*2/3);
painter.setPen(QPen(Qt::white,2,Qt::SolidLine,Qt::RoundCap));
painter.drawLine(centrex,0,centrex,centrey*2);
painter.drawLine(0,centrey,centrex*2,centrey);
painter.drawText(centrex - radius + 4,centrey,"270");
painter.drawText(centrex,centrey-radius + 4 ,"N");
painter.drawText(centrex,centrey+radius -4,"180");
painter.drawText(centrex + radius -4,centrey,"90");
painter.setPen(QPen(Qt::magenta,2,Qt::SolidLine,Qt::RoundCap));
double theta = 45 * PI /180;
painter.drawLine(centrex,centrey,centrex + radius * sin(theta),centrey - radius * cos(theta));
//Track Display
double amplitudefactor = (theObj.amp + 80)/100;
//int trackNo = i+1;
double pointx,pointy;
int circleradius,innerradius = 0;
switch(theObj.tracktype)
{
case 0://hostile
circleradius = radius/3;
pointx =centrex + circleradius * sin(theObj.theta) * amplitudefactor;
pointy = centrey - circleradius * cos(theObj.theta)* amplitudefactor;
painter.setPen(QPen(Qt::red,2,Qt::SolidLine,Qt::RoundCap));
painter.drawImage(pointx,pointy,QImage("E:\\bitmaps\\rbitmap.png"));
break;
case 1://unknown
circleradius = radius/3;
innerradius = radius/3;
pointx =centrex + innerradius * sin(theObj.theta) + circleradius * sin(theObj.theta) * amplitudefactor;
pointy = centrey - innerradius * cos(theObj.theta) - circleradius * cos(theObj.theta)* amplitudefactor;
painter.setPen(QPen(Qt::yellow,2,Qt::SolidLine,Qt::RoundCap));
painter.drawImage(pointx,pointy,QImage("E:\\bitmaps\\ybitmap.png"));
break;
case 2: //friendly
circleradius = radius/3;
innerradius = radius *2/3;
pointx =centrex + innerradius * sin(theObj.theta) + circleradius * sin(theObj.theta) * amplitudefactor;
pointy = centrey - innerradius * cos(theObj.theta) - circleradius * cos(theObj.theta)* amplitudefactor;
painter.setPen(QPen(Qt::green,2,Qt::SolidLine,Qt::RoundCap));
painter.drawImage(pointx,pointy,QImage("E:\\bitmaps\\gbitmap.png"));
break;
default:
break;
}
// setStyleSheet("background-color: rgb(0, 0, 0);");
}
you are incorrectly handling a socket in readyRead(), check my other answer
you are emitting signal then connecting it, and you are doing this mutiple times, This is BIG problem.
you messed up something with endian, check out QDataStream class (it assumes that by default stream is in big endian).
In general you have nice spaghetti code so I'm not surprised it doesn't work.
I have the following rotation function:
public void rotate( Actor a ) {
float rotateBy = 30.0f,
newX = ( a.getX() * ( float ) Math.cos( rotateBy ) ) - ( a.getY() * ( float ) Math.sin( rotateBy ) ),
newY = ( a.getX() * ( float ) Math.sin( rotateBy ) ) + ( a.getY() * ( float ) Math.cos( rotateBy ) ),
moveByX = a.getX() - newX, // delta
moveByY = a.getY() - newY, // delta
duration = 1.0f; // 1 second
a.addAction(
Actions.sequence(
Actions.parallel(
Actions.rotateBy( rotateBy, duration ),
Actions.moveBy( moveByX, moveByY, duration )
)
)
);
}
But it seems the moveByX and moveByY values are not accurate.
The goal is to figure out what the new x,y would be after rotation, and move up by that much so that it still maintains the same y as before it was rotated.
Also, my Actor's draw method:
#Override
public void draw( Batch batch, float parentAlpha ) {
batch.draw( texture, getX(), getY(), getWidth()/2, getHeight()/2, getWidth(), getHeight(), getScaleX(), getScaleY(), getRotation() );
}
This is the way I would do it: first the origin of the actor must be the center of the square, then rotate the actor on its center and you will see something like this
The red line represent the y-offset that the actor needs to be moved up in order to follow the floor, as you can see this y-offset increases and decreases (so maybe you can't use Actions.moveBy because this move it along a linear way and your path isn't linear, see the blue line) so
Vector2 pos = new Vector2();
public void rotate(float delta, Actor a) {
a.rotateBy(angularSpeed*delta);
pos.x += linearSpeed*delta;
a.setPosition(pos.x,pos.y+yOffset(a.getRotation());
}
yOffset is the function that describes the lenght of the red line based on the square rotation angle:
public float yOffset(float angle) {
angle %= Math.PI/2;
return (float) Math.abs(squareSize*(Math.sin((Math.PI/4+angle))-Math.sin(Math.PI/4)));
}
All angles are in radians.