I am trying to implement an algorithm to maintain a QChart aspect ratio. It is kind of working, but I was wondering if anyone had a simpler solution.
Below is the code that runs on the resize event:
void TrainChartView::maintainAspectRatio(QSizeF eventSize) {
int aspect = 8;
QSizeF chartSize = m_chart->size();
QValueAxis* axisX = qobject_cast<QValueAxis*>(m_chart->axisX());
QValueAxis* axisY = qobject_cast<QValueAxis*>(m_chart->axisY());
// get Min Max X-axis Value
double minAxisX = axisX->min();
double maxAxisX = axisX->max();
double minAxisY = axisY->min();
double maxAxisY = axisY->max();
// Get Coordinates in scene of min and max X-axis value
QPointF minAxisXPosition = m_chart->mapToPosition(QPointF(minAxisX, 0));
QPointF maxAxisXPosition = m_chart->mapToPosition(QPointF(maxAxisX, 0));
QPointF minAxisYPosition = m_chart->mapToPosition(QPointF(0, minAxisY));
QPointF maxAxisYPosition = m_chart->mapToPosition(QPointF(0, maxAxisY));
double axisXSize = std::abs(maxAxisXPosition.x() - minAxisXPosition.x());
double axisYSize = std::abs(maxAxisYPosition.y() - minAxisYPosition.y());
// get the size of axis x in the coordinate system
double deltaAxisXSize = maxAxisXPosition.x() - minAxisXPosition.x();
if (chartSize.width() != eventSize.width()) {
QPointF maxAxisValue = m_chart->mapToValue(QPointF(0, (minAxisYPosition.y() - deltaAxisXSize)));
axisY->setRange(minAxisY, maxAxisValue.y() / aspect);
}
if (chartSize.height() != eventSize.height() && m_chart->minimumSize().height() >= eventSize.height()) {
double deltaHeight = eventSize.height() - chartSize.height();
maxAxisYPosition.setY(maxAxisYPosition.y() - deltaHeight);
QPointF maxAxisValue = m_chart->mapToValue(QPointF(maxAxisYPosition));
axisY->setRange(minAxisY, maxAxisValue.y());
}
}
I modified the code you posted a bit, and here is what I ended up with:
In the .h:
#include <QtCharts/QChartView>
#include <QtCharts/QValueAxis>
#include <QtCharts/QXYSeries>
//! \brief The QChartView_scaledAxis class extends the QChartView class but forces the graph to be normalized,
//! i.e. 1 pixel on the x-axis represents the same amount as 1 pixel on the y-axis.
class QChartView_scaledAxis : public QChartView {
public:
void resizeEvent(QResizeEvent *event) override;
};
In the .cpp:
#include <cmath>   // std::abs, std::copysign, fmin, fmax
#include <limits>  // std::numeric_limits

void QChartView_scaledAxis::resizeEvent(QResizeEvent *event) {
QChartView::resizeEvent(event);
// Get the axis of the graph
QValueAxis* axisX = qobject_cast<QValueAxis*>(this->chart()->axes(Qt::Horizontal)[0]);
QValueAxis* axisY = qobject_cast<QValueAxis*>(this->chart()->axes(Qt::Vertical)[0]);
// Get the series displayed on the graph
const QList<QAbstractSeries*> series = this->chart()->series();
// get Min Max values (on X and Y) of all the points plotted on the graph
double minX = std::numeric_limits<double>::max();
double maxX = std::numeric_limits<double>::lowest();
double minY = std::numeric_limits<double>::max();
double maxY = std::numeric_limits<double>::lowest();
for(QAbstractSeries *p_series : series) { // iterate over all the series in the graph
// Assuming all the series in the graph are QXYSeries...
for(QPointF p : qobject_cast<QXYSeries*>(p_series)->points()) { // iterate over each point of each series
minX = fmin(minX, p.x());
maxX = fmax(maxX, p.x());
minY = fmin(minY, p.y());
maxY = fmax(maxY, p.y());
}
}
// Get the points at both ends of the axis (will help to determine the plottable area in pixel)
const QPointF minPosition = this->chart()->mapToPosition(QPointF(axisX->min(), axisY->min()));
const QPointF maxPosition = this->chart()->mapToPosition(QPointF(axisX->max(), axisY->max()));
// Ratio between the size of the axis in terms of represented value and its size in pixels
const double axisX_PixToValue = (maxX - minX) / (maxPosition.x() - minPosition.x());
const double axisY_PixToValue = (maxY - minY) / (maxPosition.y() - minPosition.y());
// The smallest ratio must be 'kept' and applied to the other axis
if(std::abs(axisX_PixToValue) > std::abs(axisY_PixToValue)) {
axisY->setMin(minY);
axisY->setMax(minY + (maxPosition.y() - minPosition.y()) * std::copysign(axisX_PixToValue, axisY_PixToValue));
} else {
axisX->setMin(minX);
axisX->setMax(minX + (maxPosition.x() - minPosition.x()) * std::copysign(axisY_PixToValue, axisX_PixToValue));
}
}
This code is for a 1:1 ratio, but I'm sure it can be easily modified for any other ratio...
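For example, here is one possible sketch of such a modification (my own addition, untested): it would replace the final if/else block inside resizeEvent above, where ratio is the desired y-units-per-pixel divided by x-units-per-pixel, and 1.0 reproduces the 1:1 behaviour.
const double ratio = 2.0; // assumed example value, not part of the original code
const double scaledX_PixToValue = axisX_PixToValue * ratio;
if(std::abs(scaledX_PixToValue) > std::abs(axisY_PixToValue)) {
    axisY->setMin(minY);
    axisY->setMax(minY + (maxPosition.y() - minPosition.y()) * std::copysign(scaledX_PixToValue, axisY_PixToValue));
} else {
    axisX->setMin(minX);
    axisX->setMax(minX + (maxPosition.x() - minPosition.x()) * std::copysign(axisY_PixToValue / ratio, axisX_PixToValue));
}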
I want to project vector b onto vector a and onto vector c, in Processing.
In my sketch vector a is red and c is blue. I wanted c to be perpendicular to a, but this is where I'm having a lot of trouble. I'm using the JAMA library to try and make this easier. Any help with this is much appreciated, as I have been stumped for about a week now.
float X=200; // Origin : Note we have now centred the origin in the X-direction
float Y=350;
float ax=150; // Vector a resolved into components
float ay=-50;
float bx=0; // Vector b resolved into components
float by=-150;
float cx=150;
float cy=200;
Matrix a;
Matrix b;
Matrix c;
void setup() {
size(400,400); // Create a drawing window
strokeWeight(3); // Make pen 3 pixels wide for all lines
double [][] anums = {{ax},
{ay}};
double [][] bnums = {{bx},
{by}};
double [][] cnums = {{-cy},
{cx}};
a = new Matrix(anums);
b = new Matrix(bnums);
c = new Matrix(cnums);
}
void draw() {
background(255); // Clear screen
// Evaluate equation (1.5)
// STEP1: Insert code here that computes a_unit (i.e. the unit vector in the
// direction of a
double length = a.norm2();
Matrix a_unit= a.times(1/length);
// STEP2: Insert code here to compute the dot product of b and a_unit
Matrix a_unit_T = a_unit.transpose();
Matrix projection = a_unit_T.times(b);
double lp = projection.get(0,0);
// STEP3 Insert code here to compute the vector p using equation 1.5 above
Matrix p = a_unit.times(lp);
float px = (float)p.get(0,0);
float py = (float)p.get(1,0);
float ax = (float)a.get(0,0);
float ay = (float)a.get(1,0);
float bx = (float)b.get(0,0);
float by = (float)b.get(1,0);
float cx = (float)c.get(0,0);
float cy = (float)c.get(1,0);
// Draw the projection of b onto a
stroke(0,0,0); // Use a black pen
ellipse(X+px,Y+py,10,10); // point where b projects onto a
line(X+px,Y+py,X+bx,Y+by); // line from a to point of projection on b
stroke(255,0,0); // Make pen red
arrow(X,Y,X+ax,Y+ay); // Draw vector a starting at (X,Y)
//stroke(0,0,255);
//arrow(X,Y,X-ax,Y+ay);
stroke(0,255,0); // Make pen green
arrow(X,Y,X+bx,Y+by); // Draw vector b starting at (X,Y)
// STEP 4. Insert code here to add a new vector at 90 degrees to the vector a
stroke(0,0,255);
arrow(X,Y,X+cx,Y+cy);
// STEP 5. Insert code here to compute and draw the projection of b onto c
double length1 = c.norm2();
Matrix c_unit= c.times(1/length1);
// STEP2: Insert code here to compute the dot product of b and a_unit
Matrix c_unit_T = c_unit.transpose();
Matrix projection1 = c_unit_T.times(b);
double lp1 = projection.get(0,0);
// STEP3 Insert code here to compute the vector p using equation 1.5 above
Matrix r = c_unit.times(lp1);
float rx = (float)r.get(0,0);
float ry = (float)r.get(1,0);
stroke(0,0,0); // Use a black pen
ellipse(X+rx,Y+ry,10,10); // point where b projects onto a
line(X+rx,Y+ry,X+bx,Y+by); // line from a to point of projection on b
if (mouseButton == RIGHT)
{
a.set(0,0,(double)mouseX-X);
a.set(1,0,(double)mouseY-Y);
}
if (mouseButton == LEFT)
{
b.set(0,0,(double)mouseX-X);
b.set(1,0,(double)mouseY-Y);
}
}
// Draw an arrow from (x1,y1) to (x2,y2)
void arrow(float x1, float y1, float x2, float y2) {
line(x1, y1, x2, y2);
pushMatrix();
translate(x2, y2);
float a = atan2(x1-x2, y2-y1);
rotate(a);
line(0, 0, -8, -8);
line(0, 0, 8, -8);
popMatrix();
}
Here is the code, mate:
float X=200; // Origin : Note we have now centred the origin in the X-direction
float Y=350;
float ax=300; // Vector a resolved into components
float ay=-100;
float bx=0; // Vector b resolved into components
float by=-300;
Matrix a;
Matrix b;
void setup()
{
size(400,400); // Create a drawing window
strokeWeight(3); // Make pen 3 pixels wide for all lines
double [][] anums = {{ax},
{ay}};
double [][] bnums = {{bx},
{by}};
a = new Matrix(anums);
b = new Matrix(bnums);
}
void draw()
{
background(255); // Clear screen
// Evaluate equation (1.5)
// STEP1: Insert code here that computes a_unit (i.e. the unit vector in the
// direction of a
double length = a.norm2();
Matrix a_unit = a.times(1/length);
// STEP2: Insert code here to compute the dot product of b and a_unit
Matrix a_unit_T = a_unit.transpose();
Matrix projection = a_unit_T.times(b);
double lp = projection.get(0,0);
// STEP3: Insert code here to compute the vector p using equation 1.5 above
Matrix p = a_unit.times(lp);
float px = (float)p.get(0,0);
float py = (float)p.get(1,0);
float ax = (float)a.get(0,0);
float ay = (float)a.get(1,0);
float bx = (float)b.get(0,0);
float by = (float)b.get(1,0);
// Draw the projection of b onto a
stroke(0,0,0); // Use a black pen
ellipse(X+px,Y+py,10,10); // point where b projects onto a
line(X+px,Y+py,X+bx,Y+by); // line from a to point of projection on b
stroke(255,0,0); // Make pen red
arrow(X,Y,X+ax,Y+ay); // Draw vector a starting at (X,Y)
stroke(0,255,0); // Make pen green
arrow(X,Y,X+bx,Y+by); // Draw vector b starting at (X,Y)
// STEP 4. Insert code here to add a new vector at 90 degrees to the vector a
double [][] cnums = {{ay},
{-ax}};
Matrix c = new Matrix(cnums);
float cx = (float)c.get(0,0);
float cy = (float)c.get(1,0);
stroke(0,0,255);
arrow(X,Y,X+cx,Y+cy);
// STEP 5. Insert code here to compute and draw the projection of b onto c
double length1 = c.norm2();
Matrix c_unit= c.times(1/length1);
Matrix c_unit_T = c_unit.transpose();
Matrix projection1 = c_unit_T.times(b);
double lp1 = projection1.get(0,0);
Matrix r = c_unit.times(lp1);
float rx = (float)r.get(0,0);
float ry = (float)r.get(1,0);
stroke(0,0,0); // Use a black pen
ellipse(X+rx,Y+ry,10,10); // point where b projects onto a
line(X+rx,Y+ry,X+bx,Y+by); // line from a to point of projection on b
if (mouseButton == RIGHT)
{
a.set(0,0,(double)mouseX-X);
a.set(1,0,(double)mouseY-Y);
}
if (mouseButton == LEFT)
{
b.set(0,0,(double)mouseX-X);
b.set(1,0,(double)mouseY-Y);
}
}
// Draw an arrow from (x1,y1) to (x2,y2)
void arrow(float x1, float y1, float x2, float y2)
{
line(x1, y1, x2, y2);
pushMatrix();
translate(x2, y2);
float a = atan2(x1-x2, y2-y1);
rotate(a);
line(0, 0, -8, -8);
line(0, 0, 8, -8);
popMatrix();
}
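A quick note on the math the STEP comments rely on (my own addition, using the same symbols as the code):
\hat{a} = a / \lVert a \rVert, \qquad p = (\hat{a} \cdot b)\,\hat{a}
which is what STEPs 1-3 compute, and STEP 4 works because c = (a_y, -a_x) gives a \cdot c = a_x a_y + a_y(-a_x) = 0, so c stays at 90 degrees to a no matter how a is dragged.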
I'm developing a Processing sketch that, given a certain angle, draws a dot at the edge of a rhombus.
I know the width of the rhombus, and its position, but I'm not sure how to calculate the x-y coordinates of a dot resting at its edge.
Are there any elegant solutions for this problem? Any help in pseudocode would be welcomed.
Let the square side length be A, with half-length H = A/2. Call the angle Theta and the intersection point P.
All coordinates are relative to the square center.
Rotate the square by -Pi/4, so the angle becomes Alpha = Theta - Pi/4:
if Alpha lies in range -Pi/4..Pi/4, then intersection point P' = (H, H*Tan(Alpha))
if Alpha lies in range Pi/4..3*Pi/4, then P' = (H*Cotangent(Alpha), H)
if Alpha lies in range 3*Pi/4..5*Pi/4, then P' = (-H, -H*Tan(Alpha))
if Alpha lies in range 5*Pi/4..7*Pi/4, then P' = (-H*Cotangent(Alpha), -H)
Then rotate point P' back by Pi/4:
S = Sqrt(2)/2
P.X = S * (P'.X - P'.Y)
P.Y = S * (P'.X + P'.Y)
Example (data like your sketch):
A = 200, Theta = 5*Pi/12
H = 200/2 = 100, Alpha =Theta-Pi/4 = Pi/6
P'.X = H = 100
P'.Y = H * Tan(Alpha) = 100 * Tan(Pi/6) ~= 57.7
S = 0.707
P.X = 0.707 * (100 - 57.7) = 30
P.Y = 0.707 * (100 + 57.7) = 111
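A minimal C++ sketch of this case analysis (my own addition, untested; the function name and the angle normalisation are assumptions, and M_PI requires <cmath>, plus _USE_MATH_DEFINES on MSVC):
#include <cmath>

struct Pt { double x, y; };

// Intersection of a ray at angle 'theta' (radians, from the rhombus centre)
// with a rhombus that is a square of side A rotated by 45 degrees.
// Returned coordinates are relative to the centre.
Pt rhombusEdgePoint(double A, double theta) {
    const double H = A / 2.0;
    double alpha = theta - M_PI / 4.0;                 // work in the axis-aligned frame
    while (alpha < -M_PI / 4.0) alpha += 2.0 * M_PI;   // normalise into [-Pi/4, 7*Pi/4)
    while (alpha >= 7.0 * M_PI / 4.0) alpha -= 2.0 * M_PI;
    double px, py;
    if (alpha < M_PI / 4.0)            { px =  H;                   py =  H * std::tan(alpha); }
    else if (alpha < 3.0 * M_PI / 4.0) { px =  H / std::tan(alpha); py =  H; }
    else if (alpha < 5.0 * M_PI / 4.0) { px = -H;                   py = -H * std::tan(alpha); }
    else                               { px = -H / std::tan(alpha); py = -H; }
    const double s = std::sqrt(2.0) / 2.0;             // rotate the point back by +Pi/4
    return { s * (px - py), s * (px + py) };
}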
Based on your image, you want to find the intersection of two equations, that of the line at angle θ, and that of the side of the square with which it intersects.
Assuming the size of your square is n, the equation of the square is y=±(n*(√2/2))±x (by Pythagoras' theorem). The equation for the side you intersect in your image is y=n*(√2/2)-x.
The equation of the radial line can be calculated using trigonometry to be y=tan(θ)*x, with θ expressed in radians.
You can then solve this as a simultaneous equation to determine the intersection. Please note that it will intersect with both sides of the square (both above and below), so if you only want the one you will have to choose the equation for the correct side of the square. Also guard against the case where θ is π/2, as tan(π/2) is undefined. You can easily work out that case, as x=0 and so it will always intersect at y=±(n*(√2/2)).
In your example, the intersection occurs when x*(1+tan(θ))=n*(√2/2), or x=(n*(√2/2))/(1+tan(θ)). You can calculate that, plug it back into y and that is your (x,y) intersection.
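Written out for the side used above (my own worked step):
\tan(\theta)\, x = \frac{n\sqrt{2}}{2} - x \;\Longrightarrow\; x = \frac{n\sqrt{2}/2}{1 + \tan\theta}, \qquad y = \tan(\theta)\, x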
Imagine a circle with a larger radius that will intersect your rhombus at the points you want. One way to draw at that location is to use a nested coordinate system that you translate and rotate. All you need to know is the radius and the angle.
Here's a very basic example:
float angle = radians(-80.31);
float radius = 128;
float centerX,centerY;
void setup(){
size(320,320);
noFill();
rectMode(CENTER);
centerX = width * 0.5;
centerY = height * 0.5;
}
void draw(){
background(255);
noFill();
//small circle
strokeWeight(1);
stroke(95,105,120);
ellipse(centerX,centerY,210,210);
rhombus(centerX,centerY,210);
//large circle
strokeWeight(3);
stroke(95,105,120);
ellipse(centerX,centerY,radius * 2,radius * 2);
//line at angle
pushMatrix();
translate(centerX,centerY);
rotate(angle);
stroke(162,42,32);
line(0,0,radius,0);
popMatrix();
//debug
fill(0);
text("angle: " + degrees(angle),10,15);
}
void rhombus(float x,float y,float size){
pushMatrix();
translate(x,y);
rotate(radians(45));
rect(0,0,size,size);
popMatrix();
}
void mouseDragged(){
angle = atan2(centerY-mouseY,centerX-mouseX)+PI;
}
You can try a demo here (you can drag the mouse to change the angle):
var angle;
var radius = 128;
var centerX,centerY;
function setup(){
createCanvas(320,320);
noFill();
rectMode(CENTER);
angle = radians(-80.31);
centerX = width * 0.5;
centerY = height * 0.5;
}
function draw(){
background(255);
noFill();
//small circle
strokeWeight(1);
stroke(95,105,120);
ellipse(centerX,centerY,210,210);
rhombus(centerX,centerY,210);
//large circle
strokeWeight(3);
stroke(95,105,120);
ellipse(centerX,centerY,radius * 2,radius * 2);
//line at angle
push();
translate(centerX,centerY);
rotate(angle);
stroke(162,42,32);
line(0,0,radius,0);
pop();
//debug
fill(0);
noStroke();
text("angle: " + degrees(angle),10,15);
}
function rhombus(x,y,size){
push();
translate(x,y);
rotate(radians(45));
rect(0,0,size,size);
pop();
}
function mouseDragged(){
angle = atan2(centerY-mouseY,centerX-mouseX)+PI;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/0.5.0/p5.min.js"></script>
If you want to calculate the position, you can use the polar to cartesian coordinate conversion formula:
x = cos(angle) * radius
y = sin(angle) * radius
Here's an example using that. Note that drawing is done from the centre, therefore the centre coordinates are added to the above:
float angle = radians(-80.31);
float radius = 128;
float centerX,centerY;
void setup(){
size(320,320);
noFill();
rectMode(CENTER);
centerX = width * 0.5;
centerY = height * 0.5;
}
void draw(){
background(255);
noFill();
//small circle
strokeWeight(1);
stroke(95,105,120);
ellipse(centerX,centerY,210,210);
rhombus(centerX,centerY,210);
//large circle
strokeWeight(3);
stroke(95,105,120);
ellipse(centerX,centerY,radius * 2,radius * 2);
//line at angle
float x = centerX+(cos(angle) * radius);
float y = centerY+(sin(angle) * radius);
stroke(162,42,32);
line(centerX,centerY,x,y);
//debug
fill(0);
text("angle: " + degrees(angle),10,15);
}
void rhombus(float x,float y,float size){
pushMatrix();
translate(x,y);
rotate(radians(45));
rect(0,0,size,size);
popMatrix();
}
void mouseDragged(){
angle = atan2(centerY-mouseY,centerX-mouseX)+PI;
}
Another option would be to use transformation matrices.
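For completeness, a small sketch of that option (my own addition, not from the original answer): a 2x2 rotation matrix applied to the point (radius, 0), then translated by the centre, gives the same result as the polar-to-cartesian formula above.
#include <cmath>

// Rotate (radius, 0) about (centerX, centerY) by 'angle' radians
// using the rotation matrix [cos -sin; sin cos].
void pointOnCircle(double centerX, double centerY, double radius, double angle,
                   double &outX, double &outY) {
    const double c = std::cos(angle), s = std::sin(angle);
    outX = centerX + c * radius - s * 0.0; // zero terms kept to show the matrix form
    outY = centerY + s * radius + c * 0.0;
}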
I'm trying to draw an arrow so I just referred to the example code where we can draw arrows:
http://doc.qt.io/qt-5/qtwidgets-graphicsview-elasticnodes-edge-cpp.html
I decided to use the same formula and tried this:
theCurrentLine->setP1(QPointF(0, 0) );
theCurrentLine->setP2((theLineVector));
p->drawLine(*theCurrentLine);
double angle = ::acos(theCurrentLine->dx() / theCurrentLine->length());
if (theCurrentLine->dy() >= 0)
angle = TwoPi - angle;
QPointF sourcePoint = QPointF(0,0);
QPointF sourceArrowP1 = sourcePoint + QPointF(sin(angle + Pi / 3) * theArrowSize,
cos(angle + Pi / 3) * theArrowSize);
QPointF sourceArrowP2 = sourcePoint + QPointF(sin(angle + Pi - Pi / 3) * theArrowSize,
cos(angle + Pi - Pi / 3) * theArrowSize);
p->drawPolygon(QPolygonF() << theCurrentLine->p1() << sourceArrowP1 << sourceArrowP2);
but now I want to draw the line after the arrow head polygon is drawn.
How can I change the P1() value of theCurrentLine so that the line starts after the polygon? Currently the polygon (arrow head) and the line start at the same point, and I need the line to start after the arrow head because, when the pen width increases, the arrow head looks smaller than the line.
You can get the point at index in QPolygon.
QPoint QPolygon::point ( int index ) const
It would be easy when you know how many points there are. And Qt documentation is your friend.
And you could use count(), for example:
QPoint lastPoint = myPolygon.point(myPolygon.count() - 1);
Just give a name to your polygon, and you'll be fine.
Edit: The latest version of this code should solve your problem. I thought I should give an example: you need to add the points in this order:
QPolygon myPolygon(3); // pre-size the polygon so setPoint() has valid indices
myPolygon.setPoint(0, sourceArrowP1.toPoint());
myPolygon.setPoint(1, theCurrentLine->p1().toPoint());
myPolygon.setPoint(2, sourceArrowP2.toPoint());
p->drawPolygon(myPolygon);
QPoint lastPoint;
lastPoint = myPolygon.point(myPolygon.count() - 1);
You need to draw the line between last and first points. Here:
p->drawLine(myPolygon.point(0), myPolygon.point(myPolygon.count() - 1));
If you want your arrow head to be filled with color, you need to use QPainterPath instead of QPolygon:
QPen pen(Qt::black); //Or whatever color you want
pen.setWidthF(10); //Or whatever size you want your lines to be
QBrush brush(Qt::red); //Or whatever color you want to fill the arrow head
p->setPen(pen);
p->setBrush(brush);
QPainterPath arrow(sourceArrowP1);
arrow.lineTo(theCurrentLine->p1());
arrow.lineTo(sourceArrowP2);
arrow.lineTo(sourceArrowP1); //This returns to the first point. You might eliminate this line if you want to.
p->drawPath(arrow);
My actual implementation:
theCurrentLine->setP1(QPointF(0, 0) ); // arrow line coordinates
theCurrentLine->setP2((theLineVector));
double angle = ::acos(theCurrentLine->dx() / theCurrentLine->length()); // angle of the current Line
if (theCurrentLine->dy() >= 0)
angle = TwoPi - angle;
// getting arrow head points to be drawn
QPointF sourcePoint = QPointF(0,0);
QPointF sourceArrowP1 = sourcePoint + QPointF(sin(angle + Pi / 3) * theArrowSize,
cos(angle + Pi / 3) * theArrowSize);
QPointF sourceArrowP2 = sourcePoint + QPointF(sin(angle + Pi - Pi / 3) * theArrowSize,
cos(angle + Pi - Pi / 3) * theArrowSize);
p->drawPolygon(QPolygonF() << theCurrentLine->p1() << sourceArrowP1 << sourceArrowP2);
// find the point where the line crosses the arrow-head base, so the line can start there
QLineF perpLine = QLineF(theCurrentLine->p1(), theCurrentLine->p2());
QLineF arrowheadWidth = QLineF(sourceArrowP1, sourceArrowP2);
QPointF originPoint;
QLineF::IntersectType res = perpLine.intersect(arrowheadWidth, &originPoint);
theCurrentLine->setP1(originPoint);
p->drawLine(*theCurrentLine);
If anyone knows a much better way to implement this (I'm sure there is), please correct me.
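One possible simplification (my own untested sketch, assuming <cmath> and M_PI are available): the midpoint of the arrow-head base lies on the line at distance theArrowSize * cos(Pi/6) from the tip, so the intersection step could be replaced with QLineF::pointAt():
// Start the line at the arrow-head base instead of intersecting two lines.
// theCurrentLine still runs from the arrow tip (P1) towards P2 at this point.
const double baseOffset = theArrowSize * std::cos(M_PI / 6.0); // distance from tip to base midpoint
theCurrentLine->setP1(theCurrentLine->pointAt(baseOffset / theCurrentLine->length()));
p->drawLine(*theCurrentLine);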
I'm trying to code correct 2D affine texture mapping in GLSL.
Explanation:
...NONE of these images is correct for my purposes. The right one (labeled Correct) has perspective correction, which I do not want. So this: Getting to know the Q texture coordinate solution (without further improvements) is not what I'm looking for.
I'd like to simply "stretch" the texture inside the quadrilateral, something like this:
but composed of two triangles. Any advice (GLSL) please?
This works well as long as you have a trapezoid, and its parallel edges are aligned with one of the local axes. I recommend playing around with my Unity package.
GLSL:
varying vec2 shiftedPosition, width_height;
#ifdef VERTEX
void main() {
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
shiftedPosition = gl_MultiTexCoord0.xy; // left and bottom edges zeroed.
width_height = gl_MultiTexCoord1.xy;
}
#endif
#ifdef FRAGMENT
uniform sampler2D _MainTex;
void main() {
gl_FragColor = texture2D(_MainTex, shiftedPosition / width_height);
}
#endif
C#:
// Zero out the left and bottom edges,
// leaving a right trapezoid with two sides on the axes and a vertex at the origin.
var shiftedPositions = new Vector2[] {
Vector2.zero,
new Vector2(0, vertices[1].y - vertices[0].y),
new Vector2(vertices[2].x - vertices[1].x, vertices[2].y - vertices[3].y),
new Vector2(vertices[3].x - vertices[0].x, 0)
};
mesh.uv = shiftedPositions;
var widths_heights = new Vector2[4];
widths_heights[0].x = widths_heights[3].x = shiftedPositions[3].x;
widths_heights[1].x = widths_heights[2].x = shiftedPositions[2].x;
widths_heights[0].y = widths_heights[1].y = shiftedPositions[1].y;
widths_heights[2].y = widths_heights[3].y = shiftedPositions[2].y;
mesh.uv2 = widths_heights;
I recently managed to come up with a generic solution to this problem for any type of quadrilateral. The calculations and GLSL may be of help. There's a working demo in Java (it runs on Android), but it is compact and readable and should be easily portable to Unity or iOS: http://www.bitlush.com/posts/arbitrary-quadrilaterals-in-opengl-es-2-0
In case anyone's still interested, here's a C# implementation that takes a quad defined by the clockwise screen verts (x0,y0) (x1,y1) ... (x3,y3) and an arbitrary pixel at (x,y), and calculates the u and v of that pixel. It was originally written to CPU-render an arbitrary quad to a texture, but it's easy enough to split the algorithm across the CPU, vertex and pixel shaders; I've commented accordingly in the code.
float Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, A, B, C;
//These are all uniforms for a given quad. Calculate on CPU.
Ax = (x3 - x0) - (x2 - x1);
Bx = (x0 - x1);
Cx = (x2 - x1);
Dx = x1;
Ay = (y3 - y0) - (y2 - y1);
By = (y0 - y1);
Cy = (y2 - y1);
Dy = y1;
float ByCx_plus_AyDx_minus_BxCy_minus_AxDy = (By * Cx) + (Ay * Dx) - (Bx * Cy) - (Ax * Dy);
float ByDx_minus_BxDy = (By * Dx) - (Bx * Dy);
A = (Ay*Cx)-(Ax*Cy);
//These must be calculated per-vertex, and passed through as interpolated values to the pixel-shader
B = (Ax * y) + ByCx_plus_AyDx_minus_BxCy_minus_AxDy - (Ay * x);
C = (Bx * y) + ByDx_minus_BxDy - (By * x);
//These must be calculated per-pixel using the interpolated B, C and x from the vertex shader along with some of the other uniforms.
u = ((-B) - Mathf.Sqrt((B*B-(4.0f*A*C))))/(A*2.0f);
v = (x - (u * Cx) - Dx)/((u*Ax)+Bx);
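For clarity, a summary of the algebra the code above inverts (my own addition): the quad is parameterised by the bilinear map
x = A_x\,uv + B_x\,v + C_x\,u + D_x, \qquad y = A_y\,uv + B_y\,v + C_y\,u + D_y
and eliminating v yields the quadratic A\,u^2 + B\,u + C = 0 with exactly the A, B and C computed above; u is the root taken with the minus sign, and v then follows from the x equation as in the last line.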
Tessellation solves this problem: subdividing the quad adds vertices, which gives the rasterizer more interpolation hints and reduces the distortion. Check out this link:
https://www.youtube.com/watch?v=8TleepxIORU&feature=youtu.be
I had a similar question ( https://gamedev.stackexchange.com/questions/174857/mapping-a-texture-to-a-2d-quadrilateral/174871 ), and at gamedev they suggested using an imaginary Z coordinate, which I calculate using the following C code. It appears to work in the general case (not just for trapezoids):
//usual euclidean distance
float distance(int ax, int ay, int bx, int by) {
int x = ax-bx;
int y = ay-by;
return sqrtf((float)(x*x + y*y));
}
void gfx_quad(gfx_t *dst //destination texture, we are rendering into
,gfx_t *src //source texture
,int *quad // quadrilateral vertices
)
{
int *v = quad; //quad vertices
float z = 20.0;
float top = distance(v[0],v[1],v[2],v[3]); //top
float bot = distance(v[4],v[5],v[6],v[7]); //bottom
float lft = distance(v[0],v[1],v[4],v[5]); //left
float rgt = distance(v[2],v[3],v[6],v[7]); //right
// By default all vertices lie on the screen plane
float az = 1.0;
float bz = 1.0;
float cz = 1.0;
float dz = 1.0;
// Move Z away from the screen plane based on the distance ratios.
if (top<bot) {
az *= top/bot;
bz *= top/bot;
} else {
cz *= bot/top;
dz *= bot/top;
}
if (lft<rgt) {
az *= lft/rgt;
cz *= lft/rgt;
} else {
bz *= rgt/lft;
dz *= rgt/lft;
}
// draw our quad as two textured triangles
gfx_textured(dst, src
, v[0],v[1],az, v[2],v[3],bz, v[4],v[5],cz
, 0.0,0.0, 1.0,0.0, 0.0,1.0);
gfx_textured(dst, src
, v[2],v[3],bz, v[4],v[5],cz, v[6],v[7],dz
, 1.0,0.0, 0.0,1.0, 1.0,1.0);
}
I'm doing it in software to scale and rotate 2D sprites; for an OpenGL 3D app you would need to do it in the pixel/fragment shader, unless you can map these imaginary az,bz,cz,dz into your actual 3D space and use the usual pipeline. DMGregory gave exact code for OpenGL shaders: https://gamedev.stackexchange.com/questions/148082/how-can-i-fix-zig-zagging-uv-mapping-artifacts-on-a-generated-mesh-that-tapers
I came up with this issue as I was trying to implement a homography warping in OpenGL. Some of the solutions that I found relied on a notion of depth, but this was not feasible in my case since I am working on 2D coordinates.
I based my solution on this article, and it seems to work for all cases that I could try. I am leaving it here in case it is useful for someone else as I could not find something similar. The solution makes the following assumptions:
The vertex coordinates are the 4 points of a quad in Lower Right, Upper Right, Upper Left, Lower Left order.
The coordinates are given in OpenGL's reference system (range [-1, 1], with origin at bottom left corner).
#include <cmath>
#include <vector>
#include <Eigen/Dense>
#include <opencv2/core.hpp>

// The 4 quad corners, filled in elsewhere (Lower Right, Upper Right, Upper Left, Lower Left).
std::vector<cv::Point2f> points;
std::vector<float> distances;
// Convert points to homogeneous coordinates to simplify the problem.
Eigen::Vector3f p0(points[0].x, points[0].y, 1);
Eigen::Vector3f p1(points[1].x, points[1].y, 1);
Eigen::Vector3f p2(points[2].x, points[2].y, 1);
Eigen::Vector3f p3(points[3].x, points[3].y, 1);
// Compute the intersection point between the lines described by opposite vertices using cross products. Normalization is only required at the end.
// See https://leimao.github.io/blog/2D-Line-Mathematics-Homogeneous-Coordinates/ for a quick summary of this approach.
auto line1 = p2.cross(p0);
auto line2 = p3.cross(p1);
auto intersection = line1.cross(line2);
intersection = intersection / intersection(2);
// Compute distance to each point.
for (const auto &pt : points) {
auto distance = std::sqrt(std::pow(pt.x - intersection(0), 2) +
std::pow(pt.y - intersection(1), 2));
distances.push_back(distance);
}
// Assumes same order as above.
std::vector<cv::Point2f> texture_coords_unnormalized = {
{1.0f, 1.0f},
{1.0f, 0.0f},
{0.0f, 0.0f},
{0.0f, 1.0f}
};
std::vector<float> texture_coords;
for (int i = 0; i < texture_coords_unnormalized.size(); ++i) {
float u_i = texture_coords_unnormalized[i].x;
float v_i = texture_coords_unnormalized[i].y;
float d_i = distances.at(i);
float d_i_2 = distances.at((i + 2) % 4);
float scale = (d_i + d_i_2) / d_i_2;
texture_coords.push_back(u_i*scale);
texture_coords.push_back(v_i*scale);
texture_coords.push_back(scale);
}
Pass the texture coordinates to your shader (use vec3). Then:
gl_FragColor = vec4(texture2D(textureSampler, textureCoords.xy/textureCoords.z).rgb, 1.0);
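A brief note on why this works (my own addition): with q_i = (d_i + d_{i+2}) / d_{i+2} being the scale computed above, each vertex carries the attribute (u_i q_i,\; v_i q_i,\; q_i). The rasterizer interpolates those three components linearly, and dividing by the interpolated third component in the fragment shader turns that into projective rather than affine interpolation, which keeps the two triangles consistent with the homography.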
Thanks for the answers, but after experimenting I found a solution.
The two triangles on the left have UVs (strq) according to this, and the two triangles on the right are a modified version of this perspective correction.
Numbers and shader:
tri1 = [Vec2(-0.5, -1), Vec2(0.5, -1), Vec2(1, 1)]
tri2 = [Vec2(-0.5, -1), Vec2(1, 1), Vec2(-1, 1)]
d1 = length of top edge = 2
d2 = length of bottom edge = 1
tri1_uv = [Vec4(0, 0, 0, d2 / d1), Vec4(d2 / d1, 0, 0, d2 / d1), Vec4(1, 1, 0, 1)]
tri2_uv = [Vec4(0, 0, 0, d2 / d1), Vec4(1, 1, 0, 1), Vec4(0, 1, 0, 1)]
Only the triangles on the right are rendered using this GLSL shader (the left side uses the fixed pipeline):
void main()
{
gl_FragColor = texture2D(colormap, vec2(gl_TexCoord[0].x / gl_TexCoord[0].w, gl_TexCoord[0].y));
}
So only U is perspective-corrected and V stays linear.