Drawing a cube with perspective - math

I am trying to draw the Mandelbox using my own ray marching on the CPU.
I have a width*height bitmap to render to.
For each pixel, I want to march towards the cube:
static float eye = 0.0f; eye = glm::clamp(eye+0.005f,0.0f,1.0f); // animate
const glm::mat4 projection = glm::perspective(35.0f, (float)width/height, 0.1f, 10.0f),
    modelview = glm::lookAt(glm::vec3(cos(eye),sin(eye),-1), glm::vec3(0,0,0), glm::vec3(0,0,1));
const float epsilon = sqrt(1.0f/std::max(width,height))/2.0f;
for(int y=0; y<height; y++) {
    for(int x=0; x<width; x++) {
        glm::vec3 p = glm::unProject(glm::vec3(x,y,0), modelview, projection, glm::vec4(0,0,width,height)),
            dir = glm::unProject(glm::vec3(x,y,1), modelview, projection, glm::vec4(0,0,width,height)) - p,
            P0 = p;
        //std::cout << x << "," << y << " " << p.x << "," << p.y << "," << p.z << " " << dir.x << "," << dir.y << "," << dir.z << std::endl;
        float D = 0;
        for(int i=0; i<MAX_ITER; i++) {
            const float d = DE(p);
            D += d;
            if(d<epsilon) {
                depth_bmp[y*width+x] = 255.0f/i;
                break;
            }
            p = dir*D + P0;
        }
    }
}
My distance estimator function is a very literal translation and looks like this:
float DE(glm::vec3 p) {
    const float Scale = -1.77f, fixedRadius2 = 1.0f, minRadius2 = (0.5f*0.5f);
    const glm::vec3 p0 = p;
    float dr = 1.0f;
    for(int n = 0; n < 13; n++) {
        // Reflect (box fold)
        p = (glm::clamp(p,-1.0f,1.0f) * 2.0f) - p;
        // Sphere Inversion (ball fold)
        const float r2 = glm::dot(p,p);
        if(r2<minRadius2) {
            const float t = (fixedRadius2/minRadius2);
            p *= t;
            dr *= t;
        } else if(r2<fixedRadius2) {
            const float t = (fixedRadius2/r2);
            p *= t;
            dr *= t;
        }
        // Scale & Translate
        p = p * Scale + p0;
        dr = dr * std::abs(Scale) + 1.0f; // std::abs: plain abs() may resolve to the int overload
    }
    return glm::length(p)/std::abs(dr);
}
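For reference, the intent is the standard Mandelbox distance estimate; as I read the code, one iteration is

$$
\begin{aligned}
z' &= 2\,\operatorname{clamp}(z,-1,1) - z &&\text{(box fold)}\\
z'' &= z' \cdot \begin{cases} f/m, & |z'|^2 < m\\ f/|z'|^2, & m \le |z'|^2 < f\\ 1, & \text{otherwise}\end{cases} &&\text{(sphere fold)}\\
z_{n+1} &= s\,z'' + z_0, \qquad dr_{n+1} = |s|\,dr_n + 1,
\end{aligned}
$$

with $s = -1.77$ (Scale), $f = 1$ (fixedRadius2), $m = 0.25$ (minRadius2), and the returned estimate $\operatorname{DE}(z_0) = |z_N|/|dr_N|$.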
And the output looks completely unbox-like:
How do I set the eye transform up so I see the cube properly?

The issue is that the ray direction must be normalised, keeping its original length around for the far-plane test:
glm::vec3 p = glm::unProject(glm::vec3(x,y,0), modelview, projection, glm::vec4(0,0,width,height)),
    dir = glm::unProject(glm::vec3(x,y,1), modelview, projection, glm::vec4(0,0,width,height)) - p;
const float len = glm::length(dir);
dir = glm::normalize(dir); // note: GLM spells it "normalize"
float D = 0;
for(int i=0; i<MAX_ITER; i++) {
    const float d = DE(p + dir*D);
    D += d;
    if(D > len) break; // marched past the far plane
    ...

You can use the method outlined here to generate the correct rays (and their lengths).
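If you would rather not go through glm::unProject at all, the same rays can be built directly from the camera basis and the vertical field of view. A minimal sketch, assuming GLM, a FOV in radians, and the same lookAt parameters as above (the function name eyeRay is mine, not from the question):

#include <cmath>
#include <glm/glm.hpp>

// Build a normalized view ray through pixel (x, y) for a look-at camera.
glm::vec3 eyeRay(int x, int y, int width, int height, float fovY,
                 const glm::vec3 &eye, const glm::vec3 &center, const glm::vec3 &up)
{
    const glm::vec3 forward = glm::normalize(center - eye);
    const glm::vec3 right   = glm::normalize(glm::cross(forward, up));
    const glm::vec3 upv     = glm::cross(right, forward);
    const float aspect = (float)width / (float)height;
    const float halfH  = tanf(fovY * 0.5f); // half-height of the image plane at distance 1
    const float halfW  = halfH * aspect;
    // map the pixel centre into [-1, 1] on both axes
    const float u = (2.0f * (x + 0.5f) / width  - 1.0f) * halfW;
    const float v = (1.0f - 2.0f * (y + 0.5f) / height) * halfH;
    return glm::normalize(forward + u * right + v * upv);
}

March from eye along eyeRay(...) exactly as in the loop above; since the direction is unit length, D is then a true distance.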


Implementation of "The W4 Method" (Newton-Raphson extension)

I am currently implementing a Newton-Raphson root-finding method that guarantees convergence in a multidimensional setting (not homework!). Currently it finds the root for x, but not for y. I have also observed strange behaviour where f1 and f2 converge to the same number; for example, after 2000 iterations both are ≈ 560.0. I think f1 and f2 both need to approach 0; at least, that is how it works with the classical Newton-Raphson method.
Can anyone see what could cause this? I need a second pair of eyes.
paper: https://arxiv.org/pdf/1809.04495.pdf and addendum: https://arxiv.org/pdf/1809.04358.pdf (section D.2 -> includes the attached math)
Note: U, L are the upper and lower triangular matrices of the Jacobian (matrix of partial derivatives).
My current implementation looks like the following (Eigen is used, but it should be clear what it does):
#include "../../Eigen/Eigen/Core"
#include "../../Eigen/Eigen/LU"
#include <iostream>

int main(){
    double eps = 1e-4;
    Eigen::Vector2d p(0.0, 0.0);
    double x = 0.1;
    double y = 1.0;
    double f1 = 1e9;
    double f2 = 1e9;
    unsigned int count = 0;
    while (count < 2000 && f1 > eps){
        std::cout << "count : " << count << std::endl;
        f1 = x*x - 10*x + y*y - 10*y + 34;
        f2 = x*x - 22*x + y*y - 10*y + 130;
        std::cout << "f1: " << f1 << ", f2: " << f2 << std::endl;
        double A = 2*x - 10;
        double B = 2*y - 10;
        double C = 2*x - 22;
        double D = 2*y - 10;
        Eigen::Matrix2d J;
        J << A, B, C, D;
        Eigen::Matrix2d J_U_inv;
        J_U_inv << J(0,0), J(0,1), 0.0, J(1,1);
        J_U_inv = J_U_inv.inverse();
        Eigen::Matrix2d J_L_inv;
        J_L_inv << J(0,0), 0.0, J(1,0), J(1,1);
        J_L_inv = J_L_inv.inverse();
        Eigen::Vector2d f3(f1, f2);
        Eigen::Vector2d T(x, y);
        if (count == 0){
            p = -0.5 * J_U_inv * f3;
        }
        Eigen::Vector2d E = T + 0.5 * J_L_inv * p;
        p = -0.5 * J_U_inv * f3;
        x = E(0);
        y = E(1);
        std::cout << "x, y: " << x << ", " << y << std::endl;
        ++count;
    }
}
It seems I was not aware of the proper way to do the matrix decomposition.
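Concretely: the first attempt built J_U_inv and J_L_inv from the raw triangular halves of J, but the paper's U and L have to come from an actual UL factorization of the Jacobian. For the 2x2 case, with L chosen unit lower triangular (my own working-out from the corrected code below, not copied from the paper):

$$
J = \begin{pmatrix} a & b \\ c & d \end{pmatrix}
  = UL
  = \begin{pmatrix} a - bc/d & b \\ 0 & d \end{pmatrix}
    \begin{pmatrix} 1 & 0 \\ c/d & 1 \end{pmatrix},
$$

which you can verify by multiplying out.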
Below is a working example of the W4 method for a two-dimensional system.
#include "../../Eigen/Eigen/Core"
#include "../../Eigen/Eigen/LU"
#include <cmath> // std::abs(double)
#include <iostream>

int main(){
    double eps = 1e-4;
    Eigen::Vector2d p(0.0, 0.0);
    double x = 0.1;
    double y = 1.0;
    double f1 = 1e9;
    double f2 = 1e9;
    unsigned int count = 0;
    while (std::abs(f1) > eps && std::abs(f2) > eps){
        std::cout << "count : " << count << std::endl;
        f1 = x*x - 10*x + y*y - 10*y + 34;
        f2 = x*x - 22*x + y*y - 10*y + 130;
        std::cout << "f1: " << f1 << ", f2: " << f2 << std::endl;
        double A = 2*x - 10;
        double B = 2*y - 10;
        double C = 2*x - 22;
        double D = 2*y - 10;
        Eigen::Matrix2d J;
        J << A, B, C, D;
        // U factor of J = U*L, then inverted
        Eigen::Matrix2d J_U_inv;
        J_U_inv << J(0,0) - J(0,1)*J(1,0)/J(1,1), J(0,1),
                   0.0,                           J(1,1);
        J_U_inv = J_U_inv.inverse().eval();
        // unit lower-triangular L factor, then inverted
        Eigen::Matrix2d J_L_inv;
        J_L_inv << 1.0,           0.0,
                   J(1,0)/J(1,1), 1.0;
        J_L_inv = J_L_inv.inverse().eval();
        Eigen::Vector2d f3(f1, f2);
        Eigen::Vector2d T(x, y);
        if (count == 0){
            p = -0.5 * J_U_inv * f3;
        }
        Eigen::Vector2d E = T + 0.5 * J_L_inv * p;
        p = -0.5 * J_U_inv * f3;
        x = E(0);
        y = E(1);
        std::cout << "x, y: " << x << ", " << y << std::endl;
        ++count;
    }
}
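As a sanity check: completing the squares shows f1 and f2 are circles of radius 4 centred at (5,5) and (11,5), so the true roots are (8, 5±√7), i.e. approximately (8, 7.646) and (8, 2.354); the printed iterates can be compared against those.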

Collision detection between two objects

The collision detection is not working.
Following the approach from the post Collision detection between 2 "linearly" moving objects in WGS84, I have the following data.
EDIT:
I have updated the data for a collision that should occur in 10 seconds.
m_sPosAV = {North=48.276111971715515 East=17.921031349301817 Altitude=6000.0000000000000 }
Poi_Position = {North=48.806113707277042 East=17.977161602106488 Altitude=5656.0000000000000 }
velocity.x = -189.80000000000001 // m/s
velocity.y = -39.800000000000004 // m/s
velocity.z = 9 // m/s
m_sVelAV = {x=1.0000000000000000 y=1.0000000000000000 z=0.00000000000000000 } // m/s
void WGS84toXYZ(double &x, double &y, double &z, double lon, double lat, double alt)
{
    const double _earth_a = 6378141.4; // [m] equator radius
    const double _earth_b = 6356755.0; // [m] polar radius
    double a, b, h, l, c, s;
    a = lon; // [rad]
    b = lat; // [rad]
    h = alt;
    c = cos(b);
    s = sin(b);
    // approximate ellipsoid radius at this latitude, plus the altitude
    h = h + sqrt((_earth_a*_earth_a*c*c) + (_earth_b*_earth_b*s*s));
    z = h*s;
    l = h*c;
    x = l*cos(a);
    y = l*sin(a);
}
bool CPoiFilterCollision::collisionDetection(const CPoiItem& poi)
{
    const double _min_t = 10;    // min_time
    const double _max_d = 500;   // max_distance
    const double _max_t = 0.001; // max_time
    double dt;
    double d0, d1;
    double xAv, yAv, zAv;
    double xPoi, yPoi, zPoi;
    double x, y, z;
    double Ux, Uy, Uz; // [m]
    double Vx, Vy, Vz; // [m]
    double Wx, Wy, Wz; // [m]
    double da = 1.567e-7; // [rad] angular step ~ 1.0 m in lon direction
    double dl = 1.0;      // [m] altitude step 1.0 m
    const double deg = pi / 180.0;
    WGS84toXYZ(xAv, yAv, zAv, m_sPosAV.GetLongitude(), m_sPosAV.GetLatitude(), m_sPosAV.GetAltitude()); // actual position
    WGS84toXYZ(xPoi, yPoi, zPoi, poi.Position().GetLongitude(), poi.Position().GetLatitude(), poi.Position().GetAltitude()); // actual position
    WGS84toXYZ(Ux, Uy, Uz, m_sPosAV.GetLongitude() + da, m_sPosAV.GetLatitude(), m_sPosAV.GetAltitude()); // lon direction North
    WGS84toXYZ(Vx, Vy, Vz, m_sPosAV.GetLongitude(), m_sPosAV.GetLatitude() + da, m_sPosAV.GetAltitude()); // lat direction East
    WGS84toXYZ(Wx, Wy, Wz, m_sPosAV.GetLongitude(), m_sPosAV.GetLatitude(), m_sPosAV.GetAltitude() + dl); // alt direction High/Up
    Ux -= xAv; Uy -= yAv; Uz -= zAv;
    Vx -= xAv; Vy -= yAv; Vz -= zAv;
    Wx -= xAv; Wy -= yAv; Wz -= zAv;
    normalize(Ux, Uy, Uz);
    normalize(Vx, Vy, Vz);
    normalize(Wx, Wy, Wz);
    double vx = m_sVelAV.x*Ux + m_sVelAV.y*Vx + m_sVelAV.z*Wx;
    double vy = m_sVelAV.x*Uy + m_sVelAV.y*Vy + m_sVelAV.z*Wy;
    double vz = m_sVelAV.x*Uz + m_sVelAV.y*Vz + m_sVelAV.z*Wz;
    const QList<QVariant> velocity = poi.Property(QLatin1String("VELOCITY")).toList();
    if (velocity.size() == 3)
    {
        dt = _max_t;
        x = xAv - xPoi;
        y = yAv - yPoi;
        z = zAv - zPoi;
        d0 = sqrt((x*x) + (y*y) + (z*z));
        x = xAv - xPoi + (vx - velocity.at(0).toDouble())*dt;
        y = yAv - yPoi + (vy - velocity.at(1).toDouble())*dt;
        z = zAv - zPoi + (vz - velocity.at(2).toDouble())*dt;
        d1 = sqrt((x*x) + (y*y) + (z*z));
        if (d0 <= _max_d)
        {
            return true;
        }
        if (d0 <= d1)
        {
            return false;
        }
        double t = (_max_d - d0)*dt / (d1 - d0);
        if (t < _min_t)
        {
            qDebug() << "Collision at time " << t;
            return true;
        }
    }
    return false;
}
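One extra check worth doing: instead of extrapolating linearly from the two sampled distances d0 and d1, the closest-approach time for straight-line motion can be computed in closed form from the relative state. A small self-contained sketch of that standard computation (this is not the method from the linked post; feed it r0 = the relative position xAv-xPoi, ... and v = the relative velocity used above):

// Relative separation r(t) = r0 + v*t is smallest where d|r|^2/dt = 0,
// i.e. at t* = -(r0 . v)/(v . v).  Returns true if the objects pass
// within maxDist metres of each other within maxTime seconds.
bool closestApproach(const double r0[3], const double v[3],
                     double maxDist, double maxTime, double &tStar)
{
    const double rv = r0[0]*v[0] + r0[1]*v[1] + r0[2]*v[2];
    const double vv = v[0]*v[0] + v[1]*v[1] + v[2]*v[2];
    tStar = (vv > 0.0) ? -rv / vv : 0.0; // no relative motion -> constant distance
    if (tStar < 0.0) tStar = 0.0;        // closest point is already in the past
    if (tStar > maxTime) return false;   // too far in the future to care
    double d2 = 0.0;
    for (int i = 0; i < 3; ++i) {
        const double c = r0[i] + v[i]*tStar;
        d2 += c*c;
    }
    return d2 <= maxDist*maxDist;
}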

Making Console::WriteLine(x) stay on the same line when looped in CLR console C++

I'm trying to create a Pascal's triangle in a CLR console project (using stdafx.h).
I run into a problem when calling Console::WriteLine(x) in a loop: I want the output to stay on the same line.
I "translated" this from an iostream-based empty C++ project:
#include "stdafx.h"
using namespace System;
int main(array<System::String ^> ^args)
{
int k, i, x, a, b, c, d, e, f, g, h;
Console::WriteLine(L"Number of Rows : ");
String^ strabx = Console::ReadLine();
int n = int::Parse(strabx); //the number of rows
d = 0;
g = 0;
for (i = 0; i <= (n - 1); i++) // i adalah baris
{
for (a = (n - i); a >= 1; a--) {
Console::WriteLine(" ");
}
x = 1;
b = 0;
e = 0;
f = 0;
k = 0;
do
{
f = f + x;
Console::WriteLine(" " + x + " "); //my problem lies here
x = x * (i - k) / (k + 1);
b = b + 1;
k++;
} while (k <= i);
c = b;
d = d + c;
g = f + g;
Console::WriteLine();
}
Console::WriteLine("digit count " + d + "\n");
Console::WriteLine("sums of the number : " + g);
Console::ReadLine();
Console::WriteLine();
}
Use Console::Write() instead; unlike Console::WriteLine(), it does not append a line terminator:
Console::Write(" " + x + " ");
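The leading-spaces loop has the same problem, so both inner loops need the change; a minimal sketch with the question's own variables:

for (a = (n - i); a >= 1; a--) {
    Console::Write(" ");            // leading padding, stays on the row
}
x = 1;
k = 0;
do
{
    f = f + x;
    Console::Write(" " + x + " ");  // coefficients, stay on the row
    x = x * (i - k) / (k + 1);
    b = b + 1;
    k++;
} while (k <= i);
Console::WriteLine();               // end the row exactly once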

How do you represent an image in OpenCL

I have sample code, but it completely leaves out what my (void*) image data should be!
I setup a cl_image_desc, cl_image_format, buffer, origin, and region:
cl_image_desc desc;
desc.image_type = CL_MEM_OBJECT_IMAGE2D;
desc.image_width = width;
desc.image_height = height;
desc.image_depth = 0;
desc.image_array_size = 0;
desc.image_row_pitch = 0;
desc.image_slice_pitch = 0;
desc.num_mip_levels = 0;
desc.num_samples = 0;
desc.buffer = NULL;
cl_image_format format;
format.image_channel_order = CL_R;
format.image_channel_data_type = CL_FLOAT;
cl_mem bufferSourceImage = clCreateImage(context, CL_MEM_READ_ONLY, &format, &desc, NULL, NULL);
size_t origin[3] = {0, 0, 0};
size_t region[3] = {width, height,1};
In this next snippet sourceImage is a void pointer to my image. But what is my image? For every pixel there are r, g, b, a, x, and y values.
clEnqueueWriteImage(queue, bufferSourceImage, CL_TRUE, origin, region, 0, 0, sourceImage, 0, NULL, NULL);
How do I turn my image (a bunch of (r,g,b,a,x,y)'s) into a suitable array?
This is the kernel they provide:
__kernel void convolution(__read_only image2d_t sourceImage, __write_only image2d_t outputImage,
                          int rows, int cols, __constant float* filter, int filterWidth,
                          sampler_t sampler)
{
    int column = get_global_id(0);
    int row = get_global_id(1);
    int halfWidth = (int)(filterWidth/2);
    float4 sum = {0.0f, 0.0f, 0.0f, 0.0f};
    int filterIdx = 0;
    int2 coords;
    for(int i = -halfWidth; i <= halfWidth; i++)
    {
        coords.y = row + i;
        for(int i2 = -halfWidth; i2 <= halfWidth; i2++)
        {
            coords.x = column + i2;
            float4 pixel;
            pixel = read_imagef(sourceImage, sampler, coords);
            sum.x += pixel.x * filter[filterIdx++];
        }
    }
    if(row < rows && column < cols) // the sample's undeclared myRow/myCol, presumably these
    {
        coords.x = column;
        coords.y = row;
        write_imagef(outputImage, coords, sum);
    }
}
Set up the cl_image_format however you like, and then supply your data in exactly the format you selected. With your current settings, each channel value must be a single-precision floating-point value (image_channel_data_type = CL_FLOAT), and only one channel per pixel is stored, feeding the R component (image_channel_order = CL_R).
Your kernel expects floats:
float4 pixel;
pixel = read_imagef(sourceImage, sampler, coords);
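So for a CL_R / CL_FLOAT image, the host buffer is just width*height tightly packed floats in row-major order, one value per pixel; the x and y you mention are implicit in the array index and are never stored. A minimal sketch of pulling one channel out of interleaved RGBA data (the RGBAPixel struct and packRedChannel are my placeholders, not names from the sample):

#include <vector>

struct RGBAPixel { float r, g, b, a; }; // hypothetical interleaved layout

// Pack the red channel into the flat float buffer that
// clEnqueueWriteImage() expects for a CL_R / CL_FLOAT image.
std::vector<float> packRedChannel(const std::vector<RGBAPixel> &pixels,
                                  size_t width, size_t height)
{
    std::vector<float> out(width * height);
    for (size_t y = 0; y < height; ++y)
        for (size_t x = 0; x < width; ++x)
            out[y * width + x] = pixels[y * width + x].r; // pixel (x, y), row-major
    return out;
}

Then pass packed.data() as sourceImage in the clEnqueueWriteImage call.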

OpenCL RGB->HSL and back

I've been through every resource and can't fix my problem.
My host code calls the rgb2hsl kernel, then the hsl2rgb kernel. I should end up with the same image I started with, but I do not. The new image's hue is off in certain areas.
The red areas should not be there.
Here is the screen shot of what happens:
Here is the original picture
Here is the code:
#define E .0000001f

bool fEqual(float x, float y)
{
    return (x+E > y && x-E < y);
}

__kernel void rgb2hsl(__global float *values, int numValues)
{
    // thread index and total
    int idx = get_global_id(0);
    int idxVec3 = idx*3;
    float3 gMem;
    if (idx < numValues)
    {
        gMem.x = values[idxVec3];
        gMem.y = values[idxVec3+1];
        gMem.z = values[idxVec3+2];
    }
    barrier(CLK_LOCAL_MEM_FENCE);
    gMem /= 255.0f; // convert from 256 color to float
    // calculate chroma
    float M = max(gMem.x, gMem.y);
    M = max(M, gMem.z);
    float m = min(gMem.x, gMem.y);
    m = min(m, gMem.z);
    float chroma = M-m; // calculate chroma
    float lightness = (M+m)/2.0f;
    float saturation = chroma/(1.0f-fabs(2.0f*lightness-1.0f));
    float hue = 0;
    if (fEqual(gMem.x, M))
        hue = (int)((gMem.y - gMem.z)/chroma) % 6;
    if (fEqual(gMem.y, M))
        hue = (((gMem.z - gMem.x))/chroma) + 2;
    if (fEqual(gMem.z, M))
        hue = (((gMem.x - gMem.y))/chroma) + 4;
    hue *= 60.0f;
    barrier(CLK_LOCAL_MEM_FENCE);
    if (idx < numValues)
    {
        values[idxVec3] = hue;
        values[idxVec3+1] = saturation;
        values[idxVec3+2] = lightness;
    }
}

__kernel void hsl2rgb(__global float *values, int numValues)
{
    // thread index and total
    int idx = get_global_id(0);
    int idxVec3 = idx*3;
    float3 gMem;
    if (idx < numValues)
    {
        gMem.x = values[idxVec3];
        gMem.y = values[idxVec3+1];
        gMem.z = values[idxVec3+2];
    }
    barrier(CLK_LOCAL_MEM_FENCE);
    float3 rgb = (float3)(0,0,0);
    // calculate chroma
    float chroma = (1.0f - fabs( (float)(2.0f*gMem.z - 1.0f) )) * gMem.y;
    float H = gMem.x/60.0f;
    float x = chroma * (1.0f - fabs( fmod(H, 2.0f) - 1.0f ));
    switch((int)H)
    {
    case 0:
        rgb = (float3)(chroma, x, 0);
        break;
    case 1:
        rgb = (float3)(x, chroma, 0);
        break;
    case 2:
        rgb = (float3)(0, chroma, x);
        break;
    case 3:
        rgb = (float3)(0, x, chroma);
        break;
    case 4:
        rgb = (float3)(x, 0, chroma);
        break;
    case 5:
        rgb = (float3)(chroma, 0, x);
        break;
    default:
        rgb = (float3)(0, 0, 0);
    }
    barrier(CLK_LOCAL_MEM_FENCE);
    rgb += gMem.z - .5f*chroma;
    rgb *= 255;
    if (idx < numValues)
    {
        values[idxVec3] = rgb.x;
        values[idxVec3+1] = rgb.y;
        values[idxVec3+2] = rgb.z;
    }
}
The problem was this line:
hue = (int)((gMem.y - gMem.z)/chroma) % 6;
It should be
hue = fmod((gMem.y - gMem.z)/chroma, 6.0f);
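The cast truncates the quotient toward zero and then applies an integer modulo, so both the fractional part and the sign get mangled whenever gMem.y < gMem.z. A tiny host-side illustration of the arithmetic (plain C++, the value is made up):

#include <cmath>
#include <cstdio>

int main()
{
    const float h = -0.75f;            // e.g. (gMem.y - gMem.z)/chroma for a bluish pixel
    const int   bad  = (int)h % 6;     // (int)(-0.75f) == 0, and 0 % 6 == 0
    const float good = fmodf(h, 6.0f); // keeps the fraction: -0.75
    std::printf("int%%: %d  fmod: %f\n", bad, good);
    // After the *60.0f, the first variant yields hue 0 (pure red)
    // instead of -45 degrees, which is exactly the stray red areas.
}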
I did some more changes to remove artifacts:
#define E .0000001f

bool fEqual(float x, float y)
{
    return (x+E > y && x-E < y);
}

__kernel void rgb2hsl(__global float *values, int numValues)
{
    // thread index and total
    int idx = get_global_id(0);
    int idxVec3 = idx*3;
    float3 gMem;
    if (idx < numValues)
    {
        gMem.x = values[idxVec3];
        gMem.y = values[idxVec3+1];
        gMem.z = values[idxVec3+2];
    }
    barrier(CLK_LOCAL_MEM_FENCE);
    gMem /= 255.0f; // convert from 256 color to float
    // calculate chroma
    float M = max(gMem.x, gMem.y);
    M = max(M, gMem.z);
    float m = min(gMem.x, gMem.y);
    m = min(m, gMem.z);
    float chroma = M-m; // calculate chroma
    float lightness = (M+m)/2.0f;
    float saturation = chroma/(1.0f-fabs(2.0f*lightness-1.0f));
    float hue = 0;
    if (fEqual(gMem.x, M))
        hue = fmod((gMem.y - gMem.z)/chroma, 6.0f);
    if (fEqual(gMem.y, M))
        hue = (((gMem.z - gMem.x))/chroma) + 2;
    if (fEqual(gMem.z, M))
        hue = (((gMem.x - gMem.y))/chroma) + 4;
    hue *= 60.0f;
    barrier(CLK_LOCAL_MEM_FENCE);
    if (M == m)
        hue = saturation = 0;
    barrier(CLK_GLOBAL_MEM_FENCE);
    if (idx < numValues)
    {
        // NOTE: ARTIFACTS SHOW UP if we do not cast to integer!
        values[idxVec3] = (int)hue;
        values[idxVec3+1] = saturation;
        values[idxVec3+2] = lightness;
    }
}
