I am applying the two standard rotation equations to rotate grayscale images, but the output is not rotated.
The two equations are:
x' = x * cos(theta) - y * sin(theta)
and
y' = x * sin(theta) + y * cos(theta)
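For example (my own quick check, not from any of the linked Q&As), rotating the point (x, y) = (1, 0) by theta = 90 degrees gives
x' = 1 * cos(90°) - 0 * sin(90°) = 0
y' = 1 * sin(90°) + 0 * cos(90°) = 1
so a point on the positive x-axis moves to the positive y-axis, as expected.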
I have visited a number of Q&As on this site, but the explanations are unclear.
IMG imgRotate(IMG output, float deg)
{
IMG lalo;
lalo.degree = deg;
float radian = ((2 *pi*output.degree) / 360);
float cosine = cos(radian);
float sine = sin(radian);
int x1 = (output.height * sine);
int y1 = (output.height * cosine);
int x2 = (output.width * cosine + output.height* sine);
int y2 = (output.height* cosine -output.width * sine);
int x3 = (output.width * cosine);
int y3 =(-output.width * sine);
int minx = min(0, min(x1, min(x2, x3)));
int miny = min(0, min(y1, min(y2, y3)));
int maxx = max(0, max(x1, max(x2, x3)));
int maxy = max(0, max(y1, max(y2, y3)));
int w = maxx - minx;
int h = maxy - miny;
int x, y,nx,ny;
lalo.pixel = (unsigned char*)calloc(lalo.height*lalo.width, sizeof (unsigned char));
for (y = 0; y < h; y++)
{
for (x = 0; x <w; x++)
{
nx = ceilf(x*cos(radian) - y*sin(radian));
ny = ceilf(x*sin(radian) + y*cos(radian));
lalo.pixel[w*ny + nx] = output.pixel[w*ny + nx];
}
}
return lalo;
}
I have added the following code, but it produces an incomplete image:
IMG imgRotate(IMG output,float deg, int height, int width)
{
IMG lalo;
lalo.degree = deg;
lalo.width = width;
lalo.height = height;
lalo.pixel=(unsigned char*)calloc (lalo.height*lalo.width, sizeof (unsigned char));
float radian = ((2 *pi*lalo.degree) / 360);
int x, y, x1, y1;
for (y = 0; y < lalo.height; y++)
{
for (x = 0; x <lalo.width; x++)
{
x1 = ceilf(x*cos(radian)-y*sin(radian));
y1 = ceilf(x*sin(radian) + y*cos(radian));
lalo.pixel[lalo.width*y1+x1] = output.pixel[output.width*x1+y1];
}
}
return lalo;
}
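Both versions map source pixels forward into the destination, which leaves holes (several source pixels can land on the same destination while other destinations are never written) and can index outside the buffers. A common fix is inverse mapping: for every destination pixel, rotate backwards to find the source pixel. Below is a minimal sketch of that idea, assuming the IMG struct has the width, height, degree and pixel fields implied by the code above; the function name and the choice to rotate about the image centre are mine, not the original code.
IMG imgRotateInverse(IMG src, float deg)   /* sketch only, not the original function */
{
    IMG out;
    out.degree = deg;
    out.width = src.width;    /* keep the same canvas size for simplicity */
    out.height = src.height;
    out.pixel = (unsigned char*)calloc(out.width * out.height, sizeof(unsigned char));
    float radian = (2 * 3.14159265f * deg) / 360;
    float cosine = cosf(radian);
    float sine = sinf(radian);
    float cx = src.width / 2.0f, cy = src.height / 2.0f;   /* rotate about the centre */
    for (int y = 0; y < out.height; y++)
    {
        for (int x = 0; x < out.width; x++)
        {
            /* inverse rotation: where in the source does this destination pixel come from? */
            float sx = (x - cx) * cosine + (y - cy) * sine + cx;
            float sy = -(x - cx) * sine + (y - cy) * cosine + cy;
            int ix = (int)sx, iy = (int)sy;
            if (ix >= 0 && ix < src.width && iy >= 0 && iy < src.height)
                out.pixel[out.width * y + x] = src.pixel[src.width * iy + ix];
            /* otherwise the pixel stays black (calloc already zeroed the buffer) */
        }
    }
    return out;
}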
This is what we do with CSS; it may help (I'm not sure):
.image-class {
/* Rotate div */
-ms-transform: rotate(45deg); /* IE 9 */
-webkit-transform: rotate(45deg); /* Chrome, Safari, Opera */
transform: rotate(45deg);
}
<img class="image-class" src="https://media-mediatemple.netdna-ssl.com/wp-content/uploads/images/behavioral-css/transform_rotate.png"/>
Related
Hi, I am confused about what is wrong with my code for the "edges" filter portion of the problem.
I am able to apply a filter that detects edges, but for some reason I fail check50: the filter only comes out right for middle pixels. Any guidance would be much appreciated; I am wondering whether I am approaching this problem the wrong way.
With this code I simply skip the calculations for the "black pixels", i.e. the pixels outside the height/width range.
Here is my code:
void edges(int height, int width, RGBTRIPLE image[height][width])
{
//create temporary array
RGBTRIPLE temp[height][width];
for (int i = 0; i < height; i ++)
{
for (int j = 0; j < width; j++)
{
temp[i][j] = image[i][j];
}
}
//initialize sobel arrays
int gxarray[3][3] = {{-1, 0, 1}, {-2, 0, 2}, {-1, 0, 1}};
int gyarray[3][3] = {{-1, -2, -1}, {0, 0, 0}, {1, 2, 1}};
//loop through each ith pixel in jth column
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j ++)
{
float gx_red = 0;
float gx_blue = 0;
float gx_green = 0;
float gy_red = 0;
float gy_blue = 0;
float gy_green = 0;
//use the temporary array grid to calculate each gx value
//check if it is a corner or side pixel - and treat that pixel as black pixel
for (int k = -1; k < 2; k ++)
{
for (int l = -1; l < 2; l ++)
{
//calculate the gx and gy for each color by multiply each of
//check if they are corner or sidepixels
if (i + k < 0 || i + k >= height)
{
continue;
}
if (j + l < 0 || j + l >= width)
{
continue;
}
//otherwise calculate each color value
gx_red += temp[i + k][j + l].rgbtRed * gxarray[k + 1][l + 1];
gx_blue += temp[i + k][j + l].rgbtBlue * gxarray[k + 1][l + 1];
gx_green += temp[i + k][j + l].rgbtGreen * gxarray[k + 1][l + 1];
gy_red += temp[i + k][j + l].rgbtRed * gyarray[k + 1][l + 1];
gy_blue += temp[i + k][j + l].rgbtBlue * gyarray[k + 1][l + 1];
gy_green += temp[i + k][j + l].rgbtGreen * gyarray[k + 1][l + 1];
}
}
//times each number by itself then, add them, then square root them
int red = 0 + round(sqrt(gx_red * gx_red + gy_red * gy_red));
int blue = 0 + round(sqrt(gx_blue * gx_blue + gy_blue * gy_blue));
int green = 0 + round(sqrt(gx_green * gx_green + gy_green * gy_green));
image[i][j].rgbtRed = red;
image[i][j].rgbtBlue = blue;
image[i][j].rgbtGreen = green;
//cap it by 255
if (image[i][j].rgbtRed > 255)
{
image[i][j].rgbtRed = 255;
}
if (image[i][j].rgbtBlue > 255)
{
image[i][j].rgbtBlue = 255;
}
if (image[i][j].rgbtGreen > 255)
{
image[i][j].rgbtGreen = 255;
}
}
}
return;
}
Screenshot: https://i.stack.imgur.com/3bExI.png
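One thing that stands out in the code above (I cannot verify it against check50 myself, so treat this as a guess): red, blue and green are written into the 8-bit rgbt fields before the 255 cap, so any value above 255 has already wrapped around and the later if (... > 255) checks can never fire. A minimal sketch of capping before the assignment, using the same variable names as above:
//cap at 255 first, while the values are still plain ints
if (red > 255)
{
    red = 255;
}
if (blue > 255)
{
    blue = 255;
}
if (green > 255)
{
    green = 255;
}
//only then write them into the 8-bit RGBTRIPLE fields
image[i][j].rgbtRed = red;
image[i][j].rgbtBlue = blue;
image[i][j].rgbtGreen = green;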
Hey guys, I have been trying to figure out the problem with my code for the pset4 filter (more) edge detection. My code compiles, but the result image looks strange, with a lot of bright/white pixels and very few colored pixels, mostly at the boundaries of the image. I think I am close to the right result, but I just can't figure it out by myself. Could someone please check it out? I appreciate any input!
I handled the edge and corner pixels of the original image by creating a copied image with extra rows and columns that contain only black pixels (RGB values = 0). I also used the copied image to store the temporary calculation results from the loop.
void edges(int height, int width, RGBTRIPLE image[height][width])
{
// sobel filters
int Gx[3][3] = {{-1, 0, 1}, {-2, 0, 2}, {-1, 0, 1}};
int Gy[3][3] = {{-1, -2, -1}, {0, 0 ,0}, {1, 2, 1}};
// creates a copied image to store the original image plus extra rows/columns
// of black pixels to loop through
RGBTRIPLE temp[height + 2][width + 2];
for (int i = 0; i < height + 2; i++)
{
for (int j = 0; j < width + 2; j++)
{
if (i == 0 || j == 0 || i == height + 1 || j == width + 1)
{
temp[i][j].rgbtRed = 0;
temp[i][j].rgbtGreen = 0;
temp[i][j].rgbtBlue = 0;
}
else
{
temp[i][j] = image[i - 1][j - 1];
}
}
}
// second copied image with extra black pixels to store the result
RGBTRIPLE temp2[height + 2][width + 2];
for (int i = 0; i < height + 2; i++)
{
for (int j = 0; j < width + 2; j++)
{
temp2[i][j] = temp[i][j];
}
}
//calculation based on the copied image
for (int i = 1; i < height + 1; i++)
{
// variables to store Gx and Gy for each color channel
float blueGx = 0.0, redGx = 0.0, greenGx = 0.0;
float blueGy = 0.0, redGy = 0.0, greenGy = 0.0;
for (int j = 1; j < width + 1; j++)
{
for (int k = -1; k < 2; k++)
{
for (int h = -1; h < 2; h++)
{
//calculate the Gx for each R G B channel by using temp
blueGx += temp[i + k][j + h].rgbtBlue * Gx[k + 1][h + 1];
redGx += temp[i + k][j + h].rgbtRed * Gx[k + 1][h + 1];
greenGx += temp[i + k][j + h].rgbtGreen * Gx[k + 1][h + 1];
//calculate the Gy for each R G B channel by using temp
blueGy += temp[i + k][j + h].rgbtBlue * Gy[k + 1][h + 1];
redGy += temp[i + k][j + h].rgbtRed * Gy[k + 1][h + 1];
greenGy += temp[i + k][j + h].rgbtGreen * Gy[k + 1][h + 1];
}
}
//store result for each pixels (i, j) in temp2
temp2[i][j].rgbtRed = maxCheck(round(sqrt(redGx * redGx + redGy * redGy)));
temp2[i][j].rgbtBlue = maxCheck(round(sqrt(blueGx * blueGx + blueGy * blueGy)));
temp2[i][j].rgbtGreen = maxCheck(round(sqrt(greenGx * greenGx + greenGy * greenGy)));
}
}
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
image[i][j] = temp2[i + 1][j + 1];
}
}
}
// cap rgb value to 255
int maxCheck(int a)
{
if (a > 255)
{
a = 255;
}
return a;
}
Using check50 I got four errors:
:( edges correctly filters middle pixel
expected "210 150 60\n", not "255 250 255\n"
:( edges correctly filters pixel on edge
expected "213 228 255\n", not "255 255 255\n"
:( edges correctly filters 3x3 image
expected "76 117 255\n21...", not "76 117 255\n25..."
:( edges correctly filters 4x4 image
expected "76 117 255\n21...", not "76 117 255\n25..."
Judging from the results, a lot of pixels have been calculated up to 255. That explains all the white pixels in the result image, but I don't understand why. Is something wrong with the math? I think I paid attention to the range of RGB values and the difference between int and float numbers.
I just found the problem: the reset of blueGx, redGx, etc. is in the wrong position, so they are not reset at the beginning of the loop for each new pixel.
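For reference, a minimal sketch of the corrected calculation loop (same names as the code above), with the accumulators declared inside the j loop so they start at zero for every pixel:
for (int i = 1; i < height + 1; i++)
{
    for (int j = 1; j < width + 1; j++)
    {
        // declaring (and therefore resetting) the accumulators here, once per pixel
        float blueGx = 0.0, redGx = 0.0, greenGx = 0.0;
        float blueGy = 0.0, redGy = 0.0, greenGy = 0.0;
        for (int k = -1; k < 2; k++)
        {
            for (int h = -1; h < 2; h++)
            {
                blueGx += temp[i + k][j + h].rgbtBlue * Gx[k + 1][h + 1];
                redGx += temp[i + k][j + h].rgbtRed * Gx[k + 1][h + 1];
                greenGx += temp[i + k][j + h].rgbtGreen * Gx[k + 1][h + 1];
                blueGy += temp[i + k][j + h].rgbtBlue * Gy[k + 1][h + 1];
                redGy += temp[i + k][j + h].rgbtRed * Gy[k + 1][h + 1];
                greenGy += temp[i + k][j + h].rgbtGreen * Gy[k + 1][h + 1];
            }
        }
        temp2[i][j].rgbtRed = maxCheck(round(sqrt(redGx * redGx + redGy * redGy)));
        temp2[i][j].rgbtBlue = maxCheck(round(sqrt(blueGx * blueGx + blueGy * blueGy)));
        temp2[i][j].rgbtGreen = maxCheck(round(sqrt(greenGx * greenGx + greenGy * greenGy)));
    }
}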
I wanted to add some lighting to the 3D mesh so that the back surface is visible, but when I added an AmbientLight, some of the minor details of the graph became invisible. How can I add lighting to the mesh so that the back surface is visible but the minor details don't vanish?
Before adding light:
After adding light:
import java.util.logging.Logger;
import javafx.scene.DepthTest;
import javafx.scene.image.Image;
import javafx.scene.image.PixelWriter;
import javafx.scene.image.WritableImage;
import javafx.scene.paint.Color;
import javafx.scene.paint.PhongMaterial;
import javafx.scene.shape.CullFace;
import javafx.scene.shape.DrawMode;
import javafx.scene.shape.MeshView;
import javafx.scene.shape.TriangleMesh;
public class Fx3DPlotMesh extends MeshView {
private static final int SIZE = 500;
private static float AMPLIFI = 130;
private int rtResolution;
private int mzResolution;
private static final Logger LOG = Logger
.getLogger(Fx3DPlotMesh.class.getName());
public Fx3DPlotMesh(Dataset dataset) {
rtResolution = dataset.getRtResolution();
mzResolution = dataset.getMzResolution();
TriangleMesh mesh = new TriangleMesh();
int[][] peakListIndices = new int[rtResolution][mzResolution];
float factorX = (float) SIZE / rtResolution;
float factorZ = (float) SIZE / mzResolution;
float[][] intensityValues = dataset.getIntensityValues();
for (int x = 0; x < rtResolution; x++) {
for (int z = 0; z < mzResolution; z++) {
mesh.getPoints().addAll((float) x * factorX,-intensityValues[x][z] * AMPLIFI ,(float) z * factorZ);
}
}
int rtLength = rtResolution;
int mzLength = mzResolution;
float rtTotal = rtLength;
float mzTotal = mzResolution;
for (float x = 0; x < rtLength - 1; x++) {
for (float y = 0; y < mzLength - 1; y++) {
float x0 = x / rtTotal;
float y0 = y / mzTotal;
float x1 = (x + 1) / rtTotal;
float y1 = (y + 1) / mzTotal;
mesh.getTexCoords().addAll( //
x0, y0, // 0, top-left
x0, y1, // 1, bottom-left
x1, y0, // 2, top-right
x1, y1 // 3, bottom-right
);
}
}
// faces
for (int x = 0; x < rtLength - 1; x++) {
for (int z = 0; z < mzLength - 1; z++) {
int tl = x * mzLength + z; // top-left
int bl = x * mzLength + z + 1; // bottom-left
int tr = (x + 1) * mzLength + z; // top-right
int br = (x + 1) * mzLength + z + 1; // bottom-right
int offset = (x * (mzLength - 1) + z) * 8 / 2; // div 2 because
// we have u AND
// v in the list
// working
mesh.getFaces().addAll(bl, offset + 1, tl, offset + 0, tr,
offset + 2);
mesh.getFaces().addAll(tr, offset + 2, br, offset + 3, bl,
offset + 1);
}
}
LOG.info("Plot mesh is ready.");
setMesh(mesh);
setCullFace(CullFace.NONE);
setDrawMode(DrawMode.FILL);
setDepthTest(DepthTest.ENABLE);
}
}
This code produces the mesh shown in the image above. I just wanted to add some more lighting to the mesh. I tried adding a light using the code below, but it didn't work.
Light.Spot light = new Light.Spot();
light.setX(250);
light.setY(500);
light.setZ(250);
light.setPointsAtX(250);
light.setPointsAtY(0);
light.setPointsAtZ(250);
light.setSpecularExponent(2);
Lighting lighting = new Lighting();
lighting.setLight(light);
lighting.setSurfaceScale(5.0);
meshView.setEffect(lighting);
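Note that javafx.scene.effect.Lighting is a 2D node effect, so applying it to a MeshView does not behave like real 3D lighting. A more usual approach (my own sketch, not from the original code) is to add scene-graph lights next to the mesh: a dim AmbientLight so back faces never go completely black, plus a PointLight that preserves the shading of the surface details. The light positions below are only placeholders.
// sketch only: requires javafx.scene.AmbientLight, javafx.scene.PointLight,
// javafx.scene.Group and javafx.scene.paint.Color
AmbientLight ambient = new AmbientLight(Color.rgb(60, 60, 60)); // keep it dim so details survive
PointLight point = new PointLight(Color.WHITE);
point.setTranslateX(250);
point.setTranslateY(-500); // above the plot (the JavaFX Y axis points down)
point.setTranslateZ(250);
Group root = new Group(meshView, ambient, point); // a light with an empty scope affects the whole scene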
I have a question about the math involved to copy a path.
Let's say I have this path:
http://imgur.com/a/42l0t
I want an exact copy of this path besides the black one. I wrote a small C# program that calculates the angle between two points. Depending on the angle, an offset to the X or Y value is added.
It kind of works, this is the result:
http://imgur.com/bJQDCgq
As you can see, it's not that pretty.
Now, my real question is: What is the proper math to use for this?
Hopefully someone knows an answer, because I'm kind of stuck on this one.
Code:
void Plot(List<Point> points)
{
Graphics g = pictureBox.CreateGraphics();
g.Clear(Color.White);
for (int i = 0; i < points.Count - 1; i++)
{
g.DrawLine(Pens.Black, points[i], points[i + 1]);
}
List<Point> points2 = new List<Point>();
for (int i = 0; i < points.Count - 1; i++)
{
var angle = getAngleFromPoint(points[i], points[i + 1]);
Debug.WriteLine(angle);
if (angle < 180 && angle >= 135)
{
points2.Add(new Point(points[i].X - OFFSET, points[i].Y));
}
if (angle < 135 && angle >= 90)
{
if (points[i].Y < points[i + 1].Y)
{
points2.Add(new Point(points[i].X - OFFSET / 2, points[i].Y + OFFSET));
}
else
{
}
}
if (angle < 90 && angle >= 45)
{
if (points[i].Y < points[i + 1].Y)
{
points2.Add(new Point(points[i].X - OFFSET, points[i].Y));
}
else
{
points2.Add(new Point(points[i].X + OFFSET, points[i].Y));
}
}
if (angle < 45 && angle >= 0)
{
if (points[i].Y < points[i + 1].Y)
{
points2.Add(new Point(points[i].X - OFFSET, points[i].Y));
}
else
{
points2.Add(new Point(points[i].X + OFFSET, points[i].Y));
}
}
if (angle < 360 && angle >= 315)
{
if (points[i].Y < points[i + 1].Y)
{
points2.Add(new Point(points[i].X + OFFSET, points[i].Y));
}
else
{
points2.Add(new Point(points[i].X + 10, points[i].Y - OFFSET));
}
}
if (angle < 315 && angle >= 270)
{
points2.Add(new Point(points[i].X, points[i].Y - OFFSET));
}
if (angle < 270 && angle >= 225)
{
if (points[i].Y < points[i + 1].Y)
{
points2.Add(new Point(points[i].X - OFFSET / 2, points[i].Y - OFFSET));
}
else
{
}
}
if (angle < 225 && angle >= 180)
{
if (points[i].X < points[i + 1].X)
{
points2.Add(new Point(points[i].X, points[i].Y - OFFSET));
}
else
{
if (points[i].Y < points[i + 1].Y) // \
{
points2.Add(new Point(points[i].X - OFFSET, points[i].Y));
}
else
{
}
}
}
}
for (int i = 0; i < points2.Count - 1; i++)
{
g.DrawLine(Pens.Red, points2[i], points2[i + 1]);
}
}
I think if I decreased the angle steps (from 45 degrees to maybe 30 degrees) I could improve the result, but there must be a better solution.
I suppose one way to tackle this is to split it into line pairs (i.e., three points).
Find the parallel line (at distance d) for each line in the pair. Then find where these parallel lines intersect to give you the location of a point on the new line.
In very rough pseudo-code:
points a, b, c
distance d
lineab = findLineParallelTo(line(a,b), d)
linebc = findLineParallelTo(line(b,c), d)
return intersect(lineab, linebc)
I implemented the solution from @Jack and it works great:
public class Line
{
public PointF P { get; private set; }
public PointF Q { get; private set; }
public float Pitch
{
get; private set;
}
public Line()
{
}
public Line(float px, float py, float qx, float qy) : this(new PointF(px, py), new PointF(qx, qy))
{
}
public Line(PointF p, PointF q)
{
P = p;
Q = q;
}
#region Methods
/// <summary>
/// http://stackoverflow.com/questions/2825412/draw-a-parallel-line
/// </summary>
public Line FindParallelLine(float distance)
{
float length = (float)Math.Sqrt((P.X - Q.X) * (P.X - Q.X) + (P.Y - Q.Y) * (P.Y - Q.Y));
// This is the second line
float px = P.X + distance * (Q.Y - P.Y) / length;
float qx = Q.X + distance * (Q.Y - P.Y) / length;
float py = P.Y + distance * (P.X - Q.X) / length;
float qy = Q.Y + distance * (P.X - Q.X) / length;
return new Line(px, py, qx, qy);
}
public override string ToString()
{
return string.Format("P({0}|{1}), Q({2}|{3}) - Pitch: {4}", P.X, P.Y, Q.X, Q.Y, Pitch);
}
#endregion
}
private PointF FindIntersection(Line a, Line b)
{
PointF A = a.P;
PointF B = a.Q;
PointF C = b.P;
PointF D = b.Q;
float dy1 = B.Y - A.Y;
float dx1 = B.X - A.X;
float dy2 = D.Y - C.Y;
float dx2 = D.X - C.X;
// Check whether the two lines are parallel.
if (dy1 * dx2 == dy2 * dx1)
{
return PointF.Empty;
}
else
{
float x = ((C.Y - A.Y) * dx1 * dx2 + dy1 * dx2 * A.X - dy2 * dx1 * C.X) / (dy1 * dx2 - dy2 * dx1);
float y = A.Y + (dy1 / dx1) * (x - A.X);
return new PointF(x, y);
}
}
private PointF FindIntersection(PointF a, PointF b, PointF c, float distance)
{
Line line1 = new Line(a, b);
Line line2 = new Line(b, c);
Line parallel = line1.FindParallelLine(distance);
Line parallel2 = line2.FindParallelLine(distance);
return FindIntersection(parallel, parallel2);
}
private List<PointF> FindIntersections(PointF[] points, float distance)
{
List<PointF> intersections = new List<PointF>();
for (int i = 0; i < points.Length - 2; i++)
{
PointF intersection = FindIntersection(points[i], points[i + 1], points[i + 2], distance);
if (!intersection.IsEmpty && !double.IsNaN(intersection.X) && !double.IsNaN(intersection.Y))
{
intersections.Add(intersection);
}
}
return intersections;
}
private PointF GetFirstPoint(PointF[] points, float distance)
{
Line parallel = new Line(points[0], points[1]).FindParallelLine(distance);
return parallel.P;
}
private PointF GetLastPoint(PointF[] points, float distance)
{
Line parallel = new Line(points[points.Length - 2], points[points.Length - 1]).FindParallelLine(distance);
return parallel.Q;
}
Example call:
OFFSET = float.Parse(textBox1.Text);
List<PointF> points = new List<PointF>();
points.Add(new PointF(200, 180));
points.Add(new PointF(160, 160));
points.Add(new PointF(100, 160));
points.Add(new PointF(60, 140));
points.Add(new PointF(40, 100));
points.Add(new PointF(80, 60));
points.Add(new PointF(140, 100));
points.Add(new PointF(180, 140));
points.Add(new PointF(220, 80));
List<PointF> intersections = FindIntersections(points.ToArray(), OFFSET);
intersections.Insert(0, GetFirstPoint(points.ToArray(), OFFSET));
intersections.Add(GetLastPoint(points.ToArray(), OFFSET));
Graphics g = pictureBox.CreateGraphics();
g.Clear(Color.White);
g.DrawLines(Pens.Black, points.ToArray());
// Connect the intersection points.
g.DrawLines(Pens.Red, intersections.ToArray());
Example image:
http://imgur.com/onUstGT
Thanks again @Jack!
I've been through every resource and can't fix my problem.
My host code calls the rgb2hsl kernel, then calls the hsl2rgb kernel. I should end up with the same image that I started with, but I do not: the hue of the new image is off in certain areas.
The red areas should not be there.
Here is the screen shot of what happens:
Here is the original picture
Here is the code:
#define E .0000001f
bool fEqual(float x, float y)
{
return (x+E > y && x-E < y);
}
__kernel void rgb2hsl(__global float *values, int numValues)
{
// thread index and total
int idx = get_global_id(0);
int idxVec3 = idx*3;
float3 gMem;
if (idx < numValues)
{
gMem.x = values[idxVec3];
gMem.y = values[idxVec3+1];
gMem.z = values[idxVec3+2];
}
barrier(CLK_LOCAL_MEM_FENCE);
gMem /= 255.0f; //convert from 256 color to float
//calculate chroma
float M = max(gMem.x, gMem.y);
M = max(M, gMem.z);
float m = min(gMem.x, gMem.y);
m = min(m, gMem.z);
float chroma = M-m; //calculate chroma
float lightness = (M+m)/2.0f;
float saturation = chroma/(1.0f-fabs(2.0f*lightness-1.0f));
float hue = 0;
if (fEqual(gMem.x, M))
hue = (int)((gMem.y - gMem.z)/chroma) % 6;
if (fEqual(gMem.y, M))
hue = (((gMem.z - gMem.x))/chroma) + 2;
if (fEqual(gMem.z, M))
hue = (((gMem.x - gMem.y))/chroma) + 4;
hue *= 60.0f;
barrier(CLK_LOCAL_MEM_FENCE);
if (idx < numValues)
{
values[idxVec3] = hue;
values[idxVec3+1] = saturation;
values[idxVec3+2] = lightness;
}
}
__kernel void hsl2rgb(__global float *values, int numValues)
{
// thread index and total
int idx = get_global_id(0);
int idxVec3 = idx*3;
float3 gMem;
if (idx < numValues)
{
gMem.x = values[idxVec3];
gMem.y = values[idxVec3+1];
gMem.z = values[idxVec3+2];
}
barrier(CLK_LOCAL_MEM_FENCE);
float3 rgb = (float3)(0,0,0);
//calculate chroma
float chroma = (1.0f - fabs( (float)(2.0f*gMem.z - 1.0f) )) * gMem.y;
float H = gMem.x/60.0f;
float x = chroma * (1.0f - fabs( fmod(H, 2.0f) - 1.0f ));
switch((int)H)
{
case 0:
rgb = (float3)(chroma, x, 0);
break;
case 1:
rgb = (float3)(x, chroma, 0);
break;
case 2:
rgb = (float3)(0, chroma, x);
break;
case 3:
rgb = (float3)(0, x, chroma);
break;
case 4:
rgb = (float3)(x, 0, chroma);
break;
case 5:
rgb = (float3)(chroma, 0, x);
break;
default:
rgb = (float3)(0, 0, 0);
}
barrier(CLK_LOCAL_MEM_FENCE);
rgb += gMem.z - .5f*chroma;
rgb *= 255;
if (idx < numValues)
{
values[idxVec3] = rgb.x;
values[idxVec3+1] = rgb.y;
values[idxVec3+2] = rgb.z;
}
}
The problem was this line:
hue = (int)((gMem.y - gMem.z)/chroma) % 6;
It should be
hue = fmod((gMem.y - gMem.z)/chroma, 6.0f);
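To see why (a small illustration of my own, not part of the kernel): the cast throws away the fractional part of the quotient before the modulo, so in the red-dominant sector the hue gets quantised to multiples of 60°, while fmod keeps the fraction:
// e.g. for a quotient of -0.5f (red is max, gMem.y < gMem.z)
float h = -0.5f;
int   a = (int)h % 6;      // (int)(-0.5f) == 0, so a == 0   -> fractional hue lost
float b = fmod(h, 6.0f);   // b == -0.5f                     -> fractional hue preserved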
I did some more changes to remove artifacts:
#define E .0000001f
bool fEqual(float x, float y)
{
return (x+E > y && x-E < y);
}
__kernel void rgb2hsl(__global float *values, int numValues)
{
// thread index and total
int idx = get_global_id(0);
int idxVec3 = idx*3;
float3 gMem;
if (idx < numValues)
{
gMem.x = values[idxVec3];
gMem.y = values[idxVec3+1];
gMem.z = values[idxVec3+2];
}
barrier(CLK_LOCAL_MEM_FENCE);
gMem /= 255.0f; //convert from 256 color to float
//calculate chroma
float M = max(gMem.x, gMem.y);
M = max(M, gMem.z);
float m = min(gMem.x, gMem.y);
m = min(m, gMem.z);
float chroma = M-m; //calculate chroma
float lightness = (M+m)/2.0f;
float saturation = chroma/(1.0f-fabs(2.0f*lightness-1.0f));
float hue = 0;
if (fEqual(gMem.x, M))
hue = fmod((gMem.y - gMem.z)/chroma, 6.0f);
if (fEqual(gMem.y, M))
hue = (((gMem.z - gMem.x))/chroma) + 2;
if (fEqual(gMem.z, M))
hue = (((gMem.x - gMem.y))/chroma) + 4;
hue *= 60.0f;
barrier(CLK_LOCAL_MEM_FENCE);
if (M == m)
hue = saturation = 0;
barrier(CLK_GLOBAL_MEM_FENCE);
if (idx < numValues)
{
//NOTE: ARTIFACTS SHOW UP if we do not cast to integer!
values[idxVec3] = (int)hue;
values[idxVec3+1] = saturation;
values[idxVec3+2] = lightness;
}
}