How to eliminate a colour from RGB LED code? - Arduino

I recently adapted some existing RGB LED code (on Arduino) to fade from yellow to red. However, it still produces green at the beginning, and I can't figure out how to eliminate that. Maybe someone has an idea?
int redPin = 9;
int greenPin = 10;
int bluePin = 11;

#define COMMON_ANODE

void setup()
{
  pinMode(redPin, OUTPUT);
  pinMode(greenPin, OUTPUT);
  pinMode(bluePin, OUTPUT);
}

int rgb[3];

// Arduino has no prebuilt function for HSI to RGB, so we make one:
void hsi_to_rgb(float H, float S, float I) {
  int r, g, b;
  if (H > 360) {
    H = H - 360;
  }
  // Serial.println("H: " + String(H));
  H = fmod(H, 360);             // cycle H around to 0-360 degrees
  H = 3.14159 * H / (float)180; // convert to radians
  S = S > 0 ? (S < 1 ? S : 1) : 0; // clamp S and I to interval [0,1]
  I = I > 0 ? (I < 1 ? I : 1) : 0;
  if (H < 2.09439) {
    r = 255 * I / 3 * (1 + S * cos(H) / cos(1.047196667 - H));
    g = 255 * I / 3 * (1 + S * (1 - cos(H) / cos(1.047196667 - H)));
    b = 255 * I / 3 * (1 - S);
  } else if (H < 4.188787) {
    H = H - 2.09439;
    g = 255 * I / 3 * (1 + S * cos(H) / cos(1.047196667 - H));
    b = 255 * I / 3 * (1 + S * (1 - cos(H) / cos(1.047196667 - H)));
    r = 255 * I / 3 * (1 - S);
  } else {
    H = H - 4.188787;
    b = 255 * I / 3 * (1 + S * cos(H) / cos(1.047196667 - H));
    r = 255 * I / 3 * (1 + S * (1 - cos(H) / cos(1.047196667 - H)));
    g = 255 * I / 3 * (1 - S);
  }
  rgb[0] = r;
  rgb[1] = g;
  rgb[2] = b;
}

void setColor(int red, int green, int blue)
{
#ifdef COMMON_ANODE
  red = 255 - red;
  green = 255 - green;
  blue = 255 - blue;
#endif
  analogWrite(redPin, red);
  analogWrite(greenPin, green);
  analogWrite(bluePin, blue);
}

void loop()
{
  for (int i = 0; i <= 360; i++) {
    hsi_to_rgb(i, 1, 1);
    setColor(rgb[0], rgb[1], rgb[2]);
    delay(20);
  }
}
Also, if someone has or can easily write better code, it would be greatly appreciated!

Firstly, I think you should work in HSV color space to begin with. If a green channel stays on while you're trying to isolate your colors to red and then sweep to yellow, your math is most probably where things are going wrong.
HSV sweeps through the color wheel naturally, so you're probably getting some mixed values on the way from yellow to red. Yellow is also made by combining red and green in RGB, and the human eye is much better at distinguishing greens than reds and blues, which is probably why you're seeing green to begin with. If I were you, I'd start by playing with the RGB start values until you get a shade of yellow in which you can't see the green. Either that, or stick to HSV color space and use it to drive how your LED displays color:
https://en.wikipedia.org/wiki/HSL_and_HSV
https://blog.saikoled.com/post/43693602826/why-every-led-light-should-be-using-hsi
All the same, I have found a GitHub gist that converts between RGB and HSV that will hopefully be helpful:
https://gist.github.com/postspectacular/2a4a8db092011c6743a7

Here is an illustration of the RGB values as the hue changes (assuming saturation and intensity are at maximum):
[plot: r, g and b versus hue]
You'll see both r and g are at 255 around hue = 60. The result of your hsi_to_rgb(), on the other hand, seems to give darker, greenish values around there.
Please try the following hsi_to_rgb(), which is an implementation of the description on Wikipedia.
/*
 * Convert hue, saturation, intensity values to r, g, b values.
 * h in [0,360)
 * s, i in [0,1]
 * rgb in [0,255]
 */
void hsi_to_rgb(float h, float s, float i)
{
  float r = i;
  float g = i;
  float b = i;
  if (s > 0.0) {
    h *= 6.0 / 360;          // 0 <= h < 6
    int h2 = (int)h;         // integral part of h
    float f = h - (float)h2; // fractional part of h
    switch (h2) {
    default:
    case 0:
      g *= 1 - s * (1 - f);
      b *= 1 - s;
      break;
    case 1:
      r *= 1 - s * f;
      b *= 1 - s;
      break;
    case 2:
      r *= 1 - s;
      b *= 1 - s * (1 - f);
      break;
    case 3:
      r *= 1 - s;
      g *= 1 - s * f;
      break;
    case 4:
      r *= 1 - s * (1 - f);
      g *= 1 - s;
      break;
    case 5:
      g *= 1 - s;
      b *= 1 - s * f;
      break;
    }
  }
  // store and clamp the rgb values; this is done outside the if
  // so that the achromatic case (s == 0) is written out as well
  rgb[0] = 255 * r;
  rgb[1] = 255 * g;
  rgb[2] = 255 * b;
  for (int j = 0; j < 3; j++) {
    if (rgb[j] < 0) rgb[j] = 0;
    else if (rgb[j] > 255) rgb[j] = 255;
  }
}
Then change the hue value gradually from 60 (yellow) to 0 (red).
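For instance, a minimal loop() sketch (assuming the global rgb[] array and the setColor() from the question) that fades from yellow to red and back might look like this:
void loop()
{
  // fade from yellow (hue 60) down to red (hue 0) ...
  for (int hue = 60; hue >= 0; hue--) {
    hsi_to_rgb(hue, 1, 1);
    setColor(rgb[0], rgb[1], rgb[2]);
    delay(20);
  }
  // ... and back up again
  for (int hue = 0; hue <= 60; hue++) {
    hsi_to_rgb(hue, 1, 1);
    setColor(rgb[0], rgb[1], rgb[2]);
    delay(20);
  }
}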

Related

My Del operator in GLSL appears to have an underflow error which results in a black area, how can I prevent that from happening?

I am writing a ShaderToy to model de Broglie-Bohm theory, to help me visualize quantum mechanics from a deterministic perspective. A key part is the Del (∇) operator, used for calculating the gradients that diffuse a field.
Right now I appear to have something that looks like it's diffusing, but I believe it runs into an underflow error, and a black splotch appears. How do I prevent it from forming? [screenshot: the diffusing field with a black splotch]
Here is the Del operator code:
vec4 del(in vec4[9] points) {
    vec4 deltaX = ((points[2 * 3 + 0] + points[2 * 3 + 1] + points[2 * 3 + 2]) / 3.
                 - (points[0 * 3 + 0] + points[0 * 3 + 1] + points[0 * 3 + 2]) / 3.) / 2.;
    vec4 deltaY = ((points[0 * 3 + 2] + points[1 * 3 + 2] + points[2 * 3 + 2]) / 3.
                 - (points[0 * 3 + 0] + points[1 * 3 + 0] + points[2 * 3 + 0]) / 3.) / 2.;
    return vec4((deltaX + deltaY).rgb, 1.);
}
vec4 delTex(sampler2D tex, ivec2 coord) {
    vec4[9] surroundingPoints = vec4[9](vec4(1), vec4(1), vec4(1), vec4(1), vec4(1), vec4(1), vec4(1), vec4(1), vec4(1));
    for (int x = 0; x < 3; x++) {
        for (int y = 0; y < 3; y++) {
            surroundingPoints[x * 3 + y] = texelFetch(tex, (ivec2(coord) + (ivec2(x, y) - ivec2(1))), 0);
        }
    }
    return del(surroundingPoints);
}
void mainImage(out vec4 fragColor, in vec2 coord) {
    if (iMouse.x > 0. && iMouse.y > 0. && abs(coord.x - iMouse.x) < 5. && abs(coord.y - iMouse.y) < 5.) {
        fragColor = vec4(1., 0., 1., 1.); return;
    }
    if (coord.x == 0. || coord.y == 0.) {
        fragColor = vec4(1., 0., 1., 1.); return;
    }
    fragColor = texelFetch(iChannel0, ivec2(coord), 0) + delTex(iChannel0, ivec2(coord)); return;
    //fragColor = texelFetch(iChannel0, ivec2(coord), 0);
}
So I figured it out: the values were overflowing because the gradient was being added back onto the original pixel each frame. I used the min() function to clamp the result below 1.0, and there is no black square anymore.
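For reference, a minimal sketch of that fix, replacing the accumulation line in mainImage() (the clamp bound of 1.0 is an assumption; use whatever ceiling the field should respect):
// clamp the accumulated value component-wise so it can't run away
fragColor = min(texelFetch(iChannel0, ivec2(coord), 0) + delTex(iChannel0, ivec2(coord)), vec4(1.)); return;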

Explicit FDM with CUDA [closed]

I am working to implement CUDA for the following code. The first version is written serially, and the second version with CUDA. I am sure the results of the serial version are correct. I expect the second version, where I have added CUDA functionality, to give the same result, but it seems the kernel function does not do anything and I just get back the initial values of u and v. I know that, due to my lack of experience, the bug may be obvious, but I cannot figure it out. Also, please do not recommend using a flattened array, because that makes the indexing in the code harder for me to understand.
First version:
#include <fstream>
#include <iostream>
#include <math.h>
#include <vector>
#include <chrono>
#include <omp.h>
using namespace std;

const int M = 1024;
const int N = 1024;
const double A = 1;
const double B = 3;
const double Du = 5 * pow(10, -5);
const double Dv = 5 * pow(10, -6);
const int Max_Itr = 1000;
const double h = 1.0 / static_cast<double>(M - 1);
const double delta_t = 0.0025;
const double s1 = (Du * delta_t) / pow(h, 2);
const double s2 = (Dv * delta_t) / pow(h, 2);

int main() {
    double** u = new double*[M];
    double** v = new double*[M];
    for (int i = 0; i < M; i++) {
        u[i] = new double[N];
        v[i] = new double[N];
    }
    for (int j = 0; j < M; j++) {
        for (int i = 0; i < N; i++) {
            u[i][j] = 0.02;
            v[i][j] = 0.02;
        }
    }
    for (int k = 1; k < Max_Itr; k++) {
        for (int i = 1; i < N - 1; i++) {
            for (int j = 1; j < M - 1; j++) {
                u[i][j] = ((1 - (4 * s1)) * u[i][j]) + (s1 * (u[i + 1][j] + u[i - 1][j] + u[i][j + 1] + u[i][j - 1])) +
                          (A * delta_t) + (delta_t * pow(u[i][j], 2) * v[i][j]) - (delta_t * (B + 1) * u[i][j]);
                v[i][j] = ((1 - (4 * s2)) * v[i][j]) + (s2 * (v[i + 1][j] + v[i - 1][j] + v[i][j + 1] + v[i][j - 1])) + (B * delta_t * u[i][j])
                          - (delta_t * pow(u[i][j], 2) * v[i][j]);
            }
        }
    }
    cout << "u: " << u[512][512] << " v: " << v[512][512] << endl;
    return 0;
}
Second version:
#include <fstream>
#include <iostream>
#include <math.h>
#include <vector>
using namespace std;

#define M 1024
#define N 1024

__global__ void my_kernel(double** v, double** u) {
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    double A = 1;
    double B = 3;
    int Max_Itr = 1000;
    double delta_t = 0.0025;
    double Du = 5 * powf(10, -5);
    double Dv = 5 * powf(10, -6);
    double h = 1.0 / (M - 1);
    double s1 = (Du * delta_t) / powf(h, 2);
    double s2 = (Dv * delta_t) / powf(h, 2);
    for (int k = 1; k < Max_Itr; k++) {
        u[i][j] = ((1 - (4 * s1)) * u[i][j]) + (s1 * (u[i + 1][j] + u[i - 1][j] + u[i][j + 1] + u[i][j - 1])) +
                  (A * delta_t) + (delta_t * pow(u[i][j], 2) * v[i][j]) - (delta_t * (B + 1) * u[i][j]);
        v[i][j] = ((1 - (4 * s2)) * v[i][j]) + (s2 * (v[i + 1][j] + v[i - 1][j] + v[i][j + 1] + v[i][j - 1])) + (B * delta_t * u[i][j])
                  - (delta_t * pow(u[i][j], 2) * v[i][j]);
        __syncthreads();
    }
}

int main() {
    double** u = new double*[M];
    double** v = new double*[M];
    for (int i = 0; i < M; i++) {
        u[i] = new double[N];
        v[i] = new double[N];
    }
    dim3 blocks(32, 32);
    dim3 grids(M / 32 + 1, N / 32 + 1);
    for (int j = 0; j < M; j++) {
        for (int i = 0; i < N; i++) {
            u[i][j] = 0.02;
            v[i][j] = 0.02;
        }
    }
    double **u_d, **v_d;
    int d_size = N * M * sizeof(double);
    cudaMalloc(&u_d, d_size);
    cudaMalloc(&v_d, d_size);
    cudaMemcpy(u_d, u, d_size, cudaMemcpyHostToDevice);
    cudaMemcpy(v_d, v, d_size, cudaMemcpyHostToDevice);
    my_kernel<<<grids, blocks>>>(v_d, u_d);
    cudaDeviceSynchronize();
    cudaMemcpy(v, v_d, d_size, cudaMemcpyDeviceToHost);
    cudaMemcpy(u, u_d, d_size, cudaMemcpyDeviceToHost);
    cout << "u: " << u[512][512] << " v: " << v[512][512] << endl;
    return 0;
}
What I expect from the second version is :
u: 0.2815 v: 1.7581
Your two-dimensional array, in the first version of the program, is implemented using an array of pointers, each of which points to a separately-allocated array of double values.
In your second version, you are using the same pointer-to-pointer-to-double type, but you're not allocating any space for the actual data, just for the array of pointers (and you're not copying any of the data to the GPU, just the pointers, which are useless to copy anyway, since they point to host-side memory).
What is most likely happening is that your kernel attempts to access memory at an invalid address, and its execution is aborted.
If you were to properly check for errors, as @njuffa noted, you would know that this is what happened.
Now, you could avoid having to make multiple memory allocations if you used a single data area instead of separate allocations for each second-dimension 1D array; and that is true for both the first and the second version of your program. That would not quite be array flattening. See an explanation of how to do this (C-language-style) on this page.
Note, however, that the double dereferencing which you insist on performing in your kernel is likely slowing it down significantly.
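Not the author's exact code, but a minimal sketch of that single-allocation scheme for u on the device side (v would need the same treatment); the names u_data, u_rows and h_rows are mine, and error checking is omitted for brevity:
double *u_data;  // one contiguous device block for all M*N values
double **u_rows; // device array of M row pointers into that block
cudaMalloc(&u_data, (size_t)M * N * sizeof(double));
cudaMalloc(&u_rows, M * sizeof(double *));

// build the row-pointer table on the host, pointing into the device block,
// then copy that small table to the GPU once
double **h_rows = new double *[M];
for (int i = 0; i < M; i++)
    h_rows[i] = u_data + (size_t)i * N;
cudaMemcpy(u_rows, h_rows, M * sizeof(double *), cudaMemcpyHostToDevice);
delete[] h_rows;

// the data itself can now move in a single cudaMemcpy into u_data (the host
// side needs the same contiguous layout), and the kernel keeps its u[i][j]
// double-dereferencing: my_kernel<<<grids, blocks>>>(v_rows, u_rows);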

How to remove floats or reduce actual file size of code function? (Arduino)

I am trying to make my Arduino code for a Gemma with NeoPixels, which currently takes 5310 bytes of memory, smaller so I can fit more things into the program.
Currently I am trying to remove the floats from, and reduce the size of, the code snippet below:
void gradient(Color c1, Color c2, float time) {
    for (float i = 0; i < time; i += 0.001) {
        Color result(0, 0, 0);
        result.Red = c1.Red * (1 - (i / time)) + c2.Red * (i / time);
        result.Green = c1.Green * (1 - (i / time)) + c2.Green * (i / time);
        result.Blue = c1.Blue * (1 - (i / time)) + c2.Blue * (i / time);
        for (uint8_t x = 0; x < 20; x++)
            pixels.setPixelColor(x, result.Red, result.Green, result.Blue);
        pixels.show();
        delay(1);
    }
}
I managed to reduce it by 30 bytes to:
void gradient(Color c1, Color c2, float time) {
    float stepsize = 0.01; // step size in seconds
    float lambda;
    int maxiter = (int)(time / stepsize);
    Color result(0, 0, 0);
    for (int i = 0; i <= maxiter; i++) {
        lambda = (float)i / maxiter;
        result.Red = c1.Red * (1 - lambda) + c2.Red * lambda;
        result.Green = c1.Green * (1 - lambda) + c2.Green * lambda;
        result.Blue = c1.Blue * (1 - lambda) + c2.Blue * lambda;
        for (uint8_t x = 0; x < 20; x++)
            pixels.setPixelColor(x, result.Red, result.Green, result.Blue);
        pixels.show();
        delay(stepsize * 1000); // delay in milliseconds
    }
}
But I am still trying to make it smaller.
For those wondering, the Color object is just an object with 3 ints called Red, Green and Blue. An example usage of this code would be:
gradient(Color(255, 0, 0), Color(0, 255, 0), 2);
which would be a gradient from red to green over 2 seconds.
Thanks in advance!
If you can pull delay() out of all your code, it seems to avoid pulling in a library of around 100 bytes; I'm honestly not sure why, but here is my suggested modification, which in my testing saves 100 bytes of memory:
void gradient(Color c1, Color c2, float time) {
    float stepsize = 0.01; // step size in seconds
    float lambda;
    int maxiter = (int)(time / stepsize);
    Color result(0, 0, 0);
    for (int i = 0; i <= maxiter; i++) {
        lambda = (float)i / maxiter;
        result.Red = c1.Red * (1 - lambda) + c2.Red * lambda;
        result.Green = c1.Green * (1 - lambda) + c2.Green * lambda;
        result.Blue = c1.Blue * (1 - lambda) + c2.Blue * lambda;
        for (uint8_t x = 0; x < 20; x++)
            pixels.setPixelColor(x, result.Red, result.Green, result.Blue);
        pixels.show();
        //delay(stepsize * 1000); // delay in milliseconds
        unsigned long lastTime = millis(); // unsigned long, matching millis()
        unsigned long delayTime = stepsize * 1000;
        while (millis() - lastTime < delayTime) {}
    }
}
- First off, your Color object should hold 3 unsigned chars (0-255); there is no reason to put ints in there (the byte type in Arduino).
- Second, I am not sure how you are implementing time, but generally in Arduino you work in milliseconds. Without seeing the rest of your implementation, I am guessing that time is a segment of time, and based on your delay, I am going to guess that you could pass time as a short, in milliseconds, multiplying by 1000 if necessary (this would hold up to about 32 seconds):
void gradient(Color c1, Color c2, short time) {
    short maxiter = (short)(time / 10);
    Color result(0, 0, 0);
    for (int i = 0; i <= maxiter; i++) {
        result.Red = (c1.Red * (maxiter - i) + c2.Red * i) / maxiter;
        result.Green = (c1.Green * (maxiter - i) + c2.Green * i) / maxiter;
        result.Blue = (c1.Blue * (maxiter - i) + c2.Blue * i) / maxiter;
        for (uint8_t x = 0; x < 20; x++)
            pixels.setPixelColor(x, result.Red, result.Green, result.Blue);
        pixels.show();
        delay(10); // delay in milliseconds
    }
}
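For completeness, a minimal sketch of what the byte-sized Color object from the first point could look like; the original definition isn't shown in the question, so the constructor here is an assumption:
struct Color {
    byte Red, Green, Blue; // byte (uint8_t) instead of int saves RAM per channel
    Color(byte r, byte g, byte b) : Red(r), Green(g), Blue(b) {}
};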

Quadratic functions in C

I have almost everything working except for solving for x on line 25; I keep getting an error saying "term does not evaluate to a function taking 1787 arguments". I had it giving me a 1 or a 0, but as I kept messing with it I lost where I was at and saved over the copy. Still new to posting, sorry if it's hard to read.
#include <stdio.h>
#include <math.h>

void quadratic_function()
{
    int a, b, c;     // variables
    long int result; // my X in the quadratic function
    long int y, x;   // the result
    long int quadratic;
    printf("enter values for a,b,c\n");
    scanf("%i\n %i\n %i", &a, &b, &c);
    printf("A=%i B=%i C=%i\n", a, b, c); // displays variables
    y = pow(b, 2);
    result = (y) * -4 * (a) * (c); // b^2-4ac
    printf("\n%li\n", result);
    if (result < 0)
        printf("Imaginary Number"); // if negative
    else (result > 0);
    x = (-b / 2 * (a)) +- (sqrt(pow(b, 2)) (-4 * (a) * (c))) / (2 * (a));
    // solving for x
    printf("\n %li\n", x);
    a = a * x;
    b = b * x;
    quadratic = pow(a, 2) * (b) * (c); // if positive
    //printf("Quadratic equation equal to %li",quadratic); // result
}

int main()
{
    quadratic_function();
    return 0;
}
The first thing I noticed is that you were trying to do the + and - portions of the quadratic equation at the same time. The equation
x = (-b +- sqrt(b^2 - 4ac)) / 2a
means the same as
x = (-b + sqrt(b^2 - 4ac)) / 2a AND x = (-b - sqrt(b^2 - 4ac)) / 2a
In other words, the equation has two answers if b^2 - 4ac is greater than 0, one answer if it is 0, and no answer if it is negative.
Another thing: the line else (result>0); doesn't really do anything. All the code after it will execute even when you get b^2 - 4ac < 0.
Finally, I wasn't entirely sure about your groupings or C's precedence with the negative sign, so I changed your parentheses around a bit.
y = pow(b, 2);
result = (y) - (4 * a * c); // b^2-4ac
printf("\n%li\n", result);
if (result < 0) {
    printf("Imaginary Number"); // if negative
} else if (result == 0) {
    x = (-b) / (2 * a); // sqrt(0) = 0, so don't bother calculating it
    a = a * x;
    b = b * x;
    quadratic = pow(a, 2) * (b) * (c);
    printf("Quadratic equation equal to %li", quadratic); // result
} else if (result > 0) {
    // solve for (-b + sqrt(b^2 - 4ac)) / 2a
    x = ((-b) + sqrt(pow(b, 2) - (4 * a * c))) / (2 * a);
    printf("\n %li\n", x);
    a = a * x;
    b = b * x;
    quadratic = pow(a, 2) * (b) * (c);
    printf("Quadratic equation equal to %li", quadratic); // result
    // do it again for (-b - sqrt(b^2 - 4ac)) / 2a
    x = ((-b) - sqrt(pow(b, 2) - (4 * a * c))) / (2 * a);
    printf("\n %li\n", x);
    a = a * x;
    b = b * x;
    quadratic = pow(a, 2) * (b) * (c);
    printf("Quadratic equation equal to %li", quadratic);
}
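One further point, not in the original answer: x is declared long int, so fractional roots get truncated. A sketch of the root computation using doubles (the names disc, x1 and x2 are mine) is more faithful to the math:
double disc = (double)b * b - 4.0 * a * c; // discriminant b^2 - 4ac
if (disc >= 0) {
    double x1 = (-b + sqrt(disc)) / (2.0 * a); // first root
    double x2 = (-b - sqrt(disc)) / (2.0 * a); // second root (equal to x1 when disc == 0)
    printf("x1 = %f, x2 = %f\n", x1, x2);
} else {
    printf("Imaginary Number\n");
}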

Is there a function to convert HSL to color:int in actionscript?

I want to define a color value using HSL (Hue, Saturation, Lightness) rather than RGB.
Does ActionScript have a function for that?
For example, I want to write code like the following:
var color:int = hsl(10, 90, 30);
sprite.graphics.lineStyle(2, color);
There is no built-in way, but there are many functions available online to do the conversion. For example, these two JavaScript functions (they might need to be slightly refactored to compile under AS3, but it should just be minor changes):
/**
 * Converts an RGB color value to HSL. Conversion formula
 * adapted from http://en.wikipedia.org/wiki/HSL_color_space.
 * Assumes r, g, and b are contained in the set [0, 255] and
 * returns h, s, and l in the set [0, 1].
 *
 * @param Number r The red color value
 * @param Number g The green color value
 * @param Number b The blue color value
 * @return Array The HSL representation
 */
function rgbToHsl(r, g, b){
    r /= 255, g /= 255, b /= 255;
    var max = Math.max(r, g, b), min = Math.min(r, g, b);
    var h, s, l = (max + min) / 2;
    if(max == min){
        h = s = 0; // achromatic
    }else{
        var d = max - min;
        s = l > 0.5 ? d / (2 - max - min) : d / (max + min);
        switch(max){
            case r: h = (g - b) / d + (g < b ? 6 : 0); break;
            case g: h = (b - r) / d + 2; break;
            case b: h = (r - g) / d + 4; break;
        }
        h /= 6;
    }
    return [h, s, l];
}
/**
 * Converts an HSL color value to RGB. Conversion formula
 * adapted from http://en.wikipedia.org/wiki/HSL_color_space.
 * Assumes h, s, and l are contained in the set [0, 1] and
 * returns r, g, and b in the set [0, 255].
 *
 * @param Number h The hue
 * @param Number s The saturation
 * @param Number l The lightness
 * @return Array The RGB representation
 */
function hslToRgb(h, s, l){
    var r, g, b;
    if(s == 0){
        r = g = b = l; // achromatic
    }else{
        function hue2rgb(p, q, t){
            if(t < 0) t += 1;
            if(t > 1) t -= 1;
            if(t < 1/6) return p + (q - p) * 6 * t;
            if(t < 1/2) return q;
            if(t < 2/3) return p + (q - p) * (2/3 - t) * 6;
            return p;
        }
        var q = l < 0.5 ? l * (1 + s) : l + s - l * s;
        var p = 2 * l - q;
        r = hue2rgb(p, q, h + 1/3);
        g = hue2rgb(p, q, h);
        b = hue2rgb(p, q, h - 1/3);
    }
    return [r * 255, g * 255, b * 255];
}
Then use ColorTransform to get the uint RGB value. Note that hslToRgb() expects h, s and l in [0, 1], so values like hue 10°, 90% saturation and 30% lightness must be normalized first:
var c:Array = hslToRgb(10 / 360, 0.9, 0.3);
var t:ColorTransform = new ColorTransform(1.0, 1.0, 1.0, 1.0, c[0], c[1], c[2]);
var color:uint = t.color;
If you use Flex 4, you can use the HSBColor class. It has two static methods: convertHSBtoRGB and convertRGBtoHSB.
