My problem is that when I calculate and add specular light from a point light, the result is fine if I hardcode the exponent as a number in the pixel shader, but if that number comes from a variable, the result is blocky, with very sharp contrast between shadow and light.
This is my pixel shader code:
cbuffer lights :register(b0)
{
row_major float4x4 worldM;
row_major float4x4 TransformM;
float4 PointLightpos;
float4 PointLightcolor;
float4 AmbientLightColor;
float4 intensities;
float4 attenuations;
float4 cameraPos;
float4 ambient;
float4 diffuse;
float4 specular;
row_major float4x4 skinMatrix[50];
};
Texture2D tex : register(t0);
Texture2D nTex: register(t1);
SamplerState mySampler : SAMPLER;
struct Input {
float4 outPos : SV_POSITION;
float3 Color : COLOR;
float3 normal : NORMAL;
float2 UV : TEXCOORD;
float4 worldPos : POSITION;
float3 tangents : TANGENT;
};
struct Output
{
float4 Color : SV_TARGET;
};
Output main(Input input)
{
Output output;
//* ambientspecularExp
//pointLightDiffuse
float4 plight = PointLightcolor * intensities.y;
float4 pointLightDir = normalize(PointLightpos - input.worldPos);
float4 normalW = float4(input.normal.x, input.normal.y, input.normal.z, 1);
float4 diffuseFactor = max(dot(pointLightDir, normalW), 0);
//Atten = 1/( att0i + att1i * d + att2i * d²)
float d = distance(PointLightpos, input.worldPos);
float attenuation = 1 / (attenuations.x + (attenuations.y * d) + (attenuations.z * pow(d, 2)));
float4 aL = (AmbientLightColor * intensities.x)*attenuation;
//diffuseFactor *= attenuation;
float4 diffuseLight = (plight *diffuseFactor * diffuse)*attenuation;
//Specular * PointLightcolor
float r = reflect(-pointLightDir, normalW);
float v = normalize(cameraPos - input.worldPos);
//float4 spec = float4(max((pow(dot(r, v), specularExp) * specular), 0));
float4 spec = float4(((pow(dot(r, v), intensities.z) * specular))*attenuation);
I use an OpenGL shader to apply a median filter to an image. I copy the input image into the in_fbo buffer; that part works fine.
QGLFramebufferObject *in_fbo, *out_fbo;
painter.begin(in_fbo); //Copy QImage to QGLFramebufferObject
painter.drawImage(0,0,image_in,0,0,width,height);
painter.end();
out_fbo->bind();
glViewport( 0, 0, nWidth, nHeight );
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
glOrtho( 0.0, nWidth, 0.0, nHeight, -1.0, 1.0 );
glMatrixMode( GL_MODELVIEW );
glLoadIdentity( );
glEnable( GL_TEXTURE_2D );
out_fbo->drawTexture( QPointF(0.0,0.0), in_fbo->texture( ), GL_TEXTURE_2D );
But in the shader code I need to divide the vertex position by the width and height of the image, because texture coordinates are normalized to the range between 0 and 1.
How do I calculate the texture coordinates correctly?
//vertex shader
varying vec2 pos;
void main( void )
{
pos = gl_Vertex.xy;
gl_Position = ftransform( );
}
//fragment shader
#extension GL_ARB_texture_rectangle : enable
uniform sampler2D texture0;
uniform int imgWidth;
uniform int imgHeight;
uniform int len;
varying vec2 pos;
#define MAX_LEN (100)
void main(){
float v[ MAX_LEN ];
for (int i = 0; i < len; i++) {
vec2 posi = pos + float(i);
posi.x = posi.x / float( imgWidth );
posi.y = posi.y / float( imgHeight );
v[i] = texture2D(texture0, posi).r;
}
//
//.... Calculating new value
//
gl_FragColor = vec4( m, m, m, 1.0 );
}
Previously I did this in openFrameworks, but the shader that worked for the texture in OF does not work for the texture in Qt. I suppose that is because OF creates textures with textureTarget = GL_TEXTURE_RECTANGLE_ARB. Now the result of applying the shader above isn't correct: it isn't identical to the result of the old shader (a few pixels have different colors). I don't know how to modify the shader above :(.
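As far as I understand it, the difference comes down to the coordinates each texture target expects; a small illustrative shader (not my actual filter, just showing the convention I'm assuming):
#version 120
#extension GL_ARB_texture_rectangle : enable
uniform sampler2DRect texRect; // texture created with GL_TEXTURE_RECTANGLE_ARB
uniform sampler2D     tex2D;   // the same image as a GL_TEXTURE_2D texture
uniform vec2 imgSize;          // image width/height in pixels

void main()
{
    vec2 pixel = gl_FragCoord.xy;
    vec4 a = texture2DRect(texRect, pixel);      // rectangle target: pixel coordinates
    vec4 b = texture2D(tex2D, pixel / imgSize);  // 2D target: normalized [0,1] coordinates
    gl_FragColor = abs(a - b);                   // should be (nearly) black for the same image
}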
Old shaders:
//vertex
#version 120
#extension GL_ARB_texture_rectangle : enable
void main() {
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
gl_TexCoord[0] = gl_MultiTexCoord0;
gl_FrontColor = gl_Color;
}
//fragment
#version 120
#extension GL_ARB_texture_rectangle : enable
uniform sampler2D texture0;
uniform int len;
void main(){
vec2 pos = gl_TexCoord[0].xy;
pos.x = int( pos.x );
pos.y = int( pos.y );
float v[ MAX_LEN ];
for (int i=0; i<len; i++) {
vec2 posi = pos + i;
posi.x = int( posi.x + 0.5 ) + 0.5;
posi.y = int( posi.y + 0.5 ) + 0.5;
v[i] = texture2D(texture0, posi).r;
}
//
//.... Calculating new value
//
gl_FragColor = vec4( m, m, m, 1.0 );
}
OpenGL code from OpenFrameworks lib
texData.width = w;
texData.height = h;
texData.tex_w = w;
texData.tex_h = h;
texData.textureTarget = GL_TEXTURE_RECTANGLE_ARB;
texData.bFlipTexture = true;
texData.glType = GL_RGBA;
// create & setup FBO
glGenFramebuffersEXT(1, &fbo);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);
// Create the render buffer for depth
glGenRenderbuffersEXT(1, &depthBuffer);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, depthBuffer);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT, texData.tex_w, texData.tex_h);
// create & setup texture
glGenTextures(1, (GLuint *)(&texData.textureID)); // could be more than one, but for now, just one
glBindTexture(texData.textureTarget, (GLuint)(texData.textureID));
glTexParameterf(texData.textureTarget, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(texData.textureTarget, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameterf(texData.textureTarget, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(texData.textureTarget, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(texData.textureTarget, 0, texData.glType, texData.tex_w, texData.tex_h, 0, texData.glType, GL_UNSIGNED_BYTE, 0);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
// attach it to the FBO so we can render to it
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, texData.textureTarget, (GLuint)texData.textureID, 0);
I do not think you actually want to use the texture's dimensions to do this. From the sounds of things this is a simple fullscreen image filter, and you really just want fragment coordinates mapped into the range [0.0,1.0]. If this is the case, then gl_FragCoord.xy / viewport.xy, where viewport is a 2D uniform that defines the width and height of your viewport, ought to work for your texture coordinates (in the fragment shader).
vec2 texCoord = vec2 (transformed_pos.x, transformed_pos.y) / transformed_pos.w * vec2 (0.5, 0.5) + vec2 (0.5, 0.5) may also work using the same principle -- clip-space coordinates transformed into NDC and then mapped to texture-space. This approach will not properly account for texel centers ((0.5, 0.5) rather than (0.0, 0.0)), however, and can present problems when texture filtering is enabled and the wrap mode is not GL_CLAMP_TO_EDGE.
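For example, a minimal fragment shader along those lines (assuming a vec2 uniform named viewport that you set to the viewport size from the application) could look like this:
#version 120
uniform sampler2D texture0;
uniform vec2 viewport; // viewport width/height, set by the application

void main()
{
    // Map the fragment's window-space position into [0,1] texture space
    vec2 texCoord = gl_FragCoord.xy / viewport;
    gl_FragColor = texture2D(texture0, texCoord);
}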
I am trying to implement reflection mapping for a sphere in OpenGL ES 2.0.
I have already done the skybox.
For the sphere rendering, the reflection shaders I have used are:
Environment mapping (sphere) vertex shader:
precision highp float;
uniform mat4 u_mvMatrix; // ModelView Matrix
uniform mat4 u_mvpMatrix; // ModelViewProjection Matrix
attribute vec4 a_position;
attribute vec3 a_envmapNormal;
varying vec3 v_eyecoordEyeReflection;
vec3 v_eyecoordPosition;
vec3 v_eyecoordNormal;
void main()
{
// position and normal in model coordinates
vec4 modelCoordPosition = a_position;
vec3 modelCoordNormal = a_envmapNormal;
// Calculate position in eye space
v_eyecoordPosition = vec3(u_mvMatrix * modelCoordPosition);
// Calculate and normalize eye space normal
vec3 eyecoordNormal = vec3(u_mvMatrix * vec4(modelCoordNormal, 0.0));
v_eyecoordNormal = normalize(eyecoordNormal);
// Calculate reflection vector
v_eyecoordEyeReflection = reflect(v_eyecoordPosition, v_eyecoordNormal);
gl_Position = u_mvpMatrix * a_position;
}
Environment mapping (sphere) fragment shader:
precision highp float;
uniform lowp samplerCube baseCubeMapTexture;
varying vec3 v_eyecoordEyeReflection;
void main()
{
gl_FragColor = textureCube(baseCubeMapTexture, v_eyecoordEyeReflection);
}
But I am not getting the correct output.
When the sphere is rotated, the texture does not change.
What is the error in the shader?
Thanks Andon... I used your shader code, but I am getting a white sphere.
The sphere normals are calculated using:
#define ANGLE_STEP ((2.0f * OGLES_PI) / ((float) NUM_OF_SLICES))
for ( iCnti = 0; iCnti < NUM_OF_PARALLELS + 1; iCnti++ ) {
for ( iCntj = 0; iCntj < NUM_OF_SLICES + 1; iCntj++ ) {
pSphereNormals[iNormalIndex + 0] = sin(ANGLE_STEP * (FLOAT) iCnti )* sin (ANGLE_STEP *(FLOAT)iCntj);
pSphereNormals[iNormalIndex + 1] = cos(ANGLE_STEP * (FLOAT) iCnti );
pSphereNormals[iNormalIndex + 2] = sin(ANGLE_STEP * (FLOAT) iCnti )* cos (ANGLE_STEP *(FLOAT)iCntj);
iNormalIndex += 3;
}
}
My view matrix matViewMatrix is derived from mat4x4_look_at() (http://www.learnopengles.com/tag/linmath-h/):
MyCameraLookAt(matViewMatrix, 0.0f, 0.0f, -2.0f, 0.0f, 0.0f, -1.0f, 0.0f, 1.0f, 0.0f);
The inverse matrix InvViewMat (the inverse() function is taken from http://www.opensource.apple.com/source/WebCore/WebCore-514/platform/graphics/transforms/TransformationMatrix.cpp) is:
InvViewMat[0][0] = -1.000000 InvViewMat[1][0] = -0.000000 InvViewMat[2][0] = 0.000000 InvViewMat[3][0] = -0.000000
InvViewMat[0][1] = -0.000000 InvViewMat[1][1] = 1.000000 InvViewMat[2][1] = -0.000000 InvViewMat[3][1] = -0.000000
InvViewMat[0][2] = 0.000000 InvViewMat[1][2] = -0.000000 InvViewMat[2][2] = -1.000000 InvViewMat[3][2] = -2.000000
InvViewMat[0][3] = -0.000000 InvViewMat[1][3] = 0.000000 InvViewMat[2][3] = 0.000000 InvViewMat[3][3] = 1.000000
Is there any problem with my matrix values or any of my calculations?
If you have a sphere centered at the camera's origin (eye-space), then no matter how you rotate it the position and normals in eye-space are always going to be the same at any location on screen. That is the definition of a sphere - every vertex is the same distance (radius) from the center.
You actually need to do this in world-space (that position will vary as you rotate the sphere).
Now, this brings up an issue - you only have a ModelView matrix (which transforms from object-space to eye-space). You are going to need to split your Model and View matrices to do this and for convenience you should pass the inverse of the View matrix to GLSL.
Below is a modified Vertex Shader that does what you want:
precision highp float;
uniform mat4 u_vInvMatrix; // Inverse View Matrix -- NEW
uniform mat4 u_mvMatrix; // ModelView Matrix
uniform mat4 u_mvpMatrix; // ModelViewProjection Matrix
attribute vec4 a_position;
attribute vec3 a_envmapNormal;
//varying vec3 v_eyecoordEyeReflection; // YOU DO NOT WANT EYE-SPACE
varying vec3 v_worldReflection; // Use world-space instead -- MODIFIED
vec3 v_eyecoordPosition;
vec3 v_eyecoordNormal;
void main()
{
// position and normal in model coordinates
vec4 modelCoordPosition = a_position;
vec3 modelCoordNormal = a_envmapNormal;
// Calculate position in eye space
v_eyecoordPosition = vec3(u_mvMatrix * modelCoordPosition);
// Calculate and normalize eye space normal
vec3 eyecoordNormal = vec3(u_mvMatrix * vec4(modelCoordNormal, 0.0));
v_eyecoordNormal = normalize(eyecoordNormal);
// Calculate reflection vector (eye-space)
vec3 eyeReflection = reflect(v_eyecoordPosition, v_eyecoordNormal);
// Transform the reflection into world-space -- NEW
v_worldReflection = vec3(u_vInvMatrix * vec4(eyeReflection, 0.0));
gl_Position = u_mvpMatrix * a_position;
}
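On the application side you also need to upload the extra uniform. A rough host-side sketch (invertMatrix4x4() stands in for whatever matrix-inverse routine you already use, e.g. the WebCore one, and matViewMatrix/invViewMatrix are column-major float[16] arrays; the names here are just placeholders):
GLfloat invViewMatrix[16];
invertMatrix4x4(invViewMatrix, matViewMatrix);       // placeholder for your own inverse

GLint loc = glGetUniformLocation(program, "u_vInvMatrix");
glUniformMatrix4fv(loc, 1, GL_FALSE, invViewMatrix); // ES 2.0 requires transpose == GL_FALSE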
Currently, I have an OpenCL kernel for BVH ray traversal, shown below. I'd be glad if someone had some pointers on optimizing this quite large kernel.
The thing is, I'm running this code with an SAH BVH and I'd like to get performance similar to Timo Aila's traversal in his paper (Understanding the Efficiency of Ray Traversal on GPUs). Of course, his code uses a SplitBVH (which I might consider using in place of the SAH BVH, but in my opinion it has really slow build times). But I'm asking about traversal, not the BVH (also, I've so far worked only with scenes where a SplitBVH won't give you much advantage over an SAH BVH).
First of all, here is what I have so far (standard while-while traversal kernel).
__constant sampler_t sampler = CLK_FILTER_NEAREST;
// Inline definition of horizontal max
inline float max4(float a, float b, float c, float d)
{
return max(max(max(a, b), c), d);
}
// Inline definition of horizontal min
inline float min4(float a, float b, float c, float d)
{
return min(min(min(a, b), c), d);
}
// Traversal kernel
__kernel void traverse( __read_only image2d_t nodes,
__global const float4* triangles,
__global const float4* rays,
__global float4* result,
const int num,
const int w,
const int h)
{
// Ray index
int idx = get_global_id(0);
if(idx < num)
{
// Stack
int todo[32];
int todoOffset = 0;
// Current node
int nodeNum = 0;
float tmin = 0.0f;
float depth = 2e30f;
// Fetch ray origin, direction and compute invdirection
float4 origin = rays[2 * idx + 0];
float4 direction = rays[2 * idx + 1];
float4 invdir = native_recip(direction);
float4 temp = (float4)(0.0f, 0.0f, 0.0f, 1.0f);
// Traversal loop
while(true)
{
// Fetch node information
int2 nodeCoord = (int2)((nodeNum << 2) % w, (nodeNum << 2) / w);
int4 specs = read_imagei(nodes, sampler, nodeCoord + (int2)(3, 0));
// While node isn't leaf
while(specs.z == 0)
{
// Fetch child bounding boxes
float4 n0xy = read_imagef(nodes, sampler, nodeCoord);
float4 n1xy = read_imagef(nodes, sampler, nodeCoord + (int2)(1, 0));
float4 nz = read_imagef(nodes, sampler, nodeCoord + (int2)(2, 0));
// Test ray against child bounding boxes
float oodx = origin.x * invdir.x;
float oody = origin.y * invdir.y;
float oodz = origin.z * invdir.z;
float c0lox = n0xy.x * invdir.x - oodx;
float c0hix = n0xy.y * invdir.x - oodx;
float c0loy = n0xy.z * invdir.y - oody;
float c0hiy = n0xy.w * invdir.y - oody;
float c0loz = nz.x * invdir.z - oodz;
float c0hiz = nz.y * invdir.z - oodz;
float c1loz = nz.z * invdir.z - oodz;
float c1hiz = nz.w * invdir.z - oodz;
float c0min = max4(min(c0lox, c0hix), min(c0loy, c0hiy), min(c0loz, c0hiz), tmin);
float c0max = min4(max(c0lox, c0hix), max(c0loy, c0hiy), max(c0loz, c0hiz), depth);
float c1lox = n1xy.x * invdir.x - oodx;
float c1hix = n1xy.y * invdir.x - oodx;
float c1loy = n1xy.z * invdir.y - oody;
float c1hiy = n1xy.w * invdir.y - oody;
float c1min = max4(min(c1lox, c1hix), min(c1loy, c1hiy), min(c1loz, c1hiz), tmin);
float c1max = min4(max(c1lox, c1hix), max(c1loy, c1hiy), max(c1loz, c1hiz), depth);
bool traverseChild0 = (c0max >= c0min);
bool traverseChild1 = (c1max >= c1min);
nodeNum = specs.x;
int nodeAbove = specs.y;
// We hit just one of the 2 children
if(traverseChild0 != traverseChild1)
{
if(traverseChild1)
{
nodeNum = nodeAbove;
}
}
// We hit either both or none
else
{
// If we hit none, pop node from stack (or exit traversal, if stack is empty)
if (!traverseChild0)
{
if(todoOffset == 0)
{
break;
}
nodeNum = todo[--todoOffset];
}
// If we hit both
else
{
// Sort them (so nearest goes 1st, further 2nd)
if(c1min < c0min)
{
unsigned int tmp = nodeNum;
nodeNum = nodeAbove;
nodeAbove = tmp;
}
// Push further on stack
todo[todoOffset++] = nodeAbove;
}
}
// Fetch next node information
nodeCoord = (int2)((nodeNum << 2) % w, (nodeNum << 2) / w);
specs = read_imagei(nodes, sampler, nodeCoord + (int2)(3, 0));
}
// If node is leaf & has some primitives
if(specs.z > 0)
{
// Loop through primitives & perform intersection with them (Woop triangles)
for(int i = specs.x; i < specs.y; i++)
{
// Fetch first point from global memory
float4 v0 = triangles[i * 4 + 0];
float o_z = v0.w - origin.x * v0.x - origin.y * v0.y - origin.z * v0.z;
float i_z = 1.0f / (direction.x * v0.x + direction.y * v0.y + direction.z * v0.z);
float t = o_z * i_z;
if(t > 0.0f && t < depth)
{
// Fetch second point from global memory
float4 v1 = triangles[i * 4 + 1];
float o_x = v1.w + origin.x * v1.x + origin.y * v1.y + origin.z * v1.z;
float d_x = direction.x * v1.x + direction.y * v1.y + direction.z * v1.z;
float u = o_x + t * d_x;
if(u >= 0.0f && u <= 1.0f)
{
// Fetch third point from global memory
float4 v2 = triangles[i * 4 + 2];
float o_y = v2.w + origin.x * v2.x + origin.y * v2.y + origin.z * v2.z;
float d_y = direction.x * v2.x + direction.y * v2.y + direction.z * v2.z;
float v = o_y + t * d_y;
if(v >= 0.0f && u + v <= 1.0f)
{
// We got successful hit, store the information
depth = t;
temp.x = u;
temp.y = v;
temp.z = t;
temp.w = as_float(i);
}
}
}
}
}
// Pop node from stack (if empty, finish traversal)
if(todoOffset == 0)
{
break;
}
nodeNum = todo[--todoOffset];
}
// Store the ray traversal result in global memory
result[idx] = temp;
}
}
The first question of the day is: how could one write the persistent while-while and speculative while-while kernels in OpenCL?
Regarding persistent while-while: do I get it right that I actually just start the kernel with a global work size equal to the local work size, and that both of these numbers should be equal to the warp/wavefront size of the GPU?
I get that with CUDA the persistent thread implementation looks like this:
do
{
volatile int& jobIndexBase = nextJobArray[threadIdx.y];
if(threadIdx.x == 0)
{
jobIndexBase = atomicAdd(&warpCounter, WARP_SIZE);
}
index = jobIndexBase + threadIdx.x;
if(index >= totalJobs)
return;
/* Perform work for task numbered 'index' */
}
while(true);
What could the equivalent look like in OpenCL? I know I'll have to put some barriers in there; I also know that one of them should come right after the spot where I atomically add WARP_SIZE to warpCounter.
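Here is how I currently picture the OpenCL equivalent; an untested sketch, where WARP_SIZE and doWork() are placeholders for the real wavefront size and for the actual traversal body:
#define WARP_SIZE 32 // assumed warp/wavefront size

void doWork(int index)
{
    /* traversal for ray number 'index' would go here */
}

__kernel void persistentTraverse(__global int* warpCounter, const int totalJobs)
{
    __local volatile int jobIndexBase;
    while(true)
    {
        // First work-item of the group grabs the next batch of jobs
        if(get_local_id(0) == 0)
            jobIndexBase = atomic_add(warpCounter, WARP_SIZE);
        barrier(CLK_LOCAL_MEM_FENCE);
        // The whole group exits together once there is no work left
        if(jobIndexBase >= totalJobs)
            return;
        int index = jobIndexBase + (int)get_local_id(0);
        if(index < totalJobs)
            doWork(index);
        // Make sure nobody re-reads jobIndexBase before everyone is done with it
        barrier(CLK_LOCAL_MEM_FENCE);
    }
}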
Regarding speculative traversal: well, I don't really have any idea how this should be implemented in OpenCL, so any hints are welcome. I also have no idea where to put the barriers (because putting them around a simulated __any results in a driver crash).
If you made it here, thanks for reading & any hints, answers, etc. are welcome!
An optimization you can do is to use vector variables and the fused multiply-add function to speed up your setup math. As for the rest of the kernel, it is slow because it is branchy. If you can make assumptions about the signal data, you might be able to reduce the execution time by reducing the code branches. I have not checked the float4 swizzles (the .xxyy and .x .y .z .w after the float4 variables), so just check those.
float4 n0xy = read_imagef(nodes, sampler, nodeCoord);
float4 n1xy = read_imagef(nodes, sampler, nodeCoord + (int2)(1, 0));
float4 nz = read_imagef(nodes, sampler, nodeCoord + (int2)(2, 0));
float4 oodf4 = -origin * invdir;
float4 c0xyf4 = fma(n0xy, invdir.xxyy, oodf4.xxyy);
float4 c0zc1z = fma(nz, (float4)(invdir.z), (float4)(oodf4.z));
float c0min = max4(min(c0xyf4.x, c0xyf4.y), min(c0xyf4.z, c0xyf4.w), min(c0zc1z.x, c0zc1z.y), tmin);
float c0max = min4(max(c0xyf4.x, c0xyf4.y), max(c0xyf4.z, c0xyf4.w), max(c0zc1z.x, c0zc1z.y), depth);
float4 c1xy = fma(n1xy, invdir.xxyy, oodf4.xxyy);
float c1min = max4(min(c1xy.x, c1xy.y), min(c1xy.z, c1xy.w), min(c0zc1z.z, c0zc1z.w), tmin);
float c1max = min4(max(c1xy.x, c1xy.y), max(c1xy.z, c1xy.w), max(c0zc1z.z, c0zc1z.w), depth);
I'm still new to OpenCL. I was doing some tests based on Nvidia's examples; the whole program consists of 5 kernels, and these kernels execute in order (1, 2, 3, 4, 5).
The first kernel simply takes position data and velocity data, applies gravity and basic collision detection, and then adjusts that position and velocity ... this kernel works perfectly, without problems.
Here is the first kernel:
__kernel void integrate(
__global float4 *d_Pos, //input/output
__global float4 *d_Vel, //input/output
__constant simParams_t *params,
float deltaTime,
uint numParticles
){
const uint index = get_global_id(0);
if(index >= numParticles)
return;
float4 pos = d_Pos[index];
float4 vel = d_Vel[index];
pos.w = 1.0f;
vel.w = 0.0f;
//Gravity
vel += (float4)(params->gravity.x, params->gravity.y, params->gravity.z, 0) * deltaTime;
vel *= params->globalDamping;
//Advance pos
pos += vel * deltaTime;
//Collide with cube
if(pos.x < -1.0f + params->particleRadius){
pos.x = -1.0f + params->particleRadius;
vel.x *= params->boundaryDamping;
}
if(pos.x > 1.0f - params->particleRadius){
pos.x = 1.0f - params->particleRadius;
vel.x *= params->boundaryDamping;
}
if(pos.y < -1.0f + params->particleRadius){
pos.y = -1.0f + params->particleRadius;
vel.y *= params->boundaryDamping;
}
if(pos.y > 1.0f - params->particleRadius){
pos.y = 1.0f - params->particleRadius;
vel.y *= params->boundaryDamping;
}
if(pos.z < -1.0f + params->particleRadius){
pos.z = -1.0f + params->particleRadius;
vel.z *= params->boundaryDamping;
}
if(pos.z > 1.0f - params->particleRadius){
pos.z = 1.0f - params->particleRadius;
vel.z *= params->boundaryDamping;
}
//Store new position and velocity
d_Pos[index] = pos;
d_Vel[index] = vel;
}
The second kernel takes these positions as input and outputs another kind of data (some indices), but it doesn't change the position data.
The third kernel adjusts the second kernel's outputs (it takes data from the second kernel and doesn't touch the position data either).
Now for the problem: the 4th kernel. It takes the position and velocity data (from the first kernel), takes the adjusted data from the third kernel, and outputs new position and velocity data (to completely different pointers).
Here is the fourth kernel:
__kernel void findCellBoundsAndReorder(
__global uint *d_CellStart, //output: cell start index
__global uint *d_CellEnd, //output: cell end index
__global float4 *d_ReorderedPos, //output: reordered by cell hash positions
__global float4 *d_ReorderedVel, //output: reordered by cell hash velocities
__global const uint *d_Hash, //input: sorted grid hashes
__global const uint *d_Index, //input: particle indices sorted by hash
__global const float4 *d_Pos, //input: positions array sorted by hash
__global const float4 *d_Vel, //input: velocity array sorted by hash
__local uint *localHash, //get_group_size(0) + 1 elements
uint numParticles
){
uint hash;
const uint index = get_global_id(0);
//Handle case when no. of particles not multiple of block size
if(index < numParticles){
hash = d_Hash[index];
//Load hash data into local memory so that we can look
//at neighboring particle's hash value without loading
//two hash values per thread
localHash[get_local_id(0) + 1] = hash;
//First thread in block must load neighbor particle hash
if(index > 0 && get_local_id(0) == 0)
localHash[0] = d_Hash[index - 1];
}
barrier(CLK_LOCAL_MEM_FENCE);
if(index < numParticles){
//Border case
if(index == 0)
d_CellStart[hash] = 0;
//Main case
else{
if(hash != localHash[get_local_id(0)])
d_CellEnd[localHash[get_local_id(0)]] = d_CellStart[hash] = index;
};
//Another border case
if(index == numParticles - 1)
d_CellEnd[hash] = numParticles;
//Now use the sorted index to reorder the pos and vel arrays
uint sortedIndex = d_Index[index];
float4 pos = d_Pos[sortedIndex];
float4 vel = d_Vel[sortedIndex];
d_ReorderedPos[index] = pos;
d_ReorderedVel[index] = vel;
}
}
The problem is that if I execute kernel 1 alone (or 1+2, or 1+2+3), positions and velocities are adjusted correctly by the first kernel.
But if I execute kernels 1+2+3+4 (even though kernel 4 doesn't change its input data), the data remains the same (as if I didn't execute anything ... positions are not adjusted).
Well, I have figured out the problem: I was making a mistake with the 3rd kernel's local group size. After fixing that, everything worked correctly. Sorry for the noise.
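For reference, the kind of host-side setup that has to stay consistent is roughly this (a sketch, not the actual project code: 256 is an assumed work-group size, and argument index 8 is kernel 4's __local localHash buffer):
/* The local size passed to clEnqueueNDRangeKernel must match what the kernels'
   local-memory arguments assume, and the global size must be a multiple of it. */
size_t local  = 256;                                          // assumed work-group size
size_t global = ((numParticles + local - 1) / local) * local; // round up to a multiple

/* localHash needs local size + 1 elements */
clSetKernelArg(kernel4, 8, (local + 1) * sizeof(cl_uint), NULL);
clEnqueueNDRangeKernel(queue, kernel4, 1, NULL, &global, &local, 0, NULL, NULL);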
I've implemented the spiral GLSL shader described in this question in HLSL, but the results are not the same. I think it's because of the mod function in GLSL that I've translated to fmod in HLSL. I suspect that this problem only happens when we have negative numbers in the input of the fmod function.
I've tried replacing the call to mod by a call to a function that I've made which does what is described in the GLSL documentation and it works:
mod returns the value of x modulo y. This is computed as x - y * floor(x/y).
The working code I use instead of fmod is:
float mod(float x, float y)
{
return x - y * floor(x/y);
}
In contrast to GLSL's mod, MSDN says the HLSL fmod function does this:
The floating-point remainder is calculated such that x = i * y + f, where i is an integer, f has the same sign as x, and the absolute value of f is less than the absolute value of y.
I've used an HLSL to GLSL converter, and the fmod function is translated as mod. However, I don't know if I can assume that mod translates to fmod.
Questions
What are the differences between GLSL mod and HLSL fmod?
How can I translate MSDN's cryptic description of fmod to a pseudo-code implementation?
GLSL Shader
uniform float time;
uniform vec2 resolution;
uniform vec2 aspect;
void main( void ) {
vec2 position = -aspect.xy + 2.0 * gl_FragCoord.xy / resolution.xy * aspect.xy;
float angle = 0.0 ;
float radius = length(position) ;
if (position.x != 0.0 && position.y != 0.0){
angle = degrees(atan(position.y,position.x)) ;
}
float amod = mod(angle+30.0*time-120.0*log(radius), 30.0) ;
if (amod<15.0){
gl_FragColor = vec4( 0.0, 0.0, 0.0, 1.0 );
} else{
gl_FragColor = vec4( 1.0, 1.0, 1.0, 1.0 );
}
}
HLSL Shader
struct Psl_VertexShaderInput
{
float3 pos : POSITION;
};
struct Psl_VertexShaderOutput
{
float4 pos : POSITION;
};
struct Psl_PixelShaderOutput
{
float4 Output0 : COLOR0;
};
float3 psl_positionOffset;
float2 psl_dimension;
Psl_VertexShaderOutput Psl_VertexShaderFunction(Psl_VertexShaderInput psl_input)
{
Psl_VertexShaderOutput psl_output = (Psl_VertexShaderOutput)0;
psl_output.pos = float4(psl_input.pos + psl_positionOffset, 1);
return psl_output;
}
float time : TIME;
float2 resolution : DIMENSION;
Psl_PixelShaderOutput Psl_PixelShaderFunction(float2 pos : VPOS)
{
Psl_PixelShaderOutput psl_output = (Psl_PixelShaderOutput)0;
float2 aspect = float2(resolution.x / resolution.y, 1.0);
float2 position = -aspect.xy + 2.0 * pos.xy / resolution.xy * aspect.xy;
float angle = 0.0;
float radius = length(position);
if (position.x != 0.0 && position.y != 0.0)
{
angle = degrees(atan2(position.y, position.x));
}
float amod = fmod((angle + 30.0 * time - 120.0 * log(radius)), 30.0);
if (amod < 15.0)
{
psl_output.Output0 = float4(0.0, 0.0, 0.0, 1.0);
return psl_output;
}
else
{
psl_output.Output0 = float4(1.0, 1.0, 1.0, 1.0);
return psl_output;
}
}
technique Default
{
pass P0
{
VertexShader = compile vs_3_0 Psl_VertexShaderFunction();
PixelShader = compile ps_3_0 Psl_PixelShaderFunction();
}
}
As you've noted, they're different. The GLSL mod will always have the same sign as y rather than x. Otherwise it's the same -- a value f such that x = i*y + f where i is an integer and |f| < |y|. If you're trying to make a repeating pattern of some kind, the GLSL mod is generally what you want.
For comparison, the HLSL fmod is equivalent to x - y * trunc(x/y). They're the same when x/y is positive, different when x/y is negative.
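Written out as HLSL, the two behaviours side by side (a small illustrative sketch):
// GLSL-style mod: the result takes the sign of y (what a repeating pattern needs)
float glsl_mod(float x, float y)
{
    return x - y * floor(x / y);
}

// What HLSL's fmod computes: the result takes the sign of x
float fmod_equivalent(float x, float y)
{
    return x - y * trunc(x / y);
}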