Distance from point to conic bezier - math

I'm trying to rasterize a conic (rational quadratic) Bezier and came across this snippet: https://www.shadertoy.com/view/MlKcDD. The idea is to compute the distance from each fragment to the Bezier and use that distance to drive translucency for anti-aliasing. The problem is that I need this to work for rational Beziers, and modifying the example to do so is beyond my math skills.
I meant this snippet in particular:
// This method provides just an approximation, and is only usable in
// the very close neighborhood of the curve. Taken and adapted from
// http://research.microsoft.com/en-us/um/people/hoppe/ravg.pdf
float sdBezier( vec2 p, vec2 v0, vec2 v1, vec2 v2 )
{
    vec2 i = v0 - v2;
    vec2 j = v2 - v1;
    vec2 k = v1 - v0;
    vec2 w = j - k;

    v0 -= p; v1 -= p; v2 -= p;

    float x = cro(v0, v2);
    float y = cro(v1, v0);
    float z = cro(v2, v1);

    vec2 s = 2.0*(y*j + z*k) - x*i;

    float r = (y*z - x*x*0.25)/dot2(s);
    float t = clamp( (0.5*x + y + r*dot(s,w))/(x+y+z), 0.0, 1.0 );

    return length( v0 + t*(k+k + t*w) );
}
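For reference, the only thing "rational" adds to the quadratic case is a weight on the middle control point. Here is a minimal evaluation sketch; the weight parameter w is my own naming and is not part of the snippet above:

// Evaluate a conic (rational quadratic) Bezier at parameter t.
// v0, v2 are the endpoints, v1 the middle control point, w its weight
// (w = 1.0 reduces to the ordinary quadratic Bezier).
vec2 conicBezier( vec2 v0, vec2 v1, vec2 v2, float w, float t )
{
    float s  = 1.0 - t;
    float b0 = s*s;
    float b1 = 2.0*w*s*t;
    float b2 = t*t;
    return (b0*v0 + b1*v1 + b2*v2) / (b0 + b1 + b2);
}

This is just the curve evaluation, not the distance function I am asking about; I include it only to make precise what the weight changes.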

Related

Vulkan 2D geometry shader generated geometry lines exhibiting parallelogram shape

I am trying to add support for geometry shaders for a Vulkan project, so I am just starting with something simple for now.
The goal is, given a list of line vertices, to generate a rectangle encompassing each line.
For that effect I made this geometry shader:
#version 450
#extension GL_ARB_separate_shader_objects : enable

layout(lines) in;
layout(triangle_strip, max_vertices = 6) out;

layout(location = 0) in vec2 fragCoord[];
layout(location = 0) out vec2 fragTexCoord;

void main() {
    vec2 p1 = gl_in[0].gl_Position.xy;
    vec2 p2 = gl_in[1].gl_Position.xy;

    vec2 tangent = normalize(p2 - p1);
    vec2 normal = vec2(tangent.y, -tangent.x) * 0.05;

    vec2 quad[4] = vec2[](p1 + normal, p1 - normal, p2 + normal, p2 - normal);

    // Create first triangle
    gl_Position = vec4(quad[0], 0, 1);
    EmitVertex();
    gl_Position = vec4(quad[1], 0, 1);
    EmitVertex();
    gl_Position = vec4(quad[2], 0, 1);
    EmitVertex();
    EndPrimitive();

    // Create second triangle
    gl_Position = vec4(quad[1], 0, 1);
    EmitVertex();
    gl_Position = vec4(quad[2], 0, 1);
    EmitVertex();
    gl_Position = vec4(quad[3], 0, 1);
    EmitVertex();
    EndPrimitive();
}
Which outputs:
The vertex shader is:
#version 450
#extension GL_ARB_separate_shader_objects : enable

layout(location = 0) in vec3 inPosition;
layout(location = 1) in vec2 inTexCoord;
layout(location = 0) out vec2 fragTexCoord;

void main() {
    gl_Position = vec4(inPosition, 1.0);
    fragTexCoord = inTexCoord;
}
I am not sure why the lines come out as parallelograms instead of rectangles. Offsetting both vertices of the line along its normal (the orthogonal direction) should, by definition, produce a rectangle.
Edit:
Even hard coding the vertices in the vertex shader seems to produce the same result:
vec4 verts[2] = vec4[](vec4(-0.5, -0.5, 0, 1), vec4(0.5, 0.5, 0, 1));

void main() {
    gl_Position = verts[gl_VertexID]; //vec4(inPosition, 1.0);
    fragTexCoord = inTexCoord;
}
I made a silly mistake: coordinates are calculated in normalized device coordinates, but since the window is not square, my space is stretched, deforming the topology. This is the result in a perfectly square image:
To correct this error I must pass the aspect ratio to the shader and correct the vertex positions accordingly.
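A minimal sketch of that correction in the geometry shader, assuming the aspect ratio (width / height) arrives through a push constant I am calling pc.aspect (the name is hypothetical; any uniform would do):

#version 450
layout(lines) in;
layout(triangle_strip, max_vertices = 4) out;

layout(push_constant) uniform Push { float aspect; } pc; // hypothetical; width / height

void main() {
    vec2 p1 = gl_in[0].gl_Position.xy;
    vec2 p2 = gl_in[1].gl_Position.xy;

    // Compute the direction in a space where x and y are equally scaled...
    vec2 tangent = normalize(vec2((p2.x - p1.x) * pc.aspect, p2.y - p1.y));
    vec2 normal  = vec2(tangent.y, -tangent.x) * 0.05;
    // ...then undo the aspect scale on x so the offset stays rectangular on screen.
    normal.x /= pc.aspect;

    // A single 4-vertex strip covers the quad, so the two separate triangles are not needed.
    gl_Position = vec4(p1 + normal, 0, 1); EmitVertex();
    gl_Position = vec4(p1 - normal, 0, 1); EmitVertex();
    gl_Position = vec4(p2 + normal, 0, 1); EmitVertex();
    gl_Position = vec4(p2 - normal, 0, 1); EmitVertex();
    EndPrimitive();
}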

Reflection Mapping in OpenGL ES

I am trying to implement reflection mapping in OpenGL ES 2.0 for a sphere.
I have done the skybox.
For the sphere rendering, the reflection shaders I have used are:
Environment mapping (Sphere) vertex shader:

precision highp float;

uniform mat4 u_mvMatrix;   // ModelView Matrix
uniform mat4 u_mvpMatrix;  // ModelViewProjection Matrix

attribute vec4 a_position;
attribute vec3 a_envmapNormal;

varying vec3 v_eyecoordEyeReflection;

vec3 v_eyecoordPosition;
vec3 v_eyecoordNormal;

void main()
{
    // position and normal in model coordinates
    vec4 modelCoordPosition = a_position;
    vec3 modelCoordNormal = a_envmapNormal;

    // Calculate position in eye space
    v_eyecoordPosition = vec3(u_mvMatrix * modelCoordPosition);

    // Calculate and normalize eye space normal
    vec3 eyecoordNormal = vec3(u_mvMatrix * vec4(modelCoordNormal, 0.0));
    v_eyecoordNormal = normalize(eyecoordNormal);

    // Calculate reflection vector
    v_eyecoordEyeReflection = reflect(v_eyecoordPosition, v_eyecoordNormal);

    gl_Position = u_mvpMatrix * a_position;
}
Environment mapping (Sphere) fragment shader:

precision highp float;

uniform lowp samplerCube baseCubeMapTexture;

varying vec3 v_eyecoordEyeReflection;

void main()
{
    gl_FragColor = textureCube(baseCubeMapTexture, v_eyecoordEyeReflection);
}
But I am not getting the correct output.
When the sphere is rotated, the texture does not change.
What is the error in the shader?
Thanks Andon...
I used your shader code, but I am getting a white sphere.
Sphere Normals are calculated using:
#define ANGLE_STEP ((2.0f * OGLES_PI) / ((float) NUM_OF_SLICES))

for ( iCnti = 0; iCnti < NUM_OF_PARALLELS + 1; iCnti++ ) {
    for ( iCntj = 0; iCntj < NUM_OF_SLICES + 1; iCntj++ ) {
        pSphereNormals[iNormalIndex + 0] = sin(ANGLE_STEP * (FLOAT) iCnti) * sin(ANGLE_STEP * (FLOAT) iCntj);
        pSphereNormals[iNormalIndex + 1] = cos(ANGLE_STEP * (FLOAT) iCnti);
        pSphereNormals[iNormalIndex + 2] = sin(ANGLE_STEP * (FLOAT) iCnti) * cos(ANGLE_STEP * (FLOAT) iCntj);
        iNormalIndex += 3;
    }
}
My view matrix matViewMatrix is derived from mat4x4_look_at() (http://www.learnopengles.com/tag/linmath-h/):
MyCameraLookAt(matViewMatrix, 0.0f, 0.0f, -2.0f, 0.0f, 0.0f, -1.0f, 0.0f, 1.0f, 0.0f);
The inverse matrix InvViewMat (the inverse() function is taken from http://www.opensource.apple.com/source/WebCore/WebCore-514/platform/graphics/transforms/TransformationMatrix.cpp) is:
InvViewMat[0][0] = -1.000000 InvViewMat[1][0] = -0.000000 InvViewMat[2][0] = 0.000000 InvViewMat[3][0] = -0.000000
InvViewMat[0][1] = -0.000000 InvViewMat[1][1] = 1.000000 InvViewMat[2][1] = -0.000000 InvViewMat[3][1] = -0.000000
InvViewMat[0][2] = 0.000000 InvViewMat[1][2] = -0.000000 InvViewMat[2][2] = -1.000000 InvViewMat[3][2] = -2.000000
InvViewMat[0][3] = -0.000000 InvViewMat[1][3] = 0.000000 InvViewMat[2][3] = 0.000000 InvViewMat[3][3] = 1.000000
Is there any problem with my matrix values or any of my calculations?
If you have a sphere centered at the camera's origin (eye-space), then no matter how you rotate it the position and normals in eye-space are always going to be the same at any location on screen. That is the definition of a sphere - every vertex is the same distance (radius) from the center.
You actually need to do this in world-space (that position will vary as you rotate the sphere).
Now, this brings up an issue - you only have a ModelView matrix (which transforms from object-space to eye-space). You are going to need to split your Model and View matrices to do this and for convenience you should pass the inverse of the View matrix to GLSL.
Below is a modified Vertex Shader that does what you want:
precision highp float;

uniform mat4 u_vInvMatrix;  // Inverse View Matrix        -- NEW
uniform mat4 u_mvMatrix;    // ModelView Matrix
uniform mat4 u_mvpMatrix;   // ModelViewProjection Matrix

attribute vec4 a_position;
attribute vec3 a_envmapNormal;

//varying vec3 v_eyecoordEyeReflection; // YOU DO NOT WANT EYE-SPACE
varying vec3 v_worldReflection;         // Use world-space instead  -- MODIFIED

vec3 v_eyecoordPosition;
vec3 v_eyecoordNormal;

void main()
{
    // position and normal in model coordinates
    vec4 modelCoordPosition = a_position;
    vec3 modelCoordNormal = a_envmapNormal;

    // Calculate position in eye space
    v_eyecoordPosition = vec3(u_mvMatrix * modelCoordPosition);

    // Calculate and normalize eye space normal
    vec3 eyecoordNormal = vec3(u_mvMatrix * vec4(modelCoordNormal, 0.0));
    v_eyecoordNormal = normalize(eyecoordNormal);

    // Calculate reflection vector (eye-space)
    vec3 eyeReflection = reflect(v_eyecoordPosition, v_eyecoordNormal);

    // Transform the reflection into world-space -- NEW
    v_worldReflection = vec3(u_vInvMatrix * vec4(eyeReflection, 0.0));

    gl_Position = u_mvpMatrix * a_position;
}
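The fragment shader then has to sample with the new varying; a minimal sketch matching the vertex shader above:

precision highp float;

uniform lowp samplerCube baseCubeMapTexture;

varying vec3 v_worldReflection; // world-space reflection from the vertex shader

void main()
{
    // A cube map lookup does not require a normalized direction vector.
    gl_FragColor = textureCube(baseCubeMapTexture, v_worldReflection);
}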

correct glsl affine texture mapping

I'm trying to code correct 2D affine texture mapping in GLSL.
Explanation:
...None of these images is correct for my purposes. The right one (labeled "Correct") has perspective correction, which I do not want. So this: Getting to know the Q texture coordinate solution (without further improvements) is not what I'm looking for.
I'd like to simply "stretch" the texture inside the quadrilateral, something like this:
but composed from two triangles. Any advice (GLSL), please?
This works well as long as you have a trapezoid, and its parallel edges are aligned with one of the local axes. I recommend playing around with my Unity package.
GLSL:
varying vec2 shiftedPosition, width_height;

#ifdef VERTEX
void main() {
    gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
    shiftedPosition = gl_MultiTexCoord0.xy; // left and bottom edges zeroed.
    width_height = gl_MultiTexCoord1.xy;
}
#endif

#ifdef FRAGMENT
uniform sampler2D _MainTex;
void main() {
    gl_FragColor = texture2D(_MainTex, shiftedPosition / width_height);
}
#endif
C#:
// Zero out the left and bottom edges,
// leaving a right trapezoid with two sides on the axes and a vertex at the origin.
var shiftedPositions = new Vector2[] {
    Vector2.zero,
    new Vector2(0, vertices[1].y - vertices[0].y),
    new Vector2(vertices[2].x - vertices[1].x, vertices[2].y - vertices[3].y),
    new Vector2(vertices[3].x - vertices[0].x, 0)
};
mesh.uv = shiftedPositions;

var widths_heights = new Vector2[4];
widths_heights[0].x = widths_heights[3].x = shiftedPositions[3].x;
widths_heights[1].x = widths_heights[2].x = shiftedPositions[2].x;
widths_heights[0].y = widths_heights[1].y = shiftedPositions[1].y;
widths_heights[2].y = widths_heights[3].y = shiftedPositions[2].y;
mesh.uv2 = widths_heights;
I recently managed to come up with a generic solution to this problem for any type of quadrilateral. The calculations and GLSL may be of help. There's a working demo in Java (it runs on Android), but it is compact and readable and should be easily portable to Unity or iOS: http://www.bitlush.com/posts/arbitrary-quadrilaterals-in-opengl-es-2-0
In case anyone's still interested, here's a C# implementation that takes a quad defined by the clockwise screen verts (x0,y0) (x1,y1) ... (x3,y3), an arbitrary pixel at (x,y) and calculates the u and v of that pixel. It was originally written to CPU-render an arbitrary quad to a texture, but it's easy enough to split the algorithm across CPU, Vertex and Pixel shaders; I've commented accordingly in the code.
float Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, A, B, C;

// These are all uniforms for a given quad. Calculate on CPU.
Ax = (x3 - x0) - (x2 - x1);
Bx = (x0 - x1);
Cx = (x2 - x1);
Dx = x1;

Ay = (y3 - y0) - (y2 - y1);
By = (y0 - y1);
Cy = (y2 - y1);
Dy = y1;

float ByCx_plus_AyDx_minus_BxCy_minus_AxDy = (By * Cx) + (Ay * Dx) - (Bx * Cy) - (Ax * Dy);
float ByDx_minus_BxDy = (By * Dx) - (Bx * Dy);
A = (Ay * Cx) - (Ax * Cy);

// These must be calculated per-vertex, and passed through as interpolated values to the pixel shader.
B = (Ax * y) + ByCx_plus_AyDx_minus_BxCy_minus_AxDy - (Ay * x);
C = (Bx * y) + ByDx_minus_BxDy - (By * x);

// These must be calculated per-pixel using the interpolated B, C and x from the vertex shader along with some of the other uniforms.
u = ((-B) - Mathf.Sqrt((B * B - (4.0f * A * C)))) / (A * 2.0f);
v = (x - (u * Cx) - Dx) / ((u * Ax) + Bx);
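As an illustration of that split, the per-pixel step could look roughly like this as a GLSL fragment shader, assuming B, C and the screen-space x arrive as interpolated varyings, the per-quad constants as uniforms, and a sampler I am calling quadTex (the names are carried over from, or invented around, the C# above, so treat this as a sketch rather than tested code):

uniform sampler2D quadTex;          // hypothetical texture to map onto the quad
uniform float A, Ax, Bx, Cx, Dx;    // per-quad constants, computed on the CPU

varying float B, C;                 // per-vertex values, interpolated by the rasterizer
varying float x;                    // screen-space x of the fragment

void main()
{
    // Solve the quadratic for u, then back-substitute for v (same formulas as in the C# code).
    float u = (-B - sqrt(B * B - 4.0 * A * C)) / (2.0 * A);
    float v = (x - u * Cx - Dx) / (u * Ax + Bx);
    gl_FragColor = texture2D(quadTex, vec2(u, v));
}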
Tessellation solves this problem: subdividing the quad adds vertices, which gives the interpolation more hints to work with.
Check out this link:
https://www.youtube.com/watch?v=8TleepxIORU&feature=youtu.be
I had a similar question ( https://gamedev.stackexchange.com/questions/174857/mapping-a-texture-to-a-2d-quadrilateral/174871 ), and at gamedev they suggested using an imaginary Z coordinate, which I calculate using the following C code. It appears to work in the general case (not just for trapezoids):
// usual euclidean distance
float distance(int ax, int ay, int bx, int by) {
    int x = ax - bx;
    int y = ay - by;
    return sqrtf((float)(x*x + y*y));
}

void gfx_quad(gfx_t *dst  // destination texture, we are rendering into
             ,gfx_t *src  // source texture
             ,int *quad   // quadrilateral vertices
             )
{
    int *v = quad; // quad vertices
    float z = 20.0;
    float top = distance(v[0],v[1],v[2],v[3]); // top
    float bot = distance(v[4],v[5],v[6],v[7]); // bottom
    float lft = distance(v[0],v[1],v[4],v[5]); // left
    float rgt = distance(v[2],v[3],v[6],v[7]); // right

    // By default all vertices lie on the screen plane
    float az = 1.0;
    float bz = 1.0;
    float cz = 1.0;
    float dz = 1.0;

    // Move Z away from the screen, based on distance ratios.
    if (top < bot) {
        az *= top/bot;
        bz *= top/bot;
    } else {
        cz *= bot/top;
        dz *= bot/top;
    }
    if (lft < rgt) {
        az *= lft/rgt;
        cz *= lft/rgt;
    } else {
        bz *= rgt/lft;
        dz *= rgt/lft;
    }

    // draw our quad as two textured triangles
    gfx_textured(dst, src
        , v[0],v[1],az, v[2],v[3],bz, v[4],v[5],cz
        , 0.0,0.0, 1.0,0.0, 0.0,1.0);
    gfx_textured(dst, src
        , v[2],v[3],bz, v[4],v[5],cz, v[6],v[7],dz
        , 1.0,0.0, 0.0,1.0, 1.0,1.0);
}
I'm doing it in software to scale and rotate 2D sprites; for an OpenGL 3D app you will need to do it in the pixel/fragment shader, unless you can map these imaginary az, bz, cz, dz into your actual 3D space and use the usual pipeline. DMGregory gave exact code for OpenGL shaders: https://gamedev.stackexchange.com/questions/148082/how-can-i-fix-zig-zagging-uv-mapping-artifacts-on-a-generated-mesh-that-tapers
I ran into this issue while trying to implement a homography warp in OpenGL. Some of the solutions that I found relied on a notion of depth, but that was not feasible in my case since I am working with 2D coordinates.
I based my solution on this article, and it seems to work for all cases that I could try. I am leaving it here in case it is useful for someone else as I could not find something similar. The solution makes the following assumptions:
The vertex coordinates are the 4 points of a quad in Lower Right, Upper Right, Upper Left, Lower Left order.
The coordinates are given in OpenGL's reference system (range [-1, 1], with origin at bottom left corner).
std::vector<cv::Point2f> points;

// Convert points to homogeneous coordinates to simplify the problem.
Eigen::Vector3f p0(points[0].x, points[0].y, 1);
Eigen::Vector3f p1(points[1].x, points[1].y, 1);
Eigen::Vector3f p2(points[2].x, points[2].y, 1);
Eigen::Vector3f p3(points[3].x, points[3].y, 1);

// Compute the intersection point between the lines described by opposite vertices using cross products.
// Normalization is only required at the end.
// See https://leimao.github.io/blog/2D-Line-Mathematics-Homogeneous-Coordinates/ for a quick summary of this approach.
auto line1 = p2.cross(p0);
auto line2 = p3.cross(p1);
auto intersection = line1.cross(line2);
intersection = intersection / intersection(2);

// Compute the distance from each point to the intersection.
std::vector<float> distances;
for (const auto &pt : points) {
    auto distance = std::sqrt(std::pow(pt.x - intersection(0), 2) +
                              std::pow(pt.y - intersection(1), 2));
    distances.push_back(distance);
}

// Assumes the same order as above.
std::vector<cv::Point2f> texture_coords_unnormalized = {
    {1.0f, 1.0f},
    {1.0f, 0.0f},
    {0.0f, 0.0f},
    {0.0f, 1.0f}
};

std::vector<float> texture_coords;
for (int i = 0; i < texture_coords_unnormalized.size(); ++i) {
    float u_i = texture_coords_unnormalized[i].x;
    float v_i = texture_coords_unnormalized[i].y;
    float d_i = distances.at(i);
    float d_i_2 = distances.at((i + 2) % 4);
    float scale = (d_i + d_i_2) / d_i_2;
    texture_coords.push_back(u_i * scale);
    texture_coords.push_back(v_i * scale);
    texture_coords.push_back(scale);
}
Pass the texture coordinates to your shader (use vec3). Then:
gl_FragColor = vec4(texture2D(textureSampler, textureCoords.xy/textureCoords.z).rgb, 1.0);
Thanks for the answers, but after experimenting I found a solution.
The two triangles on the left have UV (strq) coordinates according to this, and the two triangles on the right are a modified version of this perspective correction.
Numbers and shader:
tri1 = [Vec2(-0.5, -1), Vec2(0.5, -1), Vec2(1, 1)]
tri2 = [Vec2(-0.5, -1), Vec2(1, 1), Vec2(-1, 1)]
d1 = length of top edge = 2
d2 = length of bottom edge = 1
tri1_uv = [Vec4(0, 0, 0, d2 / d1), Vec4(d2 / d1, 0, 0, d2 / d1), Vec4(1, 1, 0, 1)]
tri2_uv = [Vec4(0, 0, 0, d2 / d1), Vec4(1, 1, 0, 1), Vec4(0, 1, 0, 1)]
Only the triangles on the right are rendered using this GLSL shader (the ones on the left use the fixed pipeline):
void main()
{
    gl_FragColor = texture2D(colormap, vec2(gl_TexCoord[0].x / gl_TexCoord[0].w, gl_TexCoord[0].y));
}
So only U is perspective-corrected, while V stays linear.

GLSL: Move transformation from tessellation-evaluation shader to vertex shader

I am working in GLSL with tessellation shaders and I am trying to do displacement mapping. It works, but I want to move the matrix transformation code from the tessellation evaluation shader to the vertex shader. I want it in the vertex shader because I do not want to do this calculation for every subdivided triangle's vertices, and because I want the vertices to be in screen space in the vertex shader so I can decide in the tessellation control shader how much each triangle should be subdivided.
The version that does not work is "almost" working; there are some issues in how the triangles are rendered.
I would really appreciate even the smallest hint of what may be wrong.
This (undesired) version works (position and normal are transformed in the tessellation evaluation shader):
// vertex shader
void main_(void)
{
    gl_Position = VertexPosition;
    VertexTexCoord1 = VertexTexCoord;
    VertexNormal1 = VertexNormal;
}

// tessellation evaluation shader
void main_()
{
    VertexTexCoord3 = interpolate(VertexTexCoord2);
    vec3 normal = interpolate(VertexNormal2);
    vec4 pos = interpolate(gl_in[0].gl_Position, gl_in[1].gl_Position, gl_in[2].gl_Position);
    vec4 movement = vec4(normal * (texture2D(heigthMap, VertexTexCoord3).r), 0.0);
    gl_Position = mvpMatrix * (pos + movement);
}
This version does not work (position and normal are transformed in the vertex shader):
// vertex shader
void main(void)
{
    gl_Position = mvpMatrix * VertexPosition;
    VertexTexCoord1 = VertexTexCoord;
    VertexNormal1 = mat3(mvpMatrix) * VertexNormal;
}

// tessellation evaluation shader
void main()
{
    VertexTexCoord3 = interpolate(VertexTexCoord2);
    vec3 normal = interpolate(VertexNormal2);
    vec4 pos = interpolate(gl_in[0].gl_Position, gl_in[1].gl_Position, gl_in[2].gl_Position);
    vec4 movement = vec4(normal * (texture2D(heigthMap, VertexTexCoord3).r), 0.0);
    gl_Position = (pos + movement);
}
In the "non-working" version the last line in tesselation shader seems to be incorrect. You're forgetting that in the source variant you had 'movement' multiplied by the mvpMatrix.
I would have tried to use this:
// tessellation evaluation shader
void main()
{
    VertexTexCoord3 = interpolate(VertexTexCoord2);
    vec3 normal = interpolate(VertexNormal2);
    vec4 pos = interpolate(gl_in[0].gl_Position, gl_in[1].gl_Position, gl_in[2].gl_Position);
    vec4 movement = vec4(normal * (texture2D(heigthMap, VertexTexCoord3).r), 0.0);
    // This multiplication by mvpMatrix is inevitable
    gl_Position = (pos + mvpMatrix * movement);
}
Sorry if I mixed up the order of the stages, but the two versions above are definitely not equivalent.

Glsl mod vs Hlsl fmod

I've implemented the spiral GLSL shader described in this question in HLSL, but the results are not the same. I think it's because of the mod function in GLSL that I've translated to fmod in HLSL. I suspect that this problem only happens when we have negative numbers in the input of the fmod function.
I've tried replacing the call to mod by a call to a function that I've made which does what is described in the GLSL documentation and it works:
mod returns the value of x modulo y. This is computed as x - y * floor(x/y).
The working code I use instead of fmod is:
float mod(float x, float y)
{
    return x - y * floor(x/y);
}
By contrast to GLSL mod, MSDN says the HLSL fmod function does this:
The floating-point remainder is calculated such that x = i * y + f, where i is an integer, f has the same sign as x, and the absolute value of f is less than the absolute value of y.
I've used an HLSL to GLSL converter, and the fmod function is translated as mod. However, I don't know if I can assume that mod translates to fmod.
Questions
What are the differences between GLSL mod and HLSL fmod?
How can I translate MSDN's cryptic description of fmod to a pseudo-code implementation?
GLSL Shader
uniform float time;
uniform vec2 resolution;
uniform vec2 aspect;

void main( void ) {
    vec2 position = -aspect.xy + 2.0 * gl_FragCoord.xy / resolution.xy * aspect.xy;
    float angle = 0.0;
    float radius = length(position);
    if (position.x != 0.0 && position.y != 0.0) {
        angle = degrees(atan(position.y, position.x));
    }
    float amod = mod(angle + 30.0*time - 120.0*log(radius), 30.0);
    if (amod < 15.0) {
        gl_FragColor = vec4( 0.0, 0.0, 0.0, 1.0 );
    } else {
        gl_FragColor = vec4( 1.0, 1.0, 1.0, 1.0 );
    }
}
HLSL Shader
struct Psl_VertexShaderInput
{
    float3 pos : POSITION;
};

struct Psl_VertexShaderOutput
{
    float4 pos : POSITION;
};

struct Psl_PixelShaderOutput
{
    float4 Output0 : COLOR0;
};

float3 psl_positionOffset;
float2 psl_dimension;

Psl_VertexShaderOutput Psl_VertexShaderFunction(Psl_VertexShaderInput psl_input)
{
    Psl_VertexShaderOutput psl_output = (Psl_VertexShaderOutput)0;
    psl_output.pos = float4(psl_input.pos + psl_positionOffset, 1);
    return psl_output;
}

float time : TIME;
float2 resolution : DIMENSION;

Psl_PixelShaderOutput Psl_PixelShaderFunction(float2 pos : VPOS)
{
    Psl_PixelShaderOutput psl_output = (Psl_PixelShaderOutput)0;
    float2 aspect = float2(resolution.x / resolution.y, 1.0);
    float2 position = -aspect.xy + 2.0 * pos.xy / resolution.xy * aspect.xy;
    float angle = 0.0;
    float radius = length(position);
    if (position.x != 0.0 && position.y != 0.0)
    {
        angle = degrees(atan2(position.y, position.x));
    }
    float amod = fmod((angle + 30.0 * time - 120.0 * log(radius)), 30.0);
    if (amod < 15.0)
    {
        psl_output.Output0 = float4(0.0, 0.0, 0.0, 1.0);
        return psl_output;
    }
    else
    {
        psl_output.Output0 = float4(1.0, 1.0, 1.0, 1.0);
        return psl_output;
    }
}

technique Default
{
    pass P0
    {
        VertexShader = compile vs_3_0 Psl_VertexShaderFunction();
        PixelShader = compile ps_3_0 Psl_PixelShaderFunction();
    }
}
As you've noted, they're different. The GLSL mod will always have the same sign as y rather than x. Otherwise it's the same -- a value f such that x = i*y + f where i is an integer and |f| < |y|. If you're trying to make a repeating pattern of some kind, the GLSL mod is generally what you want.
For comparison, the HLSL fmod is equivalent to x - y * trunc(x/y). They're the same when x/y is positive, different when x/y is negative.
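A concrete example of the difference: for x = -5 and y = 3, GLSL mod(-5.0, 3.0) = -5 - 3*floor(-5/3) = -5 - 3*(-2) = 1, while HLSL fmod(-5.0, 3.0) = -5 - 3*trunc(-5/3) = -5 - 3*(-1) = -2. In the spiral shader the argument of mod does go negative (the angle ranges over [-180, 180] degrees and the log term changes sign at radius = 1), which is exactly where the two functions diverge.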
