Lighting in Fragment Shader by calculating normals inside fragment shader

Hi,

Scenario 1:

  1. I am creating a 256x256 grid. Calculating normals for each vertex in grid on cpu. Sending both vertices and normal data using Opengl calls.
  2. Enabling lighting. Setting ambient, diffuse and specular values for both light source and material.

Scenario 2:

I am trying to write a Fragment Shader for lighting calculation.
I am not using a vertex shader, not even a pass-through vertex shader. Basically, I am using the fixed pipeline combined with the programmable pipeline.

Inputs I have for Fragment Shader:

  1. 2D Texture of size 256x256 (Each value represents a depth) (Its not a normal map)
  2. INC (increment value). In this case 1/256.

And here is the code:


#version 330 compatibility

uniform int textureSize;     // height-field dimension in texels (e.g. 256)
uniform float inc;           // texel step; stated to be 1/256 for a 256 grid
uniform sampler2D zgrid;     // height field; each value represents a depth

// Pre-multiplied light * material reflectance terms for one directional light.
const vec4 ambient = vec4(0.02f, 0.02f, 0.02f, 1.0f);    // product of light and material ambient reflectance
const vec4 diffuse = vec4(0.64f, 0.64f, 0.64f, 1.0f);    // product of light and material diffuse reflectance
const float shininess = 20.0f;                           // material shininess (specular exponent)
const vec4 specular = vec4(0.2f, 0.2f, 0.2f, 1.0f);      // product of light and material specular reflectance
const vec3 viewDir = vec3(0.0f, 0.0f, 1.0f);
// BUG FIX: "0f" is not a valid GLSL floating-point literal (the spec requires
// a fraction or exponent before the f suffix); use 0.0f / 1.0f.
const vec3 lightd = vec3(0.0f, 0.0f, 1.0f);

// Builds a vertex normal at p0 by summing the (area-weighted) face normals
// of the four triangles formed with the ring of neighbours p1..p4, then
// normalising the result.
vec3 getNormalPerVertex(in vec3 p0, in vec3 p1, in vec3 p2, in vec3 p3, in vec3 p4)
{
  vec3 e1 = p0 - p1;
  vec3 e2 = p0 - p2;
  vec3 e3 = p0 - p3;
  vec3 e4 = p0 - p4;

  // Accumulate the un-normalised normals of the four incident triangles.
  vec3 accum = cross(e1, e2);
  accum += cross(e2, e3);
  accum += cross(e3, e4);
  accum += cross(e4, e1);

  return normalize(accum);
}

// Bilinear interpolation of the four corner values over the rectangle
// (x1,y1)-(x2,y2), evaluated at (x,y).
//   x,y           : sample point
//   x1,y1 / x2,y2 : lower-left / upper-right corners of the cell
//   x?y?val       : value at the corresponding corner
float getLinearInterpolatedValue(in float x, in float y, in float x1, in float y1, in float x2, in float y2, in float x1y1val, in float x2y1val, in float x1y2val, in float x2y2val)
{
  float v1 = x1y1val * (x2-x) * (y2-y);
  float v2 = x2y1val * (x-x1) * (y2-y);
  float v3 = x1y2val * (x2-x) * (y-y1);
  float v4 = x2y2val * (x-x1) * (y-y1);
  
  // BUG FIX: the original wrote (1/(x2-x1)*(y2-y1)), which parses as
  // ((1/(x2-x1)) * (y2-y1)) — it MULTIPLIES by (y2-y1) instead of dividing.
  // That is only correct by accident when the cell is exactly 1x1.
  float interpolatedValue = (v1 + v2 + v3 + v4) / ((x2 - x1) * (y2 - y1));
  return interpolatedValue;
}

// Blinn-Phong specular contribution; returns black (alpha 1) when the
// surface faces away from the light.
vec4 getSpecularColor(in vec3 normalVector)
{
    vec4 result = vec4(0.0f, 0.0f, 0.0f, 1.0f);
    float nDotL = dot(normalVector, normalize(lightd));
    if (nDotL > 0.0f)
    {
      vec3 halfVec = normalize(lightd + viewDir);
      float nDotH = max(0.0, dot(halfVec, normalVector));
      result = specular * pow(nDotH, shininess);
    }
    return result;
}

// Constant ambient term plus Lambertian diffuse for the directional light.
vec4 getAmbientDiffuse(in vec3 normalVector)
{
  float lambert = max(0.0f, dot(normalize(lightd), normalVector));
  vec4 diffuseLight = diffuse * lambert;
  return ambient + diffuseLight;
}

// Computes the lit colour for the current fragment: reconstructs the four
// height-field vertices of the cell the fragment lies in, builds a normal at
// each from its 4-neighbourhood, lights each vertex, then bilinearly
// interpolates the per-vertex colours (mimics fixed-function Gouraud shading).
vec4 getFinalColor()
{
  float x = gl_TexCoord[0].x;
  float y = gl_TexCoord[0].y;
  
  //get 4 near coordinates in texel space
  vec2 c1 = vec2(x+inc, y);
  vec2 c2 = vec2(x, y+inc);
  vec2 c3 = vec2(x-inc, y);
  vec2 c4 = vec2(x, y-inc);
  
  //convert co-ordinates from texel space to co-ordinates of cell in geometry space in which current pixel resides 
  vec2 gp1 = vec2(floor(c1.x * textureSize), ceil(c1.y * textureSize));
  vec2 gp2 = vec2(floor(c2.x * textureSize), floor(c2.y * textureSize));
  vec2 gp3 = vec2(ceil(c3.x * textureSize), floor(c3.y * textureSize));
  vec2 gp4 = vec2(ceil(c4.x * textureSize), ceil(c4.y * textureSize));
  
  //ring of neighbouring grid points around the cell, needed for the
  //4-neighbourhood normal at each cell corner
  vec2 gp5 = vec2(gp1.x+1, gp1.y);
  vec2 gp6 = vec2(gp1.x, gp1.y+1);
  vec2 gp7 = vec2(gp2.x, gp2.y+1);
  vec2 gp8 = vec2(gp2.x-1, gp2.y);
  vec2 gp9 = vec2(gp3.x-1, gp3.y);
  vec2 gp10 = vec2(gp3.x, gp3.y-1);
  vec2 gp11 = vec2(gp4.x, gp4.y-1);
  vec2 gp12 = vec2(gp4.x+1, gp4.y);
  
  //convert back to texel space
  c1 = gp1/textureSize;
  c2 = gp2/textureSize;
  c3 = gp3/textureSize;
  c4 = gp4/textureSize;
  vec2 c5 = gp5/textureSize;
  vec2 c6 = gp6/textureSize;
  vec2 c7 = gp7/textureSize;
  vec2 c8 = gp8/textureSize;
  vec2 c9 = gp9/textureSize;
  vec2 c10 = gp10/textureSize;
  vec2 c11 = gp11/textureSize;
  vec2 c12 = gp12/textureSize;
  
  //Get z values.
  //BUG FIX: texture() returns a vec4 — assigning it directly to a float is
  //invalid GLSL. The height lives in the red channel (R32F), so take .x.
  float z1 = texture(zgrid,c1).x;
  float z2 = texture(zgrid,c2).x;
  float z3 = texture(zgrid,c3).x;
  float z4 = texture(zgrid,c4).x;
  float z5 = texture(zgrid,c5).x;
  float z6 = texture(zgrid,c6).x;
  float z7 = texture(zgrid,c7).x;
  float z8 = texture(zgrid,c8).x;
  float z9 = texture(zgrid,c9).x;
  float z10 = texture(zgrid,c10).x;
  float z11 = texture(zgrid,c11).x;
  float z12 = texture(zgrid,c12).x;
  
  //create points for normal calculation (xy in texel space, z from texture)
  vec3 p1 = vec3(c1, z1);
  vec3 p2 = vec3(c2, z2);
  vec3 p3 = vec3(c3, z3);
  vec3 p4 = vec3(c4, z4);
  vec3 p5 = vec3(c5, z5);
  vec3 p6 = vec3(c6, z6);
  vec3 p7 = vec3(c7, z7);
  vec3 p8 = vec3(c8, z8);
  vec3 p9 = vec3(c9, z9);
  vec3 p10 = vec3(c10, z10);
  vec3 p11 = vec3(c11, z11);
  vec3 p12 = vec3(c12, z12);

  //one normal per cell corner, each from its ring of four neighbours
  vec3 normal1 = getNormalPerVertex(p1, p5, p6, p2, p4);
  vec3 normal2 = getNormalPerVertex(p2, p1, p7, p8, p3);
  vec3 normal3 = getNormalPerVertex(p3, p4, p2, p9, p10);
  vec3 normal4 = getNormalPerVertex(p4, p12, p1, p3, p11);
  
  //light each corner; the vec4(0.04,...) is a constant ambient/emission offset
  vec4 lightcolor1 = getAmbientDiffuse(normal1) + getSpecularColor(normal1) + vec4(0.04, 0.04, 0.04, 1);
  vec4 lightcolor2 = getAmbientDiffuse(normal2) + getSpecularColor(normal2) + vec4(0.04, 0.04, 0.04, 1);
  vec4 lightcolor3 = getAmbientDiffuse(normal3) + getSpecularColor(normal3) + vec4(0.04, 0.04, 0.04, 1);
  vec4 lightcolor4 = getAmbientDiffuse(normal4) + getSpecularColor(normal4) + vec4(0.04, 0.04, 0.04, 1);
  
  //bilinearly interpolate the four corner colours at the fragment position
  float interpolatedR = getLinearInterpolatedValue(x*textureSize, y*textureSize, gp3.x, gp3.y, gp1.x, gp1.y, lightcolor3.r, lightcolor4.r, lightcolor2.r, lightcolor1.r);
  float interpolatedG = getLinearInterpolatedValue(x*textureSize, y*textureSize, gp3.x, gp3.y, gp1.x, gp1.y, lightcolor3.g, lightcolor4.g, lightcolor2.g, lightcolor1.g);
  float interpolatedB = getLinearInterpolatedValue(x*textureSize, y*textureSize, gp3.x, gp3.y, gp1.x, gp1.y, lightcolor3.b, lightcolor4.b, lightcolor2.b, lightcolor1.b);
  
  return vec4(interpolatedR, interpolatedG, interpolatedB, 1);
}


void main() {
  // All shading work happens in getFinalColor(); just write the result out.
  vec4 shaded = getFinalColor();
  gl_FragColor = shaded;
}

But i am not getting good results like fixed pipeline lighting.
I don't want to use a normal map (because it would have to be computed on the CPU).

Inputs to vertex shader are four corner points of grid.Not the entire grid. I dont want to send all points in grid as well. only four corner points.

If anyone has any idea or finds something wrong with this code please respond. Dont suggest normal maps, per vertex normal calculation.

These 4 points form a diamond. You probably want a square. E.g.


vec2 xy = gl_TexCoord[0].xy * textureSize;
ivec2 gp1 = ivec2(floor(xy));
ivec2 gp2 = gp1 + ivec2(1,0);
ivec2 gp3 = gp1 + ivec2(0,1);
ivec2 gp4 = gp1 + ivec2(1,1);

This shouldn’t even compile. The texture() function returns a vec4, but you’re assigning it to a float. You need texture(...).x.

But you might be better off using textureGather() for this. Or texelFetchOffset().

I am getting square from those diamond points later.

//convert co-ordinates from texel space to co-ordinates of cell in geometry space in which current pixel resides
vec2 gp1 = vec2(floor(c1.x * textureSize), ceil(c1.y * textureSize));
vec2 gp2 = vec2(floor(c2.x * textureSize), floor(c2.y * textureSize));
vec2 gp3 = vec2(ceil(c3.x * textureSize), floor(c3.y * textureSize));
vec2 gp4 = vec2(ceil(c4.x * textureSize), ceil(c4.y * textureSize));

The texture() function call does compile; I am not sure why. Thanks for pointing that out. But even accessing texture(...).x gives the same result for me.

texture uses R32F as Internal format and GL.RED as type.

One issue (which probably doesn’t make much difference in practice, but should still be avoided) is the use of ceil(). If x is an integer, floor(x) and ceil(x) will be identical. Use floor(x) and floor(x)+1 instead.

The first question is whether the height values have the correct scale. Adjacent samples will be 1/256 units apart in X and Y, so a difference in Z (texture value) of 1/256 would result in a slope of 45 degrees. Are your texture values scaled correctly for that? (If the texture was a more typical 8-bit normalised format, where values range from 0 to 1 in steps of 1/255, the slope would be far too high, but you say that you’re using GL_R32F, so maybe this is correct).

The other issue is that you’re calculating a colour at each texel corner and then interpolating the colours. It’s more common to interpolate (and re-normalise) the normals then calculate a single colour from that.

If the texture filter is GL_NEAREST, you’re sampling at the worst possible point, as the smallest rounding errors can change which texel is sampled. If the filter is GL_LINEAR, then each sample will be the average of the four surrounding texels, which will effectively perform a low-pass filter on the texture (this may be desired, I don’t know).

Also: is this for a flat surface? Otherwise, the normals need to be transformed from tangent space to eye space (or whatever space viewDir and lightDir are in).

Other than that, it’s hard to give advice when you don’t actually describe the problem beyond

But i am not getting good results like fixed pipeline lighting.

Here is the detail problem:

Scenario 1:

  1. I have a grid of size say m x n. So there are m x n vertices. Each vertex has three co-ordinates x, y, z.
  2. Based on this information, i am calculating normals for each vertex in grid(z values are different for each vertex, so its not a flat surface…you can call it a heightfield).
  3. Then i am setting light and material properties. But when i am sending vertex data, i am sending only x,y values for each vertex. So default z values for each vertex are used i.e 0.
  4. Pre-calculated vec3 normals(calculated on basis of x,y,z values) are sent.

Scenario 2:

  1. I have a grid of size m x n. I am extracting z values from this grid and creating a texture.
  2. Only four corner points of this grid are sent to opengl(Again only x,y co-ordinates of four corner vertices).
  3. Texture is of type R32F and LINEAR.

So basically, all data that i have sent in both cases has z value as 0 but normals are calculated based on x,y,z values.
Light and View direction are in negative z direction (0, 0, 1). Light computation is performed in view space. In view space opengl uses right handed co-ordinate system. Hence, setting Light and View direction to point in negative z direction.
(I am using directional light, not point light).

Now, the normals that i am computing are using x,y texture co-ordinates and corresponding z values. So I assume my normal calculation to be in texel space(Am i right? I am not sure…).
Light and View Direction as said just pointing in negative z direction.(I dont know they are in which space but i think they should be in view space).

Do i need to convert these light and view direction to texel space as well?

I am interpolating colors because this is how fixed pipeline works. It calculates light at each vertex and then interpolates. And I want to mimic the fixed pipeline.

Thanks.

They’re in “surface space”, i.e. areas of constant height will have a normal of (0,0,1).

The vectors need to be correct given the orientation of the surface.

So you’re basically trying to mimic a height field using a single quad rather than using a vertex for each grid point?

In that case, the main thing that’s wrong is the fact that the texture sampling is taking the average of 4 texels rather than using the texel values directly.

When the filter is GL_LINEAR, sampling at the centre of a texel will yield the value for that texel. Sampling anywhere else (other than near the edges) will yield the result of bilinear interpolation between the surrounding texel values. When sampling at a location less than half a texel from the edge of a texture, the behaviour depends upon the wrap mode, as the interpolation somehow needs to obtain a value for the texel beyond the edge.

To obtain the texel values directly, you need to offset the texture coordinate by half a texel (i.e. inc/2) in each direction.

They’re in “surface space”, i.e. areas of constant height will have a normal of (0,0,1).

They don’t have constant height. Z value for each point is different. So height has to be different, right?

I was able to get the example working i.e mimic opengl fixed pipeline lighting.
I get same results now in both cases. But now there is other problem. This works only for square textures.
If i use a texture of size 256 by 128, results are not same.

Here is the final code:


#version 330 compatibility

precision highp float;

// Height-field dimensions in texels (may be non-square, e.g. 256x128).
uniform int textureSizex;
uniform int textureSizey;
// Texel steps in each axis — presumably 1/textureSizex and 1/textureSizey
// (the square-texture version used inc = 1/256); confirm against caller.
uniform float xinc;
uniform float yinc;
// Height field; R32F, so the depth value is in the red channel.
uniform sampler2D zgrid;
// Transforms reconstructed surface-space normals for lighting.
uniform mat3 normalMatrix;

// Per-light colour terms (fixed-function GL_LIGHT equivalents).
uniform vec4 lightAmbient;
uniform vec4 lightDiffuse;
uniform vec4 lightSpecular;

// Material reflectance terms (fixed-function glMaterial equivalents).
uniform vec4 materialAmbient;
uniform vec4 materialDiffuse;
uniform vec4 materialSpecular;
uniform float materialShininess;

// Emission and global (light-model) ambient contributions.
uniform vec4 materialEmissionIntensity;
uniform vec4 lightModelAmbientIntensity;

// Directional light and constant view direction used for shading.
uniform vec3 lightDirection;
uniform vec3 viewDirection;

// Builds a vertex normal at p0 by averaging the UNIT face normals of the
// four triangles formed with the ring of neighbours p1..p4, then mapping
// the result through the normal matrix.
vec3 getNormalPerVertex(in vec3 p0, in vec3 p1, in vec3 p2, in vec3 p3, in vec3 p4)
{
  vec3 trivct1 = p0 - p1;
  vec3 trivct2 = p0 - p2;
  vec3 trivct3 = p0 - p3;
  vec3 trivct4 = p0 - p4;
  
  vec3 trin1 = cross(trivct1,trivct2);
  vec3 trin2 = cross(trivct2,trivct3);
  vec3 trin3 = cross(trivct3,trivct4);
  vec3 trin4 = cross(trivct4,trivct1);
  
  // Normalising each face normal before summing gives every triangle equal
  // weight regardless of area (unlike the earlier area-weighted version).
  vec3 normalVector = normalize(trin1) + normalize(trin2) + normalize(trin3) + normalize(trin4);
  // NOTE(review): vector * matrix multiplies by transpose(normalMatrix);
  // the conventional normal transform is normalMatrix * normalVector —
  // confirm this ordering is intended (it is equivalent only when the
  // matrix is symmetric/orthonormal-diagonal).
  return normalize(normalVector * normalMatrix);
}

// Bilinear interpolation of the four corner values over the rectangle
// (x1,y1)-(x2,y2), evaluated at (x,y).
//   x,y           : sample point
//   x1,y1 / x2,y2 : lower-left / upper-right corners of the cell
//   x?y?val       : value at the corresponding corner
float getLinearInterpolatedValue(in float x, in float y, in float x1, in float y1, in float x2, in float y2, in float x1y1val, in float x2y1val, in float x1y2val, in float x2y2val)
{
  float v1 = x1y1val * (x2-x) * (y2-y);
  float v2 = x2y1val * (x-x1) * (y2-y);
  float v3 = x1y2val * (x2-x) * (y-y1);
  float v4 = x2y2val * (x-x1) * (y-y1);
  
  // BUG FIX: the original wrote (1/(x2-x1)*(y2-y1)), which parses as
  // ((1/(x2-x1)) * (y2-y1)) — multiplying by (y2-y1) instead of dividing.
  // It is only correct by accident for a 1x1 cell.
  float interpolatedValue = (v1 + v2 + v3 + v4) / ((x2 - x1) * (y2 - y1));
  return interpolatedValue;
}

// Blinn-Phong specular contribution. The exponent is divided by 4 to
// approximate the equivalent Phong exponent when using the half vector.
// Returns black (alpha 1) when the surface faces away from the light.
vec4 getSpecularColor(in vec3 normalVector)
{
    vec4 outColor = vec4(0.0f, 0.0f, 0.0f, 1.0f);
    float nDotL = dot(normalVector, normalize(lightDirection));
    if (nDotL > 0.0f)
    {
      vec3 halfVec = normalize(lightDirection + viewDirection);
      float specFactor = pow(max(0.0, dot(halfVec, normalVector)), materialShininess/4);
      outColor = (lightSpecular * materialSpecular) * specFactor;
    }
    return outColor;
}

// Ambient term plus Lambertian diffuse, both modulated by the material.
vec4 getAmbientDiffuse(in vec3 normalVector)
{
  vec4 ambientTerm = lightAmbient * materialAmbient;
  float lambert = max(0.0f, dot(normalize(lightDirection), normalVector));
  vec4 diffuseTerm = (lightDiffuse * materialDiffuse) * lambert;
  return ambientTerm + diffuseTerm;
}

// Final (working) version for possibly non-square textures: reconstructs the
// four grid vertices of the cell containing the fragment, builds a normal at
// each from its ring of neighbours, lights each vertex with full
// light/material uniforms, and bilinearly interpolates the vertex colours —
// mimicking fixed-function (Gouraud) per-vertex lighting.
vec4 getFinalColor()
{
  float x = gl_TexCoord[0].x;
  float y = gl_TexCoord[0].y;
  
  //get 4 near coordinates in texel space
  vec2 c1 = vec2(x+xinc  , y );
  vec2 c2 = vec2(x, y+yinc);
  vec2 c3 = vec2(x-xinc, y);
  vec2 c4 = vec2(x, y-yinc);
   
  //convert co-ordinates from texel space to co-ordinates of cell in geometry space in which current pixel resides 
  // (floor(x)+1 is used instead of ceil(x) so integer x still maps to the next grid line)
  vec2 gp1 = vec2(floor(c1.x * textureSizex), floor(c1.y * textureSizey)+1);
  vec2 gp2 = vec2(floor(c2.x * textureSizex), floor(c2.y * textureSizey));
  vec2 gp3 = vec2(floor(c3.x * textureSizex)+1, floor(c3.y * textureSizey));
  vec2 gp4 = vec2(floor(c4.x * textureSizex)+1, floor(c4.y * textureSizey)+1);
  
  // ring of neighbouring grid points around the cell, needed for the
  // 4-neighbourhood normal at each cell corner
  vec2 gp5 = vec2(gp1.x+1, gp1.y);
  vec2 gp6 = vec2(gp1.x, gp1.y+1);
  vec2 gp7 = vec2(gp2.x, gp2.y+1);
  vec2 gp8 = vec2(gp2.x-1, gp2.y);
  vec2 gp9 = vec2(gp3.x-1, gp3.y);
  vec2 gp10 = vec2(gp3.x, gp3.y-1);
  vec2 gp11 = vec2(gp4.x, gp4.y-1);
  vec2 gp12 = vec2(gp4.x+1, gp4.y);
  
  //convert back to texel space (per-axis division handles non-square textures)
  c1 = vec2(gp1.x/textureSizex, gp1.y/textureSizey);
  c2 = vec2(gp2.x/textureSizex, gp2.y/textureSizey);
  c3 = vec2(gp3.x/textureSizex, gp3.y/textureSizey);
  c4 = vec2(gp4.x/textureSizex, gp4.y/textureSizey);
  
  vec2 c5 = vec2(gp5.x/textureSizex, gp5.y/textureSizey);
  vec2 c6 = vec2(gp6.x/textureSizex, gp6.y/textureSizey);
  vec2 c7 = vec2(gp7.x/textureSizex, gp7.y/textureSizey);
  vec2 c8 = vec2(gp8.x/textureSizex, gp8.y/textureSizey);
  vec2 c9 = vec2(gp9.x/textureSizex, gp9.y/textureSizey);
  vec2 c10 = vec2(gp10.x/textureSizex, gp10.y/textureSizey);
  vec2 c11 = vec2(gp11.x/textureSizex, gp11.y/textureSizey);
  vec2 c12 = vec2(gp12.x/textureSizex, gp12.y/textureSizey);
  
  // Offset every coordinate by half a texel so that, with GL_LINEAR
  // filtering, each sample lands on a texel centre and yields that texel's
  // value directly instead of an average of four neighbours.
  c1.x = c1.x + xinc/2;
  c2.x = c2.x + xinc/2;
  c3.x = c3.x + xinc/2;
  c4.x = c4.x + xinc/2;
  
  c5.x = c5.x + xinc/2;
  c6.x = c6.x + xinc/2;
  c7.x = c7.x + xinc/2;
  c8.x = c8.x + xinc/2;
  c9.x = c9.x + xinc/2;
  c10.x = c10.x + xinc/2;
  c11.x = c11.x + xinc/2;
  c12.x = c12.x + xinc/2;
  
  c1.y = c1.y + yinc/2;
  c2.y = c2.y + yinc/2;
  c3.y = c3.y + yinc/2;
  c4.y = c4.y + yinc/2;
  
  c5.y = c5.y + yinc/2;
  c6.y = c6.y + yinc/2;
  c7.y = c7.y + yinc/2;
  c8.y = c8.y + yinc/2;
  c9.y = c9.y + yinc/2;
  c10.y = c10.y + yinc/2;
  c11.y = c11.y + yinc/2;
  c12.y = c12.y + yinc/2;
  
  //Get z values (R32F texture: the height lives in the red channel, hence .x)
  float z1 = texture(zgrid,c1).x;
  float z2 = texture(zgrid,c2).x;
  float z3 = texture(zgrid,c3).x;
  float z4 = texture(zgrid,c4).x;
  float z5 = texture(zgrid,c5).x;
  float z6 = texture(zgrid,c6).x;
  float z7 = texture(zgrid,c7).x;
  float z8 = texture(zgrid,c8).x;
  float z9 = texture(zgrid,c9).x;
  float z10 = texture(zgrid,c10).x;
  float z11 = texture(zgrid,c11).x;
  float z12 = texture(zgrid,c12).x;
  
  //create points for normal calculation (xy in grid-index space, z from texture)
  vec3 p1 = vec3(gp1, z1);
  vec3 p2 = vec3(gp2, z2);
  vec3 p3 = vec3(gp3, z3);
  vec3 p4 = vec3(gp4, z4);
  vec3 p5 = vec3(gp5, z5);
  vec3 p6 = vec3(gp6, z6);
  vec3 p7 = vec3(gp7, z7);
  vec3 p8 = vec3(gp8, z8);
  vec3 p9 = vec3(gp9, z9);
  vec3 p10 = vec3(gp10, z10);
  vec3 p11 = vec3(gp11, z11);
  vec3 p12 = vec3(gp12, z12);

  // one normal per cell corner, each from its ring of four neighbours
  vec3 normal1 = getNormalPerVertex(p1, p5, p6, p2, p4);
  vec3 normal2 = getNormalPerVertex(p2, p1, p7, p8, p3);
  vec3 normal3 = getNormalPerVertex(p3, p4, p2, p9, p10);
  vec3 normal4 = getNormalPerVertex(p4, p12, p1, p3, p11);
  
  // full fixed-function colour sum: ambient+diffuse, specular, emission,
  // and the global light-model ambient term
  vec4 lightcolor1 = getAmbientDiffuse(normal1) + getSpecularColor(normal1) + materialEmissionIntensity + (materialAmbient * lightModelAmbientIntensity);
  vec4 lightcolor2 = getAmbientDiffuse(normal2) + getSpecularColor(normal2) + materialEmissionIntensity + (materialAmbient * lightModelAmbientIntensity);
  vec4 lightcolor3 = getAmbientDiffuse(normal3) + getSpecularColor(normal3) + materialEmissionIntensity + (materialAmbient * lightModelAmbientIntensity);
  vec4 lightcolor4 = getAmbientDiffuse(normal4) + getSpecularColor(normal4) + materialEmissionIntensity + (materialAmbient * lightModelAmbientIntensity);
  
  // bilinearly interpolate the four corner colours at the fragment position
  float interpolatedR = getLinearInterpolatedValue(x*textureSizex, y*textureSizey, gp3.x, gp3.y, gp1.x, gp1.y, lightcolor3.r, lightcolor4.r, lightcolor2.r, lightcolor1.r);
  float interpolatedG = getLinearInterpolatedValue(x*textureSizex, y*textureSizey, gp3.x, gp3.y, gp1.x, gp1.y, lightcolor3.g, lightcolor4.g, lightcolor2.g, lightcolor1.g);
  float interpolatedB = getLinearInterpolatedValue(x*textureSizex, y*textureSizey, gp3.x, gp3.y, gp1.x, gp1.y, lightcolor3.b, lightcolor4.b, lightcolor2.b, lightcolor1.b);
  
  return vec4(interpolatedR, interpolatedG, interpolatedB, 1);
}


void main() {
  // All shading work happens in getFinalColor(); just write the result out.
  vec4 finalColor = getFinalColor();
  gl_FragColor = finalColor;
}

This topic was automatically closed 183 days after the last reply. New replies are no longer allowed.