What is the correct way to convert from an unsigned int texture to a normalized float and back again?
As a test, I am currently trying to render an unsigned int texture to a standard RGB context. The following is working, but it doesn't feel right.
Relevant Draw Code:
ShaderPropertySetter.SetUniform(gl, "uTexture_us2", 0);
ShaderPropertySetter.SetUniform(gl, "maxIntensity_u", MaxIntensity);
ShaderPropertySetter.SetUniform(gl, "minIntensity_u", MinIntensity);
ShaderPropertySetter.SetUniformMat4(gl, "uModelMatrix_m4", modelMatrix);
canvas.Bind(gl); // this binds the vertex buffer
gl.BindTexture(OpenGL.GL_TEXTURE_2D, texture);
gl.DrawArrays(OpenGL.GL_QUADS, 0, 4);
Texture Creation
public static void FillTextureDataWithUintBuffer(OpenGL gl, uint[] buffer, int width, int height)
{
    unsafe
    {
        fixed (uint* dataprt = buffer)
        {
            IntPtr pixels = new IntPtr(dataprt);
            const int GL_R32UI = 0x8236; // GL_R32UI is not currently defined in SharpGL
            gl.TexImage2D(OpenGL.GL_TEXTURE_2D,
                          0,
                          GL_R32UI,
                          width,
                          height,
                          0,
                          OpenGL.GL_RED_INTEGER,
                          OpenGL.GL_UNSIGNED_INT,
                          pixels);
        }
    }
    OpenGLTesting.CheckForFailure(gl);
}
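One detail not shown above, which I believe matters: integer textures like GL_R32UI are not filterable, so I assume both filters need to be set to GL_NEAREST explicitly (the default mipmapped min filter would leave the texture incomplete for a usampler2D). Something like this, assuming SharpGL exposes the usual TexParameter overload:

// Integer textures must use nearest filtering; linear filtering (including the
// default GL_NEAREST_MIPMAP_LINEAR min filter) leaves the texture incomplete.
gl.TexParameter(OpenGL.GL_TEXTURE_2D, OpenGL.GL_TEXTURE_MIN_FILTER, OpenGL.GL_NEAREST);
gl.TexParameter(OpenGL.GL_TEXTURE_2D, OpenGL.GL_TEXTURE_MAG_FILTER, OpenGL.GL_NEAREST);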
Current GLSL Code
---UPDATED--- (fixed stupid errors that commenters kindly pointed out)

#version 150 core

in vec2 pass_texCord;

uniform usampler2D uTexture_us2;
uniform uint maxIntensity_u;
uniform uint minIntensity_u;

out vec4 out_Color; // gl_FragColor is not available in #version 150 core

float linearNormalize(float value, in float max, in float min)
{
    //normalized = (x-min(x))/(max(x)-min(x))
    return (value - min) / (max - min);
}

void main(void)
{
    uvec4 value = texture(uTexture_us2, pass_texCord);
    float valuef = float(value.r);
    float max = float(maxIntensity_u);
    float min = float(minIntensity_u);
    float normalized = linearNormalize(valuef, max, min);
    out_Color = vec4(normalized, normalized, normalized, 1.0);
}
So I am not very happy with the current state of the GLSL code (especially as it doesn't work :p), because I am performing the float cast, which seems to defeat the point of using an integer texture in the first place.
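For the "back again" part of the title, I assume the inverse is just value = normalized * (max - min) + min, written out to an integer attachment. A rough sketch of what I mean (the sampler/uniform names and the GL_R32UI attachment here are hypothetical, not part of the code above):

#version 150 core

in vec2 pass_texCord;

uniform sampler2D uTextureFloat_s2;  // hypothetical: normalized float source
uniform float maxIntensity_f;        // hypothetical float copies of the intensity range
uniform float minIntensity_f;

out uint out_Value;                  // written to a hypothetical GL_R32UI attachment

void main(void)
{
    float normalized = texture(uTextureFloat_s2, pass_texCord).r;
    // inverse of linearNormalize: x = normalized * (max - min) + min
    out_Value = uint(round(normalized * (maxIntensity_f - minIntensity_f) + minIntensity_f));
}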
Reason:
I am working on a compositor where some of the textures are stored as single-channel unsigned int and others as triple-channel float; when one gets blended with another, I want to convert the "blendee" (roughly as sketched below).
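This is roughly the kind of blend I have in mind; the second sampler and the blend factor are made up for illustration, and the uint layer is normalized exactly as in the shader above before being combined with the float RGB layer:

#version 150 core

in vec2 pass_texCord;

uniform usampler2D uTexture_us2;     // the single-channel unsigned int layer
uniform sampler2D uTextureRGB_s2;    // hypothetical: the triple-channel float layer
uniform uint maxIntensity_u;
uniform uint minIntensity_u;
uniform float blendFactor_f;         // hypothetical blend weight

out vec4 out_Color;

void main(void)
{
    // normalize the uint layer into [0,1] the same way as above
    float valuef = float(texture(uTexture_us2, pass_texCord).r);
    float normalized = (valuef - float(minIntensity_u)) / (float(maxIntensity_u) - float(minIntensity_u));

    vec3 rgb = texture(uTextureRGB_s2, pass_texCord).rgb;
    out_Color = vec4(mix(rgb, vec3(normalized), blendFactor_f), 1.0);
}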
Note: I am using SharpGL