Binding multiple textures in OpenGL does not work correctly


I am trying to bind multiple textures, but something weird happens when I have 2 or more textures: they are rendered on top of each other.

This problem happens when using the GPU NVIDIA GeForce RTX 3070 Laptop GPU/PCIe/SSE2 with OpenGL version 4.6, like this: cubeTextuerImage

but when using the GPU AMD Radeon(TM) Graphics it looks correct, like this: cubeTextuerImage

main.cpp

shaderProgram.bind();
const int NUMBER_OF_TEXTURE = static_cast<int>(textures.size());

// Point u_Textuers[i] at texture unit i.
auto sample = new int[NUMBER_OF_TEXTURE];
for (int i = 0; i < NUMBER_OF_TEXTURE; i++)
    sample[i] = i;

shaderProgram.setUniform1iv("u_Textuers", NUMBER_OF_TEXTURE, sample);
delete[] sample;

while (!glfwWindowShouldClose(window))
{
    processInput(window);

    Renderer::clear();
    shaderProgram.bind();

    // Bind each texture to the matching texture unit (0..N-1).
    for (int i = 0; i < NUMBER_OF_TEXTURE; i++)
    {
        textures[i].Bind(i);
    }
    render.draw(shaderProgram);
    GLCall(glBindTexture(GL_TEXTURE_2D, 0))


    ImGui::Render();
    ImGui_ImplOpenGL3_RenderDrawData(ImGui::GetDrawData());

    glfwSwapBuffers(window);
    glfwPollEvents();
}

Texture.cpp

Texture::Texture(const FxrDataStructures::Image& image)
    :m_RendererID(0), m_LocalBuffer(nullptr),
    m_Width(0), m_Height(0), m_BPP(0)
{
    // Create the texture object and bind it so the parameter and
    // upload calls below affect it.
    GLCall(glCreateTextures(GL_TEXTURE_2D, 1, &m_RendererID));
    GLCall(glBindTexture(GL_TEXTURE_2D, m_RendererID));

    // set the texture wrapping/filtering options (on the currently bound texture object)
    GLCall(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR))
    GLCall(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR))
    GLCall(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE))
    GLCall(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE))

    m_Width = image.getWidth();
    m_Height = image.getHeight();
    m_BPP = image.getBitDepth();

    cv::Mat mat = image.img;
    // Flip vertically: OpenGL expects the first row of pixel data
    // to be the bottom of the image.
    cv::flip(mat, mat, 0);
    m_LocalBuffer = mat.ptr();
    if (m_LocalBuffer)
    {
        // Upload the pixels (this call assumes 4-channel RGBA data).
        GLCall(glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, m_Width, m_Height, 0, GL_RGBA, GL_UNSIGNED_BYTE, m_LocalBuffer))
        //GLCall(glGenerateMipmap(GL_TEXTURE_2D))
    }
    else
    {
        LOG_DEBUG("Failed to load texture ");
    }
    GLCall(glBindTexture(GL_TEXTURE_2D, 0))

}

void Texture::Bind(unsigned int slot) const
{
    // DSA-style bind: attach this texture directly to the given texture unit.
    GLCall(glBindTextureUnit(slot, m_RendererID))
}

fragment shader

in float IndexMaterial;
in vec2 TexCoord;

out vec4 FragColor;
uniform sampler2D u_Textuers[32];
void main()
{
    float diff;
    const int index = int(IndexMaterial);

    FragColor = vec4(texture(u_Textuers[index], TexCoord));
}
1 Answer

user253751 (accepted answer):

I'd say this has something to do with the float-to-int conversion. It seems that the NVIDIA GPU adds a little bit of noise when it interpolates IndexMaterial, while the AMD GPU does not. Since int() truncates toward zero, an interpolated value that should be 3.0 but arrives as 2.9999 becomes index 2, so the fragment samples the wrong texture.
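As a concrete illustration (the values here are hypothetical, not from the question):

// Every vertex of the face passes IndexMaterial = 3.0, but after
// interpolation a fragment may receive a value slightly off.
float IndexMaterial = 2.9999;   // hypothetical interpolated value
int index = int(IndexMaterial); // truncates toward zero -> 2, the wrong texture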

Try using flat shading for IndexMaterial. This will cause the GPU to use the value from a single (provoking) vertex instead of interpolating between vertices:

flat in float IndexMaterial;

See: "flat" qualifier in glsl?
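Here is a minimal sketch of the declarations in both stages (the vertex shader side is my assumption, since the question only shows the fragment shader; declaring flat in both stages is the safe, portable choice):

// vertex shader
flat out float IndexMaterial;

// fragment shader
flat in float IndexMaterial;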

You may also change IndexMaterial from float to int, since there is no good reason for it to be a float.
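A sketch of that integer version (GLSL requires integer fragment shader inputs to be flat-qualified anyway, and if the index originates from a vertex attribute, the C++ side would have to set it up with glVertexAttribIPointer rather than glVertexAttribPointer):

// vertex shader
flat out int IndexMaterial;

// fragment shader
flat in int IndexMaterial;
in vec2 TexCoord;

out vec4 FragColor;
uniform sampler2D u_Textuers[32];

void main()
{
    // The index arrives exact; no float conversion or rounding is needed.
    FragColor = texture(u_Textuers[IndexMaterial], TexCoord);
}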