I'm trying to port a Windows program to GNU/Linux, but a compute shader doesn't compile.

I have limited knowledge of GLSL, so I wonder whether there is a workaround to get the shader working.

I have written a minimal example that shows the compilation errors.

Here is the compute shader:

#version 430 core
#extension GL_ARB_gpu_shader_int64 : enable
//#extension GL_ARB_bindless_texture : enable

layout(binding = 1) uniform TextureHandles {
    uvec2 texture_handles[512];
};

vec3 SampleTexture(uint texture_index, vec2 uv) {
    uv.y = 1.f - uv.y;
    sampler2D tex_sampler = sampler2D(texture_handles[texture_index]);
    return textureLod(tex_sampler, uv, 0.0f).xyz;
}

void main() {
}

Here are the compilation errors:

0:11(2): error: image/sampler variables may only be declared as function parameters or uniform-qualified global variables
0:11(2): error: opaque variables must be declared uniform
0:11(26): error: cannot initialize tex_sampler variable opaque
0:11(26): error: cannot construct opaque type `sampler2D'
0:12(20): warning: `tex_sampler' used uninitialized

Regarding the construction of the opaque type sampler2D, I have read on the internet that the GL_ARB_bindless_texture extension needs to be enabled, but when I enable it, I get an error saying that it is unsupported in a compute shader.
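
As an aside, whether the driver exposes the extension at all can be checked at runtime. Here is a minimal sketch using GLEW (valid only after glewInit() has run):

/* GLEW exposes a boolean flag per extension after glewInit() */
if (!GLEW_ARB_bindless_texture) {
    fprintf(stderr, "GL_ARB_bindless_texture is not supported by this driver\n");
}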

Here is the minimal program, using GLEW and GLFW, that shows the error:

#define GLFW_INCLUDE_NONE
#include <GLFW/glfw3.h>

#define GLEW_STATIC
#include <GL/glew.h>

#include <stdlib.h>
#include <stdio.h>

/////////////////////////////////////////////////////////
//
// Shader programs for core profile:
//
const char * csprog_core =
"#version 430 core\n\
#extension GL_ARB_gpu_shader_int64 : enable\n\
//#extension GL_ARB_bindless_texture : enable\n\
\n\
layout(binding = 1) uniform TextureHandles {\n\
    uvec2 texture_handles[512];\n\
};\n\
\n\
vec3 SampleTexture(uint texture_index, vec2 uv) {\n\
    uv.y = 1.f - uv.y;\n\
    sampler2D tex_sampler = sampler2D(texture_handles[texture_index]);\n\
    return textureLod(tex_sampler, uv, 0.0f).xyz;\n\
}\n\
\n\
void main() {\n\
}\n\
\n";


int width, height;

void error_callback(int error, const char* description) {
  fprintf(stderr, "Error: %s\n", description);
}

int main(int argc, char *argv[]) {
  glfwSetErrorCallback(error_callback); // set before glfwInit so init errors are reported too
  if(!glfwInit()) {
    exit(EXIT_FAILURE);
  }
  
  glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
  glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
  glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); // the shader declares "#version 430 core"
  GLFWwindow* window = glfwCreateWindow(640, 480, "Program", NULL, NULL);
  if(!window) {
    exit(EXIT_FAILURE);
  }
  glfwMakeContextCurrent(window);
  glfwSwapInterval(1);
  
  glfwGetFramebufferSize(window, &width, &height);

  // get version info
  const GLubyte* renderer;
  const GLubyte* version;

  ///////////////////////////////////////////////////////////////////////
  //
  // start GLEW extension handler
  //
  glewExperimental = GL_TRUE;
  GLenum err = glewInit();
  if(GLEW_OK != err) {
    fprintf(stderr, "Error: %s\n", glewGetErrorString(err));
    return(-1);
  }
  fprintf(stdout, "Status: Using GLEW %s\n", glewGetString(GLEW_VERSION));
  
  // get version info
  renderer = glGetString(GL_RENDERER); // get renderer string
  version = glGetString(GL_VERSION); // version as a string
  printf("\nRenderer: %s", renderer);
  printf("\nOpenGL version supported %s\n", version);
  fflush(stdout);
  
  // tell GL to only draw onto a pixel if the shape is closer to the viewer
  glEnable(GL_DEPTH_TEST); // enable depth-testing
  glDepthFunc(GL_LESS); // depth-testing interprets a smaller value as "closer"

  //////////////////////////////////////////////////////////
  //
  // Shaders:
  //
  GLint params;
  GLint len;

  GLuint cscore = glCreateShader(GL_COMPUTE_SHADER);
  glShaderSource(cscore, 1, &csprog_core, NULL);
  glCompileShader(cscore);
  glGetShaderiv(cscore,GL_COMPILE_STATUS,&params);
  if(params == GL_FALSE) {
    GLchar log[100000];
    glGetShaderInfoLog(cscore,100000,&len,log);
    printf("\n\n%s\n\n",log);
    exit(EXIT_FAILURE);
  }
  //
  //////////////////////////////////////////////////////////
  
  glfwDestroyWindow(window);
  glfwTerminate();
  
  exit(EXIT_SUCCESS);
}
2 Comments

  • Sampler objects are opaque data types and cannot be constructed like matrices, vectors, or scalar types. I would recommend reading the following articles: Sampler (GLSL) (especially the section Binding textures to samplers), Bindless Texture, Interface Block (GLSL) and/or Compute Shader. Commented Oct 19, 2023 at 6:51
  • An alternative and simple solution would be the use of an Array Texture. Commented Oct 19, 2023 at 6:52

1 Answer

Following Erdal Küçük's comment, I have implemented a texture array, setting its width and height to those of the largest texture.

I have to set some uniforms so that the shader can compute the u and v values for each texture as a function of its width and height. The scale factors for u and v could also be precomputed on the CPU, as sketched below.
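
For example, a minimal sketch of precomputing the scale factors on the CPU and uploading them as a single vec2 array (the program handle and the uv_scales uniform name are assumptions, not part of my actual code):

// Hypothetical sketch: precompute per-texture UV scale factors once on the CPU
std::vector<float> uv_scales(2 * textures.size());
for (std::size_t i = 0; i < textures.size(); ++i) {
    uv_scales[2 * i + 0] = float(textures[i].width)  / float(maxwidth);
    uv_scales[2 * i + 1] = float(textures[i].height) / float(maxheight);
}
// Matching GLSL declaration would be: uniform vec2 uv_scales[MAX_TEXTURES];
GLint loc = glGetUniformLocation(program, "uv_scales"); // 'program' is assumed
glUniform2fv(loc, (GLsizei)textures.size(), uv_scales.data());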

Here is what the compute shader looks like:

#version 430 core
#extension GL_ARB_gpu_shader_int64 : enable

// MAX_TEXTURES must be defined before the uniform arrays below, e.g. prepended
// to the source string at build time; 512 matches the handle count in the question.
#define MAX_TEXTURES 512

uniform sampler2DArray Textures;
uniform uint maxwidth;
uniform uint maxheight;
uniform uint texwidths[MAX_TEXTURES];
uniform uint texheights[MAX_TEXTURES];

vec3 SampleTexture(uint texture_index, vec2 uv) {
    uv.y = 1.f - uv.y;
    uv.x = uv.x * float(texwidths[texture_index]) / maxwidth;
    uv.y = uv.y * float(texheights[texture_index]) / maxheight;
    return textureLod(Textures, vec3(uv, texture_index), 0.0f).xyz;
}

void main() {
}

The maximum number of layers of an array texture that the hardware supports can be queried with the following call:

GLint value = 0;
glGetIntegerv(GL_MAX_ARRAY_TEXTURE_LAYERS, &value);

To upload the textures to the GPU, I'm using the following code, which sets up a texture array sized to the maximum width and height found among the textures:

GLuint texture_array = 0;
glGenTextures(1, &texture_array);
glBindTexture(GL_TEXTURE_2D_ARRAY, texture_array);

// Find the largest width and height among all textures:
int maxwidth = 0;
int maxheight = 0;
for(std::size_t i = 0; i < textures.size(); ++i) {
  if(maxwidth < textures[i].width) maxwidth = textures[i].width;
  if(maxheight < textures[i].height) maxheight = textures[i].height;
}
glTexStorage3D(GL_TEXTURE_2D_ARRAY, 1, GL_RGBA8, maxwidth, maxheight, (GLsizei)textures.size());
    
std::vector<std::uint32_t> clear_data(maxwidth * maxheight, 0); // one RGBA8 texel per entry

for (std::size_t i = 0; i < textures.size(); ++i) {
  auto & tex = textures[i];

  // Clear layer i to zero (the contents are undefined after glTexStorage3D):
  glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0, 0, 0, (GLint)i, maxwidth, maxheight, 1, GL_RGBA, GL_UNSIGNED_BYTE, clear_data.data());

  // Copy texture i to the GPU at layer i:
  glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0, 0, 0, (GLint)i, tex.width, tex.height, 1, GL_RGBA, GL_UNSIGNED_BYTE, &texture_data[tex.data_start]);
  
  texwidths_[i] = tex.width;
  texheights_[i] = tex.height;
}
glTexParameteri(GL_TEXTURE_2D_ARRAY,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

glBindTexture(GL_TEXTURE_2D_ARRAY, 0); // unbind; texture names are integers, so 0 rather than NULL

Before launching the compute shader, I just have to bind the texture array and set the uniform values for the width and height of each texture.
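
A minimal sketch of that setup (the compute_program handle, the texwidths_/texheights_ array types, and the work-group counts are assumptions):

glUseProgram(compute_program); // hypothetical program handle

// Bind the array texture to unit 0 and point the sampler uniform at it:
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D_ARRAY, texture_array);
glUniform1i(glGetUniformLocation(compute_program, "Textures"), 0);

// Per-texture dimensions (assuming texwidths_/texheights_ are GLuint arrays):
glUniform1ui(glGetUniformLocation(compute_program, "maxwidth"), (GLuint)maxwidth);
glUniform1ui(glGetUniformLocation(compute_program, "maxheight"), (GLuint)maxheight);
glUniform1uiv(glGetUniformLocation(compute_program, "texwidths"), (GLsizei)textures.size(), texwidths_);
glUniform1uiv(glGetUniformLocation(compute_program, "texheights"), (GLsizei)textures.size(), texheights_);

glDispatchCompute(group_count_x, group_count_y, 1); // work-group counts depend on the job
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT); // match the barrier to what the shader writes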

1 Comment

To clear a texture image (OpenGL 4.4+), one would use glClearTexImage or glClearTexSubImage, so there is no extra data to allocate and no extra uploads are necessary.
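
For reference, a sketch of clearing layer i that way (assuming an OpenGL 4.4+ context):

// Passing NULL as the data pointer means "clear to zero" for glClearTexSubImage:
glClearTexSubImage(texture_array, 0, 0, 0, (GLint)i, maxwidth, maxheight, 1,
                   GL_RGBA, GL_UNSIGNED_BYTE, NULL);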
