diff options
author | mano-wii <germano.costa@ig.com.br> | 2019-06-05 19:06:11 +0300 |
---|---|---|
committer | mano-wii <germano.costa@ig.com.br> | 2019-06-05 19:50:58 +0300 |
commit | ce66b22c427defa3db498d2d69ee615b3c913c5f (patch) | |
tree | 5b0b947f7c0cd22d100af33c23d3ad60eee2133c /source/blender/gpu/intern/gpu_shader.c | |
parent | dd81efa4a343ecbb7a208e06ea854ce673e20504 (diff) |
Fix crash when editing shaders on Intel HD 4000.
In the Intel HD 4000 driver a shader has to be deleted in the same context in which it is created.
However, because a rendering context cannot be made current on multiple threads at once, the solution for keeping multithreaded compilation was to use `GL_ARB_get_program_binary`: copy the binary generated for the shader and recreate the shader on the main context from that binary.
This solution is limited to Intel HD 4000 on Windows.
Reviewers: fclem
Reviewed By: fclem
Differential Revision: https://developer.blender.org/D5019
Diffstat (limited to 'source/blender/gpu/intern/gpu_shader.c')
-rw-r--r-- | source/blender/gpu/intern/gpu_shader.c | 47 |
1 file changed, 47 insertions, 0 deletions
diff --git a/source/blender/gpu/intern/gpu_shader.c b/source/blender/gpu/intern/gpu_shader.c index a7eb6726364..047cabda9f8 100644 --- a/source/blender/gpu/intern/gpu_shader.c +++ b/source/blender/gpu/intern/gpu_shader.c @@ -292,6 +292,36 @@ GPUShader *GPU_shader_create(const char *vertexcode, vertexcode, fragcode, geocode, libcode, defines, GPU_SHADER_TFB_NONE, NULL, 0, shname); } +GPUShader *GPU_shader_load_from_binary(const char *binary, + const int binary_format, + const int binary_len, + const char *shname) +{ + BLI_assert(GL_ARB_get_program_binary); + int success; + int program = glCreateProgram(); + + glProgramBinary(program, binary_format, binary, binary_len); + glGetProgramiv(program, GL_LINK_STATUS, &success); + + if (success) { + GPUShader *shader = MEM_callocN(sizeof(*shader), __func__); + shader->interface = GPU_shaderinterface_create(program); + shader->program = program; + +#ifndef NDEBUG + BLI_snprintf(shader->name, sizeof(shader->name), "%s_%u", shname, g_shaderid++); +#else + UNUSED_VARS(shname); +#endif + + return shader; + } + + glDeleteProgram(program); + return NULL; +} + #define DEBUG_SHADER_NONE "" #define DEBUG_SHADER_VERTEX "vert" #define DEBUG_SHADER_FRAGMENT "frag" @@ -815,6 +845,23 @@ int GPU_shader_get_attribute(GPUShader *shader, const char *name) return attr ? attr->location : -1; } +char *GPU_shader_get_binary(GPUShader *shader, int *r_binary_format, int *r_binary_len) +{ + BLI_assert(GLEW_ARB_get_program_binary); + char *r_binary; + int binary_len = 0; + + glGetProgramiv(shader->program, GL_PROGRAM_BINARY_LENGTH, &binary_len); + r_binary = MEM_mallocN(binary_len, __func__); + glGetProgramBinary(shader->program, binary_len, NULL, r_binary_format, r_binary); + + if (r_binary_len) { + *r_binary_len = binary_len; + } + + return r_binary; +} + static const GPUShaderStages builtin_shader_stages[GPU_SHADER_BUILTIN_LEN] = { [GPU_SHADER_TEXT] = { |