github.com/HansKristian-Work/vkd3d-proton.git
author     Hans-Kristian Arntzen <post@arntzen-software.no>   2022-03-21 14:06:47 +0300
committer  Hans-Kristian Arntzen <post@arntzen-software.no>   2022-09-14 16:48:23 +0300
commit     a5d7951ab9cd7b9a91af44d9b29eca640e6800a5 (patch)
tree       d1ff1e5c1474bf2aec32d1f3f8fc7727a193331d
parent     72a86806ef72bc0f5bd3a0c648ba87be329fb2c1 (diff)
vkd3d: Refactor stages of obtaining SPIR-V modules.
- Try to load SPIR-V from cache
- Fallback compile to SPIR-V if necessary
- Parse PSO metadata obtained from either compilation or cache lookup

Also moves SPIR-V compilation to end of PSO init.
Prepares for refactor where we completely decouple PSO creation info setup
and SPIR-V compilation.

Signed-off-by: Hans-Kristian Arntzen <post@arntzen-software.no>
-rw-r--r--  libs/vkd3d/state.c  138
1 file changed, 87 insertions(+), 51 deletions(-)
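In short, the commit makes d3d12_pipeline_state_init_graphics() obtain SPIR-V in three explicit stages at the end of PSO init. A minimal sketch of that flow is shown below, using the three helpers introduced in the diff; the wrapper function and its name are illustrative only, and error cleanup is elided.

/* Illustrative sketch only: a hypothetical wrapper showing the order in which
 * the helpers added by this commit are invoked near the end of
 * d3d12_pipeline_state_init_graphics(). */
static HRESULT sketch_obtain_spirv_modules(struct d3d12_pipeline_state *state,
        struct d3d12_device *device, const struct d3d12_pipeline_state_desc *desc,
        const struct d3d12_cached_pipeline_state *cached_pso)
{
    HRESULT hr;

    /* 1. Try to load SPIR-V for every stage from the cached PSO blob.
     *    All-or-nothing: on any failure, partially loaded code is discarded. */
    d3d12_pipeline_state_graphics_load_spirv_from_cached_state(state, device, desc, cached_pso);

    /* 2. Create the shader stages; stages without cached SPIR-V fall back to
     *    compiling from the D3D12 bytecode. */
    if (FAILED(hr = d3d12_pipeline_state_graphics_create_shader_stages(state, device, desc)))
        return hr;

    /* 3. With valid shader meta available, deduce further PSO information
     *    (patch vertex count, debug ring spec constants). */
    d3d12_pipeline_state_graphics_handle_meta(state, device);
    return S_OK;
}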
diff --git a/libs/vkd3d/state.c b/libs/vkd3d/state.c
index a02fc421..667e20b1 100644
--- a/libs/vkd3d/state.c
+++ b/libs/vkd3d/state.c
@@ -2347,6 +2347,12 @@ static HRESULT vkd3d_create_shader_stage(struct d3d12_pipeline_state *state, str
             return hresult_from_vkd3d_result(ret);
         }
         TRACE("Called vkd3d_shader_compile_dxbc.\n");
+
+        if (stage == VK_SHADER_STAGE_FRAGMENT_BIT)
+        {
+            /* At this point we don't need the map anymore. */
+            vkd3d_shader_stage_io_map_free(&state->graphics.cached_desc.stage_io_map_ms_ps);
+        }
     }
     /* Debug compare SPIR-V we got from cache, and SPIR-V we got from compilation. */
@@ -3198,6 +3204,78 @@ static HRESULT d3d12_pipeline_state_validate_blend_state(struct d3d12_pipeline_s
     return S_OK;
 }
+static void d3d12_pipeline_state_graphics_load_spirv_from_cached_state(
+        struct d3d12_pipeline_state *state, struct d3d12_device *device,
+        const struct d3d12_pipeline_state_desc *desc,
+        const struct d3d12_cached_pipeline_state *cached_pso)
+{
+    struct d3d12_graphics_pipeline_state *graphics = &state->graphics;
+    unsigned int i, j;
+
+    /* We only accept SPIR-V from cache if we can successfully load all shaders.
+     * We cannot partially fall back since we cannot handle any situation where we need inter-stage code-gen fixups.
+     * In this situation, just generate full SPIR-V from scratch.
+     * This really shouldn't happen unless we have corrupt cache entries. */
+    for (i = 0; i < graphics->stage_count; i++)
+    {
+        if (FAILED(vkd3d_load_spirv_from_cached_state(device, cached_pso,
+                graphics->cached_desc.bytecode_stages[i], &graphics->code[i])))
+        {
+            for (j = 0; j < i; j++)
+            {
+                if (vkd3d_config_flags & VKD3D_CONFIG_FLAG_PIPELINE_LIBRARY_LOG)
+                    INFO("Discarding cached SPIR-V for stage #%x.\n", graphics->cached_desc.bytecode_stages[j]);
+                vkd3d_shader_free_shader_code(&graphics->code[j]);
+                memset(&graphics->code[j], 0, sizeof(graphics->code[j]));
+            }
+            break;
+        }
+    }
+}
+
+static HRESULT d3d12_pipeline_state_graphics_create_shader_stages(
+        struct d3d12_pipeline_state *state, struct d3d12_device *device,
+        const struct d3d12_pipeline_state_desc *desc)
+{
+    struct d3d12_graphics_pipeline_state *graphics = &state->graphics;
+    unsigned int i;
+    HRESULT hr;
+
+    /* Now create the actual shader modules. If we managed to load SPIR-V from cache, use that directly. */
+    for (i = 0; i < graphics->stage_count; i++)
+    {
+        if (FAILED(hr = vkd3d_create_shader_stage(state, device,
+                &graphics->stages[i],
+                graphics->cached_desc.bytecode_stages[i], NULL,
+                &graphics->cached_desc.bytecode[i], &graphics->code[i])))
+            return hr;
+    }
+
+    return S_OK;
+}
+
+static void d3d12_pipeline_state_graphics_handle_meta(struct d3d12_pipeline_state *state,
+        struct d3d12_device *device)
+{
+    struct d3d12_graphics_pipeline_state *graphics = &state->graphics;
+    unsigned int i;
+
+    for (i = 0; i < graphics->stage_count; i++)
+    {
+        if (graphics->cached_desc.bytecode_stages[i] == VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
+            graphics->patch_vertex_count = graphics->code[i].meta.patch_vertex_count;
+
+        if ((graphics->code[i].meta.flags & VKD3D_SHADER_META_FLAG_REPLACED) &&
+                device->debug_ring.active)
+        {
+            vkd3d_shader_debug_ring_init_spec_constant(device,
+                    &graphics->spec_info[i],
+                    graphics->code[i].meta.hash);
+            graphics->stages[i].pSpecializationInfo = &graphics->spec_info[i].spec_info;
+        }
+    }
+}
+
 static HRESULT d3d12_pipeline_state_init_graphics(struct d3d12_pipeline_state *state,
         struct d3d12_device *device, const struct d3d12_pipeline_state_desc *desc,
         const struct d3d12_cached_pipeline_state *cached_pso)
@@ -3507,59 +3585,9 @@ static HRESULT d3d12_pipeline_state_init_graphics(struct d3d12_pipeline_state *s
         ++graphics->stage_count;
     }
-    /* We only accept SPIR-V from cache if we can successfully load all shaders.
-     * We cannot partially fall back since we cannot handle any situation where we need inter-stage code-gen fixups.
-     * In this situation, just generate full SPIR-V from scratch.
-     * This really shouldn't happen unless we have corrupt cache entries. */
-    for (i = 0; i < graphics->stage_count; i++)
-    {
-        if (FAILED(vkd3d_load_spirv_from_cached_state(device, cached_pso,
-                graphics->cached_desc.bytecode_stages[i], &graphics->code[i])))
-        {
-            for (j = 0; j < i; j++)
-            {
-                if (vkd3d_config_flags & VKD3D_CONFIG_FLAG_PIPELINE_LIBRARY_LOG)
-                    INFO("Discarding cached SPIR-V for stage #%x.\n", graphics->cached_desc.bytecode_stages[j]);
-                vkd3d_shader_free_shader_code(&graphics->code[j]);
-                memset(&graphics->code[j], 0, sizeof(graphics->code[j]));
-            }
-            break;
-        }
-    }
-
-    /* Now create the actual shader modules. If we managed to load SPIR-V from cache, use that directly.
-     * Make sure we don't reset graphics->stage_count since that is a potential memory leak if
-     * we fail to create shader module for whatever reason. */
-    for (i = 0; i < graphics->stage_count; i++)
-    {
-        if (FAILED(hr = vkd3d_create_shader_stage(state, device,
-                &graphics->stages[i],
-                graphics->cached_desc.bytecode_stages[i], NULL,
-                &graphics->cached_desc.bytecode[i], &graphics->code[i])))
-            goto fail;
-
-        if (graphics->cached_desc.bytecode_stages[i] == VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
-        {
-            graphics->patch_vertex_count = graphics->code[i].meta.patch_vertex_count;
-        }
-        else if (graphics->cached_desc.bytecode_stages[i] == VK_SHADER_STAGE_FRAGMENT_BIT)
-        {
-            /* We have consumed the MS/PS map at this point. */
-            vkd3d_shader_stage_io_map_free(&state->graphics.cached_desc.stage_io_map_ms_ps);
-        }
-
-        if ((graphics->code[i].meta.flags & VKD3D_SHADER_META_FLAG_REPLACED) &&
-                device->debug_ring.active)
-        {
-            vkd3d_shader_debug_ring_init_spec_constant(device,
-                    &graphics->spec_info[i],
-                    graphics->code[i].meta.hash);
-            graphics->stages[i].pSpecializationInfo = &graphics->spec_info[i].spec_info;
-        }
-    }
-
     graphics->attribute_count = (graphics->stage_flags & VK_SHADER_STAGE_MESH_BIT_EXT)
             ? 0 : desc->input_layout.NumElements;
+
     if (graphics->attribute_count > ARRAY_SIZE(graphics->attributes))
     {
         FIXME("InputLayout.NumElements %zu > %zu, ignoring extra elements.\n",
@@ -3756,6 +3784,14 @@ static HRESULT d3d12_pipeline_state_init_graphics(struct d3d12_pipeline_state *s
         goto fail;
     }
+    d3d12_pipeline_state_graphics_load_spirv_from_cached_state(state, device, desc, cached_pso);
+    if (FAILED(hr = d3d12_pipeline_state_graphics_create_shader_stages(state, device, desc)))
+        goto fail;
+
+    /* At this point, we will have valid meta structures set up.
+     * Deduce further PSO information from these structs. */
+    d3d12_pipeline_state_graphics_handle_meta(state, device);
+
     if (graphics->stage_flags & VK_SHADER_STAGE_MESH_BIT_EXT)
     {
         can_compile_pipeline_early = true;