github.com/KhronosGroup/SPIRV-Cross.git
author     Hans-Kristian Arntzen <post@arntzen-software.no>  2020-09-14 11:42:31 +0300
committer  Hans-Kristian Arntzen <post@arntzen-software.no>  2020-09-14 12:45:59 +0300
commit     66afe8c4991f23f791b07339262f085d0a41a55d (patch)
tree       b74e73ead65df0386f1be0c6b334bf3ca62be03e /spirv_hlsl.cpp
parent     bdbef7b1f3982fe99a62d076043036abe6dd6d80 (diff)
Implement a simple evaluator of specialization constants.
In some cases, we need to get a literal value from a spec constant op. This is mostly relevant when emitting buffers, so implement a 32-bit integer scalar subset of the evaluator. It can be extended as needed to support evaluating any specialization constant operation.
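
For context, such an evaluator simply folds a specialization-constant expression tree down to a single 32-bit value. The following is a minimal standalone sketch of that idea, not the actual SPIRV-Cross implementation: the Op, Node and Module types and this evaluate_constant_u32 signature are hypothetical stand-ins for the library's SPIRConstant / SPIRConstantOp handling, and only a handful of integer opcodes are covered.

// Standalone sketch of a 32-bit integer spec-constant evaluator.
// Types here are hypothetical stand-ins, not SPIRV-Cross internals.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <stdexcept>
#include <unordered_map>
#include <vector>

enum class Op { Literal, IAdd, ISub, IMul, ShiftLeftLogical };

struct Node
{
	Op op;                          // Kind of constant expression.
	uint32_t literal;               // Used when op == Op::Literal.
	std::vector<uint32_t> operands; // IDs of operand nodes otherwise.
};

using Module = std::unordered_map<uint32_t, Node>;

// Recursively folds a spec-constant expression down to a 32-bit scalar.
static uint32_t evaluate_constant_u32(const Module &module, uint32_t id)
{
	auto itr = module.find(id);
	if (itr == module.end())
		throw std::runtime_error("Unknown constant ID.");

	const Node &node = itr->second;
	auto arg = [&](std::size_t i) { return evaluate_constant_u32(module, node.operands[i]); };

	switch (node.op)
	{
	case Op::Literal:
		return node.literal;
	case Op::IAdd:
		return arg(0) + arg(1);
	case Op::ISub:
		return arg(0) - arg(1);
	case Op::IMul:
		return arg(0) * arg(1);
	case Op::ShiftLeftLogical:
		return arg(0) << arg(1);
	}
	throw std::runtime_error("Unsupported spec constant op.");
}

int main()
{
	// (4 + 2) * 8, expressed as a small spec-constant expression tree.
	Module module;
	module[1] = { Op::Literal, 4, {} };
	module[2] = { Op::Literal, 2, {} };
	module[3] = { Op::IAdd, 0, { 1, 2 } };
	module[4] = { Op::Literal, 8, {} };
	module[5] = { Op::IMul, 0, { 3, 4 } };
	std::printf("%u\n", (unsigned)evaluate_constant_u32(module, 5)); // Prints 48.
	return 0;
}

Each call site in the diff below only needs a compile-time scalar (array sizes, gather components, barrier scopes and memory semantics), which is why a 32-bit unsigned subset of the evaluator is sufficient here.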
Diffstat (limited to 'spirv_hlsl.cpp')
-rw-r--r--  spirv_hlsl.cpp  20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/spirv_hlsl.cpp b/spirv_hlsl.cpp
index 50eb5aea..070a83e5 100644
--- a/spirv_hlsl.cpp
+++ b/spirv_hlsl.cpp
@@ -790,7 +790,7 @@ uint32_t CompilerHLSL::type_to_consumed_locations(const SPIRType &type) const
if (type.array_size_literal[i])
array_multiplier *= type.array[i];
else
- array_multiplier *= get<SPIRConstant>(type.array[i]).scalar();
+ array_multiplier *= evaluate_constant_u32(type.array[i]);
}
elements += array_multiplier * type.columns;
}
@@ -2860,7 +2860,7 @@ void CompilerHLSL::emit_texture_op(const Instruction &i, bool sparse)
}
else if (gather)
{
- uint32_t comp_num = get<SPIRConstant>(comp).scalar();
+ uint32_t comp_num = evaluate_constant_u32(comp);
if (hlsl_options.shader_model >= 50)
{
switch (comp_num)
@@ -4454,7 +4454,7 @@ void CompilerHLSL::emit_subgroup_op(const Instruction &i)
uint32_t result_type = ops[0];
uint32_t id = ops[1];
- auto scope = static_cast<Scope>(get<SPIRConstant>(ops[2]).scalar());
+ auto scope = static_cast<Scope>(evaluate_constant_u32(ops[2]));
if (scope != ScopeSubgroup)
SPIRV_CROSS_THROW("Only subgroup scope is supported.");
@@ -4611,7 +4611,7 @@ case OpGroupNonUniform##op: \
case OpGroupNonUniformQuadSwap:
{
- uint32_t direction = get<SPIRConstant>(ops[4]).scalar();
+ uint32_t direction = evaluate_constant_u32(ops[4]);
if (direction == 0)
emit_unary_func_op(result_type, id, ops[3], "QuadReadAcrossX");
else if (direction == 1)
@@ -5269,13 +5269,13 @@ void CompilerHLSL::emit_instruction(const Instruction &instruction)
if (opcode == OpMemoryBarrier)
{
- memory = get<SPIRConstant>(ops[0]).scalar();
- semantics = get<SPIRConstant>(ops[1]).scalar();
+ memory = evaluate_constant_u32(ops[0]);
+ semantics = evaluate_constant_u32(ops[1]);
}
else
{
- memory = get<SPIRConstant>(ops[1]).scalar();
- semantics = get<SPIRConstant>(ops[2]).scalar();
+ memory = evaluate_constant_u32(ops[1]);
+ semantics = evaluate_constant_u32(ops[2]);
}
if (memory == ScopeSubgroup)
@@ -5295,8 +5295,8 @@ void CompilerHLSL::emit_instruction(const Instruction &instruction)
if (next && next->op == OpControlBarrier)
{
auto *next_ops = stream(*next);
- uint32_t next_memory = get<SPIRConstant>(next_ops[1]).scalar();
- uint32_t next_semantics = get<SPIRConstant>(next_ops[2]).scalar();
+ uint32_t next_memory = evaluate_constant_u32(next_ops[1]);
+ uint32_t next_semantics = evaluate_constant_u32(next_ops[2]);
next_semantics = mask_relevant_memory_semantics(next_semantics);
// There is no "just execution barrier" in HLSL.