
github.com/KhronosGroup/SPIRV-Cross.git
author     Hans-Kristian Arntzen <post@arntzen-software.no>  2020-06-04 12:35:21 +0300
committer  Hans-Kristian Arntzen <post@arntzen-software.no>  2020-06-04 13:33:56 +0300
commit     2d5200650a39a159efa50bdb98d557344165f257 (patch)
tree       e957e81a5bbd6e650fd907ecca66c776f0f182d8 /spirv_hlsl.cpp
parent     d385bf096f5dabbc4cdaeb6872b0f64be1a63ad0 (diff)
HLSL: Add native support for 16-bit types.
Adds support for templated load/store in SM 6.2 to deal with small types.
Diffstat (limited to 'spirv_hlsl.cpp')
-rw-r--r--  spirv_hlsl.cpp  176
1 file changed, 144 insertions(+), 32 deletions(-)
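
For context, the "templated load/store" mentioned in the commit message is the SM 6.2 ByteAddressBuffer syntax buf.Load<T>(offset) / buf.Store<T>(offset, value), which reads and writes T directly instead of going through 32-bit uint loads plus a bitcast. A standalone C++ sketch (a hypothetical helper for illustration, not SPIRV-Cross code) of the expression-building choice the patch makes in the scalar/vector path:

#include <cstdint>
#include <string>

// Builds the HLSL load expression for a scalar or vector read from a
// ByteAddressBuffer. With SM 6.2 templated loads the backend can emit
// "buf.Load<half4>(16)" directly; otherwise it falls back to the legacy
// "Load/Load2/Load3/Load4" path, whose uint result the caller bitcasts.
static std::string make_load_expr(const std::string &base, const std::string &hlsl_type,
                                  uint32_t vecsize, uint32_t offset, bool templated_load)
{
	if (templated_load)
		return base + ".Load<" + hlsl_type + ">(" + std::to_string(offset) + ")";

	std::string load_op = vecsize == 1 ? "Load" : "Load" + std::to_string(vecsize);
	return base + "." + load_op + "(" + std::to_string(offset) + ")";
}

// make_load_expr("_17", "half4", 4, 16, true)  -> "_17.Load<half4>(16)"
// make_load_expr("_17", "half4", 4, 16, false) -> "_17.Load4(16)" (bitcast applied by caller)
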
diff --git a/spirv_hlsl.cpp b/spirv_hlsl.cpp
index ce94d4ef..3495564d 100644
--- a/spirv_hlsl.cpp
+++ b/spirv_hlsl.cpp
@@ -430,7 +430,20 @@ string CompilerHLSL::type_to_glsl(const SPIRType &type, uint32_t id)
case SPIRType::AtomicCounter:
return "atomic_uint";
case SPIRType::Half:
- return "min16float";
+ if (hlsl_options.enable_16bit_types)
+ return "half";
+ else
+ return "min16float";
+ case SPIRType::Short:
+ if (hlsl_options.enable_16bit_types)
+ return "int16_t";
+ else
+ return "min16int";
+ case SPIRType::UShort:
+ if (hlsl_options.enable_16bit_types)
+ return "uint16_t";
+ else
+ return "min16uint";
case SPIRType::Float:
return "float";
case SPIRType::Double:
@@ -458,7 +471,11 @@ string CompilerHLSL::type_to_glsl(const SPIRType &type, uint32_t id)
case SPIRType::UInt:
return join("uint", type.vecsize);
case SPIRType::Half:
- return join("min16float", type.vecsize);
+ return join(hlsl_options.enable_16bit_types ? "half" : "min16float", type.vecsize);
+ case SPIRType::Short:
+ return join(hlsl_options.enable_16bit_types ? "int16_t" : "min16int", type.vecsize);
+ case SPIRType::UShort:
+ return join(hlsl_options.enable_16bit_types ? "uint16_t" : "min16uint", type.vecsize);
case SPIRType::Float:
return join("float", type.vecsize);
case SPIRType::Double:
@@ -482,7 +499,11 @@ string CompilerHLSL::type_to_glsl(const SPIRType &type, uint32_t id)
case SPIRType::UInt:
return join("uint", type.columns, "x", type.vecsize);
case SPIRType::Half:
- return join("min16float", type.columns, "x", type.vecsize);
+ return join(hlsl_options.enable_16bit_types ? "half" : "min16float", type.columns, "x", type.vecsize);
+ case SPIRType::Short:
+ return join(hlsl_options.enable_16bit_types ? "int16_t" : "min16int", type.columns, "x", type.vecsize);
+ case SPIRType::UShort:
+ return join(hlsl_options.enable_16bit_types ? "uint16_t" : "min16uint", type.columns, "x", type.vecsize);
case SPIRType::Float:
return join("float", type.columns, "x", type.vecsize);
case SPIRType::Double:
@@ -3647,11 +3668,16 @@ void CompilerHLSL::read_access_chain(string *expr, const string &lhs, const SPIR
read_access_chain_struct(lhs, chain);
return;
}
- else if (type.width != 32)
- SPIRV_CROSS_THROW("Reading types other than 32-bit from ByteAddressBuffer not yet supported.");
+ else if (type.width != 32 && !hlsl_options.enable_16bit_types)
+ SPIRV_CROSS_THROW("Reading types other than 32-bit from ByteAddressBuffer not yet supported, unless SM 6.2 and native 16-bit types are enabled.");
+ bool templated_load = hlsl_options.shader_model >= 62;
string load_expr;
+ string template_expr;
+ if (templated_load)
+ template_expr = join("<", type_to_glsl(type), ">");
+
// Load a vector or scalar.
if (type.columns == 1 && !chain.row_major_matrix)
{
@@ -3674,12 +3700,24 @@ void CompilerHLSL::read_access_chain(string *expr, const string &lhs, const SPIR
SPIRV_CROSS_THROW("Unknown vector size.");
}
- load_expr = join(chain.base, ".", load_op, "(", chain.dynamic_index, chain.static_index, ")");
+ if (templated_load)
+ load_op = "Load";
+
+ load_expr = join(chain.base, ".", load_op, template_expr, "(", chain.dynamic_index, chain.static_index, ")");
}
else if (type.columns == 1)
{
// Strided load since we are loading a column from a row-major matrix.
- if (type.vecsize > 1)
+ if (templated_load)
+ {
+ auto scalar_type = type;
+ scalar_type.vecsize = 1;
+ scalar_type.columns = 1;
+ template_expr = join("<", type_to_glsl(scalar_type), ">");
+ if (type.vecsize > 1)
+ load_expr += type_to_glsl(type) + "(";
+ }
+ else if (type.vecsize > 1)
{
load_expr = type_to_glsl(target_type);
load_expr += "(";
@@ -3688,7 +3726,7 @@ void CompilerHLSL::read_access_chain(string *expr, const string &lhs, const SPIR
for (uint32_t r = 0; r < type.vecsize; r++)
{
load_expr +=
- join(chain.base, ".Load(", chain.dynamic_index, chain.static_index + r * chain.matrix_stride, ")");
+ join(chain.base, ".Load", template_expr, "(", chain.dynamic_index, chain.static_index + r * chain.matrix_stride, ")");
if (r + 1 < type.vecsize)
load_expr += ", ";
}
@@ -3718,13 +3756,25 @@ void CompilerHLSL::read_access_chain(string *expr, const string &lhs, const SPIR
SPIRV_CROSS_THROW("Unknown vector size.");
}
- // Note, this loading style in HLSL is *actually* row-major, but we always treat matrices as transposed in this backend,
- // so row-major is technically column-major ...
- load_expr = type_to_glsl(target_type);
+ if (templated_load)
+ {
+ auto vector_type = type;
+ vector_type.columns = 1;
+ template_expr = join("<", type_to_glsl(vector_type), ">");
+ load_expr = type_to_glsl(type);
+ load_op = "Load";
+ }
+ else
+ {
+ // Note, this loading style in HLSL is *actually* row-major, but we always treat matrices as transposed in this backend,
+ // so row-major is technically column-major ...
+ load_expr = type_to_glsl(target_type);
+ }
load_expr += "(";
+
for (uint32_t c = 0; c < type.columns; c++)
{
- load_expr += join(chain.base, ".", load_op, "(", chain.dynamic_index,
+ load_expr += join(chain.base, ".", load_op, template_expr, "(", chain.dynamic_index,
chain.static_index + c * chain.matrix_stride, ")");
if (c + 1 < type.columns)
load_expr += ", ";
@@ -3736,13 +3786,24 @@ void CompilerHLSL::read_access_chain(string *expr, const string &lhs, const SPIR
// Pick out elements one by one ... Hopefully compilers are smart enough to recognize this pattern
// considering HLSL is "row-major decl", but "column-major" memory layout (basically implicit transpose model, ugh) ...
- load_expr = type_to_glsl(target_type);
+ if (templated_load)
+ {
+ load_expr = type_to_glsl(type);
+ auto scalar_type = type;
+ scalar_type.vecsize = 1;
+ scalar_type.columns = 1;
+ template_expr = join("<", type_to_glsl(scalar_type), ">");
+ }
+ else
+ load_expr = type_to_glsl(target_type);
+
load_expr += "(";
+
for (uint32_t c = 0; c < type.columns; c++)
{
for (uint32_t r = 0; r < type.vecsize; r++)
{
- load_expr += join(chain.base, ".Load(", chain.dynamic_index,
+ load_expr += join(chain.base, ".Load", template_expr, "(", chain.dynamic_index,
chain.static_index + c * (type.width / 8) + r * chain.matrix_stride, ")");
if ((r + 1 < type.vecsize) || (c + 1 < type.columns))
@@ -3752,9 +3813,12 @@ void CompilerHLSL::read_access_chain(string *expr, const string &lhs, const SPIR
load_expr += ")";
}
- auto bitcast_op = bitcast_glsl_op(type, target_type);
- if (!bitcast_op.empty())
- load_expr = join(bitcast_op, "(", load_expr, ")");
+ if (!templated_load)
+ {
+ auto bitcast_op = bitcast_glsl_op(type, target_type);
+ if (!bitcast_op.empty())
+ load_expr = join(bitcast_op, "(", load_expr, ")");
+ }
if (lhs.empty())
{
@@ -3937,8 +4001,14 @@ void CompilerHLSL::write_access_chain(const SPIRAccessChain &chain, uint32_t val
register_write(chain.self);
return;
}
- else if (type.width != 32)
- SPIRV_CROSS_THROW("Writing types other than 32-bit to RWByteAddressBuffer not yet supported.");
+ else if (type.width != 32 && !hlsl_options.enable_16bit_types)
+ SPIRV_CROSS_THROW("Writing types other than 32-bit to RWByteAddressBuffer not yet supported, unless SM 6.2 and native 16-bit types are enabled.");
+
+ bool templated_store = hlsl_options.shader_model >= 62;
+
+ string template_expr;
+ if (templated_store)
+ template_expr = join("<", type_to_glsl(type), ">");
if (type.columns == 1 && !chain.row_major_matrix)
{
@@ -3962,13 +4032,27 @@ void CompilerHLSL::write_access_chain(const SPIRAccessChain &chain, uint32_t val
}
auto store_expr = write_access_chain_value(value, composite_chain, false);
- auto bitcast_op = bitcast_glsl_op(target_type, type);
- if (!bitcast_op.empty())
- store_expr = join(bitcast_op, "(", store_expr, ")");
- statement(chain.base, ".", store_op, "(", chain.dynamic_index, chain.static_index, ", ", store_expr, ");");
+
+ if (!templated_store)
+ {
+ auto bitcast_op = bitcast_glsl_op(target_type, type);
+ if (!bitcast_op.empty())
+ store_expr = join(bitcast_op, "(", store_expr, ")");
+ }
+ else
+ store_op = "Store";
+ statement(chain.base, ".", store_op, template_expr, "(", chain.dynamic_index, chain.static_index, ", ", store_expr, ");");
}
else if (type.columns == 1)
{
+ if (templated_store)
+ {
+ auto scalar_type = type;
+ scalar_type.vecsize = 1;
+ scalar_type.columns = 1;
+ template_expr = join("<", type_to_glsl(scalar_type), ">");
+ }
+
// Strided store.
for (uint32_t r = 0; r < type.vecsize; r++)
{
@@ -3980,10 +4064,14 @@ void CompilerHLSL::write_access_chain(const SPIRAccessChain &chain, uint32_t val
}
remove_duplicate_swizzle(store_expr);
- auto bitcast_op = bitcast_glsl_op(target_type, type);
- if (!bitcast_op.empty())
- store_expr = join(bitcast_op, "(", store_expr, ")");
- statement(chain.base, ".Store(", chain.dynamic_index, chain.static_index + chain.matrix_stride * r, ", ",
+ if (!templated_store)
+ {
+ auto bitcast_op = bitcast_glsl_op(target_type, type);
+ if (!bitcast_op.empty())
+ store_expr = join(bitcast_op, "(", store_expr, ")");
+ }
+
+ statement(chain.base, ".Store", template_expr, "(", chain.dynamic_index, chain.static_index + chain.matrix_stride * r, ", ",
store_expr, ");");
}
}
@@ -4008,18 +4096,39 @@ void CompilerHLSL::write_access_chain(const SPIRAccessChain &chain, uint32_t val
SPIRV_CROSS_THROW("Unknown vector size.");
}
+ if (templated_store)
+ {
+ store_op = "Store";
+ auto vector_type = type;
+ vector_type.columns = 1;
+ template_expr = join("<", type_to_glsl(vector_type), ">");
+ }
+
for (uint32_t c = 0; c < type.columns; c++)
{
auto store_expr = join(write_access_chain_value(value, composite_chain, true), "[", c, "]");
- auto bitcast_op = bitcast_glsl_op(target_type, type);
- if (!bitcast_op.empty())
- store_expr = join(bitcast_op, "(", store_expr, ")");
- statement(chain.base, ".", store_op, "(", chain.dynamic_index, chain.static_index + c * chain.matrix_stride,
+
+ if (!templated_store)
+ {
+ auto bitcast_op = bitcast_glsl_op(target_type, type);
+ if (!bitcast_op.empty())
+ store_expr = join(bitcast_op, "(", store_expr, ")");
+ }
+
+ statement(chain.base, ".", store_op, template_expr, "(", chain.dynamic_index, chain.static_index + c * chain.matrix_stride,
", ", store_expr, ");");
}
}
else
{
+ if (templated_store)
+ {
+ auto scalar_type = type;
+ scalar_type.vecsize = 1;
+ scalar_type.columns = 1;
+ template_expr = join("<", type_to_glsl(scalar_type), ">");
+ }
+
for (uint32_t r = 0; r < type.vecsize; r++)
{
for (uint32_t c = 0; c < type.columns; c++)
@@ -4030,7 +4139,7 @@ void CompilerHLSL::write_access_chain(const SPIRAccessChain &chain, uint32_t val
auto bitcast_op = bitcast_glsl_op(target_type, type);
if (!bitcast_op.empty())
store_expr = join(bitcast_op, "(", store_expr, ")");
- statement(chain.base, ".Store(", chain.dynamic_index,
+ statement(chain.base, ".Store", template_expr, "(", chain.dynamic_index,
chain.static_index + c * (type.width / 8) + r * chain.matrix_stride, ", ", store_expr, ");");
}
}
@@ -5423,6 +5532,9 @@ void CompilerHLSL::validate_shader_model()
if (ir.addressing_model != AddressingModelLogical)
SPIRV_CROSS_THROW("Only Logical addressing model can be used with HLSL.");
+
+ if (hlsl_options.enable_16bit_types && hlsl_options.shader_model < 62)
+ SPIRV_CROSS_THROW("Need at least shader model 6.2 when enabling native 16-bit type support.");
}
string CompilerHLSL::compile()
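
From the API side, the new enable_16bit_types option pairs with the existing shader_model option, and validate_shader_model() above rejects the combination below SM 6.2. A minimal sketch of enabling it, assuming the usual SPIRV-Cross option accessors (get_hlsl_options/set_hlsl_options) and that enable_16bit_types is exposed on CompilerHLSL::Options as this patch implies:

#include <spirv_hlsl.hpp>

#include <cstdint>
#include <string>
#include <utility>
#include <vector>

// Cross-compile a SPIR-V module to HLSL with native 16-bit types.
std::string to_hlsl_native_16bit(std::vector<uint32_t> spirv_words)
{
	spirv_cross::CompilerHLSL compiler(std::move(spirv_words));

	auto opts = compiler.get_hlsl_options();
	opts.shader_model = 62;         // SM 6.2 required; lower models throw in validate_shader_model()
	opts.enable_16bit_types = true; // emit half/int16_t/uint16_t instead of min16float/min16int/min16uint
	compiler.set_hlsl_options(opts);

	return compiler.compile();
}

Note that the resulting HLSL must then be compiled with DXC's -enable-16bit-types flag for the native 16-bit types to be accepted.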