Updated spirv-cross.

Бранимир Караџић 2020-07-03 18:29:29 -07:00
parent 4996129fd0
commit a2e11d9c1c
12 changed files with 239 additions and 83 deletions

View File

@@ -617,6 +617,7 @@ static void print_version()
static void print_help_backend()
{
// clang-format off
fprintf(stderr, "\nSelect backend:\n"
"\tBy default, OpenGL-style GLSL is the target, with #version and GLSL/ESSL information inherited from the SPIR-V module if present.\n"
"\t[--vulkan-semantics] or [-V]:\n\t\tEmit Vulkan GLSL instead of plain GLSL. Makes use of Vulkan-only features to match SPIR-V.\n"
@@ -625,10 +626,12 @@ static void print_help_backend()
"\t[--reflect]:\n\t\tEmit JSON reflection.\n"
"\t[--cpp]:\n\t\tDEPRECATED. Emits C++ code.\n"
);
// clang-format on
}
static void print_help_glsl()
{
// clang-format off
fprintf(stderr, "\nGLSL options:\n"
"\t[--es]:\n\t\tForce ESSL.\n"
"\t[--no-es]:\n\t\tForce desktop GLSL.\n"
@@ -669,10 +672,12 @@ static void print_help_glsl()
"\t[--remap-variable-type <variable_name> <new_variable_type>]:\n\t\tRemaps a variable type based on name.\n"
"\t\tPrimary use case is supporting external samplers in ESSL for video rendering on Android where you could remap a texture to a YUV one.\n"
);
// clang-format on
}
static void print_help_hlsl()
{
// clang-format off
fprintf(stderr, "\nHLSL options:\n"
"\t[--shader-model]:\n\t\tEnables a specific shader model, e.g. --shader-model 50 for SM 5.0.\n"
"\t[--hlsl-enable-compat]:\n\t\tAllow point size and point coord to be used, even if they won't work as expected.\n"
@@ -691,10 +696,12 @@ static void print_help_hlsl()
"\t\tOtherwise, TEXCOORD# is used as semantics, where # is location.\n"
"\t[--hlsl-enable-16bit-types]:\n\t\tEnables native use of half/int16_t/uint16_t and ByteAddressBuffer interaction with these types. Requires SM 6.2.\n"
);
// clang-format on
}
static void print_help_msl()
{
// clang-format off
fprintf(stderr, "\nMSL options:\n"
"\t[--msl-version <MMmmpp>]:\n\t\tUses a specific MSL version, e.g. --msl-version 20100 for MSL 2.1.\n"
"\t[--msl-capture-output]:\n\t\tWrites geometry varyings to a buffer instead of as stage-outputs.\n"
@@ -743,10 +750,12 @@ static void print_help_msl()
"\t\t<format> can be 'u16', 'u8', or 'other', to indicate a 16-bit unsigned integer, 8-bit unsigned integer, "
"or other-typed variable. <size> is the vector length of the variable, which must be greater than or equal to that declared in the shader.\n"
"\t\tUseful if shader stage interfaces don't match up, as pipeline creation might otherwise fail.\n");
// clang-format on
}
static void print_help_common()
{
// clang-format off
fprintf(stderr, "\nCommon options:\n"
"\t[--entry name]:\n\t\tUse a specific entry point. By default, the first entry point in the module is used.\n"
"\t[--stage <stage (vert, frag, geom, tesc, tese comp)>]:\n\t\tForces use of a certain shader stage.\n"
@@ -763,10 +772,12 @@ static void print_help_common()
"\t\tHLSL/MSL: Rewrites [-w, w] Z range (GL) to D3D/Metal/Vulkan-style [0, w].\n"
"\t[--flip-vert-y]:\n\t\tInverts gl_Position.y (or equivalent) at the end of a vertex shader. This is equivalent to using negative viewport height.\n"
);
// clang-format on
}
static void print_help_obscure()
{
// clang-format off
fprintf(stderr, "\nObscure options:\n"
"\tThese options are not meant to be used on a regular basis. They have some occasional uses in the test suite.\n"
@@ -778,12 +789,14 @@ static void print_help_obscure()
"\t[--flatten-multidimensional-arrays]:\n\t\tDo not support multi-dimensional arrays and flatten them to one dimension.\n"
"\t[--cpp-interface-name <name>]:\n\t\tEmit a specific class name in C++ codegen.\n"
);
// clang-format on
}
static void print_help()
{
print_version();
// clang-format off
fprintf(stderr, "Usage: spirv-cross <...>\n"
"\nBasic:\n"
"\t[SPIR-V file]\n"
@@ -791,6 +804,7 @@ static void print_help()
"\t[--dump-resources]:\n\t\tPrints a basic reflection of the SPIR-V module along with other output.\n"
"\t[--help]:\n\t\tPrints this help message.\n"
);
// clang-format on
print_help_backend();
print_help_common();
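As a usage illustration (not part of the diff): the options documented in the help text above combine into invocations like the following, where the input file name is hypothetical and the flags are taken verbatim from the help strings.

    spirv-cross shader.spv --es --stage frag --entry main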

View File

@@ -262,6 +262,29 @@ inline std::string convert_to_string(double t, char locale_radix_point)
return buf;
}
template <typename T>
struct ValueSaver
{
explicit ValueSaver(T &current_)
: current(current_)
, saved(current_)
{
}
void release()
{
current = saved;
}
~ValueSaver()
{
release();
}
T &current;
T saved;
};
#if defined(__clang__) || defined(__GNUC__)
#pragma GCC diagnostic pop
#elif defined(_MSC_VER)
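ValueSaver, added above, is a small RAII guard: it snapshots a value at construction and writes the snapshot back in release(), which the destructor also calls, so the value is restored even on early returns or exceptions. A minimal usage sketch (variable name hypothetical):

    uint32_t loop_level = 0;
    {
        ValueSaver<uint32_t> saver(loop_level); // saved = 0
        loop_level = 3;                         // mutate freely inside the scope
    } // destructor calls release(), restoring loop_level to 0

emit_block_chain later in this commit uses exactly this pattern with current_loop_level.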
@@ -699,6 +722,9 @@ struct SPIRExpression : IVariant
// Used by access chain Store and Load since we read multiple expressions in this case.
SmallVector<ID> implied_read_expressions;
// The expression was emitted at a certain scope. Lets us track when an expression read means multiple reads.
uint32_t emitted_loop_level = 0;
SPIRV_CROSS_DECLARE_CLONE(SPIRExpression)
};
@@ -1069,7 +1095,8 @@ struct SPIRConstant : IVariant
type = TypeConstant
};
union Constant {
union Constant
{
uint32_t u32;
int32_t i32;
float f32;
@@ -1107,7 +1134,8 @@ struct SPIRConstant : IVariant
int e = (u16_value >> 10) & 0x1f;
int m = (u16_value >> 0) & 0x3ff;
union {
union
{
float f32;
uint32_t u32;
} u;

View File

@@ -282,14 +282,14 @@ StorageClass Compiler::get_expression_effective_storage_class(uint32_t ptr)
// An access chain or forwarded OpLoads from such access chains
// will generally have the storage class of the underlying variable, but if the load was not forwarded
// we have lost any address space qualifiers.
bool forced_temporary = ir.ids[ptr].get_type() == TypeExpression &&
!get<SPIRExpression>(ptr).access_chain &&
bool forced_temporary = ir.ids[ptr].get_type() == TypeExpression && !get<SPIRExpression>(ptr).access_chain &&
(forced_temporaries.count(ptr) != 0 || forwarded_temporaries.count(ptr) == 0);
if (var && !forced_temporary)
{
// Normalize SSBOs to StorageBuffer here.
if (var->storage == StorageClassUniform && has_decoration(get<SPIRType>(var->basetype).self, DecorationBufferBlock))
if (var->storage == StorageClassUniform &&
has_decoration(get<SPIRType>(var->basetype).self, DecorationBufferBlock))
return StorageClassStorageBuffer;
else
return var->storage;
@@ -4674,3 +4674,8 @@ bool Compiler::flush_phi_required(BlockID from, BlockID to) const
return true;
return false;
}
void Compiler::add_loop_level()
{
current_loop_level++;
}

View File

@@ -513,9 +513,22 @@ protected:
SPIRFunction *current_function = nullptr;
SPIRBlock *current_block = nullptr;
uint32_t current_loop_level = 0;
std::unordered_set<VariableID> active_interface_variables;
bool check_active_interface_variables = false;
void add_loop_level();
void set_initializers(SPIRExpression &e)
{
e.emitted_loop_level = current_loop_level;
}
template <typename T>
void set_initializers(const T &)
{
}
// If our IDs are out of range here as part of opcodes, throw instead of
// undefined behavior.
template <typename T, typename... P>
@@ -524,6 +537,7 @@ protected:
ir.add_typed_id(static_cast<Types>(T::type), id);
auto &var = variant_set<T>(ir.ids[id], std::forward<P>(args)...);
var.self = id;
set_initializers(var);
return var;
}
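The two set_initializers overloads above are compile-time dispatch: when set<T>() constructs a SPIRExpression, overload resolution prefers the exact non-template overload and stamps emitted_loop_level; for every other variant type the template catch-all is selected and does nothing. A reduced, self-contained sketch of the mechanism (names hypothetical):

    #include <cstdint>

    struct Expr { uint32_t level = 0; };
    struct Block {};

    uint32_t current_level = 2;
    void init(Expr &e) { e.level = current_level; } // exact match: preferred for Expr
    template <typename T> void init(const T &) {}   // catch-all: no-op for other types

    int main()
    {
        Expr e;  init(e); // picks the non-template overload, e.level == 2
        Block b; init(b); // picks the template no-op
    }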

View File

@@ -63,7 +63,8 @@ public:
private:
#if defined(_MSC_VER) && _MSC_VER < 1900
// MSVC 2013 workarounds, sigh ...
union {
union
{
char aligned_char[sizeof(T) * N];
double dummy_aligner;
} u;

View File

@@ -341,6 +341,7 @@ void CompilerGLSL::reset()
statement_count = 0;
indent = 0;
current_loop_level = 0;
}
void CompilerGLSL::remap_pls_variables()
@@ -539,6 +540,9 @@ string CompilerGLSL::compile()
backend.supports_extensions = true;
backend.use_array_constructor = true;
if (is_legacy_es())
backend.support_case_fallthrough = false;
// Scan the SPIR-V to find trivial uses of extensions.
fixup_type_alias();
reorder_type_alias();
@@ -2952,12 +2956,13 @@ void CompilerGLSL::emit_resources()
{
auto *type = &id.get<SPIRType>();
bool is_natural_struct =
type->basetype == SPIRType::Struct && type->array.empty() && !type->pointer &&
(!has_decoration(type->self, DecorationBlock) && !has_decoration(type->self, DecorationBufferBlock));
bool is_natural_struct = type->basetype == SPIRType::Struct && type->array.empty() && !type->pointer &&
(!has_decoration(type->self, DecorationBlock) &&
!has_decoration(type->self, DecorationBufferBlock));
// Special case, ray payload and hit attribute blocks are not really blocks, just regular structs.
if (type->basetype == SPIRType::Struct && type->pointer && has_decoration(type->self, DecorationBlock) &&
if (type->basetype == SPIRType::Struct && type->pointer &&
has_decoration(type->self, DecorationBlock) &&
(type->storage == StorageClassRayPayloadNV || type->storage == StorageClassIncomingRayPayloadNV ||
type->storage == StorageClassHitAttributeNV))
{
@@ -3432,9 +3437,8 @@ string CompilerGLSL::to_composite_constructor_expression(uint32_t id, bool uses_
{
auto &type = expression_type(id);
bool reroll_array = !type.array.empty() &&
(!backend.array_is_value_type ||
(uses_buffer_offset && !backend.buffer_offset_array_is_value_type));
bool reroll_array = !type.array.empty() && (!backend.array_is_value_type ||
(uses_buffer_offset && !backend.buffer_offset_array_is_value_type));
if (reroll_array)
{
@@ -4547,6 +4551,17 @@ bool CompilerGLSL::expression_suppresses_usage_tracking(uint32_t id) const
return suppressed_usage_tracking.count(id) != 0;
}
bool CompilerGLSL::expression_read_implies_multiple_reads(uint32_t id) const
{
auto *expr = maybe_get<SPIRExpression>(id);
if (!expr)
return false;
// If we're emitting code at a deeper loop level than when we emitted the expression,
// we're probably reading the same expression over and over.
return current_loop_level > expr->emitted_loop_level;
}
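The practical effect (an illustrative GLSL sketch, not actual output from this commit): an expression forwarded from outside a loop but read inside it now counts as read multiple times, which pushes it over the forwarding threshold and hoists it into a temporary instead of re-evaluating it every iteration. Identifier names are hypothetical.

    // Without the loop-level check: the forwarded expression is inlined per iteration.
    for (int i = 0; i < 4; i++)
        sum += dot(a, b) * c[i];

    // With it: the read inside the loop bumps the usage count, forcing a hoisted temporary.
    float _20 = dot(a, b);
    for (int i = 0; i < 4; i++)
        sum += _20 * c[i];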
SPIRExpression &CompilerGLSL::emit_op(uint32_t result_type, uint32_t result_id, const string &rhs, bool forwarding,
bool suppress_usage_tracking)
{
@@ -4702,7 +4717,8 @@ bool CompilerGLSL::emit_complex_bitcast(uint32_t result_type, uint32_t id, uint3
if (output_type.basetype == SPIRType::Half && input_type.basetype == SPIRType::Float && input_type.vecsize == 1)
expr = join("unpackFloat2x16(floatBitsToUint(", to_unpacked_expression(op0), "))");
else if (output_type.basetype == SPIRType::Float && input_type.basetype == SPIRType::Half && input_type.vecsize == 2)
else if (output_type.basetype == SPIRType::Float && input_type.basetype == SPIRType::Half &&
input_type.vecsize == 2)
expr = join("uintBitsToFloat(packFloat2x16(", to_unpacked_expression(op0), "))");
else
return false;
@@ -5368,8 +5384,8 @@ static inline bool image_opcode_is_sample_no_dref(Op op)
}
}
void CompilerGLSL::emit_sparse_feedback_temporaries(uint32_t result_type_id, uint32_t id,
uint32_t &feedback_id, uint32_t &texel_id)
void CompilerGLSL::emit_sparse_feedback_temporaries(uint32_t result_type_id, uint32_t id, uint32_t &feedback_id,
uint32_t &texel_id)
{
// Need to allocate two temporaries.
if (options.es)
@@ -5421,7 +5437,8 @@ void CompilerGLSL::emit_texture_op(const Instruction &i, bool sparse)
if (sparse)
{
statement(to_expression(sparse_code_id), " = ", expr, ";");
expr = join(type_to_glsl(return_type), "(", to_expression(sparse_code_id), ", ", to_expression(sparse_texel_id), ")");
expr = join(type_to_glsl(return_type), "(", to_expression(sparse_code_id), ", ", to_expression(sparse_texel_id),
")");
forward = true;
inherited_expressions.clear();
}
@@ -5708,8 +5725,7 @@ bool CompilerGLSL::expression_is_non_value_type_array(uint32_t ptr)
return false;
auto &backed_type = get<SPIRType>(var->basetype);
return !backend.buffer_offset_array_is_value_type &&
backed_type.basetype == SPIRType::Struct &&
return !backend.buffer_offset_array_is_value_type && backed_type.basetype == SPIRType::Struct &&
has_member_decoration(backed_type.self, 0, DecorationOffset);
}
@@ -5853,7 +5869,8 @@ string CompilerGLSL::to_function_args(const TextureFunctionArguments &args, bool
// The IR can give us more components than we need, so chop them off as needed.
auto swizzle_expr = swizzle(args.coord_components, expression_type(args.coord).vecsize);
// Only enclose the UV expression if needed.
auto coord_expr = (*swizzle_expr == '\0') ? to_expression(args.coord) : (to_enclosed_expression(args.coord) + swizzle_expr);
auto coord_expr =
(*swizzle_expr == '\0') ? to_expression(args.coord) : (to_enclosed_expression(args.coord) + swizzle_expr);
// texelFetch only takes int, not uint.
auto &coord_type = expression_type(args.coord);
@@ -5878,7 +5895,8 @@ string CompilerGLSL::to_function_args(const TextureFunctionArguments &args, bool
forward = forward && should_forward(args.dref);
// SPIR-V splits dref and coordinate.
if (args.base.is_gather || args.coord_components == 4) // GLSL also splits the arguments in two. Same for textureGather.
if (args.base.is_gather ||
args.coord_components == 4) // GLSL also splits the arguments in two. Same for textureGather.
{
farg_str += ", ";
farg_str += to_expression(args.coord);
@@ -6994,7 +7012,7 @@ string CompilerGLSL::builtin_to_glsl(BuiltIn builtin, StorageClass storage)
default:
SPIRV_CROSS_THROW(
"Cannot implement gl_InstanceID in Vulkan GLSL. This shader was created with GL semantics.");
"Cannot implement gl_InstanceID in Vulkan GLSL. This shader was created with GL semantics.");
}
}
if (!options.es && options.version < 140)
@@ -8169,6 +8187,13 @@ void CompilerGLSL::track_expression_read(uint32_t id)
auto &v = expression_usage_counts[id];
v++;
// If we create an expression outside a loop,
// but access it inside a loop, we're implicitly reading it multiple times.
// If the expression in question is expensive, we should hoist it out to avoid relying on loop-invariant code motion
// working inside the backend compiler.
if (expression_read_implies_multiple_reads(id))
v++;
if (v >= 2)
{
//if (v == 2)
@@ -8435,8 +8460,8 @@ string CompilerGLSL::build_composite_combiner(uint32_t return_type, const uint32
if (i)
op += ", ";
bool uses_buffer_offset = type.basetype == SPIRType::Struct &&
has_member_decoration(type.self, i, DecorationOffset);
bool uses_buffer_offset =
type.basetype == SPIRType::Struct && has_member_decoration(type.self, i, DecorationOffset);
subop = to_composite_constructor_expression(elems[i], uses_buffer_offset);
}
@@ -9683,7 +9708,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
{
auto &type = get<SPIRType>(ops[0]);
if (type.vecsize > 1)
GLSL_UFOP(not);
GLSL_UFOP(not );
else
GLSL_UOP(!);
break;
@@ -10471,7 +10496,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
uint32_t operands = ops[4];
if (operands != ImageOperandsSampleMask || length != 6)
SPIRV_CROSS_THROW(
"Multisampled image used in OpImageRead, but unexpected operand mask was used.");
"Multisampled image used in OpImageRead, but unexpected operand mask was used.");
uint32_t samples = ops[5];
statement(to_expression(sparse_code_id), " = sparseImageLoadARB(", to_expression(ops[2]), ", ",
@@ -10482,8 +10507,8 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
statement(to_expression(sparse_code_id), " = sparseImageLoadARB(", to_expression(ops[2]), ", ",
coord_expr, ", ", to_expression(sparse_texel_id), ");");
}
imgexpr = join(type_to_glsl(get<SPIRType>(result_type)), "(",
to_expression(sparse_code_id), ", ", to_expression(sparse_texel_id), ")");
imgexpr = join(type_to_glsl(get<SPIRType>(result_type)), "(", to_expression(sparse_code_id), ", ",
to_expression(sparse_texel_id), ")");
}
else
{
@@ -13000,6 +13025,10 @@ void CompilerGLSL::emit_block_chain(SPIRBlock &block)
bool skip_direct_branch = false;
bool emitted_loop_header_variables = false;
bool force_complex_continue_block = false;
ValueSaver<uint32_t> loop_level_saver(current_loop_level);
if (block.merge == SPIRBlock::MergeLoop)
add_loop_level();
emit_hoisted_temporaries(block.declare_temporary);
@@ -13272,6 +13301,8 @@ void CompilerGLSL::emit_block_chain(SPIRBlock &block)
// Order does not matter.
if (!injected_block)
block_declaration_order.push_back(block.default_block);
else if (is_legacy_es())
SPIRV_CROSS_THROW("Default case label fallthrough to other case label is not supported in ESSL 1.0.");
case_constructs[block.default_block] = {};
}
@@ -13282,12 +13313,26 @@ void CompilerGLSL::emit_block_chain(SPIRBlock &block)
return is_unsigned_case ? convert_to_string(literal) : convert_to_string(int32_t(literal));
};
const auto to_legacy_case_label = [&](uint32_t condition, const SmallVector<uint32_t> &labels,
const char *suffix) -> string {
string ret;
size_t count = labels.size();
for (size_t i = 0; i < count; i++)
{
if (i)
ret += " || ";
ret += join(count > 1 ? "(" : "", to_enclosed_expression(condition), " == ", labels[i], suffix,
count > 1 ? ")" : "");
}
return ret;
};
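For instance (illustrative), with condition x, labels {1, 2}, and suffix "u", to_legacy_case_label yields roughly:

    (x == 1u) || (x == 2u)

which the legacy path below emits inside if/else chains in place of case labels.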
// We need to deal with a complex scenario for OpPhi. If we have case-fallthrough and Phi in the picture,
// we need to flush phi nodes outside the switch block in a branch,
// and skip any Phi handling inside the case label to make fall-through work as expected.
// This kind of code-gen is super awkward and it's a last resort. Normally we would want to handle this
// inside the case label if at all possible.
for (size_t i = 1; i < num_blocks; i++)
for (size_t i = 1; backend.support_case_fallthrough && i < num_blocks; i++)
{
if (flush_phi_required(block.self, block_declaration_order[i]) &&
flush_phi_required(block_declaration_order[i - 1], block_declaration_order[i]))
@@ -13341,8 +13386,18 @@ void CompilerGLSL::emit_block_chain(SPIRBlock &block)
// This is buggy on FXC, so just emit the logical equivalent of a do { } while(false), which is more idiomatic.
bool degenerate_switch = block.default_block != block.merge_block && block.cases.empty();
if (degenerate_switch)
statement("do");
if (degenerate_switch || is_legacy_es())
{
// ESSL 1.0 is not guaranteed to support do/while.
if (is_legacy_es())
{
uint32_t counter = statement_count;
statement("for (int SPIRV_Cross_Dummy", counter, " = 0; SPIRV_Cross_Dummy", counter,
" < 1; SPIRV_Cross_Dummy", counter, "++)");
}
else
statement("do");
}
else
{
emit_block_hints(block);
@@ -13359,14 +13414,27 @@ void CompilerGLSL::emit_block_chain(SPIRBlock &block)
{
// Default case.
if (!degenerate_switch)
statement("default:");
{
if (is_legacy_es())
statement("else");
else
statement("default:");
}
}
else
{
for (auto &case_literal : literals)
if (is_legacy_es())
{
// The case label value must be sign-extended properly in SPIR-V, so we can assume 32-bit values here.
statement("case ", to_case_label(case_literal, unsigned_case), label_suffix, ":");
statement((i ? "else " : ""), "if (", to_legacy_case_label(block.condition, literals, label_suffix),
")");
}
else
{
for (auto &case_literal : literals)
{
// The case label value must be sign-extended properly in SPIR-V, so we can assume 32-bit values here.
statement("case ", to_case_label(case_literal, unsigned_case), label_suffix, ":");
}
}
}
@@ -13401,7 +13469,12 @@ void CompilerGLSL::emit_block_chain(SPIRBlock &block)
statement("case ", to_case_label(case_literal, unsigned_case), label_suffix, ":");
if (block.default_block == block.next_block)
statement("default:");
{
if (is_legacy_es())
statement("else");
else
statement("default:");
}
begin_scope();
flush_phi(block.self, block.next_block);
@@ -13410,7 +13483,7 @@ void CompilerGLSL::emit_block_chain(SPIRBlock &block)
}
}
if (degenerate_switch)
if (degenerate_switch && !is_legacy_es())
end_scope_decl("while(false)");
else
end_scope();
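Putting the legacy-ES pieces together: ESSL 1.0 has no switch statement and do/while is not guaranteed, so the switch is lowered to a single-iteration for loop (which gives break a legal target) wrapping an if/else chain built from to_legacy_case_label. A sketch of the emitted shape (identifiers illustrative, not actual output):

    for (int SPIRV_Cross_Dummy12 = 0; SPIRV_Cross_Dummy12 < 1; SPIRV_Cross_Dummy12++)
    {
        if (x == 0) { /* case 0 */ }
        else if ((x == 1) || (x == 2)) { /* cases 1 and 2 */ }
        else { /* default */ }
    }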
@@ -13492,7 +13565,11 @@ void CompilerGLSL::emit_block_chain(SPIRBlock &block)
// If we hit this case, we're dealing with an unconditional branch, which means we will output
// that block after this. If we had selection merge, we already flushed phi variables.
if (block.merge != SPIRBlock::MergeSelection)
{
flush_phi(block.self, block.next_block);
// For a direct branch, need to remember to invalidate expressions in the next linear block instead.
get<SPIRBlock>(block.next_block).invalidate_expressions = block.invalidate_expressions;
}
// For switch fallthrough cases, we terminate the chain here, but we still need to handle Phi.
if (!current_emitting_switch_fallthrough)
@@ -13546,6 +13623,8 @@ void CompilerGLSL::emit_block_chain(SPIRBlock &block)
else
end_scope();
loop_level_saver.release();
// We cannot break out of two loops at once, so don't check for break; here.
// Using block.self as the "from" block isn't quite right, but it has the same scope
// and dominance structure, so it's fine.

View File

@@ -316,7 +316,8 @@ protected:
};
virtual std::string to_function_args(const TextureFunctionArguments &args, bool *p_forward);
void emit_sparse_feedback_temporaries(uint32_t result_type_id, uint32_t id, uint32_t &feedback_id, uint32_t &texel_id);
void emit_sparse_feedback_temporaries(uint32_t result_type_id, uint32_t id, uint32_t &feedback_id,
uint32_t &texel_id);
uint32_t get_sparse_feedback_texel_id(uint32_t id) const;
virtual void emit_buffer_block(const SPIRVariable &type);
virtual void emit_push_constant_block(const SPIRVariable &var);
@@ -549,6 +550,7 @@ protected:
void emit_unary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op);
bool expression_is_forwarded(uint32_t id) const;
bool expression_suppresses_usage_tracking(uint32_t id) const;
bool expression_read_implies_multiple_reads(uint32_t id) const;
SPIRExpression &emit_op(uint32_t result_type, uint32_t result_id, const std::string &rhs, bool forward_rhs,
bool suppress_usage_tracking = false);

View File

@@ -1486,7 +1486,8 @@ void CompilerHLSL::emit_resources()
{
static const char *qualifiers[] = { "", "unorm ", "snorm " };
static const char *vecsizes[] = { "", "2", "3", "4" };
emit_texture_size_variants(required_texture_size_variants.uav[norm][comp], vecsizes[comp], true, qualifiers[norm]);
emit_texture_size_variants(required_texture_size_variants.uav[norm][comp], vecsizes[comp], true,
qualifiers[norm]);
}
}
@@ -1849,15 +1850,16 @@ void CompilerHLSL::emit_resources()
}
}
void CompilerHLSL::emit_texture_size_variants(uint64_t variant_mask, const char *vecsize_qualifier, bool uav, const char *type_qualifier)
void CompilerHLSL::emit_texture_size_variants(uint64_t variant_mask, const char *vecsize_qualifier, bool uav,
const char *type_qualifier)
{
if (variant_mask == 0)
return;
static const char *types[QueryTypeCount] = { "float", "int", "uint" };
static const char *dims[QueryDimCount] = { "Texture1D", "Texture1DArray", "Texture2D", "Texture2DArray",
"Texture3D", "Buffer", "TextureCube", "TextureCubeArray",
"Texture2DMS", "Texture2DMSArray" };
"Texture3D", "Buffer", "TextureCube", "TextureCubeArray",
"Texture2DMS", "Texture2DMSArray" };
static const bool has_lod[QueryDimCount] = { true, true, true, true, true, false, true, true, false, false };
@@ -1880,8 +1882,8 @@ void CompilerHLSL::emit_texture_size_variants(uint64_t variant_mask, const char
continue;
statement(ret_types[index], " SPIRV_Cross_", (uav ? "image" : "texture"), "Size(", (uav ? "RW" : ""),
dims[index], "<", type_qualifier, types[type_index], vecsize_qualifier,
"> Tex, ", (uav ? "" : "uint Level, "), "out uint Param)");
dims[index], "<", type_qualifier, types[type_index], vecsize_qualifier, "> Tex, ",
(uav ? "" : "uint Level, "), "out uint Param)");
begin_scope();
statement(ret_types[index], " ret;");
switch (return_arguments[index])
@@ -2971,7 +2973,8 @@ void CompilerHLSL::emit_texture_op(const Instruction &i, bool sparse)
{
for (uint32_t size = coord_components; size < 3; ++size)
coord_filler += ", 0.0";
coord_expr = "float4(" + coord_expr + coord_filler + ", " + to_extract_component_expression(coord, coord_components) + ")";
coord_expr = "float4(" + coord_expr + coord_filler + ", " +
to_extract_component_expression(coord, coord_components) + ")";
modifier_count++;
}
@@ -3685,7 +3688,8 @@ void CompilerHLSL::read_access_chain(string *expr, const string &lhs, const SPIR
return;
}
else if (type.width != 32 && !hlsl_options.enable_16bit_types)
SPIRV_CROSS_THROW("Reading types other than 32-bit from ByteAddressBuffer not yet supported, unless SM 6.2 and native 16-bit types are enabled.");
SPIRV_CROSS_THROW("Reading types other than 32-bit from ByteAddressBuffer not yet supported, unless SM 6.2 and "
"native 16-bit types are enabled.");
bool templated_load = hlsl_options.shader_model >= 62;
string load_expr;
@@ -3741,8 +3745,8 @@ void CompilerHLSL::read_access_chain(string *expr, const string &lhs, const SPIR
for (uint32_t r = 0; r < type.vecsize; r++)
{
load_expr +=
join(chain.base, ".Load", template_expr, "(", chain.dynamic_index, chain.static_index + r * chain.matrix_stride, ")");
load_expr += join(chain.base, ".Load", template_expr, "(", chain.dynamic_index,
chain.static_index + r * chain.matrix_stride, ")");
if (r + 1 < type.vecsize)
load_expr += ", ";
}
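For a sense of the output (illustrative HLSL, buffer name and offsets hypothetical): reading a three-component row-major matrix column with SM 6.2 templated loads produces one Load per row, each advanced by the matrix stride, comma-joined into something shaped like:

    buf.Load<float>(off + 0), buf.Load<float>(off + 8), buf.Load<float>(off + 16)

which the surrounding code then wraps into the destination vector expression.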
@@ -4018,7 +4022,8 @@ void CompilerHLSL::write_access_chain(const SPIRAccessChain &chain, uint32_t val
return;
}
else if (type.width != 32 && !hlsl_options.enable_16bit_types)
SPIRV_CROSS_THROW("Writing types other than 32-bit to RWByteAddressBuffer not yet supported, unless SM 6.2 and native 16-bit types are enabled.");
SPIRV_CROSS_THROW("Writing types other than 32-bit to RWByteAddressBuffer not yet supported, unless SM 6.2 and "
"native 16-bit types are enabled.");
bool templated_store = hlsl_options.shader_model >= 62;
@@ -4057,7 +4062,8 @@ void CompilerHLSL::write_access_chain(const SPIRAccessChain &chain, uint32_t val
}
else
store_op = "Store";
statement(chain.base, ".", store_op, template_expr, "(", chain.dynamic_index, chain.static_index, ", ", store_expr, ");");
statement(chain.base, ".", store_op, template_expr, "(", chain.dynamic_index, chain.static_index, ", ",
store_expr, ");");
}
else if (type.columns == 1)
{
@@ -4087,8 +4093,8 @@ void CompilerHLSL::write_access_chain(const SPIRAccessChain &chain, uint32_t val
store_expr = join(bitcast_op, "(", store_expr, ")");
}
statement(chain.base, ".Store", template_expr, "(", chain.dynamic_index, chain.static_index + chain.matrix_stride * r, ", ",
store_expr, ");");
statement(chain.base, ".Store", template_expr, "(", chain.dynamic_index,
chain.static_index + chain.matrix_stride * r, ", ", store_expr, ");");
}
}
else if (!chain.row_major_matrix)
@@ -4131,8 +4137,8 @@ void CompilerHLSL::write_access_chain(const SPIRAccessChain &chain, uint32_t val
store_expr = join(bitcast_op, "(", store_expr, ")");
}
statement(chain.base, ".", store_op, template_expr, "(", chain.dynamic_index, chain.static_index + c * chain.matrix_stride,
", ", store_expr, ");");
statement(chain.base, ".", store_op, template_expr, "(", chain.dynamic_index,
chain.static_index + c * chain.matrix_stride, ", ", store_expr, ");");
}
}
else
@@ -4359,13 +4365,14 @@ void CompilerHLSL::emit_atomic(const uint32_t *ops, uint32_t length, spv::Op op)
if (data_type.storage == StorageClassImage || !chain)
{
statement(atomic_op, "(", to_expression(ops[0]), ", ", to_expression(ops[3]), ", ", to_expression(tmp_id), ");");
statement(atomic_op, "(", to_expression(ops[0]), ", ", to_expression(ops[3]), ", ", to_expression(tmp_id),
");");
}
else
{
// RWByteAddress buffer is always uint in its underlying type.
statement(chain->base, ".", atomic_op, "(", chain->dynamic_index, chain->static_index, ", ", to_expression(ops[3]),
", ", to_expression(tmp_id), ");");
statement(chain->base, ".", atomic_op, "(", chain->dynamic_index, chain->static_index, ", ",
to_expression(ops[3]), ", ", to_expression(tmp_id), ");");
}
}
else
@@ -5456,8 +5463,9 @@ void CompilerHLSL::require_texture_query_variant(uint32_t var_id)
}
auto norm_state = image_format_to_normalized_state(type.image.format);
auto &variant = uav ? required_texture_size_variants.uav[uint32_t(norm_state)][image_format_to_components(type.image.format) - 1] :
required_texture_size_variants.srv;
auto &variant = uav ? required_texture_size_variants
.uav[uint32_t(norm_state)][image_format_to_components(type.image.format) - 1] :
required_texture_size_variants.srv;
uint64_t mask = 1ull << bit;
if ((variant & mask) == 0)
@@ -5683,8 +5691,8 @@ bool CompilerHLSL::is_hlsl_force_storage_buffer_as_uav(ID id) const
const uint32_t desc_set = get_decoration(id, spv::DecorationDescriptorSet);
const uint32_t binding = get_decoration(id, spv::DecorationBinding);
return (force_uav_buffer_bindings.find({desc_set, binding}) != force_uav_buffer_bindings.end());
return (force_uav_buffer_bindings.find({ desc_set, binding }) != force_uav_buffer_bindings.end());
}
void CompilerHLSL::set_hlsl_force_storage_buffer_as_uav(uint32_t desc_set, uint32_t binding)

View File

@@ -291,7 +291,8 @@ private:
} required_texture_size_variants;
void require_texture_query_variant(uint32_t var_id);
void emit_texture_size_variants(uint64_t variant_mask, const char *vecsize_qualifier, bool uav, const char *type_qualifier);
void emit_texture_size_variants(uint64_t variant_mask, const char *vecsize_qualifier, bool uav,
const char *type_qualifier);
enum TextureQueryVariantDim
{

View File

@@ -3806,20 +3806,21 @@ void CompilerMSL::emit_custom_functions()
{
// Unfortunately we cannot template on the address space, so combinatorial explosion it is.
static const char *function_name_tags[] = {
"FromConstantToStack", "FromConstantToThreadGroup", "FromStackToStack",
"FromStackToThreadGroup", "FromThreadGroupToStack", "FromThreadGroupToThreadGroup",
"FromDeviceToDevice", "FromConstantToDevice", "FromStackToDevice",
"FromThreadGroupToDevice", "FromDeviceToStack", "FromDeviceToThreadGroup",
"FromConstantToStack", "FromConstantToThreadGroup", "FromStackToStack",
"FromStackToThreadGroup", "FromThreadGroupToStack", "FromThreadGroupToThreadGroup",
"FromDeviceToDevice", "FromConstantToDevice", "FromStackToDevice",
"FromThreadGroupToDevice", "FromDeviceToStack", "FromDeviceToThreadGroup",
};
static const char *src_address_space[] = {
"constant", "constant", "thread const", "thread const", "threadgroup const", "threadgroup const",
"device const", "constant", "thread const", "threadgroup const", "device const", "device const",
"constant", "constant", "thread const", "thread const",
"threadgroup const", "threadgroup const", "device const", "constant",
"thread const", "threadgroup const", "device const", "device const",
};
static const char *dst_address_space[] = {
"thread", "threadgroup", "thread", "threadgroup", "thread", "threadgroup",
"device", "device", "device", "device", "thread", "threadgroup",
"device", "device", "device", "device", "thread", "threadgroup",
};
for (uint32_t variant = 0; variant < 12; variant++)
@@ -6281,8 +6282,7 @@ void CompilerMSL::emit_instruction(const Instruction &instruction)
args.lod = lod;
statement(join(to_expression(img_id), ".write(",
remap_swizzle(store_type, texel_type.vecsize, to_expression(texel_id)), ", ",
CompilerMSL::to_function_args(args, &forward),
");"));
CompilerMSL::to_function_args(args, &forward), ");"));
if (p_var && variable_storage_is_aliased(*p_var))
flush_all_aliased_variables();
@@ -7866,9 +7866,10 @@ string CompilerMSL::to_function_args(const TextureFunctionArguments &args, bool
if (is_cube_fetch)
farg_str += ", uint(" + to_extract_component_expression(args.coord, 2) + ")";
else
farg_str += ", uint(spvCubemapTo2DArrayFace(" + tex_coords + ").z) + (uint(" +
round_fp_tex_coords(to_extract_component_expression(args.coord, alt_coord_component), coord_is_fp) +
") * 6u)";
farg_str +=
", uint(spvCubemapTo2DArrayFace(" + tex_coords + ").z) + (uint(" +
round_fp_tex_coords(to_extract_component_expression(args.coord, alt_coord_component), coord_is_fp) +
") * 6u)";
add_spv_func_and_recompile(SPVFuncImplCubemapTo2DArrayFace);
}
@@ -7896,7 +7897,8 @@ string CompilerMSL::to_function_args(const TextureFunctionArguments &args, bool
else
farg_str +=
", uint(" +
round_fp_tex_coords(to_extract_component_expression(args.coord, alt_coord_component), coord_is_fp) + ")";
round_fp_tex_coords(to_extract_component_expression(args.coord, alt_coord_component), coord_is_fp) +
")";
}
}
@@ -7910,8 +7912,8 @@ string CompilerMSL::to_function_args(const TextureFunctionArguments &args, bool
string dref_expr;
if (args.base.is_proj)
dref_expr =
join(to_enclosed_expression(args.dref), " / ", to_extract_component_expression(args.coord, alt_coord_component));
dref_expr = join(to_enclosed_expression(args.dref), " / ",
to_extract_component_expression(args.coord, alt_coord_component));
else
dref_expr = to_expression(args.dref);
@@ -8130,7 +8132,8 @@ void CompilerMSL::emit_sampled_image_op(uint32_t result_type, uint32_t result_id
set<SPIRCombinedImageSampler>(result_id, result_type, image_id, samp_id);
}
string CompilerMSL::to_texture_op(const Instruction &i, bool sparse, bool *forward, SmallVector<uint32_t> &inherited_expressions)
string CompilerMSL::to_texture_op(const Instruction &i, bool sparse, bool *forward,
SmallVector<uint32_t> &inherited_expressions)
{
auto *ops = stream(i);
uint32_t result_type_id = ops[0];
@@ -10145,8 +10148,8 @@ uint32_t CompilerMSL::get_metal_resource_index(SPIRVariable &var, SPIRType::Base
bool CompilerMSL::type_is_msl_framebuffer_fetch(const SPIRType &type) const
{
return type.basetype == SPIRType::Image && type.image.dim == DimSubpassData &&
msl_options.is_ios() && msl_options.ios_use_framebuffer_fetch_subpasses;
return type.basetype == SPIRType::Image && type.image.dim == DimSubpassData && msl_options.is_ios() &&
msl_options.ios_use_framebuffer_fetch_subpasses;
}
string CompilerMSL::argument_decl(const SPIRFunction::Parameter &arg)

View File

@@ -889,7 +889,8 @@ protected:
bool descriptor_set_is_argument_buffer(uint32_t desc_set) const;
uint32_t get_target_components_for_fragment_location(uint32_t location) const;
uint32_t build_extended_vector_type(uint32_t type_id, uint32_t components, SPIRType::BaseType basetype = SPIRType::Unknown);
uint32_t build_extended_vector_type(uint32_t type_id, uint32_t components,
SPIRType::BaseType basetype = SPIRType::Unknown);
bool suppress_missing_prototypes = false;

View File

@@ -305,8 +305,8 @@ void CompilerReflection::emit_types()
else if (type_is_reference(type))
{
if (!naturally_emit_type(this->get<SPIRType>(type.parent_type)) &&
find(physical_pointee_types.begin(), physical_pointee_types.end(),
type.parent_type) == physical_pointee_types.end())
find(physical_pointee_types.begin(), physical_pointee_types.end(), type.parent_type) ==
physical_pointee_types.end())
{
physical_pointee_types.push_back(type.parent_type);
}