mirror of https://github.com/bkaradzic/bgfx
Updated spirv-cross.
This commit is contained in:
parent 3e82b5bd67
commit c9cebd1b23
3rdparty/spirv-cross/main.cpp

@@ -677,6 +677,7 @@ struct CLIArguments
     bool msl_check_discarded_frag_stores = false;
     bool msl_sample_dref_lod_array_as_grad = false;
     bool msl_runtime_array_rich_descriptor = false;
+    bool msl_replace_recursive_inputs = false;
     const char *msl_combined_sampler_suffix = nullptr;
     bool glsl_emit_push_constant_as_ubo = false;
     bool glsl_emit_ubo_as_plain_uniforms = false;
@@ -867,6 +868,7 @@ static void print_help_msl()
     "\t\tUses same values as Metal MTLArgumentBuffersTier enumeration (0 = Tier1, 1 = Tier2).\n"
     "\t\tNOTE: Setting this value no longer enables msl-argument-buffers implicitly.\n"
     "\t[--msl-runtime-array-rich-descriptor]:\n\t\tWhen declaring a runtime array of SSBOs, declare an array of {ptr, len} pairs to support OpArrayLength.\n"
+    "\t[--msl-replace-recursive-inputs]:\n\t\tWorks around a Metal 3.1 regression bug, which causes an infinite recursion crash during Metal's analysis of an entry point input structure that itself contains internal recursion.\n"
     "\t[--msl-texture-buffer-native]:\n\t\tEnable native support for texel buffers. Otherwise, it is emulated as a normal texture.\n"
     "\t[--msl-framebuffer-fetch]:\n\t\tImplement subpass inputs with frame buffer fetch.\n"
     "\t\tEmits [[color(N)]] inputs in fragment stage.\n"
@@ -1233,6 +1235,7 @@ static string compile_iteration(const CLIArguments &args, std::vector<uint32_t>
         msl_opts.sample_dref_lod_array_as_grad = args.msl_sample_dref_lod_array_as_grad;
         msl_opts.ios_support_base_vertex_instance = true;
         msl_opts.runtime_array_rich_descriptor = args.msl_runtime_array_rich_descriptor;
+        msl_opts.replace_recursive_inputs = args.msl_replace_recursive_inputs;
         msl_comp->set_msl_options(msl_opts);
         for (auto &v : args.msl_discrete_descriptor_sets)
             msl_comp->add_discrete_descriptor_set(v);
@@ -1792,6 +1795,8 @@ static int main_inner(int argc, char *argv[])
     });
     cbs.add("--msl-runtime-array-rich-descriptor",
             [&args](CLIParser &) { args.msl_runtime_array_rich_descriptor = true; });
+    cbs.add("--msl-replace-recursive-inputs",
+            [&args](CLIParser &) { args.msl_replace_recursive_inputs = true; });
     cbs.add("--extension", [&args](CLIParser &parser) { args.extensions.push_back(parser.next_string()); });
     cbs.add("--rename-entry-point", [&args](CLIParser &parser) {
         auto old_name = parser.next_string();
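The two new MSL switches are plain boolean plumbing from the CLI into CompilerMSL's option struct. A minimal sketch of that flow, with CLIArguments and MSLOptions reduced to stand-ins for the real types:

#include <cassert>

// Reduced stand-ins for the real CLIArguments and CompilerMSL options types.
struct CLIArguments
{
    bool msl_runtime_array_rich_descriptor = false;
    bool msl_replace_recursive_inputs = false;
};

struct MSLOptions
{
    bool runtime_array_rich_descriptor = false;
    bool replace_recursive_inputs = false;
};

// Mirrors the copy performed in compile_iteration() above.
static MSLOptions to_msl_options(const CLIArguments &args)
{
    MSLOptions opts;
    opts.runtime_array_rich_descriptor = args.msl_runtime_array_rich_descriptor;
    opts.replace_recursive_inputs = args.msl_replace_recursive_inputs;
    return opts;
}

int main()
{
    CLIArguments args;
    args.msl_replace_recursive_inputs = true; // As set by --msl-replace-recursive-inputs.
    assert(to_msl_options(args).replace_recursive_inputs);
}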
3rdparty/spirv-cross/spirv_cross.cpp

@@ -1048,20 +1048,25 @@ ShaderResources Compiler::get_shader_resources(const unordered_set<VariableID> *
     return res;
 }
 
-bool Compiler::type_is_block_like(const SPIRType &type) const
+bool Compiler::type_is_top_level_block(const SPIRType &type) const
 {
     if (type.basetype != SPIRType::Struct)
         return false;
+    return has_decoration(type.self, DecorationBlock) || has_decoration(type.self, DecorationBufferBlock);
+}
 
-    if (has_decoration(type.self, DecorationBlock) || has_decoration(type.self, DecorationBufferBlock))
-    {
+bool Compiler::type_is_block_like(const SPIRType &type) const
+{
+    if (type_is_top_level_block(type))
         return true;
-    }
 
-    // Block-like types may have Offset decorations.
-    for (uint32_t i = 0; i < uint32_t(type.member_types.size()); i++)
-        if (has_member_decoration(type.self, i, DecorationOffset))
-            return true;
+    if (type.basetype == SPIRType::Struct)
+    {
+        // Block-like types may have Offset decorations.
+        for (uint32_t i = 0; i < uint32_t(type.member_types.size()); i++)
+            if (has_member_decoration(type.self, i, DecorationOffset))
+                return true;
+    }
 
     return false;
 }
@@ -5460,6 +5465,36 @@ void Compiler::analyze_interlocked_resource_usage()
     }
 }
 
+// Helper function
+bool Compiler::check_internal_recursion(const SPIRType &type, std::unordered_set<uint32_t> &checked_ids)
+{
+    if (type.basetype != SPIRType::Struct)
+        return false;
+
+    if (checked_ids.count(type.self))
+        return true;
+
+    // Recurse into struct members
+    bool is_recursive = false;
+    checked_ids.insert(type.self);
+    uint32_t mbr_cnt = uint32_t(type.member_types.size());
+    for (uint32_t mbr_idx = 0; !is_recursive && mbr_idx < mbr_cnt; mbr_idx++)
+    {
+        uint32_t mbr_type_id = type.member_types[mbr_idx];
+        auto &mbr_type = get<SPIRType>(mbr_type_id);
+        is_recursive |= check_internal_recursion(mbr_type, checked_ids);
+    }
+    checked_ids.erase(type.self);
+    return is_recursive;
+}
+
+// Return whether the struct type contains a structural recursion nested somewhere within its content.
+bool Compiler::type_contains_recursion(const SPIRType &type)
+{
+    std::unordered_set<uint32_t> checked_ids;
+    return check_internal_recursion(type, checked_ids);
+}
+
 bool Compiler::type_is_array_of_pointers(const SPIRType &type) const
 {
     if (!type_is_top_level_array(type))
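check_internal_recursion is a depth-first search that keeps the set of struct IDs on the current descent path; revisiting an ID already on the path means the type nests itself. A self-contained sketch of the same idea over a plain id-to-members map (TypeGraph below is an illustrative stand-in for the SPIRType hierarchy, not the real IR):

#include <cassert>
#include <cstdint>
#include <unordered_map>
#include <unordered_set>
#include <vector>

// Hypothetical stand-in for the type system: struct id -> member type ids.
using TypeGraph = std::unordered_map<uint32_t, std::vector<uint32_t>>;

static bool contains_recursion(const TypeGraph &graph, uint32_t type_id,
                               std::unordered_set<uint32_t> &on_path)
{
    auto it = graph.find(type_id);
    if (it == graph.end())
        return false; // Non-struct leaf, cannot recurse.

    if (on_path.count(type_id))
        return true; // Already on the current DFS path -> structural recursion.

    on_path.insert(type_id);
    bool recursive = false;
    for (uint32_t member : it->second)
        recursive = recursive || contains_recursion(graph, member, on_path);
    on_path.erase(type_id); // Backtrack, mirroring checked_ids.erase(type.self).
    return recursive;
}

int main()
{
    // struct A { B b; }; struct B { A a; }; -> mutual recursion.
    TypeGraph graph = { { 1, { 2 } }, { 2, { 1 } }, { 3, { 4 } }, { 4, {} } };
    std::unordered_set<uint32_t> on_path;
    assert(contains_recursion(graph, 1, on_path));  // A -> B -> A
    assert(!contains_recursion(graph, 3, on_path)); // Plain nesting, no cycle.
}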
3rdparty/spirv-cross/spirv_cross.hpp

@@ -1145,11 +1145,14 @@ protected:
     bool has_extended_member_decoration(uint32_t type, uint32_t index, ExtendedDecorations decoration) const;
     void unset_extended_member_decoration(uint32_t type, uint32_t index, ExtendedDecorations decoration);
 
+    bool check_internal_recursion(const SPIRType &type, std::unordered_set<uint32_t> &checked_ids);
+    bool type_contains_recursion(const SPIRType &type);
     bool type_is_array_of_pointers(const SPIRType &type) const;
     bool type_is_top_level_physical_pointer(const SPIRType &type) const;
     bool type_is_top_level_pointer(const SPIRType &type) const;
     bool type_is_top_level_array(const SPIRType &type) const;
     bool type_is_block_like(const SPIRType &type) const;
+    bool type_is_top_level_block(const SPIRType &type) const;
     bool type_is_opaque_value(const SPIRType &type) const;
 
     bool reflection_ssbo_instance_name_is_significant() const;
3rdparty/spirv-cross/spirv_glsl.cpp

@@ -553,7 +553,7 @@ void CompilerGLSL::find_static_extensions()
             SPIRV_CROSS_THROW("GL_EXT_buffer_reference requires ESSL 320.");
         else if (!options.es && options.version < 450)
             SPIRV_CROSS_THROW("GL_EXT_buffer_reference requires GLSL 450.");
-        require_extension_internal("GL_EXT_buffer_reference");
+        require_extension_internal("GL_EXT_buffer_reference2");
     }
     else if (ir.addressing_model != AddressingModelLogical)
     {
@@ -2465,6 +2465,10 @@ void CompilerGLSL::emit_buffer_block_native(const SPIRVariable &var)
         i++;
     }
 
+    // Don't declare empty blocks in GLSL, this is not allowed.
+    if (type_is_empty(type) && !backend.supports_empty_struct)
+        statement("int empty_struct_member;");
+
     // var.self can be used as a backup name for the block name,
     // so we need to make sure we don't disturb the name here on a recompile.
     // It will need to be reset if we have to recompile.
@@ -2803,6 +2807,9 @@ string CompilerGLSL::constant_value_macro_name(uint32_t id)
 void CompilerGLSL::emit_specialization_constant_op(const SPIRConstantOp &constant)
 {
     auto &type = get<SPIRType>(constant.basetype);
+    // This will break. It is bogus and should not be legal.
+    if (type_is_top_level_block(type))
+        return;
     add_resource_name(constant.self);
     auto name = to_name(constant.self);
     statement("const ", variable_decl(type, name), " = ", constant_op_expression(constant), ";");
@@ -2832,6 +2839,10 @@ void CompilerGLSL::emit_constant(const SPIRConstant &constant)
 {
     auto &type = get<SPIRType>(constant.constant_type);
 
+    // This will break. It is bogus and should not be legal.
+    if (type_is_top_level_block(type))
+        return;
+
     SpecializationConstant wg_x, wg_y, wg_z;
     ID workgroup_size_id = get_work_group_size_specialization_constants(wg_x, wg_y, wg_z);
 
@@ -3581,6 +3592,10 @@ void CompilerGLSL::emit_resources()
     {
         auto &id = ir.ids[id_];
 
+        // Skip declaring any bogus constants or undefs which use block types.
+        // We don't declare block types directly, so this will never work.
+        // Should not be legal SPIR-V, so this is considered a workaround.
+
         if (id.get_type() == TypeConstant)
         {
             auto &c = id.get<SPIRConstant>();
@@ -3638,6 +3653,10 @@ void CompilerGLSL::emit_resources()
     if (type.basetype == SPIRType::Void)
         return;
 
+    // This will break. It is bogus and should not be legal.
+    if (type_is_top_level_block(type))
+        return;
+
     string initializer;
     if (options.force_zero_initialized_variables && type_can_zero_initialize(type))
         initializer = join(" = ", to_zero_initialized_expression(undef.basetype));
@@ -10139,10 +10158,11 @@ string CompilerGLSL::access_chain_internal(uint32_t base, const uint32_t *indice
             if (!pending_array_enclose)
                 expr += "]";
         }
-        // Some builtins are arrays in SPIR-V but not in other languages, e.g. gl_SampleMask[] is an array in SPIR-V but not in Metal.
-        // By throwing away the index, we imply the index was 0, which it must be for gl_SampleMask.
-        else if (!builtin_translates_to_nonarray(BuiltIn(get_decoration(base, DecorationBuiltIn))))
+        else if (index_is_literal || !builtin_translates_to_nonarray(BuiltIn(get_decoration(base, DecorationBuiltIn))))
         {
+            // Some builtins are arrays in SPIR-V but not in other languages, e.g. gl_SampleMask[] is an array in SPIR-V but not in Metal.
+            // By throwing away the index, we imply the index was 0, which it must be for gl_SampleMask.
+            // For literal indices we are working on composites, so we ignore this since we have already converted to proper array.
             append_index(index, is_literal);
         }
 
@@ -17718,6 +17738,25 @@ void CompilerGLSL::cast_from_variable_load(uint32_t source_id, std::string &expr
         expr = bitcast_expression(expr_type, expected_type, expr);
 }
 
+SPIRType::BaseType CompilerGLSL::get_builtin_basetype(BuiltIn builtin, SPIRType::BaseType default_type)
+{
+    // TODO: Fill in for more builtins.
+    switch (builtin)
+    {
+    case BuiltInLayer:
+    case BuiltInPrimitiveId:
+    case BuiltInViewportIndex:
+    case BuiltInFragStencilRefEXT:
+    case BuiltInSampleMask:
+    case BuiltInPrimitiveShadingRateKHR:
+    case BuiltInShadingRateKHR:
+        return SPIRType::Int;
+
+    default:
+        return default_type;
+    }
+}
+
 void CompilerGLSL::cast_to_variable_store(uint32_t target_id, std::string &expr, const SPIRType &expr_type)
 {
     auto *var = maybe_get_backing_variable(target_id);
@@ -17729,24 +17768,7 @@ void CompilerGLSL::cast_to_variable_store(uint32_t target_id, std::string &expr,
         return;
 
     auto builtin = static_cast<BuiltIn>(get_decoration(target_id, DecorationBuiltIn));
-    auto expected_type = expr_type.basetype;
-
-    // TODO: Fill in for more builtins.
-    switch (builtin)
-    {
-    case BuiltInLayer:
-    case BuiltInPrimitiveId:
-    case BuiltInViewportIndex:
-    case BuiltInFragStencilRefEXT:
-    case BuiltInSampleMask:
-    case BuiltInPrimitiveShadingRateKHR:
-    case BuiltInShadingRateKHR:
-        expected_type = SPIRType::Int;
-        break;
-
-    default:
-        break;
-    }
+    auto expected_type = get_builtin_basetype(builtin, expr_type.basetype);
 
     if (expected_type != expr_type.basetype)
     {
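get_builtin_basetype centralizes the rule that some GLSL builtins are always signed even when the module declares the data as unsigned; cast_to_variable_store then casts only on mismatch. A toy version of that decision, with two builtin names hard-coded for illustration (not the real SPIRV-Cross API):

#include <cassert>
#include <string>

enum class BaseType { Int, UInt };

// Hypothetical mirror of CompilerGLSL::get_builtin_basetype for two builtins.
static BaseType builtin_basetype(const std::string &builtin, BaseType default_type)
{
    if (builtin == "gl_Layer" || builtin == "gl_SampleMask")
        return BaseType::Int; // GLSL mandates signed integers here.
    return default_type;
}

static std::string cast_for_store(const std::string &builtin, BaseType expr_type,
                                  const std::string &expr)
{
    BaseType expected = builtin_basetype(builtin, expr_type);
    if (expected == expr_type)
        return expr; // Signedness already matches; store directly.
    return (expected == BaseType::Int ? "int(" : "uint(") + expr + ")";
}

int main()
{
    // A module that declares gl_Layer data as uint needs a cast on store.
    assert(cast_for_store("gl_Layer", BaseType::UInt, "layer") == "int(layer)");
    assert(cast_for_store("gl_Layer", BaseType::Int, "layer") == "layer");
}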
3rdparty/spirv-cross/spirv_glsl.hpp

@@ -993,6 +993,7 @@ protected:
     // Builtins in GLSL are always specific signedness, but the SPIR-V can declare them
     // as either unsigned or signed.
     // Sometimes we will need to automatically perform casts on load and store to make this work.
+    virtual SPIRType::BaseType get_builtin_basetype(spv::BuiltIn builtin, SPIRType::BaseType default_type);
     virtual void cast_to_variable_store(uint32_t target_id, std::string &expr, const SPIRType &expr_type);
     virtual void cast_from_variable_load(uint32_t source_id, std::string &expr, const SPIRType &expr_type);
     void unroll_array_from_complex_load(uint32_t target_id, uint32_t source_id, std::string &expr);
3rdparty/spirv-cross/spirv_hlsl.cpp

@@ -1149,29 +1149,48 @@ void CompilerHLSL::emit_builtin_variables()
     builtins.merge_or(active_output_builtins);
 
     std::unordered_map<uint32_t, ID> builtin_to_initializer;
-    ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
-        if (!is_builtin_variable(var) || var.storage != StorageClassOutput || !var.initializer)
-            return;
-
-        auto *c = this->maybe_get<SPIRConstant>(var.initializer);
-        if (!c)
+
+    // We need to declare sample mask with the same type that module declares it.
+    // Sample mask is somewhat special in that SPIR-V has an array, and we can copy that array, so we need to
+    // match sign.
+    SPIRType::BaseType sample_mask_in_basetype = SPIRType::Void;
+    SPIRType::BaseType sample_mask_out_basetype = SPIRType::Void;
+
+    ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
+        if (!is_builtin_variable(var))
             return;
 
         auto &type = this->get<SPIRType>(var.basetype);
-        if (type.basetype == SPIRType::Struct)
+        auto builtin = BuiltIn(get_decoration(var.self, DecorationBuiltIn));
+
+        if (var.storage == StorageClassInput && builtin == BuiltInSampleMask)
+            sample_mask_in_basetype = type.basetype;
+        else if (var.storage == StorageClassOutput && builtin == BuiltInSampleMask)
+            sample_mask_out_basetype = type.basetype;
+
+        if (var.initializer && var.storage == StorageClassOutput)
         {
-            uint32_t member_count = uint32_t(type.member_types.size());
-            for (uint32_t i = 0; i < member_count; i++)
+            auto *c = this->maybe_get<SPIRConstant>(var.initializer);
+            if (!c)
+                return;
+
+            if (type.basetype == SPIRType::Struct)
             {
-                if (has_member_decoration(type.self, i, DecorationBuiltIn))
+                uint32_t member_count = uint32_t(type.member_types.size());
+                for (uint32_t i = 0; i < member_count; i++)
                 {
-                    builtin_to_initializer[get_member_decoration(type.self, i, DecorationBuiltIn)] =
-                        c->subconstants[i];
+                    if (has_member_decoration(type.self, i, DecorationBuiltIn))
+                    {
+                        builtin_to_initializer[get_member_decoration(type.self, i, DecorationBuiltIn)] =
+                            c->subconstants[i];
+                    }
                 }
             }
+            else if (has_decoration(var.self, DecorationBuiltIn))
+            {
+                builtin_to_initializer[builtin] = var.initializer;
+            }
         }
-        else if (has_decoration(var.self, DecorationBuiltIn))
-            builtin_to_initializer[get_decoration(var.self, DecorationBuiltIn)] = var.initializer;
     });
 
     // Emit global variables for the interface variables which are statically used by the shader.
@@ -1288,7 +1307,11 @@ void CompilerHLSL::emit_builtin_variables()
             break;
 
         case BuiltInSampleMask:
-            type = "int";
+            if (active_input_builtins.get(BuiltInSampleMask))
+                type = sample_mask_in_basetype == SPIRType::UInt ? "uint" : "int";
+            else
+                type = sample_mask_out_basetype == SPIRType::UInt ? "uint" : "int";
             array_size = 1;
             break;
 
         case BuiltInPrimitiveId:
@@ -1322,7 +1345,11 @@ void CompilerHLSL::emit_builtin_variables()
         // declared the input variable and we need to add the output one now.
         if (builtin == BuiltInSampleMask && storage == StorageClassInput && this->active_output_builtins.get(i))
         {
-            statement("static ", type, " ", this->builtin_to_glsl(builtin, StorageClassOutput), init_expr, ";");
+            type = sample_mask_out_basetype == SPIRType::UInt ? "uint" : "int";
+            if (array_size)
+                statement("static ", type, " ", this->builtin_to_glsl(builtin, StorageClassOutput), "[", array_size, "]", init_expr, ";");
+            else
+                statement("static ", type, " ", this->builtin_to_glsl(builtin, StorageClassOutput), init_expr, ";");
         }
     });
 
@@ -1536,6 +1563,18 @@ void CompilerHLSL::replace_illegal_names()
     CompilerGLSL::replace_illegal_names();
 }
 
+SPIRType::BaseType CompilerHLSL::get_builtin_basetype(BuiltIn builtin, SPIRType::BaseType default_type)
+{
+    switch (builtin)
+    {
+    case BuiltInSampleMask:
+        // We declare sample mask array with module type, so always use default_type here.
+        return default_type;
+    default:
+        return CompilerGLSL::get_builtin_basetype(builtin, default_type);
+    }
+}
+
 void CompilerHLSL::emit_resources()
 {
     auto &execution = get_entry_point();
@@ -3121,6 +3160,10 @@ void CompilerHLSL::emit_hlsl_entry_point()
             statement(builtin, " = int(stage_input.", builtin, ");");
             break;
 
+        case BuiltInSampleMask:
+            statement(builtin, "[0] = stage_input.", builtin, ";");
+            break;
+
         case BuiltInNumWorkgroups:
         case BuiltInPointCoord:
         case BuiltInSubgroupSize:
@@ -3295,6 +3338,10 @@ void CompilerHLSL::emit_hlsl_entry_point()
                       cull, "];");
             break;
 
+        case BuiltInSampleMask:
+            statement("stage_output.gl_SampleMask = gl_SampleMask[0];");
+            break;
+
         default:
         {
             auto builtin_expr = builtin_to_glsl(static_cast<BuiltIn>(i), StorageClassOutput);
@@ -4696,7 +4743,12 @@ void CompilerHLSL::emit_load(const Instruction &instruction)
 void CompilerHLSL::write_access_chain_array(const SPIRAccessChain &chain, uint32_t value,
                                             const SmallVector<uint32_t> &composite_chain)
 {
-    auto &type = get<SPIRType>(chain.basetype);
+    auto *ptype = &get<SPIRType>(chain.basetype);
+    while (ptype->pointer)
+    {
+        ptype = &get<SPIRType>(ptype->basetype);
+    }
+    auto &type = *ptype;
 
     // Need to use a reserved identifier here since it might shadow an identifier in the access chain input or other loops.
     auto ident = get_unique_identifier();
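The write_access_chain_array fix walks off every pointer level before using the type, since chain.basetype may name a pointer (or pointer-to-pointer) rather than the value type. The same walk over a simplified type table:

#include <cassert>
#include <vector>

// Simplified stand-in for SPIRType: pointer types chain to their pointee via
// an index; value types have pointer == false.
struct Type
{
    bool pointer = false;
    int pointee = -1; // Index of the pointee type when pointer == true.
};

static const Type &strip_pointers(const std::vector<Type> &types, int id)
{
    const Type *t = &types[id];
    while (t->pointer)
        t = &types[t->pointee]; // Mirrors: ptype = &get<SPIRType>(ptype->basetype);
    return *t;
}

int main()
{
    // types[0]: value type, types[1]: pointer-to-0, types[2]: pointer-to-1.
    std::vector<Type> types = { {}, { true, 0 }, { true, 1 } };
    assert(!strip_pointers(types, 2).pointer); // Double pointer resolves to the value type.
}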
@@ -6746,11 +6798,6 @@ void CompilerHLSL::set_hlsl_force_storage_buffer_as_uav(uint32_t desc_set, uint3
     force_uav_buffer_bindings.insert(pair);
 }
 
-bool CompilerHLSL::builtin_translates_to_nonarray(spv::BuiltIn builtin) const
-{
-    return (builtin == BuiltInSampleMask);
-}
-
 bool CompilerHLSL::is_user_type_structured(uint32_t id) const
 {
     if (hlsl_options.preserve_structured_buffers)
3rdparty/spirv-cross/spirv_hlsl.hpp

@@ -126,7 +126,7 @@ public:
         // By default, a readonly storage buffer will be declared as ByteAddressBuffer (SRV) instead.
         // Alternatively, use set_hlsl_force_storage_buffer_as_uav to specify individually.
         bool force_storage_buffer_as_uav = false;
-
+
         // Forces any storage image type marked as NonWritable to be considered an SRV instead.
         // For this to work with function call parameters, NonWritable must be considered to be part of the type system
         // so that NonWritable image arguments are also translated to Texture rather than RWTexture.
@@ -290,6 +290,8 @@ private:
     const char *to_storage_qualifiers_glsl(const SPIRVariable &var) override;
     void replace_illegal_names() override;
 
+    SPIRType::BaseType get_builtin_basetype(spv::BuiltIn builtin, SPIRType::BaseType default_type) override;
+
     bool is_hlsl_force_storage_buffer_as_uav(ID id) const;
 
     Options hlsl_options;
@@ -400,9 +402,6 @@ private:
         bool used = false;
     } base_vertex_info;
 
-    // Returns true for BuiltInSampleMask because gl_SampleMask[] is an array in SPIR-V, but SV_Coverage is a scalar in HLSL.
-    bool builtin_translates_to_nonarray(spv::BuiltIn builtin) const override;
-
     // Returns true if the specified ID has a UserTypeGOOGLE decoration for StructuredBuffer or RWStructuredBuffer resources.
     bool is_user_type_structured(uint32_t id) const override;
3rdparty/spirv-cross/spirv_msl.cpp

@@ -1898,9 +1898,18 @@ void CompilerMSL::extract_global_variables_from_function(uint32_t func_id, std::
         case OpAtomicOr:
         case OpAtomicXor:
+        case OpImageWrite:
+        {
+            if (needs_frag_discard_checks())
+                added_arg_ids.insert(builtin_helper_invocation_id);
+            uint32_t ptr = 0;
+            if (op == OpAtomicStore || op == OpImageWrite)
+                ptr = ops[0];
+            else
+                ptr = ops[2];
             if (global_var_ids.find(ptr) != global_var_ids.end())
                 added_arg_ids.insert(ptr);
             break;
         }
 
         // Emulate texture2D atomic operations
         case OpImageTexelPointer:
@@ -4902,9 +4911,18 @@ void CompilerMSL::emit_store_statement(uint32_t lhs_expression, uint32_t rhs_exp
 
     bool transpose = lhs_e && lhs_e->need_transpose;
 
-    // No physical type remapping, and no packed type, so can just emit a store directly.
-    if (!lhs_remapped_type && !lhs_packed_type)
+    if (has_decoration(lhs_expression, DecorationBuiltIn) &&
+        BuiltIn(get_decoration(lhs_expression, DecorationBuiltIn)) == BuiltInSampleMask &&
+        type_is_top_level_array(type))
     {
+        // Storing an array to SampleMask, have to remove the array-ness before storing.
+        statement(to_expression(lhs_expression), " = ", to_enclosed_unpacked_expression(rhs_expression), "[0];");
+        register_write(lhs_expression);
+    }
+    else if (!lhs_remapped_type && !lhs_packed_type)
+    {
+        // No physical type remapping, and no packed type, so can just emit a store directly.
+
         // We might not be dealing with remapped physical types or packed types,
         // but we might be doing a clean store to a row-major matrix.
         // In this case, we just flip transpose states, and emit the store, a transpose must be in the RHS expression, if any.
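On the MSL side, emit_store_statement now special-cases stores of a whole SPIR-V array to gl_SampleMask, which Metal models as a scalar: the store indexes element 0 instead. A toy of just that branch, not the real SPIRV-Cross API:

#include <cassert>
#include <string>

// Toy mirror of the new special case: storing a SPIR-V array to the sample
// mask drops the array-ness by indexing element 0.
static std::string emit_store(const std::string &lhs, const std::string &rhs,
                              bool lhs_is_sample_mask, bool rhs_is_array)
{
    if (lhs_is_sample_mask && rhs_is_array)
        return lhs + " = " + rhs + "[0];"; // Metal's sample mask is a scalar.
    return lhs + " = " + rhs + ";";
}

int main()
{
    assert(emit_store("gl_SampleMask", "_entry_out", true, true) ==
           "gl_SampleMask = _entry_out[0];");
}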
@@ -7290,7 +7308,8 @@ void CompilerMSL::emit_custom_functions()
             end_scope_decl();
             statement("");
 
-            if (msl_options.runtime_array_rich_descriptor)
+            if (msl_options.runtime_array_rich_descriptor &&
+                spv_function_implementations.count(SPVFuncImplVariableSizedDescriptor) != 0)
             {
                 statement("template<typename T>");
                 statement("struct spvDescriptorArray<device T*>");
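The tightened guard only emits the spvDescriptorArray<device T*> specialization when the variable-sized-descriptor helper is actually needed. The rich-descriptor layout itself pairs each device pointer with a length so OpArrayLength can be answered; a minimal analogue of that element layout, with field names that are assumptions rather than the generated MSL:

#include <cassert>
#include <cstdint>

// Minimal analogue of the {ptr, len} element implied by
// --msl-runtime-array-rich-descriptor; field names are assumptions.
template <typename T>
struct RichDescriptor
{
    T *ptr;       // Device address of the SSBO.
    uint32_t len; // Byte size, so a runtime array length can be derived.
};

int main()
{
    uint32_t payload[4] = {};
    RichDescriptor<uint32_t> desc{payload, sizeof(payload)};
    // OpArrayLength on a runtime array of uint -> len / sizeof(element).
    assert(desc.len / sizeof(uint32_t) == 4);
}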
@@ -9365,6 +9384,7 @@ void CompilerMSL::emit_instruction(const Instruction &instruction)
     case OpRayQueryInitializeKHR:
     {
         flush_variable_declaration(ops[0]);
+        register_write(ops[0]);
         add_spv_func_and_recompile(SPVFuncImplRayQueryIntersectionParams);
 
         statement(to_expression(ops[0]), ".reset(", "ray(", to_expression(ops[4]), ", ", to_expression(ops[6]), ", ",
@@ -9375,6 +9395,7 @@ void CompilerMSL::emit_instruction(const Instruction &instruction)
     case OpRayQueryProceedKHR:
     {
         flush_variable_declaration(ops[0]);
+        register_write(ops[2]);
         emit_op(ops[0], ops[1], join(to_expression(ops[2]), ".next()"), false);
         break;
     }
@@ -9435,14 +9456,17 @@ void CompilerMSL::emit_instruction(const Instruction &instruction)
     }
     case OpRayQueryConfirmIntersectionKHR:
         flush_variable_declaration(ops[0]);
+        register_write(ops[0]);
         statement(to_expression(ops[0]), ".commit_triangle_intersection();");
         break;
     case OpRayQueryGenerateIntersectionKHR:
         flush_variable_declaration(ops[0]);
+        register_write(ops[0]);
         statement(to_expression(ops[0]), ".commit_bounding_box_intersection(", to_expression(ops[1]), ");");
         break;
     case OpRayQueryTerminateKHR:
         flush_variable_declaration(ops[0]);
+        register_write(ops[0]);
         statement(to_expression(ops[0]), ".abort();");
         break;
 #undef MSL_RAY_QUERY_GET_OP
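The ray-query opcodes now call register_write() on the query object because proceed(), commit_*() and abort() mutate it: any forwarded expression still reading the old state must be invalidated. A toy model of that invalidation; SPIRV-Cross's real bookkeeping is richer:

#include <cassert>
#include <cstdint>
#include <string>
#include <unordered_map>

// Toy expression cache: id -> forwarded expression string.
struct ExprCache
{
    std::unordered_map<uint32_t, std::string> forwarded;

    // Analogous to register_write(id): a mutation makes any forwarded
    // expression for `id` stale, so drop it.
    void register_write(uint32_t id) { forwarded.erase(id); }
};

int main()
{
    ExprCache cache;
    cache.forwarded[7] = "rayQuery.next()"; // Cached result tied to the query object.
    cache.register_write(7);                // OpRayQueryConfirmIntersectionKHR mutates it.
    assert(cache.forwarded.find(7) == cache.forwarded.end());
}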
@@ -13247,8 +13271,13 @@ void CompilerMSL::entry_point_args_discrete_descriptors(string &ep_args)
     {
         if (!ep_args.empty())
             ep_args += ", ";
-        ep_args +=
-            get_argument_address_space(var) + " " + type_to_glsl(type) + "& " + to_restrict(var_id, true) + r.name;
+        ep_args += get_argument_address_space(var) + " ";
+
+        if (recursive_inputs.count(type.self))
+            ep_args += string("void* ") + to_restrict(var_id, true) + r.name + "_vp";
+        else
+            ep_args += type_to_glsl(type) + "& " + to_restrict(var_id, true) + r.name;
+
         ep_args += " [[buffer(" + convert_to_string(r.index) + ")";
         if (interlocked_resources.count(var_id))
             ep_args += ", raster_order_group(0)";
@@ -13431,6 +13460,19 @@ void CompilerMSL::fix_up_shader_inputs_outputs()
                 });
             }
         }
+
+        if (msl_options.replace_recursive_inputs && type_contains_recursion(type) &&
+            (var.storage == StorageClassUniform || var.storage == StorageClassUniformConstant ||
+             var.storage == StorageClassPushConstant || var.storage == StorageClassStorageBuffer))
+        {
+            recursive_inputs.insert(type.self);
+            entry_func.fixup_hooks_in.push_back([this, &type, &var, var_id]() {
+                auto addr_space = get_argument_address_space(var);
+                auto var_name = to_name(var_id);
+                statement(addr_space, " auto& ", to_restrict(var_id, true), var_name,
+                          " = *(", addr_space, " ", type_to_glsl(type), "*)", var_name, "_vp;");
+            });
+        }
     });
 
     // Builtin variables
@@ -17145,6 +17187,7 @@ void CompilerMSL::cast_from_variable_load(uint32_t source_id, std::string &expr,
     case BuiltInInstanceIndex:
     case BuiltInBaseInstance:
     case BuiltInBaseVertex:
+    case BuiltInSampleMask:
         expected_type = SPIRType::UInt;
         expected_width = 32;
         break;
@@ -17162,9 +17205,17 @@ void CompilerMSL::cast_from_variable_load(uint32_t source_id, std::string &expr,
         break;
     }
 
-    if (expected_type != expr_type.basetype)
+    if (type_is_top_level_array(expr_type) && builtin == BuiltInSampleMask)
     {
-        if (!expr_type.array.empty() && (builtin == BuiltInTessLevelInner || builtin == BuiltInTessLevelOuter))
+        // Needs special handling.
+        auto wrap_expr = join(type_to_glsl(expr_type), "({ ");
+        wrap_expr += join(type_to_glsl(get<SPIRType>(expr_type.parent_type)), "(", expr, ")");
+        wrap_expr += " })";
+        expr = std::move(wrap_expr);
+    }
+    else if (expected_type != expr_type.basetype)
+    {
+        if (type_is_top_level_array(expr_type) && (builtin == BuiltInTessLevelInner || builtin == BuiltInTessLevelOuter))
         {
             // Triggers when loading TessLevel directly as an array.
             // Need explicit padding + cast.
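For recursive inputs, the entry point binds the buffer as a plain void* (with the _vp suffix) and the fixup hook immediately rebinds it under its real type. A sketch that assembles the same statement string the hook emits, using illustrative values for the address space, type, and name:

#include <cassert>
#include <string>

// Assembles the rebinding statement emitted by the MSL fixup hook for a
// recursive input. addr_space/type/name are illustrative, not compiler state.
static std::string rebind_statement(const std::string &addr_space,
                                    const std::string &type,
                                    const std::string &name)
{
    return addr_space + " auto& " + name + " = *(" + addr_space + " " + type + "*)" + name + "_vp;";
}

int main()
{
    assert(rebind_statement("device", "Node", "node_buf") ==
           "device auto& node_buf = *(device Node*)node_buf_vp;");
}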
3rdparty/spirv-cross/spirv_msl.hpp

@@ -505,6 +505,13 @@ public:
         // Note: Only Apple's GPU compiler takes advantage of the lack of coherency, so make sure to test on Apple GPUs if you disable this.
         bool readwrite_texture_fences = true;
 
+        // Metal 3.1 introduced a Metal regression bug which causes infinite recursion during
+        // Metal's analysis of an entry point input structure that is itself recursive. Enabling
+        // this option will replace the recursive input declaration with a alternate variable of
+        // type void*, and then cast to the correct type at the top of the entry point function.
+        // The bug has been reported to Apple, and will hopefully be fixed in future releases.
+        bool replace_recursive_inputs = false;
+
         bool is_ios() const
         {
             return platform == iOS;
@@ -1194,6 +1201,7 @@ protected:
     SmallVector<uint32_t> buffer_aliases_discrete;
     std::unordered_set<uint32_t> atomic_image_vars; // Emulate texture2D atomic operations
     std::unordered_set<uint32_t> pull_model_inputs;
+    std::unordered_set<uint32_t> recursive_inputs;
 
     SmallVector<SPIRVariable *> entry_point_bindings;