Updated spirv-cross.

Бранимир Караџић 2023-01-14 18:27:49 -08:00
parent 2d52b5f9af
commit ce55c18a8d
12 changed files with 187 additions and 80 deletions

View File

@ -645,6 +645,7 @@ struct CLIArguments
bool msl_pad_fragment_output = false;
bool msl_domain_lower_left = false;
bool msl_argument_buffers = false;
uint32_t msl_argument_buffers_tier = 0; // Tier 1
bool msl_texture_buffer_native = false;
bool msl_framebuffer_fetch = false;
bool msl_invariant_float_math = false;
@ -856,8 +857,11 @@ static void print_help_msl()
"\t[--msl-pad-fragment-output]:\n\t\tAlways emit color outputs as 4-component variables.\n"
"\t\tIn Metal, the fragment shader must emit at least as many components as the render target format.\n"
"\t[--msl-domain-lower-left]:\n\t\tUse a lower-left tessellation domain.\n"
"\t[--msl-argument-buffers]:\n\t\tEmit Indirect Argument buffers instead of plain bindings.\n"
"\t[--msl-argument-buffers]:\n\t\tEmit Metal argument buffers instead of discrete resource bindings.\n"
"\t\tRequires MSL 2.0 to be enabled.\n"
"\t[--msl-argument-buffers-tier]:\n\t\tWhen using Metal argument buffers, indicate the Metal argument buffer tier level supported by the Metal platform.\n"
"\t\tUses same values as Metal MTLArgumentBuffersTier enumeration (0 = Tier1, 1 = Tier2).\n"
"\t\tSetting this value also enables msl-argument-buffers.\n"
"\t[--msl-texture-buffer-native]:\n\t\tEnable native support for texel buffers. Otherwise, it is emulated as a normal texture.\n"
"\t[--msl-framebuffer-fetch]:\n\t\tImplement subpass inputs with frame buffer fetch.\n"
"\t\tEmits [[color(N)]] inputs in fragment stage.\n"
@ -1190,6 +1194,7 @@ static string compile_iteration(const CLIArguments &args, std::vector<uint32_t>
msl_opts.pad_fragment_output_components = args.msl_pad_fragment_output;
msl_opts.tess_domain_origin_lower_left = args.msl_domain_lower_left;
msl_opts.argument_buffers = args.msl_argument_buffers;
msl_opts.argument_buffers_tier = static_cast<CompilerMSL::Options::ArgumentBuffersTier>(args.msl_argument_buffers_tier);
msl_opts.texture_buffer_native = args.msl_texture_buffer_native;
msl_opts.multiview = args.msl_multiview;
msl_opts.multiview_layered_rendering = args.msl_multiview_layered_rendering;
@ -1621,6 +1626,10 @@ static int main_inner(int argc, char *argv[])
cbs.add("--msl-pad-fragment-output", [&args](CLIParser &) { args.msl_pad_fragment_output = true; });
cbs.add("--msl-domain-lower-left", [&args](CLIParser &) { args.msl_domain_lower_left = true; });
cbs.add("--msl-argument-buffers", [&args](CLIParser &) { args.msl_argument_buffers = true; });
cbs.add("--msl-argument-buffer-tier", [&args](CLIParser &parser) {
args.msl_argument_buffers_tier = parser.next_uint();
args.msl_argument_buffers = true;
});
cbs.add("--msl-discrete-descriptor-set",
[&args](CLIParser &parser) { args.msl_discrete_descriptor_sets.push_back(parser.next_uint()); });
cbs.add("--msl-device-argument-buffer",

View File

@ -644,7 +644,8 @@ struct SPIRExtension : IVariant
SPV_AMD_shader_trinary_minmax,
SPV_AMD_gcn_shader,
NonSemanticDebugPrintf,
NonSemanticShaderDebugInfo
NonSemanticShaderDebugInfo,
NonSemanticGeneric
};
explicit SPIRExtension(Extension ext_)

View File

@ -1468,6 +1468,58 @@ bool Compiler::get_binary_offset_for_decoration(VariableID id, spv::Decoration d
return true;
}
bool Compiler::block_is_noop(const SPIRBlock &block) const
{
if (block.terminator != SPIRBlock::Direct)
return false;
auto &child = get<SPIRBlock>(block.next_block);
// If this block participates in PHI, the block isn't really a noop.
for (auto &phi : block.phi_variables)
if (phi.parent == block.self || phi.parent == child.self)
return false;
for (auto &phi : child.phi_variables)
if (phi.parent == block.self)
return false;
// Verify all instructions have no semantic impact.
for (auto &i : block.ops)
{
auto op = static_cast<Op>(i.op);
switch (op)
{
// Non-Semantic instructions.
case OpLine:
case OpNoLine:
break;
case OpExtInst:
{
auto *ops = stream(i);
auto ext = get<SPIRExtension>(ops[2]).ext;
bool ext_is_nonsemantic_only =
ext == SPIRExtension::NonSemanticShaderDebugInfo ||
ext == SPIRExtension::SPV_debug_info ||
ext == SPIRExtension::NonSemanticGeneric;
if (!ext_is_nonsemantic_only)
return false;
break;
}
default:
return false;
}
}
return true;
}
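
To make the new classification concrete, here is a standalone sketch of the per-instruction rule (types and names are local to this example, not spirv-cross API): only line markers and extended instructions from debug-info or non-semantic sets leave a block eligible as a noop.

enum class Op { Line, NoLine, ExtInst, Store };
enum class ExtSet { GLSL450, DebugInfo, NonSemanticShaderDebugInfo, NonSemanticGeneric };

struct Instr
{
	Op op;
	ExtSet set; // Only meaningful for ExtInst.
};

// Mirrors the switch inside block_is_noop() above.
static bool has_no_semantic_impact(const Instr &i)
{
	switch (i.op)
	{
	case Op::Line:
	case Op::NoLine:
		return true;
	case Op::ExtInst:
		return i.set == ExtSet::DebugInfo ||
		       i.set == ExtSet::NonSemanticShaderDebugInfo ||
		       i.set == ExtSet::NonSemanticGeneric;
	default:
		return false; // Stores, arithmetic, calls, etc. are semantic.
	}
}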
bool Compiler::block_is_loop_candidate(const SPIRBlock &block, SPIRBlock::Method method) const
{
// Tried and failed.
@ -1525,7 +1577,7 @@ bool Compiler::block_is_loop_candidate(const SPIRBlock &block, SPIRBlock::Method
{
// Empty loop header that just sets up merge target
// and branches to loop body.
bool ret = block.terminator == SPIRBlock::Direct && block.merge == SPIRBlock::MergeLoop && block.ops.empty();
bool ret = block.terminator == SPIRBlock::Direct && block.merge == SPIRBlock::MergeLoop && block_is_noop(block);
if (!ret)
return false;
@ -1551,19 +1603,8 @@ bool Compiler::block_is_loop_candidate(const SPIRBlock &block, SPIRBlock::Method
ret = child.terminator == SPIRBlock::Select && child.merge == SPIRBlock::MergeNone &&
(positive_candidate || negative_candidate);
// If we have OpPhi which depends on branches which came from our own block,
// we need to flush phi variables in else block instead of a trivial break,
// so we cannot assume this is a for loop candidate.
if (ret)
{
for (auto &phi : block.phi_variables)
if (phi.parent == block.self || phi.parent == child.self)
return false;
for (auto &phi : child.phi_variables)
if (phi.parent == block.self)
return false;
auto *merge = maybe_get<SPIRBlock>(block.merge_block);
if (merge)
for (auto &phi : merge->phi_variables)
@ -1588,15 +1629,10 @@ bool Compiler::execution_is_noop(const SPIRBlock &from, const SPIRBlock &to) con
if (start->self == to.self)
return true;
if (!start->ops.empty())
if (!block_is_noop(*start))
return false;
auto &next = get<SPIRBlock>(start->next_block);
// Flushing phi variables does not count as noop.
for (auto &phi : next.phi_variables)
if (phi.parent == start->self)
return false;
start = &next;
}
}
@ -3213,8 +3249,8 @@ void Compiler::AnalyzeVariableScopeAccessHandler::notify_variable_access(uint32_
return;
// Access chains used in multiple blocks mean we have to hoist all variables used to construct the access chain, as not all backends can use pointers.
auto itr = access_chain_children.find(id);
if (itr != end(access_chain_children))
auto itr = rvalue_forward_children.find(id);
if (itr != end(rvalue_forward_children))
for (auto child_id : itr->second)
notify_variable_access(child_id, block);
@ -3322,14 +3358,14 @@ bool Compiler::AnalyzeVariableScopeAccessHandler::handle(spv::Op op, const uint3
if (var)
{
accessed_variables_to_block[var->self].insert(current_block->self);
access_chain_children[args[1]].insert(var->self);
rvalue_forward_children[args[1]].insert(var->self);
}
// args[2] might be another access chain we have to track use of.
for (uint32_t i = 2; i < length; i++)
{
notify_variable_access(args[i], current_block->self);
access_chain_children[args[1]].insert(args[i]);
rvalue_forward_children[args[1]].insert(args[i]);
}
// Also keep track of the access chain pointer itself.
@ -3411,6 +3447,12 @@ bool Compiler::AnalyzeVariableScopeAccessHandler::handle(spv::Op op, const uint3
// Might be an access chain we have to track use of.
notify_variable_access(args[2], current_block->self);
// If we're loading an opaque type, we cannot lower it to a temporary;
// we must defer access of args[2] until it's used.
auto &type = compiler.get<SPIRType>(args[0]);
if (compiler.type_is_opaque_value(type))
rvalue_forward_children[args[1]].insert(args[2]);
break;
}

View File

@ -752,6 +752,7 @@ protected:
bool is_force_recompile = false;
bool is_force_recompile_forward_progress = false;
bool block_is_noop(const SPIRBlock &block) const;
bool block_is_loop_candidate(const SPIRBlock &block, SPIRBlock::Method method) const;
bool types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const;
@ -1015,7 +1016,8 @@ protected:
std::unordered_map<uint32_t, std::unordered_set<uint32_t>> partial_write_variables_to_block;
std::unordered_set<uint32_t> access_chain_expressions;
// Access chains used in multiple blocks mean we have to hoist all variables used to construct the access chain, as not all backends can use pointers.
std::unordered_map<uint32_t, std::unordered_set<uint32_t>> access_chain_children;
// This is also relevant when forwarding opaque objects since we cannot lower these to temporaries.
std::unordered_map<uint32_t, std::unordered_set<uint32_t>> rvalue_forward_children;
const SPIRBlock *current_block = nullptr;
};

View File

@ -479,6 +479,9 @@ spvc_result spvc_compiler_options_set_uint(spvc_compiler_options options, spvc_c
case SPVC_COMPILER_OPTION_RELAX_NAN_CHECKS:
options->glsl.relax_nan_checks = value != 0;
break;
case SPVC_COMPILER_OPTION_GLSL_ENABLE_ROW_MAJOR_LOAD_WORKAROUND:
options->glsl.enable_row_major_load_workaround = value != 0;
break;
#endif
#if SPIRV_CROSS_C_API_HLSL
@ -731,6 +734,10 @@ spvc_result spvc_compiler_options_set_uint(spvc_compiler_options options, spvc_c
case SPVC_COMPILER_OPTION_MSL_CHECK_DISCARDED_FRAG_STORES:
options->msl.check_discarded_frag_stores = value != 0;
break;
case SPVC_COMPILER_OPTION_MSL_ARGUMENT_BUFFERS_TIER:
options->msl.argument_buffers_tier = static_cast<CompilerMSL::Options::ArgumentBuffersTier>(value);
break;
#endif
default:

View File

@ -40,7 +40,7 @@ extern "C" {
/* Bumped if ABI or API breaks backwards compatibility. */
#define SPVC_C_API_VERSION_MAJOR 0
/* Bumped if APIs or enumerations are added in a backwards compatible way. */
#define SPVC_C_API_VERSION_MINOR 52
#define SPVC_C_API_VERSION_MINOR 54
/* Bumped if internal implementation details change. */
#define SPVC_C_API_VERSION_PATCH 0
@ -721,6 +721,10 @@ typedef enum spvc_compiler_option
SPVC_COMPILER_OPTION_MSL_MANUAL_HELPER_INVOCATION_UPDATES = 81 | SPVC_COMPILER_OPTION_MSL_BIT,
SPVC_COMPILER_OPTION_MSL_CHECK_DISCARDED_FRAG_STORES = 82 | SPVC_COMPILER_OPTION_MSL_BIT,
SPVC_COMPILER_OPTION_GLSL_ENABLE_ROW_MAJOR_LOAD_WORKAROUND = 83 | SPVC_COMPILER_OPTION_GLSL_BIT,
SPVC_COMPILER_OPTION_MSL_ARGUMENT_BUFFERS_TIER = 84 | SPVC_COMPILER_OPTION_MSL_BIT,
SPVC_COMPILER_OPTION_INT_MAX = 0x7fffffff
} spvc_compiler_option;
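
A minimal sketch of setting the new option through the C API (error checking omitted; spirv_words and word_count stand in for a valid SPIR-V binary). Unlike the CLI flag, the C API does not implicitly enable argument buffers, so both options are set:

#include <spirv_cross_c.h>
#include <stddef.h>

void compile_with_tier2(const SpvId *spirv_words, size_t word_count)
{
	spvc_context context = NULL;
	spvc_parsed_ir ir = NULL;
	spvc_compiler compiler = NULL;
	spvc_compiler_options options = NULL;
	const char *source = NULL;

	spvc_context_create(&context);
	spvc_context_parse_spirv(context, spirv_words, word_count, &ir);
	spvc_context_create_compiler(context, SPVC_BACKEND_MSL, ir, SPVC_CAPTURE_MODE_TAKE_OWNERSHIP, &compiler);

	spvc_compiler_create_compiler_options(compiler, &options);
	spvc_compiler_options_set_uint(options, SPVC_COMPILER_OPTION_MSL_ARGUMENT_BUFFERS, 1);
	spvc_compiler_options_set_uint(options, SPVC_COMPILER_OPTION_MSL_ARGUMENT_BUFFERS_TIER, 1); /* 1 = Tier2 */
	spvc_compiler_install_compiler_options(compiler, options);

	spvc_compiler_compile(compiler, &source);
	/* ... use source ... */
	spvc_context_destroy(context);
}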

View File

@ -226,7 +226,7 @@ public:
}
template <typename U, size_t M>
SmallVector(const U (&init)[M]) SPIRV_CROSS_NOEXCEPT : SmallVector(init, init + M)
explicit SmallVector(const U (&init)[M]) SPIRV_CROSS_NOEXCEPT : SmallVector(init, init + M)
{
}
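
Making this constructor explicit removes the implicit C-array-to-SmallVector conversion. A short sketch of the effect at call sites; the get_composite_constant_ids() hunk later in this diff adjusts for exactly this:

uint32_t ids[3] = { 1, 2, 3 };

SmallVector<uint32_t> a(ids);     // Still fine: explicit construction.
// SmallVector<uint32_t> b = ids; // Copy-initialization through the converting
                                  // constructor no longer compiles, and neither
                                  // does returning a C array from a function
                                  // declared to return a SmallVector, which is
                                  // why get_composite_constant_ids() now wraps
                                  // its returns in SmallVector<ConstantID>(...).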

View File

@ -3181,7 +3181,6 @@ void CompilerGLSL::emit_declared_builtin_block(StorageClass storage, ExecutionMo
Bitset global_builtins;
const SPIRVariable *block_var = nullptr;
bool emitted_block = false;
bool builtin_array = false;
// Need to use declared size in the type.
// These variables might have been declared, but not statically used, so we haven't deduced their size yet.
@ -3305,7 +3304,6 @@ void CompilerGLSL::emit_declared_builtin_block(StorageClass storage, ExecutionMo
emitted_builtins = builtins;
emitted_block = true;
builtin_array = !type.array.empty();
block_var = &var;
});
@ -3404,12 +3402,23 @@ void CompilerGLSL::emit_declared_builtin_block(StorageClass storage, ExecutionMo
statement("float gl_CullDistance[", cull_distance_size, "];");
}
bool builtin_array = model == ExecutionModelTessellationControl ||
(model == ExecutionModelMeshEXT && storage == StorageClassOutput) ||
(model == ExecutionModelGeometry && storage == StorageClassInput) ||
(model == ExecutionModelTessellationEvaluation && storage == StorageClassInput);
if (builtin_array)
{
if (model == ExecutionModelTessellationControl && storage == StorageClassOutput)
end_scope_decl(join(to_name(block_var->self), "[", get_entry_point().output_vertices, "]"));
const char *instance_name;
if (model == ExecutionModelMeshEXT)
instance_name = "gl_MeshVerticesEXT"; // Per primitive is never synthesized.
else
end_scope_decl(join(to_name(block_var->self), "[]"));
instance_name = storage == StorageClassInput ? "gl_in" : "gl_out";
if (model == ExecutionModelTessellationControl && storage == StorageClassOutput)
end_scope_decl(join(instance_name, "[", get_entry_point().output_vertices, "]"));
else
end_scope_decl(join(instance_name, "[]"));
}
else
end_scope_decl();
@ -4362,8 +4371,18 @@ void CompilerGLSL::emit_extension_workarounds(spv::ExecutionModel model)
for (auto &type_id : workaround_ubo_load_overload_types)
{
auto &type = get<SPIRType>(type_id);
statement(type_to_glsl(type), " spvWorkaroundRowMajor(", type_to_glsl(type),
" wrap) { return wrap; }");
if (options.es && is_matrix(type))
{
// Need both variants.
// GLSL cannot overload on precision, so we need to dispatch appropriately.
statement("highp ", type_to_glsl(type), " spvWorkaroundRowMajor(highp ", type_to_glsl(type), " wrap) { return wrap; }");
statement("mediump ", type_to_glsl(type), " spvWorkaroundRowMajorMP(mediump ", type_to_glsl(type), " wrap) { return wrap; }");
}
else
{
statement(type_to_glsl(type), " spvWorkaroundRowMajor(", type_to_glsl(type), " wrap) { return wrap; }");
}
}
statement("");
}
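
A minimal sketch of exercising this path (enable_row_major_load_workaround already exists in CompilerGLSL::Options and is exposed through the C API earlier in this diff):

#include "spirv_glsl.hpp"

#include <string>
#include <utility>
#include <vector>

std::string compile_es_with_workaround(std::vector<uint32_t> spirv)
{
	spirv_cross::CompilerGLSL glsl(std::move(spirv));

	auto opts = glsl.get_common_options();
	opts.es = true;
	opts.version = 300;
	opts.enable_row_major_load_workaround = true;
	glsl.set_common_options(opts);

	// For a matrix type on ES, the loop above now emits both precision
	// variants, e.g.:
	//   highp mat4 spvWorkaroundRowMajor(highp mat4 wrap) { return wrap; }
	//   mediump mat4 spvWorkaroundRowMajorMP(mediump mat4 wrap) { return wrap; }
	return glsl.compile();
}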
@ -4955,9 +4974,9 @@ SmallVector<ConstantID> CompilerGLSL::get_composite_constant_ids(ConstantID cons
if (is_array(type) || type.basetype == SPIRType::Struct)
return constant->subconstants;
if (is_matrix(type))
return constant->m.id;
return SmallVector<ConstantID>(constant->m.id);
if (is_vector(type))
return constant->m.c[0].id;
return SmallVector<ConstantID>(constant->m.c[0].id);
SPIRV_CROSS_THROW("Unexpected scalar constant!");
}
if (!const_composite_insert_ids.count(const_id))
@ -7280,8 +7299,14 @@ std::string CompilerGLSL::to_texture_op(const Instruction &i, bool sparse, bool
args.grad_x = grad_x;
args.grad_y = grad_y;
args.lod = lod;
args.coffset = coffset;
args.offset = offset;
if (coffsets)
args.offset = coffsets;
else if (coffset)
args.offset = coffset;
else
args.offset = offset;
args.bias = bias;
args.component = comp;
args.sample = sample;
@ -7673,13 +7698,7 @@ string CompilerGLSL::to_function_args(const TextureFunctionArguments &args, bool
farg_str += ", 0";
}
if (args.coffset)
{
forward = forward && should_forward(args.coffset);
farg_str += ", ";
farg_str += bitcast_expression(SPIRType::Int, args.coffset);
}
else if (args.offset)
if (args.offset)
{
forward = forward && should_forward(args.offset);
farg_str += ", ";
@ -10082,7 +10101,7 @@ bool CompilerGLSL::should_dereference(uint32_t id)
// same type. Can't check type.self, because for some reason that's
// usually the base type with pointers stripped off. This check is
// complex enough that I've hoisted it out of the while condition.
if (src_type.pointer != type.pointer || src_type.pointer_depth != type.pointer ||
if (src_type.pointer != type.pointer || src_type.pointer_depth != type.pointer_depth ||
src_type.parent_type != type.parent_type)
break;
if ((var = maybe_get<SPIRVariable>(expr->loaded_from)))
@ -13310,7 +13329,8 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
emit_spv_amd_gcn_shader_op(ops[0], ops[1], ops[3], &ops[4], length - 4);
}
else if (ext == SPIRExtension::SPV_debug_info ||
ext == SPIRExtension::NonSemanticShaderDebugInfo)
ext == SPIRExtension::NonSemanticShaderDebugInfo ||
ext == SPIRExtension::NonSemanticGeneric)
{
break; // Ignore SPIR-V debug information extended instructions.
}
@ -17335,6 +17355,7 @@ void CompilerGLSL::rewrite_load_for_wrapped_row_major(std::string &expr, TypeID
auto *type = &get<SPIRType>(loaded_type);
bool rewrite = false;
bool relaxed = options.es;
if (is_matrix(*type))
{
@ -17345,24 +17366,31 @@ void CompilerGLSL::rewrite_load_for_wrapped_row_major(std::string &expr, TypeID
// If an access chain occurred, the workaround is not required, so loading vectors or scalars doesn't need the workaround.
type = &backing_type;
}
else
{
// If we're loading a composite, we don't have overloads like these.
relaxed = false;
}
if (type->basetype == SPIRType::Struct)
{
// If we're loading a struct where any member is a row-major matrix, apply the workaround.
for (uint32_t i = 0; i < uint32_t(type->member_types.size()); i++)
{
if (combined_decoration_for_member(*type, i).get(DecorationRowMajor))
{
auto decorations = combined_decoration_for_member(*type, i);
if (decorations.get(DecorationRowMajor))
rewrite = true;
break;
}
// Since we decide on a per-struct basis, only use the mediump wrapper if all candidates are mediump.
if (!decorations.get(DecorationRelaxedPrecision))
relaxed = false;
}
}
if (rewrite)
{
request_workaround_wrapper_overload(loaded_type);
expr = join("spvWorkaroundRowMajor(", expr, ")");
expr = join("spvWorkaroundRowMajor", (relaxed ? "MP" : ""), "(", expr, ")");
}
}

View File

@ -448,7 +448,7 @@ protected:
TextureFunctionArguments() = default;
TextureFunctionBaseArguments base;
uint32_t coord = 0, coord_components = 0, dref = 0;
uint32_t grad_x = 0, grad_y = 0, lod = 0, coffset = 0, offset = 0;
uint32_t grad_x = 0, grad_y = 0, lod = 0, offset = 0;
uint32_t bias = 0, component = 0, sample = 0, sparse_texel = 0, min_lod = 0;
bool nonuniform_expression = false;
};

View File

@ -10701,25 +10701,24 @@ string CompilerMSL::to_function_args(const TextureFunctionArguments &args, bool
break;
}
if (args.base.is_fetch && (args.offset || args.coffset))
if (args.base.is_fetch && args.offset)
{
uint32_t offset_expr = args.offset ? args.offset : args.coffset;
// Fetch offsets must be applied directly to the coordinate.
forward = forward && should_forward(offset_expr);
auto &type = expression_type(offset_expr);
forward = forward && should_forward(args.offset);
auto &type = expression_type(args.offset);
if (imgtype.image.dim == Dim1D && msl_options.texture_1D_as_2D)
{
if (type.basetype != SPIRType::UInt)
tex_coords += join(" + uint2(", bitcast_expression(SPIRType::UInt, offset_expr), ", 0)");
tex_coords += join(" + uint2(", bitcast_expression(SPIRType::UInt, args.offset), ", 0)");
else
tex_coords += join(" + uint2(", to_enclosed_expression(offset_expr), ", 0)");
tex_coords += join(" + uint2(", to_enclosed_expression(args.offset), ", 0)");
}
else
{
if (type.basetype != SPIRType::UInt)
tex_coords += " + " + bitcast_expression(SPIRType::UInt, offset_expr);
tex_coords += " + " + bitcast_expression(SPIRType::UInt, args.offset);
else
tex_coords += " + " + to_enclosed_expression(offset_expr);
tex_coords += " + " + to_enclosed_expression(args.offset);
}
}
@ -10923,13 +10922,7 @@ string CompilerMSL::to_function_args(const TextureFunctionArguments &args, bool
// Add offsets
string offset_expr;
const SPIRType *offset_type = nullptr;
if (args.coffset && !args.base.is_fetch)
{
forward = forward && should_forward(args.coffset);
offset_expr = to_expression(args.coffset);
offset_type = &expression_type(args.coffset);
}
else if (args.offset && !args.base.is_fetch)
if (args.offset && !args.base.is_fetch)
{
forward = forward && should_forward(args.offset);
offset_expr = to_expression(args.offset);
@ -11608,11 +11601,14 @@ string CompilerMSL::to_struct_member(const SPIRType &type, uint32_t member_type_
}
}
// Very specifically, image load-store in argument buffers are disallowed on MSL on iOS.
if (msl_options.is_ios() && physical_type.basetype == SPIRType::Image && physical_type.image.sampled == 2)
// iOS Tier 1 argument buffers do not support writable images.
if (physical_type.basetype == SPIRType::Image &&
physical_type.image.sampled == 2 &&
msl_options.is_ios() &&
msl_options.argument_buffers_tier <= Options::ArgumentBuffersTier::Tier1 &&
!has_decoration(orig_id, DecorationNonWritable))
{
if (!has_decoration(orig_id, DecorationNonWritable))
SPIRV_CROSS_THROW("Writable images are not allowed in argument buffers on iOS.");
SPIRV_CROSS_THROW("Writable images are not allowed on Tier1 argument buffers on iOS.");
}
// Array information is baked into these types.
@ -16953,13 +16949,14 @@ bool CompilerMSL::descriptor_set_is_argument_buffer(uint32_t desc_set) const
bool CompilerMSL::is_supported_argument_buffer_type(const SPIRType &type) const
{
// Very specifically, image load-store in argument buffers are disallowed on MSL on iOS.
// But we won't know when the argument buffer is encoded whether this image will have
// a NonWritable decoration. So just use discrete arguments for all storage images
// on iOS.
bool is_storage_image = type.basetype == SPIRType::Image && type.image.sampled == 2;
bool is_supported_type = !msl_options.is_ios() || !is_storage_image;
return !type_is_msl_framebuffer_fetch(type) && is_supported_type;
// iOS Tier 1 argument buffers do not support writable images.
// When the argument buffer is encoded, we don't know whether this image will have a
// NonWritable decoration, so just use discrete arguments for all storage images on iOS.
bool is_supported_type = !(type.basetype == SPIRType::Image &&
type.image.sampled == 2 &&
msl_options.is_ios() &&
msl_options.argument_buffers_tier <= Options::ArgumentBuffersTier::Tier1);
return is_supported_type && !type_is_msl_framebuffer_fetch(type);
}
void CompilerMSL::analyze_argument_buffers()

View File

@ -339,10 +339,25 @@ public:
bool dispatch_base = false;
bool texture_1D_as_2D = false;
// Enable use of MSL 2.0 indirect argument buffers.
// Enable use of Metal argument buffers.
// MSL 2.0 must also be enabled.
bool argument_buffers = false;
// Defines Metal argument buffer tier levels.
// Uses the same values as the Metal MTLArgumentBuffersTier enumeration.
enum class ArgumentBuffersTier
{
Tier1 = 0,
Tier2 = 1,
};
// When using Metal argument buffers, indicates the Metal argument buffer tier level supported by the Metal platform.
// Ignored when Options::argument_buffers is disabled.
// - Tier1 supports writable images on macOS, but not on iOS.
// - Tier2 supports writable images on macOS and iOS, and higher resource count limits.
// Tier capabilities are based on recommendations from Apple engineering.
ArgumentBuffersTier argument_buffers_tier = ArgumentBuffersTier::Tier1;
// Ensures vertex and instance indices start at zero. This reflects the behavior of HLSL with SV_VertexID and SV_InstanceID.
bool enable_base_index_zero = false;
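
Since the values deliberately match MTLArgumentBuffersTier, a tier queried from the Metal device (e.g. via the MTLDevice argumentBuffersSupport property in platform code) can be cast straight through. A sketch, where mtl_tier is a hypothetical integer holding the queried value:

#include "spirv_msl.hpp"
#include <cstdint>

using Tier = spirv_cross::CompilerMSL::Options::ArgumentBuffersTier;

Tier tier_from_metal(uint32_t mtl_tier)
{
	// 0 = Tier1, 1 = Tier2, same numbering as MTLArgumentBuffersTier.
	return static_cast<Tier>(mtl_tier);
}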

View File

@ -295,6 +295,8 @@ void Parser::parse(const Instruction &instruction)
spirv_ext = SPIRExtension::NonSemanticDebugPrintf;
else if (ext == "NonSemantic.Shader.DebugInfo.100")
spirv_ext = SPIRExtension::NonSemanticShaderDebugInfo;
else if (ext.find("NonSemantic.") == 0)
spirv_ext = SPIRExtension::NonSemanticGeneric;
set<SPIRExtension>(id, spirv_ext);
// Other SPIR-V extensions which have ExtInstrs are currently not supported.
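
A small sketch of the new fallback rule: any extended instruction set whose name begins with "NonSemantic." now maps to NonSemanticGeneric and is later skipped as debug information (the set name in the comment is an illustrative example, not from this diff):

#include <string>

static bool is_nonsemantic_set(const std::string &ext)
{
	// Matches e.g. "NonSemantic.ClspvReflection.5" in addition to the
	// explicitly handled DebugPrintf and Shader.DebugInfo.100 sets.
	return ext.find("NonSemantic.") == 0;
}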