Updated spirv-cross.

This commit is contained in:
Бранимир Караџић 2023-12-22 18:15:52 -08:00
parent 1fc080a8f7
commit d7df6cf1b4
12 changed files with 565 additions and 391 deletions

View File

@ -678,6 +678,8 @@ struct CLIArguments
bool msl_sample_dref_lod_array_as_grad = false;
bool msl_runtime_array_rich_descriptor = false;
bool msl_replace_recursive_inputs = false;
bool msl_readwrite_texture_fences = true;
bool msl_agx_manual_cube_grad_fixup = false;
const char *msl_combined_sampler_suffix = nullptr;
bool glsl_emit_push_constant_as_ubo = false;
bool glsl_emit_ubo_as_plain_uniforms = false;
@ -958,6 +960,14 @@ static void print_help_msl()
"\t\tSome Metal devices have a bug where the level() argument to\n"
"\t\tdepth2d_array<T>::sample_compare() in a fragment shader is biased by some\n"
"\t\tunknown amount. This prevents the bias from being added.\n"
"\t[--msl-no-readwrite-texture-fences]:\n\t\tDo not insert fences before each read of a\n"
"\t\tread_write texture. MSL does not guarantee coherence between writes and later reads\n"
"\t\tof read_write textures. If you don't rely on this, you can disable this for a\n"
"\t\tpossible performance improvement.\n"
"\t[--msl-agx-manual-cube-grad-fixup]:\n\t\tManually transform cube texture gradients.\n"
"\t\tAll released Apple Silicon GPUs to date ignore one of the three partial derivatives\n"
"\t\tbased on the selected major axis, and expect the remaining derivatives to be\n"
"\t\tpartially transformed. This fixup gives correct results on Apple Silicon.\n"
"\t[--msl-combined-sampler-suffix <suffix>]:\n\t\tUses a custom suffix for combined samplers.\n");
// clang-format on
}
@ -1236,6 +1246,8 @@ static string compile_iteration(const CLIArguments &args, std::vector<uint32_t>
msl_opts.ios_support_base_vertex_instance = true;
msl_opts.runtime_array_rich_descriptor = args.msl_runtime_array_rich_descriptor;
msl_opts.replace_recursive_inputs = args.msl_replace_recursive_inputs;
msl_opts.readwrite_texture_fences = args.msl_readwrite_texture_fences;
msl_opts.agx_manual_cube_grad_fixup = args.msl_agx_manual_cube_grad_fixup;
msl_comp->set_msl_options(msl_opts);
for (auto &v : args.msl_discrete_descriptor_sets)
msl_comp->add_discrete_descriptor_set(v);
@ -1790,6 +1802,8 @@ static int main_inner(int argc, char *argv[])
cbs.add("--msl-check-discarded-frag-stores", [&args](CLIParser &) { args.msl_check_discarded_frag_stores = true; });
cbs.add("--msl-sample-dref-lod-array-as-grad",
[&args](CLIParser &) { args.msl_sample_dref_lod_array_as_grad = true; });
cbs.add("--msl-no-readwrite-texture-fences", [&args](CLIParser &) { args.msl_readwrite_texture_fences = false; });
cbs.add("--msl-agx-manual-cube-grad-fixup", [&args](CLIParser &) { args.msl_agx_manual_cube_grad_fixup = true; });
cbs.add("--msl-combined-sampler-suffix", [&args](CLIParser &parser) {
args.msl_combined_sampler_suffix = parser.next_string();
});

View File

@ -548,6 +548,9 @@ struct SPIRType : IVariant
type = TypeType
};
spv::Op op = spv::Op::OpNop;
explicit SPIRType(spv::Op op_) : op(op_) {}
enum BaseType
{
Unknown,
@ -618,7 +621,7 @@ struct SPIRType : IVariant
uint32_t sampled;
spv::ImageFormat format;
spv::AccessQualifier access;
} image;
} image = {};
// Structs can be declared multiple times if they are used as part of interface blocks.
// We want to detect this so that we only emit the struct definition once.

View File

@ -627,15 +627,22 @@ bool Compiler::is_matrix(const SPIRType &type) const
bool Compiler::is_array(const SPIRType &type) const
{
	// Classify arrays by the SPIR-V opcode that declared the type rather than
	// by inspecting type.array. This keeps the check consistent with the other
	// op-based queries (is_pointer, is_physical_pointer, is_runtime_size_array)
	// and removes the stale, unreachable duplicate return left behind here.
	return type.op == OpTypeArray || type.op == OpTypeRuntimeArray;
}
bool Compiler::is_pointer(const SPIRType &type) const
{
	// Only genuine data pointers count. Function pointers also carry
	// OpTypePointer but have an Unknown basetype, so they are filtered out.
	if (type.op != OpTypePointer)
		return false;
	return type.basetype != SPIRType::Unknown;
}
bool Compiler::is_physical_pointer(const SPIRType &type) const
{
	// A physical pointer is a pointer type whose storage class is
	// PhysicalStorageBuffer (buffer device address style pointers).
	const bool pointer_op = type.op == OpTypePointer;
	return pointer_op && type.storage == StorageClassPhysicalStorageBuffer;
}
bool Compiler::is_runtime_size_array(const SPIRType &type)
{
	// Runtime (unsized) arrays are identified directly by their declaring
	// opcode. The previous array/array_size_literal inspection that preceded
	// this return was dead code (it returned unconditionally, making this
	// statement unreachable) and has been removed; the op-based form matches
	// the other type queries in this file.
	return type.op == OpTypeRuntimeArray;
}
ShaderResources Compiler::get_shader_resources() const
@ -2738,8 +2745,8 @@ void Compiler::CombinedImageSamplerHandler::register_combined_image_sampler(SPIR
auto ptr_type_id = id + 1;
auto combined_id = id + 2;
auto &base = compiler.expression_type(image_id);
auto &type = compiler.set<SPIRType>(type_id);
auto &ptr_type = compiler.set<SPIRType>(ptr_type_id);
auto &type = compiler.set<SPIRType>(type_id, OpTypeSampledImage);
auto &ptr_type = compiler.set<SPIRType>(ptr_type_id, OpTypePointer);
type = base;
type.self = type_id;
@ -2998,7 +3005,7 @@ bool Compiler::CombinedImageSamplerHandler::handle(Op opcode, const uint32_t *ar
{
// Have to invent the sampled image type.
sampled_type = compiler.ir.increase_bound_by(1);
auto &type = compiler.set<SPIRType>(sampled_type);
auto &type = compiler.set<SPIRType>(sampled_type, OpTypeSampledImage);
type = compiler.expression_type(args[2]);
type.self = sampled_type;
type.basetype = SPIRType::SampledImage;
@ -3017,7 +3024,7 @@ bool Compiler::CombinedImageSamplerHandler::handle(Op opcode, const uint32_t *ar
// Make a new type, pointer to OpTypeSampledImage, so we can make a variable of this type.
// We will probably have this type lying around, but it doesn't hurt to make duplicates for internal purposes.
auto &type = compiler.set<SPIRType>(type_id);
auto &type = compiler.set<SPIRType>(type_id, OpTypePointer);
auto &base = compiler.get<SPIRType>(sampled_type);
type = base;
type.pointer = true;
@ -3063,11 +3070,10 @@ VariableID Compiler::build_dummy_sampler_for_combined_images()
auto ptr_type_id = offset + 1;
auto var_id = offset + 2;
SPIRType sampler_type;
auto &sampler = set<SPIRType>(type_id);
auto &sampler = set<SPIRType>(type_id, OpTypeSampler);
sampler.basetype = SPIRType::Sampler;
auto &ptr_sampler = set<SPIRType>(ptr_type_id);
auto &ptr_sampler = set<SPIRType>(ptr_type_id, OpTypePointer);
ptr_sampler = sampler;
ptr_sampler.self = type_id;
ptr_sampler.storage = StorageClassUniformConstant;
@ -5497,7 +5503,7 @@ bool Compiler::type_contains_recursion(const SPIRType &type)
bool Compiler::type_is_array_of_pointers(const SPIRType &type) const
{
if (!type_is_top_level_array(type))
if (!is_array(type))
return false;
// BDA types must have parent type hierarchy.
@ -5506,45 +5512,10 @@ bool Compiler::type_is_array_of_pointers(const SPIRType &type) const
// Punch through all array layers.
auto *parent = &get<SPIRType>(type.parent_type);
while (type_is_top_level_array(*parent))
while (is_array(*parent))
parent = &get<SPIRType>(parent->parent_type);
return type_is_top_level_pointer(*parent);
}
bool Compiler::type_is_top_level_pointer(const SPIRType &type) const
{
if (!type.pointer)
return false;
// Function pointers, should not be hit by valid SPIR-V.
// Parent type will be SPIRFunction instead.
if (type.basetype == SPIRType::Unknown)
return false;
// Some types are synthesized in-place without complete type hierarchy and might not have parent types,
// but these types are never array-of-pointer or any complicated BDA type, infer reasonable defaults.
if (type.parent_type)
return type.pointer_depth > get<SPIRType>(type.parent_type).pointer_depth;
else
return true;
}
bool Compiler::type_is_top_level_physical_pointer(const SPIRType &type) const
{
return type_is_top_level_pointer(type) && type.storage == StorageClassPhysicalStorageBuffer;
}
bool Compiler::type_is_top_level_array(const SPIRType &type) const
{
if (type.array.empty())
return false;
// If we have pointer and array, we infer pointer-to-array as it's the only meaningful thing outside BDA.
if (type.parent_type)
return type.array.size() > get<SPIRType>(type.parent_type).array.size();
else
return !type.pointer;
return is_pointer(*parent);
}
bool Compiler::flush_phi_required(BlockID from, BlockID to) const

View File

@ -683,6 +683,8 @@ protected:
bool is_vector(const SPIRType &type) const;
bool is_matrix(const SPIRType &type) const;
bool is_array(const SPIRType &type) const;
bool is_pointer(const SPIRType &type) const;
bool is_physical_pointer(const SPIRType &type) const;
static bool is_runtime_size_array(const SPIRType &type);
uint32_t expression_type_id(uint32_t id) const;
const SPIRType &expression_type(uint32_t id) const;
@ -1148,9 +1150,6 @@ protected:
bool check_internal_recursion(const SPIRType &type, std::unordered_set<uint32_t> &checked_ids);
bool type_contains_recursion(const SPIRType &type);
bool type_is_array_of_pointers(const SPIRType &type) const;
bool type_is_top_level_physical_pointer(const SPIRType &type) const;
bool type_is_top_level_pointer(const SPIRType &type) const;
bool type_is_top_level_array(const SPIRType &type) const;
bool type_is_block_like(const SPIRType &type) const;
bool type_is_top_level_block(const SPIRType &type) const;
bool type_is_opaque_value(const SPIRType &type) const;

View File

@ -742,6 +742,18 @@ spvc_result spvc_compiler_options_set_uint(spvc_compiler_options options, spvc_c
case SPVC_COMPILER_OPTION_MSL_SAMPLE_DREF_LOD_ARRAY_AS_GRAD:
options->msl.sample_dref_lod_array_as_grad = value != 0;
break;
case SPVC_COMPILER_OPTION_MSL_READWRITE_TEXTURE_FENCES:
options->msl.readwrite_texture_fences = value != 0;
break;
case SPVC_COMPILER_OPTION_MSL_REPLACE_RECURSIVE_INPUTS:
options->msl.replace_recursive_inputs = value != 0;
break;
case SPVC_COMPILER_OPTION_MSL_AGX_MANUAL_CUBE_GRAD_FIXUP:
options->msl.agx_manual_cube_grad_fixup = value != 0;
break;
#endif
default:

View File

@ -40,7 +40,7 @@ extern "C" {
/* Bumped if ABI or API breaks backwards compatibility. */
#define SPVC_C_API_VERSION_MAJOR 0
/* Bumped if APIs or enumerations are added in a backwards compatible way. */
#define SPVC_C_API_VERSION_MINOR 57
#define SPVC_C_API_VERSION_MINOR 58
/* Bumped if internal implementation details change. */
#define SPVC_C_API_VERSION_PATCH 0
@ -725,6 +725,9 @@ typedef enum spvc_compiler_option
SPVC_COMPILER_OPTION_MSL_ARGUMENT_BUFFERS_TIER = 84 | SPVC_COMPILER_OPTION_MSL_BIT,
SPVC_COMPILER_OPTION_MSL_SAMPLE_DREF_LOD_ARRAY_AS_GRAD = 85 | SPVC_COMPILER_OPTION_MSL_BIT,
SPVC_COMPILER_OPTION_MSL_READWRITE_TEXTURE_FENCES = 86 | SPVC_COMPILER_OPTION_MSL_BIT,
SPVC_COMPILER_OPTION_MSL_REPLACE_RECURSIVE_INPUTS = 87 | SPVC_COMPILER_OPTION_MSL_BIT,
SPVC_COMPILER_OPTION_MSL_AGX_MANUAL_CUBE_GRAD_FIXUP = 88 | SPVC_COMPILER_OPTION_MSL_BIT,
SPVC_COMPILER_OPTION_INT_MAX = 0x7fffffff
} spvc_compiler_option;

View File

@ -223,7 +223,7 @@ static const char *to_pls_layout(PlsFormat format)
}
}
static SPIRType::BaseType pls_format_to_basetype(PlsFormat format)
static std::pair<spv::Op, SPIRType::BaseType> pls_format_to_basetype(PlsFormat format)
{
switch (format)
{
@ -234,17 +234,17 @@ static SPIRType::BaseType pls_format_to_basetype(PlsFormat format)
case PlsRGB10A2:
case PlsRGBA8:
case PlsRG16:
return SPIRType::Float;
return std::make_pair(spv::OpTypeFloat, SPIRType::Float);
case PlsRGBA8I:
case PlsRG16I:
return SPIRType::Int;
return std::make_pair(spv::OpTypeInt, SPIRType::Int);
case PlsRGB10A2UI:
case PlsRGBA8UI:
case PlsRG16UI:
case PlsR32UI:
return SPIRType::UInt;
return std::make_pair(spv::OpTypeInt, SPIRType::UInt);
}
}
@ -1529,7 +1529,7 @@ uint32_t CompilerGLSL::type_to_packed_alignment(const SPIRType &type, const Bits
{
// If using PhysicalStorageBufferEXT storage class, this is a pointer,
// and is 64-bit.
if (type_is_top_level_physical_pointer(type))
if (is_physical_pointer(type))
{
if (!type.pointer)
SPIRV_CROSS_THROW("Types in PhysicalStorageBufferEXT must be pointers.");
@ -1544,7 +1544,7 @@ uint32_t CompilerGLSL::type_to_packed_alignment(const SPIRType &type, const Bits
else
SPIRV_CROSS_THROW("AddressingModelPhysicalStorageBuffer64EXT must be used for PhysicalStorageBufferEXT.");
}
else if (type_is_top_level_array(type))
else if (is_array(type))
{
uint32_t minimum_alignment = 1;
if (packing_is_vec4_padded(packing))
@ -1652,7 +1652,7 @@ uint32_t CompilerGLSL::type_to_packed_size(const SPIRType &type, const Bitset &f
{
// If using PhysicalStorageBufferEXT storage class, this is a pointer,
// and is 64-bit.
if (type_is_top_level_physical_pointer(type))
if (is_physical_pointer(type))
{
if (!type.pointer)
SPIRV_CROSS_THROW("Types in PhysicalStorageBufferEXT must be pointers.");
@ -1662,7 +1662,7 @@ uint32_t CompilerGLSL::type_to_packed_size(const SPIRType &type, const Bitset &f
else
SPIRV_CROSS_THROW("AddressingModelPhysicalStorageBuffer64EXT must be used for PhysicalStorageBufferEXT.");
}
else if (type_is_top_level_array(type))
else if (is_array(type))
{
uint32_t packed_size = to_array_size_literal(type) * type_to_packed_array_stride(type, flags, packing);
@ -1840,7 +1840,7 @@ bool CompilerGLSL::buffer_is_packing_standard(const SPIRType &type, BufferPackin
}
// Verify array stride rules.
if (type_is_top_level_array(memb_type) &&
if (is_array(memb_type) &&
type_to_packed_array_stride(memb_type, member_flags, packing) !=
type_struct_member_array_stride(type, i))
{
@ -2489,7 +2489,7 @@ void CompilerGLSL::emit_buffer_block_flattened(const SPIRVariable &var)
SPIRType::BaseType basic_type;
if (get_common_basic_type(type, basic_type))
{
SPIRType tmp;
SPIRType tmp { OpTypeVector };
tmp.basetype = basic_type;
tmp.vecsize = 4;
if (basic_type != SPIRType::Float && basic_type != SPIRType::Int && basic_type != SPIRType::UInt)
@ -3926,6 +3926,7 @@ void CompilerGLSL::emit_output_variable_initializer(const SPIRVariable &var)
auto &member_type = get<SPIRType>(member_type_id);
auto array_type = member_type;
array_type.parent_type = member_type_id;
array_type.op = OpTypeArray;
array_type.array.push_back(array_size);
array_type.array_size_literal.push_back(true);
@ -3949,10 +3950,9 @@ void CompilerGLSL::emit_output_variable_initializer(const SPIRVariable &var)
if (is_control_point)
{
uint32_t ids = ir.increase_bound_by(3);
SPIRType uint_type;
auto &uint_type = set<SPIRType>(ids, OpTypeInt);
uint_type.basetype = SPIRType::UInt;
uint_type.width = 32;
set<SPIRType>(ids, uint_type);
set<SPIRExpression>(ids + 1, builtin_to_glsl(BuiltInInvocationId, StorageClassInput), ids, true);
set<SPIRConstant>(ids + 2, ids, i, false);
invocation_id = ids + 1;
@ -5148,7 +5148,7 @@ string CompilerGLSL::to_rerolled_array_expression(const SPIRType &parent_type,
type.basetype == SPIRType::Boolean &&
backend.boolean_in_struct_remapped_type != SPIRType::Boolean;
SPIRType tmp_type;
SPIRType tmp_type { OpNop };
if (remapped_boolean)
{
tmp_type = get<SPIRType>(type.parent_type);
@ -5169,7 +5169,7 @@ string CompilerGLSL::to_rerolled_array_expression(const SPIRType &parent_type,
for (uint32_t i = 0; i < size; i++)
{
auto subexpr = join(base_expr, "[", convert_to_string(i), "]");
if (!type_is_top_level_array(parent))
if (!is_array(parent))
{
if (remapped_boolean)
subexpr = join(type_to_glsl(tmp_type), "(", subexpr, ")");
@ -5195,7 +5195,7 @@ string CompilerGLSL::to_composite_constructor_expression(const SPIRType &parent_
type.basetype == SPIRType::Boolean &&
backend.boolean_in_struct_remapped_type != SPIRType::Boolean;
if (type_is_top_level_array(type))
if (is_array(type))
{
reroll_array = !backend.array_is_value_type ||
(block_like_type && !backend.array_is_value_type_in_buffer_blocks);
@ -5748,7 +5748,7 @@ string CompilerGLSL::constant_expression(const SPIRConstant &c,
{
auto &type = get<SPIRType>(c.constant_type);
if (type_is_top_level_pointer(type))
if (is_pointer(type))
{
return backend.null_pointer_literal;
}
@ -5763,21 +5763,21 @@ string CompilerGLSL::constant_expression(const SPIRConstant &c,
// with Offset = 0, using no ArrayStride on the enclosed array type.
// A particular CTS test hits this scenario.
bool array_type_decays = inside_block_like_struct_scope &&
type_is_top_level_array(type) &&
is_array(type) &&
!backend.array_is_value_type_in_buffer_blocks;
// Allow Metal to use the array<T> template to make arrays a value type
bool needs_trailing_tracket = false;
if (backend.use_initializer_list && backend.use_typed_initializer_list && type.basetype == SPIRType::Struct &&
!type_is_top_level_array(type))
!is_array(type))
{
res = type_to_glsl_constructor(type) + "{ ";
}
else if (backend.use_initializer_list && backend.use_typed_initializer_list && backend.array_is_value_type &&
type_is_top_level_array(type) && !array_type_decays)
is_array(type) && !array_type_decays)
{
const auto *p_type = &type;
SPIRType tmp_type;
SPIRType tmp_type { OpNop };
if (inside_struct_scope &&
backend.boolean_in_struct_remapped_type != SPIRType::Boolean &&
@ -5818,7 +5818,7 @@ string CompilerGLSL::constant_expression(const SPIRConstant &c,
res += to_name(elem);
else
{
if (!type_is_top_level_array(type) && type.basetype == SPIRType::Struct)
if (!is_array(type) && type.basetype == SPIRType::Struct)
{
// When we get down to emitting struct members, override the block-like information.
// For constants, we can freely mix and match block-like state.
@ -5916,7 +5916,7 @@ string CompilerGLSL::convert_half_to_string(const SPIRConstant &c, uint32_t col,
// of complicated workarounds, just value-cast to the half type always.
if (std::isnan(float_value) || std::isinf(float_value))
{
SPIRType type;
SPIRType type { OpTypeFloat };
type.basetype = SPIRType::Half;
type.vecsize = 1;
type.columns = 1;
@ -5932,7 +5932,7 @@ string CompilerGLSL::convert_half_to_string(const SPIRConstant &c, uint32_t col,
}
else
{
SPIRType type;
SPIRType type { OpTypeFloat };
type.basetype = SPIRType::Half;
type.vecsize = 1;
type.columns = 1;
@ -5952,8 +5952,8 @@ string CompilerGLSL::convert_float_to_string(const SPIRConstant &c, uint32_t col
// Use special representation.
if (!is_legacy())
{
SPIRType out_type;
SPIRType in_type;
SPIRType out_type { OpTypeFloat };
SPIRType in_type { OpTypeInt };
out_type.basetype = SPIRType::Float;
in_type.basetype = SPIRType::UInt;
out_type.vecsize = 1;
@ -6022,8 +6022,8 @@ std::string CompilerGLSL::convert_double_to_string(const SPIRConstant &c, uint32
// Use special representation.
if (!is_legacy())
{
SPIRType out_type;
SPIRType in_type;
SPIRType out_type { OpTypeFloat };
SPIRType in_type { OpTypeInt };
out_type.basetype = SPIRType::Double;
in_type.basetype = SPIRType::UInt64;
out_type.vecsize = 1;
@ -6731,7 +6731,7 @@ SPIRType CompilerGLSL::binary_op_bitcast_helper(string &cast_op0, string &cast_o
// Create a fake type so we can bitcast to it.
// We only deal with regular arithmetic types here like int, uints and so on.
SPIRType expected_type;
SPIRType expected_type{type0.op};
expected_type.basetype = input_type;
expected_type.vecsize = type0.vecsize;
expected_type.columns = type0.columns;
@ -7085,7 +7085,9 @@ void CompilerGLSL::emit_bitfield_insert_op(uint32_t result_type, uint32_t result
auto op2_expr = to_unpacked_expression(op2);
auto op3_expr = to_unpacked_expression(op3);
SPIRType target_type;
assert(offset_count_type == SPIRType::UInt || offset_count_type == SPIRType::Int);
SPIRType target_type { OpTypeInt };
target_type.width = 32;
target_type.vecsize = 1;
target_type.basetype = offset_count_type;
@ -7876,7 +7878,7 @@ bool CompilerGLSL::expression_is_constant_null(uint32_t id) const
bool CompilerGLSL::expression_is_non_value_type_array(uint32_t ptr)
{
auto &type = expression_type(ptr);
if (!type_is_top_level_array(get_pointee_type(type)))
if (!is_array(get_pointee_type(type)))
return false;
if (!backend.array_is_value_type)
@ -9610,6 +9612,8 @@ string CompilerGLSL::builtin_to_glsl(BuiltIn builtin, StorageClass storage)
return "gl_TessLevelInner";
case BuiltInTessCoord:
return "gl_TessCoord";
case BuiltInPatchVertices:
return "gl_PatchVerticesIn";
case BuiltInFragCoord:
return "gl_FragCoord";
case BuiltInPointCoord:
@ -9912,16 +9916,21 @@ void CompilerGLSL::access_chain_internal_append_index(std::string &expr, uint32_
if (ptr_chain && access_chain_is_arrayed)
{
size_t split_pos = expr.find_last_of(']');
string expr_front = expr.substr(0, split_pos);
string expr_back = expr.substr(split_pos);
expr = expr_front + " + " + enclose_expression(idx_expr) + expr_back;
}
else
{
expr += "[";
expr += idx_expr;
expr += "]";
size_t enclose_split = expr.find_last_of(')');
// If we have already enclosed the expression, don't try to be clever, it will break.
if (split_pos > enclose_split || enclose_split == string::npos)
{
string expr_front = expr.substr(0, split_pos);
string expr_back = expr.substr(split_pos);
expr = expr_front + " + " + enclose_expression(idx_expr) + expr_back;
return;
}
}
expr += "[";
expr += idx_expr;
expr += "]";
}
bool CompilerGLSL::access_chain_needs_stage_io_builtin_translation(uint32_t)
@ -9956,6 +9965,7 @@ string CompilerGLSL::access_chain_internal(uint32_t base, const uint32_t *indice
// Start traversing type hierarchy at the proper non-pointer types,
// but keep type_id referencing the original pointer for use below.
uint32_t type_id = expression_type_id(base);
const auto *type = &get_pointee_type(type_id);
if (!backend.native_pointers)
{
@ -9965,13 +9975,10 @@ string CompilerGLSL::access_chain_internal(uint32_t base, const uint32_t *indice
// Wrapped buffer reference pointer types will need to poke into the internal "value" member before
// continuing the access chain.
if (should_dereference(base))
{
auto &type = get<SPIRType>(type_id);
expr = dereference_expression(type, expr);
}
expr = dereference_expression(get<SPIRType>(type_id), expr);
}
const auto *type = &get_pointee_type(type_id);
else if (should_dereference(base) && type->basetype != SPIRType::Struct && !ptr_chain)
expr = join("(", dereference_expression(*type, expr), ")");
bool access_chain_is_arrayed = expr.find_first_of('[') != string::npos;
bool row_major_matrix_needs_conversion = is_non_native_row_major_matrix(base);
@ -10012,9 +10019,21 @@ string CompilerGLSL::access_chain_internal(uint32_t base, const uint32_t *indice
index &= 0x7fffffffu;
}
// Pointer chains
bool ptr_chain_array_entry = ptr_chain && i == 0 && is_array(*type);
if (ptr_chain_array_entry)
{
// This is highly unusual code, since normally we'd use plain AccessChain, but it's still allowed.
// We are considered to have a pointer to array and one element shifts by one array at a time.
// If we use normal array indexing, we'll first decay to pointer, and lose the array-ness,
// so we have to take pointer to array explicitly.
if (!should_dereference(base))
expr = enclose_expression(address_of_expression(expr));
}
if (ptr_chain && i == 0)
{
// Pointer chains
// If we are flattening multidimensional arrays, only create opening bracket on first
// array index.
if (options.flatten_multidimensional_arrays)
@ -10059,6 +10078,12 @@ string CompilerGLSL::access_chain_internal(uint32_t base, const uint32_t *indice
}
access_chain_is_arrayed = true;
// Explicitly enclose the expression if this is one of the weird pointer-to-array cases.
// We don't want any future indexing to add to this array dereference.
// Enclosing the expression blocks that and avoids any shenanigans with operand priority.
if (ptr_chain_array_entry)
expr = join("(", expr, ")");
}
// Arrays
else if (!type->array.empty())
@ -15340,9 +15365,16 @@ string CompilerGLSL::pls_decl(const PlsRemap &var)
{
auto &variable = get<SPIRVariable>(var.id);
SPIRType type;
type.vecsize = pls_format_to_components(var.format);
type.basetype = pls_format_to_basetype(var.format);
auto op_and_basetype = pls_format_to_basetype(var.format);
SPIRType type { op_and_basetype.first };
type.basetype = op_and_basetype.second;
auto vecsize = pls_format_to_components(var.format);
if (vecsize > 1)
{
type.op = OpTypeVector;
type.vecsize = vecsize;
}
return join(to_pls_layout(var.format), to_pls_qualifiers_glsl(variable), type_to_glsl(type), " ",
to_name(variable.self));
@ -17653,7 +17685,7 @@ bool CompilerGLSL::unroll_array_to_complex_store(uint32_t target_id, uint32_t so
else
array_expr = to_expression(type.array.back());
SPIRType target_type;
SPIRType target_type { OpTypeInt };
target_type.basetype = SPIRType::Int;
statement("for (int i = 0; i < int(", array_expr, "); i++)");
@ -17718,7 +17750,7 @@ void CompilerGLSL::unroll_array_from_complex_load(uint32_t target_id, uint32_t s
statement(new_expr, "[i] = gl_in[i].", expr, ";");
else if (is_sample_mask)
{
SPIRType target_type;
SPIRType target_type { OpTypeInt };
target_type.basetype = SPIRType::Int;
statement(new_expr, "[i] = ", bitcast_expression(target_type, type.basetype, join(expr, "[i]")), ";");
}

View File

@ -2432,7 +2432,7 @@ void CompilerHLSL::analyze_meshlet_writes()
uint32_t op_ptr = op_type + 2;
uint32_t op_var = op_type + 3;
auto &type = set<SPIRType>(op_type);
auto &type = set<SPIRType>(op_type, OpTypeStruct);
type.basetype = SPIRType::Struct;
set_name(op_type, block_name);
set_decoration(op_type, DecorationBlock);
@ -4508,7 +4508,7 @@ void CompilerHLSL::read_access_chain(string *expr, const string &lhs, const SPIR
{
auto &type = get<SPIRType>(chain.basetype);
SPIRType target_type;
SPIRType target_type { is_scalar(type) ? OpTypeInt : type.op };
target_type.basetype = SPIRType::UInt;
target_type.vecsize = type.vecsize;
target_type.columns = type.columns;
@ -4755,7 +4755,7 @@ void CompilerHLSL::write_access_chain_array(const SPIRAccessChain &chain, uint32
uint32_t id = ir.increase_bound_by(2);
uint32_t int_type_id = id + 1;
SPIRType int_type;
SPIRType int_type { OpTypeInt };
int_type.basetype = SPIRType::Int;
int_type.width = 32;
set<SPIRType>(int_type_id, int_type);
@ -4843,7 +4843,7 @@ void CompilerHLSL::write_access_chain(const SPIRAccessChain &chain, uint32_t val
// Make sure we trigger a read of the constituents in the access chain.
track_expression_read(chain.self);
SPIRType target_type;
SPIRType target_type { is_scalar(type) ? OpTypeInt : type.op };
target_type.basetype = SPIRType::UInt;
target_type.vecsize = type.vecsize;
target_type.columns = type.columns;
@ -6583,14 +6583,14 @@ VariableID CompilerHLSL::remap_num_workgroups_builtin()
uint32_t block_pointer_type_id = offset + 2;
uint32_t variable_id = offset + 3;
SPIRType uint_type;
SPIRType uint_type { OpTypeVector };
uint_type.basetype = SPIRType::UInt;
uint_type.width = 32;
uint_type.vecsize = 3;
uint_type.columns = 1;
set<SPIRType>(uint_type_id, uint_type);
SPIRType block_type;
SPIRType block_type { OpTypeStruct };
block_type.basetype = SPIRType::Struct;
block_type.member_types.push_back(uint_type_id);
set<SPIRType>(block_type_id, block_type);

File diff suppressed because it is too large Load Diff

View File

@ -512,6 +512,13 @@ public:
// The bug has been reported to Apple, and will hopefully be fixed in future releases.
bool replace_recursive_inputs = false;
// If set, manual fixups of gradient vectors for cube texture lookups will be performed.
// All released Apple Silicon GPUs to date behave incorrectly when sampling a cube texture
// with explicit gradients. They will ignore one of the three partial derivatives based
// on the selected major axis, and expect the remaining derivatives to be partially
// transformed.
bool agx_manual_cube_grad_fixup = false;
bool is_ios() const
{
return platform == iOS;
@ -756,6 +763,7 @@ protected:
SPVFuncImplArrayOfArrayCopy6Dim = SPVFuncImplArrayCopyMultidimBase + 6,
SPVFuncImplTexelBufferCoords,
SPVFuncImplImage2DAtomicCoords, // Emulate texture2D atomic operations
SPVFuncImplGradientCube,
SPVFuncImplFMul,
SPVFuncImplFAdd,
SPVFuncImplFSub,
@ -849,9 +857,6 @@ protected:
std::string type_to_array_glsl(const SPIRType &type) override;
std::string constant_op_expression(const SPIRConstantOp &cop) override;
// Threadgroup arrays can't have a wrapper type
std::string variable_decl(const SPIRVariable &variable) override;
bool variable_decl_is_remapped_storage(const SPIRVariable &variable, spv::StorageClass storage) const override;
// GCC workaround of lambdas calling protected functions (for older GCC versions)
@ -1201,7 +1206,7 @@ protected:
std::unordered_set<uint32_t> buffers_requiring_array_length;
SmallVector<std::pair<uint32_t, uint32_t>> buffer_aliases_argument;
SmallVector<uint32_t> buffer_aliases_discrete;
std::unordered_set<uint32_t> atomic_image_vars; // Emulate texture2D atomic operations
std::unordered_set<uint32_t> atomic_image_vars_emulated; // Emulate texture2D atomic operations
std::unordered_set<uint32_t> pull_model_inputs;
std::unordered_set<uint32_t> recursive_inputs;
@ -1271,7 +1276,7 @@ protected:
CompilerMSL &compiler;
std::unordered_map<uint32_t, uint32_t> result_types;
std::unordered_map<uint32_t, uint32_t> image_pointers; // Emulate texture2D atomic operations
std::unordered_map<uint32_t, uint32_t> image_pointers_emulated; // Emulate texture2D atomic operations
bool suppress_missing_prototypes = false;
bool uses_atomics = false;
bool uses_image_write = false;

View File

@ -517,7 +517,7 @@ void Parser::parse(const Instruction &instruction)
case OpTypeVoid:
{
uint32_t id = ops[0];
auto &type = set<SPIRType>(id);
auto &type = set<SPIRType>(id, op);
type.basetype = SPIRType::Void;
break;
}
@ -525,7 +525,7 @@ void Parser::parse(const Instruction &instruction)
case OpTypeBool:
{
uint32_t id = ops[0];
auto &type = set<SPIRType>(id);
auto &type = set<SPIRType>(id, op);
type.basetype = SPIRType::Boolean;
type.width = 1;
break;
@ -535,7 +535,7 @@ void Parser::parse(const Instruction &instruction)
{
uint32_t id = ops[0];
uint32_t width = ops[1];
auto &type = set<SPIRType>(id);
auto &type = set<SPIRType>(id, op);
if (width == 64)
type.basetype = SPIRType::Double;
else if (width == 32)
@ -553,7 +553,7 @@ void Parser::parse(const Instruction &instruction)
uint32_t id = ops[0];
uint32_t width = ops[1];
bool signedness = ops[2] != 0;
auto &type = set<SPIRType>(id);
auto &type = set<SPIRType>(id, op);
type.basetype = signedness ? to_signed_basetype(width) : to_unsigned_basetype(width);
type.width = width;
break;
@ -568,9 +568,9 @@ void Parser::parse(const Instruction &instruction)
uint32_t vecsize = ops[2];
auto &base = get<SPIRType>(ops[1]);
auto &vecbase = set<SPIRType>(id);
auto &vecbase = set<SPIRType>(id, base);
vecbase = base;
vecbase.op = op;
vecbase.vecsize = vecsize;
vecbase.self = id;
vecbase.parent_type = ops[1];
@ -583,9 +583,9 @@ void Parser::parse(const Instruction &instruction)
uint32_t colcount = ops[2];
auto &base = get<SPIRType>(ops[1]);
auto &matrixbase = set<SPIRType>(id);
auto &matrixbase = set<SPIRType>(id, base);
matrixbase = base;
matrixbase.op = op;
matrixbase.columns = colcount;
matrixbase.self = id;
matrixbase.parent_type = ops[1];
@ -595,12 +595,11 @@ void Parser::parse(const Instruction &instruction)
case OpTypeArray:
{
uint32_t id = ops[0];
auto &arraybase = set<SPIRType>(id);
uint32_t tid = ops[1];
auto &base = get<SPIRType>(tid);
auto &arraybase = set<SPIRType>(id, base);
arraybase = base;
arraybase.op = op;
arraybase.parent_type = tid;
uint32_t cid = ops[2];
@ -615,7 +614,9 @@ void Parser::parse(const Instruction &instruction)
arraybase.array_size_literal.push_back(literal);
arraybase.array.push_back(literal ? c->scalar() : cid);
// Do NOT set arraybase.self!
// .self resolves down to non-array/non-pointer type.
arraybase.self = base.self;
break;
}
@ -624,25 +625,27 @@ void Parser::parse(const Instruction &instruction)
uint32_t id = ops[0];
auto &base = get<SPIRType>(ops[1]);
auto &arraybase = set<SPIRType>(id);
auto &arraybase = set<SPIRType>(id, base);
// We're copying type information into Array types, so we'll need a fixup for any physical pointer
// references.
if (base.forward_pointer)
forward_pointer_fixups.push_back({ id, ops[1] });
arraybase = base;
arraybase.op = op;
arraybase.array.push_back(0);
arraybase.array_size_literal.push_back(true);
arraybase.parent_type = ops[1];
// Do NOT set arraybase.self!
// .self resolves down to non-array/non-pointer type.
arraybase.self = base.self;
break;
}
case OpTypeImage:
{
uint32_t id = ops[0];
auto &type = set<SPIRType>(id);
auto &type = set<SPIRType>(id, op);
type.basetype = SPIRType::Image;
type.image.type = ops[1];
type.image.dim = static_cast<Dim>(ops[2]);
@ -659,7 +662,7 @@ void Parser::parse(const Instruction &instruction)
{
uint32_t id = ops[0];
uint32_t imagetype = ops[1];
auto &type = set<SPIRType>(id);
auto &type = set<SPIRType>(id, op);
type = get<SPIRType>(imagetype);
type.basetype = SPIRType::SampledImage;
type.self = id;
@ -669,7 +672,7 @@ void Parser::parse(const Instruction &instruction)
case OpTypeSampler:
{
uint32_t id = ops[0];
auto &type = set<SPIRType>(id);
auto &type = set<SPIRType>(id, op);
type.basetype = SPIRType::Sampler;
break;
}
@ -682,10 +685,13 @@ void Parser::parse(const Instruction &instruction)
// We won't be able to compile it, but we shouldn't crash when parsing.
// We should be able to reflect.
auto *base = maybe_get<SPIRType>(ops[2]);
auto &ptrbase = set<SPIRType>(id);
auto &ptrbase = set<SPIRType>(id, op);
if (base)
{
ptrbase = *base;
ptrbase.op = op;
}
ptrbase.pointer = true;
ptrbase.pointer_depth++;
@ -706,7 +712,7 @@ void Parser::parse(const Instruction &instruction)
case OpTypeForwardPointer:
{
uint32_t id = ops[0];
auto &ptrbase = set<SPIRType>(id);
auto &ptrbase = set<SPIRType>(id, op);
ptrbase.pointer = true;
ptrbase.pointer_depth++;
ptrbase.storage = static_cast<StorageClass>(ops[1]);
@ -721,7 +727,7 @@ void Parser::parse(const Instruction &instruction)
case OpTypeStruct:
{
uint32_t id = ops[0];
auto &type = set<SPIRType>(id);
auto &type = set<SPIRType>(id, op);
type.basetype = SPIRType::Struct;
for (uint32_t i = 1; i < length; i++)
type.member_types.push_back(ops[i]);
@ -770,7 +776,7 @@ void Parser::parse(const Instruction &instruction)
case OpTypeAccelerationStructureKHR:
{
uint32_t id = ops[0];
auto &type = set<SPIRType>(id);
auto &type = set<SPIRType>(id, op);
type.basetype = SPIRType::AccelerationStructure;
break;
}
@ -778,7 +784,7 @@ void Parser::parse(const Instruction &instruction)
case OpTypeRayQueryKHR:
{
uint32_t id = ops[0];
auto &type = set<SPIRType>(id);
auto &type = set<SPIRType>(id, op);
type.basetype = SPIRType::RayQuery;
break;
}
@ -1025,10 +1031,9 @@ void Parser::parse(const Instruction &instruction)
{
uint32_t ids = ir.increase_bound_by(2);
SPIRType type;
auto &type = set<SPIRType>(ids, OpTypeInt);
type.basetype = SPIRType::Int;
type.width = 32;
set<SPIRType>(ids, type);
auto &c = set<SPIRConstant>(ids + 1, ids);
current_block->condition = c.self;

View File

@ -291,7 +291,7 @@ static bool naturally_emit_type(const SPIRType &type)
bool CompilerReflection::type_is_reference(const SPIRType &type) const
{
// Physical pointers and arrays of physical pointers need to refer to the pointee's type.
return type_is_top_level_physical_pointer(type) ||
return is_physical_pointer(type) ||
(type_is_array_of_pointers(type) && type.storage == StorageClassPhysicalStorageBuffer);
}
@ -341,7 +341,7 @@ void CompilerReflection::emit_type(uint32_t type_id, bool &emitted_open_tag)
json_stream->emit_json_key_object("_" + std::to_string(type_id));
json_stream->emit_json_key_value("name", name);
if (type_is_top_level_physical_pointer(type))
if (is_physical_pointer(type))
{
json_stream->emit_json_key_value("type", "_" + std::to_string(type.parent_type));
json_stream->emit_json_key_value("physical_pointer", true);
@ -404,7 +404,7 @@ void CompilerReflection::emit_type_member(const SPIRType &type, uint32_t index)
void CompilerReflection::emit_type_array(const SPIRType &type)
{
if (!type_is_top_level_physical_pointer(type) && !type.array.empty())
if (!is_physical_pointer(type) && !type.array.empty())
{
json_stream->emit_json_key_array("array");
// Note that we emit the zeros here as a means of identifying
@ -444,7 +444,7 @@ void CompilerReflection::emit_type_member_qualifiers(const SPIRType &type, uint3
if (dec.decoration_flags.get(DecorationRowMajor))
json_stream->emit_json_key_value("row_major", true);
if (type_is_top_level_physical_pointer(membertype))
if (is_physical_pointer(membertype))
json_stream->emit_json_key_value("physical_pointer", true);
}
}