Updated spirv-tools.

Бранимир Караџић 2024-08-30 20:21:05 -07:00
parent cce563101d
commit 7cda7c988f
58 changed files with 1899 additions and 512 deletions

View File

@ -1 +1 @@
"v2024.2", "SPIRV-Tools v2024.2 v2024.2.rc1-28-gf2bbb12a"
"v2024.3", "SPIRV-Tools v2024.3 v2024.3-39-g45227aed"

View File

@ -1,5 +1,6 @@
static const spv::Capability pygen_variable_caps_Addresses[] = {spv::Capability::Addresses};
static const spv::Capability pygen_variable_caps_AddressesPhysicalStorageBufferAddresses[] = {spv::Capability::Addresses, spv::Capability::PhysicalStorageBufferAddresses};
static const spv::Capability pygen_variable_caps_AddressesUntypedPointersKHR[] = {spv::Capability::Addresses, spv::Capability::UntypedPointersKHR};
static const spv::Capability pygen_variable_caps_AddressesVariablePointersVariablePointersStorageBuffer[] = {spv::Capability::Addresses, spv::Capability::VariablePointers, spv::Capability::VariablePointersStorageBuffer};
static const spv::Capability pygen_variable_caps_AddressesVariablePointersVariablePointersStorageBufferPhysicalStorageBufferAddresses[] = {spv::Capability::Addresses, spv::Capability::VariablePointers, spv::Capability::VariablePointersStorageBuffer, spv::Capability::PhysicalStorageBufferAddresses};
static const spv::Capability pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL[] = {spv::Capability::ArbitraryPrecisionFixedPointINTEL};
@ -76,6 +77,7 @@ static const spv::Capability pygen_variable_caps_SubgroupAvcMotionEstimationINTE
static const spv::Capability pygen_variable_caps_SubgroupAvcMotionEstimationINTELSubgroupAvcMotionEstimationIntraINTEL[] = {spv::Capability::SubgroupAvcMotionEstimationINTEL, spv::Capability::SubgroupAvcMotionEstimationIntraINTEL};
static const spv::Capability pygen_variable_caps_SubgroupBallotKHR[] = {spv::Capability::SubgroupBallotKHR};
static const spv::Capability pygen_variable_caps_SubgroupBufferBlockIOINTEL[] = {spv::Capability::SubgroupBufferBlockIOINTEL};
static const spv::Capability pygen_variable_caps_SubgroupBufferPrefetchINTEL[] = {spv::Capability::SubgroupBufferPrefetchINTEL};
static const spv::Capability pygen_variable_caps_SubgroupDispatch[] = {spv::Capability::SubgroupDispatch};
static const spv::Capability pygen_variable_caps_SubgroupImageBlockIOINTEL[] = {spv::Capability::SubgroupImageBlockIOINTEL};
static const spv::Capability pygen_variable_caps_SubgroupImageMediaBlockIOINTEL[] = {spv::Capability::SubgroupImageMediaBlockIOINTEL};
@ -90,6 +92,7 @@ static const spv::Capability pygen_variable_caps_TileImageDepthReadAccessEXT[] =
static const spv::Capability pygen_variable_caps_TileImageStencilReadAccessEXT[] = {spv::Capability::TileImageStencilReadAccessEXT};
static const spv::Capability pygen_variable_caps_USMStorageClassesINTEL[] = {spv::Capability::USMStorageClassesINTEL};
static const spv::Capability pygen_variable_caps_UnstructuredLoopControlsINTEL[] = {spv::Capability::UnstructuredLoopControlsINTEL};
static const spv::Capability pygen_variable_caps_UntypedPointersKHR[] = {spv::Capability::UntypedPointersKHR};
static const spv::Capability pygen_variable_caps_VariableLengthArrayINTEL[] = {spv::Capability::VariableLengthArrayINTEL};
static const spv::Capability pygen_variable_caps_VectorComputeINTEL[] = {spv::Capability::VectorComputeINTEL};
@ -143,7 +146,7 @@ static const spv_opcode_desc_t kOpcodeTableEntries[] = {
{"TypeVoid", spv::Op::OpTypeVoid, 0, nullptr, 1, {SPV_OPERAND_TYPE_RESULT_ID}, 1, 0, 0, nullptr, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"TypeBool", spv::Op::OpTypeBool, 0, nullptr, 1, {SPV_OPERAND_TYPE_RESULT_ID}, 1, 0, 0, nullptr, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"TypeInt", spv::Op::OpTypeInt, 0, nullptr, 3, {SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 0, 0, nullptr, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"TypeFloat", spv::Op::OpTypeFloat, 0, nullptr, 2, {SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 0, 0, nullptr, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"TypeFloat", spv::Op::OpTypeFloat, 0, nullptr, 3, {SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_OPTIONAL_FPENCODING}, 1, 0, 0, nullptr, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"TypeVector", spv::Op::OpTypeVector, 0, nullptr, 3, {SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 0, 0, nullptr, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"TypeMatrix", spv::Op::OpTypeMatrix, 1, pygen_variable_caps_Matrix, 3, {SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 0, 0, nullptr, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"TypeImage", spv::Op::OpTypeImage, 0, nullptr, 9, {SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_DIMENSIONALITY, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_SAMPLER_IMAGE_FORMAT, SPV_OPERAND_TYPE_OPTIONAL_ACCESS_QUALIFIER}, 1, 0, 0, nullptr, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
@ -181,7 +184,7 @@ static const spv_opcode_desc_t kOpcodeTableEntries[] = {
{"Load", spv::Op::OpLoad, 0, nullptr, 4, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_OPTIONAL_MEMORY_ACCESS}, 1, 1, 0, nullptr, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"Store", spv::Op::OpStore, 0, nullptr, 3, {SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_OPTIONAL_MEMORY_ACCESS}, 0, 0, 0, nullptr, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"CopyMemory", spv::Op::OpCopyMemory, 0, nullptr, 4, {SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_OPTIONAL_MEMORY_ACCESS, SPV_OPERAND_TYPE_OPTIONAL_MEMORY_ACCESS}, 0, 0, 0, nullptr, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"CopyMemorySized", spv::Op::OpCopyMemorySized, 1, pygen_variable_caps_Addresses, 5, {SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_OPTIONAL_MEMORY_ACCESS, SPV_OPERAND_TYPE_OPTIONAL_MEMORY_ACCESS}, 0, 0, 0, nullptr, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"CopyMemorySized", spv::Op::OpCopyMemorySized, 2, pygen_variable_caps_AddressesUntypedPointersKHR, 5, {SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_OPTIONAL_MEMORY_ACCESS, SPV_OPERAND_TYPE_OPTIONAL_MEMORY_ACCESS}, 0, 0, 0, nullptr, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"AccessChain", spv::Op::OpAccessChain, 0, nullptr, 4, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_VARIABLE_ID}, 1, 1, 0, nullptr, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"InBoundsAccessChain", spv::Op::OpInBoundsAccessChain, 0, nullptr, 4, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_VARIABLE_ID}, 1, 1, 0, nullptr, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"PtrAccessChain", spv::Op::OpPtrAccessChain, 4, pygen_variable_caps_AddressesVariablePointersVariablePointersStorageBufferPhysicalStorageBufferAddresses, 5, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_VARIABLE_ID}, 1, 1, 0, nullptr, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
@ -472,8 +475,16 @@ static const spv_opcode_desc_t kOpcodeTableEntries[] = {
{"DepthAttachmentReadEXT", spv::Op::OpDepthAttachmentReadEXT, 1, pygen_variable_caps_TileImageDepthReadAccessEXT, 3, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_OPTIONAL_ID}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
{"StencilAttachmentReadEXT", spv::Op::OpStencilAttachmentReadEXT, 1, pygen_variable_caps_TileImageStencilReadAccessEXT, 3, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_OPTIONAL_ID}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
{"TerminateInvocation", spv::Op::OpTerminateInvocation, 1, pygen_variable_caps_Shader, 0, {}, 0, 0, 1, pygen_variable_exts_SPV_KHR_terminate_invocation, SPV_SPIRV_VERSION_WORD(1,6), 0xffffffffu},
{"TypeUntypedPointerKHR", spv::Op::OpTypeUntypedPointerKHR, 1, pygen_variable_caps_UntypedPointersKHR, 2, {SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_STORAGE_CLASS}, 1, 0, 0, nullptr, 0xffffffffu, 0xffffffffu},
{"UntypedVariableKHR", spv::Op::OpUntypedVariableKHR, 1, pygen_variable_caps_UntypedPointersKHR, 5, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_STORAGE_CLASS, SPV_OPERAND_TYPE_OPTIONAL_ID, SPV_OPERAND_TYPE_OPTIONAL_ID}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
{"UntypedAccessChainKHR", spv::Op::OpUntypedAccessChainKHR, 1, pygen_variable_caps_UntypedPointersKHR, 5, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_VARIABLE_ID}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
{"UntypedInBoundsAccessChainKHR", spv::Op::OpUntypedInBoundsAccessChainKHR, 1, pygen_variable_caps_UntypedPointersKHR, 5, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_VARIABLE_ID}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
{"SubgroupBallotKHR", spv::Op::OpSubgroupBallotKHR, 1, pygen_variable_caps_SubgroupBallotKHR, 3, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID}, 1, 1, 1, pygen_variable_exts_SPV_KHR_shader_ballot, 0xffffffffu, 0xffffffffu},
{"SubgroupFirstInvocationKHR", spv::Op::OpSubgroupFirstInvocationKHR, 1, pygen_variable_caps_SubgroupBallotKHR, 3, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID}, 1, 1, 1, pygen_variable_exts_SPV_KHR_shader_ballot, 0xffffffffu, 0xffffffffu},
{"UntypedPtrAccessChainKHR", spv::Op::OpUntypedPtrAccessChainKHR, 1, pygen_variable_caps_UntypedPointersKHR, 6, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_VARIABLE_ID}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
{"UntypedInBoundsPtrAccessChainKHR", spv::Op::OpUntypedInBoundsPtrAccessChainKHR, 1, pygen_variable_caps_UntypedPointersKHR, 6, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_VARIABLE_ID}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
{"UntypedArrayLengthKHR", spv::Op::OpUntypedArrayLengthKHR, 1, pygen_variable_caps_UntypedPointersKHR, 5, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
{"UntypedPrefetchKHR", spv::Op::OpUntypedPrefetchKHR, 1, pygen_variable_caps_UntypedPointersKHR, 5, {SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_OPTIONAL_ID, SPV_OPERAND_TYPE_OPTIONAL_ID, SPV_OPERAND_TYPE_OPTIONAL_ID}, 0, 0, 0, nullptr, 0xffffffffu, 0xffffffffu},
{"SubgroupAllKHR", spv::Op::OpSubgroupAllKHR, 1, pygen_variable_caps_SubgroupVoteKHR, 3, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID}, 1, 1, 1, pygen_variable_exts_SPV_KHR_subgroup_vote, 0xffffffffu, 0xffffffffu},
{"SubgroupAnyKHR", spv::Op::OpSubgroupAnyKHR, 1, pygen_variable_caps_SubgroupVoteKHR, 3, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID}, 1, 1, 1, pygen_variable_exts_SPV_KHR_subgroup_vote, 0xffffffffu, 0xffffffffu},
{"SubgroupAllEqualKHR", spv::Op::OpSubgroupAllEqualKHR, 1, pygen_variable_caps_SubgroupVoteKHR, 3, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID}, 1, 1, 1, pygen_variable_exts_SPV_KHR_subgroup_vote, 0xffffffffu, 0xffffffffu},
@ -851,6 +862,7 @@ static const spv_opcode_desc_t kOpcodeTableEntries[] = {
{"ConvertBF16ToFINTEL", spv::Op::OpConvertBF16ToFINTEL, 1, pygen_variable_caps_BFloat16ConversionINTEL, 3, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
{"ControlBarrierArriveINTEL", spv::Op::OpControlBarrierArriveINTEL, 1, pygen_variable_caps_SplitBarrierINTEL, 3, {SPV_OPERAND_TYPE_SCOPE_ID, SPV_OPERAND_TYPE_SCOPE_ID, SPV_OPERAND_TYPE_MEMORY_SEMANTICS_ID}, 0, 0, 0, nullptr, 0xffffffffu, 0xffffffffu},
{"ControlBarrierWaitINTEL", spv::Op::OpControlBarrierWaitINTEL, 1, pygen_variable_caps_SplitBarrierINTEL, 3, {SPV_OPERAND_TYPE_SCOPE_ID, SPV_OPERAND_TYPE_SCOPE_ID, SPV_OPERAND_TYPE_MEMORY_SEMANTICS_ID}, 0, 0, 0, nullptr, 0xffffffffu, 0xffffffffu},
{"SubgroupBlockPrefetchINTEL", spv::Op::OpSubgroupBlockPrefetchINTEL, 1, pygen_variable_caps_SubgroupBufferPrefetchINTEL, 3, {SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_OPTIONAL_MEMORY_ACCESS}, 0, 0, 0, nullptr, 0xffffffffu, 0xffffffffu},
{"GroupIMulKHR", spv::Op::OpGroupIMulKHR, 1, pygen_variable_caps_GroupUniformArithmeticKHR, 5, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_SCOPE_ID, SPV_OPERAND_TYPE_GROUP_OPERATION, SPV_OPERAND_TYPE_ID}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
{"GroupFMulKHR", spv::Op::OpGroupFMulKHR, 1, pygen_variable_caps_GroupUniformArithmeticKHR, 5, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_SCOPE_ID, SPV_OPERAND_TYPE_GROUP_OPERATION, SPV_OPERAND_TYPE_ID}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
{"GroupBitwiseAndKHR", spv::Op::OpGroupBitwiseAndKHR, 1, pygen_variable_caps_GroupUniformArithmeticKHR, 5, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_SCOPE_ID, SPV_OPERAND_TYPE_GROUP_OPERATION, SPV_OPERAND_TYPE_ID}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},

File diff suppressed because one or more lines are too long

View File

@ -20,6 +20,7 @@ kSPV_EXT_fragment_shader_interlock,
kSPV_EXT_mesh_shader,
kSPV_EXT_opacity_micromap,
kSPV_EXT_physical_storage_buffer,
kSPV_EXT_relaxed_printf_string_address_space,
kSPV_EXT_replicated_composites,
kSPV_EXT_shader_atomic_float16_add,
kSPV_EXT_shader_atomic_float_add,
@ -68,6 +69,7 @@ kSPV_INTEL_optnone,
kSPV_INTEL_runtime_aligned,
kSPV_INTEL_shader_integer_functions2,
kSPV_INTEL_split_barrier,
kSPV_INTEL_subgroup_buffer_prefetch,
kSPV_INTEL_subgroups,
kSPV_INTEL_unstructured_loop_controls,
kSPV_INTEL_usm_storage_classes,
@ -107,6 +109,7 @@ kSPV_KHR_subgroup_uniform_control_flow,
kSPV_KHR_subgroup_vote,
kSPV_KHR_terminate_invocation,
kSPV_KHR_uniform_group_instructions,
kSPV_KHR_untyped_pointers,
kSPV_KHR_variable_pointers,
kSPV_KHR_vulkan_memory_model,
kSPV_KHR_workgroup_memory_explicit_layout,

View File

@ -20,7 +20,7 @@
{19, "Tellusim", "Clay Shader Compiler", "Tellusim Clay Shader Compiler"},
{20, "W3C WebGPU Group", "WHLSL Shader Translator", "W3C WebGPU Group WHLSL Shader Translator"},
{21, "Google", "Clspv", "Google Clspv"},
{22, "Google", "MLIR SPIR-V Serializer", "Google MLIR SPIR-V Serializer"},
{22, "LLVM", "MLIR SPIR-V Serializer", "LLVM MLIR SPIR-V Serializer"},
{23, "Google", "Tint Compiler", "Google Tint Compiler"},
{24, "Google", "ANGLE Shader Compiler", "Google ANGLE Shader Compiler"},
{25, "Netease Games", "Messiah Shader Compiler", "Netease Games Messiah Shader Compiler"},
@ -41,4 +41,5 @@
{40, "NVIDIA", "Slang Compiler", "NVIDIA Slang Compiler"},
{41, "Zig Software Foundation", "Zig Compiler", "Zig Software Foundation Zig Compiler"},
{42, "Rendong Liang", "spq", "Rendong Liang spq"},
{43, "LLVM", "LLVM SPIR-V Backend", "LLVM LLVM SPIR-V Backend"},
{43, "LLVM", "LLVM SPIR-V Backend", "LLVM LLVM SPIR-V Backend"},
{44, "Robert Konrad", "Kongruent", "Robert Konrad Kongruent"},

View File

@ -6,7 +6,7 @@ static const spv_ext_inst_desc_t nonsemantic_vkspreflection_entries[] = {
{"StopCounter", 3, 0, nullptr, {SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_NONE}},
{"PushConstants", 4, 0, nullptr, {SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_STRING, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_NONE}},
{"SpecializationMapEntry", 5, 0, nullptr, {SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_NONE}},
{"DescriptorSetBuffer", 6, 0, nullptr, {SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_NONE}},
{"DescriptorSetBuffer", 6, 0, nullptr, {SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_NONE}},
{"DescriptorSetImage", 7, 0, nullptr, {SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_NONE}},
{"DescriptorSetSampler", 8, 0, nullptr, {SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_FLOAT, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_FLOAT, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_FLOAT, SPV_OPERAND_TYPE_LITERAL_FLOAT, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_NONE}}
};

View File

@ -211,6 +211,7 @@ static const spvtools::Extension pygen_variable_exts_SPV_INTEL_optnone[] = {spvt
static const spvtools::Extension pygen_variable_exts_SPV_INTEL_runtime_aligned[] = {spvtools::Extension::kSPV_INTEL_runtime_aligned};
static const spvtools::Extension pygen_variable_exts_SPV_INTEL_shader_integer_functions2[] = {spvtools::Extension::kSPV_INTEL_shader_integer_functions2};
static const spvtools::Extension pygen_variable_exts_SPV_INTEL_split_barrier[] = {spvtools::Extension::kSPV_INTEL_split_barrier};
static const spvtools::Extension pygen_variable_exts_SPV_INTEL_subgroup_buffer_prefetch[] = {spvtools::Extension::kSPV_INTEL_subgroup_buffer_prefetch};
static const spvtools::Extension pygen_variable_exts_SPV_INTEL_subgroups[] = {spvtools::Extension::kSPV_INTEL_subgroups};
static const spvtools::Extension pygen_variable_exts_SPV_INTEL_unstructured_loop_controls[] = {spvtools::Extension::kSPV_INTEL_unstructured_loop_controls};
static const spvtools::Extension pygen_variable_exts_SPV_INTEL_usm_storage_classes[] = {spvtools::Extension::kSPV_INTEL_usm_storage_classes};
@ -248,6 +249,7 @@ static const spvtools::Extension pygen_variable_exts_SPV_KHR_subgroup_rotate[] =
static const spvtools::Extension pygen_variable_exts_SPV_KHR_subgroup_uniform_control_flow[] = {spvtools::Extension::kSPV_KHR_subgroup_uniform_control_flow};
static const spvtools::Extension pygen_variable_exts_SPV_KHR_subgroup_vote[] = {spvtools::Extension::kSPV_KHR_subgroup_vote};
static const spvtools::Extension pygen_variable_exts_SPV_KHR_uniform_group_instructions[] = {spvtools::Extension::kSPV_KHR_uniform_group_instructions};
static const spvtools::Extension pygen_variable_exts_SPV_KHR_untyped_pointers[] = {spvtools::Extension::kSPV_KHR_untyped_pointers};
static const spvtools::Extension pygen_variable_exts_SPV_KHR_variable_pointers[] = {spvtools::Extension::kSPV_KHR_variable_pointers};
static const spvtools::Extension pygen_variable_exts_SPV_KHR_vulkan_memory_model[] = {spvtools::Extension::kSPV_KHR_vulkan_memory_model};
static const spvtools::Extension pygen_variable_exts_SPV_KHR_workgroup_memory_explicit_layout[] = {spvtools::Extension::kSPV_KHR_workgroup_memory_explicit_layout};
@ -725,7 +727,8 @@ static const spv_operand_desc_t pygen_variable_ImageChannelDataTypeEntries[] = {
{"UnormInt24", 15, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"UnormInt101010_2", 16, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"UnsignedIntRaw10EXT", 19, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"UnsignedIntRaw12EXT", 20, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu}
{"UnsignedIntRaw12EXT", 20, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"UnormInt2_101010EXT", 21, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu}
};
static const spv_operand_desc_t pygen_variable_FPRoundingModeEntries[] = {
@ -1215,6 +1218,7 @@ static const spv_operand_desc_t pygen_variable_CapabilityEntries[] = {
{"RoundingModeRTZ", 4468, 0, nullptr, 1, pygen_variable_exts_SPV_KHR_float_controls, {}, SPV_SPIRV_VERSION_WORD(1,4), 0xffffffffu},
{"RayQueryProvisionalKHR", 4471, 1, pygen_variable_caps_Shader, 1, pygen_variable_exts_SPV_KHR_ray_query, {}, 0xffffffffu, 0xffffffffu},
{"RayQueryKHR", 4472, 1, pygen_variable_caps_Shader, 1, pygen_variable_exts_SPV_KHR_ray_query, {}, 0xffffffffu, 0xffffffffu},
{"UntypedPointersKHR", 4473, 0, nullptr, 1, pygen_variable_exts_SPV_KHR_untyped_pointers, {}, 0xffffffffu, 0xffffffffu},
{"RayTraversalPrimitiveCullingKHR", 4478, 2, pygen_variable_caps_RayQueryKHRRayTracingKHR, 2, pygen_variable_exts_SPV_KHR_ray_querySPV_KHR_ray_tracing, {}, 0xffffffffu, 0xffffffffu},
{"RayTracingKHR", 4479, 1, pygen_variable_caps_Shader, 1, pygen_variable_exts_SPV_KHR_ray_tracing, {}, 0xffffffffu, 0xffffffffu},
{"TextureSampleWeightedQCOM", 4484, 0, nullptr, 1, pygen_variable_exts_SPV_QCOM_image_processing, {}, 0xffffffffu, 0xffffffffu},
@ -1368,6 +1372,7 @@ static const spv_operand_desc_t pygen_variable_CapabilityEntries[] = {
{"FPGAArgumentInterfacesINTEL", 6174, 0, nullptr, 1, pygen_variable_exts_SPV_INTEL_fpga_argument_interfaces, {}, 0xffffffffu, 0xffffffffu},
{"GlobalVariableHostAccessINTEL", 6187, 0, nullptr, 1, pygen_variable_exts_SPV_INTEL_global_variable_host_access, {}, 0xffffffffu, 0xffffffffu},
{"GlobalVariableFPGADecorationsINTEL", 6189, 0, nullptr, 1, pygen_variable_exts_SPV_INTEL_global_variable_fpga_decorations, {}, 0xffffffffu, 0xffffffffu},
{"SubgroupBufferPrefetchINTEL", 6220, 0, nullptr, 1, pygen_variable_exts_SPV_INTEL_subgroup_buffer_prefetch, {}, 0xffffffffu, 0xffffffffu},
{"GroupUniformArithmeticKHR", 6400, 0, nullptr, 1, pygen_variable_exts_SPV_KHR_uniform_group_instructions, {}, 0xffffffffu, 0xffffffffu},
{"MaskedGatherScatterINTEL", 6427, 0, nullptr, 1, pygen_variable_exts_SPV_INTEL_masked_gather_scatter, {}, 0xffffffffu, 0xffffffffu},
{"CacheControlsINTEL", 6441, 0, nullptr, 1, pygen_variable_exts_SPV_INTEL_cache_controls, {}, 0xffffffffu, 0xffffffffu},
@ -1441,6 +1446,10 @@ static const spv_operand_desc_t pygen_variable_NamedMaximumNumberOfRegistersEntr
{"AutoINTEL", 0, 1, pygen_variable_caps_RegisterLimitsINTEL, 0, nullptr, {}, 0xffffffffu, 0xffffffffu}
};
static const spv_operand_desc_t pygen_variable_FPEncodingEntries[] = {
{"place holder", 0, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(999,0), 0}
};
static const spv_operand_desc_t pygen_variable_DebugInfoFlagsEntries[] = {
{"None", 0x0000, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1, 0), 0xffffffffu},
{"FlagIsProtected", 0x01, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1, 0), 0xffffffffu},
@ -1609,6 +1618,7 @@ static const spv_operand_desc_group_t pygen_variable_OperandInfoTable[] = {
{SPV_OPERAND_TYPE_LOAD_CACHE_CONTROL, ARRAY_SIZE(pygen_variable_LoadCacheControlEntries), pygen_variable_LoadCacheControlEntries},
{SPV_OPERAND_TYPE_STORE_CACHE_CONTROL, ARRAY_SIZE(pygen_variable_StoreCacheControlEntries), pygen_variable_StoreCacheControlEntries},
{SPV_OPERAND_TYPE_NAMED_MAXIMUM_NUMBER_OF_REGISTERS, ARRAY_SIZE(pygen_variable_NamedMaximumNumberOfRegistersEntries), pygen_variable_NamedMaximumNumberOfRegistersEntries},
{SPV_OPERAND_TYPE_FPENCODING, ARRAY_SIZE(pygen_variable_FPEncodingEntries), pygen_variable_FPEncodingEntries},
{SPV_OPERAND_TYPE_DEBUG_INFO_FLAGS, ARRAY_SIZE(pygen_variable_DebugInfoFlagsEntries), pygen_variable_DebugInfoFlagsEntries},
{SPV_OPERAND_TYPE_DEBUG_BASE_TYPE_ATTRIBUTE_ENCODING, ARRAY_SIZE(pygen_variable_DebugBaseTypeAttributeEncodingEntries), pygen_variable_DebugBaseTypeAttributeEncodingEntries},
{SPV_OPERAND_TYPE_DEBUG_COMPOSITE_TYPE, ARRAY_SIZE(pygen_variable_DebugCompositeTypeEntries), pygen_variable_DebugCompositeTypeEntries},
@ -1625,5 +1635,6 @@ static const spv_operand_desc_group_t pygen_variable_OperandInfoTable[] = {
{SPV_OPERAND_TYPE_OPTIONAL_RAW_ACCESS_CHAIN_OPERANDS, ARRAY_SIZE(pygen_variable_RawAccessChainOperandsEntries), pygen_variable_RawAccessChainOperandsEntries},
{SPV_OPERAND_TYPE_OPTIONAL_ACCESS_QUALIFIER, ARRAY_SIZE(pygen_variable_AccessQualifierEntries), pygen_variable_AccessQualifierEntries},
{SPV_OPERAND_TYPE_OPTIONAL_PACKED_VECTOR_FORMAT, ARRAY_SIZE(pygen_variable_PackedVectorFormatEntries), pygen_variable_PackedVectorFormatEntries},
{SPV_OPERAND_TYPE_OPTIONAL_COOPERATIVE_MATRIX_OPERANDS, ARRAY_SIZE(pygen_variable_CooperativeMatrixOperandsEntries), pygen_variable_CooperativeMatrixOperandsEntries}
{SPV_OPERAND_TYPE_OPTIONAL_COOPERATIVE_MATRIX_OPERANDS, ARRAY_SIZE(pygen_variable_CooperativeMatrixOperandsEntries), pygen_variable_CooperativeMatrixOperandsEntries},
{SPV_OPERAND_TYPE_OPTIONAL_FPENCODING, ARRAY_SIZE(pygen_variable_FPEncodingEntries), pygen_variable_FPEncodingEntries}
};

View File

@ -175,6 +175,7 @@ typedef enum spv_operand_type_t {
SPV_OPERAND_TYPE_KERNEL_ENQ_FLAGS, // SPIR-V Sec 3.29
SPV_OPERAND_TYPE_KERNEL_PROFILING_INFO, // SPIR-V Sec 3.30
SPV_OPERAND_TYPE_CAPABILITY, // SPIR-V Sec 3.31
SPV_OPERAND_TYPE_FPENCODING, // SPIR-V Sec 3.51
// NOTE: New concrete enum values should be added at the end.
@ -236,6 +237,8 @@ typedef enum spv_operand_type_t {
// assemble regardless of where they occur -- literals, IDs, immediate
// integers, etc.
SPV_OPERAND_TYPE_OPTIONAL_CIV,
// An optional floating point encoding enum
SPV_OPERAND_TYPE_OPTIONAL_FPENCODING,
// A variable operand represents zero or more logical operands.
// In an instruction definition, this may only appear at the end of the
@ -383,6 +386,11 @@ typedef enum spv_binary_to_text_options_t {
SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES = SPV_BIT(6),
// Add some comments to the generated assembly
SPV_BINARY_TO_TEXT_OPTION_COMMENT = SPV_BIT(7),
// Use nested indentation for more readable SPIR-V
SPV_BINARY_TO_TEXT_OPTION_NESTED_INDENT = SPV_BIT(8),
// Reorder blocks to match the structured control flow of SPIR-V to increase
// readability.
SPV_BINARY_TO_TEXT_OPTION_REORDER_BLOCKS = SPV_BIT(9),
SPV_FORCE_32_BIT_ENUM(spv_binary_to_text_options_t)
} spv_binary_to_text_options_t;
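A minimal usage sketch for the two new flags: they combine with the existing bit options when disassembling through the C++ SpirvTools wrapper. The helper name, target environment, and flag mix below are assumptions, not part of the header.

#include <string>
#include <vector>
#include "spirv-tools/libspirv.hpp"

// Hypothetical helper: disassemble a module with nested indentation and
// structured block ordering; assumes `binary` holds a valid SPIR-V module.
std::string DisassembleStructured(const std::vector<uint32_t>& binary) {
  spvtools::SpirvTools tools(SPV_ENV_UNIVERSAL_1_6);
  std::string text;
  const uint32_t options = SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES |
                           SPV_BINARY_TO_TEXT_OPTION_NESTED_INDENT |
                           SPV_BINARY_TO_TEXT_OPTION_REORDER_BLOCKS;
  tools.Disassemble(binary, &text, options);  // returns false on failure
  return text;
}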

View File

@ -16,7 +16,6 @@
#define INCLUDE_SPIRV_TOOLS_LINKER_HPP_
#include <cstdint>
#include <memory>
#include <vector>
@ -63,11 +62,17 @@ class SPIRV_TOOLS_EXPORT LinkerOptions {
use_highest_version_ = use_highest_vers;
}
bool GetAllowPtrTypeMismatch() const { return allow_ptr_type_mismatch_; }
void SetAllowPtrTypeMismatch(bool allow_ptr_type_mismatch) {
allow_ptr_type_mismatch_ = allow_ptr_type_mismatch;
}
private:
bool create_library_{false};
bool verify_ids_{false};
bool allow_partial_linkage_{false};
bool use_highest_version_{false};
bool allow_ptr_type_mismatch_{false};
};
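A minimal usage sketch for the new option: it switches on the pointer-type-mismatch tolerance and links the given modules through the spvtools::Link entry point this header declares. The wrapper function name and target environment are assumptions.

#include <vector>
#include "spirv-tools/libspirv.hpp"
#include "spirv-tools/linker.hpp"

// Hypothetical helper: link modules while tolerating pointer-type mismatches
// between imported and exported function signatures.
bool LinkWithPtrMismatch(const std::vector<std::vector<uint32_t>>& modules,
                         std::vector<uint32_t>* linked) {
  spvtools::Context context(SPV_ENV_UNIVERSAL_1_6);
  spvtools::LinkerOptions options;
  options.SetAllowPtrTypeMismatch(true);  // new setter added in this change
  return spvtools::Link(context, modules, linked, options) == SPV_SUCCESS;
}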
// Links one or more SPIR-V modules into a new SPIR-V module. That is, combine

View File

@ -827,14 +827,19 @@ Optimizer::PassToken CreateReplaceDescArrayAccessUsingVarIndexPass();
// Create descriptor scalar replacement pass.
// This pass replaces every array variable |desc| that has a DescriptorSet and
// Binding decorations with a new variable for each element of the array.
// Suppose |desc| was bound at binding |b|. Then the variable corresponding to
// |desc[i]| will have binding |b+i|. The descriptor set will be the same. It
// is assumed that no other variable already has a binding that will be used by
// one of the new variables. If not, the pass will generate invalid SPIR-V. All
// accesses to |desc| must be OpAccessChain instructions with a literal index
// for the first index.
// Binding decorations with a new variable for each element of the
// array/composite. Suppose |desc| was bound at binding |b|. Then the variable
// corresponding to |desc[i]| will have binding |b+i|. The descriptor set will
// be the same. It is assumed that no other variable already has a binding that
// will be used by one of the new variables. If not, the pass will generate
// invalid SPIR-V. All accesses to |desc| must be OpAccessChain instructions
// with a literal index for the first index. This variant flattens both
// composites and arrays.
Optimizer::PassToken CreateDescriptorScalarReplacementPass();
// This variant flattens only composites.
Optimizer::PassToken CreateDescriptorCompositeScalarReplacementPass();
// This variant flattens only arrays.
Optimizer::PassToken CreateDescriptorArrayScalarReplacementPass();
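A sketch of how the new variants plug into the existing Optimizer API; the function name and target environment below are assumptions. For instance, an array |desc| bound at binding 2 with three elements becomes three variables at bindings 2, 3, and 4.

#include <vector>
#include "spirv-tools/optimizer.hpp"

// Hypothetical helper: flatten only descriptor arrays (not composites) using
// the array-only variant of the pass.
bool FlattenDescriptorArrays(const std::vector<uint32_t>& in,
                             std::vector<uint32_t>* out) {
  spvtools::Optimizer opt(SPV_ENV_VULKAN_1_3);
  opt.RegisterPass(spvtools::CreateDescriptorArrayScalarReplacementPass());
  return opt.Run(in.data(), in.size(), out);
}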
// Create a pass to replace each OpKill instruction with a function call to a
// function that has a single OpKill. Also replace each OpTerminateInvocation

View File

@ -671,6 +671,10 @@ spv_result_t Parser::parseOperand(size_t inst_offset,
case SPV_OPERAND_TYPE_OVERFLOW_MODES:
case SPV_OPERAND_TYPE_PACKED_VECTOR_FORMAT:
case SPV_OPERAND_TYPE_OPTIONAL_PACKED_VECTOR_FORMAT:
case SPV_OPERAND_TYPE_FPENCODING:
case SPV_OPERAND_TYPE_OPTIONAL_FPENCODING:
case SPV_OPERAND_TYPE_LOAD_CACHE_CONTROL:
case SPV_OPERAND_TYPE_STORE_CACHE_CONTROL:
case SPV_OPERAND_TYPE_NAMED_MAXIMUM_NUMBER_OF_REGISTERS: {
// A single word that is a plain enum value.
@ -679,6 +683,8 @@ spv_result_t Parser::parseOperand(size_t inst_offset,
parsed_operand.type = SPV_OPERAND_TYPE_ACCESS_QUALIFIER;
if (type == SPV_OPERAND_TYPE_OPTIONAL_PACKED_VECTOR_FORMAT)
parsed_operand.type = SPV_OPERAND_TYPE_PACKED_VECTOR_FORMAT;
if (type == SPV_OPERAND_TYPE_OPTIONAL_FPENCODING)
parsed_operand.type = SPV_OPERAND_TYPE_FPENCODING;
spv_operand_desc entry;
if (grammar_.lookupOperand(type, word, &entry)) {
@ -699,7 +705,7 @@ spv_result_t Parser::parseOperand(size_t inst_offset,
<< ", if you are creating a new source language please use "
"value 0 "
"(Unknown) and when ready, add your source language to "
"SPRIV-Headers";
"SPIRV-Headers";
}
// Prepare to accept operands to this operand, if needed.
spvPushOperandTypes(entry->operandTypes, expected_operands);

View File

@ -24,6 +24,8 @@
#include <cstring>
#include <iomanip>
#include <memory>
#include <set>
#include <stack>
#include <unordered_map>
#include <utility>
@ -43,6 +45,70 @@
namespace spvtools {
namespace {
// Indices to ControlFlowGraph's list of blocks from one block to its successors
struct BlockSuccessors {
// Merge block in OpLoopMerge and OpSelectionMerge
uint32_t merge_block_id = 0;
// The continue block in OpLoopMerge
uint32_t continue_block_id = 0;
// The true and false blocks in OpBranchConditional
uint32_t true_block_id = 0;
uint32_t false_block_id = 0;
// The body block of a loop, as specified by OpBranch after a merge
// instruction
uint32_t body_block_id = 0;
// The same-nesting-level block that follows this one, indicated by an
// OpBranch with no merge instruction.
uint32_t next_block_id = 0;
// The cases (including default) of an OpSwitch
std::vector<uint32_t> case_block_ids;
};
class ParsedInstruction {
public:
ParsedInstruction(const spv_parsed_instruction_t* instruction) {
// Make a copy of the parsed instruction, including stable memory for its
// operands.
instruction_ = *instruction;
operands_ =
std::make_unique<spv_parsed_operand_t[]>(instruction->num_operands);
memcpy(operands_.get(), instruction->operands,
instruction->num_operands * sizeof(*instruction->operands));
instruction_.operands = operands_.get();
}
const spv_parsed_instruction_t* get() const { return &instruction_; }
private:
spv_parsed_instruction_t instruction_;
std::unique_ptr<spv_parsed_operand_t[]> operands_;
};
// One block in the CFG
struct SingleBlock {
// The byte offset in the SPIR-V where the block starts. Used for printing in
// a comment.
size_t byte_offset;
// Block instructions
std::vector<ParsedInstruction> instructions;
// Successors of this block
BlockSuccessors successors;
// The nesting level for this block.
uint32_t nest_level = 0;
bool nest_level_assigned = false;
// Whether the block was reachable
bool reachable = false;
};
// CFG for one function
struct ControlFlowGraph {
std::vector<SingleBlock> blocks;
};
// A Disassembler instance converts a SPIR-V binary to its assembly
// representation.
class Disassembler {
@ -50,6 +116,10 @@ class Disassembler {
Disassembler(const AssemblyGrammar& grammar, uint32_t options,
NameMapper name_mapper)
: print_(spvIsInBitfield(SPV_BINARY_TO_TEXT_OPTION_PRINT, options)),
nested_indent_(
spvIsInBitfield(SPV_BINARY_TO_TEXT_OPTION_NESTED_INDENT, options)),
reorder_blocks_(
spvIsInBitfield(SPV_BINARY_TO_TEXT_OPTION_REORDER_BLOCKS, options)),
text_(),
out_(print_ ? out_stream() : out_stream(text_)),
instruction_disassembler_(grammar, out_.get(), options, name_mapper),
@ -70,7 +140,13 @@ class Disassembler {
spv_result_t SaveTextResult(spv_text* text_result) const;
private:
void EmitCFG();
const bool print_; // Should we also print to the standard output stream?
const bool nested_indent_; // Should the blocks be indented according to the
// control flow structure?
const bool
reorder_blocks_; // Should the blocks be reordered for readability?
spv_endianness_t endian_; // The detected endianness of the binary.
std::stringstream text_; // Captures the text, if not printing.
out_stream out_; // The Output stream. Either to text_ or standard output.
@ -80,6 +156,9 @@ class Disassembler {
bool inserted_decoration_space_ = false;
bool inserted_debug_space_ = false;
bool inserted_type_space_ = false;
// The CFG for the current function
ControlFlowGraph current_function_cfg_;
};
spv_result_t Disassembler::HandleHeader(spv_endianness_t endian,
@ -106,13 +185,336 @@ spv_result_t Disassembler::HandleInstruction(
inserted_debug_space_,
inserted_type_space_);
instruction_disassembler_.EmitInstruction(inst, byte_offset_);
// When nesting needs to be calculated or when the blocks are reordered, we
// have to have the full picture of the CFG first. Defer processing of the
// instructions until the entire function is visited. This is not done
// without those options (even if simpler) to improve debuggability; for
// example to be able to see whatever is parsed so far even if there is a
// parse error.
if (nested_indent_ || reorder_blocks_) {
switch (static_cast<spv::Op>(inst.opcode)) {
case spv::Op::OpLabel: {
// Add a new block to the CFG
SingleBlock new_block;
new_block.byte_offset = byte_offset_;
new_block.instructions.emplace_back(&inst);
current_function_cfg_.blocks.push_back(std::move(new_block));
break;
}
case spv::Op::OpFunctionEnd:
// Process the CFG and output the instructions
EmitCFG();
// Output OpFunctionEnd itself too
[[fallthrough]];
default:
if (!current_function_cfg_.blocks.empty()) {
// If in a function, stash the instruction for later.
current_function_cfg_.blocks.back().instructions.emplace_back(&inst);
} else {
// Otherwise emit the instruction right away.
instruction_disassembler_.EmitInstruction(inst, byte_offset_);
}
break;
}
} else {
instruction_disassembler_.EmitInstruction(inst, byte_offset_);
}
byte_offset_ += inst.num_words * sizeof(uint32_t);
return SPV_SUCCESS;
}
// Helper to get the operand of an instruction as an id.
uint32_t GetOperand(const spv_parsed_instruction_t* instruction,
uint32_t operand) {
return instruction->words[instruction->operands[operand].offset];
}
std::unordered_map<uint32_t, uint32_t> BuildControlFlowGraph(
ControlFlowGraph& cfg) {
std::unordered_map<uint32_t, uint32_t> id_to_index;
for (size_t index = 0; index < cfg.blocks.size(); ++index) {
SingleBlock& block = cfg.blocks[index];
// For future use, build the ID->index map
assert(static_cast<spv::Op>(block.instructions[0].get()->opcode) ==
spv::Op::OpLabel);
const uint32_t id = block.instructions[0].get()->result_id;
id_to_index[id] = static_cast<uint32_t>(index);
// Look for a merge instruction first. The function of OpBranch depends on
// that.
if (block.instructions.size() >= 3) {
const spv_parsed_instruction_t* maybe_merge =
block.instructions[block.instructions.size() - 2].get();
switch (static_cast<spv::Op>(maybe_merge->opcode)) {
case spv::Op::OpLoopMerge:
block.successors.merge_block_id = GetOperand(maybe_merge, 0);
block.successors.continue_block_id = GetOperand(maybe_merge, 1);
break;
case spv::Op::OpSelectionMerge:
block.successors.merge_block_id = GetOperand(maybe_merge, 0);
break;
default:
break;
}
}
// Then look at the last instruction; it must be a branch
assert(block.instructions.size() >= 2);
const spv_parsed_instruction_t* branch = block.instructions.back().get();
switch (static_cast<spv::Op>(branch->opcode)) {
case spv::Op::OpBranch:
if (block.successors.merge_block_id != 0) {
block.successors.body_block_id = GetOperand(branch, 0);
} else {
block.successors.next_block_id = GetOperand(branch, 0);
}
break;
case spv::Op::OpBranchConditional:
block.successors.true_block_id = GetOperand(branch, 1);
block.successors.false_block_id = GetOperand(branch, 2);
break;
case spv::Op::OpSwitch:
for (uint32_t case_index = 1; case_index < branch->num_operands;
case_index += 2) {
block.successors.case_block_ids.push_back(
GetOperand(branch, case_index));
}
break;
default:
break;
}
}
return id_to_index;
}
// Helper to deal with nesting and non-existing ids / previously-assigned
// levels. It assigns a given nesting level `level` to the block identified by
// `id` (unless that block already has a nesting level assigned).
void Nest(ControlFlowGraph& cfg,
const std::unordered_map<uint32_t, uint32_t>& id_to_index,
uint32_t id, uint32_t level) {
if (id == 0) {
return;
}
const uint32_t block_index = id_to_index.at(id);
SingleBlock& block = cfg.blocks[block_index];
if (!block.nest_level_assigned) {
block.nest_level = level;
block.nest_level_assigned = true;
}
}
// For a given block, assign nesting level to its successors.
void NestSuccessors(ControlFlowGraph& cfg, const SingleBlock& block,
const std::unordered_map<uint32_t, uint32_t>& id_to_index) {
assert(block.nest_level_assigned);
// Nest loops as such:
//
// %loop = OpLabel
// OpLoopMerge %merge %cont ...
// OpBranch %body
// %body = OpLabel
// Op...
// %cont = OpLabel
// Op...
// %merge = OpLabel
// Op...
//
// Nest conditional branches as such:
//
// %header = OpLabel
// OpSelectionMerge %merge ...
// OpBranchConditional ... %true %false
// %true = OpLabel
// Op...
// %false = OpLabel
// Op...
// %merge = OpLabel
// Op...
//
// Nest switch/case as such:
//
// %header = OpLabel
// OpSelectionMerge %merge ...
// OpSwitch ... %default ... %case0 ... %case1 ...
// %default = OpLabel
// Op...
// %case0 = OpLabel
// Op...
// %case1 = OpLabel
// Op...
// ...
// %merge = OpLabel
// Op...
//
// The following can be observed:
//
// - In all cases, the merge block has the same nesting as this block
// - The continue block of loops is nested 1 level deeper
// - The body/branches/cases are nested 2 levels deeper
//
// Back branches to the header block, branches to the merge block, etc
// are correctly handled by processing the header block first (that is
// _this_ block, already processed), then following the above rules
// (in the same order) for any block that is not already processed.
Nest(cfg, id_to_index, block.successors.merge_block_id, block.nest_level);
Nest(cfg, id_to_index, block.successors.continue_block_id,
block.nest_level + 1);
Nest(cfg, id_to_index, block.successors.true_block_id, block.nest_level + 2);
Nest(cfg, id_to_index, block.successors.false_block_id, block.nest_level + 2);
Nest(cfg, id_to_index, block.successors.body_block_id, block.nest_level + 2);
Nest(cfg, id_to_index, block.successors.next_block_id, block.nest_level);
for (uint32_t case_block_id : block.successors.case_block_ids) {
Nest(cfg, id_to_index, case_block_id, block.nest_level + 2);
}
}
struct StackEntry {
// The index of the block (in ControlFlowGraph::blocks) to process.
uint32_t block_index;
// Whether this is the pre or post visit of the block. Because a post-visit
// traversal is needed, the same block is pushed back on the stack on
// pre-visit so it can be visited again on post-visit.
bool post_visit = false;
};
// Helper to deal with DFS traversal and non-existing ids
void VisitSuccesor(std::stack<StackEntry>* dfs_stack,
const std::unordered_map<uint32_t, uint32_t>& id_to_index,
uint32_t id) {
if (id != 0) {
dfs_stack->push({id_to_index.at(id), false});
}
}
// Given the control flow graph, calculates and returns the reverse post-order
// ordering of the blocks. The blocks are then disassembled in that order for
// readability.
std::vector<uint32_t> OrderBlocks(
ControlFlowGraph& cfg,
const std::unordered_map<uint32_t, uint32_t>& id_to_index) {
std::vector<uint32_t> post_order;
// Nest level of a function's first block is 0.
cfg.blocks[0].nest_level = 0;
cfg.blocks[0].nest_level_assigned = true;
// Stack of block indices as they are visited.
std::stack<StackEntry> dfs_stack;
dfs_stack.push({0, false});
std::set<uint32_t> visited;
while (!dfs_stack.empty()) {
const uint32_t block_index = dfs_stack.top().block_index;
const bool post_visit = dfs_stack.top().post_visit;
dfs_stack.pop();
// If this is the second time the block is visited, that's the post-order
// visit.
if (post_visit) {
post_order.push_back(block_index);
continue;
}
// If already visited, another path got to it first (like a case
// fallthrough), avoid reprocessing it.
if (visited.count(block_index) > 0) {
continue;
}
visited.insert(block_index);
// Push it back in the stack for post-order visit
dfs_stack.push({block_index, true});
SingleBlock& block = cfg.blocks[block_index];
// Assign nest levels of successors right away. The successors are either
// nested under this block, or are back or forward edges to blocks outside
// this nesting level (no farther than the merge block), whose nesting
// levels are already assigned before this block is visited.
NestSuccessors(cfg, block, id_to_index);
block.reachable = true;
// The post-order visit yields the order in which the blocks are naturally
// ordered _backwards_. So blocks to be ordered last should be visited
// first. In other words, they should be pushed to the DFS stack last.
VisitSuccesor(&dfs_stack, id_to_index, block.successors.true_block_id);
VisitSuccesor(&dfs_stack, id_to_index, block.successors.false_block_id);
VisitSuccesor(&dfs_stack, id_to_index, block.successors.body_block_id);
VisitSuccesor(&dfs_stack, id_to_index, block.successors.next_block_id);
for (uint32_t case_block_id : block.successors.case_block_ids) {
VisitSuccesor(&dfs_stack, id_to_index, case_block_id);
}
VisitSuccesor(&dfs_stack, id_to_index, block.successors.continue_block_id);
VisitSuccesor(&dfs_stack, id_to_index, block.successors.merge_block_id);
}
std::vector<uint32_t> order(post_order.rbegin(), post_order.rend());
// Finally, dump all unreachable blocks at the end
for (size_t index = 0; index < cfg.blocks.size(); ++index) {
SingleBlock& block = cfg.blocks[index];
if (!block.reachable) {
order.push_back(static_cast<uint32_t>(index));
block.nest_level = 0;
block.nest_level_assigned = true;
}
}
return order;
}
void Disassembler::EmitCFG() {
// Build the CFG edges. At the same time, build an ID->block index map to
// simplify building the CFG edges.
const std::unordered_map<uint32_t, uint32_t> id_to_index =
BuildControlFlowGraph(current_function_cfg_);
// Walk the CFG in reverse post-order to find the best ordering of blocks for
// presentation
std::vector<uint32_t> block_order =
OrderBlocks(current_function_cfg_, id_to_index);
assert(block_order.size() == current_function_cfg_.blocks.size());
// Walk the CFG either in block order or input order based on whether the
// reorder_blocks_ option is given.
for (uint32_t index = 0; index < current_function_cfg_.blocks.size();
++index) {
const uint32_t block_index = reorder_blocks_ ? block_order[index] : index;
const SingleBlock& block = current_function_cfg_.blocks[block_index];
// Emit instructions for this block
size_t byte_offset = block.byte_offset;
assert(block.nest_level_assigned);
for (const ParsedInstruction& inst : block.instructions) {
instruction_disassembler_.EmitInstructionInBlock(*inst.get(), byte_offset,
block.nest_level);
byte_offset += inst.get()->num_words * sizeof(uint32_t);
}
}
current_function_cfg_.blocks.clear();
}
spv_result_t Disassembler::SaveTextResult(spv_text* text_result) const {
if (!print_) {
size_t length = text_.str().size();
@ -203,7 +605,7 @@ uint32_t GetLineLengthWithoutColor(const std::string line) {
if (line[i] == '\x1b') {
do {
++i;
} while (line[i] != 'm');
} while (i < line.size() && line[i] != 'm');
continue;
}
@ -214,6 +616,8 @@ uint32_t GetLineLengthWithoutColor(const std::string line) {
}
constexpr int kStandardIndent = 15;
constexpr int kBlockNestIndent = 2;
constexpr int kBlockBodyIndentOffset = 2;
constexpr uint32_t kCommentColumn = 50;
} // namespace
@ -229,6 +633,8 @@ InstructionDisassembler::InstructionDisassembler(const AssemblyGrammar& grammar,
indent_(spvIsInBitfield(SPV_BINARY_TO_TEXT_OPTION_INDENT, options)
? kStandardIndent
: 0),
nested_indent_(
spvIsInBitfield(SPV_BINARY_TO_TEXT_OPTION_NESTED_INDENT, options)),
comment_(spvIsInBitfield(SPV_BINARY_TO_TEXT_OPTION_COMMENT, options)),
show_byte_offset_(
spvIsInBitfield(SPV_BINARY_TO_TEXT_OPTION_SHOW_BYTE_OFFSET, options)),
@ -265,12 +671,29 @@ void InstructionDisassembler::EmitHeaderSchema(uint32_t schema) {
void InstructionDisassembler::EmitInstruction(
const spv_parsed_instruction_t& inst, size_t inst_byte_offset) {
EmitInstructionImpl(inst, inst_byte_offset, 0, false);
}
void InstructionDisassembler::EmitInstructionInBlock(
const spv_parsed_instruction_t& inst, size_t inst_byte_offset,
uint32_t block_indent) {
EmitInstructionImpl(inst, inst_byte_offset, block_indent, true);
}
void InstructionDisassembler::EmitInstructionImpl(
const spv_parsed_instruction_t& inst, size_t inst_byte_offset,
uint32_t block_indent, bool is_in_block) {
auto opcode = static_cast<spv::Op>(inst.opcode);
// To better align the comments (if any), write the instruction to a line
// first so its length can be readily available.
std::ostringstream line;
if (nested_indent_ && opcode == spv::Op::OpLabel) {
// Separate the blocks by an empty line to make them easier to separate
stream_ << std::endl;
}
if (inst.result_id) {
SetBlue();
const std::string id_name = name_mapper_(inst.result_id);
@ -283,6 +706,17 @@ void InstructionDisassembler::EmitInstruction(
line << std::string(indent_, ' ');
}
if (nested_indent_ && is_in_block) {
// Output OpLabel at the specified nest level, and instructions inside
// blocks nested a little more.
uint32_t indent = block_indent;
bool body_indent = opcode != spv::Op::OpLabel;
line << std::string(
indent * kBlockNestIndent + (body_indent ? kBlockBodyIndentOffset : 0),
' ');
}
line << "Op" << spvOpcodeString(opcode);
for (uint16_t i = 0; i < inst.num_operands; i++) {
@ -386,6 +820,11 @@ void InstructionDisassembler::EmitSectionComment(
auto opcode = static_cast<spv::Op>(inst.opcode);
if (comment_ && opcode == spv::Op::OpFunction) {
stream_ << std::endl;
if (nested_indent_) {
// Double the empty lines between Function sections since nested_indent_
// also separates blocks by a blank.
stream_ << std::endl;
}
stream_ << std::string(indent_, ' ');
stream_ << "; Function " << name_mapper_(inst.result_id) << std::endl;
}
@ -505,6 +944,7 @@ void InstructionDisassembler::EmitOperand(std::ostream& stream,
case SPV_OPERAND_TYPE_FPDENORM_MODE:
case SPV_OPERAND_TYPE_FPOPERATION_MODE:
case SPV_OPERAND_TYPE_QUANTIZATION_MODES:
case SPV_OPERAND_TYPE_FPENCODING:
case SPV_OPERAND_TYPE_OVERFLOW_MODES: {
spv_operand_desc entry;
if (grammar_.lookupOperand(operand.type, word, &entry))

View File

@ -58,6 +58,11 @@ class InstructionDisassembler {
// Emits the assembly text for the given instruction.
void EmitInstruction(const spv_parsed_instruction_t& inst,
size_t inst_byte_offset);
// Same as EmitInstruction, but only for block instructions (including
// OpLabel) and useful for nested indentation. If nested indentation is not
// desired, EmitInstruction can still be used for block instructions.
void EmitInstructionInBlock(const spv_parsed_instruction_t& inst,
size_t inst_byte_offset, uint32_t block_indent);
// Emits a comment between different sections of the module.
void EmitSectionComment(const spv_parsed_instruction_t& inst,
@ -82,6 +87,10 @@ class InstructionDisassembler {
void SetRed(std::ostream& stream) const;
void SetGreen(std::ostream& stream) const;
void EmitInstructionImpl(const spv_parsed_instruction_t& inst,
size_t inst_byte_offset, uint32_t block_indent,
bool is_in_block);
// Emits an operand for the given instruction, where the instruction
// is at offset words from the start of the binary.
void EmitOperand(std::ostream& stream, const spv_parsed_instruction_t& inst,
@ -97,10 +106,11 @@ class InstructionDisassembler {
const spvtools::AssemblyGrammar& grammar_;
std::ostream& stream_;
const bool print_; // Should we also print to the standard output stream?
const bool color_; // Should we print in colour?
const int indent_; // How much to indent. 0 means don't indent
const int comment_; // Should we comment the source
const bool print_; // Should we also print to the standard output stream?
const bool color_; // Should we print in colour?
const int indent_; // How much to indent. 0 means don't indent
const bool nested_indent_; // Whether indentation should indicate nesting
const int comment_; // Should we comment the source
const bool show_byte_offset_; // Should we print byte offset, in hex?
spvtools::NameMapper name_mapper_;

View File

@ -31,6 +31,7 @@
#include "source/opt/build_module.h"
#include "source/opt/compact_ids_pass.h"
#include "source/opt/decoration_manager.h"
#include "source/opt/ir_builder.h"
#include "source/opt/ir_loader.h"
#include "source/opt/pass_manager.h"
#include "source/opt/remove_duplicates_pass.h"
@ -46,12 +47,14 @@ namespace spvtools {
namespace {
using opt::Instruction;
using opt::InstructionBuilder;
using opt::IRContext;
using opt::Module;
using opt::PassManager;
using opt::RemoveDuplicatesPass;
using opt::analysis::DecorationManager;
using opt::analysis::DefUseManager;
using opt::analysis::Function;
using opt::analysis::Type;
using opt::analysis::TypeManager;
@ -126,6 +129,7 @@ spv_result_t GetImportExportPairs(const MessageConsumer& consumer,
// checked.
spv_result_t CheckImportExportCompatibility(const MessageConsumer& consumer,
const LinkageTable& linkings_to_do,
bool allow_ptr_type_mismatch,
opt::IRContext* context);
// Remove linkage specific instructions, such as prototypes of imported
@ -502,6 +506,7 @@ spv_result_t GetImportExportPairs(const MessageConsumer& consumer,
spv_result_t CheckImportExportCompatibility(const MessageConsumer& consumer,
const LinkageTable& linkings_to_do,
bool allow_ptr_type_mismatch,
opt::IRContext* context) {
spv_position_t position = {};
@ -513,7 +518,34 @@ spv_result_t CheckImportExportCompatibility(const MessageConsumer& consumer,
type_manager.GetType(linking_entry.imported_symbol.type_id);
Type* exported_symbol_type =
type_manager.GetType(linking_entry.exported_symbol.type_id);
if (!(*imported_symbol_type == *exported_symbol_type))
if (!(*imported_symbol_type == *exported_symbol_type)) {
Function* imported_symbol_type_func = imported_symbol_type->AsFunction();
Function* exported_symbol_type_func = exported_symbol_type->AsFunction();
if (imported_symbol_type_func && exported_symbol_type_func) {
const auto& imported_params = imported_symbol_type_func->param_types();
const auto& exported_params = exported_symbol_type_func->param_types();
// allow_ptr_type_mismatch allows linking functions where the pointer
// type of arguments doesn't match. Everything else still needs to be
// equal. This is to work around LLVM 17+ not having typed pointers and
// generated SPIR-Vs not knowing the actual pointer types in some cases.
if (allow_ptr_type_mismatch &&
imported_params.size() == exported_params.size()) {
bool correct = true;
for (size_t i = 0; i < imported_params.size(); i++) {
const auto& imported_param = imported_params[i];
const auto& exported_param = exported_params[i];
if (!imported_param->IsSame(exported_param) &&
(imported_param->kind() != Type::kPointer ||
exported_param->kind() != Type::kPointer)) {
correct = false;
break;
}
}
if (correct) continue;
}
}
return DiagnosticStream(position, consumer, "", SPV_ERROR_INVALID_BINARY)
<< "Type mismatch on symbol \""
<< linking_entry.imported_symbol.name
@ -521,6 +553,7 @@ spv_result_t CheckImportExportCompatibility(const MessageConsumer& consumer,
<< linking_entry.imported_symbol.id
<< " and exported variable/function %"
<< linking_entry.exported_symbol.id << ".";
}
}
// Ensure the import and export decorations are similar
@ -696,6 +729,57 @@ spv_result_t VerifyLimits(const MessageConsumer& consumer,
return SPV_SUCCESS;
}
spv_result_t FixFunctionCallTypes(opt::IRContext& context,
const LinkageTable& linkings) {
auto mod = context.module();
const auto type_manager = context.get_type_mgr();
const auto def_use_mgr = context.get_def_use_mgr();
for (auto& func : *mod) {
func.ForEachInst([&](Instruction* inst) {
if (inst->opcode() != spv::Op::OpFunctionCall) return;
opt::Operand& target = inst->GetInOperand(0);
// only fix calls to imported functions
auto linking = std::find_if(
linkings.begin(), linkings.end(), [&](const auto& entry) {
return entry.exported_symbol.id == target.AsId();
});
if (linking == linkings.end()) return;
auto builder = InstructionBuilder(&context, inst);
for (uint32_t i = 1; i < inst->NumInOperands(); ++i) {
auto exported_func_param =
def_use_mgr->GetDef(linking->exported_symbol.parameter_ids[i - 1]);
const Type* target_type =
type_manager->GetType(exported_func_param->type_id());
if (target_type->kind() != Type::kPointer) continue;
opt::Operand& arg = inst->GetInOperand(i);
const Type* param_type =
type_manager->GetType(def_use_mgr->GetDef(arg.AsId())->type_id());
// No need to cast if it already matches
if (*param_type == *target_type) continue;
auto new_id = context.TakeNextId();
// cast to the expected pointer type
builder.AddInstruction(MakeUnique<opt::Instruction>(
&context, spv::Op::OpBitcast, exported_func_param->type_id(),
new_id,
opt::Instruction::OperandList(
{{SPV_OPERAND_TYPE_ID, {arg.AsId()}}})));
inst->SetInOperand(i, {new_id});
}
});
}
context.InvalidateAnalyses(opt::IRContext::kAnalysisDefUse |
opt::IRContext::kAnalysisInstrToBlockMapping);
return SPV_SUCCESS;
}
} // namespace
spv_result_t Link(const Context& context,
@ -773,7 +857,14 @@ spv_result_t Link(const Context& context, const uint32_t* const* binaries,
if (res != SPV_SUCCESS) return res;
}
// Phase 4: Find the import/export pairs
// Phase 4: Remove duplicates
PassManager manager;
manager.SetMessageConsumer(consumer);
manager.AddPass<RemoveDuplicatesPass>();
opt::Pass::Status pass_res = manager.Run(&linked_context);
if (pass_res == opt::Pass::Status::Failure) return SPV_ERROR_INVALID_DATA;
// Phase 5: Find the import/export pairs
LinkageTable linkings_to_do;
res = GetImportExportPairs(consumer, linked_context,
*linked_context.get_def_use_mgr(),
@ -781,18 +872,12 @@ spv_result_t Link(const Context& context, const uint32_t* const* binaries,
options.GetAllowPartialLinkage(), &linkings_to_do);
if (res != SPV_SUCCESS) return res;
// Phase 5: Ensure the import and export have the same types and decorations.
res =
CheckImportExportCompatibility(consumer, linkings_to_do, &linked_context);
// Phase 6: Ensure the import and export have the same types and decorations.
res = CheckImportExportCompatibility(consumer, linkings_to_do,
options.GetAllowPtrTypeMismatch(),
&linked_context);
if (res != SPV_SUCCESS) return res;
// Phase 6: Remove duplicates
PassManager manager;
manager.SetMessageConsumer(consumer);
manager.AddPass<RemoveDuplicatesPass>();
opt::Pass::Status pass_res = manager.Run(&linked_context);
if (pass_res == opt::Pass::Status::Failure) return SPV_ERROR_INVALID_DATA;
// Phase 7: Remove all names and decorations of import variables/functions
for (const auto& linking_entry : linkings_to_do) {
linked_context.KillNamesAndDecorates(linking_entry.imported_symbol.id);
@ -815,21 +900,27 @@ spv_result_t Link(const Context& context, const uint32_t* const* binaries,
&linked_context);
if (res != SPV_SUCCESS) return res;
// Phase 10: Compact the IDs used in the module
// Phase 10: Optionally fix function call types
if (options.GetAllowPtrTypeMismatch()) {
res = FixFunctionCallTypes(linked_context, linkings_to_do);
if (res != SPV_SUCCESS) return res;
}
// Phase 11: Compact the IDs used in the module
manager.AddPass<opt::CompactIdsPass>();
pass_res = manager.Run(&linked_context);
if (pass_res == opt::Pass::Status::Failure) return SPV_ERROR_INVALID_DATA;
// Phase 11: Recompute EntryPoint variables
// Phase 12: Recompute EntryPoint variables
manager.AddPass<opt::RemoveUnusedInterfaceVariablesPass>();
pass_res = manager.Run(&linked_context);
if (pass_res == opt::Pass::Status::Failure) return SPV_ERROR_INVALID_DATA;
// Phase 12: Warn if SPIR-V limits were exceeded
// Phase 13: Warn if SPIR-V limits were exceeded
res = VerifyLimits(consumer, linked_context);
if (res != SPV_SUCCESS) return res;
// Phase 13: Output the module
// Phase 14: Output the module
linked_context.module()->ToBinary(linked_binary, true);
return SPV_SUCCESS;

View File

@ -218,6 +218,7 @@ spv_result_t FriendlyNameMapper::ParseInstruction(
} break;
case spv::Op::OpTypeFloat: {
const auto bit_width = inst.words[2];
// TODO: Handle optional fpencoding enum once actually used.
switch (bit_width) {
case 16:
SaveName(result_id, "half");
@ -255,6 +256,11 @@ spv_result_t FriendlyNameMapper::ParseInstruction(
inst.words[2]) +
"_" + NameForId(inst.words[3]));
break;
case spv::Op::OpTypeUntypedPointerKHR:
SaveName(result_id, std::string("_ptr_") +
NameForEnumOperand(SPV_OPERAND_TYPE_STORAGE_CLASS,
inst.words[2]));
break;
case spv::Op::OpTypePipe:
SaveName(result_id,
std::string("Pipe") +

View File

@ -276,6 +276,7 @@ int32_t spvOpcodeIsComposite(const spv::Op opcode) {
case spv::Op::OpTypeMatrix:
case spv::Op::OpTypeArray:
case spv::Op::OpTypeStruct:
case spv::Op::OpTypeRuntimeArray:
case spv::Op::OpTypeCooperativeMatrixNV:
case spv::Op::OpTypeCooperativeMatrixKHR:
return true;
@ -287,8 +288,11 @@ int32_t spvOpcodeIsComposite(const spv::Op opcode) {
bool spvOpcodeReturnsLogicalVariablePointer(const spv::Op opcode) {
switch (opcode) {
case spv::Op::OpVariable:
case spv::Op::OpUntypedVariableKHR:
case spv::Op::OpAccessChain:
case spv::Op::OpInBoundsAccessChain:
case spv::Op::OpUntypedAccessChainKHR:
case spv::Op::OpUntypedInBoundsAccessChainKHR:
case spv::Op::OpFunctionParameter:
case spv::Op::OpImageTexelPointer:
case spv::Op::OpCopyObject:
@ -296,6 +300,7 @@ bool spvOpcodeReturnsLogicalVariablePointer(const spv::Op opcode) {
case spv::Op::OpPhi:
case spv::Op::OpFunctionCall:
case spv::Op::OpPtrAccessChain:
case spv::Op::OpUntypedPtrAccessChainKHR:
case spv::Op::OpLoad:
case spv::Op::OpConstantNull:
case spv::Op::OpRawAccessChainNV:
@ -308,8 +313,11 @@ bool spvOpcodeReturnsLogicalVariablePointer(const spv::Op opcode) {
int32_t spvOpcodeReturnsLogicalPointer(const spv::Op opcode) {
switch (opcode) {
case spv::Op::OpVariable:
case spv::Op::OpUntypedVariableKHR:
case spv::Op::OpAccessChain:
case spv::Op::OpInBoundsAccessChain:
case spv::Op::OpUntypedAccessChainKHR:
case spv::Op::OpUntypedInBoundsAccessChainKHR:
case spv::Op::OpFunctionParameter:
case spv::Op::OpImageTexelPointer:
case spv::Op::OpCopyObject:
@ -351,6 +359,7 @@ int32_t spvOpcodeGeneratesType(spv::Op op) {
// spv::Op::OpTypeAccelerationStructureNV
case spv::Op::OpTypeRayQueryKHR:
case spv::Op::OpTypeHitObjectNV:
case spv::Op::OpTypeUntypedPointerKHR:
return true;
default:
// In particular, OpTypeForwardPointer does not generate a type,
@ -792,3 +801,16 @@ bool spvOpcodeIsBit(spv::Op opcode) {
return false;
}
}
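// Returns true for opcodes that generate an untyped pointer result.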
bool spvOpcodeGeneratesUntypedPointer(spv::Op opcode) {
switch (opcode) {
case spv::Op::OpUntypedVariableKHR:
case spv::Op::OpUntypedAccessChainKHR:
case spv::Op::OpUntypedInBoundsAccessChainKHR:
case spv::Op::OpUntypedPtrAccessChainKHR:
case spv::Op::OpUntypedInBoundsPtrAccessChainKHR:
return true;
default:
return false;
}
}

View File

@ -162,4 +162,7 @@ bool spvOpcodeIsBit(spv::Op opcode);
// Gets the name of an instruction, without the "Op" prefix.
const char* spvOpcodeString(const spv::Op opcode);
// Returns true for opcodes that generate an untyped pointer result.
bool spvOpcodeGeneratesUntypedPointer(spv::Op opcode);
#endif // SOURCE_OPCODE_H_

View File

@ -252,6 +252,9 @@ const char* spvOperandTypeStr(spv_operand_type_t type) {
return "OpenCL.DebugInfo.100 debug operation";
case SPV_OPERAND_TYPE_CLDEBUG100_DEBUG_IMPORTED_ENTITY:
return "OpenCL.DebugInfo.100 debug imported entity";
case SPV_OPERAND_TYPE_FPENCODING:
case SPV_OPERAND_TYPE_OPTIONAL_FPENCODING:
return "FP encoding";
// The next values are for values returned from an instruction, not actually
// an operand. So the specific strings don't matter. But let's add them
@ -366,6 +369,7 @@ bool spvOperandIsConcrete(spv_operand_type_t type) {
case SPV_OPERAND_TYPE_LOAD_CACHE_CONTROL:
case SPV_OPERAND_TYPE_STORE_CACHE_CONTROL:
case SPV_OPERAND_TYPE_NAMED_MAXIMUM_NUMBER_OF_REGISTERS:
case SPV_OPERAND_TYPE_FPENCODING:
return true;
default:
break;
@ -407,6 +411,7 @@ bool spvOperandIsOptional(spv_operand_type_t type) {
case SPV_OPERAND_TYPE_OPTIONAL_COOPERATIVE_MATRIX_OPERANDS:
case SPV_OPERAND_TYPE_OPTIONAL_CIV:
case SPV_OPERAND_TYPE_OPTIONAL_RAW_ACCESS_CHAIN_OPERANDS:
case SPV_OPERAND_TYPE_OPTIONAL_FPENCODING:
return true;
default:
break;

View File

@ -134,7 +134,12 @@ void AggressiveDCEPass::AddStores(Function* func, uint32_t ptrId) {
}
break;
// If default, assume it stores e.g. frexp, modf, function call
case spv::Op::OpStore:
case spv::Op::OpStore: {
const uint32_t kStoreTargetAddrInIdx = 0;
if (user->GetSingleWordInOperand(kStoreTargetAddrInIdx) == ptrId)
AddToWorklist(user);
break;
}
default:
AddToWorklist(user);
break;
@ -1004,7 +1009,10 @@ void AggressiveDCEPass::InitExtensions() {
"SPV_NV_bindless_texture",
"SPV_EXT_shader_atomic_float_add",
"SPV_EXT_fragment_shader_interlock",
"SPV_NV_compute_shader_derivatives"
"SPV_NV_compute_shader_derivatives",
"SPV_NV_cooperative_matrix",
"SPV_KHR_cooperative_matrix",
"SPV_KHR_ray_tracing_position_fetch"
});
// clang-format on
}

View File

@ -24,7 +24,7 @@
namespace spvtools {
// Builds an Module returns the owning IRContext from the given SPIR-V
// Builds a Module and returns the owning IRContext from the given SPIR-V
// |binary|. |size| specifies number of words in |binary|. The |binary| will be
// decoded according to the given target |env|. Returns nullptr if errors occur
// and sends the errors to |consumer|. When |extra_line_tracking| is true,
@ -41,7 +41,7 @@ std::unique_ptr<opt::IRContext> BuildModule(spv_target_env env,
const uint32_t* binary,
size_t size);
// Builds an Module and returns the owning IRContext from the given
// Builds a Module and returns the owning IRContext from the given
// SPIR-V assembly |text|. The |text| will be encoded according to the given
// target |env|. Returns nullptr if errors occur and sends the errors to
// |consumer|.

View File

@ -21,59 +21,6 @@ namespace opt {
namespace {
constexpr uint32_t kExtractCompositeIdInIdx = 0;
// Returns the value obtained by extracting the |number_of_bits| least
// significant bits from |value|, and sign-extending it to 64-bits.
uint64_t SignExtendValue(uint64_t value, uint32_t number_of_bits) {
if (number_of_bits == 64) return value;
uint64_t mask_for_sign_bit = 1ull << (number_of_bits - 1);
uint64_t mask_for_significant_bits = (mask_for_sign_bit << 1) - 1ull;
if (value & mask_for_sign_bit) {
// Set upper bits to 1
value |= ~mask_for_significant_bits;
} else {
// Clear the upper bits
value &= mask_for_significant_bits;
}
return value;
}
// Returns the value obtained by extracting the |number_of_bits| least
// significant bits from |value|, and zero-extending it to 64-bits.
uint64_t ZeroExtendValue(uint64_t value, uint32_t number_of_bits) {
if (number_of_bits == 64) return value;
uint64_t mask_for_first_bit_to_clear = 1ull << (number_of_bits);
uint64_t mask_for_bits_to_keep = mask_for_first_bit_to_clear - 1;
value &= mask_for_bits_to_keep;
return value;
}
// Returns a constant whose value is `value` and type is `type`. This constant
// will be generated by `const_mgr`. The type must be a scalar integer type.
const analysis::Constant* GenerateIntegerConstant(
const analysis::Integer* integer_type, uint64_t result,
analysis::ConstantManager* const_mgr) {
assert(integer_type != nullptr);
std::vector<uint32_t> words;
if (integer_type->width() == 64) {
// In the 64-bit case, two words are needed to represent the value.
words = {static_cast<uint32_t>(result),
static_cast<uint32_t>(result >> 32)};
} else {
// In all other cases, only a single word is needed.
assert(integer_type->width() <= 32);
if (integer_type->IsSigned()) {
result = SignExtendValue(result, integer_type->width());
} else {
result = ZeroExtendValue(result, integer_type->width());
}
words = {static_cast<uint32_t>(result)};
}
return const_mgr->GetConstant(integer_type, words);
}
// Returns a constants with the value NaN of the given type. Only works for
// 32-bit and 64-bit float point types. Returns |nullptr| if an error occurs.
const analysis::Constant* GetNan(const analysis::Type* type,
@ -1730,7 +1677,7 @@ BinaryScalarFoldingRule FoldBinaryIntegerOperation(uint64_t (*op)(uint64_t,
uint64_t result = op(ia, ib);
const analysis::Constant* result_constant =
GenerateIntegerConstant(integer_type, result, const_mgr);
const_mgr->GenerateIntegerConstant(integer_type, result);
return result_constant;
};
}
@ -1745,7 +1692,7 @@ const analysis::Constant* FoldScalarSConvert(
const analysis::Integer* integer_type = result_type->AsInteger();
assert(integer_type && "The result type of an SConvert");
int64_t value = a->GetSignExtendedValue();
return GenerateIntegerConstant(integer_type, value, const_mgr);
return const_mgr->GenerateIntegerConstant(integer_type, value);
}
// A scalar folding rule that folds OpUConvert.
@ -1762,8 +1709,8 @@ const analysis::Constant* FoldScalarUConvert(
// If the operand was an unsigned value with less than 32-bit, it would have
// been sign extended earlier, and we need to clear those bits.
auto* operand_type = a->type()->AsInteger();
value = ZeroExtendValue(value, operand_type->width());
return GenerateIntegerConstant(integer_type, value, const_mgr);
value = utils::ClearHighBits(value, 64 - operand_type->width());
return const_mgr->GenerateIntegerConstant(integer_type, value);
}
} // namespace

View File

@ -525,6 +525,28 @@ uint32_t ConstantManager::GetNullConstId(const Type* type) {
return GetDefiningInstruction(c)->result_id();
}
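// Values narrower than 32 bits are stored in a single word, sign- or
// zero-extended according to the signedness of |integer_type|; 64-bit values
// take two words, low word first.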
const Constant* ConstantManager::GenerateIntegerConstant(
const analysis::Integer* integer_type, uint64_t result) {
assert(integer_type != nullptr);
std::vector<uint32_t> words;
if (integer_type->width() == 64) {
// In the 64-bit case, two words are needed to represent the value.
words = {static_cast<uint32_t>(result),
static_cast<uint32_t>(result >> 32)};
} else {
// In all other cases, only a single word is needed.
assert(integer_type->width() <= 32);
if (integer_type->IsSigned()) {
result = utils::SignExtendValue(result, integer_type->width());
} else {
result = utils::ZeroExtendValue(result, integer_type->width());
}
words = {static_cast<uint32_t>(result)};
}
return GetConstant(integer_type, words);
}
std::vector<const analysis::Constant*> Constant::GetVectorComponents(
analysis::ConstantManager* const_mgr) const {
std::vector<const analysis::Constant*> components;

View File

@ -671,6 +671,11 @@ class ConstantManager {
// Returns the id of an OpConstantNull with type of |type|.
uint32_t GetNullConstId(const Type* type);
// Returns a constant whose value is `result` and type is `integer_type`. The
// type must be a scalar integer type.
const Constant* GenerateIntegerConstant(const analysis::Integer* integer_type,
uint64_t result);
private:
// Creates a Constant instance with the given type and a vector of constant
// defining words. Returns a unique pointer to the created Constant instance

View File

@ -751,6 +751,8 @@ void CopyPropagateArrays::UpdateUses(Instruction* original_ptr_inst,
uint32_t pointee_type_id =
pointer_type->GetSingleWordInOperand(kTypePointerPointeeInIdx);
uint32_t copy = GenerateCopy(original_ptr_inst, pointee_type_id, use);
assert(copy != 0 &&
"Should not be updating uses unless we know it can be done.");
context()->ForgetUses(use);
use->SetInOperand(index, {copy});

View File

@ -31,11 +31,14 @@ bool IsDecorationBinding(Instruction* inst) {
Pass::Status DescriptorScalarReplacement::Process() {
bool modified = false;
std::vector<Instruction*> vars_to_kill;
for (Instruction& var : context()->types_values()) {
if (descsroautil::IsDescriptorArray(context(), &var)) {
bool is_candidate =
flatten_arrays_ && descsroautil::IsDescriptorArray(context(), &var);
is_candidate |= flatten_composites_ &&
descsroautil::IsDescriptorStruct(context(), &var);
if (is_candidate) {
modified = true;
if (!ReplaceCandidate(&var)) {
return Status::Failure;

View File

@ -32,9 +32,16 @@ namespace opt {
// Documented in optimizer.hpp
class DescriptorScalarReplacement : public Pass {
public:
DescriptorScalarReplacement() {}
DescriptorScalarReplacement(bool flatten_composites, bool flatten_arrays)
: flatten_composites_(flatten_composites),
flatten_arrays_(flatten_arrays) {}
const char* name() const override { return "descriptor-scalar-replacement"; }
const char* name() const override {
if (flatten_composites_ && flatten_arrays_)
return "descriptor-scalar-replacement";
if (flatten_composites_) return "descriptor-composite-scalar-replacement";
return "descriptor-array-scalar-replacement";
}
Status Process() override;
@ -141,6 +148,9 @@ class DescriptorScalarReplacement : public Pass {
// array |var|. If the entry is |0|, then the variable has not been
// created yet.
std::map<Instruction*, std::vector<uint32_t>> replacement_variables_;
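// Controls which kinds of descriptor variables are flattened: composites
// (structs of descriptors) and/or arrays of descriptors.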
bool flatten_composites_;
bool flatten_arrays_;
};
} // namespace opt

View File

@ -29,41 +29,58 @@ uint32_t GetLengthOfArrayType(IRContext* context, Instruction* type) {
return length_const->GetU32();
}
} // namespace
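// Returns true if |var| is decorated with both DescriptorSet and Binding.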
bool HasDescriptorDecorations(IRContext* context, Instruction* var) {
const auto& decoration_mgr = context->get_decoration_mgr();
return decoration_mgr->HasDecoration(
var->result_id(), uint32_t(spv::Decoration::DescriptorSet)) &&
decoration_mgr->HasDecoration(var->result_id(),
uint32_t(spv::Decoration::Binding));
}
namespace descsroautil {
bool IsDescriptorArray(IRContext* context, Instruction* var) {
Instruction* GetVariableType(IRContext* context, Instruction* var) {
if (var->opcode() != spv::Op::OpVariable) {
return false;
return nullptr;
}
uint32_t ptr_type_id = var->type_id();
Instruction* ptr_type_inst = context->get_def_use_mgr()->GetDef(ptr_type_id);
if (ptr_type_inst->opcode() != spv::Op::OpTypePointer) {
return false;
return nullptr;
}
uint32_t var_type_id = ptr_type_inst->GetSingleWordInOperand(1);
Instruction* var_type_inst = context->get_def_use_mgr()->GetDef(var_type_id);
if (var_type_inst->opcode() != spv::Op::OpTypeArray &&
var_type_inst->opcode() != spv::Op::OpTypeStruct) {
return false;
return context->get_def_use_mgr()->GetDef(var_type_id);
}
} // namespace
namespace descsroautil {
bool IsDescriptorArray(IRContext* context, Instruction* var) {
Instruction* var_type_inst = GetVariableType(context, var);
if (var_type_inst == nullptr) return false;
return var_type_inst->opcode() == spv::Op::OpTypeArray &&
HasDescriptorDecorations(context, var);
}
bool IsDescriptorStruct(IRContext* context, Instruction* var) {
Instruction* var_type_inst = GetVariableType(context, var);
if (var_type_inst == nullptr) return false;
while (var_type_inst->opcode() == spv::Op::OpTypeArray) {
var_type_inst = context->get_def_use_mgr()->GetDef(
var_type_inst->GetInOperand(0).AsId());
}
if (var_type_inst->opcode() != spv::Op::OpTypeStruct) return false;
// All structures with descriptor assignments must be replaced by variables,
// one for each of their members - with the exceptions of buffers.
if (IsTypeOfStructuredBuffer(context, var_type_inst)) {
return false;
}
if (!context->get_decoration_mgr()->HasDecoration(
var->result_id(), uint32_t(spv::Decoration::DescriptorSet))) {
return false;
}
return context->get_decoration_mgr()->HasDecoration(
var->result_id(), uint32_t(spv::Decoration::Binding));
return HasDescriptorDecorations(context, var);
}
bool IsTypeOfStructuredBuffer(IRContext* context, const Instruction* type) {

View File

@ -27,6 +27,10 @@ namespace descsroautil {
// descriptor array.
bool IsDescriptorArray(IRContext* context, Instruction* var);
// Returns true if |var| is an OpVariable instruction that represents a
// struct containing descriptors.
bool IsDescriptorStruct(IRContext* context, Instruction* var);
// Returns true if |type| is a type that could be used for a structured buffer
// as opposed to a type that would be used for a structure of resource
// descriptors.

View File

@ -141,22 +141,26 @@ bool FixStorageClass::IsPointerResultType(Instruction* inst) {
if (inst->type_id() == 0) {
return false;
}
const analysis::Type* ret_type =
context()->get_type_mgr()->GetType(inst->type_id());
return ret_type->AsPointer() != nullptr;
Instruction* type_def = get_def_use_mgr()->GetDef(inst->type_id());
return type_def->opcode() == spv::Op::OpTypePointer;
}
bool FixStorageClass::IsPointerToStorageClass(Instruction* inst,
spv::StorageClass storage_class) {
analysis::TypeManager* type_mgr = context()->get_type_mgr();
analysis::Type* pType = type_mgr->GetType(inst->type_id());
const analysis::Pointer* result_type = pType->AsPointer();
if (result_type == nullptr) {
if (inst->type_id() == 0) {
return false;
}
return (result_type->storage_class() == storage_class);
Instruction* type_def = get_def_use_mgr()->GetDef(inst->type_id());
if (type_def->opcode() != spv::Op::OpTypePointer) {
return false;
}
const uint32_t kPointerTypeStorageClassIndex = 0;
spv::StorageClass pointer_storage_class = static_cast<spv::StorageClass>(
type_def->GetSingleWordInOperand(kPointerTypeStorageClassIndex));
return pointer_storage_class == storage_class;
}
bool FixStorageClass::ChangeResultType(Instruction* inst,
@ -233,6 +237,9 @@ bool FixStorageClass::PropagateType(Instruction* inst, uint32_t type_id,
}
uint32_t copy_id = GenerateCopy(obj_inst, pointee_type_id, inst);
if (copy_id == 0) {
return false;
}
inst->SetInOperand(1, {copy_id});
context()->UpdateDefUse(inst);
}
@ -301,9 +308,11 @@ uint32_t FixStorageClass::WalkAccessChainType(Instruction* inst, uint32_t id) {
break;
}
Instruction* orig_type_inst = get_def_use_mgr()->GetDef(id);
assert(orig_type_inst->opcode() == spv::Op::OpTypePointer);
id = orig_type_inst->GetSingleWordInOperand(1);
Instruction* id_type_inst = get_def_use_mgr()->GetDef(id);
assert(id_type_inst->opcode() == spv::Op::OpTypePointer);
id = id_type_inst->GetSingleWordInOperand(1);
spv::StorageClass input_storage_class =
static_cast<spv::StorageClass>(id_type_inst->GetSingleWordInOperand(0));
for (uint32_t i = start_idx; i < inst->NumInOperands(); ++i) {
Instruction* type_inst = get_def_use_mgr()->GetDef(id);
@ -312,6 +321,7 @@ uint32_t FixStorageClass::WalkAccessChainType(Instruction* inst, uint32_t id) {
case spv::Op::OpTypeRuntimeArray:
case spv::Op::OpTypeMatrix:
case spv::Op::OpTypeVector:
case spv::Op::OpTypeCooperativeMatrixKHR:
id = type_inst->GetSingleWordInOperand(0);
break;
case spv::Op::OpTypeStruct: {
@ -335,9 +345,19 @@ uint32_t FixStorageClass::WalkAccessChainType(Instruction* inst, uint32_t id) {
"Tried to extract from an object where it cannot be done.");
}
return context()->get_type_mgr()->FindPointerToType(
id, static_cast<spv::StorageClass>(
orig_type_inst->GetSingleWordInOperand(0)));
Instruction* orig_type_inst = get_def_use_mgr()->GetDef(inst->type_id());
spv::StorageClass orig_storage_class =
static_cast<spv::StorageClass>(orig_type_inst->GetSingleWordInOperand(0));
assert(orig_type_inst->opcode() == spv::Op::OpTypePointer);
if (orig_type_inst->GetSingleWordInOperand(1) == id &&
input_storage_class == orig_storage_class) {
// The existing type is correct. Avoid the search for the type. Note that if
// there is a duplicate type, the search below could return a different type
// forcing more changes to the code than necessary.
return inst->type_id();
}
return context()->get_type_mgr()->FindPointerToType(id, input_storage_class);
}
}  // namespace opt

View File

@ -247,18 +247,7 @@ utils::SmallVector<uint32_t, 2> EncodeIntegerAsWords(const analysis::Type& type,
// Truncate first_word if the |type| has width less than uint32.
if (bit_width < bits_per_word) {
const uint32_t num_high_bits_to_mask = bits_per_word - bit_width;
const bool is_negative_after_truncation =
result_type_signed &&
utils::IsBitAtPositionSet(first_word, bit_width - 1);
if (is_negative_after_truncation) {
// Truncate and sign-extend |first_word|. No padding words will be
// added and |pad_value| can be left as-is.
first_word = utils::SetHighBits(first_word, num_high_bits_to_mask);
} else {
first_word = utils::ClearHighBits(first_word, num_high_bits_to_mask);
}
first_word = utils::SignExtendValue(first_word, bit_width);
}
utils::SmallVector<uint32_t, 2> words = {first_word};

View File

@ -112,6 +112,12 @@ bool IsValidResult(T val) {
}
}
// Returns true if `type` is a cooperative matrix.
bool IsCooperativeMatrix(const analysis::Type* type) {
return type->kind() == analysis::Type::kCooperativeMatrixKHR ||
type->kind() == analysis::Type::kCooperativeMatrixNV;
}
const analysis::Constant* ConstInput(
const std::vector<const analysis::Constant*>& constants) {
return constants[0] ? constants[0] : constants[1];
@ -180,8 +186,14 @@ std::vector<uint32_t> GetWordsFromNumericScalarOrVectorConstant(
const analysis::Constant* ConvertWordsToNumericScalarOrVectorConstant(
analysis::ConstantManager* const_mgr, const std::vector<uint32_t>& words,
const analysis::Type* type) {
if (type->AsInteger() || type->AsFloat())
return const_mgr->GetConstant(type, words);
const spvtools::opt::analysis::Integer* int_type = type->AsInteger();
if (int_type && int_type->width() <= 32) {
assert(words.size() == 1);
return const_mgr->GenerateIntegerConstant(int_type, words[0]);
}
if (int_type || type->AsFloat()) return const_mgr->GetConstant(type, words);
if (const auto* vec_type = type->AsVector())
return const_mgr->GetNumericVectorConstantWithWords(vec_type, words);
return nullptr;
@ -307,6 +319,11 @@ FoldingRule ReciprocalFDiv() {
analysis::ConstantManager* const_mgr = context->get_constant_mgr();
const analysis::Type* type =
context->get_type_mgr()->GetType(inst->type_id());
if (IsCooperativeMatrix(type)) {
return false;
}
if (!inst->IsFloatingPointFoldingAllowed()) return false;
uint32_t width = ElementWidth(type);
@ -388,6 +405,11 @@ FoldingRule MergeNegateMulDivArithmetic() {
analysis::ConstantManager* const_mgr = context->get_constant_mgr();
const analysis::Type* type =
context->get_type_mgr()->GetType(inst->type_id());
if (IsCooperativeMatrix(type)) {
return false;
}
if (HasFloatingPoint(type) && !inst->IsFloatingPointFoldingAllowed())
return false;
@ -449,6 +471,11 @@ FoldingRule MergeNegateAddSubArithmetic() {
analysis::ConstantManager* const_mgr = context->get_constant_mgr();
const analysis::Type* type =
context->get_type_mgr()->GetType(inst->type_id());
if (IsCooperativeMatrix(type)) {
return false;
}
if (HasFloatingPoint(type) && !inst->IsFloatingPointFoldingAllowed())
return false;
@ -680,6 +707,11 @@ FoldingRule MergeMulMulArithmetic() {
analysis::ConstantManager* const_mgr = context->get_constant_mgr();
const analysis::Type* type =
context->get_type_mgr()->GetType(inst->type_id());
if (IsCooperativeMatrix(type)) {
return false;
}
if (HasFloatingPoint(type) && !inst->IsFloatingPointFoldingAllowed())
return false;
@ -734,6 +766,11 @@ FoldingRule MergeMulDivArithmetic() {
const analysis::Type* type =
context->get_type_mgr()->GetType(inst->type_id());
if (IsCooperativeMatrix(type)) {
return false;
}
if (!inst->IsFloatingPointFoldingAllowed()) return false;
uint32_t width = ElementWidth(type);
@ -807,6 +844,11 @@ FoldingRule MergeMulNegateArithmetic() {
analysis::ConstantManager* const_mgr = context->get_constant_mgr();
const analysis::Type* type =
context->get_type_mgr()->GetType(inst->type_id());
if (IsCooperativeMatrix(type)) {
return false;
}
bool uses_float = HasFloatingPoint(type);
if (uses_float && !inst->IsFloatingPointFoldingAllowed()) return false;
@ -847,6 +889,11 @@ FoldingRule MergeDivDivArithmetic() {
analysis::ConstantManager* const_mgr = context->get_constant_mgr();
const analysis::Type* type =
context->get_type_mgr()->GetType(inst->type_id());
if (IsCooperativeMatrix(type)) {
return false;
}
if (!inst->IsFloatingPointFoldingAllowed()) return false;
uint32_t width = ElementWidth(type);
@ -920,6 +967,11 @@ FoldingRule MergeDivMulArithmetic() {
const analysis::Type* type =
context->get_type_mgr()->GetType(inst->type_id());
if (IsCooperativeMatrix(type)) {
return false;
}
if (!inst->IsFloatingPointFoldingAllowed()) return false;
uint32_t width = ElementWidth(type);
@ -1062,6 +1114,11 @@ FoldingRule MergeSubNegateArithmetic() {
analysis::ConstantManager* const_mgr = context->get_constant_mgr();
const analysis::Type* type =
context->get_type_mgr()->GetType(inst->type_id());
if (IsCooperativeMatrix(type)) {
return false;
}
bool uses_float = HasFloatingPoint(type);
if (uses_float && !inst->IsFloatingPointFoldingAllowed()) return false;
@ -1110,6 +1167,11 @@ FoldingRule MergeAddAddArithmetic() {
inst->opcode() == spv::Op::OpIAdd);
const analysis::Type* type =
context->get_type_mgr()->GetType(inst->type_id());
if (IsCooperativeMatrix(type)) {
return false;
}
analysis::ConstantManager* const_mgr = context->get_constant_mgr();
bool uses_float = HasFloatingPoint(type);
if (uses_float && !inst->IsFloatingPointFoldingAllowed()) return false;
@ -1158,6 +1220,11 @@ FoldingRule MergeAddSubArithmetic() {
inst->opcode() == spv::Op::OpIAdd);
const analysis::Type* type =
context->get_type_mgr()->GetType(inst->type_id());
if (IsCooperativeMatrix(type)) {
return false;
}
analysis::ConstantManager* const_mgr = context->get_constant_mgr();
bool uses_float = HasFloatingPoint(type);
if (uses_float && !inst->IsFloatingPointFoldingAllowed()) return false;
@ -1218,6 +1285,11 @@ FoldingRule MergeSubAddArithmetic() {
inst->opcode() == spv::Op::OpISub);
const analysis::Type* type =
context->get_type_mgr()->GetType(inst->type_id());
if (IsCooperativeMatrix(type)) {
return false;
}
analysis::ConstantManager* const_mgr = context->get_constant_mgr();
bool uses_float = HasFloatingPoint(type);
if (uses_float && !inst->IsFloatingPointFoldingAllowed()) return false;
@ -1284,6 +1356,11 @@ FoldingRule MergeSubSubArithmetic() {
inst->opcode() == spv::Op::OpISub);
const analysis::Type* type =
context->get_type_mgr()->GetType(inst->type_id());
if (IsCooperativeMatrix(type)) {
return false;
}
analysis::ConstantManager* const_mgr = context->get_constant_mgr();
bool uses_float = HasFloatingPoint(type);
if (uses_float && !inst->IsFloatingPointFoldingAllowed()) return false;
@ -1377,6 +1454,11 @@ FoldingRule MergeGenericAddSubArithmetic() {
inst->opcode() == spv::Op::OpIAdd);
const analysis::Type* type =
context->get_type_mgr()->GetType(inst->type_id());
if (IsCooperativeMatrix(type)) {
return false;
}
bool uses_float = HasFloatingPoint(type);
if (uses_float && !inst->IsFloatingPointFoldingAllowed()) return false;

View File

@ -428,8 +428,9 @@ void LocalAccessChainConvertPass::InitExtensions() {
"SPV_KHR_uniform_group_instructions",
"SPV_KHR_fragment_shader_barycentric", "SPV_KHR_vulkan_memory_model",
"SPV_NV_bindless_texture", "SPV_EXT_shader_atomic_float_add",
"SPV_EXT_fragment_shader_interlock",
"SPV_NV_compute_shader_derivatives"});
"SPV_EXT_fragment_shader_interlock", "SPV_NV_compute_shader_derivatives",
"SPV_NV_cooperative_matrix", "SPV_KHR_cooperative_matrix",
"SPV_KHR_ray_tracing_position_fetch"});
}
bool LocalAccessChainConvertPass::AnyIndexIsOutOfBounds(

View File

@ -291,7 +291,10 @@ void LocalSingleBlockLoadStoreElimPass::InitExtensions() {
"SPV_NV_bindless_texture",
"SPV_EXT_shader_atomic_float_add",
"SPV_EXT_fragment_shader_interlock",
"SPV_NV_compute_shader_derivatives"});
"SPV_NV_compute_shader_derivatives",
"SPV_NV_cooperative_matrix",
"SPV_KHR_cooperative_matrix",
"SPV_KHR_ray_tracing_position_fetch"});
}
} // namespace opt

View File

@ -141,7 +141,10 @@ void LocalSingleStoreElimPass::InitExtensionAllowList() {
"SPV_NV_bindless_texture",
"SPV_EXT_shader_atomic_float_add",
"SPV_EXT_fragment_shader_interlock",
"SPV_NV_compute_shader_derivatives"});
"SPV_NV_compute_shader_derivatives",
"SPV_NV_cooperative_matrix",
"SPV_KHR_cooperative_matrix",
"SPV_KHR_ray_tracing_position_fetch"});
}
bool LocalSingleStoreElimPass::ProcessVariable(Instruction* var_inst) {
std::vector<Instruction*> users;

View File

@ -43,6 +43,8 @@ bool MemPass::IsBaseTargetType(const Instruction* typeInst) const {
case spv::Op::OpTypeSampler:
case spv::Op::OpTypeSampledImage:
case spv::Op::OpTypePointer:
case spv::Op::OpTypeCooperativeMatrixNV:
case spv::Op::OpTypeCooperativeMatrixKHR:
return true;
default:
break;

View File

@ -364,6 +364,10 @@ bool Optimizer::RegisterPassFromFlag(const std::string& flag,
RegisterPass(CreateSpreadVolatileSemanticsPass());
} else if (pass_name == "descriptor-scalar-replacement") {
RegisterPass(CreateDescriptorScalarReplacementPass());
} else if (pass_name == "descriptor-composite-scalar-replacement") {
RegisterPass(CreateDescriptorCompositeScalarReplacementPass());
} else if (pass_name == "descriptor-array-scalar-replacement") {
RegisterPass(CreateDescriptorArrayScalarReplacementPass());
} else if (pass_name == "eliminate-dead-code-aggressive") {
RegisterPass(CreateAggressiveDCEPass(preserve_interface));
} else if (pass_name == "eliminate-insert-extract") {
@ -1059,7 +1063,20 @@ Optimizer::PassToken CreateSpreadVolatileSemanticsPass() {
Optimizer::PassToken CreateDescriptorScalarReplacementPass() {
return MakeUnique<Optimizer::PassToken::Impl>(
MakeUnique<opt::DescriptorScalarReplacement>());
MakeUnique<opt::DescriptorScalarReplacement>(
/* flatten_composites= */ true, /* flatten_arrays= */ true));
}
Optimizer::PassToken CreateDescriptorCompositeScalarReplacementPass() {
return MakeUnique<Optimizer::PassToken::Impl>(
MakeUnique<opt::DescriptorScalarReplacement>(
/* flatten_composites= */ true, /* flatten_arrays= */ false));
}
Optimizer::PassToken CreateDescriptorArrayScalarReplacementPass() {
return MakeUnique<Optimizer::PassToken::Impl>(
MakeUnique<opt::DescriptorScalarReplacement>(
/* flatten_composites= */ false, /* flatten_arrays= */ true));
}
Optimizer::PassToken CreateWrapOpKillPass() {

View File

@ -83,7 +83,6 @@ uint32_t Pass::GetNullId(uint32_t type_id) {
uint32_t Pass::GenerateCopy(Instruction* object_to_copy, uint32_t new_type_id,
Instruction* insertion_position) {
analysis::TypeManager* type_mgr = context()->get_type_mgr();
analysis::ConstantManager* const_mgr = context()->get_constant_mgr();
uint32_t original_type_id = object_to_copy->type_id();
@ -95,57 +94,63 @@ uint32_t Pass::GenerateCopy(Instruction* object_to_copy, uint32_t new_type_id,
context(), insertion_position,
IRContext::kAnalysisInstrToBlockMapping | IRContext::kAnalysisDefUse);
analysis::Type* original_type = type_mgr->GetType(original_type_id);
analysis::Type* new_type = type_mgr->GetType(new_type_id);
Instruction* original_type = get_def_use_mgr()->GetDef(original_type_id);
Instruction* new_type = get_def_use_mgr()->GetDef(new_type_id);
if (const analysis::Array* original_array_type = original_type->AsArray()) {
uint32_t original_element_type_id =
type_mgr->GetId(original_array_type->element_type());
analysis::Array* new_array_type = new_type->AsArray();
assert(new_array_type != nullptr && "Can't copy an array to a non-array.");
uint32_t new_element_type_id =
type_mgr->GetId(new_array_type->element_type());
std::vector<uint32_t> element_ids;
const analysis::Constant* length_const =
const_mgr->FindDeclaredConstant(original_array_type->LengthId());
assert(length_const->AsIntConstant());
uint32_t array_length = length_const->AsIntConstant()->GetU32();
for (uint32_t i = 0; i < array_length; i++) {
Instruction* extract = ir_builder.AddCompositeExtract(
original_element_type_id, object_to_copy->result_id(), {i});
element_ids.push_back(
GenerateCopy(extract, new_element_type_id, insertion_position));
}
return ir_builder.AddCompositeConstruct(new_type_id, element_ids)
->result_id();
} else if (const analysis::Struct* original_struct_type =
original_type->AsStruct()) {
analysis::Struct* new_struct_type = new_type->AsStruct();
const std::vector<const analysis::Type*>& original_types =
original_struct_type->element_types();
const std::vector<const analysis::Type*>& new_types =
new_struct_type->element_types();
std::vector<uint32_t> element_ids;
for (uint32_t i = 0; i < original_types.size(); i++) {
Instruction* extract = ir_builder.AddCompositeExtract(
type_mgr->GetId(original_types[i]), object_to_copy->result_id(), {i});
element_ids.push_back(GenerateCopy(extract, type_mgr->GetId(new_types[i]),
insertion_position));
}
return ir_builder.AddCompositeConstruct(new_type_id, element_ids)
->result_id();
} else {
// If we do not have an aggregate type, then we have a problem. Either we
// found multiple instances of the same type, or we are copying to an
// incompatible type. Either way the code is illegal.
assert(false &&
"Don't know how to copy this type. Code is likely illegal.");
if (new_type->opcode() != original_type->opcode()) {
return 0;
}
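// Aggregates (arrays and structs) are copied element by element, recursing so
// that nested element types are converted as well; any other type kind yields
// 0.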
switch (original_type->opcode()) {
case spv::Op::OpTypeArray: {
uint32_t original_element_type_id =
original_type->GetSingleWordInOperand(0);
uint32_t new_element_type_id = new_type->GetSingleWordInOperand(0);
std::vector<uint32_t> element_ids;
uint32_t length_id = original_type->GetSingleWordInOperand(1);
const analysis::Constant* length_const =
const_mgr->FindDeclaredConstant(length_id);
assert(length_const->AsIntConstant());
uint32_t array_length = length_const->AsIntConstant()->GetU32();
for (uint32_t i = 0; i < array_length; i++) {
Instruction* extract = ir_builder.AddCompositeExtract(
original_element_type_id, object_to_copy->result_id(), {i});
uint32_t new_id =
GenerateCopy(extract, new_element_type_id, insertion_position);
if (new_id == 0) {
return 0;
}
element_ids.push_back(new_id);
}
return ir_builder.AddCompositeConstruct(new_type_id, element_ids)
->result_id();
}
case spv::Op::OpTypeStruct: {
std::vector<uint32_t> element_ids;
for (uint32_t i = 0; i < original_type->NumInOperands(); i++) {
uint32_t orig_member_type_id = original_type->GetSingleWordInOperand(i);
uint32_t new_member_type_id = new_type->GetSingleWordInOperand(i);
Instruction* extract = ir_builder.AddCompositeExtract(
orig_member_type_id, object_to_copy->result_id(), {i});
uint32_t new_id =
GenerateCopy(extract, new_member_type_id, insertion_position);
if (new_id == 0) {
return 0;
}
element_ids.push_back(new_id);
}
return ir_builder.AddCompositeConstruct(new_type_id, element_ids)
->result_id();
}
default:
// If we do not have an aggregate type, then we have a problem. Either we
// found multiple instances of the same type, or we are copying to an
// incompatible type. Either way the code is illegal. Leave the code as
// is and let the caller deal with it.
return 0;
}
return 0;
}
} // namespace opt

View File

@ -145,7 +145,8 @@ class Pass {
// Returns the id whose value is the same as |object_to_copy| except its type
// is |new_type_id|. Any instructions needed to generate this value will be
// inserted before |insertion_position|.
// inserted before |insertion_position|. Returns 0 if a copy could not be
// done.
uint32_t GenerateCopy(Instruction* object_to_copy, uint32_t new_type_id,
Instruction* insertion_position);

View File

@ -245,6 +245,7 @@ uint32_t TypeManager::GetTypeInstruction(const Type* type) {
{(type->AsInteger()->IsSigned() ? 1u : 0u)}}});
break;
case Type::kFloat:
// TODO: Handle FP encoding enums once actually used.
typeInst = MakeUnique<Instruction>(
context(), spv::Op::OpTypeFloat, 0, id,
std::initializer_list<Operand>{

View File

@ -329,8 +329,9 @@ spv_result_t AssemblyContext::recordTypeDefinition(
types_[value] = {pInst->words[2], pInst->words[3] != 0,
IdTypeClass::kScalarIntegerType};
} else if (pInst->opcode == spv::Op::OpTypeFloat) {
if (pInst->words.size() != 3)
if ((pInst->words.size() != 3) && (pInst->words.size() != 4))
return diagnostic() << "Invalid OpTypeFloat instruction";
// TODO(kpet) Do we need to record the FP Encoding here?
types_[value] = {pInst->words[2], false, IdTypeClass::kScalarFloatType};
} else {
types_[value] = {0, false, IdTypeClass::kOtherType};

View File

@ -97,7 +97,7 @@ template <typename T>
size_t CountSetBits(T word) {
static_assert(std::is_integral<T>::value,
"CountSetBits requires integer type");
size_t count = 0;
uint32_t count = 0;
while (word) {
word &= word - 1;
++count;
@ -181,6 +181,31 @@ T ClearHighBits(T word, size_t num_bits_to_set) {
false);
}
// Returns the value obtained by extracting the |number_of_bits| least
// significant bits from |value|, and sign-extending it to 64-bits.
template <typename T>
T SignExtendValue(T value, uint32_t number_of_bits) {
const uint32_t bit_width = sizeof(value) * 8;
if (number_of_bits == bit_width) return value;
bool is_negative = utils::IsBitAtPositionSet(value, number_of_bits - 1);
if (is_negative) {
value = utils::SetHighBits(value, bit_width - number_of_bits);
} else {
value = utils::ClearHighBits(value, bit_width - number_of_bits);
}
return value;
}
// Returns the value obtained by extracting the |number_of_bits| least
// significant bits from |value|, and zero-extending it to 64-bits.
template <typename T>
T ZeroExtendValue(T value, uint32_t number_of_bits) {
const uint32_t bit_width = sizeof(value) * 8;
if (number_of_bits == bit_width) return value;
return utils::ClearHighBits(value, bit_width - number_of_bits);
}
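// Illustrative examples (values chosen for illustration, with T = uint64_t):
//   SignExtendValue<uint64_t>(0xFFu, 8) == 0xFFFFFFFFFFFFFFFFull
//   ZeroExtendValue<uint64_t>(0xFFu, 8) == 0xFFull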
} // namespace utils
} // namespace spvtools

View File

@ -117,6 +117,15 @@ spv_result_t ValidateAdjacency(ValidationState_t& _) {
"first instructions in the first block.";
}
break;
case spv::Op::OpUntypedVariableKHR:
if (inst.GetOperandAs<spv::StorageClass>(2) ==
spv::StorageClass::Function &&
adjacency_status != IN_ENTRY_BLOCK) {
return _.diag(SPV_ERROR_INVALID_DATA, &inst)
<< "All OpUntypedVariableKHR instructions in a function must "
"be the first instructions in the first block.";
}
break;
default:
adjacency_status = PHI_AND_VAR_INVALID;
break;

View File

@ -123,12 +123,14 @@ spv_result_t ValidateDecorationTarget(ValidationState_t& _, spv::Decoration dec,
case spv::Decoration::ArrayStride:
if (target->opcode() != spv::Op::OpTypeArray &&
target->opcode() != spv::Op::OpTypeRuntimeArray &&
target->opcode() != spv::Op::OpTypePointer) {
target->opcode() != spv::Op::OpTypePointer &&
target->opcode() != spv::Op::OpTypeUntypedPointerKHR) {
return fail(0) << "must be an array or pointer type";
}
break;
case spv::Decoration::BuiltIn:
if (target->opcode() != spv::Op::OpVariable &&
target->opcode() != spv::Op::OpUntypedVariableKHR &&
!spvOpcodeIsConstant(target->opcode())) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "BuiltIns can only target variables, structure members or "
@ -139,7 +141,8 @@ spv_result_t ValidateDecorationTarget(ValidationState_t& _, spv::Decoration dec,
if (!spvOpcodeIsConstant(target->opcode())) {
return fail(0) << "must be a constant for WorkgroupSize";
}
} else if (target->opcode() != spv::Op::OpVariable) {
} else if (target->opcode() != spv::Op::OpVariable &&
target->opcode() != spv::Op::OpUntypedVariableKHR) {
return fail(0) << "must be a variable";
}
break;
@ -161,11 +164,12 @@ spv_result_t ValidateDecorationTarget(ValidationState_t& _, spv::Decoration dec,
case spv::Decoration::RestrictPointer:
case spv::Decoration::AliasedPointer:
if (target->opcode() != spv::Op::OpVariable &&
target->opcode() != spv::Op::OpUntypedVariableKHR &&
target->opcode() != spv::Op::OpFunctionParameter &&
target->opcode() != spv::Op::OpRawAccessChainNV) {
return fail(0) << "must be a memory object declaration";
}
if (_.GetIdOpcode(target->type_id()) != spv::Op::OpTypePointer) {
if (!_.IsPointerType(target->type_id())) {
return fail(0) << "must be a pointer type";
}
break;
@ -176,7 +180,8 @@ spv_result_t ValidateDecorationTarget(ValidationState_t& _, spv::Decoration dec,
case spv::Decoration::Binding:
case spv::Decoration::DescriptorSet:
case spv::Decoration::InputAttachmentIndex:
if (target->opcode() != spv::Op::OpVariable) {
if (target->opcode() != spv::Op::OpVariable &&
target->opcode() != spv::Op::OpUntypedVariableKHR) {
return fail(0) << "must be a variable";
}
break;

View File

@ -183,7 +183,44 @@ spv_result_t AtomicsPass(ValidationState_t& _, const Instruction* inst) {
if (!_.GetPointerTypeInfo(pointer_type, &data_type, &storage_class)) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": expected Pointer to be of type OpTypePointer";
<< ": expected Pointer to be a pointer type";
}
// If the pointer is an untyped pointer, get the data type elsewhere.
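// For value-returning atomics the data type is the result type; for
// OpAtomicStore it is the type of the Value operand. Atomic flag instructions
// do not accept untyped pointers.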
if (data_type == 0) {
switch (opcode) {
case spv::Op::OpAtomicLoad:
case spv::Op::OpAtomicExchange:
case spv::Op::OpAtomicFAddEXT:
case spv::Op::OpAtomicCompareExchange:
case spv::Op::OpAtomicCompareExchangeWeak:
case spv::Op::OpAtomicIIncrement:
case spv::Op::OpAtomicIDecrement:
case spv::Op::OpAtomicIAdd:
case spv::Op::OpAtomicISub:
case spv::Op::OpAtomicSMin:
case spv::Op::OpAtomicUMin:
case spv::Op::OpAtomicFMinEXT:
case spv::Op::OpAtomicSMax:
case spv::Op::OpAtomicUMax:
case spv::Op::OpAtomicFMaxEXT:
case spv::Op::OpAtomicAnd:
case spv::Op::OpAtomicOr:
case spv::Op::OpAtomicXor:
data_type = inst->type_id();
break;
case spv::Op::OpAtomicFlagTestAndSet:
case spv::Op::OpAtomicFlagClear:
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Untyped pointers are not supported by atomic flag "
"instructions";
break;
case spv::Op::OpAtomicStore:
data_type = _.FindDef(inst->GetOperandAs<uint32_t>(3))->type_id();
break;
default:
break;
}
}
// Can't use result_type because OpAtomicStore doesn't have a result

View File

@ -97,12 +97,16 @@ spv_result_t GetUnderlyingType(ValidationState_t& _,
spv::StorageClass GetStorageClass(const Instruction& inst) {
switch (inst.opcode()) {
case spv::Op::OpTypePointer:
case spv::Op::OpTypeUntypedPointerKHR:
case spv::Op::OpTypeForwardPointer: {
return spv::StorageClass(inst.word(2));
}
case spv::Op::OpVariable: {
return spv::StorageClass(inst.word(3));
}
case spv::Op::OpUntypedVariableKHR: {
return spv::StorageClass(inst.word(4));
}
case spv::Op::OpGenericCastToPtrExplicit: {
return spv::StorageClass(inst.word(4));
}

View File

@ -250,7 +250,8 @@ spv_result_t ValidateReturnValue(ValidationState_t& _,
}
if (_.addressing_model() == spv::AddressingModel::Logical &&
spv::Op::OpTypePointer == value_type->opcode() &&
(spv::Op::OpTypePointer == value_type->opcode() ||
spv::Op::OpTypeUntypedPointerKHR == value_type->opcode()) &&
!_.features().variable_pointers && !_.options()->relax_logical_pointer) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "OpReturnValue value's type <id> "

View File

@ -324,6 +324,7 @@ bool IsTypeNullable(const std::vector<uint32_t>& instruction,
}
return true;
}
case spv::Op::OpTypeUntypedPointerKHR:
case spv::Op::OpTypePointer:
if (spv::StorageClass(instruction[2]) ==
spv::StorageClass::PhysicalStorageBuffer) {

View File

@ -224,6 +224,7 @@ uint32_t getBaseAlignment(uint32_t member_id, bool roundUp,
break;
}
case spv::Op::OpTypePointer:
case spv::Op::OpTypeUntypedPointerKHR:
baseAlignment = vstate.pointer_size_and_alignment();
break;
default:
@ -270,6 +271,7 @@ uint32_t getScalarAlignment(uint32_t type_id, ValidationState_t& vstate) {
return max_member_alignment;
} break;
case spv::Op::OpTypePointer:
case spv::Op::OpTypeUntypedPointerKHR:
return vstate.pointer_size_and_alignment();
default:
assert(0);
@ -359,6 +361,7 @@ uint32_t getSize(uint32_t member_id, const LayoutConstraints& inherited,
return offset + getSize(lastMember, constraint, constraints, vstate);
}
case spv::Op::OpTypePointer:
case spv::Op::OpTypeUntypedPointerKHR:
return vstate.pointer_size_and_alignment();
default:
assert(0);
@ -432,9 +435,9 @@ spv_result_t checkLayout(uint32_t struct_id, const char* storage_class_str,
return ds;
};
// If we are checking physical storage buffer pointers, we may not actually
// have a struct here. Instead, pretend we have a struct with a single member
// at offset 0.
// If we are checking the layout of untyped pointers or physical storage
// buffer pointers, we may not actually have a struct here. Instead, pretend
// we have a struct with a single member at offset 0.
const auto& struct_type = vstate.FindDef(struct_id);
std::vector<uint32_t> members;
if (struct_type->opcode() == spv::Op::OpTypeStruct) {
@ -451,8 +454,8 @@ spv_result_t checkLayout(uint32_t struct_id, const char* storage_class_str,
};
std::vector<MemberOffsetPair> member_offsets;
// With physical storage buffers, we might be checking layouts that do not
// originate from a structure.
// With untyped pointers or physical storage buffers, we might be checking
// layouts that do not originate from a structure.
if (struct_type->opcode() == spv::Op::OpTypeStruct) {
member_offsets.reserve(members.size());
for (uint32_t memberIdx = 0, numMembers = uint32_t(members.size());
@ -770,14 +773,19 @@ spv_result_t CheckDecorationsOfEntryPoints(ValidationState_t& vstate) {
std::unordered_set<spv::BuiltIn> output_var_builtin;
for (auto interface : desc.interfaces) {
Instruction* var_instr = vstate.FindDef(interface);
if (!var_instr || spv::Op::OpVariable != var_instr->opcode()) {
if (!var_instr ||
(spv::Op::OpVariable != var_instr->opcode() &&
spv::Op::OpUntypedVariableKHR != var_instr->opcode())) {
return vstate.diag(SPV_ERROR_INVALID_ID, var_instr)
<< "Interfaces passed to OpEntryPoint must be of type "
"OpTypeVariable. Found Op"
<< "Interfaces passed to OpEntryPoint must be variables. "
"Found Op"
<< spvOpcodeString(var_instr->opcode()) << ".";
}
const bool untyped_pointers =
var_instr->opcode() == spv::Op::OpUntypedVariableKHR;
const auto sc_index = 2u;
const spv::StorageClass storage_class =
var_instr->GetOperandAs<spv::StorageClass>(2);
var_instr->GetOperandAs<spv::StorageClass>(sc_index);
if (vstate.version() >= SPV_SPIRV_VERSION_WORD(1, 4)) {
// Starting in 1.4, OpEntryPoint must list all global variables
// it statically uses and those interfaces must be unique.
@ -804,12 +812,13 @@ spv_result_t CheckDecorationsOfEntryPoints(ValidationState_t& vstate) {
}
}
const uint32_t ptr_id = var_instr->word(1);
Instruction* ptr_instr = vstate.FindDef(ptr_id);
// It is guaranteed (by validator ID checks) that ptr_instr is
// OpTypePointer. Word 3 of this instruction is the type being pointed
// to.
const uint32_t type_id = ptr_instr->word(3);
// to. For untyped variables, the pointee type comes from the data type
// operand.
const uint32_t type_id =
untyped_pointers ? var_instr->word(4)
: vstate.FindDef(var_instr->word(1))->word(3);
Instruction* type_instr = vstate.FindDef(type_id);
const bool is_struct =
type_instr && spv::Op::OpTypeStruct == type_instr->opcode();
@ -874,12 +883,25 @@ spv_result_t CheckDecorationsOfEntryPoints(ValidationState_t& vstate) {
if (storage_class == spv::StorageClass::Workgroup) {
++num_workgroup_variables;
if (is_struct) {
if (hasDecoration(type_id, spv::Decoration::Block, vstate))
++num_workgroup_variables_with_block;
if (hasDecoration(var_instr->id(), spv::Decoration::Aliased,
vstate))
++num_workgroup_variables_with_aliased;
if (type_instr) {
if (spv::Op::OpTypeStruct == type_instr->opcode()) {
if (hasDecoration(type_id, spv::Decoration::Block, vstate)) {
++num_workgroup_variables_with_block;
} else if (untyped_pointers &&
vstate.HasCapability(spv::Capability::Shader)) {
return vstate.diag(SPV_ERROR_INVALID_ID, var_instr)
<< "Untyped workgroup variables in shaders must be "
"block decorated";
}
if (hasDecoration(var_instr->id(), spv::Decoration::Aliased,
vstate))
++num_workgroup_variables_with_aliased;
} else if (untyped_pointers &&
vstate.HasCapability(spv::Capability::Shader)) {
return vstate.diag(SPV_ERROR_INVALID_ID, var_instr)
<< "Untyped workgroup variables in shaders must be block "
"decorated structs";
}
}
}
@ -960,25 +982,33 @@ spv_result_t CheckDecorationsOfEntryPoints(ValidationState_t& vstate) {
const bool workgroup_blocks_allowed = vstate.HasCapability(
spv::Capability::WorkgroupMemoryExplicitLayoutKHR);
if (workgroup_blocks_allowed && num_workgroup_variables > 0 &&
if (workgroup_blocks_allowed &&
!vstate.HasCapability(spv::Capability::UntypedPointersKHR) &&
num_workgroup_variables > 0 &&
num_workgroup_variables_with_block > 0) {
if (num_workgroup_variables != num_workgroup_variables_with_block) {
return vstate.diag(SPV_ERROR_INVALID_BINARY, vstate.FindDef(entry_point))
return vstate.diag(SPV_ERROR_INVALID_BINARY,
vstate.FindDef(entry_point))
<< "When declaring WorkgroupMemoryExplicitLayoutKHR, "
"either all or none of the Workgroup Storage Class variables "
"either all or none of the Workgroup Storage Class "
"variables "
"in the entry point interface must point to struct types "
"decorated with Block. Entry point id "
"decorated with Block (unless the "
"UntypedPointersKHR capability is declared). "
"Entry point id "
<< entry_point << " does not meet this requirement.";
}
if (num_workgroup_variables_with_block > 1 &&
num_workgroup_variables_with_block !=
num_workgroup_variables_with_aliased) {
return vstate.diag(SPV_ERROR_INVALID_BINARY, vstate.FindDef(entry_point))
return vstate.diag(SPV_ERROR_INVALID_BINARY,
vstate.FindDef(entry_point))
<< "When declaring WorkgroupMemoryExplicitLayoutKHR, "
"if more than one Workgroup Storage Class variable in "
"the entry point interface point to a type decorated "
"with Block, all of them must be decorated with Aliased. "
"Entry point id "
"with Block, all of them must be decorated with Aliased "
"(unless the UntypedPointerWorkgroupKHR capability is "
"declared). Entry point id "
<< entry_point << " does not meet this requirement.";
}
} else if (!workgroup_blocks_allowed &&
@ -1084,11 +1114,17 @@ spv_result_t CheckDecorationsOfBuffers(ValidationState_t& vstate) {
const auto& words = inst.words();
auto type_id = inst.type_id();
const Instruction* type_inst = vstate.FindDef(type_id);
if (spv::Op::OpVariable == inst.opcode()) {
bool scalar_block_layout = false;
MemberConstraints constraints;
if (spv::Op::OpVariable == inst.opcode() ||
spv::Op::OpUntypedVariableKHR == inst.opcode()) {
const bool untyped_pointer =
inst.opcode() == spv::Op::OpUntypedVariableKHR;
const auto var_id = inst.id();
// For storage class / decoration combinations, see Vulkan 14.5.4 "Offset
// and Stride Assignment".
const auto storageClass = inst.GetOperandAs<spv::StorageClass>(2);
const auto storageClassVal = words[3];
const auto storageClass = spv::StorageClass(storageClassVal);
const bool uniform = storageClass == spv::StorageClass::Uniform;
const bool uniform_constant =
storageClass == spv::StorageClass::UniformConstant;
@ -1167,20 +1203,24 @@ spv_result_t CheckDecorationsOfBuffers(ValidationState_t& vstate) {
if (uniform || push_constant || storage_buffer || phys_storage_buffer ||
workgroup) {
const auto ptrInst = vstate.FindDef(words[1]);
assert(spv::Op::OpTypePointer == ptrInst->opcode());
auto id = ptrInst->words()[3];
auto id_inst = vstate.FindDef(id);
// Jump through one level of arraying.
if (!workgroup && (id_inst->opcode() == spv::Op::OpTypeArray ||
id_inst->opcode() == spv::Op::OpTypeRuntimeArray)) {
id = id_inst->GetOperandAs<uint32_t>(1u);
id_inst = vstate.FindDef(id);
assert(spv::Op::OpTypePointer == ptrInst->opcode() ||
spv::Op::OpTypeUntypedPointerKHR == ptrInst->opcode());
auto id = untyped_pointer ? (words.size() > 4 ? words[4] : 0)
: ptrInst->words()[3];
if (id != 0) {
auto id_inst = vstate.FindDef(id);
// Jump through one level of arraying.
if (!workgroup &&
(id_inst->opcode() == spv::Op::OpTypeArray ||
id_inst->opcode() == spv::Op::OpTypeRuntimeArray)) {
id = id_inst->GetOperandAs<uint32_t>(1u);
id_inst = vstate.FindDef(id);
}
// Struct requirement is checked on variables so just move on here.
if (spv::Op::OpTypeStruct != id_inst->opcode()) continue;
ComputeMemberConstraintsForStruct(&constraints, id,
LayoutConstraints(), vstate);
}
// Struct requirement is checked on variables so just move on here.
if (spv::Op::OpTypeStruct != id_inst->opcode()) continue;
MemberConstraints constraints;
ComputeMemberConstraintsForStruct(&constraints, id, LayoutConstraints(),
vstate);
// Prepare for messages
const char* sc_str =
uniform ? "Uniform"
@ -1250,88 +1290,91 @@ spv_result_t CheckDecorationsOfBuffers(ValidationState_t& vstate) {
}
}
for (const auto& dec : vstate.id_decorations(id)) {
const bool blockDeco = spv::Decoration::Block == dec.dec_type();
const bool bufferDeco =
spv::Decoration::BufferBlock == dec.dec_type();
const bool blockRules = uniform && blockDeco;
const bool bufferRules =
(uniform && bufferDeco) ||
((push_constant || storage_buffer ||
phys_storage_buffer || workgroup) && blockDeco);
if (uniform && blockDeco) {
vstate.RegisterPointerToUniformBlock(ptrInst->id());
vstate.RegisterStructForUniformBlock(id);
}
if ((uniform && bufferDeco) ||
((storage_buffer || phys_storage_buffer) && blockDeco)) {
vstate.RegisterPointerToStorageBuffer(ptrInst->id());
vstate.RegisterStructForStorageBuffer(id);
}
if (blockRules || bufferRules) {
const char* deco_str = blockDeco ? "Block" : "BufferBlock";
spv_result_t recursive_status = SPV_SUCCESS;
const bool scalar_block_layout = workgroup ?
vstate.options()->workgroup_scalar_block_layout :
vstate.options()->scalar_block_layout;
if (isMissingOffsetInStruct(id, vstate)) {
return vstate.diag(SPV_ERROR_INVALID_ID, vstate.FindDef(id))
<< "Structure id " << id << " decorated as " << deco_str
<< " must be explicitly laid out with Offset "
"decorations.";
if (id != 0) {
for (const auto& dec : vstate.id_decorations(id)) {
const bool blockDeco = spv::Decoration::Block == dec.dec_type();
const bool bufferDeco =
spv::Decoration::BufferBlock == dec.dec_type();
const bool blockRules = uniform && blockDeco;
const bool bufferRules = (uniform && bufferDeco) ||
((push_constant || storage_buffer ||
phys_storage_buffer || workgroup) &&
blockDeco);
if (uniform && blockDeco) {
vstate.RegisterPointerToUniformBlock(ptrInst->id());
vstate.RegisterStructForUniformBlock(id);
}
if ((uniform && bufferDeco) ||
((storage_buffer || phys_storage_buffer) && blockDeco)) {
vstate.RegisterPointerToStorageBuffer(ptrInst->id());
vstate.RegisterStructForStorageBuffer(id);
}
if (!checkForRequiredDecoration(
id,
[](spv::Decoration d) {
return d == spv::Decoration::ArrayStride;
},
spv::Op::OpTypeArray, vstate)) {
return vstate.diag(SPV_ERROR_INVALID_ID, vstate.FindDef(id))
<< "Structure id " << id << " decorated as " << deco_str
<< " must be explicitly laid out with ArrayStride "
"decorations.";
}
if (blockRules || bufferRules) {
const char* deco_str = blockDeco ? "Block" : "BufferBlock";
spv_result_t recursive_status = SPV_SUCCESS;
scalar_block_layout =
workgroup ? vstate.options()->workgroup_scalar_block_layout
: vstate.options()->scalar_block_layout;
if (!checkForRequiredDecoration(
id,
[](spv::Decoration d) {
return d == spv::Decoration::MatrixStride;
},
spv::Op::OpTypeMatrix, vstate)) {
return vstate.diag(SPV_ERROR_INVALID_ID, vstate.FindDef(id))
<< "Structure id " << id << " decorated as " << deco_str
<< " must be explicitly laid out with MatrixStride "
"decorations.";
}
if (isMissingOffsetInStruct(id, vstate)) {
return vstate.diag(SPV_ERROR_INVALID_ID, vstate.FindDef(id))
<< "Structure id " << id << " decorated as " << deco_str
<< " must be explicitly laid out with Offset "
"decorations.";
}
if (!checkForRequiredDecoration(
id,
[](spv::Decoration d) {
return d == spv::Decoration::RowMajor ||
d == spv::Decoration::ColMajor;
},
spv::Op::OpTypeMatrix, vstate)) {
return vstate.diag(SPV_ERROR_INVALID_ID, vstate.FindDef(id))
<< "Structure id " << id << " decorated as " << deco_str
<< " must be explicitly laid out with RowMajor or "
"ColMajor decorations.";
}
if (!checkForRequiredDecoration(
id,
[](spv::Decoration d) {
return d == spv::Decoration::ArrayStride;
},
spv::Op::OpTypeArray, vstate)) {
return vstate.diag(SPV_ERROR_INVALID_ID, vstate.FindDef(id))
<< "Structure id " << id << " decorated as " << deco_str
<< " must be explicitly laid out with ArrayStride "
"decorations.";
}
if (spvIsVulkanEnv(vstate.context()->target_env)) {
if (blockRules && (SPV_SUCCESS != (recursive_status = checkLayout(
id, sc_str, deco_str, true,
if (!checkForRequiredDecoration(
id,
[](spv::Decoration d) {
return d == spv::Decoration::MatrixStride;
},
spv::Op::OpTypeMatrix, vstate)) {
return vstate.diag(SPV_ERROR_INVALID_ID, vstate.FindDef(id))
<< "Structure id " << id << " decorated as " << deco_str
<< " must be explicitly laid out with MatrixStride "
"decorations.";
}
if (!checkForRequiredDecoration(
id,
[](spv::Decoration d) {
return d == spv::Decoration::RowMajor ||
d == spv::Decoration::ColMajor;
},
spv::Op::OpTypeMatrix, vstate)) {
return vstate.diag(SPV_ERROR_INVALID_ID, vstate.FindDef(id))
<< "Structure id " << id << " decorated as " << deco_str
<< " must be explicitly laid out with RowMajor or "
"ColMajor decorations.";
}
if (spvIsVulkanEnv(vstate.context()->target_env)) {
if (blockRules &&
(SPV_SUCCESS !=
(recursive_status = checkLayout(id, sc_str, deco_str, true,
scalar_block_layout, 0,
constraints, vstate)))) {
return recursive_status;
} else if (bufferRules &&
(SPV_SUCCESS !=
(recursive_status = checkLayout(
id, sc_str, deco_str, false, scalar_block_layout,
0, constraints, vstate)))) {
return recursive_status;
return recursive_status;
} else if (bufferRules &&
(SPV_SUCCESS != (recursive_status = checkLayout(
id, sc_str, deco_str, false,
scalar_block_layout, 0,
constraints, vstate)))) {
return recursive_status;
}
}
}
}
@ -1340,19 +1383,97 @@ spv_result_t CheckDecorationsOfBuffers(ValidationState_t& vstate) {
} else if (type_inst && type_inst->opcode() == spv::Op::OpTypePointer &&
type_inst->GetOperandAs<spv::StorageClass>(1u) ==
spv::StorageClass::PhysicalStorageBuffer) {
const bool scalar_block_layout = vstate.options()->scalar_block_layout;
MemberConstraints constraints;
const bool buffer = true;
const auto data_type_id = type_inst->GetOperandAs<uint32_t>(2u);
const auto* data_type_inst = vstate.FindDef(data_type_id);
const auto pointee_type_id = type_inst->GetOperandAs<uint32_t>(2u);
const auto* data_type_inst = vstate.FindDef(pointee_type_id);
scalar_block_layout = vstate.options()->scalar_block_layout;
if (data_type_inst->opcode() == spv::Op::OpTypeStruct) {
ComputeMemberConstraintsForStruct(&constraints, pointee_type_id,
LayoutConstraints(), vstate);
}
if (auto res = checkLayout(pointee_type_id, "PhysicalStorageBuffer",
"Block", !buffer, scalar_block_layout, 0,
constraints, vstate)) {
return res;
}
} else if (vstate.HasCapability(spv::Capability::UntypedPointersKHR) &&
spvIsVulkanEnv(vstate.context()->target_env)) {
// Untyped variables are checked above. Here we check that instructions
// using an untyped pointer have a valid layout.
uint32_t ptr_ty_id = 0;
uint32_t data_type_id = 0;
switch (inst.opcode()) {
case spv::Op::OpUntypedAccessChainKHR:
case spv::Op::OpUntypedInBoundsAccessChainKHR:
case spv::Op::OpUntypedPtrAccessChainKHR:
case spv::Op::OpUntypedInBoundsPtrAccessChainKHR:
ptr_ty_id = inst.type_id();
data_type_id = inst.GetOperandAs<uint32_t>(2);
break;
case spv::Op::OpLoad:
if (vstate.GetIdOpcode(vstate.GetOperandTypeId(&inst, 2)) ==
spv::Op::OpTypeUntypedPointerKHR) {
const auto ptr_id = inst.GetOperandAs<uint32_t>(2);
ptr_ty_id = vstate.FindDef(ptr_id)->type_id();
data_type_id = inst.type_id();
}
break;
case spv::Op::OpStore:
if (vstate.GetIdOpcode(vstate.GetOperandTypeId(&inst, 0)) ==
spv::Op::OpTypeUntypedPointerKHR) {
const auto ptr_id = inst.GetOperandAs<uint32_t>(0);
ptr_ty_id = vstate.FindDef(ptr_id)->type_id();
data_type_id = vstate.GetOperandTypeId(&inst, 1);
}
break;
case spv::Op::OpUntypedArrayLengthKHR:
ptr_ty_id = vstate.FindDef(inst.GetOperandAs<uint32_t>(3))->type_id();
data_type_id = inst.GetOperandAs<uint32_t>(2);
break;
default:
break;
}
if (ptr_ty_id == 0 || data_type_id == 0) {
// Not an untyped pointer.
continue;
}
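      // Illustrative access (assumed SPIR-V syntax) that reaches this check:
      //   %p = OpUntypedAccessChainKHR %untyped_ssbo_ptr %block_t %var %idx
      // %block_t is the data type whose explicit layout is validated below
      // against the rules of the pointer's storage class.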
const auto sc =
vstate.FindDef(ptr_ty_id)->GetOperandAs<spv::StorageClass>(1);
const char* sc_str =
sc == spv::StorageClass::Uniform
? "Uniform"
: (sc == spv::StorageClass::PushConstant
? "PushConstant"
: (sc == spv::StorageClass::Workgroup ? "Workgroup"
: "StorageBuffer"));
const auto data_type = vstate.FindDef(data_type_id);
scalar_block_layout =
sc == spv::StorageClass::Workgroup
? vstate.options()->workgroup_scalar_block_layout
: vstate.options()->scalar_block_layout;
// Assume uniform storage class uses block rules unless we see a
// BufferBlock decorated struct in the data type.
bool bufferRules = sc == spv::StorageClass::Uniform ? false : true;
if (data_type->opcode() == spv::Op::OpTypeStruct) {
if (sc == spv::StorageClass::Uniform) {
bufferRules =
vstate.HasDecoration(data_type_id, spv::Decoration::BufferBlock);
}
ComputeMemberConstraintsForStruct(&constraints, data_type_id,
LayoutConstraints(), vstate);
}
if (auto res = checkLayout(data_type_id, "PhysicalStorageBuffer", "Block",
!buffer, scalar_block_layout, 0, constraints,
vstate)) {
return res;
const char* deco_str =
bufferRules
? (sc == spv::StorageClass::Uniform ? "BufferBlock" : "Block")
: "Block";
if (auto result =
checkLayout(data_type_id, sc_str, deco_str, !bufferRules,
scalar_block_layout, 0, constraints, vstate)) {
return result;
}
}
}
@ -1585,15 +1706,19 @@ spv_result_t CheckNonWritableDecoration(ValidationState_t& vstate,
const auto opcode = inst.opcode();
const auto type_id = inst.type_id();
if (opcode != spv::Op::OpVariable &&
opcode != spv::Op::OpUntypedVariableKHR &&
opcode != spv::Op::OpFunctionParameter &&
opcode != spv::Op::OpRawAccessChainNV) {
return vstate.diag(SPV_ERROR_INVALID_ID, &inst)
<< "Target of NonWritable decoration must be a memory object "
"declaration (a variable or a function parameter)";
}
const auto var_storage_class = opcode == spv::Op::OpVariable
? inst.GetOperandAs<spv::StorageClass>(2)
: spv::StorageClass::Max;
const auto var_storage_class =
opcode == spv::Op::OpVariable
? inst.GetOperandAs<spv::StorageClass>(2)
: opcode == spv::Op::OpUntypedVariableKHR
? inst.GetOperandAs<spv::StorageClass>(3)
: spv::StorageClass::Max;
if ((var_storage_class == spv::StorageClass::Function ||
var_storage_class == spv::StorageClass::Private) &&
vstate.features().nonwritable_var_in_function_or_private) {

View File

@ -2962,12 +2962,38 @@ spv_result_t ValidateExtInst(ValidationState_t& _, const Instruction* inst) {
<< "expected operand Format to be a pointer";
}
if (format_storage_class != spv::StorageClass::UniformConstant) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< ext_inst_name() << ": "
<< "expected Format storage class to be UniformConstant";
if (_.HasExtension(
Extension::kSPV_EXT_relaxed_printf_string_address_space)) {
if (format_storage_class != spv::StorageClass::UniformConstant &&
// Extension SPV_EXT_relaxed_printf_string_address_space allows
// format strings in Global, Local, Private and Generic address
// spaces
// Global
format_storage_class != spv::StorageClass::CrossWorkgroup &&
// Local
format_storage_class != spv::StorageClass::Workgroup &&
// Private
format_storage_class != spv::StorageClass::Function &&
// Generic
format_storage_class != spv::StorageClass::Generic) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< ext_inst_name() << ": "
<< "expected Format storage class to be UniformConstant, "
"Crossworkgroup, Workgroup, Function, or Generic";
}
} else {
if (format_storage_class != spv::StorageClass::UniformConstant) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< ext_inst_name() << ": "
<< "expected Format storage class to be UniformConstant";
}
}
// If pointer points to an array, get the type of an element
if (_.IsIntArrayType(format_data_type))
format_data_type = _.GetComponentType(format_data_type);
if (!_.IsIntScalarType(format_data_type) ||
_.GetBitWidth(format_data_type) != 8) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)

View File

@ -156,7 +156,9 @@ spv_result_t ValidateFunctionParameter(ValidationState_t& _,
param_nonarray_type_id =
_.FindDef(param_nonarray_type_id)->GetOperandAs<uint32_t>(1u);
}
if (_.GetIdOpcode(param_nonarray_type_id) == spv::Op::OpTypePointer) {
if (_.GetIdOpcode(param_nonarray_type_id) == spv::Op::OpTypePointer ||
_.GetIdOpcode(param_nonarray_type_id) ==
spv::Op::OpTypeUntypedPointerKHR) {
auto param_nonarray_type = _.FindDef(param_nonarray_type_id);
if (param_nonarray_type->GetOperandAs<spv::StorageClass>(1u) ==
spv::StorageClass::PhysicalStorageBuffer) {
@ -185,7 +187,7 @@ spv_result_t ValidateFunctionParameter(ValidationState_t& _,
<< ": can't specify both Aliased and Restrict for "
"PhysicalStorageBuffer pointer.";
}
} else {
} else if (param_nonarray_type->opcode() == spv::Op::OpTypePointer) {
const auto pointee_type_id =
param_nonarray_type->GetOperandAs<uint32_t>(2);
const auto pointee_type = _.FindDef(pointee_type_id);
@ -288,7 +290,8 @@ spv_result_t ValidateFunctionCall(ValidationState_t& _,
}
if (_.addressing_model() == spv::AddressingModel::Logical) {
if (parameter_type->opcode() == spv::Op::OpTypePointer &&
if ((parameter_type->opcode() == spv::Op::OpTypePointer ||
parameter_type->opcode() == spv::Op::OpTypeUntypedPointerKHR) &&
!_.options()->relax_logical_pointer) {
spv::StorageClass sc =
parameter_type->GetOperandAs<spv::StorageClass>(1u);
@ -317,9 +320,11 @@ spv_result_t ValidateFunctionCall(ValidationState_t& _,
// Validate memory object declaration requirements.
if (argument->opcode() != spv::Op::OpVariable &&
argument->opcode() != spv::Op::OpUntypedVariableKHR &&
argument->opcode() != spv::Op::OpFunctionParameter) {
const bool ssbo_vptr = _.features().variable_pointers &&
sc == spv::StorageClass::StorageBuffer;
const bool ssbo_vptr =
_.HasCapability(spv::Capability::VariablePointersStorageBuffer) &&
sc == spv::StorageClass::StorageBuffer;
const bool wg_vptr =
_.HasCapability(spv::Capability::VariablePointers) &&
sc == spv::StorageClass::Workgroup;
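          // Note: in SPIR-V the VariablePointers capability implies
          // VariablePointersStorageBuffer, so checking the narrower capability
          // above still accepts modules that declare only VariablePointers.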

View File

@ -165,6 +165,8 @@ spv_result_t IdPass(ValidationState_t& _, Instruction* inst) {
!spvOpcodeIsDecoration(opcode) && opcode != spv::Op::OpFunction &&
opcode != spv::Op::OpCooperativeMatrixLengthNV &&
opcode != spv::Op::OpCooperativeMatrixLengthKHR &&
!spvOpcodeGeneratesUntypedPointer(opcode) &&
opcode != spv::Op::OpUntypedArrayLengthKHR &&
!(opcode == spv::Op::OpSpecConstantOp &&
(spv::Op(inst->word(3)) ==
spv::Op::OpCooperativeMatrixLengthNV ||
@ -185,6 +187,8 @@ spv_result_t IdPass(ValidationState_t& _, Instruction* inst) {
opcode != spv::Op::OpFunction &&
opcode != spv::Op::OpCooperativeMatrixLengthNV &&
opcode != spv::Op::OpCooperativeMatrixLengthKHR &&
!spvOpcodeGeneratesUntypedPointer(opcode) &&
opcode != spv::Op::OpUntypedArrayLengthKHR &&
!(opcode == spv::Op::OpSpecConstantOp &&
(spv::Op(inst->word(3)) ==
spv::Op::OpCooperativeMatrixLengthNV ||

View File

@ -1018,8 +1018,8 @@ spv_result_t ValidateSampledImage(ValidationState_t& _,
}
if (type_inst->GetOperandAs<uint32_t>(1) != image_type) {
// return _.diag(SPV_ERROR_INVALID_DATA, inst)
// << "Expected Image to have the same type as Result Type Image";
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Expected Image to have the same type as Result Type Image";
}
ImageTypeInfo info;
@ -1121,7 +1121,8 @@ spv_result_t ValidateSampledImage(ValidationState_t& _,
spv_result_t ValidateImageTexelPointer(ValidationState_t& _,
const Instruction* inst) {
const auto result_type = _.FindDef(inst->type_id());
if (result_type->opcode() != spv::Op::OpTypePointer) {
if (result_type->opcode() != spv::Op::OpTypePointer &&
      result_type->opcode() != spv::Op::OpTypeUntypedPointerKHR) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Expected Result Type to be OpTypePointer";
}
@ -1133,16 +1134,20 @@ spv_result_t ValidateImageTexelPointer(ValidationState_t& _,
"operand is Image";
}
const auto ptr_type = result_type->GetOperandAs<uint32_t>(2);
const auto ptr_opcode = _.GetIdOpcode(ptr_type);
if (ptr_opcode != spv::Op::OpTypeInt && ptr_opcode != spv::Op::OpTypeFloat &&
ptr_opcode != spv::Op::OpTypeVoid &&
!(ptr_opcode == spv::Op::OpTypeVector &&
_.HasCapability(spv::Capability::AtomicFloat16VectorNV) &&
_.IsFloat16Vector2Or4Type(ptr_type))) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Expected Result Type to be OpTypePointer whose Type operand "
"must be a scalar numerical type or OpTypeVoid";
uint32_t ptr_type = 0;
if (result_type->opcode() == spv::Op::OpTypePointer) {
ptr_type = result_type->GetOperandAs<uint32_t>(2);
const auto ptr_opcode = _.GetIdOpcode(ptr_type);
if (ptr_opcode != spv::Op::OpTypeInt &&
ptr_opcode != spv::Op::OpTypeFloat &&
ptr_opcode != spv::Op::OpTypeVoid &&
!(ptr_opcode == spv::Op::OpTypeVector &&
_.HasCapability(spv::Capability::AtomicFloat16VectorNV) &&
_.IsFloat16Vector2Or4Type(ptr_type))) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Expected Result Type to be OpTypePointer whose Type operand "
"must be a scalar numerical type or OpTypeVoid";
}
}
const auto image_ptr = _.FindDef(_.GetOperandTypeId(inst, 2));
@ -1163,7 +1168,8 @@ spv_result_t ValidateImageTexelPointer(ValidationState_t& _,
<< "Corrupt image type definition";
}
if (info.sampled_type != ptr_type &&
if (result_type->opcode() == spv::Op::OpTypePointer &&
info.sampled_type != ptr_type &&
!(_.HasCapability(spv::Capability::AtomicFloat16VectorNV) &&
_.IsFloat16Vector2Or4Type(ptr_type) &&
_.GetIdOpcode(info.sampled_type) == spv::Op::OpTypeFloat &&

View File

@ -34,11 +34,13 @@ const uint32_t kMaxLocations = 4096 * 4;
bool is_interface_variable(const Instruction* inst, bool is_spv_1_4) {
if (is_spv_1_4) {
// Starting in SPIR-V 1.4, all global variables are interface variables.
return inst->opcode() == spv::Op::OpVariable &&
return (inst->opcode() == spv::Op::OpVariable ||
inst->opcode() == spv::Op::OpUntypedVariableKHR) &&
inst->GetOperandAs<spv::StorageClass>(2u) !=
spv::StorageClass::Function;
} else {
return inst->opcode() == spv::Op::OpVariable &&
return (inst->opcode() == spv::Op::OpVariable ||
inst->opcode() == spv::Op::OpUntypedVariableKHR) &&
(inst->GetOperandAs<spv::StorageClass>(2u) ==
spv::StorageClass::Input ||
inst->GetOperandAs<spv::StorageClass>(2u) ==
@ -242,8 +244,9 @@ spv_result_t GetLocationsForVariable(
std::unordered_set<uint32_t>* output_index1_locations) {
const bool is_fragment = entry_point->GetOperandAs<spv::ExecutionModel>(0) ==
spv::ExecutionModel::Fragment;
const bool is_output =
variable->GetOperandAs<spv::StorageClass>(2) == spv::StorageClass::Output;
const auto sc_index = 2u;
const bool is_output = variable->GetOperandAs<spv::StorageClass>(sc_index) ==
spv::StorageClass::Output;
auto ptr_type_id = variable->GetOperandAs<uint32_t>(0);
auto ptr_type = _.FindDef(ptr_type_id);
auto type_id = ptr_type->GetOperandAs<uint32_t>(2);
@ -525,7 +528,9 @@ spv_result_t ValidateLocations(ValidationState_t& _,
for (uint32_t i = 3; i < entry_point->operands().size(); ++i) {
auto interface_id = entry_point->GetOperandAs<uint32_t>(i);
auto interface_var = _.FindDef(interface_id);
auto storage_class = interface_var->GetOperandAs<spv::StorageClass>(2);
const auto sc_index = 2u;
auto storage_class =
interface_var->GetOperandAs<spv::StorageClass>(sc_index);
if (storage_class != spv::StorageClass::Input &&
storage_class != spv::StorageClass::Output) {
continue;

View File

@ -159,9 +159,11 @@ spv_result_t LogicalsPass(ValidationState_t& _, const Instruction* inst) {
const spv::Op type_opcode = type_inst->opcode();
switch (type_opcode) {
case spv::Op::OpTypeUntypedPointerKHR:
case spv::Op::OpTypePointer: {
if (_.addressing_model() == spv::AddressingModel::Logical &&
!_.features().variable_pointers)
!_.HasCapability(
spv::Capability::VariablePointersStorageBuffer))
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Using pointers with OpSelect requires capability "
<< "VariablePointers or VariablePointersStorageBuffer";

View File

@ -407,19 +407,58 @@ spv_result_t CheckMemoryAccess(ValidationState_t& _, const Instruction* inst,
}
spv_result_t ValidateVariable(ValidationState_t& _, const Instruction* inst) {
const bool untyped_pointer = inst->opcode() == spv::Op::OpUntypedVariableKHR;
auto result_type = _.FindDef(inst->type_id());
if (!result_type || result_type->opcode() != spv::Op::OpTypePointer) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "OpVariable Result Type <id> " << _.getIdName(inst->type_id())
<< " is not a pointer type.";
if (untyped_pointer) {
if (!result_type ||
result_type->opcode() != spv::Op::OpTypeUntypedPointerKHR)
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Result type must be an untyped pointer";
} else {
if (!result_type || result_type->opcode() != spv::Op::OpTypePointer) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "OpVariable Result Type <id> " << _.getIdName(inst->type_id())
<< " is not a pointer type.";
}
}
const auto type_index = 2;
const auto value_id = result_type->GetOperandAs<uint32_t>(type_index);
auto value_type = _.FindDef(value_id);
const auto storage_class_index = 2u;
auto storage_class =
inst->GetOperandAs<spv::StorageClass>(storage_class_index);
uint32_t value_id = 0;
if (untyped_pointer) {
const auto has_data_type = 3u < inst->operands().size();
if (has_data_type) {
value_id = inst->GetOperandAs<uint32_t>(3u);
auto data_type = _.FindDef(value_id);
if (!data_type || !spvOpcodeGeneratesType(data_type->opcode())) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Data type must be a type instruction";
}
} else {
if (storage_class == spv::StorageClass::Function ||
storage_class == spv::StorageClass::Private ||
storage_class == spv::StorageClass::Workgroup) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Data type must be specified for Function, Private, and "
"Workgroup storage classes";
}
if (spvIsVulkanEnv(_.context()->target_env)) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Vulkan requires that data type be specified";
}
}
}
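  // Illustrative forms (assumed SPIR-V syntax) relative to the checks above:
  //   %v0 = OpUntypedVariableKHR %untyped_wg_ptr Workgroup %struct_t
  //   %v1 = OpUntypedVariableKHR %untyped_sb_ptr StorageBuffer
  // %v0 supplies the required data type; %v1 omits it, which is accepted for
  // StorageBuffer outside Vulkan but rejected under Vulkan by the check above.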
const auto initializer_index = 3;
const auto storage_class_index = 2;
  // For OpVariable the data type comes from the pointee type of the result type,
// while for OpUntypedVariableKHR the data type comes from the operand.
if (!untyped_pointer) {
value_id = result_type->GetOperandAs<uint32_t>(2);
}
auto value_type = value_id == 0 ? nullptr : _.FindDef(value_id);
const auto initializer_index = untyped_pointer ? 4u : 3u;
if (initializer_index < inst->operands().size()) {
const auto initializer_id = inst->GetOperandAs<uint32_t>(initializer_index);
const auto initializer = _.FindDef(initializer_id);
@ -431,18 +470,15 @@ spv_result_t ValidateVariable(ValidationState_t& _, const Instruction* inst) {
initializer && spvOpcodeIsConstant(initializer->opcode());
if (!initializer || !(is_constant || is_module_scope_var)) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "OpVariable Initializer <id> " << _.getIdName(initializer_id)
<< "Variable Initializer <id> " << _.getIdName(initializer_id)
<< " is not a constant or module-scope variable.";
}
if (initializer->type_id() != value_id) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Initializer type must match the type pointed to by the Result "
"Type";
<< "Initializer type must match the data type";
}
}
auto storage_class =
inst->GetOperandAs<spv::StorageClass>(storage_class_index);
if (storage_class != spv::StorageClass::Workgroup &&
storage_class != spv::StorageClass::CrossWorkgroup &&
storage_class != spv::StorageClass::Private &&
@ -466,7 +502,7 @@ spv_result_t ValidateVariable(ValidationState_t& _, const Instruction* inst) {
}
}
}
if (!builtin &&
if (!builtin && value_type &&
ContainsInvalidBool(_, value_type, storage_input_or_output)) {
if (storage_input_or_output) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
@ -495,7 +531,7 @@ spv_result_t ValidateVariable(ValidationState_t& _, const Instruction* inst) {
if (storage_class == spv::StorageClass::Generic) {
return _.diag(SPV_ERROR_INVALID_BINARY, inst)
<< "OpVariable storage class cannot be Generic";
<< "Variable storage class cannot be Generic";
}
if (inst->function() && storage_class != spv::StorageClass::Function) {
@ -517,17 +553,17 @@ spv_result_t ValidateVariable(ValidationState_t& _, const Instruction* inst) {
result_type->GetOperandAs<spv::StorageClass>(result_storage_class_index);
if (storage_class != result_storage_class) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "From SPIR-V spec, section 3.32.8 on OpVariable:\n"
<< "Its Storage Class operand must be the same as the Storage Class "
<< "operand of the result type.";
<< "Storage class must match result type storage class";
}
// Variable pointer related restrictions.
const auto pointee = _.FindDef(result_type->word(3));
const auto pointee = untyped_pointer
? value_id == 0 ? nullptr : _.FindDef(value_id)
: _.FindDef(result_type->word(3));
if (_.addressing_model() == spv::AddressingModel::Logical &&
!_.options()->relax_logical_pointer) {
// VariablePointersStorageBuffer is implied by VariablePointers.
if (pointee->opcode() == spv::Op::OpTypePointer) {
if (pointee && pointee->opcode() == spv::Op::OpTypePointer) {
if (!_.HasCapability(spv::Capability::VariablePointersStorageBuffer)) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "In Logical addressing, variables may not allocate a pointer "
@ -546,7 +582,7 @@ spv_result_t ValidateVariable(ValidationState_t& _, const Instruction* inst) {
// Vulkan Push Constant Interface section: Check type of PushConstant
// variables.
if (storage_class == spv::StorageClass::PushConstant) {
if (pointee->opcode() != spv::Op::OpTypeStruct) {
if (pointee && pointee->opcode() != spv::Op::OpTypeStruct) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< _.VkErrorID(6808) << "PushConstant OpVariable <id> "
<< _.getIdName(inst->id()) << " has illegal type.\n"
@ -558,11 +594,11 @@ spv_result_t ValidateVariable(ValidationState_t& _, const Instruction* inst) {
// Vulkan Descriptor Set Interface: Check type of UniformConstant and
// Uniform variables.
if (storage_class == spv::StorageClass::UniformConstant) {
if (!IsAllowedTypeOrArrayOfSame(
_, pointee,
{spv::Op::OpTypeImage, spv::Op::OpTypeSampler,
spv::Op::OpTypeSampledImage,
spv::Op::OpTypeAccelerationStructureKHR})) {
if (pointee && !IsAllowedTypeOrArrayOfSame(
_, pointee,
{spv::Op::OpTypeImage, spv::Op::OpTypeSampler,
spv::Op::OpTypeSampledImage,
spv::Op::OpTypeAccelerationStructureKHR})) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< _.VkErrorID(4655) << "UniformConstant OpVariable <id> "
<< _.getIdName(inst->id()) << " has illegal type.\n"
@ -575,7 +611,8 @@ spv_result_t ValidateVariable(ValidationState_t& _, const Instruction* inst) {
}
if (storage_class == spv::StorageClass::Uniform) {
if (!IsAllowedTypeOrArrayOfSame(_, pointee, {spv::Op::OpTypeStruct})) {
if (pointee &&
!IsAllowedTypeOrArrayOfSame(_, pointee, {spv::Op::OpTypeStruct})) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< _.VkErrorID(6807) << "Uniform OpVariable <id> "
<< _.getIdName(inst->id()) << " has illegal type.\n"
@ -588,7 +625,8 @@ spv_result_t ValidateVariable(ValidationState_t& _, const Instruction* inst) {
}
if (storage_class == spv::StorageClass::StorageBuffer) {
if (!IsAllowedTypeOrArrayOfSame(_, pointee, {spv::Op::OpTypeStruct})) {
if (pointee &&
!IsAllowedTypeOrArrayOfSame(_, pointee, {spv::Op::OpTypeStruct})) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< _.VkErrorID(6807) << "StorageBuffer OpVariable <id> "
<< _.getIdName(inst->id()) << " has illegal type.\n"
@ -621,11 +659,17 @@ spv_result_t ValidateVariable(ValidationState_t& _, const Instruction* inst) {
}
}
}
}
  // Initializers in Vulkan are only allowed in some storage classes
if (inst->operands().size() > 3) {
    // Vulkan Appendix A: Check that if it contains an initializer, then the
    // storage class is Output, Private, or Function.
if (inst->operands().size() > initializer_index &&
storage_class != spv::StorageClass::Output &&
storage_class != spv::StorageClass::Private &&
storage_class != spv::StorageClass::Function) {
if (spvIsVulkanEnv(_.context()->target_env)) {
if (storage_class == spv::StorageClass::Workgroup) {
auto init_id = inst->GetOperandAs<uint32_t>(3);
auto init_id = inst->GetOperandAs<uint32_t>(initializer_index);
auto init = _.FindDef(init_id);
if (init->opcode() != spv::Op::OpConstantNull) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
@ -652,7 +696,7 @@ spv_result_t ValidateVariable(ValidationState_t& _, const Instruction* inst) {
}
}
if (inst->operands().size() > 3) {
if (initializer_index < inst->operands().size()) {
if (storage_class == spv::StorageClass::TaskPayloadWorkgroupEXT) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "OpVariable, <id> " << _.getIdName(inst->id())
@ -676,10 +720,10 @@ spv_result_t ValidateVariable(ValidationState_t& _, const Instruction* inst) {
}
auto pointee_base = pointee;
while (pointee_base->opcode() == spv::Op::OpTypeArray) {
while (pointee_base && pointee_base->opcode() == spv::Op::OpTypeArray) {
pointee_base = _.FindDef(pointee_base->GetOperandAs<uint32_t>(1u));
}
if (pointee_base->opcode() == spv::Op::OpTypePointer) {
if (pointee_base && pointee_base->opcode() == spv::Op::OpTypePointer) {
if (pointee_base->GetOperandAs<spv::StorageClass>(1u) ==
spv::StorageClass::PhysicalStorageBuffer) {
// check for AliasedPointer/RestrictPointer
@ -769,7 +813,7 @@ spv_result_t ValidateVariable(ValidationState_t& _, const Instruction* inst) {
// Cooperative matrix types can only be allocated in Function or Private
if ((storage_class != spv::StorageClass::Function &&
storage_class != spv::StorageClass::Private) &&
ContainsCooperativeMatrix(_, pointee)) {
pointee && ContainsCooperativeMatrix(_, pointee)) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Cooperative matrix types (or types containing them) can only be "
"allocated "
@ -785,7 +829,8 @@ spv_result_t ValidateVariable(ValidationState_t& _, const Instruction* inst) {
(!_.HasCapability(spv::Capability::Float16) &&
_.ContainsSizedIntOrFloatType(value_id, spv::Op::OpTypeFloat, 16))) {
auto underlying_type = value_type;
while (underlying_type->opcode() == spv::Op::OpTypePointer) {
while (underlying_type &&
underlying_type->opcode() == spv::Op::OpTypePointer) {
storage_class = underlying_type->GetOperandAs<spv::StorageClass>(1u);
underlying_type =
_.FindDef(underlying_type->GetOperandAs<uint32_t>(2u));
@ -801,7 +846,8 @@ spv_result_t ValidateVariable(ValidationState_t& _, const Instruction* inst) {
}
break;
case spv::StorageClass::Uniform:
if (!_.HasCapability(
if (underlying_type &&
!_.HasCapability(
spv::Capability::UniformAndStorageBuffer16BitAccess)) {
if (underlying_type->opcode() == spv::Op::OpTypeArray ||
underlying_type->opcode() == spv::Op::OpTypeRuntimeArray) {
@ -849,7 +895,8 @@ spv_result_t ValidateVariable(ValidationState_t& _, const Instruction* inst) {
if (!_.HasCapability(spv::Capability::Int8) &&
_.ContainsSizedIntOrFloatType(value_id, spv::Op::OpTypeInt, 8)) {
auto underlying_type = value_type;
while (underlying_type->opcode() == spv::Op::OpTypePointer) {
while (underlying_type &&
underlying_type->opcode() == spv::Op::OpTypePointer) {
storage_class = underlying_type->GetOperandAs<spv::StorageClass>(1u);
underlying_type =
_.FindDef(underlying_type->GetOperandAs<uint32_t>(2u));
@ -865,7 +912,8 @@ spv_result_t ValidateVariable(ValidationState_t& _, const Instruction* inst) {
}
break;
case spv::StorageClass::Uniform:
if (!_.HasCapability(
if (underlying_type &&
!_.HasCapability(
spv::Capability::UniformAndStorageBuffer8BitAccess)) {
if (underlying_type->opcode() == spv::Op::OpTypeArray ||
underlying_type->opcode() == spv::Op::OpTypeRuntimeArray) {
@ -930,21 +978,23 @@ spv_result_t ValidateLoad(ValidationState_t& _, const Instruction* inst) {
}
const auto pointer_type = _.FindDef(pointer->type_id());
if (!pointer_type || pointer_type->opcode() != spv::Op::OpTypePointer) {
if (!pointer_type ||
(pointer_type->opcode() != spv::Op::OpTypePointer &&
pointer_type->opcode() != spv::Op::OpTypeUntypedPointerKHR)) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "OpLoad type for pointer <id> " << _.getIdName(pointer_id)
<< " is not a pointer type.";
}
uint32_t pointee_data_type;
spv::StorageClass storage_class;
if (!_.GetPointerTypeInfo(pointer_type->id(), &pointee_data_type,
&storage_class) ||
result_type->id() != pointee_data_type) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "OpLoad Result Type <id> " << _.getIdName(inst->type_id())
<< " does not match Pointer <id> " << _.getIdName(pointer->id())
<< "s type.";
if (pointer_type->opcode() == spv::Op::OpTypePointer) {
const auto pointee_type =
_.FindDef(pointer_type->GetOperandAs<uint32_t>(2));
if (!pointee_type || result_type->id() != pointee_type->id()) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "OpLoad Result Type <id> " << _.getIdName(inst->type_id())
<< " does not match Pointer <id> " << _.getIdName(pointer->id())
<< "s type.";
}
}
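  // An untyped pointer carries no pointee type to compare against, so the
  // match check above applies only to typed pointers; for untyped pointers
  // the loaded type is taken solely from the Result Type operand.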
if (!_.options()->before_hlsl_legalization &&
@ -987,17 +1037,23 @@ spv_result_t ValidateStore(ValidationState_t& _, const Instruction* inst) {
<< " is not a logical pointer.";
}
const auto pointer_type = _.FindDef(pointer->type_id());
if (!pointer_type || pointer_type->opcode() != spv::Op::OpTypePointer) {
if (!pointer_type ||
(pointer_type->opcode() != spv::Op::OpTypePointer &&
pointer_type->opcode() != spv::Op::OpTypeUntypedPointerKHR)) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "OpStore type for pointer <id> " << _.getIdName(pointer_id)
<< " is not a pointer type.";
}
const auto type_id = pointer_type->GetOperandAs<uint32_t>(2);
const auto type = _.FindDef(type_id);
if (!type || spv::Op::OpTypeVoid == type->opcode()) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "OpStore Pointer <id> " << _.getIdName(pointer_id)
<< "s type is void.";
Instruction* type = nullptr;
if (pointer_type->opcode() == spv::Op::OpTypePointer) {
const auto type_id = pointer_type->GetOperandAs<uint32_t>(2);
type = _.FindDef(type_id);
if (!type || spv::Op::OpTypeVoid == type->opcode()) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "OpStore Pointer <id> " << _.getIdName(pointer_id)
<< "s type is void.";
}
}
// validate storage class
@ -1074,7 +1130,7 @@ spv_result_t ValidateStore(ValidationState_t& _, const Instruction* inst) {
<< "s type is void.";
}
if (type->id() != object_type->id()) {
if (type && (type->id() != object_type->id())) {
if (!_.options()->relax_struct_store ||
type->opcode() != spv::Op::OpTypeStruct ||
object_type->opcode() != spv::Op::OpTypeStruct) {
@ -1179,7 +1235,8 @@ spv_result_t ValidateCopyMemory(ValidationState_t& _, const Instruction* inst) {
const auto target_pointer_type = _.FindDef(target->type_id());
if (!target_pointer_type ||
target_pointer_type->opcode() != spv::Op::OpTypePointer) {
(target_pointer_type->opcode() != spv::Op::OpTypePointer &&
target_pointer_type->opcode() != spv::Op::OpTypeUntypedPointerKHR)) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Target operand <id> " << _.getIdName(target_id)
<< " is not a pointer.";
@ -1187,35 +1244,52 @@ spv_result_t ValidateCopyMemory(ValidationState_t& _, const Instruction* inst) {
const auto source_pointer_type = _.FindDef(source->type_id());
if (!source_pointer_type ||
source_pointer_type->opcode() != spv::Op::OpTypePointer) {
(source_pointer_type->opcode() != spv::Op::OpTypePointer &&
source_pointer_type->opcode() != spv::Op::OpTypeUntypedPointerKHR)) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Source operand <id> " << _.getIdName(source_id)
<< " is not a pointer.";
}
if (inst->opcode() == spv::Op::OpCopyMemory) {
const auto target_type =
_.FindDef(target_pointer_type->GetOperandAs<uint32_t>(2));
if (!target_type || target_type->opcode() == spv::Op::OpTypeVoid) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Target operand <id> " << _.getIdName(target_id)
<< " cannot be a void pointer.";
const bool target_typed =
target_pointer_type->opcode() == spv::Op::OpTypePointer;
const bool source_typed =
source_pointer_type->opcode() == spv::Op::OpTypePointer;
Instruction* target_type = nullptr;
Instruction* source_type = nullptr;
if (target_typed) {
target_type = _.FindDef(target_pointer_type->GetOperandAs<uint32_t>(2));
if (!target_type || target_type->opcode() == spv::Op::OpTypeVoid) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Target operand <id> " << _.getIdName(target_id)
<< " cannot be a void pointer.";
}
}
const auto source_type =
_.FindDef(source_pointer_type->GetOperandAs<uint32_t>(2));
if (!source_type || source_type->opcode() == spv::Op::OpTypeVoid) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Source operand <id> " << _.getIdName(source_id)
<< " cannot be a void pointer.";
if (source_typed) {
source_type = _.FindDef(source_pointer_type->GetOperandAs<uint32_t>(2));
if (!source_type || source_type->opcode() == spv::Op::OpTypeVoid) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Source operand <id> " << _.getIdName(source_id)
<< " cannot be a void pointer.";
}
}
if (target_type->id() != source_type->id()) {
if (target_type && source_type && target_type->id() != source_type->id()) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Target <id> " << _.getIdName(source_id)
<< "s type does not match Source <id> "
<< _.getIdName(source_type->id()) << "s type.";
}
if (!target_type && !source_type) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "One of Source or Target must be a typed pointer";
}
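    // Sketch (assumed syntax): mixing one typed and one untyped operand is
    // accepted here, e.g.
    //   OpCopyMemory %typed_target %untyped_source
    // Only the case where both operands are untyped is rejected above.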
if (auto error = CheckMemoryAccess(_, inst, 2)) return error;
} else {
const auto size_id = inst->GetOperandAs<uint32_t>(2);
const auto size = _.FindDef(size_id);
@ -1231,7 +1305,6 @@ spv_result_t ValidateCopyMemory(ValidationState_t& _, const Instruction* inst) {
<< "Size operand <id> " << _.getIdName(size_id)
<< " must be a scalar integer type.";
}
bool is_zero = true;
switch (size->opcode()) {
case spv::Op::OpConstantNull:
@ -1258,18 +1331,125 @@ spv_result_t ValidateCopyMemory(ValidationState_t& _, const Instruction* inst) {
// Cannot infer any other opcodes.
break;
}
if (_.HasCapability(spv::Capability::Shader)) {
bool is_int = false;
bool is_const = false;
uint32_t value = 0;
std::tie(is_int, is_const, value) = _.EvalInt32IfConst(size_id);
if (is_const) {
if (value % 4 != 0) {
const auto source_sc =
source_pointer_type->GetOperandAs<spv::StorageClass>(1);
const auto target_sc =
target_pointer_type->GetOperandAs<spv::StorageClass>(1);
const bool int8 = _.HasCapability(spv::Capability::Int8);
const bool ubo_int8 = _.HasCapability(
spv::Capability::UniformAndStorageBuffer8BitAccess);
const bool ssbo_int8 =
_.HasCapability(spv::Capability::StorageBuffer8BitAccess) ||
ubo_int8;
const bool pc_int8 =
_.HasCapability(spv::Capability::StoragePushConstant8);
const bool wg_int8 = _.HasCapability(
spv::Capability::WorkgroupMemoryExplicitLayout8BitAccessKHR);
const bool int16 = _.HasCapability(spv::Capability::Int16) || int8;
const bool ubo_int16 =
_.HasCapability(
spv::Capability::UniformAndStorageBuffer16BitAccess) ||
ubo_int8;
const bool ssbo_int16 =
_.HasCapability(spv::Capability::StorageBuffer16BitAccess) ||
ubo_int16 || ssbo_int8;
const bool pc_int16 =
_.HasCapability(spv::Capability::StoragePushConstant16) ||
pc_int8;
const bool io_int16 =
_.HasCapability(spv::Capability::StorageInputOutput16);
const bool wg_int16 = _.HasCapability(
spv::Capability::WorkgroupMemoryExplicitLayout16BitAccessKHR);
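            // These flags record whether each storage class has a capability
            // permitting 16-bit (resp. 8-bit) access; together with Int8 and
            // Int16 they relax the multiple-of-4 / multiple-of-2 size
            // requirements enforced below.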
bool source_int16_match = false;
bool target_int16_match = false;
bool source_int8_match = false;
bool target_int8_match = false;
switch (source_sc) {
case spv::StorageClass::StorageBuffer:
source_int16_match = ssbo_int16;
source_int8_match = ssbo_int8;
break;
case spv::StorageClass::Uniform:
source_int16_match = ubo_int16;
source_int8_match = ubo_int8;
break;
case spv::StorageClass::PushConstant:
source_int16_match = pc_int16;
source_int8_match = pc_int8;
break;
case spv::StorageClass::Input:
case spv::StorageClass::Output:
source_int16_match = io_int16;
break;
case spv::StorageClass::Workgroup:
source_int16_match = wg_int16;
source_int8_match = wg_int8;
break;
default:
break;
}
switch (target_sc) {
case spv::StorageClass::StorageBuffer:
target_int16_match = ssbo_int16;
target_int8_match = ssbo_int8;
break;
case spv::StorageClass::Uniform:
target_int16_match = ubo_int16;
target_int8_match = ubo_int8;
break;
case spv::StorageClass::PushConstant:
target_int16_match = pc_int16;
target_int8_match = pc_int8;
break;
// Input is read-only so it cannot be the target pointer.
case spv::StorageClass::Output:
target_int16_match = io_int16;
break;
case spv::StorageClass::Workgroup:
target_int16_match = wg_int16;
target_int8_match = wg_int8;
break;
default:
break;
}
if (!int8 && !int16 && !(source_int16_match && target_int16_match)) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Size must be a multiple of 4";
}
if (value % 2 != 0) {
if (!int8 && !(source_int8_match && target_int8_match)) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Size must be a multiple of 2";
}
}
}
}
}
if (auto error = CheckMemoryAccess(_, inst, 3)) return error;
}
if (auto error = ValidateCopyMemoryMemoryAccess(_, inst)) return error;
// Get past the pointers to avoid checking a pointer copy.
auto sub_type = _.FindDef(target_pointer_type->GetOperandAs<uint32_t>(2));
while (sub_type->opcode() == spv::Op::OpTypePointer) {
sub_type = _.FindDef(sub_type->GetOperandAs<uint32_t>(2));
}
if (_.HasCapability(spv::Capability::Shader) &&
_.ContainsLimitedUseIntOrFloatType(sub_type->id())) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Cannot copy memory of objects containing 8- or 16-bit types";
if (target_pointer_type->opcode() == spv::Op::OpTypePointer) {
auto sub_type = _.FindDef(target_pointer_type->GetOperandAs<uint32_t>(2));
while (sub_type->opcode() == spv::Op::OpTypePointer) {
sub_type = _.FindDef(sub_type->GetOperandAs<uint32_t>(2));
}
if (_.HasCapability(spv::Capability::Shader) &&
_.ContainsLimitedUseIntOrFloatType(sub_type->id())) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Cannot copy memory of objects containing 8- or 16-bit types";
}
}
return SPV_SUCCESS;
@ -1280,27 +1460,50 @@ spv_result_t ValidateAccessChain(ValidationState_t& _,
std::string instr_name =
"Op" + std::string(spvOpcodeString(static_cast<spv::Op>(inst->opcode())));
// The result type must be OpTypePointer.
const bool untyped_pointer = spvOpcodeGeneratesUntypedPointer(inst->opcode());
// The result type must be OpTypePointer for regular access chains and an
// OpTypeUntypedPointerKHR for untyped access chains.
auto result_type = _.FindDef(inst->type_id());
if (spv::Op::OpTypePointer != result_type->opcode()) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "The Result Type of " << instr_name << " <id> "
<< _.getIdName(inst->id()) << " must be OpTypePointer. Found Op"
<< spvOpcodeString(static_cast<spv::Op>(result_type->opcode()))
<< ".";
if (untyped_pointer) {
if (!result_type ||
spv::Op::OpTypeUntypedPointerKHR != result_type->opcode()) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "The Result Type of " << instr_name << " <id> "
<< _.getIdName(inst->id())
<< " must be OpTypeUntypedPointerKHR. Found Op"
<< spvOpcodeString(static_cast<spv::Op>(result_type->opcode()))
<< ".";
}
} else {
if (!result_type || spv::Op::OpTypePointer != result_type->opcode()) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "The Result Type of " << instr_name << " <id> "
<< _.getIdName(inst->id()) << " must be OpTypePointer. Found Op"
<< spvOpcodeString(static_cast<spv::Op>(result_type->opcode()))
<< ".";
}
}
// Result type is a pointer. Find out what it's pointing to.
// This will be used to make sure the indexing results in the same type.
// OpTypePointer word 3 is the type being pointed to.
const auto result_type_pointee = _.FindDef(result_type->word(3));
if (untyped_pointer) {
// Base type must be a non-pointer type.
const auto base_type = _.FindDef(inst->GetOperandAs<uint32_t>(2));
if (!base_type || !spvOpcodeGeneratesType(base_type->opcode()) ||
base_type->opcode() == spv::Op::OpTypePointer ||
base_type->opcode() == spv::Op::OpTypeUntypedPointerKHR) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Base type must be a non-pointer type";
}
}
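  // Sketch (assumed SPIR-V syntax) of an untyped chain:
  //   %p = OpUntypedAccessChainKHR %untyped_ptr %base_t %base %i0 %i1
  // The extra leading Base Type operand shifts the base pointer and the
  // indices one position later than in OpAccessChain, which the operand
  // indices used below account for.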
// Base must be a pointer, pointing to the base of a composite object.
const auto base_index = 2;
const auto base_index = untyped_pointer ? 3 : 2;
const auto base_id = inst->GetOperandAs<uint32_t>(base_index);
const auto base = _.FindDef(base_id);
const auto base_type = _.FindDef(base->type_id());
if (!base_type || spv::Op::OpTypePointer != base_type->opcode()) {
if (!base_type || !(spv::Op::OpTypePointer == base_type->opcode() ||
(untyped_pointer && spv::Op::OpTypeUntypedPointerKHR ==
base_type->opcode()))) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "The Base <id> " << _.getIdName(base_id) << " in " << instr_name
<< " instruction must be a pointer.";
@ -1318,14 +1521,18 @@ spv_result_t ValidateAccessChain(ValidationState_t& _,
}
// The type pointed to by OpTypePointer (word 3) must be a composite type.
auto type_pointee = _.FindDef(base_type->word(3));
auto type_pointee = untyped_pointer
? _.FindDef(inst->GetOperandAs<uint32_t>(2))
: _.FindDef(base_type->word(3));
// Check Universal Limit (SPIR-V Spec. Section 2.17).
// The number of indexes passed to OpAccessChain may not exceed 255
// The instruction includes 4 words + N words (for N indexes)
size_t num_indexes = inst->words().size() - 4;
if (inst->opcode() == spv::Op::OpPtrAccessChain ||
inst->opcode() == spv::Op::OpInBoundsPtrAccessChain) {
inst->opcode() == spv::Op::OpInBoundsPtrAccessChain ||
inst->opcode() == spv::Op::OpUntypedPtrAccessChainKHR ||
inst->opcode() == spv::Op::OpUntypedInBoundsPtrAccessChainKHR) {
// In pointer access chains, the element operand is required, but not
// counted as an index.
--num_indexes;
@ -1344,9 +1551,11 @@ spv_result_t ValidateAccessChain(ValidationState_t& _,
// instruction. The second index will apply similarly to that result, and so
// on. Once any non-composite type is reached, there must be no remaining
// (unused) indexes.
auto starting_index = 4;
auto starting_index = untyped_pointer ? 5 : 4;
if (inst->opcode() == spv::Op::OpPtrAccessChain ||
inst->opcode() == spv::Op::OpInBoundsPtrAccessChain) {
inst->opcode() == spv::Op::OpInBoundsPtrAccessChain ||
inst->opcode() == spv::Op::OpUntypedPtrAccessChainKHR ||
inst->opcode() == spv::Op::OpUntypedInBoundsPtrAccessChainKHR) {
++starting_index;
}
for (size_t i = starting_index; i < inst->words().size(); ++i) {
@ -1411,18 +1620,25 @@ spv_result_t ValidateAccessChain(ValidationState_t& _,
}
}
}
// At this point, we have fully walked down from the base using the indices.
// The type being pointed to should be the same as the result type.
if (type_pointee->id() != result_type_pointee->id()) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< instr_name << " result type (Op"
<< spvOpcodeString(
static_cast<spv::Op>(result_type_pointee->opcode()))
<< ") does not match the type that results from indexing into the "
"base "
"<id> (Op"
<< spvOpcodeString(static_cast<spv::Op>(type_pointee->opcode()))
<< ").";
if (!untyped_pointer) {
// Result type is a pointer. Find out what it's pointing to.
// This will be used to make sure the indexing results in the same type.
// OpTypePointer word 3 is the type being pointed to.
const auto result_type_pointee = _.FindDef(result_type->word(3));
    // At this point, we have fully walked down from the base using the indices.
// The type being pointed to should be the same as the result type.
if (type_pointee->id() != result_type_pointee->id()) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< instr_name << " result type (Op"
<< spvOpcodeString(
static_cast<spv::Op>(result_type_pointee->opcode()))
<< ") does not match the type that results from indexing into the "
"base "
"<id> (Op"
<< spvOpcodeString(static_cast<spv::Op>(type_pointee->opcode()))
<< ").";
}
}
return SPV_SUCCESS;
@ -1550,7 +1766,8 @@ spv_result_t ValidateRawAccessChain(ValidationState_t& _,
spv_result_t ValidatePtrAccessChain(ValidationState_t& _,
const Instruction* inst) {
if (_.addressing_model() == spv::AddressingModel::Logical) {
if (_.addressing_model() == spv::AddressingModel::Logical &&
inst->opcode() == spv::Op::OpPtrAccessChain) {
if (!_.features().variable_pointers) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Generating variable pointers requires capability "
@ -1561,9 +1778,13 @@ spv_result_t ValidatePtrAccessChain(ValidationState_t& _,
// Need to call first, will make sure Base is a valid ID
if (auto error = ValidateAccessChain(_, inst)) return error;
const bool untyped_pointer = spvOpcodeGeneratesUntypedPointer(inst->opcode());
const auto base_id = inst->GetOperandAs<uint32_t>(2);
const auto base = _.FindDef(base_id);
const auto base_type = _.FindDef(base->type_id());
const auto base_type = untyped_pointer
? _.FindDef(inst->GetOperandAs<uint32_t>(2))
: _.FindDef(base->type_id());
const auto base_type_storage_class =
base_type->GetOperandAs<spv::StorageClass>(1);
@ -1581,15 +1802,17 @@ spv_result_t ValidatePtrAccessChain(ValidationState_t& _,
}
if (spvIsVulkanEnv(_.context()->target_env)) {
const auto untyped_cap =
untyped_pointer && _.HasCapability(spv::Capability::UntypedPointersKHR);
if (base_type_storage_class == spv::StorageClass::Workgroup) {
if (!_.HasCapability(spv::Capability::VariablePointers)) {
if (!_.HasCapability(spv::Capability::VariablePointers) && !untyped_cap) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< _.VkErrorID(7651)
<< "OpPtrAccessChain Base operand pointing to Workgroup "
"storage class must use VariablePointers capability";
}
} else if (base_type_storage_class == spv::StorageClass::StorageBuffer) {
if (!_.features().variable_pointers) {
if (!_.features().variable_pointers && !untyped_cap) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< _.VkErrorID(7652)
<< "OpPtrAccessChain Base operand pointing to StorageBuffer "
@ -1597,7 +1820,8 @@ spv_result_t ValidatePtrAccessChain(ValidationState_t& _,
"VariablePointersStorageBuffer capability";
}
} else if (base_type_storage_class !=
spv::StorageClass::PhysicalStorageBuffer) {
spv::StorageClass::PhysicalStorageBuffer &&
!untyped_cap) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< _.VkErrorID(7650)
<< "OpPtrAccessChain Base operand must point to Workgroup, "
@ -1624,18 +1848,28 @@ spv_result_t ValidateArrayLength(ValidationState_t& state,
<< " must be OpTypeInt with width 32 and signedness 0.";
}
  // The structure that is passed in must be a pointer to a structure, whose
// last element is a runtime array.
auto pointer = state.FindDef(inst->GetOperandAs<uint32_t>(2));
auto pointer_type = state.FindDef(pointer->type_id());
if (pointer_type->opcode() != spv::Op::OpTypePointer) {
const bool untyped = inst->opcode() == spv::Op::OpUntypedArrayLengthKHR;
auto pointer_ty_id = state.GetOperandTypeId(inst, (untyped ? 3 : 2));
auto pointer_ty = state.FindDef(pointer_ty_id);
if (untyped) {
if (pointer_ty->opcode() != spv::Op::OpTypeUntypedPointerKHR) {
return state.diag(SPV_ERROR_INVALID_ID, inst)
<< "Pointer must be an untyped pointer";
}
} else if (pointer_ty->opcode() != spv::Op::OpTypePointer) {
return state.diag(SPV_ERROR_INVALID_ID, inst)
<< "The Structure's type in " << instr_name << " <id> "
<< state.getIdName(inst->id())
<< " must be a pointer to an OpTypeStruct.";
}
auto structure_type = state.FindDef(pointer_type->GetOperandAs<uint32_t>(2));
Instruction* structure_type = nullptr;
if (untyped) {
structure_type = state.FindDef(inst->GetOperandAs<uint32_t>(2));
} else {
structure_type = state.FindDef(pointer_ty->GetOperandAs<uint32_t>(2));
}
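  // Sketch (assumed SPIR-V syntax):
  //   %len = OpUntypedArrayLengthKHR %uint %struct_t %untyped_ptr 3
  // The structure type is operand 2 and the pointer operand 3, so the member
  // index (the last member of %struct_t) is read from operand 4 below.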
if (structure_type->opcode() != spv::Op::OpTypeStruct) {
return state.diag(SPV_ERROR_INVALID_ID, inst)
<< "The Structure's type in " << instr_name << " <id> "
@ -1654,11 +1888,12 @@ spv_result_t ValidateArrayLength(ValidationState_t& state,
  // The array member must be the index of the last element (the runtime
  // array).
if (inst->GetOperandAs<uint32_t>(3) != num_of_members - 1) {
const auto index = untyped ? 4 : 3;
if (inst->GetOperandAs<uint32_t>(index) != num_of_members - 1) {
return state.diag(SPV_ERROR_INVALID_ID, inst)
<< "The array member in " << instr_name << " <id> "
<< state.getIdName(inst->id())
<< " must be an the last member of the struct.";
<< " must be the last member of the struct.";
}
return SPV_SUCCESS;
}
@ -1843,12 +2078,16 @@ spv_result_t ValidateCooperativeMatrixLoadStoreKHR(ValidationState_t& _,
const auto pointer_type_id = pointer->type_id();
const auto pointer_type = _.FindDef(pointer_type_id);
if (!pointer_type || pointer_type->opcode() != spv::Op::OpTypePointer) {
if (!pointer_type ||
!(pointer_type->opcode() == spv::Op::OpTypePointer ||
pointer_type->opcode() == spv::Op::OpTypeUntypedPointerKHR)) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< opname << " type for pointer <id> " << _.getIdName(pointer_id)
<< " is not a pointer type.";
}
const bool untyped =
pointer_type->opcode() == spv::Op::OpTypeUntypedPointerKHR;
const auto storage_class_index = 1u;
const auto storage_class =
pointer_type->GetOperandAs<spv::StorageClass>(storage_class_index);
@ -1863,27 +2102,36 @@ spv_result_t ValidateCooperativeMatrixLoadStoreKHR(ValidationState_t& _,
<< " is not Workgroup, StorageBuffer, or PhysicalStorageBuffer.";
}
const auto pointee_id = pointer_type->GetOperandAs<uint32_t>(2);
const auto pointee_type = _.FindDef(pointee_id);
if (!pointee_type || !(_.IsIntScalarOrVectorType(pointee_id) ||
_.IsFloatScalarOrVectorType(pointee_id))) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< opname << " Pointer <id> " << _.getIdName(pointer->id())
<< "s Type must be a scalar or vector type.";
if (!untyped) {
const auto pointee_id = pointer_type->GetOperandAs<uint32_t>(2);
const auto pointee_type = _.FindDef(pointee_id);
if (!pointee_type || !(_.IsIntScalarOrVectorType(pointee_id) ||
_.IsFloatScalarOrVectorType(pointee_id))) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< opname << " Pointer <id> " << _.getIdName(pointer->id())
<< "s Type must be a scalar or vector type.";
}
}
const auto layout_index =
(inst->opcode() == spv::Op::OpCooperativeMatrixLoadKHR) ? 3u : 2u;
const auto colmajor_id = inst->GetOperandAs<uint32_t>(layout_index);
const auto colmajor = _.FindDef(colmajor_id);
if (!colmajor || !_.IsIntScalarType(colmajor->type_id()) ||
!(spvOpcodeIsConstant(colmajor->opcode()) ||
spvOpcodeIsSpecConstant(colmajor->opcode()))) {
const auto layout_id = inst->GetOperandAs<uint32_t>(layout_index);
const auto layout_inst = _.FindDef(layout_id);
if (!layout_inst || !_.IsIntScalarType(layout_inst->type_id()) ||
!spvOpcodeIsConstant(layout_inst->opcode())) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "MemoryLayout operand <id> " << _.getIdName(colmajor_id)
<< "MemoryLayout operand <id> " << _.getIdName(layout_id)
<< " must be a 32-bit integer constant instruction.";
}
bool stride_required = false;
uint64_t layout;
if (_.EvalConstantValUint64(layout_id, &layout)) {
stride_required =
(layout == (uint64_t)spv::CooperativeMatrixLayout::RowMajorKHR) ||
(layout == (uint64_t)spv::CooperativeMatrixLayout::ColumnMajorKHR);
}
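  // Only RowMajorKHR and ColumnMajorKHR require an explicit Stride; the other
  // layouts may omit it, which is what the check below enforces.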
const auto stride_index =
(inst->opcode() == spv::Op::OpCooperativeMatrixLoadKHR) ? 4u : 3u;
if (inst->operands().size() > stride_index) {
@ -1894,6 +2142,9 @@ spv_result_t ValidateCooperativeMatrixLoadStoreKHR(ValidationState_t& _,
<< "Stride operand <id> " << _.getIdName(stride_id)
<< " must be a scalar integer type.";
}
} else if (stride_required) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "MemoryLayout " << layout << " requires a Stride.";
}
const auto memory_access_index =
@ -1935,7 +2186,8 @@ spv_result_t ValidatePtrComparison(ValidationState_t& _,
<< "The types of Operand 1 and Operand 2 must match";
}
const auto op1_type = _.FindDef(op1->type_id());
if (!op1_type || op1_type->opcode() != spv::Op::OpTypePointer) {
if (!op1_type || (op1_type->opcode() != spv::Op::OpTypePointer &&
op1_type->opcode() != spv::Op::OpTypeUntypedPointerKHR)) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Operand type must be a pointer";
}
@ -1967,6 +2219,7 @@ spv_result_t ValidatePtrComparison(ValidationState_t& _,
spv_result_t MemoryPass(ValidationState_t& _, const Instruction* inst) {
switch (inst->opcode()) {
case spv::Op::OpVariable:
case spv::Op::OpUntypedVariableKHR:
if (auto error = ValidateVariable(_, inst)) return error;
break;
case spv::Op::OpLoad:
@ -1980,17 +2233,22 @@ spv_result_t MemoryPass(ValidationState_t& _, const Instruction* inst) {
if (auto error = ValidateCopyMemory(_, inst)) return error;
break;
case spv::Op::OpPtrAccessChain:
case spv::Op::OpUntypedPtrAccessChainKHR:
case spv::Op::OpUntypedInBoundsPtrAccessChainKHR:
if (auto error = ValidatePtrAccessChain(_, inst)) return error;
break;
case spv::Op::OpAccessChain:
case spv::Op::OpInBoundsAccessChain:
case spv::Op::OpInBoundsPtrAccessChain:
case spv::Op::OpUntypedAccessChainKHR:
case spv::Op::OpUntypedInBoundsAccessChainKHR:
if (auto error = ValidateAccessChain(_, inst)) return error;
break;
case spv::Op::OpRawAccessChainNV:
if (auto error = ValidateRawAccessChain(_, inst)) return error;
break;
case spv::Op::OpArrayLength:
case spv::Op::OpUntypedArrayLengthKHR:
if (auto error = ValidateArrayLength(_, inst)) return error;
break;
case spv::Op::OpCooperativeMatrixLoadNV:

View File

@ -36,6 +36,7 @@ spv_result_t ValidateUniqueness(ValidationState_t& _, const Instruction* inst) {
const auto opcode = inst->opcode();
if (opcode != spv::Op::OpTypeArray && opcode != spv::Op::OpTypeRuntimeArray &&
opcode != spv::Op::OpTypeStruct && opcode != spv::Op::OpTypePointer &&
opcode != spv::Op::OpTypeUntypedPointerKHR &&
!_.RegisterUniqueTypeDeclaration(inst)) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Duplicate non-aggregate type declarations are not allowed. "
@ -583,6 +584,33 @@ spv_result_t ValidateTypeCooperativeMatrix(ValidationState_t& _,
return SPV_SUCCESS;
}
spv_result_t ValidateTypeUntypedPointerKHR(ValidationState_t& _,
const Instruction* inst) {
if (spvIsVulkanEnv(_.context()->target_env)) {
const auto sc = inst->GetOperandAs<spv::StorageClass>(1);
switch (sc) {
case spv::StorageClass::Workgroup:
if (!_.HasCapability(
spv::Capability::WorkgroupMemoryExplicitLayoutKHR)) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Workgroup storage class untyped pointers in Vulkan "
"require WorkgroupMemoryExplicitLayoutKHR be declared";
}
break;
case spv::StorageClass::StorageBuffer:
case spv::StorageClass::PhysicalStorageBuffer:
case spv::StorageClass::Uniform:
case spv::StorageClass::PushConstant:
break;
default:
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "In Vulkan, untyped pointers can only be used in an "
"explicitly laid out storage class";
}
}
return SPV_SUCCESS;
}
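// Sketch (assumed SPIR-V syntax) of a declaration accepted under Vulkan:
//   %p = OpTypeUntypedPointerKHR StorageBuffer
// Function or Private storage classes would fall through to the default case
// above and be rejected.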
} // namespace
spv_result_t TypePass(ValidationState_t& _, const Instruction* inst) {
@ -628,6 +656,9 @@ spv_result_t TypePass(ValidationState_t& _, const Instruction* inst) {
case spv::Op::OpTypeCooperativeMatrixKHR:
if (auto error = ValidateTypeCooperativeMatrix(_, inst)) return error;
break;
case spv::Op::OpTypeUntypedPointerKHR:
if (auto error = ValidateTypeUntypedPointerKHR(_, inst)) return error;
break;
default:
break;
}

View File

@ -73,6 +73,7 @@ ModuleLayoutSection InstructionLayoutSection(
case spv::Op::OpTypeForwardPointer:
return kLayoutTypes;
case spv::Op::OpVariable:
case spv::Op::OpUntypedVariableKHR:
if (current_section == kLayoutTypes) return kLayoutTypes;
return kLayoutFunctionDefinitions;
case spv::Op::OpExtInst:
@ -869,6 +870,9 @@ uint32_t ValidationState_t::GetComponentType(uint32_t id) const {
case spv::Op::OpTypeBool:
return id;
case spv::Op::OpTypeArray:
return inst->word(2);
case spv::Op::OpTypeVector:
return inst->word(2);
@ -992,6 +996,19 @@ bool ValidationState_t::IsIntScalarType(uint32_t id) const {
return inst && inst->opcode() == spv::Op::OpTypeInt;
}
bool ValidationState_t::IsIntArrayType(uint32_t id) const {
const Instruction* inst = FindDef(id);
if (!inst) {
return false;
}
if (inst->opcode() == spv::Op::OpTypeArray) {
return IsIntScalarType(GetComponentType(id));
}
return false;
}
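// The relaxed printf format-string check in this change uses this to unwrap
// an integer array to its element type before testing for an 8-bit width.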
bool ValidationState_t::IsIntVectorType(uint32_t id) const {
const Instruction* inst = FindDef(id);
if (!inst) {
@ -1169,7 +1186,9 @@ bool ValidationState_t::GetStructMemberTypes(
bool ValidationState_t::IsPointerType(uint32_t id) const {
const Instruction* inst = FindDef(id);
return inst && inst->opcode() == spv::Op::OpTypePointer;
assert(inst);
return inst->opcode() == spv::Op::OpTypePointer ||
inst->opcode() == spv::Op::OpTypeUntypedPointerKHR;
}
bool ValidationState_t::GetPointerTypeInfo(
@ -1179,6 +1198,12 @@ bool ValidationState_t::GetPointerTypeInfo(
const Instruction* inst = FindDef(id);
assert(inst);
if (inst->opcode() == spv::Op::OpTypeUntypedPointerKHR) {
*storage_class = spv::StorageClass(inst->word(2));
*data_type = 0;
return true;
}
if (inst->opcode() != spv::Op::OpTypePointer) return false;
*storage_class = spv::StorageClass(inst->word(2));
@ -1689,6 +1714,39 @@ bool ValidationState_t::ContainsRuntimeArray(uint32_t id) const {
return ContainsType(id, f, /* traverse_all_types = */ false);
}
bool ValidationState_t::ContainsUntypedPointer(uint32_t id) const {
const auto inst = FindDef(id);
if (!inst) return false;
if (!spvOpcodeGeneratesType(inst->opcode())) return false;
if (inst->opcode() == spv::Op::OpTypeUntypedPointerKHR) return true;
switch (inst->opcode()) {
case spv::Op::OpTypeArray:
case spv::Op::OpTypeRuntimeArray:
case spv::Op::OpTypeVector:
case spv::Op::OpTypeMatrix:
case spv::Op::OpTypeImage:
case spv::Op::OpTypeSampledImage:
case spv::Op::OpTypeCooperativeMatrixNV:
return ContainsUntypedPointer(inst->GetOperandAs<uint32_t>(1u));
case spv::Op::OpTypePointer:
if (IsForwardPointer(id)) return false;
return ContainsUntypedPointer(inst->GetOperandAs<uint32_t>(2u));
case spv::Op::OpTypeFunction:
case spv::Op::OpTypeStruct: {
for (uint32_t i = 1; i < inst->operands().size(); ++i) {
if (ContainsUntypedPointer(inst->GetOperandAs<uint32_t>(i)))
return true;
}
return false;
}
default:
return false;
}
return false;
}
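// Note: pointees of typed pointers are traversed (operand 2 of
// OpTypePointer), but forward-declared pointers are not followed, which keeps
// the recursion from cycling through self-referential pointer types.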
bool ValidationState_t::IsValidStorageClass(
spv::StorageClass storage_class) const {
if (spvIsVulkanEnv(context()->target_env)) {

View File

@ -606,6 +606,7 @@ class ValidationState_t {
bool IsFloatScalarOrVectorType(uint32_t id) const;
bool IsFloatMatrixType(uint32_t id) const;
bool IsIntScalarType(uint32_t id) const;
bool IsIntArrayType(uint32_t id) const;
bool IsIntVectorType(uint32_t id) const;
bool IsIntScalarOrVectorType(uint32_t id) const;
bool IsUnsignedIntScalarType(uint32_t id) const;
@ -648,6 +649,9 @@ class ValidationState_t {
const std::function<bool(const Instruction*)>& f,
bool traverse_all_types = true) const;
  // Returns true if |id| is a type id that contains an untyped pointer.
bool ContainsUntypedPointer(uint32_t id) const;
  // Returns the type id of |id| if it has one, or zero otherwise.
uint32_t GetTypeId(uint32_t id) const;