Updated spirv-tools.

Бранимир Караџић 2023-11-03 17:50:15 -07:00
parent a651b93fac
commit 3e82b5bd67
38 changed files with 1393 additions and 1643 deletions

View File

@@ -1 +1 @@
"v2023.4", "SPIRV-Tools v2023.4 v2022.4-324-gfaa88377"
"v2023.5", "SPIRV-Tools v2023.5 v2022.4-368-g9e3a4402"

View File

@@ -16,6 +16,7 @@ static const spv::Capability pygen_variable_caps_DemoteToHelperInvocation[] = {s
static const spv::Capability pygen_variable_caps_DemoteToHelperInvocationEXT[] = {spv::Capability::DemoteToHelperInvocationEXT};
static const spv::Capability pygen_variable_caps_DerivativeControl[] = {spv::Capability::DerivativeControl};
static const spv::Capability pygen_variable_caps_DeviceEnqueue[] = {spv::Capability::DeviceEnqueue};
static const spv::Capability pygen_variable_caps_DisplacementMicromapNV[] = {spv::Capability::DisplacementMicromapNV};
static const spv::Capability pygen_variable_caps_DotProduct[] = {spv::Capability::DotProduct};
static const spv::Capability pygen_variable_caps_DotProductKHR[] = {spv::Capability::DotProductKHR};
static const spv::Capability pygen_variable_caps_ExpectAssumeKHR[] = {spv::Capability::ExpectAssumeKHR};
@@ -557,6 +558,8 @@ static const spv_opcode_desc_t kOpcodeTableEntries[] = {
{"SetMeshOutputsEXT", spv::Op::OpSetMeshOutputsEXT, 1, pygen_variable_caps_MeshShadingEXT, 2, {SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID}, 0, 0, 0, nullptr, 0xffffffffu, 0xffffffffu},
{"GroupNonUniformPartitionNV", spv::Op::OpGroupNonUniformPartitionNV, 1, pygen_variable_caps_GroupNonUniformPartitionedNV, 3, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID}, 1, 1, 1, pygen_variable_exts_SPV_NV_shader_subgroup_partitioned, 0xffffffffu, 0xffffffffu},
{"WritePackedPrimitiveIndices4x8NV", spv::Op::OpWritePackedPrimitiveIndices4x8NV, 1, pygen_variable_caps_MeshShadingNV, 2, {SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID}, 0, 0, 1, pygen_variable_exts_SPV_NV_mesh_shader, 0xffffffffu, 0xffffffffu},
{"FetchMicroTriangleVertexPositionNV", spv::Op::OpFetchMicroTriangleVertexPositionNV, 1, pygen_variable_caps_DisplacementMicromapNV, 7, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
{"FetchMicroTriangleVertexBarycentricNV", spv::Op::OpFetchMicroTriangleVertexBarycentricNV, 1, pygen_variable_caps_DisplacementMicromapNV, 7, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
{"ReportIntersectionKHR", spv::Op::OpReportIntersectionKHR, 2, pygen_variable_caps_RayTracingNVRayTracingKHR, 4, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID}, 1, 1, 2, pygen_variable_exts_SPV_NV_ray_tracingSPV_KHR_ray_tracing, 0xffffffffu, 0xffffffffu},
{"ReportIntersectionNV", spv::Op::OpReportIntersectionNV, 2, pygen_variable_caps_RayTracingNVRayTracingKHR, 4, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID}, 1, 1, 2, pygen_variable_exts_SPV_NV_ray_tracingSPV_KHR_ray_tracing, 0xffffffffu, 0xffffffffu},
{"IgnoreIntersectionNV", spv::Op::OpIgnoreIntersectionNV, 1, pygen_variable_caps_RayTracingNV, 0, {}, 0, 0, 1, pygen_variable_exts_SPV_NV_ray_tracing, 0xffffffffu, 0xffffffffu},

File diff suppressed because one or more lines are too long

View File

@@ -34,6 +34,7 @@ kSPV_INTEL_arbitrary_precision_floating_point,
kSPV_INTEL_arbitrary_precision_integers,
kSPV_INTEL_bfloat16_conversion,
kSPV_INTEL_blocking_pipes,
kSPV_INTEL_cache_controls,
kSPV_INTEL_debug_module,
kSPV_INTEL_device_side_avc_motion_estimation,
kSPV_INTEL_float_controls2,
@@ -105,6 +106,7 @@ kSPV_NVX_multiview_per_view_attributes,
kSPV_NV_bindless_texture,
kSPV_NV_compute_shader_derivatives,
kSPV_NV_cooperative_matrix,
kSPV_NV_displacement_micromap,
kSPV_NV_fragment_shader_barycentric,
kSPV_NV_geometry_shader_passthrough,
kSPV_NV_mesh_shader,

View File

@@ -38,3 +38,4 @@
{37, "heroseh", "Hero C Compiler", "heroseh Hero C Compiler"},
{38, "Meta", "SparkSL", "Meta SparkSL"},
{39, "SirLynix", "Nazara ShaderLang Compiler", "SirLynix Nazara ShaderLang Compiler"},
{40, "NVIDIA", "Slang Compiler", "NVIDIA Slang Compiler"},

View File

@@ -40,5 +40,6 @@ static const spv_ext_inst_desc_t nonsemantic_clspvreflection_entries[] = {
{"ProgramScopeVariablePointerPushConstant", 37, 0, nullptr, {SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_NONE}},
{"PrintfInfo", 38, 0, nullptr, {SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_VARIABLE_ID, SPV_OPERAND_TYPE_NONE}},
{"PrintfBufferStorageBuffer", 39, 0, nullptr, {SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_NONE}},
{"PrintfBufferPointerPushConstant", 40, 0, nullptr, {SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_NONE}}
{"PrintfBufferPointerPushConstant", 40, 0, nullptr, {SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_NONE}},
{"NormalizedSamplerMaskPushConstant", 41, 0, nullptr, {SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_NONE}}
};

View File

@@ -3,6 +3,7 @@ static const spv::Capability pygen_variable_caps_ArbitraryPrecisionFixedPointINT
static const spv::Capability pygen_variable_caps_AsmINTEL[] = {spv::Capability::AsmINTEL};
static const spv::Capability pygen_variable_caps_AtomicStorage[] = {spv::Capability::AtomicStorage};
static const spv::Capability pygen_variable_caps_BindlessTextureNV[] = {spv::Capability::BindlessTextureNV};
static const spv::Capability pygen_variable_caps_CacheControlsINTEL[] = {spv::Capability::CacheControlsINTEL};
static const spv::Capability pygen_variable_caps_ClipDistance[] = {spv::Capability::ClipDistance};
static const spv::Capability pygen_variable_caps_ComputeDerivativeGroupLinearNV[] = {spv::Capability::ComputeDerivativeGroupLinearNV};
static const spv::Capability pygen_variable_caps_ComputeDerivativeGroupQuadsNV[] = {spv::Capability::ComputeDerivativeGroupQuadsNV};
@@ -84,6 +85,7 @@ static const spv::Capability pygen_variable_caps_Pipes[] = {spv::Capability::Pip
static const spv::Capability pygen_variable_caps_RayCullMaskKHR[] = {spv::Capability::RayCullMaskKHR};
static const spv::Capability pygen_variable_caps_RayQueryKHR[] = {spv::Capability::RayQueryKHR};
static const spv::Capability pygen_variable_caps_RayQueryKHRRayTracingKHR[] = {spv::Capability::RayQueryKHR, spv::Capability::RayTracingKHR};
static const spv::Capability pygen_variable_caps_RayTracingDisplacementMicromapNV[] = {spv::Capability::RayTracingDisplacementMicromapNV};
static const spv::Capability pygen_variable_caps_RayTracingKHR[] = {spv::Capability::RayTracingKHR};
static const spv::Capability pygen_variable_caps_RayTracingMotionBlurNV[] = {spv::Capability::RayTracingMotionBlurNV};
static const spv::Capability pygen_variable_caps_RayTracingNV[] = {spv::Capability::RayTracingNV};
@@ -169,6 +171,7 @@ static const spvtools::Extension pygen_variable_exts_SPV_INTEL_arbitrary_precisi
static const spvtools::Extension pygen_variable_exts_SPV_INTEL_arbitrary_precision_integers[] = {spvtools::Extension::kSPV_INTEL_arbitrary_precision_integers};
static const spvtools::Extension pygen_variable_exts_SPV_INTEL_bfloat16_conversion[] = {spvtools::Extension::kSPV_INTEL_bfloat16_conversion};
static const spvtools::Extension pygen_variable_exts_SPV_INTEL_blocking_pipes[] = {spvtools::Extension::kSPV_INTEL_blocking_pipes};
static const spvtools::Extension pygen_variable_exts_SPV_INTEL_cache_controls[] = {spvtools::Extension::kSPV_INTEL_cache_controls};
static const spvtools::Extension pygen_variable_exts_SPV_INTEL_debug_module[] = {spvtools::Extension::kSPV_INTEL_debug_module};
static const spvtools::Extension pygen_variable_exts_SPV_INTEL_device_side_avc_motion_estimation[] = {spvtools::Extension::kSPV_INTEL_device_side_avc_motion_estimation};
static const spvtools::Extension pygen_variable_exts_SPV_INTEL_float_controls2[] = {spvtools::Extension::kSPV_INTEL_float_controls2};
@@ -240,6 +243,7 @@ static const spvtools::Extension pygen_variable_exts_SPV_NVX_multiview_per_view_
static const spvtools::Extension pygen_variable_exts_SPV_NV_bindless_texture[] = {spvtools::Extension::kSPV_NV_bindless_texture};
static const spvtools::Extension pygen_variable_exts_SPV_NV_compute_shader_derivatives[] = {spvtools::Extension::kSPV_NV_compute_shader_derivatives};
static const spvtools::Extension pygen_variable_exts_SPV_NV_cooperative_matrix[] = {spvtools::Extension::kSPV_NV_cooperative_matrix};
static const spvtools::Extension pygen_variable_exts_SPV_NV_displacement_micromap[] = {spvtools::Extension::kSPV_NV_displacement_micromap};
static const spvtools::Extension pygen_variable_exts_SPV_NV_geometry_shader_passthrough[] = {spvtools::Extension::kSPV_NV_geometry_shader_passthrough};
static const spvtools::Extension pygen_variable_exts_SPV_NV_mesh_shader[] = {spvtools::Extension::kSPV_NV_mesh_shader};
static const spvtools::Extension pygen_variable_exts_SPV_NV_mesh_shaderSPV_NV_viewport_array2[] = {spvtools::Extension::kSPV_NV_mesh_shader, spvtools::Extension::kSPV_NV_viewport_array2};
@@ -402,7 +406,8 @@ static const spv_operand_desc_t pygen_variable_SourceLanguageEntries[] = {
{"SYCL", 7, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"HERO_C", 8, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"NZSL", 9, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"WGSL", 10, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu}
{"WGSL", 10, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"Slang", 11, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu}
};
static const spv_operand_desc_t pygen_variable_ExecutionModelEntries[] = {
@@ -588,16 +593,16 @@ static const spv_operand_desc_t pygen_variable_DimEntries[] = {
};
static const spv_operand_desc_t pygen_variable_SamplerAddressingModeEntries[] = {
{"None", 0, 1, pygen_variable_caps_Kernel, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"ClampToEdge", 1, 1, pygen_variable_caps_Kernel, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"Clamp", 2, 1, pygen_variable_caps_Kernel, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"Repeat", 3, 1, pygen_variable_caps_Kernel, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"RepeatMirrored", 4, 1, pygen_variable_caps_Kernel, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu}
{"None", 0, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"ClampToEdge", 1, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"Clamp", 2, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"Repeat", 3, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"RepeatMirrored", 4, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu}
};
static const spv_operand_desc_t pygen_variable_SamplerFilterModeEntries[] = {
{"Nearest", 0, 1, pygen_variable_caps_Kernel, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"Linear", 1, 1, pygen_variable_caps_Kernel, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu}
{"Nearest", 0, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"Linear", 1, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu}
};
static const spv_operand_desc_t pygen_variable_ImageFormatEntries[] = {
@@ -816,7 +821,7 @@ static const spv_operand_desc_t pygen_variable_DecorationEntries[] = {
{"OverrideCoverageNV", 5248, 1, pygen_variable_caps_SampleMaskOverrideCoverageNV, 1, pygen_variable_exts_SPV_NV_sample_mask_override_coverage, {}, 0xffffffffu, 0xffffffffu},
{"PassthroughNV", 5250, 1, pygen_variable_caps_GeometryShaderPassthroughNV, 1, pygen_variable_exts_SPV_NV_geometry_shader_passthrough, {}, 0xffffffffu, 0xffffffffu},
{"ViewportRelativeNV", 5252, 1, pygen_variable_caps_ShaderViewportMaskNV, 0, nullptr, {}, 0xffffffffu, 0xffffffffu},
{"SecondaryViewportRelativeNV", 5256, 1, pygen_variable_caps_ShaderStereoViewNV, 1, pygen_variable_exts_SPV_NV_stereo_view_rendering, {SPV_OPERAND_TYPE_LITERAL_INTEGER}, SPV_SPIRV_VERSION_WORD(1,0), 0xffffffffu},
{"SecondaryViewportRelativeNV", 5256, 1, pygen_variable_caps_ShaderStereoViewNV, 1, pygen_variable_exts_SPV_NV_stereo_view_rendering, {SPV_OPERAND_TYPE_LITERAL_INTEGER}, 0xffffffffu, 0xffffffffu},
{"PerPrimitiveNV", 5271, 2, pygen_variable_caps_MeshShadingNVMeshShadingEXT, 2, pygen_variable_exts_SPV_EXT_mesh_shaderSPV_NV_mesh_shader, {}, 0xffffffffu, 0xffffffffu},
{"PerPrimitiveEXT", 5271, 2, pygen_variable_caps_MeshShadingNVMeshShadingEXT, 2, pygen_variable_exts_SPV_EXT_mesh_shaderSPV_NV_mesh_shader, {}, 0xffffffffu, 0xffffffffu},
{"PerViewNV", 5272, 1, pygen_variable_caps_MeshShadingNV, 1, pygen_variable_exts_SPV_NV_mesh_shader, {}, 0xffffffffu, 0xffffffffu},
@@ -862,6 +867,9 @@ static const spv_operand_desc_t pygen_variable_DecorationEntries[] = {
{"MergeINTEL", 5834, 1, pygen_variable_caps_FPGAMemoryAttributesINTEL, 1, pygen_variable_exts_SPV_INTEL_fpga_memory_attributes, {SPV_OPERAND_TYPE_LITERAL_STRING, SPV_OPERAND_TYPE_LITERAL_STRING}, 0xffffffffu, 0xffffffffu},
{"BankBitsINTEL", 5835, 1, pygen_variable_caps_FPGAMemoryAttributesINTEL, 1, pygen_variable_exts_SPV_INTEL_fpga_memory_attributes, {SPV_OPERAND_TYPE_LITERAL_INTEGER}, 0xffffffffu, 0xffffffffu},
{"ForcePow2DepthINTEL", 5836, 1, pygen_variable_caps_FPGAMemoryAttributesINTEL, 1, pygen_variable_exts_SPV_INTEL_fpga_memory_attributes, {SPV_OPERAND_TYPE_LITERAL_INTEGER}, 0xffffffffu, 0xffffffffu},
{"StridesizeINTEL", 5883, 1, pygen_variable_caps_FPGAMemoryAttributesINTEL, 0, nullptr, {SPV_OPERAND_TYPE_LITERAL_INTEGER}, 0xffffffffu, 0xffffffffu},
{"WordsizeINTEL", 5884, 1, pygen_variable_caps_FPGAMemoryAttributesINTEL, 0, nullptr, {SPV_OPERAND_TYPE_LITERAL_INTEGER}, 0xffffffffu, 0xffffffffu},
{"TrueDualPortINTEL", 5885, 1, pygen_variable_caps_FPGAMemoryAttributesINTEL, 0, nullptr, {}, 0xffffffffu, 0xffffffffu},
{"BurstCoalesceINTEL", 5899, 1, pygen_variable_caps_FPGAMemoryAccessesINTEL, 0, nullptr, {}, 0xffffffffu, 0xffffffffu},
{"CacheSizeINTEL", 5900, 1, pygen_variable_caps_FPGAMemoryAccessesINTEL, 0, nullptr, {SPV_OPERAND_TYPE_LITERAL_INTEGER}, 0xffffffffu, 0xffffffffu},
{"DontStaticallyCoalesceINTEL", 5901, 1, pygen_variable_caps_FPGAMemoryAccessesINTEL, 0, nullptr, {}, 0xffffffffu, 0xffffffffu},
@@ -894,7 +902,9 @@ static const spv_operand_desc_t pygen_variable_DecorationEntries[] = {
{"MMHostInterfaceReadWriteModeINTEL", 6180, 1, pygen_variable_caps_FPGAArgumentInterfacesINTEL, 0, nullptr, {SPV_OPERAND_TYPE_ACCESS_QUALIFIER}, 0xffffffffu, 0xffffffffu},
{"MMHostInterfaceMaxBurstINTEL", 6181, 1, pygen_variable_caps_FPGAArgumentInterfacesINTEL, 0, nullptr, {SPV_OPERAND_TYPE_LITERAL_INTEGER}, 0xffffffffu, 0xffffffffu},
{"MMHostInterfaceWaitRequestINTEL", 6182, 1, pygen_variable_caps_FPGAArgumentInterfacesINTEL, 0, nullptr, {SPV_OPERAND_TYPE_LITERAL_INTEGER}, 0xffffffffu, 0xffffffffu},
{"StableKernelArgumentINTEL", 6183, 1, pygen_variable_caps_FPGAArgumentInterfacesINTEL, 0, nullptr, {}, 0xffffffffu, 0xffffffffu}
{"StableKernelArgumentINTEL", 6183, 1, pygen_variable_caps_FPGAArgumentInterfacesINTEL, 0, nullptr, {}, 0xffffffffu, 0xffffffffu},
{"CacheControlLoadINTEL", 6442, 1, pygen_variable_caps_CacheControlsINTEL, 0, nullptr, {SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LOAD_CACHE_CONTROL}, 0xffffffffu, 0xffffffffu},
{"CacheControlStoreINTEL", 6443, 1, pygen_variable_caps_CacheControlsINTEL, 0, nullptr, {SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_STORE_CACHE_CONTROL}, 0xffffffffu, 0xffffffffu}
};
static const spv_operand_desc_t pygen_variable_BuiltInEntries[] = {
@@ -1024,6 +1034,8 @@ static const spv_operand_desc_t pygen_variable_BuiltInEntries[] = {
{"HitKindKHR", 5333, 2, pygen_variable_caps_RayTracingNVRayTracingKHR, 2, pygen_variable_exts_SPV_KHR_ray_tracingSPV_NV_ray_tracing, {}, 0xffffffffu, 0xffffffffu},
{"CurrentRayTimeNV", 5334, 1, pygen_variable_caps_RayTracingMotionBlurNV, 1, pygen_variable_exts_SPV_NV_ray_tracing_motion_blur, {}, 0xffffffffu, 0xffffffffu},
{"HitTriangleVertexPositionsKHR", 5335, 1, pygen_variable_caps_RayTracingPositionFetchKHR, 0, nullptr, {}, 0xffffffffu, 0xffffffffu},
{"HitMicroTriangleVertexPositionsNV", 5337, 1, pygen_variable_caps_RayTracingDisplacementMicromapNV, 0, nullptr, {}, 0xffffffffu, 0xffffffffu},
{"HitMicroTriangleVertexBarycentricsNV", 5344, 1, pygen_variable_caps_RayTracingDisplacementMicromapNV, 0, nullptr, {}, 0xffffffffu, 0xffffffffu},
{"IncomingRayFlagsNV", 5351, 2, pygen_variable_caps_RayTracingNVRayTracingKHR, 2, pygen_variable_exts_SPV_KHR_ray_tracingSPV_NV_ray_tracing, {}, 0xffffffffu, 0xffffffffu},
{"IncomingRayFlagsKHR", 5351, 2, pygen_variable_caps_RayTracingNVRayTracingKHR, 2, pygen_variable_exts_SPV_KHR_ray_tracingSPV_NV_ray_tracing, {}, 0xffffffffu, 0xffffffffu},
{"RayGeometryIndexKHR", 5352, 1, pygen_variable_caps_RayTracingKHR, 1, pygen_variable_exts_SPV_KHR_ray_tracing, {}, 0xffffffffu, 0xffffffffu},
@@ -1031,6 +1043,8 @@ static const spv_operand_desc_t pygen_variable_BuiltInEntries[] = {
{"SMCountNV", 5375, 1, pygen_variable_caps_ShaderSMBuiltinsNV, 1, pygen_variable_exts_SPV_NV_shader_sm_builtins, {}, 0xffffffffu, 0xffffffffu},
{"WarpIDNV", 5376, 1, pygen_variable_caps_ShaderSMBuiltinsNV, 1, pygen_variable_exts_SPV_NV_shader_sm_builtins, {}, 0xffffffffu, 0xffffffffu},
{"SMIDNV", 5377, 1, pygen_variable_caps_ShaderSMBuiltinsNV, 1, pygen_variable_exts_SPV_NV_shader_sm_builtins, {}, 0xffffffffu, 0xffffffffu},
{"HitKindFrontFacingMicroTriangleNV", 5405, 1, pygen_variable_caps_RayTracingDisplacementMicromapNV, 0, nullptr, {}, 0xffffffffu, 0xffffffffu},
{"HitKindBackFacingMicroTriangleNV", 5406, 1, pygen_variable_caps_RayTracingDisplacementMicromapNV, 0, nullptr, {}, 0xffffffffu, 0xffffffffu},
{"CullMaskKHR", 6021, 1, pygen_variable_caps_RayCullMaskKHR, 1, pygen_variable_exts_SPV_KHR_ray_cull_mask, {}, 0xffffffffu, 0xffffffffu}
};
@@ -1237,10 +1251,12 @@ static const spv_operand_desc_t pygen_variable_CapabilityEntries[] = {
{"FragmentShaderPixelInterlockEXT", 5378, 1, pygen_variable_caps_Shader, 1, pygen_variable_exts_SPV_EXT_fragment_shader_interlock, {}, 0xffffffffu, 0xffffffffu},
{"DemoteToHelperInvocation", 5379, 1, pygen_variable_caps_Shader, 1, pygen_variable_exts_SPV_EXT_demote_to_helper_invocation, {}, SPV_SPIRV_VERSION_WORD(1,6), 0xffffffffu},
{"DemoteToHelperInvocationEXT", 5379, 1, pygen_variable_caps_Shader, 1, pygen_variable_exts_SPV_EXT_demote_to_helper_invocation, {}, SPV_SPIRV_VERSION_WORD(1,6), 0xffffffffu},
{"DisplacementMicromapNV", 5380, 1, pygen_variable_caps_Shader, 1, pygen_variable_exts_SPV_NV_displacement_micromap, {}, 0xffffffffu, 0xffffffffu},
{"RayTracingOpacityMicromapEXT", 5381, 2, pygen_variable_caps_RayQueryKHRRayTracingKHR, 1, pygen_variable_exts_SPV_EXT_opacity_micromap, {}, 0xffffffffu, 0xffffffffu},
{"ShaderInvocationReorderNV", 5383, 1, pygen_variable_caps_RayTracingKHR, 1, pygen_variable_exts_SPV_NV_shader_invocation_reorder, {}, 0xffffffffu, 0xffffffffu},
{"BindlessTextureNV", 5390, 0, nullptr, 1, pygen_variable_exts_SPV_NV_bindless_texture, {}, 0xffffffffu, 0xffffffffu},
{"RayQueryPositionFetchKHR", 5391, 1, pygen_variable_caps_Shader, 1, pygen_variable_exts_SPV_KHR_ray_tracing_position_fetch, {}, 0xffffffffu, 0xffffffffu},
{"RayTracingDisplacementMicromapNV", 5409, 1, pygen_variable_caps_RayTracingKHR, 1, pygen_variable_exts_SPV_NV_displacement_micromap, {}, 0xffffffffu, 0xffffffffu},
{"SubgroupShuffleINTEL", 5568, 0, nullptr, 1, pygen_variable_exts_SPV_INTEL_subgroups, {}, 0xffffffffu, 0xffffffffu},
{"SubgroupBufferBlockIOINTEL", 5569, 0, nullptr, 1, pygen_variable_exts_SPV_INTEL_subgroups, {}, 0xffffffffu, 0xffffffffu},
{"SubgroupImageBlockIOINTEL", 5570, 0, nullptr, 1, pygen_variable_exts_SPV_INTEL_subgroups, {}, 0xffffffffu, 0xffffffffu},
@@ -1309,7 +1325,8 @@ static const spv_operand_desc_t pygen_variable_CapabilityEntries[] = {
{"FPMaxErrorINTEL", 6169, 0, nullptr, 1, pygen_variable_exts_SPV_INTEL_fp_max_error, {}, 0xffffffffu, 0xffffffffu},
{"FPGALatencyControlINTEL", 6171, 0, nullptr, 1, pygen_variable_exts_SPV_INTEL_fpga_latency_control, {}, 0xffffffffu, 0xffffffffu},
{"FPGAArgumentInterfacesINTEL", 6174, 0, nullptr, 1, pygen_variable_exts_SPV_INTEL_fpga_argument_interfaces, {}, 0xffffffffu, 0xffffffffu},
{"GroupUniformArithmeticKHR", 6400, 0, nullptr, 1, pygen_variable_exts_SPV_KHR_uniform_group_instructions, {}, 0xffffffffu, 0xffffffffu}
{"GroupUniformArithmeticKHR", 6400, 0, nullptr, 1, pygen_variable_exts_SPV_KHR_uniform_group_instructions, {}, 0xffffffffu, 0xffffffffu},
{"CacheControlsINTEL", 6441, 0, nullptr, 1, pygen_variable_exts_SPV_INTEL_cache_controls, {}, 0xffffffffu, 0xffffffffu}
};
static const spv_operand_desc_t pygen_variable_RayQueryIntersectionEntries[] = {
@@ -1358,6 +1375,21 @@ static const spv_operand_desc_t pygen_variable_InitializationModeQualifierEntrie
{"InitOnDeviceResetINTEL", 1, 1, pygen_variable_caps_GlobalVariableFPGADecorationsINTEL, 0, nullptr, {}, 0xffffffffu, 0xffffffffu}
};
static const spv_operand_desc_t pygen_variable_LoadCacheControlEntries[] = {
{"UncachedINTEL", 0, 1, pygen_variable_caps_CacheControlsINTEL, 0, nullptr, {}, 0xffffffffu, 0xffffffffu},
{"CachedINTEL", 1, 1, pygen_variable_caps_CacheControlsINTEL, 0, nullptr, {}, 0xffffffffu, 0xffffffffu},
{"StreamingINTEL", 2, 1, pygen_variable_caps_CacheControlsINTEL, 0, nullptr, {}, 0xffffffffu, 0xffffffffu},
{"InvalidateAfterReadINTEL", 3, 1, pygen_variable_caps_CacheControlsINTEL, 0, nullptr, {}, 0xffffffffu, 0xffffffffu},
{"ConstCachedINTEL", 4, 1, pygen_variable_caps_CacheControlsINTEL, 0, nullptr, {}, 0xffffffffu, 0xffffffffu}
};
static const spv_operand_desc_t pygen_variable_StoreCacheControlEntries[] = {
{"UncachedINTEL", 0, 1, pygen_variable_caps_CacheControlsINTEL, 0, nullptr, {}, 0xffffffffu, 0xffffffffu},
{"WriteThroughINTEL", 1, 1, pygen_variable_caps_CacheControlsINTEL, 0, nullptr, {}, 0xffffffffu, 0xffffffffu},
{"WriteBackINTEL", 2, 1, pygen_variable_caps_CacheControlsINTEL, 0, nullptr, {}, 0xffffffffu, 0xffffffffu},
{"StreamingINTEL", 3, 1, pygen_variable_caps_CacheControlsINTEL, 0, nullptr, {}, 0xffffffffu, 0xffffffffu}
};
static const spv_operand_desc_t pygen_variable_DebugInfoFlagsEntries[] = {
{"None", 0x0000, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1, 0), 0xffffffffu},
{"FlagIsProtected", 0x01, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1, 0), 0xffffffffu},
@@ -1522,6 +1554,8 @@ static const spv_operand_desc_group_t pygen_variable_OperandInfoTable[] = {
{SPV_OPERAND_TYPE_COOPERATIVE_MATRIX_LAYOUT, ARRAY_SIZE(pygen_variable_CooperativeMatrixLayoutEntries), pygen_variable_CooperativeMatrixLayoutEntries},
{SPV_OPERAND_TYPE_COOPERATIVE_MATRIX_USE, ARRAY_SIZE(pygen_variable_CooperativeMatrixUseEntries), pygen_variable_CooperativeMatrixUseEntries},
{SPV_OPERAND_TYPE_INITIALIZATION_MODE_QUALIFIER, ARRAY_SIZE(pygen_variable_InitializationModeQualifierEntries), pygen_variable_InitializationModeQualifierEntries},
{SPV_OPERAND_TYPE_LOAD_CACHE_CONTROL, ARRAY_SIZE(pygen_variable_LoadCacheControlEntries), pygen_variable_LoadCacheControlEntries},
{SPV_OPERAND_TYPE_STORE_CACHE_CONTROL, ARRAY_SIZE(pygen_variable_StoreCacheControlEntries), pygen_variable_StoreCacheControlEntries},
{SPV_OPERAND_TYPE_DEBUG_INFO_FLAGS, ARRAY_SIZE(pygen_variable_DebugInfoFlagsEntries), pygen_variable_DebugInfoFlagsEntries},
{SPV_OPERAND_TYPE_DEBUG_BASE_TYPE_ATTRIBUTE_ENCODING, ARRAY_SIZE(pygen_variable_DebugBaseTypeAttributeEncodingEntries), pygen_variable_DebugBaseTypeAttributeEncodingEntries},
{SPV_OPERAND_TYPE_DEBUG_COMPOSITE_TYPE, ARRAY_SIZE(pygen_variable_DebugCompositeTypeEntries), pygen_variable_DebugCompositeTypeEntries},

View File

@@ -133,71 +133,6 @@ static const int kInstTaskOutGlobalInvocationIdZ = kInstCommonOutCnt + 2;
// Size of Common and Stage-specific Members
static const int kInstStageOutCnt = kInstCommonOutCnt + 3;
// Validation Error Code Offset
//
// This identifies the validation error. It also helps to identify
// how many words follow in the record and their meaning.
static const int kInstValidationOutError = kInstStageOutCnt;
// Validation-specific Output Record Offsets
//
// Each different validation will generate a potentially different
// number of words at the end of the record giving more specifics
// about the validation error.
//
// A bindless bounds error will output the index and the bound.
static const int kInstBindlessBoundsOutDescSet = kInstStageOutCnt + 1;
static const int kInstBindlessBoundsOutDescBinding = kInstStageOutCnt + 2;
static const int kInstBindlessBoundsOutDescIndex = kInstStageOutCnt + 3;
static const int kInstBindlessBoundsOutDescBound = kInstStageOutCnt + 4;
static const int kInstBindlessBoundsOutUnused = kInstStageOutCnt + 5;
static const int kInstBindlessBoundsOutCnt = kInstStageOutCnt + 6;
// A descriptor uninitialized error will output the index.
static const int kInstBindlessUninitOutDescSet = kInstStageOutCnt + 1;
static const int kInstBindlessUninitOutBinding = kInstStageOutCnt + 2;
static const int kInstBindlessUninitOutDescIndex = kInstStageOutCnt + 3;
static const int kInstBindlessUninitOutUnused = kInstStageOutCnt + 4;
static const int kInstBindlessUninitOutUnused2 = kInstStageOutCnt + 5;
static const int kInstBindlessUninitOutCnt = kInstStageOutCnt + 6;
// A buffer out-of-bounds error will output the descriptor
// index, the buffer offset and the buffer size
static const int kInstBindlessBuffOOBOutDescSet = kInstStageOutCnt + 1;
static const int kInstBindlessBuffOOBOutDescBinding = kInstStageOutCnt + 2;
static const int kInstBindlessBuffOOBOutDescIndex = kInstStageOutCnt + 3;
static const int kInstBindlessBuffOOBOutBuffOff = kInstStageOutCnt + 4;
static const int kInstBindlessBuffOOBOutBuffSize = kInstStageOutCnt + 5;
static const int kInstBindlessBuffOOBOutCnt = kInstStageOutCnt + 6;
// A buffer address unalloc error will output the 64-bit pointer in
// two 32-bit pieces, lower bits first.
static const int kInstBuffAddrUnallocOutDescPtrLo = kInstStageOutCnt + 1;
static const int kInstBuffAddrUnallocOutDescPtrHi = kInstStageOutCnt + 2;
static const int kInstBuffAddrUnallocOutCnt = kInstStageOutCnt + 3;
// Maximum Output Record Member Count
static const int kInstMaxOutCnt = kInstStageOutCnt + 6;
// Validation Error Codes
//
// These are the possible validation error codes.
static const int kInstErrorBindlessBounds = 1;
static const int kInstErrorBindlessUninit = 2;
static const int kInstErrorBuffAddrUnallocRef = 3;
static const int kInstErrorOOB = 4;
static const int kInstErrorMax = kInstErrorOOB;
// Direct Input Buffer Offsets
//
// The following values provide member offsets into the input buffers
// consumed by InstrumentPass::GenDebugDirectRead(). This method is utilized
// by InstBindlessCheckPass.
//
// The only object in an input buffer is a runtime array of unsigned
// integers. Each validation will have its own formatting of this array.
static const int kDebugInputDataOffset = 0;
// Debug Buffer Bindings
//
// These are the bindings for the different buffers which are
@@ -216,63 +151,6 @@ static const int kDebugInputBindingBuffAddr = 2;
// This is the output buffer written by InstDebugPrintfPass.
static const int kDebugOutputPrintfStream = 3;
// clang-format off
// Bindless Validation Input Buffer Format
//
// An input buffer for bindless validation has this structure:
// GLSL:
// layout(buffer_reference, std430, buffer_reference_align = 8) buffer DescriptorSetData {
// uint num_bindings;
// uint data[];
// };
//
// layout(set = 7, binding = 1, std430) buffer inst_bindless_InputBuffer
// {
// DescriptorSetData desc_sets[32];
// } inst_bindless_input_buffer;
//
//
// To look up the length of a binding:
// uint length = inst_bindless_input_buffer[set].data[binding];
// Scalar bindings have a length of 1.
//
// To look up the initialization state of a descriptor in a binding:
// uint num_bindings = inst_bindless_input_buffer[set].num_bindings;
// uint binding_state_start = inst_bindless_input_buffer[set].data[num_bindings + binding];
// uint init_state = inst_bindless_input_buffer[set].data[binding_state_start + index];
//
// For scalar bindings, use 0 for the index.
// clang-format on
//
// The size of the inst_bindless_input_buffer array, regardless of how many
// descriptor sets the device supports.
static const int kDebugInputBindlessMaxDescSets = 32;
// Buffer Device Address Input Buffer Format
//
// An input buffer for buffer device address validation consists of a single
// array of unsigned 64-bit integers we will call Data[]. This array is
// formatted as follows:
//
// At offset kDebugInputBuffAddrPtrOffset is a list of sorted valid buffer
// addresses. The list is terminated with the address 0xffffffffffffffff.
// If 0x0 is not a valid buffer address, this address is inserted at the
// start of the list.
//
static const int kDebugInputBuffAddrPtrOffset = 1;
//
// At offset kDebugInputBuffAddrLengthOffset in Data[] is a single uint64 which
// gives an offset to the start of the buffer length data. More
// specifically, for a buffer whose pointer is located at input buffer offset
// i, the length is located at:
//
// Data[ i - kDebugInputBuffAddrPtrOffset
// + Data[ kDebugInputBuffAddrLengthOffset ] ]
//
// The length associated with the 0xffffffffffffffff address is zero. If
// not a valid buffer, the length associated with the 0x0 address is zero.
static const int kDebugInputBuffAddrLengthOffset = 0;
} // namespace spvtools
#endif // INCLUDE_SPIRV_TOOLS_INSTRUMENT_HPP_
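
For reference, the buffer-device-address length lookup described in the removed comments above can be sketched as a small C++ helper. The Data pointer, index i, and helper name below are illustrative only; the two constants mirror the ones defined in this header.

#include <cstdint>

// Sketch only: per the comments above, a buffer whose pointer sits at Data[i]
// has its length stored at
//   Data[i - kDebugInputBuffAddrPtrOffset + Data[kDebugInputBuffAddrLengthOffset]].
static const int kDebugInputBuffAddrPtrOffset = 1;     // mirrors the header
static const int kDebugInputBuffAddrLengthOffset = 0;  // mirrors the header

uint64_t BuffAddrLength(const uint64_t* Data, uint64_t i) {
  return Data[i - kDebugInputBuffAddrPtrOffset +
              Data[kDebugInputBuffAddrLengthOffset]];
}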

View File

@@ -298,6 +298,10 @@ typedef enum spv_operand_type_t {
SPV_OPERAND_TYPE_INITIALIZATION_MODE_QUALIFIER,
// Enum type from SPV_INTEL_global_variable_host_access
SPV_OPERAND_TYPE_HOST_ACCESS_QUALIFIER,
// Enum type from SPV_INTEL_cache_controls
SPV_OPERAND_TYPE_LOAD_CACHE_CONTROL,
// Enum type from SPV_INTEL_cache_controls
SPV_OPERAND_TYPE_STORE_CACHE_CONTROL,
// This is a sentinel value, and does not represent an operand type.
// It should come last.

View File

@@ -766,11 +766,9 @@ Optimizer::PassToken CreateCombineAccessChainsPass();
// potentially de-optimizing the instrument code, for example, inlining
// the debug record output function throughout the module.
//
// The instrumentation will read and write buffers in debug
// descriptor set |desc_set|. It will write |shader_id| in each output record
// The instrumentation will write |shader_id| in each output record
// to identify the shader module which generated the record.
Optimizer::PassToken CreateInstBindlessCheckPass(uint32_t desc_set,
uint32_t shader_id);
Optimizer::PassToken CreateInstBindlessCheckPass(uint32_t shader_id);
// Create a pass to instrument physical buffer address checking
// This pass instruments all physical buffer address references to check that
@@ -791,8 +789,7 @@ Optimizer::PassToken CreateInstBindlessCheckPass(uint32_t desc_set,
// The instrumentation will read and write buffers in debug
// descriptor set |desc_set|. It will write |shader_id| in each output record
// to identify the shader module which generated the record.
Optimizer::PassToken CreateInstBuffAddrCheckPass(uint32_t desc_set,
uint32_t shader_id);
Optimizer::PassToken CreateInstBuffAddrCheckPass(uint32_t shader_id);
// Create a pass to instrument OpDebugPrintf instructions.
// This pass replaces all OpDebugPrintf instructions with instructions to write
@@ -997,6 +994,12 @@ Optimizer::PassToken CreateTrimCapabilitiesPass();
// use the new value |ds_to|.
Optimizer::PassToken CreateSwitchDescriptorSetPass(uint32_t ds_from,
uint32_t ds_to);
// Creates an invocation interlock placement pass.
// This pass ensures that an entry point will have at most one
// OpBeginInterlockInvocationEXT and one OpEndInterlockInvocationEXT, in that
// order.
Optimizer::PassToken CreateInvocationInterlockPlacementPass();
} // namespace spvtools
#endif // INCLUDE_SPIRV_TOOLS_OPTIMIZER_HPP_
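
For reference, a minimal sketch of how a caller might register the passes whose signatures change above, assuming an existing spvtools::Optimizer; the wrapper function name and the shader_id parameter are illustrative placeholders, not part of the diff.

#include "spirv-tools/optimizer.hpp"

// Sketch only: the bindless and buffer-address instrumentation passes now
// take just |shader_id|, and the new invocation interlock placement pass
// takes no arguments.
void RegisterInstrumentationPasses(spvtools::Optimizer& opt,
                                   uint32_t shader_id) {
  opt.RegisterPass(spvtools::CreateInstBindlessCheckPass(shader_id));
  opt.RegisterPass(spvtools::CreateInstBuffAddrCheckPass(shader_id));
  opt.RegisterPass(spvtools::CreateInvocationInterlockPlacementPass());
}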

View File

@@ -216,6 +216,10 @@ const char* spvOperandTypeStr(spv_operand_type_t type) {
return "initialization mode qualifier";
case SPV_OPERAND_TYPE_HOST_ACCESS_QUALIFIER:
return "host access qualifier";
case SPV_OPERAND_TYPE_LOAD_CACHE_CONTROL:
return "load cache control";
case SPV_OPERAND_TYPE_STORE_CACHE_CONTROL:
return "store cache control";
case SPV_OPERAND_TYPE_IMAGE:
case SPV_OPERAND_TYPE_OPTIONAL_IMAGE:
return "image";
@@ -354,6 +358,8 @@ bool spvOperandIsConcrete(spv_operand_type_t type) {
case SPV_OPERAND_TYPE_COOPERATIVE_MATRIX_USE:
case SPV_OPERAND_TYPE_INITIALIZATION_MODE_QUALIFIER:
case SPV_OPERAND_TYPE_HOST_ACCESS_QUALIFIER:
case SPV_OPERAND_TYPE_LOAD_CACHE_CONTROL:
case SPV_OPERAND_TYPE_STORE_CACHE_CONTROL:
return true;
default:
break;

View File

@@ -941,6 +941,8 @@ Pass::Status AggressiveDCEPass::Process() {
void AggressiveDCEPass::InitExtensions() {
extensions_allowlist_.clear();
// clang-format off
extensions_allowlist_.insert({
"SPV_AMD_shader_explicit_vertex_parameter",
"SPV_AMD_shader_trinary_minmax",
@@ -988,6 +990,7 @@ void AggressiveDCEPass::InitExtensions() {
"SPV_KHR_ray_query",
"SPV_EXT_fragment_invocation_density",
"SPV_EXT_physical_storage_buffer",
"SPV_KHR_physical_storage_buffer",
"SPV_KHR_terminate_invocation",
"SPV_KHR_shader_clock",
"SPV_KHR_vulkan_memory_model",
@@ -999,7 +1002,10 @@
"SPV_KHR_fragment_shader_barycentric",
"SPV_NV_bindless_texture",
"SPV_EXT_shader_atomic_float_add",
"SPV_EXT_fragment_shader_interlock",
"SPV_NV_compute_shader_derivatives"
});
// clang-format on
}
Instruction* AggressiveDCEPass::GetHeaderBranch(BasicBlock* blk) {

View File

@@ -142,7 +142,7 @@ class DecorationManager {
uint32_t decoration_value);
// Add |decoration, decoration_value| of |inst_id, member| to module.
void AddMemberDecoration(uint32_t member, uint32_t inst_id,
void AddMemberDecoration(uint32_t inst_id, uint32_t member,
uint32_t decoration, uint32_t decoration_value);
friend bool operator==(const DecorationManager&, const DecorationManager&);

View File

@@ -318,7 +318,13 @@ uint32_t FixStorageClass::WalkAccessChainType(Instruction* inst, uint32_t id) {
const analysis::Constant* index_const =
context()->get_constant_mgr()->FindDeclaredConstant(
inst->GetSingleWordInOperand(i));
uint32_t index = index_const->GetU32();
// It is highly unlikely that any type would have more fields than could
// be indexed by a 32-bit integer, and GetSingleWordInOperand only takes
// a 32-bit value, so we would not be able to handle it anyway. But the
// specification does allow any scalar integer type, treated as signed,
// so we simply downcast the index to 32-bits.
uint32_t index =
static_cast<uint32_t>(index_const->GetSignExtendedValue());
id = type_inst->GetSingleWordInOperand(index);
break;
}

View File

@@ -2067,7 +2067,8 @@ FoldingRule FMixFeedingExtract() {
}
// Returns the number of elements in the composite type |type|. Returns 0 if
// |type| is a scalar value.
// |type| is a scalar value. Return UINT32_MAX when the size is unknown at
// compile time.
uint32_t GetNumberOfElements(const analysis::Type* type) {
if (auto* vector_type = type->AsVector()) {
return vector_type->element_count();
@@ -2079,21 +2080,27 @@ uint32_t GetNumberOfElements(const analysis::Type* type) {
return static_cast<uint32_t>(struct_type->element_types().size());
}
if (auto* array_type = type->AsArray()) {
return array_type->length_info().words[0];
if (array_type->length_info().words[0] ==
analysis::Array::LengthInfo::kConstant &&
array_type->length_info().words.size() == 2) {
return array_type->length_info().words[1];
}
return UINT32_MAX;
}
return 0;
}
// Returns a map with the set of values that were inserted into an object by
// the chain of OpCompositeInsertInstruction starting with |inst|.
// The map will map the index to the value inserted at that index.
// The map will map the index to the value inserted at that index. An empty map
// will be returned if the map could not be properly generated.
std::map<uint32_t, uint32_t> GetInsertedValues(Instruction* inst) {
analysis::DefUseManager* def_use_mgr = inst->context()->get_def_use_mgr();
std::map<uint32_t, uint32_t> values_inserted;
Instruction* current_inst = inst;
while (current_inst->opcode() == spv::Op::OpCompositeInsert) {
if (current_inst->NumInOperands() > inst->NumInOperands()) {
// This is the catch the case
// This is to catch the case
// %2 = OpCompositeInsert %m2x2int %v2int_1_0 %m2x2int_undef 0
// %3 = OpCompositeInsert %m2x2int %int_4 %2 0 0
// %4 = OpCompositeInsert %m2x2int %v2int_2_3 %3 1

View File

@@ -39,149 +39,11 @@ constexpr int kSpvTypeImageArrayed = 3;
constexpr int kSpvTypeImageMS = 4;
} // namespace
void InstBindlessCheckPass::SetupInputBufferIds() {
if (input_buffer_id_ != 0) {
return;
}
AddStorageBufferExt();
if (!get_feature_mgr()->HasExtension(kSPV_KHR_physical_storage_buffer)) {
context()->AddExtension("SPV_KHR_physical_storage_buffer");
}
context()->AddCapability(spv::Capability::PhysicalStorageBufferAddresses);
Instruction* memory_model = get_module()->GetMemoryModel();
// TODO should this be just Physical64?
memory_model->SetInOperand(
0u, {uint32_t(spv::AddressingModel::PhysicalStorageBuffer64)});
analysis::DecorationManager* deco_mgr = get_decoration_mgr();
analysis::TypeManager* type_mgr = context()->get_type_mgr();
constexpr uint32_t width = 32u;
// declare the DescriptorSetData struct
analysis::Struct* desc_set_struct =
GetStruct({type_mgr->GetUIntType(), GetUintRuntimeArrayType(width)});
desc_set_type_id_ = type_mgr->GetTypeInstruction(desc_set_struct);
// By the Vulkan spec, a pre-existing struct containing a RuntimeArray
// must be a block, and will therefore be decorated with Block. Therefore
// the undecorated type returned here will not be pre-existing and can
// safely be decorated. Since this type is now decorated, it is out of
// sync with the TypeManager and therefore the TypeManager must be
// invalidated after this pass.
assert(context()->get_def_use_mgr()->NumUses(desc_set_type_id_) == 0 &&
"used struct type returned");
deco_mgr->AddDecoration(desc_set_type_id_, uint32_t(spv::Decoration::Block));
deco_mgr->AddMemberDecoration(desc_set_type_id_, 0,
uint32_t(spv::Decoration::Offset), 0);
deco_mgr->AddMemberDecoration(desc_set_type_id_, 1,
uint32_t(spv::Decoration::Offset), 4);
context()->AddDebug2Inst(
NewGlobalName(desc_set_type_id_, "DescriptorSetData"));
context()->AddDebug2Inst(NewMemberName(desc_set_type_id_, 0, "num_bindings"));
context()->AddDebug2Inst(NewMemberName(desc_set_type_id_, 1, "data"));
// declare buffer address reference to DescriptorSetData
desc_set_ptr_id_ = type_mgr->FindPointerToType(
desc_set_type_id_, spv::StorageClass::PhysicalStorageBuffer);
// runtime array of buffer addresses
analysis::Type* rarr_ty = GetArray(type_mgr->GetType(desc_set_ptr_id_),
kDebugInputBindlessMaxDescSets);
deco_mgr->AddDecorationVal(type_mgr->GetId(rarr_ty),
uint32_t(spv::Decoration::ArrayStride), 8u);
// declare the InputBuffer type, a struct wrapper around the runtime array
analysis::Struct* input_buffer_struct = GetStruct({rarr_ty});
input_buffer_struct_id_ = type_mgr->GetTypeInstruction(input_buffer_struct);
deco_mgr->AddDecoration(input_buffer_struct_id_,
uint32_t(spv::Decoration::Block));
deco_mgr->AddMemberDecoration(input_buffer_struct_id_, 0,
uint32_t(spv::Decoration::Offset), 0);
context()->AddDebug2Inst(
NewGlobalName(input_buffer_struct_id_, "InputBuffer"));
context()->AddDebug2Inst(
NewMemberName(input_buffer_struct_id_, 0, "desc_sets"));
input_buffer_ptr_id_ = type_mgr->FindPointerToType(
input_buffer_struct_id_, spv::StorageClass::StorageBuffer);
// declare the input_buffer global variable
input_buffer_id_ = TakeNextId();
const std::vector<Operand> var_operands = {
{spv_operand_type_t::SPV_OPERAND_TYPE_LITERAL_INTEGER,
{uint32_t(spv::StorageClass::StorageBuffer)}},
};
auto new_var_op = spvtools::MakeUnique<Instruction>(
context(), spv::Op::OpVariable, input_buffer_ptr_id_, input_buffer_id_,
var_operands);
context()->AddGlobalValue(std::move(new_var_op));
context()->AddDebug2Inst(NewGlobalName(input_buffer_id_, "input_buffer"));
deco_mgr->AddDecorationVal(
input_buffer_id_, uint32_t(spv::Decoration::DescriptorSet), desc_set_);
deco_mgr->AddDecorationVal(input_buffer_id_,
uint32_t(spv::Decoration::Binding),
GetInputBufferBinding());
if (get_module()->version() >= SPV_SPIRV_VERSION_WORD(1, 4)) {
// Add the new buffer to all entry points.
for (auto& entry : get_module()->entry_points()) {
entry.AddOperand({SPV_OPERAND_TYPE_ID, {input_buffer_id_}});
context()->AnalyzeUses(&entry);
}
}
}
// This is a stub function for use with Import linkage
// clang-format off
// GLSL:
//bool inst_bindless_check_desc(uint shader_id, uint inst_num, uvec4 stage_info, uint desc_set, uint binding, uint desc_index,
// uint byte_offset)
//{
// uint error = 0u;
// uint param5 = 0u;
// uint param6 = 0u;
// uint num_bindings = 0u;
// uint init_state = 0u;
// if (desc_set >= 32u) {
// error = 1u;
// }
// inst_bindless_DescriptorSetData set_data;
// if (error == 0u) {
// set_data = inst_bindless_input_buffer.desc_sets[desc_set];
// uvec2 ptr_vec = uvec2(set_data);
// if ((ptr_vec.x == 0u) && (ptr_vec.y == 0u)) {
// error = 1u;
// }
// }
// if (error == 0u) {
// num_bindings = set_data.num_bindings;
// if (binding >= num_bindings) {
// error = 1u;
// }
// }
// if (error == 0u) {
// if (desc_index >= set_data.data[binding]) {
// error = 1u;
// param5 = set_data.data[binding];
// }
// }
// if (0u == error) {
// uint state_index = set_data.data[num_bindings + binding] + desc_index;
// init_state = set_data.data[state_index];
// if (init_state == 0u) {
// error = 2u;
// }
// }
// if (error == 0u) {
// if (byte_offset >= init_state) {
// error = 4u;
// param5 = byte_offset;
// param6 = init_state;
// }
// }
// if (0u != error) {
// inst_bindless_stream_write_6(shader_id, inst_num, stage_info, error, desc_set, binding, desc_index, param5, param6);
// return false;
// }
// return true;
//bool inst_bindless_check_desc(const uint shader_id, const uint inst_num, const uvec4 stage_info, const uint desc_set,
// const uint binding, const uint desc_index, const uint byte_offset) {
//}
// clang-format on
uint32_t InstBindlessCheckPass::GenDescCheckFunctionId() {
@@ -195,11 +57,10 @@ uint32_t InstBindlessCheckPass::GenDescCheckFunctionId() {
kByteOffset = 6,
kNumArgs
};
if (desc_check_func_id_ != 0) {
return desc_check_func_id_;
if (check_desc_func_id_ != 0) {
return check_desc_func_id_;
}
SetupInputBufferIds();
analysis::TypeManager* type_mgr = context()->get_type_mgr();
const analysis::Integer* uint_type = GetInteger(32, false);
const analysis::Vector v4uint(uint_type, 4);
@@ -211,454 +72,32 @@ uint32_t InstBindlessCheckPass::GenDescCheckFunctionId() {
std::unique_ptr<Function> func =
StartFunction(func_id, type_mgr->GetBoolType(), param_types);
const std::vector<uint32_t> param_ids = AddParameters(*func, param_types);
const uint32_t func_uint_ptr =
type_mgr->FindPointerToType(GetUintId(), spv::StorageClass::Function);
// Create block
auto new_blk_ptr = MakeUnique<BasicBlock>(NewLabel(TakeNextId()));
InstructionBuilder builder(
context(), new_blk_ptr.get(),
IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
Instruction* inst;
const uint32_t zero_id = builder.GetUintConstantId(0);
const uint32_t false_id = builder.GetBoolConstantId(false);
const uint32_t true_id = builder.GetBoolConstantId(true);
const uint32_t uint_ptr = type_mgr->FindPointerToType(
GetUintId(), spv::StorageClass::PhysicalStorageBuffer);
inst = builder.AddBinaryOp(func_uint_ptr, spv::Op::OpVariable,
uint32_t(spv::StorageClass::Function), zero_id);
const uint32_t error_var = inst->result_id();
inst = builder.AddBinaryOp(func_uint_ptr, spv::Op::OpVariable,
uint32_t(spv::StorageClass::Function), zero_id);
const uint32_t param5_var = inst->result_id();
inst = builder.AddBinaryOp(func_uint_ptr, spv::Op::OpVariable,
uint32_t(spv::StorageClass::Function), zero_id);
const uint32_t param6_var = inst->result_id();
inst = builder.AddBinaryOp(func_uint_ptr, spv::Op::OpVariable,
uint32_t(spv::StorageClass::Function), zero_id);
const uint32_t num_bindings_var = inst->result_id();
inst = builder.AddBinaryOp(func_uint_ptr, spv::Op::OpVariable,
uint32_t(spv::StorageClass::Function), zero_id);
const uint32_t init_status_var = inst->result_id();
const uint32_t desc_set_ptr_ptr = type_mgr->FindPointerToType(
desc_set_ptr_id_, spv::StorageClass::Function);
inst = builder.AddUnaryOp(desc_set_ptr_ptr, spv::Op::OpVariable,
uint32_t(spv::StorageClass::Function));
const uint32_t desc_set_ptr_var = inst->result_id();
get_decoration_mgr()->AddDecoration(
desc_set_ptr_var, uint32_t(spv::Decoration::AliasedPointer));
uint32_t check_label_id = TakeNextId();
auto check_label = NewLabel(check_label_id);
uint32_t skip_label_id = TakeNextId();
auto skip_label = NewLabel(skip_label_id);
inst = builder.AddBinaryOp(
GetBoolId(), spv::Op::OpUGreaterThanEqual, param_ids[kDescSet],
builder.GetUintConstantId(kDebugInputBindlessMaxDescSets));
const uint32_t desc_cmp_id = inst->result_id();
(void)builder.AddConditionalBranch(desc_cmp_id, check_label_id, skip_label_id,
skip_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
// set error
new_blk_ptr = MakeUnique<BasicBlock>(std::move(check_label));
builder.SetInsertPoint(&*new_blk_ptr);
builder.AddStore(error_var,
builder.GetUintConstantId(kInstErrorBindlessBounds));
builder.AddBranch(skip_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
// check descriptor set table entry is non-null
new_blk_ptr = MakeUnique<BasicBlock>(std::move(skip_label));
builder.SetInsertPoint(&*new_blk_ptr);
check_label_id = TakeNextId();
check_label = NewLabel(check_label_id);
skip_label_id = TakeNextId();
skip_label = NewLabel(skip_label_id);
inst = builder.AddLoad(GetUintId(), error_var);
uint32_t error_val_id = inst->result_id();
inst = builder.AddBinaryOp(GetBoolId(), spv::Op::OpIEqual, error_val_id,
zero_id);
uint32_t no_error_id = inst->result_id();
(void)builder.AddConditionalBranch(no_error_id, check_label_id, skip_label_id,
skip_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
new_blk_ptr = MakeUnique<BasicBlock>(std::move(check_label));
builder.SetInsertPoint(&*new_blk_ptr);
{
const uint32_t desc_set_ptr_ptr_sb = type_mgr->FindPointerToType(
desc_set_ptr_id_, spv::StorageClass::StorageBuffer);
inst = builder.AddAccessChain(desc_set_ptr_ptr_sb, input_buffer_id_,
{zero_id, param_ids[kDescSet]});
const uint32_t set_access_chain_id = inst->result_id();
inst = builder.AddLoad(desc_set_ptr_id_, set_access_chain_id);
const uint32_t desc_set_ptr_id = inst->result_id();
builder.AddStore(desc_set_ptr_var, desc_set_ptr_id);
inst = builder.AddUnaryOp(GetVecUintId(2), spv::Op::OpBitcast,
desc_set_ptr_id);
const uint32_t ptr_as_uvec_id = inst->result_id();
inst = builder.AddCompositeExtract(GetUintId(), ptr_as_uvec_id, {0});
const uint32_t uvec_x = inst->result_id();
inst = builder.AddBinaryOp(GetBoolId(), spv::Op::OpIEqual, uvec_x, zero_id);
const uint32_t x_is_zero_id = inst->result_id();
inst = builder.AddCompositeExtract(GetUintId(), ptr_as_uvec_id, {1});
const uint32_t uvec_y = inst->result_id();
inst = builder.AddBinaryOp(GetBoolId(), spv::Op::OpIEqual, uvec_y, zero_id);
const uint32_t y_is_zero_id = inst->result_id();
inst = builder.AddBinaryOp(GetBoolId(), spv::Op::OpLogicalAnd, x_is_zero_id,
y_is_zero_id);
const uint32_t is_null_id = inst->result_id();
const uint32_t error_label_id = TakeNextId();
auto error_label = NewLabel(error_label_id);
const uint32_t merge_label_id = TakeNextId();
auto merge_label = NewLabel(merge_label_id);
(void)builder.AddConditionalBranch(is_null_id, error_label_id,
merge_label_id, merge_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
// set error
new_blk_ptr = MakeUnique<BasicBlock>(std::move(error_label));
builder.SetInsertPoint(&*new_blk_ptr);
builder.AddStore(error_var,
builder.GetUintConstantId(kInstErrorBindlessBounds));
builder.AddBranch(merge_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
new_blk_ptr = MakeUnique<BasicBlock>(std::move(merge_label));
builder.SetInsertPoint(&*new_blk_ptr);
builder.AddBranch(skip_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
}
new_blk_ptr = MakeUnique<BasicBlock>(std::move(skip_label));
builder.SetInsertPoint(&*new_blk_ptr);
check_label_id = TakeNextId();
check_label = NewLabel(check_label_id);
skip_label_id = TakeNextId();
skip_label = NewLabel(skip_label_id);
inst = builder.AddLoad(GetUintId(), error_var);
error_val_id = inst->result_id();
inst = builder.AddBinaryOp(GetBoolId(), spv::Op::OpIEqual, error_val_id,
zero_id);
no_error_id = inst->result_id();
(void)builder.AddConditionalBranch(no_error_id, check_label_id, skip_label_id,
skip_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
// check binding is in range
new_blk_ptr = MakeUnique<BasicBlock>(std::move(check_label));
builder.SetInsertPoint(&*new_blk_ptr);
{
inst = builder.AddLoad(desc_set_ptr_id_, desc_set_ptr_var);
const uint32_t desc_set_ptr_id = inst->result_id();
inst = builder.AddAccessChain(uint_ptr, desc_set_ptr_id, {zero_id});
const uint32_t binding_access_chain_id = inst->result_id();
inst = builder.AddLoad(GetUintId(), binding_access_chain_id, 8);
const uint32_t num_bindings_id = inst->result_id();
builder.AddStore(num_bindings_var, num_bindings_id);
inst = builder.AddBinaryOp(GetBoolId(), spv::Op::OpUGreaterThanEqual,
param_ids[kDescBinding], num_bindings_id);
const uint32_t bindings_cmp_id = inst->result_id();
const uint32_t error_label_id = TakeNextId();
auto error_label = NewLabel(error_label_id);
const uint32_t merge_label_id = TakeNextId();
auto merge_label = NewLabel(merge_label_id);
(void)builder.AddConditionalBranch(bindings_cmp_id, error_label_id,
merge_label_id, merge_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
// set error
new_blk_ptr = MakeUnique<BasicBlock>(std::move(error_label));
builder.SetInsertPoint(&*new_blk_ptr);
builder.AddStore(error_var,
builder.GetUintConstantId(kInstErrorBindlessBounds));
builder.AddBranch(merge_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
new_blk_ptr = MakeUnique<BasicBlock>(std::move(merge_label));
builder.SetInsertPoint(&*new_blk_ptr);
builder.AddBranch(skip_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
}
// read binding length
new_blk_ptr = MakeUnique<BasicBlock>(std::move(skip_label));
builder.SetInsertPoint(&*new_blk_ptr);
check_label_id = TakeNextId();
check_label = NewLabel(check_label_id);
skip_label_id = TakeNextId();
skip_label = NewLabel(skip_label_id);
inst = builder.AddLoad(GetUintId(), error_var);
error_val_id = inst->result_id();
inst = builder.AddBinaryOp(GetBoolId(), spv::Op::OpIEqual, error_val_id,
zero_id);
no_error_id = inst->result_id();
(void)builder.AddConditionalBranch(no_error_id, check_label_id, skip_label_id,
skip_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
new_blk_ptr = MakeUnique<BasicBlock>(std::move(check_label));
builder.SetInsertPoint(&*new_blk_ptr);
{
inst = builder.AddLoad(desc_set_ptr_id_, desc_set_ptr_var);
const uint32_t desc_set_ptr_id = inst->result_id();
inst = builder.AddAccessChain(
uint_ptr, desc_set_ptr_id,
{{builder.GetUintConstantId(1), param_ids[kDescBinding]}});
const uint32_t length_ac_id = inst->result_id();
inst = builder.AddLoad(GetUintId(), length_ac_id, sizeof(uint32_t));
const uint32_t length_id = inst->result_id();
// Check descriptor index in bounds
inst = builder.AddBinaryOp(GetBoolId(), spv::Op::OpUGreaterThanEqual,
param_ids[kDescIndex], length_id);
const uint32_t desc_idx_range_id = inst->result_id();
const uint32_t error_label_id = TakeNextId();
auto error_label = NewLabel(error_label_id);
const uint32_t merge_label_id = TakeNextId();
auto merge_label = NewLabel(merge_label_id);
(void)builder.AddConditionalBranch(desc_idx_range_id, error_label_id,
merge_label_id, merge_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
// set error
new_blk_ptr = MakeUnique<BasicBlock>(std::move(error_label));
builder.SetInsertPoint(&*new_blk_ptr);
builder.AddStore(error_var,
builder.GetUintConstantId(kInstErrorBindlessBounds));
builder.AddStore(param5_var, length_id);
builder.AddBranch(merge_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
new_blk_ptr = MakeUnique<BasicBlock>(std::move(merge_label));
builder.SetInsertPoint(&*new_blk_ptr);
builder.AddBranch(skip_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
}
new_blk_ptr = MakeUnique<BasicBlock>(std::move(skip_label));
builder.SetInsertPoint(&*new_blk_ptr);
inst = builder.AddLoad(GetUintId(), error_var);
error_val_id = inst->result_id();
check_label_id = TakeNextId();
check_label = NewLabel(check_label_id);
skip_label_id = TakeNextId();
skip_label = NewLabel(skip_label_id);
inst = builder.AddLoad(GetUintId(), error_var);
error_val_id = inst->result_id();
inst = builder.AddBinaryOp(GetBoolId(), spv::Op::OpIEqual, zero_id,
error_val_id);
no_error_id = inst->result_id();
(void)builder.AddConditionalBranch(no_error_id, check_label_id, skip_label_id,
skip_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
// Read descriptor init status
new_blk_ptr = MakeUnique<BasicBlock>(std::move(check_label));
builder.SetInsertPoint(&*new_blk_ptr);
{
inst = builder.AddLoad(desc_set_ptr_id_, desc_set_ptr_var);
const uint32_t desc_set_ptr_id = inst->result_id();
inst = builder.AddLoad(GetUintId(), num_bindings_var);
const uint32_t num_bindings_id = inst->result_id();
inst =
builder.AddIAdd(GetUintId(), num_bindings_id, param_ids[kDescBinding]);
const uint32_t state_offset_id = inst->result_id();
inst = builder.AddAccessChain(
uint_ptr, desc_set_ptr_id,
{{builder.GetUintConstantId(1), state_offset_id}});
const uint32_t state_start_ac_id = inst->result_id();
inst = builder.AddLoad(GetUintId(), state_start_ac_id, sizeof(uint32_t));
const uint32_t state_start_id = inst->result_id();
inst = builder.AddIAdd(GetUintId(), state_start_id, param_ids[kDescIndex]);
const uint32_t state_entry_id = inst->result_id();
// Note: length starts from the beginning of the buffer, not the beginning
// of the data array
inst = builder.AddAccessChain(
uint_ptr, desc_set_ptr_id,
{{builder.GetUintConstantId(1), state_entry_id}});
const uint32_t init_ac_id = inst->result_id();
inst = builder.AddLoad(GetUintId(), init_ac_id, sizeof(uint32_t));
const uint32_t init_status_id = inst->result_id();
builder.AddStore(init_status_var, init_status_id);
// Check for uninitialized descriptor
inst = builder.AddBinaryOp(GetBoolId(), spv::Op::OpIEqual, init_status_id,
zero_id);
const uint32_t uninit_check_id = inst->result_id();
const uint32_t error_label_id = TakeNextId();
auto error_label = NewLabel(error_label_id);
const uint32_t merge_label_id = TakeNextId();
auto merge_label = NewLabel(merge_label_id);
(void)builder.AddConditionalBranch(uninit_check_id, error_label_id,
merge_label_id, merge_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
new_blk_ptr = MakeUnique<BasicBlock>(std::move(error_label));
builder.SetInsertPoint(&*new_blk_ptr);
builder.AddStore(error_var,
builder.GetUintConstantId(kInstErrorBindlessUninit));
builder.AddBranch(merge_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
new_blk_ptr = MakeUnique<BasicBlock>(std::move(merge_label));
builder.SetInsertPoint(&*new_blk_ptr);
builder.AddBranch(skip_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
}
// Check for OOB.
new_blk_ptr = MakeUnique<BasicBlock>(std::move(skip_label));
builder.SetInsertPoint(&*new_blk_ptr);
check_label_id = TakeNextId();
check_label = NewLabel(check_label_id);
skip_label_id = TakeNextId();
skip_label = NewLabel(skip_label_id);
inst = builder.AddLoad(GetUintId(), error_var);
error_val_id = inst->result_id();
inst = builder.AddBinaryOp(GetBoolId(), spv::Op::OpIEqual, error_val_id,
zero_id);
no_error_id = inst->result_id();
(void)builder.AddConditionalBranch(no_error_id, check_label_id, skip_label_id,
skip_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
new_blk_ptr = MakeUnique<BasicBlock>(std::move(check_label));
builder.SetInsertPoint(&*new_blk_ptr);
{
inst = builder.AddLoad(GetUintId(), init_status_var);
const uint32_t init_status_id = inst->result_id();
inst = builder.AddBinaryOp(GetBoolId(), spv::Op::OpUGreaterThanEqual,
param_ids[kByteOffset], init_status_id);
const uint32_t buf_offset_range_id = inst->result_id();
const uint32_t error_label_id = TakeNextId();
const uint32_t merge_label_id = TakeNextId();
auto error_label = NewLabel(error_label_id);
auto merge_label = NewLabel(merge_label_id);
(void)builder.AddConditionalBranch(buf_offset_range_id, error_label_id,
merge_label_id, merge_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
// set error
new_blk_ptr = MakeUnique<BasicBlock>(std::move(error_label));
builder.SetInsertPoint(&*new_blk_ptr);
builder.AddStore(error_var, builder.GetUintConstantId(kInstErrorOOB));
builder.AddStore(param5_var, param_ids[kByteOffset]);
builder.AddStore(param6_var, init_status_id);
builder.AddBranch(merge_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
new_blk_ptr = MakeUnique<BasicBlock>(std::move(merge_label));
builder.SetInsertPoint(&*new_blk_ptr);
builder.AddBranch(skip_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
}
// check for error
new_blk_ptr = MakeUnique<BasicBlock>(std::move(skip_label));
builder.SetInsertPoint(&*new_blk_ptr);
inst = builder.AddLoad(GetUintId(), error_var);
error_val_id = inst->result_id();
inst = builder.AddBinaryOp(GetBoolId(), spv::Op::OpINotEqual, zero_id,
error_val_id);
const uint32_t is_error_id = inst->result_id();
const uint32_t error_label_id = TakeNextId();
auto error_label = NewLabel(error_label_id);
const uint32_t merge_label_id = TakeNextId();
auto merge_label = NewLabel(merge_label_id);
(void)builder.AddConditionalBranch(is_error_id, error_label_id,
merge_label_id, merge_label_id);
func->AddBasicBlock(std::move(new_blk_ptr));
new_blk_ptr = MakeUnique<BasicBlock>(std::move(error_label));
builder.SetInsertPoint(&*new_blk_ptr);
// error output
inst = builder.AddLoad(GetUintId(), param5_var);
const uint32_t param5_val_id = inst->result_id();
inst = builder.AddLoad(GetUintId(), param6_var);
const uint32_t param6_val_id = inst->result_id();
GenDebugStreamWrite(
param_ids[kShaderId], param_ids[kInstructionIndex], param_ids[kStageInfo],
{error_val_id, param_ids[kDescSet], param_ids[kDescBinding],
param_ids[kDescIndex], param5_val_id, param6_val_id},
&builder);
(void)builder.AddUnaryOp(0, spv::Op::OpReturnValue, false_id);
func->AddBasicBlock(std::move(new_blk_ptr));
// Success return
new_blk_ptr = MakeUnique<BasicBlock>(std::move(merge_label));
builder.SetInsertPoint(&*new_blk_ptr);
(void)builder.AddUnaryOp(0, spv::Op::OpReturnValue, true_id);
func->AddBasicBlock(std::move(new_blk_ptr));
func->SetFunctionEnd(EndFunction());
context()->AddFunction(std::move(func));
context()->AddDebug2Inst(NewGlobalName(func_id, "desc_check"));
static const std::string func_name{"inst_bindless_check_desc"};
context()->AddFunctionDeclaration(std::move(func));
context()->AddDebug2Inst(NewName(func_id, func_name));
std::vector<Operand> operands{
{spv_operand_type_t::SPV_OPERAND_TYPE_ID, {func_id}},
{spv_operand_type_t::SPV_OPERAND_TYPE_LITERAL_INTEGER,
{uint32_t(spv::Decoration::LinkageAttributes)}},
{spv_operand_type_t::SPV_OPERAND_TYPE_LITERAL_STRING,
utils::MakeVector(func_name.c_str())},
{spv_operand_type_t::SPV_OPERAND_TYPE_LINKAGE_TYPE,
{uint32_t(spv::LinkageType::Import)}},
};
get_decoration_mgr()->AddDecoration(spv::Op::OpDecorate, operands);
desc_check_func_id_ = func_id;
check_desc_func_id_ = func_id;
// Make sure function doesn't get processed by
// InstrumentPass::InstProcessCallTreeFromRoots()
param2output_func_id_[3] = func_id;
return desc_check_func_id_;
return check_desc_func_id_;
}
// clang-format off
// GLSL:
// result = inst_bindless_desc_check(shader_id, inst_idx, stage_info, desc_set, binding, desc_idx, offset);
// result = inst_bindless_check_desc(shader_id, inst_idx, stage_info, desc_set, binding, desc_idx, offset);
//
// clang-format on
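// Behavioral sketch (illustrative only, not generated verbatim): the call
// returns true when the descriptor is initialized and offset lies within its
// reported bounds; otherwise the generated function writes an error record to
// the debug output stream and returns false, and the caller substitutes a
// null/zero value for the original reference.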
uint32_t InstBindlessCheckPass::GenDescCheckCall(
@ -1134,8 +573,7 @@ uint32_t InstBindlessCheckPass::GenLastByteIdx(RefAnalysis* ref,
}
void InstBindlessCheckPass::GenCheckCode(
uint32_t check_id, uint32_t error_id, uint32_t offset_id,
uint32_t length_id, uint32_t stage_idx, RefAnalysis* ref,
uint32_t check_id, RefAnalysis* ref,
std::vector<std::unique_ptr<BasicBlock>>* new_blocks) {
BasicBlock* back_blk_ptr = &*new_blocks->back();
InstructionBuilder builder(
@ -1164,31 +602,7 @@ void InstBindlessCheckPass::GenCheckCode(
// Gen invalid block
new_blk_ptr.reset(new BasicBlock(std::move(invalid_label)));
builder.SetInsertPoint(&*new_blk_ptr);
if (error_id != 0) {
const uint32_t u_shader_id = builder.GetUintConstantId(shader_id_);
const uint32_t u_inst_id =
builder.GetUintConstantId(ref->ref_inst->unique_id());
const uint32_t shader_info_id = GenStageInfo(stage_idx, &builder);
const uint32_t u_set_id = builder.GetUintConstantId(ref->set);
const uint32_t u_binding_id = builder.GetUintConstantId(ref->binding);
const uint32_t u_index_id = GenUintCastCode(ref->desc_idx_id, &builder);
const uint32_t u_length_id = GenUintCastCode(length_id, &builder);
if (offset_id != 0) {
const uint32_t u_offset_id = GenUintCastCode(offset_id, &builder);
// Buffer OOB
GenDebugStreamWrite(u_shader_id, u_inst_id, shader_info_id,
{error_id, u_set_id, u_binding_id, u_index_id,
u_offset_id, u_length_id},
&builder);
} else {
// Uninitialized Descriptor - Return additional unused zero so all error
// modes will use same debug stream write function
GenDebugStreamWrite(u_shader_id, u_inst_id, shader_info_id,
{error_id, u_set_id, u_binding_id, u_index_id,
u_length_id, builder.GetUintConstantId(0)},
&builder);
}
}
// Generate a ConstantNull, converting to uint64 if the type cannot have a
// null constant.
if (new_ref_id != 0) {
analysis::TypeManager* type_mgr = context()->get_type_mgr();
@ -1283,7 +697,7 @@ void InstBindlessCheckPass::GenDescCheckCode(
// Generate runtime initialization/bounds test code with true branch
// being full reference and false branch being zero
// for the referenced value.
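// Conceptually (illustrative sketch, not the literal generated SPIR-V):
//   value = check_ok ? <original reference result> : null/zero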
GenCheckCode(check_id, 0, 0, 0, stage_idx, &ref, new_blocks);
GenCheckCode(check_id, &ref, new_blocks);
// Move original block's remaining code into remainder/merge block and add
// to new blocks
@ -1310,7 +724,20 @@ void InstBindlessCheckPass::InitializeInstBindlessCheck() {
}
Pass::Status InstBindlessCheckPass::ProcessImpl() {
bool modified = false;
// The memory model and linkage must always be updated for spirv-link to work
// correctly.
AddStorageBufferExt();
if (!get_feature_mgr()->HasExtension(kSPV_KHR_physical_storage_buffer)) {
context()->AddExtension("SPV_KHR_physical_storage_buffer");
}
context()->AddCapability(spv::Capability::PhysicalStorageBufferAddresses);
Instruction* memory_model = get_module()->GetMemoryModel();
memory_model->SetInOperand(
0u, {uint32_t(spv::AddressingModel::PhysicalStorageBuffer64)});
context()->AddCapability(spv::Capability::Linkage);
InstProcessFunction pfn =
[this](BasicBlock::iterator ref_inst_itr,
UptrVectorIterator<BasicBlock> ref_block_itr, uint32_t stage_idx,
@ -1319,8 +746,10 @@ Pass::Status InstBindlessCheckPass::ProcessImpl() {
new_blocks);
};
modified = InstProcessEntryPointCallTree(pfn);
return modified ? Status::SuccessWithChange : Status::SuccessWithoutChange;
InstProcessEntryPointCallTree(pfn);
// This pass always changes the memory model, so that linking will work
// properly.
return Status::SuccessWithChange;
}
Pass::Status InstBindlessCheckPass::Process() {


@ -28,8 +28,8 @@ namespace opt {
// external design may change as the layer evolves.
class InstBindlessCheckPass : public InstrumentPass {
public:
InstBindlessCheckPass(uint32_t desc_set, uint32_t shader_id)
: InstrumentPass(desc_set, shader_id, kInstValidationIdBindless, true) {}
InstBindlessCheckPass(uint32_t shader_id)
: InstrumentPass(0, shader_id, true) {}
~InstBindlessCheckPass() override = default;
@ -44,8 +44,6 @@ class InstBindlessCheckPass : public InstrumentPass {
uint32_t stage_idx,
std::vector<std::unique_ptr<BasicBlock>>* new_blocks);
void SetupInputBufferIds();
uint32_t GenDescCheckFunctionId();
uint32_t GenDescCheckCall(uint32_t inst_idx, uint32_t stage_idx,
@ -107,8 +105,7 @@ class InstBindlessCheckPass : public InstrumentPass {
// writes debug error output utilizing |ref|. Generate merge block for valid
// and invalid branches. Kill original reference.
void GenCheckCode(uint32_t check_id, uint32_t error_id, uint32_t offset_id,
uint32_t length_id, uint32_t stage_idx, RefAnalysis* ref,
void GenCheckCode(uint32_t check_id, RefAnalysis* ref,
std::vector<std::unique_ptr<BasicBlock>>* new_blocks);
// Initialize state for instrumenting bindless checking
@ -124,11 +121,7 @@ class InstBindlessCheckPass : public InstrumentPass {
// Mapping from variable to binding
std::unordered_map<uint32_t, uint32_t> var2binding_;
uint32_t desc_check_func_id_{0};
uint32_t desc_set_type_id_{0};
uint32_t desc_set_ptr_id_{0};
uint32_t input_buffer_struct_id_{0};
uint32_t input_buffer_ptr_id_{0};
uint32_t check_desc_func_id_{0};
};
} // namespace opt


@ -19,24 +19,6 @@
namespace spvtools {
namespace opt {
bool InstBuffAddrCheckPass::InstrumentFunction(Function* func,
uint32_t stage_idx,
InstProcessFunction& pfn) {
// The bindless instrumentation pass adds functions that use
// BufferDeviceAddress. They should not be instrumented by this pass.
Instruction* func_name_inst =
context()->GetNames(func->DefInst().result_id()).begin()->second;
if (func_name_inst) {
static const std::string kPrefix{"inst_bindless_"};
std::string func_name = func_name_inst->GetOperand(1).AsString();
if (func_name.size() >= kPrefix.size() &&
func_name.compare(0, kPrefix.size(), kPrefix) == 0) {
return false;
}
}
return InstrumentPass::InstrumentFunction(func, stage_idx, pfn);
}
uint32_t InstBuffAddrCheckPass::CloneOriginalReference(
Instruction* ref_inst, InstructionBuilder* builder) {
// Clone original ref with new result id (if load)
@ -76,8 +58,7 @@ bool InstBuffAddrCheckPass::IsPhysicalBuffAddrReference(Instruction* ref_inst) {
// TODO(greg-lunarg): Refactor with InstBindlessCheckPass::GenCheckCode() ??
void InstBuffAddrCheckPass::GenCheckCode(
uint32_t check_id, uint32_t error_id, uint32_t ref_uptr_id,
uint32_t stage_idx, Instruction* ref_inst,
uint32_t check_id, Instruction* ref_inst,
std::vector<std::unique_ptr<BasicBlock>>* new_blocks) {
BasicBlock* back_blk_ptr = &*new_blocks->back();
InstructionBuilder builder(
@ -104,20 +85,6 @@ void InstBuffAddrCheckPass::GenCheckCode(
// Gen invalid block
new_blk_ptr.reset(new BasicBlock(std::move(invalid_label)));
builder.SetInsertPoint(&*new_blk_ptr);
// Convert uptr from uint64 to 2 uint32
Instruction* lo_uptr_inst =
builder.AddUnaryOp(GetUintId(), spv::Op::OpUConvert, ref_uptr_id);
Instruction* rshift_uptr_inst =
builder.AddBinaryOp(GetUint64Id(), spv::Op::OpShiftRightLogical,
ref_uptr_id, builder.GetUintConstantId(32));
Instruction* hi_uptr_inst = builder.AddUnaryOp(
GetUintId(), spv::Op::OpUConvert, rshift_uptr_inst->result_id());
GenDebugStreamWrite(
builder.GetUintConstantId(shader_id_),
builder.GetUintConstantId(uid2offset_[ref_inst->unique_id()]),
GenStageInfo(stage_idx, &builder),
{error_id, lo_uptr_inst->result_id(), hi_uptr_inst->result_id()},
&builder);
// Gen zero for invalid load. If pointer type, need to convert uint64
// zero to pointer; cannot create ConstantNull of pointer type.
uint32_t null_id = 0;
@ -206,201 +173,86 @@ void InstBuffAddrCheckPass::AddParam(uint32_t type_id,
(*input_func)->AddParameter(std::move(param_inst));
}
// This is a stub function for use with Import linkage
// clang-format off
// GLSL:
// bool inst_buff_addr_search_and_test(const uint shader_id, const uint inst_num, const uvec4 stage_info,
//                                     const uint64 ref_ptr, const uint length) {
// }
// clang-format on
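// Usage sketch (illustrative only; argument names follow the GLSL signature
// above): the declaration receives a LinkageAttributes/Import decoration
// below, so the body is expected to be supplied externally at spirv-link
// time, and GenSearchAndTest() emits a call roughly equivalent to
//   bool ok = inst_buff_addr_search_and_test(shader_id, inst_num, stage_info,
//                                            ref_ptr, length);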
uint32_t InstBuffAddrCheckPass::GetSearchAndTestFuncId() {
if (search_test_func_id_ == 0) {
// Generate function "bool search_and_test(uint64_t ref_ptr, uint32_t len)"
// which searches the input buffer for the buffer that most likely contains
// the pointer value |ref_ptr| and verifies that the entire reference of
// length |len| bytes is contained in that buffer.
search_test_func_id_ = TakeNextId();
analysis::TypeManager* type_mgr = context()->get_type_mgr();
std::vector<const analysis::Type*> param_types = {
type_mgr->GetType(GetUint64Id()), type_mgr->GetType(GetUintId())};
analysis::Function func_ty(type_mgr->GetType(GetBoolId()), param_types);
analysis::Type* reg_func_ty = type_mgr->GetRegisteredType(&func_ty);
std::unique_ptr<Instruction> func_inst(
new Instruction(get_module()->context(), spv::Op::OpFunction,
GetBoolId(), search_test_func_id_,
{{spv_operand_type_t::SPV_OPERAND_TYPE_LITERAL_INTEGER,
{uint32_t(spv::FunctionControlMask::MaskNone)}},
{spv_operand_type_t::SPV_OPERAND_TYPE_ID,
{type_mgr->GetTypeInstruction(reg_func_ty)}}}));
get_def_use_mgr()->AnalyzeInstDefUse(&*func_inst);
std::unique_ptr<Function> input_func =
MakeUnique<Function>(std::move(func_inst));
std::vector<uint32_t> param_vec;
// Add ref_ptr and length parameters
AddParam(GetUint64Id(), &param_vec, &input_func);
AddParam(GetUintId(), &param_vec, &input_func);
// Empty first block.
uint32_t first_blk_id = TakeNextId();
std::unique_ptr<Instruction> first_blk_label(NewLabel(first_blk_id));
std::unique_ptr<BasicBlock> first_blk_ptr =
MakeUnique<BasicBlock>(std::move(first_blk_label));
InstructionBuilder builder(
context(), &*first_blk_ptr,
IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
uint32_t hdr_blk_id = TakeNextId();
// Branch to search loop header
std::unique_ptr<Instruction> hdr_blk_label(NewLabel(hdr_blk_id));
(void)builder.AddBranch(hdr_blk_id);
input_func->AddBasicBlock(std::move(first_blk_ptr));
// Linear search loop header block
// TODO(greg-lunarg): Implement binary search
std::unique_ptr<BasicBlock> hdr_blk_ptr =
MakeUnique<BasicBlock>(std::move(hdr_blk_label));
builder.SetInsertPoint(&*hdr_blk_ptr);
// Phi for search index. Starts with 1.
uint32_t cont_blk_id = TakeNextId();
std::unique_ptr<Instruction> cont_blk_label(NewLabel(cont_blk_id));
// Deal with def-use cycle caused by search loop index computation.
// Create Add and Phi instructions first, then do Def analysis on Add.
// Add Phi and Add instructions and do Use analysis later.
uint32_t idx_phi_id = TakeNextId();
uint32_t idx_inc_id = TakeNextId();
std::unique_ptr<Instruction> idx_inc_inst(new Instruction(
context(), spv::Op::OpIAdd, GetUintId(), idx_inc_id,
{{spv_operand_type_t::SPV_OPERAND_TYPE_ID, {idx_phi_id}},
{spv_operand_type_t::SPV_OPERAND_TYPE_ID,
{builder.GetUintConstantId(1u)}}}));
std::unique_ptr<Instruction> idx_phi_inst(new Instruction(
context(), spv::Op::OpPhi, GetUintId(), idx_phi_id,
{{spv_operand_type_t::SPV_OPERAND_TYPE_ID,
{builder.GetUintConstantId(1u)}},
{spv_operand_type_t::SPV_OPERAND_TYPE_ID, {first_blk_id}},
{spv_operand_type_t::SPV_OPERAND_TYPE_ID, {idx_inc_id}},
{spv_operand_type_t::SPV_OPERAND_TYPE_ID, {cont_blk_id}}}));
get_def_use_mgr()->AnalyzeInstDef(&*idx_inc_inst);
// Add (previously created) search index phi
(void)builder.AddInstruction(std::move(idx_phi_inst));
// LoopMerge
uint32_t bound_test_blk_id = TakeNextId();
std::unique_ptr<Instruction> bound_test_blk_label(
NewLabel(bound_test_blk_id));
(void)builder.AddLoopMerge(bound_test_blk_id, cont_blk_id,
uint32_t(spv::LoopControlMask::MaskNone));
// Branch to continue/work block
(void)builder.AddBranch(cont_blk_id);
input_func->AddBasicBlock(std::move(hdr_blk_ptr));
// Continue/Work Block. Read next buffer pointer and break if greater
// than ref_ptr arg.
std::unique_ptr<BasicBlock> cont_blk_ptr =
MakeUnique<BasicBlock>(std::move(cont_blk_label));
builder.SetInsertPoint(&*cont_blk_ptr);
// Add (previously created) search index increment now.
(void)builder.AddInstruction(std::move(idx_inc_inst));
// Load next buffer address from debug input buffer
uint32_t ibuf_id = GetInputBufferId();
uint32_t ibuf_ptr_id = GetInputBufferPtrId();
Instruction* uptr_ac_inst = builder.AddTernaryOp(
ibuf_ptr_id, spv::Op::OpAccessChain, ibuf_id,
builder.GetUintConstantId(kDebugInputDataOffset), idx_inc_id);
uint32_t ibuf_type_id = GetInputBufferTypeId();
Instruction* uptr_load_inst = builder.AddUnaryOp(
ibuf_type_id, spv::Op::OpLoad, uptr_ac_inst->result_id());
// If loaded address greater than ref_ptr arg, break, else branch back to
// loop header
Instruction* uptr_test_inst =
builder.AddBinaryOp(GetBoolId(), spv::Op::OpUGreaterThan,
uptr_load_inst->result_id(), param_vec[0]);
(void)builder.AddConditionalBranch(
uptr_test_inst->result_id(), bound_test_blk_id, hdr_blk_id, kInvalidId,
uint32_t(spv::SelectionControlMask::MaskNone));
input_func->AddBasicBlock(std::move(cont_blk_ptr));
// Bounds test block. Read length of selected buffer and test that
// all len arg bytes are in buffer.
std::unique_ptr<BasicBlock> bound_test_blk_ptr =
MakeUnique<BasicBlock>(std::move(bound_test_blk_label));
builder.SetInsertPoint(&*bound_test_blk_ptr);
// Decrement index to point to previous/candidate buffer address
Instruction* cand_idx_inst =
builder.AddBinaryOp(GetUintId(), spv::Op::OpISub, idx_inc_id,
builder.GetUintConstantId(1u));
// Load candidate buffer address
Instruction* cand_ac_inst =
builder.AddTernaryOp(ibuf_ptr_id, spv::Op::OpAccessChain, ibuf_id,
builder.GetUintConstantId(kDebugInputDataOffset),
cand_idx_inst->result_id());
Instruction* cand_load_inst = builder.AddUnaryOp(
ibuf_type_id, spv::Op::OpLoad, cand_ac_inst->result_id());
// Compute offset of ref_ptr from candidate buffer address
Instruction* offset_inst =
builder.AddBinaryOp(ibuf_type_id, spv::Op::OpISub, param_vec[0],
cand_load_inst->result_id());
// Convert ref length to uint64
Instruction* ref_len_64_inst =
builder.AddUnaryOp(ibuf_type_id, spv::Op::OpUConvert, param_vec[1]);
// Add ref length to ref offset to compute end of reference
Instruction* ref_end_inst = builder.AddBinaryOp(
ibuf_type_id, spv::Op::OpIAdd, offset_inst->result_id(),
ref_len_64_inst->result_id());
// Load starting index of lengths in input buffer and convert to uint32
Instruction* len_start_ac_inst =
builder.AddTernaryOp(ibuf_ptr_id, spv::Op::OpAccessChain, ibuf_id,
builder.GetUintConstantId(kDebugInputDataOffset),
builder.GetUintConstantId(0u));
Instruction* len_start_load_inst = builder.AddUnaryOp(
ibuf_type_id, spv::Op::OpLoad, len_start_ac_inst->result_id());
Instruction* len_start_32_inst = builder.AddUnaryOp(
GetUintId(), spv::Op::OpUConvert, len_start_load_inst->result_id());
// Decrement search index to get candidate buffer length index
Instruction* cand_len_idx_inst = builder.AddBinaryOp(
GetUintId(), spv::Op::OpISub, cand_idx_inst->result_id(),
builder.GetUintConstantId(1u));
// Add candidate length index to start index
Instruction* len_idx_inst = builder.AddBinaryOp(
GetUintId(), spv::Op::OpIAdd, cand_len_idx_inst->result_id(),
len_start_32_inst->result_id());
// Load candidate buffer length
Instruction* len_ac_inst =
builder.AddTernaryOp(ibuf_ptr_id, spv::Op::OpAccessChain, ibuf_id,
builder.GetUintConstantId(kDebugInputDataOffset),
len_idx_inst->result_id());
Instruction* len_load_inst = builder.AddUnaryOp(
ibuf_type_id, spv::Op::OpLoad, len_ac_inst->result_id());
// Test if reference end within candidate buffer length
Instruction* len_test_inst = builder.AddBinaryOp(
GetBoolId(), spv::Op::OpULessThanEqual, ref_end_inst->result_id(),
len_load_inst->result_id());
// Return test result
(void)builder.AddUnaryOp(0, spv::Op::OpReturnValue,
len_test_inst->result_id());
// Close block
input_func->AddBasicBlock(std::move(bound_test_blk_ptr));
// Close function and add function to module
std::unique_ptr<Instruction> func_end_inst(new Instruction(
get_module()->context(), spv::Op::OpFunctionEnd, 0, 0, {}));
get_def_use_mgr()->AnalyzeInstDefUse(&*func_end_inst);
input_func->SetFunctionEnd(std::move(func_end_inst));
context()->AddFunction(std::move(input_func));
context()->AddDebug2Inst(
NewGlobalName(search_test_func_id_, "search_and_test"));
enum {
kShaderId = 0,
kInstructionIndex = 1,
kStageInfo = 2,
kRefPtr = 3,
kLength = 4,
kNumArgs
};
if (search_test_func_id_ != 0) {
return search_test_func_id_;
}
// Generate function "bool search_and_test(uint64_t ref_ptr, uint32_t len)"
// which searches the input buffer for the buffer that most likely contains
// the pointer value |ref_ptr| and verifies that the entire reference of
// length |len| bytes is contained in that buffer.
analysis::TypeManager* type_mgr = context()->get_type_mgr();
const analysis::Integer* uint_type = GetInteger(32, false);
const analysis::Vector v4uint(uint_type, 4);
const analysis::Type* v4uint_type = type_mgr->GetRegisteredType(&v4uint);
std::vector<const analysis::Type*> param_types = {
uint_type, uint_type, v4uint_type, type_mgr->GetType(GetUint64Id()),
uint_type};
const std::string func_name{"inst_buff_addr_search_and_test"};
const uint32_t func_id = TakeNextId();
std::unique_ptr<Function> func =
StartFunction(func_id, type_mgr->GetBoolType(), param_types);
func->SetFunctionEnd(EndFunction());
context()->AddFunctionDeclaration(std::move(func));
context()->AddDebug2Inst(NewName(func_id, func_name));
std::vector<Operand> operands{
{spv_operand_type_t::SPV_OPERAND_TYPE_ID, {func_id}},
{spv_operand_type_t::SPV_OPERAND_TYPE_LITERAL_INTEGER,
{uint32_t(spv::Decoration::LinkageAttributes)}},
{spv_operand_type_t::SPV_OPERAND_TYPE_LITERAL_STRING,
utils::MakeVector(func_name.c_str())},
{spv_operand_type_t::SPV_OPERAND_TYPE_LINKAGE_TYPE,
{uint32_t(spv::LinkageType::Import)}},
};
get_decoration_mgr()->AddDecoration(spv::Op::OpDecorate, operands);
search_test_func_id_ = func_id;
return search_test_func_id_;
}
uint32_t InstBuffAddrCheckPass::GenSearchAndTest(Instruction* ref_inst,
InstructionBuilder* builder,
uint32_t* ref_uptr_id) {
uint32_t* ref_uptr_id,
uint32_t stage_idx) {
// Enable Int64 if necessary
context()->AddCapability(spv::Capability::Int64);
// Convert reference pointer to uint64
uint32_t ref_ptr_id = ref_inst->GetSingleWordInOperand(0);
const uint32_t ref_ptr_id = ref_inst->GetSingleWordInOperand(0);
Instruction* ref_uptr_inst =
builder->AddUnaryOp(GetUint64Id(), spv::Op::OpConvertPtrToU, ref_ptr_id);
*ref_uptr_id = ref_uptr_inst->result_id();
// Compute reference length in bytes
analysis::DefUseManager* du_mgr = get_def_use_mgr();
Instruction* ref_ptr_inst = du_mgr->GetDef(ref_ptr_id);
uint32_t ref_ptr_ty_id = ref_ptr_inst->type_id();
const uint32_t ref_ptr_ty_id = ref_ptr_inst->type_id();
Instruction* ref_ptr_ty_inst = du_mgr->GetDef(ref_ptr_ty_id);
uint32_t ref_len = GetTypeLength(ref_ptr_ty_inst->GetSingleWordInOperand(1));
uint32_t ref_len_id = builder->GetUintConstantId(ref_len);
const uint32_t ref_len =
GetTypeLength(ref_ptr_ty_inst->GetSingleWordInOperand(1));
// Gen call to search and test function
Instruction* call_inst = builder->AddFunctionCall(
GetBoolId(), GetSearchAndTestFuncId(), {*ref_uptr_id, ref_len_id});
uint32_t retval = call_inst->result_id();
return retval;
const uint32_t func_id = GetSearchAndTestFuncId();
const std::vector<uint32_t> args = {
builder->GetUintConstantId(shader_id_),
builder->GetUintConstantId(ref_inst->unique_id()),
GenStageInfo(stage_idx, builder), *ref_uptr_id,
builder->GetUintConstantId(ref_len)};
return GenReadFunctionCall(GetBoolId(), func_id, args, builder);
}
void InstBuffAddrCheckPass::GenBuffAddrCheckCode(
@ -418,16 +270,16 @@ void InstBuffAddrCheckPass::GenBuffAddrCheckCode(
context(), &*new_blk_ptr,
IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
new_blocks->push_back(std::move(new_blk_ptr));
uint32_t error_id = builder.GetUintConstantId(kInstErrorBuffAddrUnallocRef);
// Generate code to do search and test if all bytes of reference
// are within a listed buffer. Return reference pointer converted to uint64.
uint32_t ref_uptr_id;
uint32_t valid_id = GenSearchAndTest(ref_inst, &builder, &ref_uptr_id);
uint32_t valid_id =
GenSearchAndTest(ref_inst, &builder, &ref_uptr_id, stage_idx);
// Generate test of search results with true branch
// being full reference and false branch being debug output and zero
// for the referenced value.
GenCheckCode(valid_id, error_id, ref_uptr_id, stage_idx, ref_inst,
new_blocks);
GenCheckCode(valid_id, ref_inst, new_blocks);
// Move original block's remaining code into remainder/merge block and add
// to new blocks
BasicBlock* back_blk_ptr = &*new_blocks->back();
@ -442,6 +294,20 @@ void InstBuffAddrCheckPass::InitInstBuffAddrCheck() {
}
Pass::Status InstBuffAddrCheckPass::ProcessImpl() {
// The memory model and linkage must always be updated for spirv-link to work
// correctly.
AddStorageBufferExt();
if (!get_feature_mgr()->HasExtension(kSPV_KHR_physical_storage_buffer)) {
context()->AddExtension("SPV_KHR_physical_storage_buffer");
}
context()->AddCapability(spv::Capability::PhysicalStorageBufferAddresses);
Instruction* memory_model = get_module()->GetMemoryModel();
memory_model->SetInOperand(
0u, {uint32_t(spv::AddressingModel::PhysicalStorageBuffer64)});
context()->AddCapability(spv::Capability::Int64);
context()->AddCapability(spv::Capability::Linkage);
// Perform bindless bounds check on each entry point function in module
InstProcessFunction pfn =
[this](BasicBlock::iterator ref_inst_itr,
@ -450,14 +316,13 @@ Pass::Status InstBuffAddrCheckPass::ProcessImpl() {
return GenBuffAddrCheckCode(ref_inst_itr, ref_block_itr, stage_idx,
new_blocks);
};
bool modified = InstProcessEntryPointCallTree(pfn);
return modified ? Status::SuccessWithChange : Status::SuccessWithoutChange;
InstProcessEntryPointCallTree(pfn);
// This pass always changes the memory model, so that linking will work
// properly.
return Status::SuccessWithChange;
}
Pass::Status InstBuffAddrCheckPass::Process() {
if (!get_feature_mgr()->HasCapability(
spv::Capability::PhysicalStorageBufferAddressesEXT))
return Status::SuccessWithoutChange;
InitInstBuffAddrCheck();
return ProcessImpl();
}


@ -29,10 +29,9 @@ namespace opt {
class InstBuffAddrCheckPass : public InstrumentPass {
public:
// For test harness only
InstBuffAddrCheckPass() : InstrumentPass(7, 23, kInstValidationIdBuffAddr) {}
InstBuffAddrCheckPass() : InstrumentPass(0, 23) {}
// For all other interfaces
InstBuffAddrCheckPass(uint32_t desc_set, uint32_t shader_id)
: InstrumentPass(desc_set, shader_id, kInstValidationIdBuffAddr) {}
InstBuffAddrCheckPass(uint32_t shader_id) : InstrumentPass(0, shader_id) {}
~InstBuffAddrCheckPass() override = default;
@ -41,9 +40,6 @@ class InstBuffAddrCheckPass : public InstrumentPass {
const char* name() const override { return "inst-buff-addr-check-pass"; }
bool InstrumentFunction(Function* func, uint32_t stage_idx,
InstProcessFunction& pfn) override;
private:
// Return byte length of type |type_id|. Must be int, float, vector, matrix,
// struct, array or physical pointer. Uses std430 alignment and sizes.
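// (For example, under std430 a uvec2 occupies 8 bytes, a vec3 member is
// aligned to 16 bytes, and struct { vec3 a; float b; } has a length of 16.)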
@ -61,7 +57,7 @@ class InstBuffAddrCheckPass : public InstrumentPass {
// are within the buffer. Returns id of boolean value which is true if
// search and test is successful, false otherwise.
uint32_t GenSearchAndTest(Instruction* ref_inst, InstructionBuilder* builder,
uint32_t* ref_uptr_id);
uint32_t* ref_uptr_id, uint32_t stage_idx);
// This function does checking instrumentation on a single
// instruction which references through a physical storage buffer address.
@ -114,8 +110,7 @@ class InstBuffAddrCheckPass : public InstrumentPass {
// writes debug error output utilizing |ref_inst|. Generate merge block for
// valid and invalid reference blocks. Kill original reference.
void GenCheckCode(uint32_t check_id, uint32_t error_id, uint32_t length_id,
uint32_t stage_idx, Instruction* ref_inst,
void GenCheckCode(uint32_t check_id, Instruction* ref_inst,
std::vector<std::unique_ptr<BasicBlock>>* new_blocks);
// Initialize state for instrumenting physical buffer address checking


@ -16,6 +16,7 @@
#include "inst_debug_printf_pass.h"
#include "source/spirv_constant.h"
#include "source/util/string_utils.h"
#include "spirv/unified1/NonSemanticDebugPrintf.h"
@ -210,9 +211,244 @@ void InstDebugPrintfPass::GenDebugPrintfCode(
new_blocks->push_back(std::move(new_blk_ptr));
}
// Return id for output buffer
uint32_t InstDebugPrintfPass::GetOutputBufferId() {
if (output_buffer_id_ == 0) {
// If not created yet, create one
analysis::DecorationManager* deco_mgr = get_decoration_mgr();
analysis::TypeManager* type_mgr = context()->get_type_mgr();
analysis::RuntimeArray* reg_uint_rarr_ty = GetUintRuntimeArrayType(32);
analysis::Integer* reg_uint_ty = GetInteger(32, false);
analysis::Type* reg_buf_ty =
GetStruct({reg_uint_ty, reg_uint_ty, reg_uint_rarr_ty});
uint32_t obufTyId = type_mgr->GetTypeInstruction(reg_buf_ty);
// By the Vulkan spec, a pre-existing struct containing a RuntimeArray
// must be a block, and will therefore be decorated with Block. Therefore
// the undecorated type returned here will not be pre-existing and can
// safely be decorated. Since this type is now decorated, it is out of
// sync with the TypeManager and therefore the TypeManager must be
// invalidated after this pass.
assert(context()->get_def_use_mgr()->NumUses(obufTyId) == 0 &&
"used struct type returned");
deco_mgr->AddDecoration(obufTyId, uint32_t(spv::Decoration::Block));
deco_mgr->AddMemberDecoration(obufTyId, kDebugOutputFlagsOffset,
uint32_t(spv::Decoration::Offset), 0);
deco_mgr->AddMemberDecoration(obufTyId, kDebugOutputSizeOffset,
uint32_t(spv::Decoration::Offset), 4);
deco_mgr->AddMemberDecoration(obufTyId, kDebugOutputDataOffset,
uint32_t(spv::Decoration::Offset), 8);
uint32_t obufTyPtrId_ =
type_mgr->FindPointerToType(obufTyId, spv::StorageClass::StorageBuffer);
output_buffer_id_ = TakeNextId();
std::unique_ptr<Instruction> newVarOp(new Instruction(
context(), spv::Op::OpVariable, obufTyPtrId_, output_buffer_id_,
{{spv_operand_type_t::SPV_OPERAND_TYPE_LITERAL_INTEGER,
{uint32_t(spv::StorageClass::StorageBuffer)}}}));
context()->AddGlobalValue(std::move(newVarOp));
context()->AddDebug2Inst(NewGlobalName(obufTyId, "OutputBuffer"));
context()->AddDebug2Inst(NewMemberName(obufTyId, 0, "flags"));
context()->AddDebug2Inst(NewMemberName(obufTyId, 1, "written_count"));
context()->AddDebug2Inst(NewMemberName(obufTyId, 2, "data"));
context()->AddDebug2Inst(NewGlobalName(output_buffer_id_, "output_buffer"));
deco_mgr->AddDecorationVal(
output_buffer_id_, uint32_t(spv::Decoration::DescriptorSet), desc_set_);
deco_mgr->AddDecorationVal(output_buffer_id_,
uint32_t(spv::Decoration::Binding),
GetOutputBufferBinding());
AddStorageBufferExt();
if (get_module()->version() >= SPV_SPIRV_VERSION_WORD(1, 4)) {
// Add the new buffer to all entry points.
for (auto& entry : get_module()->entry_points()) {
entry.AddOperand({SPV_OPERAND_TYPE_ID, {output_buffer_id_}});
context()->AnalyzeUses(&entry);
}
}
}
return output_buffer_id_;
}
uint32_t InstDebugPrintfPass::GetOutputBufferPtrId() {
if (output_buffer_ptr_id_ == 0) {
output_buffer_ptr_id_ = context()->get_type_mgr()->FindPointerToType(
GetUintId(), spv::StorageClass::StorageBuffer);
}
return output_buffer_ptr_id_;
}
uint32_t InstDebugPrintfPass::GetOutputBufferBinding() {
return kDebugOutputPrintfStream;
}
void InstDebugPrintfPass::GenDebugOutputFieldCode(uint32_t base_offset_id,
uint32_t field_offset,
uint32_t field_value_id,
InstructionBuilder* builder) {
// Cast value to 32-bit unsigned if necessary
uint32_t val_id = GenUintCastCode(field_value_id, builder);
// Store value
Instruction* data_idx_inst = builder->AddIAdd(
GetUintId(), base_offset_id, builder->GetUintConstantId(field_offset));
uint32_t buf_id = GetOutputBufferId();
uint32_t buf_uint_ptr_id = GetOutputBufferPtrId();
Instruction* achain_inst = builder->AddAccessChain(
buf_uint_ptr_id, buf_id,
{builder->GetUintConstantId(kDebugOutputDataOffset),
data_idx_inst->result_id()});
(void)builder->AddStore(achain_inst->result_id(), val_id);
}
uint32_t InstDebugPrintfPass::GetStreamWriteFunctionId(uint32_t param_cnt) {
enum {
kShaderId = 0,
kInstructionIndex = 1,
kStageInfo = 2,
kFirstParam = 3,
};
// Total param count is common params plus validation-specific
// params
if (param2output_func_id_[param_cnt] == 0) {
// Create function
param2output_func_id_[param_cnt] = TakeNextId();
analysis::TypeManager* type_mgr = context()->get_type_mgr();
const analysis::Type* uint_type = GetInteger(32, false);
const analysis::Vector v4uint(uint_type, 4);
const analysis::Type* v4uint_type = type_mgr->GetRegisteredType(&v4uint);
std::vector<const analysis::Type*> param_types(kFirstParam + param_cnt,
uint_type);
param_types[kStageInfo] = v4uint_type;
std::unique_ptr<Function> output_func = StartFunction(
param2output_func_id_[param_cnt], type_mgr->GetVoidType(), param_types);
std::vector<uint32_t> param_ids = AddParameters(*output_func, param_types);
// Create first block
auto new_blk_ptr = MakeUnique<BasicBlock>(NewLabel(TakeNextId()));
InstructionBuilder builder(
context(), &*new_blk_ptr,
IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
// Gen test if debug output buffer size will not be exceeded.
const uint32_t val_spec_offset = kInstStageOutCnt;
const uint32_t obuf_record_sz = val_spec_offset + param_cnt;
const uint32_t buf_id = GetOutputBufferId();
const uint32_t buf_uint_ptr_id = GetOutputBufferPtrId();
Instruction* obuf_curr_sz_ac_inst = builder.AddAccessChain(
buf_uint_ptr_id, buf_id,
{builder.GetUintConstantId(kDebugOutputSizeOffset)});
// Fetch the current debug buffer written size atomically, adding the
// size of the record to be written.
uint32_t obuf_record_sz_id = builder.GetUintConstantId(obuf_record_sz);
uint32_t mask_none_id =
builder.GetUintConstantId(uint32_t(spv::MemoryAccessMask::MaskNone));
uint32_t scope_invok_id =
builder.GetUintConstantId(uint32_t(spv::Scope::Invocation));
Instruction* obuf_curr_sz_inst = builder.AddQuadOp(
GetUintId(), spv::Op::OpAtomicIAdd, obuf_curr_sz_ac_inst->result_id(),
scope_invok_id, mask_none_id, obuf_record_sz_id);
uint32_t obuf_curr_sz_id = obuf_curr_sz_inst->result_id();
// Compute new written size
Instruction* obuf_new_sz_inst =
builder.AddIAdd(GetUintId(), obuf_curr_sz_id,
builder.GetUintConstantId(obuf_record_sz));
// Fetch the data bound
Instruction* obuf_bnd_inst =
builder.AddIdLiteralOp(GetUintId(), spv::Op::OpArrayLength,
GetOutputBufferId(), kDebugOutputDataOffset);
// Test that new written size is less than or equal to debug output
// data bound
Instruction* obuf_safe_inst = builder.AddBinaryOp(
GetBoolId(), spv::Op::OpULessThanEqual, obuf_new_sz_inst->result_id(),
obuf_bnd_inst->result_id());
uint32_t merge_blk_id = TakeNextId();
uint32_t write_blk_id = TakeNextId();
std::unique_ptr<Instruction> merge_label(NewLabel(merge_blk_id));
std::unique_ptr<Instruction> write_label(NewLabel(write_blk_id));
(void)builder.AddConditionalBranch(
obuf_safe_inst->result_id(), write_blk_id, merge_blk_id, merge_blk_id,
uint32_t(spv::SelectionControlMask::MaskNone));
// Close safety test block and gen write block
output_func->AddBasicBlock(std::move(new_blk_ptr));
new_blk_ptr = MakeUnique<BasicBlock>(std::move(write_label));
builder.SetInsertPoint(&*new_blk_ptr);
// Generate common and stage-specific debug record members
GenDebugOutputFieldCode(obuf_curr_sz_id, kInstCommonOutSize,
builder.GetUintConstantId(obuf_record_sz),
&builder);
// Store Shader Id
GenDebugOutputFieldCode(obuf_curr_sz_id, kInstCommonOutShaderId,
param_ids[kShaderId], &builder);
// Store Instruction Idx
GenDebugOutputFieldCode(obuf_curr_sz_id, kInstCommonOutInstructionIdx,
param_ids[kInstructionIndex], &builder);
// Store stage info. Stage Idx + 3 words of stage-specific data.
for (uint32_t i = 0; i < 4; ++i) {
Instruction* field =
builder.AddCompositeExtract(GetUintId(), param_ids[kStageInfo], {i});
GenDebugOutputFieldCode(obuf_curr_sz_id, kInstCommonOutStageIdx + i,
field->result_id(), &builder);
}
// Gen writes of validation specific data
for (uint32_t i = 0; i < param_cnt; ++i) {
GenDebugOutputFieldCode(obuf_curr_sz_id, val_spec_offset + i,
param_ids[kFirstParam + i], &builder);
}
// Close write block and gen merge block
(void)builder.AddBranch(merge_blk_id);
output_func->AddBasicBlock(std::move(new_blk_ptr));
new_blk_ptr = MakeUnique<BasicBlock>(std::move(merge_label));
builder.SetInsertPoint(&*new_blk_ptr);
// Close merge block and function and add function to module
(void)builder.AddNullaryOp(0, spv::Op::OpReturn);
output_func->AddBasicBlock(std::move(new_blk_ptr));
output_func->SetFunctionEnd(EndFunction());
context()->AddFunction(std::move(output_func));
std::string name("stream_write_");
name += std::to_string(param_cnt);
context()->AddDebug2Inst(
NewGlobalName(param2output_func_id_[param_cnt], name));
}
return param2output_func_id_[param_cnt];
}
void InstDebugPrintfPass::GenDebugStreamWrite(
uint32_t shader_id, uint32_t instruction_idx_id, uint32_t stage_info_id,
const std::vector<uint32_t>& validation_ids, InstructionBuilder* builder) {
// Call debug output function. Pass func_idx, instruction_idx and
// validation ids as args.
uint32_t val_id_cnt = static_cast<uint32_t>(validation_ids.size());
std::vector<uint32_t> args = {shader_id, instruction_idx_id, stage_info_id};
(void)args.insert(args.end(), validation_ids.begin(), validation_ids.end());
(void)builder->AddFunctionCall(GetVoidId(),
GetStreamWriteFunctionId(val_id_cnt), args);
}
std::unique_ptr<Instruction> InstDebugPrintfPass::NewGlobalName(
uint32_t id, const std::string& name_str) {
std::string prefixed_name{"inst_printf_"};
prefixed_name += name_str;
return NewName(id, prefixed_name);
}
std::unique_ptr<Instruction> InstDebugPrintfPass::NewMemberName(
uint32_t id, uint32_t member_index, const std::string& name_str) {
return MakeUnique<Instruction>(
context(), spv::Op::OpMemberName, 0, 0,
std::initializer_list<Operand>{
{SPV_OPERAND_TYPE_ID, {id}},
{SPV_OPERAND_TYPE_LITERAL_INTEGER, {member_index}},
{SPV_OPERAND_TYPE_LITERAL_STRING, utils::MakeVector(name_str)}});
}
void InstDebugPrintfPass::InitializeInstDebugPrintf() {
// Initialize base class
InitializeInstrument();
output_buffer_id_ = 0;
output_buffer_ptr_id_ = 0;
}
Pass::Status InstDebugPrintfPass::ProcessImpl() {


@ -28,10 +28,10 @@ namespace opt {
class InstDebugPrintfPass : public InstrumentPass {
public:
// For test harness only
InstDebugPrintfPass() : InstrumentPass(7, 23, kInstValidationIdDebugPrintf) {}
InstDebugPrintfPass() : InstrumentPass(7, 23) {}
// For all other interfaces
InstDebugPrintfPass(uint32_t desc_set, uint32_t shader_id)
: InstrumentPass(desc_set, shader_id, kInstValidationIdDebugPrintf) {}
: InstrumentPass(desc_set, shader_id) {}
~InstDebugPrintfPass() override = default;
@ -41,6 +41,104 @@ class InstDebugPrintfPass : public InstrumentPass {
const char* name() const override { return "inst-printf-pass"; }
private:
// Gen code into |builder| to write |field_value_id| into debug output
// buffer at |base_offset_id| + |field_offset|.
void GenDebugOutputFieldCode(uint32_t base_offset_id, uint32_t field_offset,
uint32_t field_value_id,
InstructionBuilder* builder);
// Generate instructions in |builder| which will atomically fetch and
// increment the size of the debug output buffer stream of the current
// validation and write a record to the end of the stream, if enough space
// in the buffer remains. The record will contain the index of the function
// and instruction within that function |func_idx, instruction_idx| which
// generated the record. It will also contain additional information to
// identify the instance of the shader, depending on the stage |stage_idx|
// of the shader. Finally, the record will contain validation-specific
// data contained in |validation_ids| which will identify the validation
// error as well as the values involved in the error.
//
// The output buffer binding written to by the code generated by the function
// is determined by the validation id specified when each specific
// instrumentation pass is created.
//
// The output buffer is a sequence of 32-bit values with the following
// format (where all elements are unsigned 32-bit unless otherwise noted):
//
// Size
// Record0
// Record1
// Record2
// ...
//
// Size is the number of 32-bit values that have been written, or that
// shaders have attempted to write, to the output buffer, excluding the Size
// field itself. It is initialized to 0. Because the counter is bumped before
// the bounds check, this field can end up exceeding the actual capacity of
// the buffer when more writes are attempted than fit.
//
// Each Record* is a variable-length sequence of 32-bit values with the
// following format defined using static const offsets in the .cpp file:
//
// Record Size
// Shader ID
// Instruction Index
// Stage
// Stage-specific Word 0
// Stage-specific Word 1
// ...
// Validation Error Code
// Validation-specific Word 0
// Validation-specific Word 1
// Validation-specific Word 2
// ...
//
// Each record consists of three subsections: members common across all
// validation, members specific to the stage, and members specific to a
// validation.
//
// The Record Size is the number of 32-bit words in the record, including
// the Record Size word.
//
// Shader ID is a value that identifies which shader has generated the
// validation error. It is passed when the instrumentation pass is created.
//
// The Instruction Index is the position of the instruction within the
// SPIR-V file which is in error.
//
// The Stage is the pipeline stage which has generated the error as defined
// by the SpvExecutionModel_ enumeration. This is used to interpret the
// following Stage-specific words.
//
// The Stage-specific Words identify which invocation of the shader generated
// the error. Every stage will write a fixed number of words. Vertex shaders
// will write the Vertex and Instance ID. Fragment shaders will write
// FragCoord.xy. Compute shaders will write the GlobalInvocation ID.
// The tessellation eval shader will write the Primitive ID and TessCoords.uv.
// The tessellation control shader and geometry shader will write the
// Primitive ID and Invocation ID.
//
// The Validation Error Code specifies the exact error which has occurred.
// These are enumerated with the kInstError* static consts. This allows
// multiple validation layers to use the same, single output buffer.
//
// The Validation-specific Words are a validation-specific number of 32-bit
// words which give further information on the validation error that
// occurred. These are documented further in each file containing the
// validation-specific class which derives from this base class.
//
// Because the generated code checks against the size of the buffer before
// writing, the size of the debug output buffer can be used by the validation
// layer to control the number of error records that are written.
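// For illustration only, a hypothetical C-style view of one record as laid
// out by the generated code (this struct is not part of the actual API;
// field names are invented, offsets follow the kInstCommonOut* constants):
//
//   struct Record {
//     uint32_t record_size;        // kInstCommonOutSize
//     uint32_t shader_id;          // kInstCommonOutShaderId
//     uint32_t instruction_idx;    // kInstCommonOutInstructionIdx
//     uint32_t stage_info[4];      // kInstCommonOutStageIdx + 0..3
//     uint32_t validation_data[];  // error code + validation-specific words
//   };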
void GenDebugStreamWrite(uint32_t shader_id, uint32_t instruction_idx_id,
uint32_t stage_info_id,
const std::vector<uint32_t>& validation_ids,
InstructionBuilder* builder);
// Return id for output function. Define if it doesn't exist with
// |val_spec_param_cnt| validation-specific uint32 parameters.
uint32_t GetStreamWriteFunctionId(uint32_t val_spec_param_cnt);
// Generate instructions for OpDebugPrintf.
//
// If |ref_inst_itr| is an OpDebugPrintf, return in |new_blocks| the result
@ -80,13 +178,37 @@ class InstDebugPrintfPass : public InstrumentPass {
void GenOutputCode(Instruction* printf_inst, uint32_t stage_idx,
std::vector<std::unique_ptr<BasicBlock>>* new_blocks);
// Set the name for a function or global variable; names are prefixed to
// identify which instrumentation pass generated them.
std::unique_ptr<Instruction> NewGlobalName(uint32_t id,
const std::string& name_str);
// Set the name for a structure member
std::unique_ptr<Instruction> NewMemberName(uint32_t id, uint32_t member_index,
const std::string& name_str);
// Return id for debug output buffer
uint32_t GetOutputBufferId();
// Return id for the StorageBuffer pointer-to-uint type used to access the
// output buffer
uint32_t GetOutputBufferPtrId();
// Return binding for output buffer for current validation.
uint32_t GetOutputBufferBinding();
// Initialize state for instrumenting debug printf
void InitializeInstDebugPrintf();
// Apply GenDebugPrintfCode to every instruction in module.
Pass::Status ProcessImpl();
uint32_t ext_inst_printf_id_;
uint32_t ext_inst_printf_id_{0};
// id for output buffer variable
uint32_t output_buffer_id_{0};
// ptr type id for output buffer element
uint32_t output_buffer_ptr_id_{0};
};
} // namespace opt


@ -131,38 +131,6 @@ std::unique_ptr<Instruction> InstrumentPass::NewName(
{SPV_OPERAND_TYPE_LITERAL_STRING, utils::MakeVector(name_str)}});
}
std::unique_ptr<Instruction> InstrumentPass::NewGlobalName(
uint32_t id, const std::string& name_str) {
std::string prefixed_name;
switch (validation_id_) {
case kInstValidationIdBindless:
prefixed_name = "inst_bindless_";
break;
case kInstValidationIdBuffAddr:
prefixed_name = "inst_buff_addr_";
break;
case kInstValidationIdDebugPrintf:
prefixed_name = "inst_printf_";
break;
default:
assert(false); // add new instrumentation pass here
prefixed_name = "inst_pass_";
break;
}
prefixed_name += name_str;
return NewName(id, prefixed_name);
}
std::unique_ptr<Instruction> InstrumentPass::NewMemberName(
uint32_t id, uint32_t member_index, const std::string& name_str) {
return MakeUnique<Instruction>(
context(), spv::Op::OpMemberName, 0, 0,
std::initializer_list<Operand>{
{SPV_OPERAND_TYPE_ID, {id}},
{SPV_OPERAND_TYPE_LITERAL_INTEGER, {member_index}},
{SPV_OPERAND_TYPE_LITERAL_STRING, utils::MakeVector(name_str)}});
}
uint32_t InstrumentPass::Gen32BitCvtCode(uint32_t val_id,
InstructionBuilder* builder) {
// Convert integer value to 32-bit if necessary
@ -195,24 +163,6 @@ uint32_t InstrumentPass::GenUintCastCode(uint32_t val_id,
->result_id();
}
void InstrumentPass::GenDebugOutputFieldCode(uint32_t base_offset_id,
uint32_t field_offset,
uint32_t field_value_id,
InstructionBuilder* builder) {
// Cast value to 32-bit unsigned if necessary
uint32_t val_id = GenUintCastCode(field_value_id, builder);
// Store value
Instruction* data_idx_inst = builder->AddIAdd(
GetUintId(), base_offset_id, builder->GetUintConstantId(field_offset));
uint32_t buf_id = GetOutputBufferId();
uint32_t buf_uint_ptr_id = GetOutputBufferPtrId();
Instruction* achain_inst = builder->AddAccessChain(
buf_uint_ptr_id, buf_id,
{builder->GetUintConstantId(kDebugOutputDataOffset),
data_idx_inst->result_id()});
(void)builder->AddStore(achain_inst->result_id(), val_id);
}
uint32_t InstrumentPass::GenVarLoad(uint32_t var_id,
InstructionBuilder* builder) {
Instruction* var_inst = get_def_use_mgr()->GetDef(var_id);
@ -329,18 +279,6 @@ uint32_t InstrumentPass::GenStageInfo(uint32_t stage_idx,
return builder->AddCompositeConstruct(GetVec4UintId(), ids)->result_id();
}
void InstrumentPass::GenDebugStreamWrite(
uint32_t shader_id, uint32_t instruction_idx_id, uint32_t stage_info_id,
const std::vector<uint32_t>& validation_ids, InstructionBuilder* builder) {
// Call debug output function. Pass func_idx, instruction_idx and
// validation ids as args.
uint32_t val_id_cnt = static_cast<uint32_t>(validation_ids.size());
std::vector<uint32_t> args = {shader_id, instruction_idx_id, stage_info_id};
(void)args.insert(args.end(), validation_ids.begin(), validation_ids.end());
(void)builder->AddFunctionCall(GetVoidId(),
GetStreamWriteFunctionId(val_id_cnt), args);
}
bool InstrumentPass::AllConstant(const std::vector<uint32_t>& ids) {
for (auto& id : ids) {
Instruction* id_inst = context()->get_def_use_mgr()->GetDef(id);
@ -349,14 +287,6 @@ bool InstrumentPass::AllConstant(const std::vector<uint32_t>& ids) {
return true;
}
uint32_t InstrumentPass::GenDebugDirectRead(
const std::vector<uint32_t>& offset_ids, InstructionBuilder* builder) {
// Call debug input function. Pass func_idx and offset ids as args.
const uint32_t off_id_cnt = static_cast<uint32_t>(offset_ids.size());
const uint32_t input_func_id = GetDirectReadFunctionId(off_id_cnt);
return GenReadFunctionCall(GetUintId(), input_func_id, offset_ids, builder);
}
uint32_t InstrumentPass::GenReadFunctionCall(
uint32_t return_id, uint32_t func_id,
const std::vector<uint32_t>& func_call_args,
@ -450,53 +380,6 @@ void InstrumentPass::UpdateSucceedingPhis(
});
}
uint32_t InstrumentPass::GetOutputBufferPtrId() {
if (output_buffer_ptr_id_ == 0) {
output_buffer_ptr_id_ = context()->get_type_mgr()->FindPointerToType(
GetUintId(), spv::StorageClass::StorageBuffer);
}
return output_buffer_ptr_id_;
}
uint32_t InstrumentPass::GetInputBufferTypeId() {
return (validation_id_ == kInstValidationIdBuffAddr) ? GetUint64Id()
: GetUintId();
}
uint32_t InstrumentPass::GetInputBufferPtrId() {
if (input_buffer_ptr_id_ == 0) {
input_buffer_ptr_id_ = context()->get_type_mgr()->FindPointerToType(
GetInputBufferTypeId(), spv::StorageClass::StorageBuffer);
}
return input_buffer_ptr_id_;
}
uint32_t InstrumentPass::GetOutputBufferBinding() {
switch (validation_id_) {
case kInstValidationIdBindless:
return kDebugOutputBindingStream;
case kInstValidationIdBuffAddr:
return kDebugOutputBindingStream;
case kInstValidationIdDebugPrintf:
return kDebugOutputPrintfStream;
default:
assert(false && "unexpected validation id");
}
return 0;
}
uint32_t InstrumentPass::GetInputBufferBinding() {
switch (validation_id_) {
case kInstValidationIdBindless:
return kDebugInputBindingBindless;
case kInstValidationIdBuffAddr:
return kDebugInputBindingBuffAddr;
default:
assert(false && "unexpected validation id");
}
return 0;
}
analysis::Integer* InstrumentPass::GetInteger(uint32_t width, bool is_signed) {
analysis::Integer i(width, is_signed);
analysis::Type* type = context()->get_type_mgr()->GetRegisteredType(&i);
@ -577,110 +460,6 @@ void InstrumentPass::AddStorageBufferExt() {
storage_buffer_ext_defined_ = true;
}
// Return id for output buffer
uint32_t InstrumentPass::GetOutputBufferId() {
if (output_buffer_id_ == 0) {
// If not created yet, create one
analysis::DecorationManager* deco_mgr = get_decoration_mgr();
analysis::TypeManager* type_mgr = context()->get_type_mgr();
analysis::RuntimeArray* reg_uint_rarr_ty = GetUintRuntimeArrayType(32);
analysis::Integer* reg_uint_ty = GetInteger(32, false);
analysis::Type* reg_buf_ty =
GetStruct({reg_uint_ty, reg_uint_ty, reg_uint_rarr_ty});
uint32_t obufTyId = type_mgr->GetTypeInstruction(reg_buf_ty);
// By the Vulkan spec, a pre-existing struct containing a RuntimeArray
// must be a block, and will therefore be decorated with Block. Therefore
// the undecorated type returned here will not be pre-existing and can
// safely be decorated. Since this type is now decorated, it is out of
// sync with the TypeManager and therefore the TypeManager must be
// invalidated after this pass.
assert(context()->get_def_use_mgr()->NumUses(obufTyId) == 0 &&
"used struct type returned");
deco_mgr->AddDecoration(obufTyId, uint32_t(spv::Decoration::Block));
deco_mgr->AddMemberDecoration(obufTyId, kDebugOutputFlagsOffset,
uint32_t(spv::Decoration::Offset), 0);
deco_mgr->AddMemberDecoration(obufTyId, kDebugOutputSizeOffset,
uint32_t(spv::Decoration::Offset), 4);
deco_mgr->AddMemberDecoration(obufTyId, kDebugOutputDataOffset,
uint32_t(spv::Decoration::Offset), 8);
uint32_t obufTyPtrId_ =
type_mgr->FindPointerToType(obufTyId, spv::StorageClass::StorageBuffer);
output_buffer_id_ = TakeNextId();
std::unique_ptr<Instruction> newVarOp(new Instruction(
context(), spv::Op::OpVariable, obufTyPtrId_, output_buffer_id_,
{{spv_operand_type_t::SPV_OPERAND_TYPE_LITERAL_INTEGER,
{uint32_t(spv::StorageClass::StorageBuffer)}}}));
context()->AddGlobalValue(std::move(newVarOp));
context()->AddDebug2Inst(NewGlobalName(obufTyId, "OutputBuffer"));
context()->AddDebug2Inst(NewMemberName(obufTyId, 0, "flags"));
context()->AddDebug2Inst(NewMemberName(obufTyId, 1, "written_count"));
context()->AddDebug2Inst(NewMemberName(obufTyId, 2, "data"));
context()->AddDebug2Inst(NewGlobalName(output_buffer_id_, "output_buffer"));
deco_mgr->AddDecorationVal(
output_buffer_id_, uint32_t(spv::Decoration::DescriptorSet), desc_set_);
deco_mgr->AddDecorationVal(output_buffer_id_,
uint32_t(spv::Decoration::Binding),
GetOutputBufferBinding());
AddStorageBufferExt();
if (get_module()->version() >= SPV_SPIRV_VERSION_WORD(1, 4)) {
// Add the new buffer to all entry points.
for (auto& entry : get_module()->entry_points()) {
entry.AddOperand({SPV_OPERAND_TYPE_ID, {output_buffer_id_}});
context()->AnalyzeUses(&entry);
}
}
}
return output_buffer_id_;
}
uint32_t InstrumentPass::GetInputBufferId() {
if (input_buffer_id_ == 0) {
// If not created yet, create one
analysis::DecorationManager* deco_mgr = get_decoration_mgr();
analysis::TypeManager* type_mgr = context()->get_type_mgr();
uint32_t width = (validation_id_ == kInstValidationIdBuffAddr) ? 64u : 32u;
analysis::Type* reg_uint_rarr_ty = GetUintRuntimeArrayType(width);
analysis::Struct* reg_buf_ty = GetStruct({reg_uint_rarr_ty});
uint32_t ibufTyId = type_mgr->GetTypeInstruction(reg_buf_ty);
// By the Vulkan spec, a pre-existing struct containing a RuntimeArray
// must be a block, and will therefore be decorated with Block. Therefore
// the undecorated type returned here will not be pre-existing and can
// safely be decorated. Since this type is now decorated, it is out of
// sync with the TypeManager and therefore the TypeManager must be
// invalidated after this pass.
assert(context()->get_def_use_mgr()->NumUses(ibufTyId) == 0 &&
"used struct type returned");
deco_mgr->AddDecoration(ibufTyId, uint32_t(spv::Decoration::Block));
deco_mgr->AddMemberDecoration(ibufTyId, 0,
uint32_t(spv::Decoration::Offset), 0);
uint32_t ibufTyPtrId_ =
type_mgr->FindPointerToType(ibufTyId, spv::StorageClass::StorageBuffer);
input_buffer_id_ = TakeNextId();
std::unique_ptr<Instruction> newVarOp(new Instruction(
context(), spv::Op::OpVariable, ibufTyPtrId_, input_buffer_id_,
{{spv_operand_type_t::SPV_OPERAND_TYPE_LITERAL_INTEGER,
{uint32_t(spv::StorageClass::StorageBuffer)}}}));
context()->AddGlobalValue(std::move(newVarOp));
context()->AddDebug2Inst(NewGlobalName(ibufTyId, "InputBuffer"));
context()->AddDebug2Inst(NewMemberName(ibufTyId, 0, "data"));
context()->AddDebug2Inst(NewGlobalName(input_buffer_id_, "input_buffer"));
deco_mgr->AddDecorationVal(
input_buffer_id_, uint32_t(spv::Decoration::DescriptorSet), desc_set_);
deco_mgr->AddDecorationVal(input_buffer_id_,
uint32_t(spv::Decoration::Binding),
GetInputBufferBinding());
AddStorageBufferExt();
if (get_module()->version() >= SPV_SPIRV_VERSION_WORD(1, 4)) {
// Add the new buffer to all entry points.
for (auto& entry : get_module()->entry_points()) {
entry.AddOperand({SPV_OPERAND_TYPE_ID, {input_buffer_id_}});
context()->AnalyzeUses(&entry);
}
}
}
return input_buffer_id_;
}
uint32_t InstrumentPass::GetFloatId() {
if (float_id_ == 0) {
analysis::TypeManager* type_mgr = context()->get_type_mgr();
@ -773,181 +552,6 @@ uint32_t InstrumentPass::GetVoidId() {
return void_id_;
}
uint32_t InstrumentPass::GetStreamWriteFunctionId(uint32_t param_cnt) {
enum {
kShaderId = 0,
kInstructionIndex = 1,
kStageInfo = 2,
kFirstParam = 3,
};
// Total param count is common params plus validation-specific
// params
if (param2output_func_id_[param_cnt] == 0) {
// Create function
param2output_func_id_[param_cnt] = TakeNextId();
analysis::TypeManager* type_mgr = context()->get_type_mgr();
const analysis::Type* uint_type = GetInteger(32, false);
const analysis::Vector v4uint(uint_type, 4);
const analysis::Type* v4uint_type = type_mgr->GetRegisteredType(&v4uint);
std::vector<const analysis::Type*> param_types(kFirstParam + param_cnt,
uint_type);
param_types[kStageInfo] = v4uint_type;
std::unique_ptr<Function> output_func = StartFunction(
param2output_func_id_[param_cnt], type_mgr->GetVoidType(), param_types);
std::vector<uint32_t> param_ids = AddParameters(*output_func, param_types);
// Create first block
auto new_blk_ptr = MakeUnique<BasicBlock>(NewLabel(TakeNextId()));
InstructionBuilder builder(
context(), &*new_blk_ptr,
IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
// Gen test if debug output buffer size will not be exceeded.
const uint32_t val_spec_offset = kInstStageOutCnt;
const uint32_t obuf_record_sz = val_spec_offset + param_cnt;
const uint32_t buf_id = GetOutputBufferId();
const uint32_t buf_uint_ptr_id = GetOutputBufferPtrId();
Instruction* obuf_curr_sz_ac_inst = builder.AddAccessChain(
buf_uint_ptr_id, buf_id,
{builder.GetUintConstantId(kDebugOutputSizeOffset)});
// Fetch the current debug buffer written size atomically, adding the
// size of the record to be written.
uint32_t obuf_record_sz_id = builder.GetUintConstantId(obuf_record_sz);
uint32_t mask_none_id =
builder.GetUintConstantId(uint32_t(spv::MemoryAccessMask::MaskNone));
uint32_t scope_invok_id =
builder.GetUintConstantId(uint32_t(spv::Scope::Invocation));
Instruction* obuf_curr_sz_inst = builder.AddQuadOp(
GetUintId(), spv::Op::OpAtomicIAdd, obuf_curr_sz_ac_inst->result_id(),
scope_invok_id, mask_none_id, obuf_record_sz_id);
uint32_t obuf_curr_sz_id = obuf_curr_sz_inst->result_id();
// Compute new written size
Instruction* obuf_new_sz_inst =
builder.AddIAdd(GetUintId(), obuf_curr_sz_id,
builder.GetUintConstantId(obuf_record_sz));
// Fetch the data bound
Instruction* obuf_bnd_inst =
builder.AddIdLiteralOp(GetUintId(), spv::Op::OpArrayLength,
GetOutputBufferId(), kDebugOutputDataOffset);
// Test that new written size is less than or equal to debug output
// data bound
Instruction* obuf_safe_inst = builder.AddBinaryOp(
GetBoolId(), spv::Op::OpULessThanEqual, obuf_new_sz_inst->result_id(),
obuf_bnd_inst->result_id());
uint32_t merge_blk_id = TakeNextId();
uint32_t write_blk_id = TakeNextId();
std::unique_ptr<Instruction> merge_label(NewLabel(merge_blk_id));
std::unique_ptr<Instruction> write_label(NewLabel(write_blk_id));
(void)builder.AddConditionalBranch(
obuf_safe_inst->result_id(), write_blk_id, merge_blk_id, merge_blk_id,
uint32_t(spv::SelectionControlMask::MaskNone));
// Close safety test block and gen write block
output_func->AddBasicBlock(std::move(new_blk_ptr));
new_blk_ptr = MakeUnique<BasicBlock>(std::move(write_label));
builder.SetInsertPoint(&*new_blk_ptr);
// Generate common and stage-specific debug record members
GenDebugOutputFieldCode(obuf_curr_sz_id, kInstCommonOutSize,
builder.GetUintConstantId(obuf_record_sz),
&builder);
// Store Shader Id
GenDebugOutputFieldCode(obuf_curr_sz_id, kInstCommonOutShaderId,
param_ids[kShaderId], &builder);
// Store Instruction Idx
GenDebugOutputFieldCode(obuf_curr_sz_id, kInstCommonOutInstructionIdx,
param_ids[kInstructionIndex], &builder);
// Store stage info. Stage Idx + 3 words of stage-specific data.
for (uint32_t i = 0; i < 4; ++i) {
Instruction* field =
builder.AddCompositeExtract(GetUintId(), param_ids[kStageInfo], {i});
GenDebugOutputFieldCode(obuf_curr_sz_id, kInstCommonOutStageIdx + i,
field->result_id(), &builder);
}
// Gen writes of validation specific data
for (uint32_t i = 0; i < param_cnt; ++i) {
GenDebugOutputFieldCode(obuf_curr_sz_id, val_spec_offset + i,
param_ids[kFirstParam + i], &builder);
}
// Close write block and gen merge block
(void)builder.AddBranch(merge_blk_id);
output_func->AddBasicBlock(std::move(new_blk_ptr));
new_blk_ptr = MakeUnique<BasicBlock>(std::move(merge_label));
builder.SetInsertPoint(&*new_blk_ptr);
// Close merge block and function and add function to module
(void)builder.AddNullaryOp(0, spv::Op::OpReturn);
output_func->AddBasicBlock(std::move(new_blk_ptr));
output_func->SetFunctionEnd(EndFunction());
context()->AddFunction(std::move(output_func));
std::string name("stream_write_");
name += std::to_string(param_cnt);
context()->AddDebug2Inst(
NewGlobalName(param2output_func_id_[param_cnt], name));
}
return param2output_func_id_[param_cnt];
}
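// Illustrative sketch only (not part of the diff): the stream_write_N function
// generated above is roughly equivalent to the following pseudocode, where
// `size` and `data` stand for the Size word and the runtime array in the debug
// output buffer:
//
//   void stream_write_N(uint shader_id, uint inst_idx, uvec4 stage_info,
//                       uint v0, ..., uint vN1) {
//     uint curr = atomicAdd(size, record_size);
//     if (curr + record_size <= data.length()) {
//       data[curr + kInstCommonOutSize]           = record_size;
//       data[curr + kInstCommonOutShaderId]       = shader_id;
//       data[curr + kInstCommonOutInstructionIdx] = inst_idx;
//       data[curr + kInstCommonOutStageIdx + i]   = stage_info[i];  // i = 0..3
//       data[curr + kInstStageOutCnt + i]         = v_i;            // i = 0..N-1
//     }
//   }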
uint32_t InstrumentPass::GetDirectReadFunctionId(uint32_t param_cnt) {
uint32_t func_id = param2input_func_id_[param_cnt];
if (func_id != 0) return func_id;
// Create input function for param_cnt.
func_id = TakeNextId();
analysis::Integer* uint_type = GetInteger(32, false);
std::vector<const analysis::Type*> param_types(param_cnt, uint_type);
std::unique_ptr<Function> input_func =
StartFunction(func_id, uint_type, param_types);
std::vector<uint32_t> param_ids = AddParameters(*input_func, param_types);
// Create block
auto new_blk_ptr = MakeUnique<BasicBlock>(NewLabel(TakeNextId()));
InstructionBuilder builder(
context(), &*new_blk_ptr,
IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
// For each offset parameter, generate new offset with parameter, adding last
// loaded value if it exists, and load value from input buffer at new offset.
// Return last loaded value.
uint32_t ibuf_type_id = GetInputBufferTypeId();
uint32_t buf_id = GetInputBufferId();
uint32_t buf_ptr_id = GetInputBufferPtrId();
uint32_t last_value_id = 0;
for (uint32_t p = 0; p < param_cnt; ++p) {
uint32_t offset_id;
if (p == 0) {
offset_id = param_ids[0];
} else {
if (ibuf_type_id != GetUintId()) {
last_value_id =
builder.AddUnaryOp(GetUintId(), spv::Op::OpUConvert, last_value_id)
->result_id();
}
offset_id = builder.AddIAdd(GetUintId(), last_value_id, param_ids[p])
->result_id();
}
Instruction* ac_inst = builder.AddAccessChain(
buf_ptr_id, buf_id,
{builder.GetUintConstantId(kDebugInputDataOffset), offset_id});
last_value_id =
builder.AddLoad(ibuf_type_id, ac_inst->result_id())->result_id();
}
(void)builder.AddUnaryOp(0, spv::Op::OpReturnValue, last_value_id);
// Close block and function and add function to module
input_func->AddBasicBlock(std::move(new_blk_ptr));
input_func->SetFunctionEnd(EndFunction());
context()->AddFunction(std::move(input_func));
std::string name("direct_read_");
name += std::to_string(param_cnt);
context()->AddDebug2Inst(NewGlobalName(func_id, name));
param2input_func_id_[param_cnt] = func_id;
return func_id;
}
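// Illustrative sketch only: for param_cnt == 2 the direct_read_2 function
// generated above behaves roughly like the following, where `data` is the
// runtime array in the debug input buffer (an OpUConvert to uint is inserted
// first when the buffer element type is not 32-bit uint):
//
//   uint direct_read_2(uint o0, uint o1) {
//     uint v0 = data[o0];
//     return data[v0 + o1];
//   }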
void InstrumentPass::SplitBlock(
BasicBlock::iterator inst_itr, UptrVectorIterator<BasicBlock> block_itr,
std::vector<std::unique_ptr<BasicBlock>>* new_blocks) {
@ -1091,10 +695,6 @@ bool InstrumentPass::InstProcessEntryPointCallTree(InstProcessFunction& pfn) {
}
void InstrumentPass::InitializeInstrument() {
output_buffer_id_ = 0;
output_buffer_ptr_id_ = 0;
input_buffer_ptr_id_ = 0;
input_buffer_id_ = 0;
float_id_ = 0;
v4float_id_ = 0;
uint_id_ = 0;

View File

@ -55,14 +55,6 @@
namespace spvtools {
namespace opt {
namespace {
// Validation Ids
// These are used to identify the general validation being done and map to
// its output buffers.
constexpr uint32_t kInstValidationIdBindless = 0;
constexpr uint32_t kInstValidationIdBuffAddr = 1;
constexpr uint32_t kInstValidationIdDebugPrintf = 2;
} // namespace
class InstrumentPass : public Pass {
using cbb_ptr = const BasicBlock*;
@ -85,12 +77,11 @@ class InstrumentPass : public Pass {
// set |desc_set| for debug input and output buffers and writes |shader_id|
// into debug output records. |opt_direct_reads| indicates that the pass
// will see direct input buffer reads and should prepare to optimize them.
InstrumentPass(uint32_t desc_set, uint32_t shader_id, uint32_t validation_id,
InstrumentPass(uint32_t desc_set, uint32_t shader_id,
bool opt_direct_reads = false)
: Pass(),
desc_set_(desc_set),
shader_id_(shader_id),
validation_id_(validation_id),
opt_direct_reads_(opt_direct_reads) {}
// Initialize state for instrumentation of module.
@ -113,108 +104,9 @@ class InstrumentPass : public Pass {
void MovePostludeCode(UptrVectorIterator<BasicBlock> ref_block_itr,
BasicBlock* new_blk_ptr);
// Generate instructions in |builder| which will atomically fetch and
// increment the size of the debug output buffer stream of the current
// validation and write a record to the end of the stream, if enough space
// in the buffer remains. The record will contain the index of the function
// and instruction within that function |func_idx, instruction_idx| which
// generated the record. It will also contain additional information to
// identify the instance of the shader, depending on the stage |stage_idx|
// of the shader. Finally, the record will contain validation-specific
// data contained in |validation_ids| which will identify the validation
// error as well as the values involved in the error.
//
// The output buffer binding written to by the code generated by the function
// is determined by the validation id specified when each specific
// instrumentation pass is created.
//
// The output buffer is a sequence of 32-bit values with the following
// format (where all elements are unsigned 32-bit unless otherwise noted):
//
// Size
// Record0
// Record1
// Record2
// ...
//
// Size is the number of 32-bit values that have been written or
// attempted to be written to the output buffer, excluding the Size. It is
// initialized to 0. If the total size of attempted writes exceeds the
// actual size of the buffer, this field can end up larger than the
// buffer itself.
//
// Each Record* is a variable-length sequence of 32-bit values with the
// following format defined using static const offsets in the .cpp file:
//
// Record Size
// Shader ID
// Instruction Index
// Stage
// Stage-specific Word 0
// Stage-specific Word 1
// ...
// Validation Error Code
// Validation-specific Word 0
// Validation-specific Word 1
// Validation-specific Word 2
// ...
//
// Each record consists of three subsections: members common across all
// validation, members specific to the stage, and members specific to a
// validation.
//
// The Record Size is the number of 32-bit words in the record, including
// the Record Size word.
//
// Shader ID is a value that identifies which shader has generated the
// validation error. It is passed when the instrumentation pass is created.
//
// The Instruction Index is the position of the instruction within the
// SPIR-V file which is in error.
//
// The Stage is the pipeline stage which has generated the error as defined
// by the SpvExecutionModel_ enumeration. This is used to interpret the
// following Stage-specific words.
//
// The Stage-specific Words identify which invocation of the shader generated
// the error. Every stage will write a fixed number of words. Vertex shaders
// will write the Vertex and Instance ID. Fragment shaders will write
// FragCoord.xy. Compute shaders will write the GlobalInvocation ID.
// The tessellation eval shader will write the Primitive ID and TessCoords.uv.
// The tessellation control shader and geometry shader will write the
// Primitive ID and Invocation ID.
//
// The Validation Error Code specifies the exact error which has occurred.
// These are enumerated with the kInstError* static consts. This allows
// multiple validation layers to use the same, single output buffer.
//
// The Validation-specific Words are a validation-specific number of 32-bit
// words which give further information on the validation error that
// occurred. These are documented further in each file containing the
// validation-specific class which derives from this base class.
//
// Because the code that is generated checks against the size of the buffer
// before writing, the size of the debug output buffer can be used by the
// validation layer to control the number of error records that are written.
void GenDebugStreamWrite(uint32_t shader_id, uint32_t instruction_idx_id,
uint32_t stage_info_id,
const std::vector<uint32_t>& validation_ids,
InstructionBuilder* builder);
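// Illustrative sketch only: one record in the stream described above can be
// pictured as the following layout of 32-bit words:
//
//   struct DebugRecord {
//     uint32_t record_size;            // total words in this record
//     uint32_t shader_id;
//     uint32_t instruction_index;
//     uint32_t stage;                  // SpvExecutionModel_ value
//     uint32_t stage_words[N];         // fixed count per stage
//     uint32_t validation_error_code;  // kInstError* value
//     uint32_t validation_words[M];    // validation-specific count
//   };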
// Return true if all instructions in |ids| are constants or spec constants.
bool AllConstant(const std::vector<uint32_t>& ids);
// Generate in |builder| instructions to read the unsigned integer from the
// input buffer specified by the offsets in |offset_ids|. Given offsets
// o0, o1, ... oN, and input buffer ibuf, return the id for the value:
//
// ibuf[...ibuf[ibuf[o0]+o1]...+oN]
//
// The binding and the format of the input buffer is determined by each
// specific validation, which is specified at the creation of the pass.
uint32_t GenDebugDirectRead(const std::vector<uint32_t>& offset_ids,
InstructionBuilder* builder);
uint32_t GenReadFunctionCall(uint32_t return_id, uint32_t func_id,
const std::vector<uint32_t>& args,
InstructionBuilder* builder);
@ -243,15 +135,6 @@ class InstrumentPass : public Pass {
std::unique_ptr<Instruction> NewName(uint32_t id,
const std::string& name_str);
// Set the name for a function or global variable; names will be
// prefixed to identify which instrumentation pass generated them.
std::unique_ptr<Instruction> NewGlobalName(uint32_t id,
const std::string& name_str);
// Set the name for a structure member
std::unique_ptr<Instruction> NewMemberName(uint32_t id, uint32_t member_index,
const std::string& name_str);
// Return id for 32-bit unsigned type
uint32_t GetUintId();
@ -283,30 +166,9 @@ class InstrumentPass : public Pass {
// Return pointer to type for runtime array of uint
analysis::RuntimeArray* GetUintRuntimeArrayType(uint32_t width);
// Return ptr type id for output buffer element
uint32_t GetOutputBufferPtrId();
// Return type id for input buffer element
uint32_t GetInputBufferTypeId();
// Return ptr type id for input buffer element
uint32_t GetInputBufferPtrId();
// Return binding for output buffer for current validation.
uint32_t GetOutputBufferBinding();
// Return binding for input buffer for current validation.
uint32_t GetInputBufferBinding();
// Add storage buffer extension if needed
void AddStorageBufferExt();
// Return id for debug output buffer
uint32_t GetOutputBufferId();
// Return id for debug input buffer
uint32_t GetInputBufferId();
// Return id for 32-bit float type
uint32_t GetFloatId();
@ -322,14 +184,6 @@ class InstrumentPass : public Pass {
// Return id for v3uint type
uint32_t GetVec3UintId();
// Return id for output function. Define if it doesn't exist with
// |val_spec_param_cnt| validation-specific uint32 parameters.
uint32_t GetStreamWriteFunctionId(uint32_t val_spec_param_cnt);
// Return id for input function taking |param_cnt| uint32 parameters. Define
// if it doesn't exist.
uint32_t GetDirectReadFunctionId(uint32_t param_cnt);
// Split block |block_itr| into two new blocks where the second block
// contains |inst_itr| and place in |new_blocks|.
void SplitBlock(BasicBlock::iterator inst_itr,
@ -349,12 +203,6 @@ class InstrumentPass : public Pass {
std::queue<uint32_t>* roots,
uint32_t stage_idx);
// Gen code into |builder| to write |field_value_id| into debug output
// buffer at |base_offset_id| + |field_offset|.
void GenDebugOutputFieldCode(uint32_t base_offset_id, uint32_t field_offset,
uint32_t field_value_id,
InstructionBuilder* builder);
// Generate instructions into |builder| which will load |var_id| and return
// its result id.
uint32_t GenVarLoad(uint32_t var_id, InstructionBuilder* builder);
@ -395,62 +243,47 @@ class InstrumentPass : public Pass {
// Map from instruction's unique id to offset in original file.
std::unordered_map<uint32_t, uint32_t> uid2offset_;
// result id for OpConstantFalse
uint32_t validation_id_;
// id for output buffer variable
uint32_t output_buffer_id_;
// ptr type id for output buffer element
uint32_t output_buffer_ptr_id_;
// ptr type id for input buffer element
uint32_t input_buffer_ptr_id_;
// id for debug output function
std::unordered_map<uint32_t, uint32_t> param2output_func_id_;
// ids for debug input functions
std::unordered_map<uint32_t, uint32_t> param2input_func_id_;
// id for input buffer variable
uint32_t input_buffer_id_;
// id for 32-bit float type
uint32_t float_id_;
uint32_t float_id_{0};
// id for v4float type
uint32_t v4float_id_;
uint32_t v4float_id_{0};
// id for v4uint type
uint32_t v4uint_id_;
uint32_t v4uint_id_{0};
// id for v3uint type
uint32_t v3uint_id_;
uint32_t v3uint_id_{0};
// id for 32-bit unsigned type
uint32_t uint_id_;
uint32_t uint_id_{0};
// id for 64-bit unsigned type
uint32_t uint64_id_;
uint32_t uint64_id_{0};
// id for 8-bit unsigned type
uint32_t uint8_id_;
uint32_t uint8_id_{0};
// id for bool type
uint32_t bool_id_;
uint32_t bool_id_{0};
// id for void type
uint32_t void_id_;
uint32_t void_id_{0};
// boolean to remember storage buffer extension
bool storage_buffer_ext_defined_;
bool storage_buffer_ext_defined_{false};
// runtime array of uint type
analysis::RuntimeArray* uint64_rarr_ty_;
analysis::RuntimeArray* uint64_rarr_ty_{nullptr};
// runtime array of uint type
analysis::RuntimeArray* uint32_rarr_ty_;
analysis::RuntimeArray* uint32_rarr_ty_{nullptr};
// Pre-instrumentation same-block insts
std::unordered_map<uint32_t, Instruction*> same_block_pre_;
@ -475,11 +308,11 @@ class InstrumentPass : public Pass {
std::unordered_map<std::vector<uint32_t>, uint32_t, vector_hash_> call2id_;
// Function currently being instrumented
Function* curr_func_;
Function* curr_func_{nullptr};
// Optimize direct debug input buffer reads. Specifically, move all such
// reads with constant args to first block and reuse them.
bool opt_direct_reads_;
bool opt_direct_reads_{false};
};
} // namespace opt

View File

@ -0,0 +1,493 @@
// Copyright (c) 2023 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "source/opt/invocation_interlock_placement_pass.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <functional>
#include <optional>
#include <queue>
#include <stack>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "source/enum_set.h"
#include "source/enum_string_mapping.h"
#include "source/opt/ir_context.h"
#include "source/opt/reflect.h"
#include "source/spirv_target_env.h"
#include "source/util/string_utils.h"
namespace spvtools {
namespace opt {
namespace {
constexpr uint32_t kEntryPointExecutionModelInIdx = 0;
constexpr uint32_t kEntryPointFunctionIdInIdx = 1;
constexpr uint32_t kFunctionCallFunctionIdInIdx = 0;
} // namespace
bool InvocationInterlockPlacementPass::hasSingleNextBlock(uint32_t block_id,
bool reverse_cfg) {
if (reverse_cfg) {
// We are traversing forward, so check whether there is a single successor.
BasicBlock* block = cfg()->block(block_id);
switch (block->tail()->opcode()) {
case spv::Op::OpBranchConditional:
return false;
case spv::Op::OpSwitch:
return block->tail()->NumInOperandWords() == 1;
default:
return !block->tail()->IsReturnOrAbort();
}
} else {
// We are traversing backward, so check whether there is a single
// predecessor.
return cfg()->preds(block_id).size() == 1;
}
}
void InvocationInterlockPlacementPass::forEachNext(
uint32_t block_id, bool reverse_cfg, std::function<void(uint32_t)> f) {
if (reverse_cfg) {
BasicBlock* block = cfg()->block(block_id);
block->ForEachSuccessorLabel([f](uint32_t succ_id) { f(succ_id); });
} else {
for (uint32_t pred_id : cfg()->preds(block_id)) {
f(pred_id);
}
}
}
void InvocationInterlockPlacementPass::addInstructionAtBlockBoundary(
BasicBlock* block, spv::Op opcode, bool at_end) {
if (at_end) {
assert(block->begin()->opcode() != spv::Op::OpPhi &&
"addInstructionAtBlockBoundary expects to be called with at_end == "
"true only if there is a single successor to block");
// Insert a begin instruction at the end of the block.
Instruction* begin_inst = new Instruction(context(), opcode);
begin_inst->InsertAfter(&*--block->tail());
} else {
assert(block->begin()->opcode() != spv::Op::OpPhi &&
"addInstructionAtBlockBoundary expects to be called with at_end == "
"false only if there is a single predecessor to block");
// Insert an end instruction at the beginning of the block.
Instruction* end_inst = new Instruction(context(), opcode);
end_inst->InsertBefore(&*block->begin());
}
}
bool InvocationInterlockPlacementPass::killDuplicateBegin(BasicBlock* block) {
bool found = false;
return context()->KillInstructionIf(
block->begin(), block->end(), [&found](Instruction* inst) {
if (inst->opcode() == spv::Op::OpBeginInvocationInterlockEXT) {
if (found) {
return true;
}
found = true;
}
return false;
});
}
bool InvocationInterlockPlacementPass::killDuplicateEnd(BasicBlock* block) {
std::vector<Instruction*> to_kill;
block->ForEachInst([&to_kill](Instruction* inst) {
if (inst->opcode() == spv::Op::OpEndInvocationInterlockEXT) {
to_kill.push_back(inst);
}
});
if (to_kill.size() <= 1) {
return false;
}
to_kill.pop_back();
for (Instruction* inst : to_kill) {
context()->KillInst(inst);
}
return true;
}
void InvocationInterlockPlacementPass::recordBeginOrEndInFunction(
Function* func) {
if (extracted_functions_.count(func)) {
return;
}
bool had_begin = false;
bool had_end = false;
func->ForEachInst([this, &had_begin, &had_end](Instruction* inst) {
switch (inst->opcode()) {
case spv::Op::OpBeginInvocationInterlockEXT:
had_begin = true;
break;
case spv::Op::OpEndInvocationInterlockEXT:
had_end = true;
break;
case spv::Op::OpFunctionCall: {
uint32_t function_id =
inst->GetSingleWordInOperand(kFunctionCallFunctionIdInIdx);
Function* inner_func = context()->GetFunction(function_id);
recordBeginOrEndInFunction(inner_func);
ExtractionResult result = extracted_functions_[inner_func];
had_begin = had_begin || result.had_begin;
had_end = had_end || result.had_end;
break;
}
default:
break;
}
});
ExtractionResult result = {had_begin, had_end};
extracted_functions_[func] = result;
}
bool InvocationInterlockPlacementPass::
removeBeginAndEndInstructionsFromFunction(Function* func) {
bool modified = false;
func->ForEachInst([this, &modified](Instruction* inst) {
switch (inst->opcode()) {
case spv::Op::OpBeginInvocationInterlockEXT:
context()->KillInst(inst);
modified = true;
break;
case spv::Op::OpEndInvocationInterlockEXT:
context()->KillInst(inst);
modified = true;
break;
default:
break;
}
});
return modified;
}
bool InvocationInterlockPlacementPass::extractInstructionsFromCalls(
std::vector<BasicBlock*> blocks) {
bool modified = false;
for (BasicBlock* block : blocks) {
block->ForEachInst([this, &modified](Instruction* inst) {
if (inst->opcode() == spv::Op::OpFunctionCall) {
uint32_t function_id =
inst->GetSingleWordInOperand(kFunctionCallFunctionIdInIdx);
Function* func = context()->GetFunction(function_id);
ExtractionResult result = extracted_functions_[func];
if (result.had_begin) {
Instruction* new_inst = new Instruction(
context(), spv::Op::OpBeginInvocationInterlockEXT);
new_inst->InsertBefore(inst);
modified = true;
}
if (result.had_end) {
Instruction* new_inst =
new Instruction(context(), spv::Op::OpEndInvocationInterlockEXT);
new_inst->InsertAfter(inst);
modified = true;
}
}
});
}
return modified;
}
void InvocationInterlockPlacementPass::recordExistingBeginAndEndBlock(
std::vector<BasicBlock*> blocks) {
for (BasicBlock* block : blocks) {
block->ForEachInst([this, block](Instruction* inst) {
switch (inst->opcode()) {
case spv::Op::OpBeginInvocationInterlockEXT:
begin_.insert(block->id());
break;
case spv::Op::OpEndInvocationInterlockEXT:
end_.insert(block->id());
break;
default:
break;
}
});
}
}
InvocationInterlockPlacementPass::BlockSet
InvocationInterlockPlacementPass::computeReachableBlocks(
BlockSet& previous_inside, const BlockSet& starting_nodes,
bool reverse_cfg) {
BlockSet inside = starting_nodes;
std::deque<uint32_t> worklist;
worklist.insert(worklist.begin(), starting_nodes.begin(),
starting_nodes.end());
while (!worklist.empty()) {
uint32_t block_id = worklist.front();
worklist.pop_front();
forEachNext(block_id, reverse_cfg,
[&inside, &previous_inside, &worklist](uint32_t next_id) {
previous_inside.insert(next_id);
if (inside.insert(next_id).second) {
worklist.push_back(next_id);
}
});
}
return inside;
}
bool InvocationInterlockPlacementPass::removeUnneededInstructions(
BasicBlock* block) {
bool modified = false;
if (!predecessors_after_begin_.count(block->id()) &&
after_begin_.count(block->id())) {
// None of the previous blocks are in the critical section, but this block
// is. This can only happen if this block already has at least one begin
// instruction. Leave the first begin instruction, and remove any others.
modified |= killDuplicateBegin(block);
} else if (predecessors_after_begin_.count(block->id())) {
// At least one previous block is in the critical section; remove all
// begin instructions in this block.
modified |= context()->KillInstructionIf(
block->begin(), block->end(), [](Instruction* inst) {
return inst->opcode() == spv::Op::OpBeginInvocationInterlockEXT;
});
}
if (!successors_before_end_.count(block->id()) &&
before_end_.count(block->id())) {
// Same as above
modified |= killDuplicateEnd(block);
} else if (successors_before_end_.count(block->id())) {
modified |= context()->KillInstructionIf(
block->begin(), block->end(), [](Instruction* inst) {
return inst->opcode() == spv::Op::OpEndInvocationInterlockEXT;
});
}
return modified;
}
BasicBlock* InvocationInterlockPlacementPass::splitEdge(BasicBlock* block,
uint32_t succ_id) {
// Create a new block to replace the critical edge.
auto new_succ_temp = MakeUnique<BasicBlock>(
MakeUnique<Instruction>(context(), spv::Op::OpLabel, 0, TakeNextId(),
std::initializer_list<Operand>{}));
auto* new_succ = new_succ_temp.get();
// Insert the new block into the function.
block->GetParent()->InsertBasicBlockAfter(std::move(new_succ_temp), block);
new_succ->AddInstruction(MakeUnique<Instruction>(
context(), spv::Op::OpBranch, 0, 0,
std::initializer_list<Operand>{
Operand(spv_operand_type_t::SPV_OPERAND_TYPE_ID, {succ_id})}));
assert(block->tail()->opcode() == spv::Op::OpBranchConditional ||
block->tail()->opcode() == spv::Op::OpSwitch);
// Update the first branch to `succ_id` so that it branches to the new
// successor instead. If there are multiple edges to `succ_id`, we arbitrarily
// choose the first one in the operand list; the other edges will have to be
// split by another call to `splitEdge`.
block->tail()->WhileEachInId([new_succ, succ_id](uint32_t* branch_id) {
if (*branch_id == succ_id) {
*branch_id = new_succ->id();
return false;
}
return true;
});
return new_succ;
}
bool InvocationInterlockPlacementPass::placeInstructionsForEdge(
BasicBlock* block, uint32_t next_id, BlockSet& inside,
BlockSet& previous_inside, spv::Op opcode, bool reverse_cfg) {
bool modified = false;
if (previous_inside.count(next_id) && !inside.count(block->id())) {
// This block is not in the critical section, but the next block has at
// least one other previous block that is, so this block should enter it
// as well.
// We need to add begin or end instructions to the edge.
modified = true;
if (hasSingleNextBlock(block->id(), reverse_cfg)) {
// This is the only next block.
// Additionally, because `next_id` is in `previous_inside`, we know that
// `next_id` has at least one previous block in `inside`. And because
// `block` is not in `inside`, that means `next_id` has to have at
// least one other previous block in `inside`.
// This is solely for a debug assertion. It is essentially recomputing the
// value of `previous_inside` to verify that it was computed correctly
// such that the above statement is true.
bool next_has_previous_inside = false;
// By passing !reverse_cfg to forEachNext, we are actually iterating over
// the previous blocks.
forEachNext(next_id, !reverse_cfg,
[&next_has_previous_inside, inside](uint32_t previous_id) {
if (inside.count(previous_id)) {
next_has_previous_inside = true;
}
});
assert(next_has_previous_inside &&
"`previous_inside` must be the set of blocks with at least one "
"previous block in `inside`");
addInstructionAtBlockBoundary(block, opcode, reverse_cfg);
} else {
// This block has multiple next blocks. Split the edge and insert the
// instruction in the new next block.
BasicBlock* new_branch;
if (reverse_cfg) {
new_branch = splitEdge(block, next_id);
} else {
new_branch = splitEdge(cfg()->block(next_id), block->id());
}
auto inst = new Instruction(context(), opcode);
inst->InsertBefore(&*new_branch->tail());
}
}
return modified;
}
bool InvocationInterlockPlacementPass::placeInstructions(BasicBlock* block) {
bool modified = false;
block->ForEachSuccessorLabel([this, block, &modified](uint32_t succ_id) {
modified |= placeInstructionsForEdge(
block, succ_id, after_begin_, predecessors_after_begin_,
spv::Op::OpBeginInvocationInterlockEXT, /* reverse_cfg= */ true);
modified |= placeInstructionsForEdge(cfg()->block(succ_id), block->id(),
before_end_, successors_before_end_,
spv::Op::OpEndInvocationInterlockEXT,
/* reverse_cfg= */ false);
});
return modified;
}
bool InvocationInterlockPlacementPass::processFragmentShaderEntry(
Function* entry_func) {
bool modified = false;
// Save the original order of blocks in the function, so we don't iterate over
// newly-added blocks.
std::vector<BasicBlock*> original_blocks;
for (auto bi = entry_func->begin(); bi != entry_func->end(); ++bi) {
original_blocks.push_back(&*bi);
}
modified |= extractInstructionsFromCalls(original_blocks);
recordExistingBeginAndEndBlock(original_blocks);
after_begin_ = computeReachableBlocks(predecessors_after_begin_, begin_,
/* reverse_cfg= */ true);
before_end_ = computeReachableBlocks(successors_before_end_, end_,
/* reverse_cfg= */ false);
for (BasicBlock* block : original_blocks) {
modified |= removeUnneededInstructions(block);
modified |= placeInstructions(block);
}
return modified;
}
bool InvocationInterlockPlacementPass::isFragmentShaderInterlockEnabled() {
if (!context()->get_feature_mgr()->HasExtension(
kSPV_EXT_fragment_shader_interlock)) {
return false;
}
if (context()->get_feature_mgr()->HasCapability(
spv::Capability::FragmentShaderSampleInterlockEXT)) {
return true;
}
if (context()->get_feature_mgr()->HasCapability(
spv::Capability::FragmentShaderPixelInterlockEXT)) {
return true;
}
if (context()->get_feature_mgr()->HasCapability(
spv::Capability::FragmentShaderShadingRateInterlockEXT)) {
return true;
}
return false;
}
Pass::Status InvocationInterlockPlacementPass::Process() {
// Skip this pass if the necessary extension or capability is missing
if (!isFragmentShaderInterlockEnabled()) {
return Status::SuccessWithoutChange;
}
bool modified = false;
std::unordered_set<Function*> entry_points;
for (Instruction& entry_inst : context()->module()->entry_points()) {
uint32_t entry_id =
entry_inst.GetSingleWordInOperand(kEntryPointFunctionIdInIdx);
entry_points.insert(context()->GetFunction(entry_id));
}
for (auto fi = context()->module()->begin(); fi != context()->module()->end();
++fi) {
Function* func = &*fi;
recordBeginOrEndInFunction(func);
if (!entry_points.count(func) && extracted_functions_.count(func)) {
modified |= removeBeginAndEndInstructionsFromFunction(func);
}
}
for (Instruction& entry_inst : context()->module()->entry_points()) {
uint32_t entry_id =
entry_inst.GetSingleWordInOperand(kEntryPointFunctionIdInIdx);
Function* entry_func = context()->GetFunction(entry_id);
auto execution_model = spv::ExecutionModel(
entry_inst.GetSingleWordInOperand(kEntryPointExecutionModelInIdx));
if (execution_model != spv::ExecutionModel::Fragment) {
continue;
}
modified |= processFragmentShaderEntry(entry_func);
}
return modified ? Pass::Status::SuccessWithChange
: Pass::Status::SuccessWithoutChange;
}
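// Illustrative sketch only: within a single block of a fragment entry point,
// the net effect of this pass on, e.g.,
//
//   OpBeginInvocationInterlockEXT      (kept: first begin)
//   OpBeginInvocationInterlockEXT      (removed: duplicate begin)
//   ...critical section...
//   OpEndInvocationInterlockEXT        (removed: a later end exists)
//   OpEndInvocationInterlockEXT        (kept: last end)
//
// is a single begin/end pair, with begin/end instructions inside called
// functions first hoisted out around the call sites.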
} // namespace opt
} // namespace spvtools

View File

@ -0,0 +1,158 @@
// Copyright (c) 2023 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef SOURCE_OPT_DEDUPE_INTERLOCK_INVOCATION_PASS_H_
#define SOURCE_OPT_DEDUPE_INTERLOCK_INVOCATION_PASS_H_
#include <algorithm>
#include <array>
#include <functional>
#include <optional>
#include <unordered_map>
#include <unordered_set>
#include "source/enum_set.h"
#include "source/extensions.h"
#include "source/opt/ir_context.h"
#include "source/opt/module.h"
#include "source/opt/pass.h"
#include "source/spirv_target_env.h"
namespace spvtools {
namespace opt {
// This pass ensures that an entry point will have at most one
// OpBeginInvocationInterlockEXT and one OpEndInvocationInterlockEXT, in that
// order.
class InvocationInterlockPlacementPass : public Pass {
public:
InvocationInterlockPlacementPass() {}
InvocationInterlockPlacementPass(const InvocationInterlockPlacementPass&) =
delete;
InvocationInterlockPlacementPass(InvocationInterlockPlacementPass&&) = delete;
const char* name() const override { return "dedupe-interlock-invocation"; }
Status Process() override;
private:
using BlockSet = std::unordered_set<uint32_t>;
// Specifies whether a function originally had a begin or end instruction.
struct ExtractionResult {
bool had_begin : 1;
bool had_end : 1;
};
// Check whether a block has only a single next block, depending on the
// direction in which we are traversing the CFG. If reverse_cfg is true, we are
// walking forward through the CFG, and return whether the block has only one
// successor. Otherwise, we are walking backward through the CFG, and return
// whether the block has only one predecessor.
bool hasSingleNextBlock(uint32_t block_id, bool reverse_cfg);
// Iterate over each of a block's predecessors or successors, depending on
// direction. If reverse_cfg is true, we are walking forward through the CFG,
// and need to iterate over the successors. Otherwise, we are walking backward
// through the CFG, and need to iterate over the predecessors.
void forEachNext(uint32_t block_id, bool reverse_cfg,
std::function<void(uint32_t)> f);
// Add either a begin or end instruction to the edge of the basic block. If
// at_end is true, add the instruction to the end of the block; otherwise add
// the instruction to the beginning of the basic block.
void addInstructionAtBlockBoundary(BasicBlock* block, spv::Op opcode,
bool at_end);
// Remove every OpBeginInvocationInterlockEXT instruction in block after the
// first. Returns whether any instructions were removed.
bool killDuplicateBegin(BasicBlock* block);
// Remove every OpEndInvocationInterlockEXT instruction in block before the
// last. Returns whether any instructions were removed.
bool killDuplicateEnd(BasicBlock* block);
// Records whether a function will potentially execute a begin or end
// instruction.
void recordBeginOrEndInFunction(Function* func);
// Recursively removes any begin or end instructions from func and from any
// functions that func calls. Returns whether any instructions were removed.
bool removeBeginAndEndInstructionsFromFunction(Function* func);
// For every function call in any of the passed blocks, move any begin or end
// instructions outside of the function call. Returns whether any extractions
// occurred.
bool extractInstructionsFromCalls(std::vector<BasicBlock*> blocks);
// Finds the sets of blocks that contain OpBeginInvocationInterlockEXT and
// OpEndInvocationInterlockEXT, storing them in the member variables begin_
// and end_ respectively.
void recordExistingBeginAndEndBlock(std::vector<BasicBlock*> blocks);
// Compute the set of blocks including or after the barrier instruction, and
// the set of blocks with any previous blocks inside the barrier instruction.
// If reverse_cfg is true, move forward through the CFG, computing after_begin_
// and predecessors_after_begin_; otherwise, move backward through the CFG,
// computing before_end_ and successors_before_end_.
BlockSet computeReachableBlocks(BlockSet& in_set,
const BlockSet& starting_nodes,
bool reverse_cfg);
// Remove unneeded begin and end instructions in block.
bool removeUnneededInstructions(BasicBlock* block);
// Given a block which branches to multiple successors, and a specific
// successor, creates a new empty block and updates the branch instruction to
// branch to the new block instead.
BasicBlock* splitEdge(BasicBlock* block, uint32_t succ_id);
// For the edge from block to next_id, places a begin or end instruction on
// the edge, based on the direction we are walking the CFG, specified in
// reverse_cfg.
bool placeInstructionsForEdge(BasicBlock* block, uint32_t next_id,
BlockSet& inside, BlockSet& previous_inside,
spv::Op opcode, bool reverse_cfg);
// Calls placeInstructionsForEdge for each edge in block.
bool placeInstructions(BasicBlock* block);
// Processes a single fragment shader entry function.
bool processFragmentShaderEntry(Function* entry_func);
// Returns whether the module has the SPV_EXT_fragment_shader_interlock
// extension and one of the FragmentShader*InterlockEXT capabilities.
bool isFragmentShaderInterlockEnabled();
// Maps a function to whether that function originally held a begin or end
// instruction.
std::unordered_map<Function*, ExtractionResult> extracted_functions_;
// The set of blocks which have an OpBeginInvocationInterlockEXT instruction.
BlockSet begin_;
// The set of blocks which have an OpEndInvocationInterlockEXT instruction.
BlockSet end_;
// The set of blocks which either have a begin instruction, or have a
// predecessor which has a begin instruction.
BlockSet after_begin_;
// The set of blocks which either have an end instruction, or have a successor
// which has an end instruction.
BlockSet before_end_;
// The set of blocks which have a predecessor in after_begin_.
BlockSet predecessors_after_begin_;
// The set of blocks which have a successor in before_end_.
BlockSet successors_before_end_;
};
} // namespace opt
} // namespace spvtools
#endif // SOURCE_OPT_DEDUPE_INTERLOCK_INVOCATION_PASS_H_

View File

@ -252,6 +252,8 @@ class IRContext {
inline void AddType(std::unique_ptr<Instruction>&& t);
// Appends a constant, global variable, or OpUndef instruction to this module.
inline void AddGlobalValue(std::unique_ptr<Instruction>&& v);
// Prepends a function declaration to this module.
inline void AddFunctionDeclaration(std::unique_ptr<Function>&& f);
// Appends a function to this module.
inline void AddFunction(std::unique_ptr<Function>&& f);
@ -1213,6 +1215,10 @@ void IRContext::AddGlobalValue(std::unique_ptr<Instruction>&& v) {
module()->AddGlobalValue(std::move(v));
}
void IRContext::AddFunctionDeclaration(std::unique_ptr<Function>&& f) {
module()->AddFunctionDeclaration(std::move(f));
}
void IRContext::AddFunction(std::unique_ptr<Function>&& f) {
module()->AddFunction(std::move(f));
}

View File

@ -427,7 +427,9 @@ void LocalAccessChainConvertPass::InitExtensions() {
"SPV_EXT_shader_image_int64", "SPV_KHR_non_semantic_info",
"SPV_KHR_uniform_group_instructions",
"SPV_KHR_fragment_shader_barycentric", "SPV_KHR_vulkan_memory_model",
"SPV_NV_bindless_texture", "SPV_EXT_shader_atomic_float_add"});
"SPV_NV_bindless_texture", "SPV_EXT_shader_atomic_float_add",
"SPV_EXT_fragment_shader_interlock",
"SPV_NV_compute_shader_derivatives"});
}
bool LocalAccessChainConvertPass::AnyIndexIsOutOfBounds(

View File

@ -278,6 +278,7 @@ void LocalSingleBlockLoadStoreElimPass::InitExtensions() {
"SPV_KHR_ray_query",
"SPV_EXT_fragment_invocation_density",
"SPV_EXT_physical_storage_buffer",
"SPV_KHR_physical_storage_buffer",
"SPV_KHR_terminate_invocation",
"SPV_KHR_subgroup_uniform_control_flow",
"SPV_KHR_integer_dot_product",
@ -287,7 +288,9 @@ void LocalSingleBlockLoadStoreElimPass::InitExtensions() {
"SPV_KHR_fragment_shader_barycentric",
"SPV_KHR_vulkan_memory_model",
"SPV_NV_bindless_texture",
"SPV_EXT_shader_atomic_float_add"});
"SPV_EXT_shader_atomic_float_add",
"SPV_EXT_fragment_shader_interlock",
"SPV_NV_compute_shader_derivatives"});
}
} // namespace opt

View File

@ -128,6 +128,7 @@ void LocalSingleStoreElimPass::InitExtensionAllowList() {
"SPV_KHR_ray_query",
"SPV_EXT_fragment_invocation_density",
"SPV_EXT_physical_storage_buffer",
"SPV_KHR_physical_storage_buffer",
"SPV_KHR_terminate_invocation",
"SPV_KHR_subgroup_uniform_control_flow",
"SPV_KHR_integer_dot_product",
@ -137,7 +138,9 @@ void LocalSingleStoreElimPass::InitExtensionAllowList() {
"SPV_KHR_fragment_shader_barycentric",
"SPV_KHR_vulkan_memory_model",
"SPV_NV_bindless_texture",
"SPV_EXT_shader_atomic_float_add"});
"SPV_EXT_shader_atomic_float_add",
"SPV_EXT_fragment_shader_interlock",
"SPV_NV_compute_shader_derivatives"});
}
bool LocalSingleStoreElimPass::ProcessVariable(Instruction* var_inst) {
std::vector<Instruction*> users;

View File

@ -23,7 +23,7 @@
#include "spirv-tools/libspirv.hpp"
// Asserts the given condition is true. Otherwise, sends a message to the
// consumer and exits the problem with failure code. Accepts the following
// consumer and exits the program with failure code. Accepts the following
// formats:
//
// SPIRV_ASSERT(<message-consumer>, <condition-expression>);
@ -36,7 +36,9 @@
#if !defined(NDEBUG)
#define SPIRV_ASSERT(consumer, ...) SPIRV_ASSERT_IMPL(consumer, __VA_ARGS__)
#else
#define SPIRV_ASSERT(consumer, ...)
// Adding a use to avoid errors in the release build related to unused
// consumers.
#define SPIRV_ASSERT(consumer, ...) (void)(consumer)
#endif
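// Illustrative only: the (void)(consumer) expansion keeps release builds from
// warning about a consumer referenced solely through these macros, e.g.
//
//   void Check(const spvtools::MessageConsumer& consumer, bool ok) {
//     SPIRV_ASSERT(consumer, ok);  // without the expansion, `consumer` would
//   }                              // be unused when NDEBUG is defined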
// Logs a debug message to the consumer. Accepts the following formats:
@ -49,26 +51,11 @@
#if !defined(NDEBUG) && defined(SPIRV_LOG_DEBUG)
#define SPIRV_DEBUG(consumer, ...) SPIRV_DEBUG_IMPL(consumer, __VA_ARGS__)
#else
#define SPIRV_DEBUG(consumer, ...)
// Adding a use to avoid errors in the release build related to unused
// consumers.
#define SPIRV_DEBUG(consumer, ...) (void)(consumer)
#endif
// Logs an error message to the consumer saying the given feature is
// unimplemented.
#define SPIRV_UNIMPLEMENTED(consumer, feature) \
do { \
spvtools::Log(consumer, SPV_MSG_INTERNAL_ERROR, __FILE__, \
{static_cast<size_t>(__LINE__), 0, 0}, \
"unimplemented: " feature); \
} while (0)
// Logs an error message to the consumer saying the code location
// should be unreachable.
#define SPIRV_UNREACHABLE(consumer) \
do { \
spvtools::Log(consumer, SPV_MSG_INTERNAL_ERROR, __FILE__, \
{static_cast<size_t>(__LINE__), 0, 0}, "unreachable"); \
} while (0)
// Helper macros for concatenating arguments.
#define SPIRV_CONCATENATE(a, b) SPIRV_CONCATENATE_(a, b)
#define SPIRV_CONCATENATE_(a, b) a##b

View File

@ -120,6 +120,9 @@ class Module {
// Appends a constant, global variable, or OpUndef instruction to this module.
inline void AddGlobalValue(std::unique_ptr<Instruction> v);
// Prepends a function declaration to this module.
inline void AddFunctionDeclaration(std::unique_ptr<Function> f);
// Appends a function to this module.
inline void AddFunction(std::unique_ptr<Function> f);
@ -380,6 +383,11 @@ inline void Module::AddGlobalValue(std::unique_ptr<Instruction> v) {
types_values_.push_back(std::move(v));
}
inline void Module::AddFunctionDeclaration(std::unique_ptr<Function> f) {
// function declarations must come before function definitions.
functions_.emplace(functions_.begin(), std::move(f));
}
inline void Module::AddFunction(std::unique_ptr<Function> f) {
functions_.emplace_back(std::move(f));
}

View File

@ -158,7 +158,8 @@ Optimizer& Optimizer::RegisterLegalizationPasses(bool preserve_interface) {
.RegisterPass(CreateDeadInsertElimPass())
.RegisterPass(CreateReduceLoadSizePass())
.RegisterPass(CreateAggressiveDCEPass(preserve_interface))
.RegisterPass(CreateInterpolateFixupPass());
.RegisterPass(CreateInterpolateFixupPass())
.RegisterPass(CreateInvocationInterlockPlacementPass());
}
Optimizer& Optimizer::RegisterLegalizationPasses() {
@ -434,14 +435,12 @@ bool Optimizer::RegisterPassFromFlag(const std::string& flag) {
pass_name == "inst-desc-idx-check" ||
pass_name == "inst-buff-oob-check") {
// preserve legacy names
RegisterPass(CreateInstBindlessCheckPass(7, 23));
RegisterPass(CreateInstBindlessCheckPass(23));
RegisterPass(CreateSimplificationPass());
RegisterPass(CreateDeadBranchElimPass());
RegisterPass(CreateBlockMergePass());
RegisterPass(CreateAggressiveDCEPass(true));
} else if (pass_name == "inst-buff-addr-check") {
RegisterPass(CreateInstBuffAddrCheckPass(7, 23));
RegisterPass(CreateAggressiveDCEPass(true));
RegisterPass(CreateInstBuffAddrCheckPass(23));
} else if (pass_name == "convert-relaxed-to-half") {
RegisterPass(CreateConvertRelaxedToHalfPass());
} else if (pass_name == "relax-float-ops") {
@ -980,10 +979,9 @@ Optimizer::PassToken CreateUpgradeMemoryModelPass() {
MakeUnique<opt::UpgradeMemoryModel>());
}
Optimizer::PassToken CreateInstBindlessCheckPass(uint32_t desc_set,
uint32_t shader_id) {
Optimizer::PassToken CreateInstBindlessCheckPass(uint32_t shader_id) {
return MakeUnique<Optimizer::PassToken::Impl>(
MakeUnique<opt::InstBindlessCheckPass>(desc_set, shader_id));
MakeUnique<opt::InstBindlessCheckPass>(shader_id));
}
Optimizer::PassToken CreateInstDebugPrintfPass(uint32_t desc_set,
@ -992,10 +990,9 @@ Optimizer::PassToken CreateInstDebugPrintfPass(uint32_t desc_set,
MakeUnique<opt::InstDebugPrintfPass>(desc_set, shader_id));
}
Optimizer::PassToken CreateInstBuffAddrCheckPass(uint32_t desc_set,
uint32_t shader_id) {
Optimizer::PassToken CreateInstBuffAddrCheckPass(uint32_t shader_id) {
return MakeUnique<Optimizer::PassToken::Impl>(
MakeUnique<opt::InstBuffAddrCheckPass>(desc_set, shader_id));
MakeUnique<opt::InstBuffAddrCheckPass>(shader_id));
}
Optimizer::PassToken CreateConvertRelaxedToHalfPass() {
@ -1115,6 +1112,11 @@ Optimizer::PassToken CreateSwitchDescriptorSetPass(uint32_t from, uint32_t to) {
return MakeUnique<Optimizer::PassToken::Impl>(
MakeUnique<opt::SwitchDescriptorSetPass>(from, to));
}
Optimizer::PassToken CreateInvocationInterlockPlacementPass() {
return MakeUnique<Optimizer::PassToken::Impl>(
MakeUnique<opt::InvocationInterlockPlacementPass>());
}
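// Illustrative usage sketch (assumes the public spvtools::Optimizer API from
// include/spirv-tools/optimizer.hpp):
//
//   spvtools::Optimizer opt(SPV_ENV_VULKAN_1_2);
//   opt.RegisterPass(spvtools::CreateInstBindlessCheckPass(23));
//   opt.RegisterPass(spvtools::CreateInvocationInterlockPlacementPass());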
} // namespace spvtools
extern "C" {

View File

@ -53,6 +53,7 @@
#include "source/opt/inst_debug_printf_pass.h"
#include "source/opt/interface_var_sroa.h"
#include "source/opt/interp_fixup_pass.h"
#include "source/opt/invocation_interlock_placement_pass.h"
#include "source/opt/licm_pass.h"
#include "source/opt/local_access_chain_convert_pass.h"
#include "source/opt/local_redundancy_elimination.h"

View File

@ -36,10 +36,15 @@ namespace spvtools {
namespace opt {
namespace {
constexpr uint32_t kOpTypeFloatSizeIndex = 0;
constexpr uint32_t kOpTypePointerStorageClassIndex = 0;
constexpr uint32_t kTypeArrayTypeIndex = 0;
constexpr uint32_t kOpTypeScalarBitWidthIndex = 0;
constexpr uint32_t kTypePointerTypeIdInIdx = 1;
constexpr uint32_t kTypePointerTypeIdInIndex = 1;
constexpr uint32_t kOpTypeIntSizeIndex = 0;
constexpr uint32_t kOpTypeImageArrayedIndex = 3;
constexpr uint32_t kOpTypeImageMSIndex = kOpTypeImageArrayedIndex + 1;
constexpr uint32_t kOpTypeImageSampledIndex = kOpTypeImageMSIndex + 1;
// DFS visit of the type defined by `instruction`.
// If `condition` is true, children of the current node are visited.
@ -60,7 +65,7 @@ static void DFSWhile(const Instruction* instruction, UnaryPredicate condition) {
if (item->opcode() == spv::Op::OpTypePointer) {
instructions_to_visit.push(
item->GetSingleWordInOperand(kTypePointerTypeIdInIdx));
item->GetSingleWordInOperand(kTypePointerTypeIdInIndex));
continue;
}
@ -128,6 +133,16 @@ static bool Has16BitCapability(const FeatureManager* feature_manager) {
// Handler names follow the following convention:
// Handler_<Opcode>_<Capability>()
static std::optional<spv::Capability> Handler_OpTypeFloat_Float64(
const Instruction* instruction) {
assert(instruction->opcode() == spv::Op::OpTypeFloat &&
"This handler only support OpTypeFloat opcodes.");
const uint32_t size =
instruction->GetSingleWordInOperand(kOpTypeFloatSizeIndex);
return size == 64 ? std::optional(spv::Capability::Float64) : std::nullopt;
}
static std::optional<spv::Capability>
Handler_OpTypePointer_StorageInputOutput16(const Instruction* instruction) {
assert(instruction->opcode() == spv::Op::OpTypePointer &&
@ -255,13 +270,43 @@ static std::optional<spv::Capability> Handler_OpTypePointer_StorageUniform16(
: std::nullopt;
}
static std::optional<spv::Capability> Handler_OpTypeInt_Int64(
const Instruction* instruction) {
assert(instruction->opcode() == spv::Op::OpTypeInt &&
"This handler only support OpTypeInt opcodes.");
const uint32_t size =
instruction->GetSingleWordInOperand(kOpTypeIntSizeIndex);
return size == 64 ? std::optional(spv::Capability::Int64) : std::nullopt;
}
static std::optional<spv::Capability> Handler_OpTypeImage_ImageMSArray(
const Instruction* instruction) {
assert(instruction->opcode() == spv::Op::OpTypeImage &&
"This handler only support OpTypeImage opcodes.");
const uint32_t arrayed =
instruction->GetSingleWordInOperand(kOpTypeImageArrayedIndex);
const uint32_t ms = instruction->GetSingleWordInOperand(kOpTypeImageMSIndex);
const uint32_t sampled =
instruction->GetSingleWordInOperand(kOpTypeImageSampledIndex);
return arrayed == 1 && sampled == 2 && ms == 1
? std::optional(spv::Capability::ImageMSArray)
: std::nullopt;
}
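// Illustrative only: a storage image type such as
//   %img = OpTypeImage %float 2D 0 1 1 2 Rgba8
// (Arrayed = 1, MS = 1, Sampled = 2) is what makes this handler report
// spv::Capability::ImageMSArray.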
// Opcode of interest to determine capabilities requirements.
constexpr std::array<std::pair<spv::Op, OpcodeHandler>, 4> kOpcodeHandlers{{
constexpr std::array<std::pair<spv::Op, OpcodeHandler>, 8> kOpcodeHandlers{{
// clang-format off
{spv::Op::OpTypeFloat, Handler_OpTypeFloat_Float64 },
{spv::Op::OpTypeImage, Handler_OpTypeImage_ImageMSArray},
{spv::Op::OpTypeInt, Handler_OpTypeInt_Int64 },
{spv::Op::OpTypePointer, Handler_OpTypePointer_StorageInputOutput16},
{spv::Op::OpTypePointer, Handler_OpTypePointer_StoragePushConstant16},
{spv::Op::OpTypePointer, Handler_OpTypePointer_StorageUniform16},
{spv::Op::OpTypePointer, Handler_OpTypePointer_StorageUniform16},
{spv::Op::OpTypePointer, Handler_OpTypePointer_StorageUniformBufferBlock16},
{spv::Op::OpTypePointer, Handler_OpTypePointer_StorageUniform16}
// clang-format on
}};

View File

@ -74,18 +74,26 @@ class TrimCapabilitiesPass : public Pass {
// contains unsupported instruction, the pass could yield bad results.
static constexpr std::array kSupportedCapabilities{
// clang-format off
spv::Capability::Float64,
spv::Capability::FragmentShaderPixelInterlockEXT,
spv::Capability::FragmentShaderSampleInterlockEXT,
spv::Capability::FragmentShaderShadingRateInterlockEXT,
spv::Capability::Groups,
spv::Capability::Int64,
spv::Capability::Linkage,
spv::Capability::MinLod,
spv::Capability::RayQueryKHR,
spv::Capability::RayTracingKHR,
spv::Capability::RayTraversalPrimitiveCullingKHR,
spv::Capability::Shader,
spv::Capability::ShaderClockKHR,
spv::Capability::StorageInputOutput16,
spv::Capability::StoragePushConstant16,
spv::Capability::StorageUniform16,
spv::Capability::StorageUniformBufferBlock16
spv::Capability::StorageUniformBufferBlock16,
spv::Capability::ImageMSArray,
spv::Capability::ComputeDerivativeGroupQuadsNV,
spv::Capability::ComputeDerivativeGroupLinearNV
// clang-format on
};

View File

@ -901,7 +901,7 @@ Type* TypeManager::RecordIfTypeDefinition(const Instruction& inst) {
type = new HitObjectNV();
break;
default:
SPIRV_UNIMPLEMENTED(consumer_, "unhandled type");
assert(false && "Type not handled by the type manager.");
break;
}
@ -943,12 +943,10 @@ void TypeManager::AttachDecoration(const Instruction& inst, Type* type) {
}
if (Struct* st = type->AsStruct()) {
st->AddMemberDecoration(index, std::move(data));
} else {
SPIRV_UNIMPLEMENTED(consumer_, "OpMemberDecorate non-struct type");
}
} break;
default:
SPIRV_UNREACHABLE(consumer_);
assert(false && "Unexpected opcode for a decoration instruction.");
break;
}
}

View File

@ -922,9 +922,9 @@ spv_result_t CheckDecorationsOfEntryPoints(ValidationState_t& vstate) {
}
}
if (vstate.HasCapability(
spv::Capability::WorkgroupMemoryExplicitLayoutKHR) &&
num_workgroup_variables > 0 &&
const bool workgroup_blocks_allowed = vstate.HasCapability(
spv::Capability::WorkgroupMemoryExplicitLayoutKHR);
if (workgroup_blocks_allowed && num_workgroup_variables > 0 &&
num_workgroup_variables_with_block > 0) {
if (num_workgroup_variables != num_workgroup_variables_with_block) {
return vstate.diag(SPV_ERROR_INVALID_BINARY, vstate.FindDef(entry_point))
@ -945,6 +945,13 @@ spv_result_t CheckDecorationsOfEntryPoints(ValidationState_t& vstate) {
"Entry point id "
<< entry_point << " does not meet this requirement.";
}
} else if (!workgroup_blocks_allowed &&
num_workgroup_variables_with_block > 0) {
return vstate.diag(SPV_ERROR_INVALID_BINARY,
vstate.FindDef(entry_point))
<< "Workgroup Storage Class variables can't be decorated with "
"Block unless declaring the WorkgroupMemoryExplicitLayoutKHR "
"capability.";
}
}
}

View File

@ -693,16 +693,11 @@ spv_result_t ValidateImageReadWrite(ValidationState_t& _,
<< "storage image";
}
if (info.multisampled == 1 &&
if (info.multisampled == 1 && info.arrayed == 1 && info.sampled == 2 &&
!_.HasCapability(spv::Capability::ImageMSArray)) {
#if 0
// TODO(atgoo@github.com) The description of this rule in the spec
// is unclear and Glslang doesn't declare ImageMSArray. Need to clarify
// and reenable.
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Capability ImageMSArray is required to access storage "
<< "image";
#endif
<< "Capability ImageMSArray is required to access storage "
<< "image";
}
} else if (info.sampled != 0) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)