Updated spirv-tools.

Бранимир Караџић 2021-03-21 22:36:21 -07:00
parent da1cd83850
commit 99383a0eb5
15 changed files with 279 additions and 173 deletions

View File

@@ -1 +1 @@
"v2021.0-dev", "SPIRV-Tools v2021.0-dev 57472fd18a1e4e6f7665df2fa2cf8ab3c6e1ab3a"
"v2021.0-dev", "SPIRV-Tools v2021.0-dev 43f70fa964b646111f63b5d44d32e756203881dc"

View File

@@ -30,18 +30,14 @@ namespace opt {
// std::unique_ptr managed elements in the vector, behaving like we are using
// std::vector<|ValueType|>.
template <typename ValueType, bool IsConst = false>
class UptrVectorIterator
: public std::iterator<std::random_access_iterator_tag,
typename std::conditional<IsConst, const ValueType,
ValueType>::type> {
class UptrVectorIterator {
public:
using super = std::iterator<
std::random_access_iterator_tag,
typename std::conditional<IsConst, const ValueType, ValueType>::type>;
using iterator_category = std::random_access_iterator_tag;
using value_type = ValueType;
using pointer = typename super::pointer;
using reference = typename super::reference;
using difference_type = typename super::difference_type;
using pointer = value_type*;
using reference = value_type&;
using difference_type = std::ptrdiff_t;
// Type aliases. We need to apply constness properly if |IsConst| is true.
using Uptr = std::unique_ptr<ValueType>;
@@ -174,11 +170,7 @@ inline IteratorRange<IteratorType> make_const_range(
//
// Currently this iterator is always an input iterator.
template <typename SubIterator, typename Predicate>
class FilterIterator
: public std::iterator<
std::input_iterator_tag, typename SubIterator::value_type,
typename SubIterator::difference_type, typename SubIterator::pointer,
typename SubIterator::reference> {
class FilterIterator {
public:
// Iterator interface.
using iterator_category = typename SubIterator::iterator_category;
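
Both iterator changes in this file (and the DominatorIterator change further down) apply the same recipe: `std::iterator` was deprecated in C++17, so instead of inheriting the member types from it, the class spells out the five aliases that std::iterator_traits looks for. A minimal standalone sketch of the pattern, not tied to the SPIRV-Tools classes:

#include <cstddef>
#include <iterator>

// Instead of: class PtrIterator : public std::iterator<...>  (deprecated),
// declare the five aliases std::iterator_traits expects directly.
template <typename T>
class PtrIterator {
 public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = T;
  using pointer = T*;
  using reference = T&;
  using difference_type = std::ptrdiff_t;

  explicit PtrIterator(T* p) : p_(p) {}
  reference operator*() const { return *p_; }
  PtrIterator& operator++() { ++p_; return *this; }
  bool operator==(const PtrIterator& o) const { return p_ == o.p_; }
  bool operator!=(const PtrIterator& o) const { return p_ != o.p_; }

 private:
  T* p_;
};

Standard algorithms keep working because std::iterator_traits reads these aliases directly; the deprecated base class never provided anything else.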

View File

@@ -861,6 +861,9 @@ bool ScalarReplacementPass::CheckUsesRelaxed(const Instruction* inst) const {
case SpvOpStore:
if (!CheckStore(user, index)) ok = false;
break;
case SpvOpImageTexelPointer:
if (!CheckImageTexelPointer(index)) ok = false;
break;
default:
ok = false;
break;
@@ -870,6 +873,10 @@ bool ScalarReplacementPass::CheckUsesRelaxed(const Instruction* inst) const {
return ok;
}
bool ScalarReplacementPass::CheckImageTexelPointer(uint32_t index) const {
return index == 2u;
}
bool ScalarReplacementPass::CheckLoad(const Instruction* inst,
uint32_t index) const {
if (index != 2u) return false;

View File

@@ -142,6 +142,10 @@ class ScalarReplacementPass : public Pass {
// of |inst| and the store is not to volatile memory.
bool CheckStore(const Instruction* inst, uint32_t index) const;
// Returns true if |index| is the index of the pointer operand of an
// OpImageTexelPointer instruction.
bool CheckImageTexelPointer(uint32_t index) const;
// Creates a variable of type |typeId| from the |index|'th element of
// |varInst|. The new variable is added to |replacements|. If the variable
// could not be created, then |nullptr| is appended to |replacements|.
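
For context on the `index == 2u` test: SPIRV-Tools operand indices count the result-type id and result id, so for OpImageTexelPointer only a use in the Image slot (the pointer being scalarized) is acceptable. A hypothetical mock-up of the operand layout, not the real Instruction API:

// %ptr = OpImageTexelPointer %type %image %coord %sample
//   operand 0: Result Type <id>
//   operand 1: Result <id>
//   operand 2: Image (pointer operand)  <- the only legal use of the variable
//   operand 3: Coordinate
//   operand 4: Sample
bool CheckImageTexelPointerSketch(uint32_t index) { return index == 2u; }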

View File

@@ -139,9 +139,14 @@ class BasicBlock {
/// @brief A BasicBlock dominator iterator class
///
/// This iterator will iterate over the (post)dominators of the block
class DominatorIterator
: public std::iterator<std::forward_iterator_tag, BasicBlock*> {
class DominatorIterator {
public:
using iterator_category = std::forward_iterator_tag;
using value_type = BasicBlock*;
using pointer = value_type*;
using reference = value_type&;
using difference_type = std::ptrdiff_t;
/// @brief Constructs the end of dominator iterator
///
/// This will create an iterator which will represent the element

View File

@@ -143,6 +143,7 @@ spv_result_t ValidateEntryPoints(ValidationState_t& _) {
if (_.recursive_entry_points().find(entry_point) !=
_.recursive_entry_points().end()) {
return _.diag(SPV_ERROR_INVALID_BINARY, _.FindDef(entry_point))
<< _.VkErrorID(4634)
<< "Entry points may not have a call graph with cycles.";
}
}
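
The new `_.VkErrorID(4634)` tag attaches a Vulkan VUID to an existing rule: the static call graph reachable from an entry point must be acyclic. A self-contained sketch of that kind of reachability check, using plain containers rather than the validator's internal `recursive_entry_points` state:

#include <cstdint>
#include <unordered_map>
#include <unordered_set>
#include <vector>

using CallGraph = std::unordered_map<uint32_t, std::vector<uint32_t>>;

// Returns true if |entry| can reach itself through the call graph,
// i.e. the entry point participates in a cycle.
bool IsRecursive(const CallGraph& graph, uint32_t entry) {
  std::unordered_set<uint32_t> visited;
  std::vector<uint32_t> stack;
  auto push_callees = [&](uint32_t f) {
    auto it = graph.find(f);
    if (it != graph.end())
      stack.insert(stack.end(), it->second.begin(), it->second.end());
  };
  push_callees(entry);
  while (!stack.empty()) {
    uint32_t f = stack.back();
    stack.pop_back();
    if (f == entry) return true;  // found a cycle through |entry|
    if (visited.insert(f).second) push_callees(f);
  }
  return false;
}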

View File

@@ -47,6 +47,70 @@ bool IsStorageClassAllowedByUniversalRules(uint32_t storage_class) {
}
}
bool HasReturnType(uint32_t opcode) {
switch (opcode) {
case SpvOpAtomicStore:
case SpvOpAtomicFlagClear:
return false;
default:
return true;
}
}
bool HasOnlyFloatReturnType(uint32_t opcode) {
switch (opcode) {
case SpvOpAtomicFAddEXT:
return true;
default:
return false;
}
}
bool HasOnlyIntReturnType(uint32_t opcode) {
switch (opcode) {
case SpvOpAtomicCompareExchange:
case SpvOpAtomicCompareExchangeWeak:
case SpvOpAtomicIIncrement:
case SpvOpAtomicIDecrement:
case SpvOpAtomicIAdd:
case SpvOpAtomicISub:
case SpvOpAtomicSMin:
case SpvOpAtomicUMin:
case SpvOpAtomicSMax:
case SpvOpAtomicUMax:
case SpvOpAtomicAnd:
case SpvOpAtomicOr:
case SpvOpAtomicXor:
return true;
default:
return false;
}
}
bool HasIntOrFloatReturnType(uint32_t opcode) {
switch (opcode) {
case SpvOpAtomicLoad:
case SpvOpAtomicExchange:
return true;
default:
return false;
}
}
bool HasOnlyBoolReturnType(uint32_t opcode) {
switch (opcode) {
case SpvOpAtomicFlagTestAndSet:
return true;
default:
return false;
}
}
} // namespace
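
Taken together, these predicates classify the atomic opcodes by their expected Result Type, which is what lets the validation body below collapse into a single if/else-if chain. A usage sketch (assuming the SpvOp enumerants from spirv.h):

// Maps an atomic opcode to the Result Type category enforced below.
const char* ExpectedResultKind(uint32_t opcode) {
  if (!HasReturnType(opcode)) return "no result";             // stores, flag clear
  if (HasOnlyFloatReturnType(opcode)) return "float scalar";  // OpAtomicFAddEXT
  if (HasOnlyIntReturnType(opcode)) return "integer scalar";  // add/sub/min/max/...
  if (HasIntOrFloatReturnType(opcode)) return "integer or float scalar";
  if (HasOnlyBoolReturnType(opcode)) return "bool scalar";    // flag test-and-set
  return "not an atomic opcode";
}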
namespace spvtools {
@@ -55,12 +119,6 @@ namespace val {
// Validates correctness of atomic instructions.
spv_result_t AtomicsPass(ValidationState_t& _, const Instruction* inst) {
const SpvOp opcode = inst->opcode();
const uint32_t result_type = inst->type_id();
bool is_atomic_float_opcode = false;
if (opcode == SpvOpAtomicLoad || opcode == SpvOpAtomicStore ||
opcode == SpvOpAtomicFAddEXT || opcode == SpvOpAtomicExchange) {
is_atomic_float_opcode = true;
}
switch (opcode) {
case SpvOpAtomicLoad:
case SpvOpAtomicStore:
@@ -81,121 +139,38 @@ spv_result_t AtomicsPass(ValidationState_t& _, const Instruction* inst) {
case SpvOpAtomicXor:
case SpvOpAtomicFlagTestAndSet:
case SpvOpAtomicFlagClear: {
if (_.HasCapability(SpvCapabilityKernel) &&
(opcode == SpvOpAtomicLoad || opcode == SpvOpAtomicExchange ||
opcode == SpvOpAtomicCompareExchange)) {
if (!_.IsFloatScalarType(result_type) &&
!_.IsIntScalarType(result_type)) {
const uint32_t result_type = inst->type_id();
// All current atomic instructions produce only scalar results.
// Validate the Result Type first, so the pointer's data type can simply
// be compared against it afterwards (where applicable).
if (HasReturnType(opcode)) {
if (HasOnlyFloatReturnType(opcode) &&
!_.IsFloatScalarType(result_type)) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": expected Result Type to be int or float scalar type";
}
} else if (opcode == SpvOpAtomicFlagTestAndSet) {
if (!_.IsBoolScalarType(result_type)) {
<< ": expected Result Type to be float scalar type";
} else if (HasOnlyIntReturnType(opcode) &&
!_.IsIntScalarType(result_type)) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": expected Result Type to be integer scalar type";
} else if (HasIntOrFloatReturnType(opcode) &&
!_.IsFloatScalarType(result_type) &&
!_.IsIntScalarType(result_type)) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": expected Result Type to be integer or float scalar type";
} else if (HasOnlyBoolReturnType(opcode) &&
!_.IsBoolScalarType(result_type)) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": expected Result Type to be bool scalar type";
}
} else if (opcode == SpvOpAtomicFlagClear || opcode == SpvOpAtomicStore) {
assert(result_type == 0);
} else {
if (_.IsFloatScalarType(result_type)) {
if (is_atomic_float_opcode) {
if (opcode == SpvOpAtomicFAddEXT) {
if ((_.GetBitWidth(result_type) == 32) &&
(!_.HasCapability(SpvCapabilityAtomicFloat32AddEXT))) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": float add atomics require the AtomicFloat32AddEXT "
"capability";
}
if ((_.GetBitWidth(result_type) == 64) &&
(!_.HasCapability(SpvCapabilityAtomicFloat64AddEXT))) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": float add atomics require the AtomicFloat64AddEXT "
"capability";
}
}
} else {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": expected Result Type to be int scalar type";
}
} else if (_.IsIntScalarType(result_type) &&
opcode == SpvOpAtomicFAddEXT) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": expected Result Type to be float scalar type";
} else if (!_.IsFloatScalarType(result_type) &&
!_.IsIntScalarType(result_type)) {
switch (opcode) {
case SpvOpAtomicFAddEXT:
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": expected Result Type to be float scalar type";
case SpvOpAtomicIIncrement:
case SpvOpAtomicIDecrement:
case SpvOpAtomicIAdd:
case SpvOpAtomicISub:
case SpvOpAtomicSMin:
case SpvOpAtomicSMax:
case SpvOpAtomicUMin:
case SpvOpAtomicUMax:
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": expected Result Type to be integer scalar type";
default:
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": expected Result Type to be int or float scalar type";
}
}
if (spvIsVulkanEnv(_.context()->target_env) &&
(_.GetBitWidth(result_type) != 32 &&
(_.GetBitWidth(result_type) != 64 ||
!_.HasCapability(SpvCapabilityInt64ImageEXT)))) {
switch (opcode) {
case SpvOpAtomicSMin:
case SpvOpAtomicUMin:
case SpvOpAtomicSMax:
case SpvOpAtomicUMax:
case SpvOpAtomicAnd:
case SpvOpAtomicOr:
case SpvOpAtomicXor:
case SpvOpAtomicIAdd:
case SpvOpAtomicISub:
case SpvOpAtomicFAddEXT:
case SpvOpAtomicLoad:
case SpvOpAtomicStore:
case SpvOpAtomicExchange:
case SpvOpAtomicIIncrement:
case SpvOpAtomicIDecrement:
case SpvOpAtomicCompareExchangeWeak:
case SpvOpAtomicCompareExchange: {
if (_.GetBitWidth(result_type) == 64 &&
_.IsIntScalarType(result_type) &&
!_.HasCapability(SpvCapabilityInt64Atomics))
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": 64-bit atomics require the Int64Atomics "
"capability";
} break;
default:
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": according to the Vulkan spec atomic Result Type "
"needs "
"to be a 32-bit int scalar type";
}
}
}
uint32_t operand_index =
opcode == SpvOpAtomicFlagClear || opcode == SpvOpAtomicStore ? 0 : 2;
uint32_t operand_index = HasReturnType(opcode) ? 2 : 0;
const uint32_t pointer_type = _.GetOperandTypeId(inst, operand_index++);
uint32_t data_type = 0;
uint32_t storage_class = 0;
if (!_.GetPointerTypeInfo(pointer_type, &data_type, &storage_class)) {
@@ -204,6 +179,14 @@ spv_result_t AtomicsPass(ValidationState_t& _, const Instruction* inst) {
<< ": expected Pointer to be of type OpTypePointer";
}
// Can't use result_type because OpAtomicStore doesn't have a result
if (_.GetBitWidth(data_type) == 64 && _.IsIntScalarType(data_type) &&
!_.HasCapability(SpvCapabilityInt64Atomics)) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": 64-bit atomics require the Int64Atomics capability";
}
// Validate storage class against universal rules
if (!IsStorageClassAllowedByUniversalRules(storage_class)) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
@@ -213,6 +196,7 @@ spv_result_t AtomicsPass(ValidationState_t& _, const Instruction* inst) {
// Then Shader rules
if (_.HasCapability(SpvCapabilityShader)) {
// Vulkan environment rule
if (spvIsVulkanEnv(_.context()->target_env)) {
if ((storage_class != SpvStorageClassUniform) &&
(storage_class != SpvStorageClassStorageBuffer) &&
@@ -225,22 +209,30 @@ spv_result_t AtomicsPass(ValidationState_t& _, const Instruction* inst) {
"be: Uniform, Workgroup, Image, StorageBuffer, or "
"PhysicalStorageBuffer.";
}
// Can't use result_type because OpAtomicStore doesn't have a result
if (opcode == SpvOpAtomicStore && _.GetBitWidth(data_type) == 64 &&
_.IsIntScalarType(data_type) &&
!_.HasCapability(SpvCapabilityInt64Atomics)) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": 64-bit atomics require the Int64Atomics "
"capability";
}
} else if (storage_class == SpvStorageClassFunction) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": Function storage class forbidden when the Shader "
"capability is declared.";
}
if (opcode == SpvOpAtomicFAddEXT) {
// result type being float checked already
if ((_.GetBitWidth(result_type) == 32) &&
(!_.HasCapability(SpvCapabilityAtomicFloat32AddEXT))) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": float add atomics require the AtomicFloat32AddEXT "
"capability";
}
if ((_.GetBitWidth(result_type) == 64) &&
(!_.HasCapability(SpvCapabilityAtomicFloat64AddEXT))) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": float add atomics require the AtomicFloat64AddEXT "
"capability";
}
}
}
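
The relocated OpAtomicFAddEXT block above encodes a per-width capability requirement. Condensed into a hedged sketch (capability names from spirv.h; the float result type is already guaranteed at this point):

// 32- and 64-bit float add atomics each need their own capability.
bool FloatAddAllowed(uint32_t bit_width, bool has_float32_add_cap,
                     bool has_float64_add_cap) {
  if (bit_width == 32) return has_float32_add_cap;  // AtomicFloat32AddEXT
  if (bit_width == 64) return has_float64_add_cap;  // AtomicFloat64AddEXT
  return false;  // other widths are not covered by this check
}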
// And finally OpenCL environment rules
@@ -264,27 +256,27 @@ spv_result_t AtomicsPass(ValidationState_t& _, const Instruction* inst) {
}
}
// If the result type and the pointed-to type are allowed to differ, a
// special check is needed here
if (opcode == SpvOpAtomicFlagTestAndSet ||
opcode == SpvOpAtomicFlagClear) {
if (!_.IsIntScalarType(data_type) || _.GetBitWidth(data_type) != 32) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": expected Pointer to point to a value of 32-bit int type";
<< ": expected Pointer to point to a value of 32-bit integer "
"type";
}
} else if (opcode == SpvOpAtomicStore) {
if (!_.IsFloatScalarType(data_type) && !_.IsIntScalarType(data_type)) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": expected Pointer to be a pointer to int or float "
<< ": expected Pointer to be a pointer to integer or float "
<< "scalar type";
}
} else {
if (data_type != result_type) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": expected Pointer to point to a value of type Result "
"Type";
}
} else if (data_type != result_type) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< ": expected Pointer to point to a value of type Result "
"Type";
}
auto memory_scope = inst->GetOperandAs<const uint32_t>(operand_index++);
@@ -293,14 +285,15 @@ spv_result_t AtomicsPass(ValidationState_t& _, const Instruction* inst) {
}
const auto equal_semantics_index = operand_index++;
if (auto error = ValidateMemorySemantics(_, inst, equal_semantics_index))
if (auto error = ValidateMemorySemantics(_, inst, equal_semantics_index,
memory_scope))
return error;
if (opcode == SpvOpAtomicCompareExchange ||
opcode == SpvOpAtomicCompareExchangeWeak) {
const auto unequal_semantics_index = operand_index++;
if (auto error =
ValidateMemorySemantics(_, inst, unequal_semantics_index))
if (auto error = ValidateMemorySemantics(
_, inst, unequal_semantics_index, memory_scope))
return error;
// Volatile bits must match for equal and unequal semantics. Previous

View File

@@ -69,7 +69,7 @@ spv_result_t BarriersPass(ValidationState_t& _, const Instruction* inst) {
return error;
}
if (auto error = ValidateMemorySemantics(_, inst, 2)) {
if (auto error = ValidateMemorySemantics(_, inst, 2, memory_scope)) {
return error;
}
break;
@@ -82,7 +82,7 @@ spv_result_t BarriersPass(ValidationState_t& _, const Instruction* inst) {
return error;
}
if (auto error = ValidateMemorySemantics(_, inst, 1)) {
if (auto error = ValidateMemorySemantics(_, inst, 1, memory_scope)) {
return error;
}
break;
@@ -119,7 +119,7 @@ spv_result_t BarriersPass(ValidationState_t& _, const Instruction* inst) {
return error;
}
if (auto error = ValidateMemorySemantics(_, inst, 2)) {
if (auto error = ValidateMemorySemantics(_, inst, 2, memory_scope)) {
return error;
}
break;

View File

@@ -466,6 +466,7 @@ spv_result_t ValidateVariable(ValidationState_t& _, const Instruction* inst) {
if (!_.IsValidStorageClass(storage_class)) {
return _.diag(SPV_ERROR_INVALID_BINARY, inst)
<< _.VkErrorID(4643)
<< "Invalid storage class for target environment";
}

View File

@@ -25,7 +25,8 @@ namespace val {
spv_result_t ValidateMemorySemantics(ValidationState_t& _,
const Instruction* inst,
uint32_t operand_index) {
uint32_t operand_index,
uint32_t memory_scope) {
const SpvOp opcode = inst->opcode();
const auto id = inst->GetOperandAs<const uint32_t>(operand_index);
bool is_int32 = false, is_const_int32 = false;
@@ -178,6 +179,18 @@ spv_result_t ValidateMemorySemantics(ValidationState_t& _,
"of the following bits set: Acquire, Release, "
"AcquireRelease "
"or SequentiallyConsistent";
} else if (opcode != SpvOpMemoryBarrier && num_memory_order_set_bits) {
// This leaves only atomics and control barriers in the Vulkan environment.
bool memory_is_int32 = false, memory_is_const_int32 = false;
uint32_t memory_value = 0;
std::tie(memory_is_int32, memory_is_const_int32, memory_value) =
_.EvalInt32IfConst(memory_scope);
if (memory_is_int32 && memory_value == SpvScopeInvocation) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< _.VkErrorID(4641) << spvOpcodeString(opcode)
<< ": Vulkan specification requires Memory Semantics to be None "
"if used with Invocation Memory Scope";
}
}
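
The new else-if branch ties the semantics operand to the Memory Scope operand that callers now pass in: under Vulkan, a constant Invocation scope requires Memory Semantics of None (VUID 04641), with OpMemoryBarrier excluded from this path. A condensed sketch of the branch (enums from spirv.h):

// With a constant Invocation memory scope, no memory-order bits may be set.
bool ViolatesInvocationScopeRule(SpvOp opcode, uint32_t scope_value,
                                 uint32_t num_memory_order_set_bits) {
  return opcode != SpvOpMemoryBarrier && num_memory_order_set_bits != 0 &&
         scope_value == SpvScopeInvocation;
}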
if (opcode == SpvOpMemoryBarrier && !includes_storage_class) {

View File

@@ -22,7 +22,8 @@ namespace val {
spv_result_t ValidateMemorySemantics(ValidationState_t& _,
const Instruction* inst,
uint32_t operand_index);
uint32_t operand_index,
uint32_t memory_scope);
} // namespace val
} // namespace spvtools

View File

@@ -72,6 +72,37 @@ spv_result_t ValidateShaderClock(ValidationState_t& _,
return SPV_SUCCESS;
}
spv_result_t ValidateAssumeTrue(ValidationState_t& _, const Instruction* inst) {
const auto operand_type_id = _.GetOperandTypeId(inst, 0);
if (!operand_type_id || !_.IsBoolScalarType(operand_type_id)) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Value operand of OpAssumeTrueKHR must be a boolean scalar";
}
return SPV_SUCCESS;
}
spv_result_t ValidateExpect(ValidationState_t& _, const Instruction* inst) {
const auto result_type = inst->type_id();
if (!_.IsBoolScalarOrVectorType(result_type) &&
!_.IsIntScalarOrVectorType(result_type)) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Result of OpExpectKHR must be a scalar or vector of integer "
"type or boolean type";
}
if (_.GetOperandTypeId(inst, 2) != result_type) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Type of Value operand of OpExpectKHR does not match the result "
"type ";
}
if (_.GetOperandTypeId(inst, 3) != result_type) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "Type of ExpectedValue operand of OpExpectKHR does not match the "
"result type ";
}
return SPV_SUCCESS;
}
} // namespace
spv_result_t MiscPass(ValidationState_t& _, const Instruction* inst) {
@@ -152,6 +183,16 @@ spv_result_t MiscPass(ValidationState_t& _, const Instruction* inst) {
return error;
}
break;
case SpvOpAssumeTrueKHR:
if (auto error = ValidateAssumeTrue(_, inst)) {
return error;
}
break;
case SpvOpExpectKHR:
if (auto error = ValidateExpect(_, inst)) {
return error;
}
break;
default:
break;
}
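
Both new validators come from SPV_KHR_expect_assume. For reference, the operand positions the checks rely on (indices include the result-type and result ids), sketched as a hypothetical mock-up rather than validator internals:

// OpAssumeTrueKHR %condition
//   operand 0: Condition        <- must be a scalar Bool (hence index 0)
//
// %r = OpExpectKHR %type %value %expected
//   operand 0: Result Type <id>
//   operand 1: Result <id>
//   operand 2: Value            <- type must equal Result Type
//   operand 3: ExpectedValue    <- type must equal Result Type
bool ExpectOperandsConsistent(uint32_t result_type, uint32_t value_type,
                              uint32_t expected_type) {
  return value_type == result_type && expected_type == result_type;
}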

View File

@@ -105,21 +105,30 @@ spv_result_t ValidateExecutionScope(ValidationState_t& _,
}
}
// If OpControlBarrier is used in fragment, vertex, tessellation evaluation,
// or geometry stages, the execution Scope must be Subgroup.
// For a subset of execution models, OpControlBarrier must use the
// Subgroup execution scope.
if (opcode == SpvOpControlBarrier && value != SpvScopeSubgroup) {
std::string errorVUID = _.VkErrorID(4682);
_.function(inst->function()->id())
->RegisterExecutionModelLimitation([](SpvExecutionModel model,
std::string* message) {
->RegisterExecutionModelLimitation([errorVUID](
SpvExecutionModel model,
std::string* message) {
if (model == SpvExecutionModelFragment ||
model == SpvExecutionModelVertex ||
model == SpvExecutionModelGeometry ||
model == SpvExecutionModelTessellationEvaluation) {
model == SpvExecutionModelTessellationEvaluation ||
model == SpvExecutionModelRayGenerationKHR ||
model == SpvExecutionModelIntersectionKHR ||
model == SpvExecutionModelAnyHitKHR ||
model == SpvExecutionModelClosestHitKHR ||
model == SpvExecutionModelMissKHR) {
if (message) {
*message =
"in Vulkan evironment, OpControlBarrier execution scope "
"must be Subgroup for Fragment, Vertex, Geometry and "
"TessellationEvaluation execution models";
errorVUID +
"in Vulkan environment, OpControlBarrier execution scope "
"must be Subgroup for Fragment, Vertex, Geometry, "
"TessellationEvaluation, RayGeneration, Intersection, "
"AnyHit, ClosestHit, and Miss execution models";
}
return false;
}
@@ -127,11 +136,34 @@ spv_result_t ValidateExecutionScope(ValidationState_t& _,
});
}
// Only a subset of execution models supports Workgroup scope.
if (value == SpvScopeWorkgroup) {
std::string errorVUID = _.VkErrorID(4637);
_.function(inst->function()->id())
->RegisterExecutionModelLimitation(
[errorVUID](SpvExecutionModel model, std::string* message) {
if (model != SpvExecutionModelTaskNV &&
model != SpvExecutionModelMeshNV &&
model != SpvExecutionModelTessellationControl &&
model != SpvExecutionModelGLCompute) {
if (message) {
*message =
errorVUID +
"in Vulkan environment, Workgroup execution scope is "
"only for TaskNV, MeshNV, TessellationControl, and "
"GLCompute execution models";
}
return false;
}
return true;
});
}
// Vulkan generic rules
// Scope for execution must be limited to Workgroup or Subgroup
if (value != SpvScopeWorkgroup && value != SpvScopeSubgroup) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< spvOpcodeString(opcode)
<< _.VkErrorID(4636) << spvOpcodeString(opcode)
<< ": in Vulkan environment Execution Scope is limited to "
<< "Workgroup and Subgroup";
}
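
Both scope rules use the same deferral trick: a function's execution model may not be known at this point in validation, so the check is registered as a lambda and evaluated later; capturing errorVUID by value is what lets the eventual message still carry the VUID. A hypothetical mock-up of the mechanism, not the real Function API:

#include <functional>
#include <string>
#include <vector>

// Checks recorded now, evaluated once the execution model is known.
struct FunctionLimits {
  using Check = std::function<bool(int model, std::string* message)>;
  std::vector<Check> checks;

  void RegisterExecutionModelLimitation(Check c) {
    checks.push_back(std::move(c));
  }

  bool IsCompatible(int model, std::string* message) const {
    for (const auto& check : checks)
      if (!check(model, message)) return false;
    return true;
  }
};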

View File

@@ -427,7 +427,8 @@ spv_result_t ValidateTypeStruct(ValidationState_t& _, const Instruction* inst) {
if (spvIsVulkanEnv(_.context()->target_env) &&
!_.options()->before_hlsl_legalization && ContainsOpaqueType(_, inst)) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< "In " << spvLogStringForEnv(_.context()->target_env)
<< _.VkErrorID(4667) << "In "
<< spvLogStringForEnv(_.context()->target_env)
<< ", OpTypeStruct must not contain an opaque type.";
}
@@ -462,6 +463,7 @@ spv_result_t ValidateTypePointer(ValidationState_t& _,
if (!_.IsValidStorageClass(storage_class)) {
return _.diag(SPV_ERROR_INVALID_BINARY, inst)
<< _.VkErrorID(4643)
<< "Invalid storage class for target environment";
}

View File

@@ -1255,12 +1255,12 @@ bool ValidationState_t::IsValidStorageClass(
case SpvStorageClassFunction:
case SpvStorageClassPushConstant:
case SpvStorageClassPhysicalStorageBuffer:
case SpvStorageClassRayPayloadNV:
case SpvStorageClassIncomingRayPayloadNV:
case SpvStorageClassHitAttributeNV:
case SpvStorageClassCallableDataNV:
case SpvStorageClassIncomingCallableDataNV:
case SpvStorageClassShaderRecordBufferNV:
case SpvStorageClassRayPayloadKHR:
case SpvStorageClassIncomingRayPayloadKHR:
case SpvStorageClassHitAttributeKHR:
case SpvStorageClassCallableDataKHR:
case SpvStorageClassIncomingCallableDataKHR:
case SpvStorageClassShaderRecordBufferKHR:
return true;
default:
return false;
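
This hunk is a spelling change with no behavioral effect: the KHR ray-tracing storage classes adopted the NV enum values, so the old and new case labels are numerically identical. For instance (enumerant values from spirv.h):

static_assert(SpvStorageClassRayPayloadNV == SpvStorageClassRayPayloadKHR,
              "KHR ray-tracing storage classes alias the NV values");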
@@ -1676,16 +1676,26 @@ std::string ValidationState_t::VkErrorID(uint32_t id,
return VUID_WRAP(VUID-ShadingRateKHR-ShadingRateKHR-04492);
case 4633:
return VUID_WRAP(VUID-StandaloneSpirv-None-04633);
case 4634:
return VUID_WRAP(VUID-StandaloneSpirv-None-04634);
case 4635:
return VUID_WRAP(VUID-StandaloneSpirv-None-04635);
case 4636:
return VUID_WRAP(VUID-StandaloneSpirv-None-04636);
case 4637:
return VUID_WRAP(VUID-StandaloneSpirv-None-04637);
case 4638:
return VUID_WRAP(VUID-StandaloneSpirv-None-04638);
case 4639:
return VUID_WRAP(VUID-StandaloneSpirv-None-04639);
case 4640:
return VUID_WRAP(VUID-StandaloneSpirv-None-04640);
case 4641:
return VUID_WRAP(VUID-StandaloneSpirv-None-04641);
case 4642:
return VUID_WRAP(VUID-StandaloneSpirv-None-04642);
case 4643:
return VUID_WRAP(VUID-StandaloneSpirv-None-04643);
case 4651:
return VUID_WRAP(VUID-StandaloneSpirv-OpVariable-04651);
case 4652:
@@ -1710,12 +1720,16 @@ std::string ValidationState_t::VkErrorID(uint32_t id,
return VUID_WRAP(VUID-StandaloneSpirv-Offset-04663);
case 4664:
return VUID_WRAP(VUID-StandaloneSpirv-OpImageGather-04664);
case 4667:
return VUID_WRAP(VUID-StandaloneSpirv-None-04667);
case 4669:
return VUID_WRAP(VUID-StandaloneSpirv-GLSLShared-04669);
case 4675:
return VUID_WRAP(VUID-StandaloneSpirv-FPRoundingMode-04675);
case 4677:
return VUID_WRAP(VUID-StandaloneSpirv-Invariant-04677);
case 4682:
return VUID_WRAP(VUID-StandaloneSpirv-OpControlBarrier-04682);
case 4683:
return VUID_WRAP(VUID-StandaloneSpirv-LocalSize-04683);
case 4685: