bgfx/3rdparty/spirv-cross/spirv_common.hpp

1781 lines
42 KiB
C++
Raw Normal View History

2019-01-10 06:23:47 +03:00
/*
2020-02-05 08:38:12 +03:00
* Copyright 2015-2020 Arm Limited
2019-01-10 06:23:47 +03:00
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SPIRV_CROSS_COMMON_HPP
#define SPIRV_CROSS_COMMON_HPP
#include "spirv.hpp"
2019-04-15 07:53:50 +03:00
#include "spirv_cross_containers.hpp"
#include "spirv_cross_error_handling.hpp"
2019-09-07 18:07:48 +03:00
#include <functional>
2019-01-10 06:23:47 +03:00
2019-03-31 07:03:28 +03:00
// A bit crude, but allows projects which embed SPIRV-Cross statically to
// effectively hide all the symbols from other projects.
// There is a case where we have:
// - Project A links against SPIRV-Cross statically.
// - Project A links against Project B statically.
// - Project B links against SPIRV-Cross statically (might be a different version).
// This leads to a conflict with extremely bizarre results.
// By overriding the namespace in one of the project builds, we can work around this.
// If SPIRV-Cross is embedded in dynamic libraries,
// prefer using -fvisibility=hidden on GCC/Clang instead.
#ifdef SPIRV_CROSS_NAMESPACE_OVERRIDE
#define SPIRV_CROSS_NAMESPACE SPIRV_CROSS_NAMESPACE_OVERRIDE
#else
#define SPIRV_CROSS_NAMESPACE spirv_cross
#endif
namespace SPIRV_CROSS_NAMESPACE
2019-01-10 06:23:47 +03:00
{
namespace inner
{
template <typename T>
2019-04-15 07:53:50 +03:00
void join_helper(StringStream<> &stream, T &&t)
2019-01-10 06:23:47 +03:00
{
stream << std::forward<T>(t);
}
template <typename T, typename... Ts>
2019-04-15 07:53:50 +03:00
void join_helper(StringStream<> &stream, T &&t, Ts &&... ts)
2019-01-10 06:23:47 +03:00
{
stream << std::forward<T>(t);
join_helper(stream, std::forward<Ts>(ts)...);
}
} // namespace inner
class Bitset
{
public:
Bitset() = default;
explicit inline Bitset(uint64_t lower_)
: lower(lower_)
{
}
inline bool get(uint32_t bit) const
{
if (bit < 64)
return (lower & (1ull << bit)) != 0;
else
return higher.count(bit) != 0;
}
inline void set(uint32_t bit)
{
if (bit < 64)
lower |= 1ull << bit;
else
higher.insert(bit);
}
inline void clear(uint32_t bit)
{
if (bit < 64)
lower &= ~(1ull << bit);
else
higher.erase(bit);
}
inline uint64_t get_lower() const
{
return lower;
}
inline void reset()
{
lower = 0;
higher.clear();
}
inline void merge_and(const Bitset &other)
{
lower &= other.lower;
std::unordered_set<uint32_t> tmp_set;
for (auto &v : higher)
if (other.higher.count(v) != 0)
tmp_set.insert(v);
higher = std::move(tmp_set);
}
inline void merge_or(const Bitset &other)
{
lower |= other.lower;
for (auto &v : other.higher)
higher.insert(v);
}
inline bool operator==(const Bitset &other) const
{
if (lower != other.lower)
return false;
if (higher.size() != other.higher.size())
return false;
for (auto &v : higher)
if (other.higher.count(v) == 0)
return false;
return true;
}
inline bool operator!=(const Bitset &other) const
{
return !(*this == other);
}
template <typename Op>
void for_each_bit(const Op &op) const
{
// TODO: Add ctz-based iteration.
for (uint32_t i = 0; i < 64; i++)
{
if (lower & (1ull << i))
op(i);
}
if (higher.empty())
return;
// Need to enforce an order here for reproducible results,
// but hitting this path should happen extremely rarely, so having this slow path is fine.
2019-04-15 07:53:50 +03:00
SmallVector<uint32_t> bits;
2019-01-10 06:23:47 +03:00
bits.reserve(higher.size());
for (auto &v : higher)
bits.push_back(v);
std::sort(std::begin(bits), std::end(bits));
for (auto &v : bits)
op(v);
}
inline bool empty() const
{
return lower == 0 && higher.empty();
}
private:
// The most common bits to set are all lower than 64,
// so optimize for this case. Bits spilling outside 64 go into a slower data structure.
// In almost all cases, higher data structure will not be used.
uint64_t lower = 0;
std::unordered_set<uint32_t> higher;
};
// Helper template to avoid lots of nasty string temporary munging.
template <typename... Ts>
std::string join(Ts &&... ts)
{
2019-04-15 07:53:50 +03:00
StringStream<> stream;
2019-01-10 06:23:47 +03:00
inner::join_helper(stream, std::forward<Ts>(ts)...);
return stream.str();
}
2019-06-22 17:30:47 +03:00
inline std::string merge(const SmallVector<std::string> &list, const char *between = ", ")
2019-01-10 06:23:47 +03:00
{
2019-04-15 07:53:50 +03:00
StringStream<> stream;
2019-01-10 06:23:47 +03:00
for (auto &elem : list)
{
2019-04-15 07:53:50 +03:00
stream << elem;
2019-01-10 06:23:47 +03:00
if (&elem != &list.back())
2019-06-22 17:30:47 +03:00
stream << between;
2019-01-10 06:23:47 +03:00
}
2019-04-15 07:53:50 +03:00
return stream.str();
2019-01-10 06:23:47 +03:00
}
2019-03-03 06:16:00 +03:00
// Stringifies any non-floating-point value via std::to_string.
// Make sure we don't accidentally call this with float or doubles with SFINAE;
// floating-point values have to use the radix-aware overloads below.
template <typename T, typename std::enable_if<!std::is_floating_point<T>::value, int>::type = 0>
inline std::string convert_to_string(const T &t)
{
	return std::to_string(t);
}
// Allow implementations to set a convenient standard precision
#ifndef SPIRV_CROSS_FLT_FMT
#define SPIRV_CROSS_FLT_FMT "%.32g"
#endif
2020-04-10 02:55:23 +03:00
// Disable sprintf and strcat warnings.
// We cannot rely on snprintf and family existing because, ..., MSVC.
#if defined(__clang__) || defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#elif defined(_MSC_VER)
2019-01-10 06:23:47 +03:00
#pragma warning(push)
#pragma warning(disable : 4996)
#endif
2019-03-03 06:16:00 +03:00
// Replaces every occurrence of the locale's radix point in str with '.'.
// Setting locales is a very risky business in multi-threaded program,
// so just fixup locales instead. We only need to care about the radix point.
static inline void fixup_radix_point(char *str, char radix_point)
{
	// Nothing to do if the locale already uses '.' as the radix point.
	if (radix_point == '.')
		return;

	for (char *p = str; *p != '\0'; p++)
	{
		if (*p == radix_point)
			*p = '.';
	}
}
inline std::string convert_to_string(float t, char locale_radix_point)
2019-01-10 06:23:47 +03:00
{
// std::to_string for floating point values is broken.
// Fallback to something more sane.
char buf[64];
sprintf(buf, SPIRV_CROSS_FLT_FMT, t);
2019-03-03 06:16:00 +03:00
fixup_radix_point(buf, locale_radix_point);
2019-01-10 06:23:47 +03:00
// Ensure that the literal is float.
if (!strchr(buf, '.') && !strchr(buf, 'e'))
strcat(buf, ".0");
return buf;
}
2019-03-03 06:16:00 +03:00
inline std::string convert_to_string(double t, char locale_radix_point)
2019-01-10 06:23:47 +03:00
{
// std::to_string for floating point values is broken.
// Fallback to something more sane.
char buf[64];
sprintf(buf, SPIRV_CROSS_FLT_FMT, t);
2019-03-03 06:16:00 +03:00
fixup_radix_point(buf, locale_radix_point);
2019-01-10 06:23:47 +03:00
// Ensure that the literal is float.
if (!strchr(buf, '.') && !strchr(buf, 'e'))
strcat(buf, ".0");
return buf;
}
2020-04-10 02:55:23 +03:00
#if defined(__clang__) || defined(__GNUC__)
#pragma GCC diagnostic pop
#elif defined(_MSC_VER)
2019-01-10 06:23:47 +03:00
#pragma warning(pop)
#endif
// A single SPIR-V instruction as referenced from a block's instruction stream.
struct Instruction
{
	uint16_t op = 0;     // SPIR-V opcode. Presumably a spv::Op value — confirm against the parser.
	uint16_t count = 0;  // NOTE(review): looks like the instruction's total word count — confirm.
	uint32_t offset = 0; // Offset into the module's word stream where the operands start — confirm.
	uint32_t length = 0; // Number of operand words available at 'offset' — confirm.
};
// Type tag for each IVariant subclass; each SPIR* struct below declares
// its tag via "enum { type = ... };" and TypedID<> is parameterized on it.
enum Types
{
	TypeNone,
	TypeType,
	TypeVariable,
	TypeConstant,
	TypeFunction,
	TypeFunctionPrototype,
	TypeBlock,
	TypeExtension,
	TypeExpression,
	TypeConstantOp,
	TypeCombinedImageSampler,
	TypeAccessChain,
	TypeUndef,
	TypeString,

	// Keep this last; number of valid tags.
	TypeCount
};
2019-09-07 18:07:48 +03:00
// Forward declaration; the primary template is defined below.
template <Types type>
class TypedID;

// The TypeNone specialization is the "untyped" ID. It converts freely
// to and from any strongly typed TypedID<U>, and implicitly to uint32_t.
template <>
class TypedID<TypeNone>
{
public:
	TypedID() = default;
	TypedID(uint32_t id_)
	    : id(id_)
	{
	}

	// An untyped ID can be constructed from any typed ID ...
	template <Types U>
	TypedID(const TypedID<U> &other)
	{
		*this = other;
	}

	// ... and assigned from one as well.
	template <Types U>
	TypedID &operator=(const TypedID<U> &other)
	{
		id = uint32_t(other);
		return *this;
	}

	// Implicit conversion to u32 is desired here.
	// As long as we block implicit conversion between TypedID<A> and TypedID<B> we're good.
	operator uint32_t() const
	{
		return id;
	}

	// Converting back to any typed ID is implicit for the untyped ID.
	template <Types U>
	operator TypedID<U>() const
	{
		return TypedID<U>(*this);
	}

	bool operator==(const TypedID &other) const
	{
		return id == other.id;
	}

	bool operator!=(const TypedID &other) const
	{
		return !(*this == other);
	}

	// Comparisons against typed IDs compare the raw 32-bit values.
	template <Types type>
	bool operator==(const TypedID<type> &other) const
	{
		return id == uint32_t(other);
	}

	template <Types type>
	bool operator!=(const TypedID<type> &other) const
	{
		return !(*this == other);
	}

private:
	uint32_t id = 0;
};
// Strongly typed ID. Converts implicitly to uint32_t, but only explicitly
// from the untyped ID, and compares only against its own type or TypeNone.
template <Types type>
class TypedID
{
public:
	TypedID() = default;
	TypedID(uint32_t id_)
	    : id(id_)
	{
	}

	// Conversion from the untyped ID is explicit so two different typed
	// ID spaces cannot be crossed by accident.
	explicit TypedID(const TypedID<TypeNone> &other)
	    : id(uint32_t(other))
	{
	}

	operator uint32_t() const
	{
		return id;
	}

	bool operator==(const TypedID &other) const
	{
		return id == other.id;
	}

	bool operator!=(const TypedID &other) const
	{
		return !(*this == other);
	}

	bool operator==(const TypedID<TypeNone> &other) const
	{
		return id == uint32_t(other);
	}

	bool operator!=(const TypedID<TypeNone> &other) const
	{
		return !(*this == other);
	}

private:
	uint32_t id = 0;
};
// Convenience aliases for the strongly typed ID variants.
using VariableID = TypedID<TypeVariable>;
using TypeID = TypedID<TypeType>;
using ConstantID = TypedID<TypeConstant>;
using FunctionID = TypedID<TypeFunction>;
using BlockID = TypedID<TypeBlock>;
using ID = TypedID<TypeNone>;

// Helper for Variant interface.
struct IVariant
{
	virtual ~IVariant() = default;
	// Copies this object into the given pool, which must be the
	// ObjectPool<T> for the concrete subclass (see macro below).
	virtual IVariant *clone(ObjectPoolBase *pool) = 0;

	// The ID this object is registered under — presumably assigned when
	// the object is created; confirm against the parser/compiler.
	ID self = 0;
};

// Implements IVariant::clone for subclass T by copy-constructing *this
// into the (down-cast) object pool.
#define SPIRV_CROSS_DECLARE_CLONE(T) \
	IVariant *clone(ObjectPoolBase *pool) override \
	{ \
		return static_cast<ObjectPool<T> *>(pool)->allocate(*this); \
	}
2019-01-10 06:23:47 +03:00
// Represents an undefined value of a given type (presumably OpUndef —
// confirm against the parser).
struct SPIRUndef : IVariant
{
	enum
	{
		type = TypeUndef
	};

	explicit SPIRUndef(TypeID basetype_)
	    : basetype(basetype_)
	{
	}

	// Type of the undefined value.
	TypeID basetype;

	SPIRV_CROSS_DECLARE_CLONE(SPIRUndef)
};
2019-06-07 08:04:16 +03:00
// Holds a literal string — presumably created from OpString (used e.g. by
// debug-info instructions); confirm against the parser.
struct SPIRString : IVariant
{
	enum
	{
		type = TypeString
	};

	explicit SPIRString(std::string str_)
	    : str(std::move(str_))
	{
	}

	std::string str;

	SPIRV_CROSS_DECLARE_CLONE(SPIRString)
};
2019-01-10 06:23:47 +03:00
// This type is only used by backends which need to access the combined image and sampler IDs separately after
// the OpSampledImage opcode.
struct SPIRCombinedImageSampler : IVariant
{
	enum
	{
		type = TypeCombinedImageSampler
	};

	SPIRCombinedImageSampler(TypeID type_, VariableID image_, VariableID sampler_)
	    : combined_type(type_)
	    , image(image_)
	    , sampler(sampler_)
	{
	}

	// Type of the combined image/sampler.
	TypeID combined_type;
	// The image variable.
	VariableID image;
	// The sampler variable.
	VariableID sampler;

	SPIRV_CROSS_DECLARE_CLONE(SPIRCombinedImageSampler)
};
// A constant expression: an opcode plus its operand words — presumably
// parsed from OpSpecConstantOp; confirm against the parser.
struct SPIRConstantOp : IVariant
{
	enum
	{
		type = TypeConstantOp
	};

	SPIRConstantOp(TypeID result_type, spv::Op op, const uint32_t *args, uint32_t length)
	    : opcode(op)
	    , basetype(result_type)
	{
		// Copy the operand words out of the instruction stream.
		arguments.reserve(length);
		for (uint32_t i = 0; i < length; i++)
			arguments.push_back(args[i]);
	}

	spv::Op opcode;
	// Raw operand words for the opcode.
	SmallVector<uint32_t> arguments;
	// Result type of the constant expression.
	TypeID basetype;

	SPIRV_CROSS_DECLARE_CLONE(SPIRConstantOp)
};
// Represents a SPIR-V type. Scalars, vectors, matrices, arrays, structs,
// images and pointers are all folded into this single struct.
struct SPIRType : IVariant
{
	enum
	{
		type = TypeType
	};

	// Fundamental scalar category of the type.
	enum BaseType
	{
		Unknown,
		Void,
		Boolean,
		SByte,
		UByte,
		Short,
		UShort,
		Int,
		UInt,
		Int64,
		UInt64,
		AtomicCounter,
		Half,
		Float,
		Double,
		Struct,
		Image,
		SampledImage,
		Sampler,
		AccelerationStructureNV,

		// Keep internal types at the end.
		ControlPointArray,
		Char
	};

	// Scalar/vector/matrix support.
	BaseType basetype = Unknown;
	uint32_t width = 0;   // Presumably the scalar bit width (e.g. 32 for float) — confirm against the parser.
	uint32_t vecsize = 1; // Vector component count; 1 for scalars.
	uint32_t columns = 1; // Matrix column count; 1 for non-matrices.

	// Arrays, support array of arrays by having a vector of array sizes.
	SmallVector<uint32_t> array;

	// Array elements can be either specialization constants or specialization ops.
	// This array determines how to interpret the array size.
	// If an element is true, the element is a literal,
	// otherwise, it's an expression, which must be resolved on demand.
	// The actual size is not really known until runtime.
	SmallVector<bool> array_size_literal;

	// Pointers
	// Keep track of how many pointer layers we have.
	uint32_t pointer_depth = 0;
	bool pointer = false;

	spv::StorageClass storage = spv::StorageClassGeneric;

	// Member types, presumably in declaration order for Struct types.
	SmallVector<TypeID> member_types;

	// Image-specific metadata (fields mirror OpTypeImage operands).
	struct ImageType
	{
		TypeID type;
		spv::Dim dim;
		bool depth;
		bool arrayed;
		bool ms;
		uint32_t sampled;
		spv::ImageFormat format;
		spv::AccessQualifier access;
	} image;

	// Structs can be declared multiple times if they are used as part of interface blocks.
	// We want to detect this so that we only emit the struct definition once.
	// Since we cannot rely on OpName to be equal, we need to figure out aliases.
	TypeID type_alias = 0;

	// Denotes the type which this type is based on.
	// Allows the backend to traverse how a complex type is built up during access chains.
	TypeID parent_type = 0;

	// Used in backends to avoid emitting members with conflicting names.
	std::unordered_set<std::string> member_name_cache;

	SPIRV_CROSS_DECLARE_CLONE(SPIRType)
};
// Tracks an imported extended instruction set — presumably created for
// OpExtInstImport; confirm against the parser.
struct SPIRExtension : IVariant
{
	enum
	{
		type = TypeExtension
	};

	// The recognized instruction sets.
	enum Extension
	{
		Unsupported,
		GLSL,
		SPV_debug_info,
		SPV_AMD_shader_ballot,
		SPV_AMD_shader_explicit_vertex_parameter,
		SPV_AMD_shader_trinary_minmax,
		SPV_AMD_gcn_shader
	};

	explicit SPIRExtension(Extension ext_)
	    : ext(ext_)
	{
	}

	Extension ext;

	SPIRV_CROSS_DECLARE_CLONE(SPIRExtension)
};
// SPIREntryPoint is not a variant since its IDs are used to decorate OpFunction,
// so in order to avoid conflicts, we can't stick them in the ids array.
struct SPIREntryPoint
{
	SPIREntryPoint(FunctionID self_, spv::ExecutionModel execution_model, const std::string &entry_name)
	    : self(self_)
	    , name(entry_name)
	    , orig_name(entry_name)
	    , model(execution_model)
	{
	}
	SPIREntryPoint() = default;

	// The function this entry point refers to.
	FunctionID self = 0;

	// Current entry point name; starts out equal to orig_name, so
	// presumably may be renamed later — confirm in the compiler.
	std::string name;
	// Name as it appeared in the module.
	std::string orig_name;
	SmallVector<VariableID> interface_variables;

	// Flag bits associated with this entry point — presumably execution
	// modes; confirm usage in the compiler.
	Bitset flags;
	struct
	{
		uint32_t x = 0, y = 0, z = 0;
		uint32_t constant = 0; // Workgroup size can be expressed as a constant/spec-constant instead.
	} workgroup_size;
	uint32_t invocations = 0;
	uint32_t output_vertices = 0;
	spv::ExecutionModel model = spv::ExecutionModelMax;
	bool geometry_passthrough = false;
};
struct SPIRExpression : IVariant
{
enum
{
type = TypeExpression
};
// Only created by the backend target to avoid creating tons of temporaries.
2019-09-07 18:07:48 +03:00
SPIRExpression(std::string expr, TypeID expression_type_, bool immutable_)
2019-01-10 06:23:47 +03:00
: expression(move(expr))
, expression_type(expression_type_)
, immutable(immutable_)
{
}
// If non-zero, prepend expression with to_expression(base_expression).
// Used in amortizing multiple calls to to_expression()
// where in certain cases that would quickly force a temporary when not needed.
2019-09-07 18:07:48 +03:00
ID base_expression = 0;
2019-01-10 06:23:47 +03:00
std::string expression;
2019-09-07 18:07:48 +03:00
TypeID expression_type = 0;
2019-01-10 06:23:47 +03:00
// If this expression is a forwarded load,
// allow us to reference the original variable.
2019-09-07 18:07:48 +03:00
ID loaded_from = 0;
2019-01-10 06:23:47 +03:00
// If this expression will never change, we can avoid lots of temporaries
// in high level source.
// An expression being immutable can be speculative,
// it is assumed that this is true almost always.
bool immutable = false;
// Before use, this expression must be transposed.
// This is needed for targets which don't support row_major layouts.
bool need_transpose = false;
// Whether or not this is an access chain expression.
bool access_chain = false;
// A list of expressions which this expression depends on.
2019-09-07 18:07:48 +03:00
SmallVector<ID> expression_dependencies;
2019-01-10 06:23:47 +03:00
// By reading this expression, we implicitly read these expressions as well.
// Used by access chain Store and Load since we read multiple expressions in this case.
2019-09-07 18:07:48 +03:00
SmallVector<ID> implied_read_expressions;
2019-01-10 06:23:47 +03:00
SPIRV_CROSS_DECLARE_CLONE(SPIRExpression)
};
// The signature of a function: return type plus parameter types.
struct SPIRFunctionPrototype : IVariant
{
	enum
	{
		type = TypeFunctionPrototype
	};

	explicit SPIRFunctionPrototype(TypeID return_type_)
	    : return_type(return_type_)
	{
	}

	TypeID return_type;
	// Parameter type IDs, presumably in declaration order — confirm against the parser.
	SmallVector<uint32_t> parameter_types;

	SPIRV_CROSS_DECLARE_CLONE(SPIRFunctionPrototype)
};
// A basic block in a function's control flow graph, together with the
// structured control-flow metadata (merge/continue blocks, hints, phi moves)
// the compiler needs to emit high-level source.
struct SPIRBlock : IVariant
{
	enum
	{
		type = TypeBlock
	};

	// How control leaves this block.
	enum Terminator
	{
		Unknown,
		Direct, // Emit next block directly without a particular condition.

		Select, // Block ends with an if/else block.
		MultiSelect, // Block ends with switch statement.

		Return, // Block ends with return.
		Unreachable, // Noop
		Kill // Discard
	};

	// Structured merge construct this block opens, if any.
	enum Merge
	{
		MergeNone,
		MergeLoop,
		MergeSelection
	};

	// Loop/selection control hints (mirror SPIR-V loop/selection control).
	enum Hints
	{
		HintNone,
		HintUnroll,
		HintDontUnroll,
		HintFlatten,
		HintDontFlatten
	};

	// Strategies for turning a loop header into a high-level loop.
	enum Method
	{
		MergeToSelectForLoop,
		MergeToDirectForLoop,
		MergeToSelectContinueForLoop
	};

	// Classification of a loop's continue block.
	enum ContinueBlockType
	{
		ContinueNone,

		// Continue block is branchless and has at least one instruction.
		ForLoop,

		// Noop continue block.
		WhileLoop,

		// Continue block is conditional.
		DoWhileLoop,

		// Highly unlikely that anything will use this,
		// since it is really awkward/impossible to express in GLSL.
		ComplexLoop
	};

	enum
	{
		NoDominator = 0xffffffffu
	};

	Terminator terminator = Unknown;
	Merge merge = MergeNone;
	Hints hint = HintNone;
	BlockID next_block = 0;    // Successor for Direct terminators — confirm usage in the compiler.
	BlockID merge_block = 0;   // Merge target of the structured construct, if any.
	BlockID continue_block = 0; // Continue target when this is a loop header, if any.

	ID return_value = 0; // If 0, return nothing (void).
	ID condition = 0;      // Condition driving Select/MultiSelect terminators.
	BlockID true_block = 0;
	BlockID false_block = 0;
	BlockID default_block = 0; // Default case for MultiSelect.

	// The instructions belonging to this block.
	SmallVector<Instruction> ops;

	struct Phi
	{
		ID local_variable; // flush local variable ...
		BlockID parent; // If we're in from_block and want to branch into this block ...
		VariableID function_variable; // to this function-global "phi" variable first.
	};

	// Before entering this block flush out local variables to magical "phi" variables.
	SmallVector<Phi> phi_variables;

	// Declare these temporaries before beginning the block.
	// Used for handling complex continue blocks which have side effects.
	SmallVector<std::pair<TypeID, ID>> declare_temporary;

	// Declare these temporaries, but only conditionally if this block turns out to be
	// a complex loop header.
	SmallVector<std::pair<TypeID, ID>> potential_declare_temporary;

	// One switch case: literal value and its target block.
	struct Case
	{
		uint32_t value;
		BlockID block;
	};
	SmallVector<Case> cases;

	// If we have tried to optimize code for this block but failed,
	// keep track of this.
	bool disable_block_optimization = false;

	// If the continue block is complex, fallback to "dumb" for loops.
	bool complex_continue = false;

	// Do we need a ladder variable to defer breaking out of a loop construct after a switch block?
	bool need_ladder_break = false;

	// If marked, we have explicitly handled Phi from this block, so skip any flushes related to that on a branch.
	// Used to handle an edge case with switch and case-label fallthrough where fall-through writes to Phi.
	BlockID ignore_phi_from_block = 0;

	// The dominating block which this block might be within.
	// Used in continue; blocks to determine if we really need to write continue.
	BlockID loop_dominator = 0;

	// All access to these variables are dominated by this block,
	// so before branching anywhere we need to make sure that we declare these variables.
	SmallVector<VariableID> dominated_variables;

	// These are variables which should be declared in a for loop header, if we
	// fail to use a classic for-loop,
	// we remove these variables, and fall back to regular variables outside the loop.
	SmallVector<VariableID> loop_variables;

	// Some expressions are control-flow dependent, i.e. any instruction which relies on derivatives or
	// sub-group-like operations.
	// Make sure that we only use these expressions in the original block.
	SmallVector<ID> invalidate_expressions;

	SPIRV_CROSS_DECLARE_CLONE(SPIRBlock)
};
// A function definition: its signature, arguments, local variables and
// the basic blocks making up its body.
struct SPIRFunction : IVariant
{
	enum
	{
		type = TypeFunction
	};

	SPIRFunction(TypeID return_type_, TypeID function_type_)
	    : return_type(return_type_)
	    , function_type(function_type_)
	{
	}

	// One formal parameter of the function.
	struct Parameter
	{
		TypeID type;
		ID id;
		// How often the parameter is read/written — used to decide
		// pass-by-value vs reference semantics; confirm in the backends.
		uint32_t read_count;
		uint32_t write_count;

		// Set to true if this parameter aliases a global variable,
		// used mostly in Metal where global variables
		// have to be passed down to functions as regular arguments.
		// However, for this kind of variable, we should not care about
		// read and write counts as access to the function arguments
		// is not local to the function in question.
		bool alias_global_variable;
	};

	// When calling a function, and we're remapping separate image samplers,
	// resolve these arguments into combined image samplers and pass them
	// as additional arguments in this order.
	// It gets more complicated as functions can pull in their own globals
	// and combine them with parameters,
	// so we need to distinguish if something is local parameter index
	// or a global ID.
	struct CombinedImageSamplerParameter
	{
		VariableID id;
		VariableID image_id;
		VariableID sampler_id;
		bool global_image;
		bool global_sampler;
		bool depth;
	};

	TypeID return_type;
	TypeID function_type; // The SPIRFunctionPrototype type of this function.
	SmallVector<Parameter> arguments;

	// Can be used by backends to add magic arguments.
	// Currently used by combined image/sampler implementation.
	SmallVector<Parameter> shadow_arguments;
	SmallVector<VariableID> local_variables;
	BlockID entry_block = 0; // First block executed when the function is called.
	SmallVector<BlockID> blocks; // All blocks belonging to this function.
	SmallVector<CombinedImageSamplerParameter> combined_parameters;

	// Source-level file/line associated with the function entry — presumably
	// from OpLine debug info; confirm against the parser.
	struct EntryLine
	{
		uint32_t file_id = 0;
		uint32_t line_literal = 0;
	};
	EntryLine entry_line;

	void add_local_variable(VariableID id)
	{
		local_variables.push_back(id);
	}

	void add_parameter(TypeID parameter_type, ID id, bool alias_global_variable = false)
	{
		// Arguments are read-only until proven otherwise.
		arguments.push_back({ parameter_type, id, 0u, 0u, alias_global_variable });
	}

	// Hooks to be run when the function returns.
	// Mostly used for lowering internal data structures onto flattened structures.
	// Need to defer this, because they might rely on things which change during compilation.
	// Intentionally not a small vector, this one is rare, and std::function can be large.
	Vector<std::function<void()>> fixup_hooks_out;

	// Hooks to be run when the function begins.
	// Mostly used for populating internal data structures from flattened structures.
	// Need to defer this, because they might rely on things which change during compilation.
	// Intentionally not a small vector, this one is rare, and std::function can be large.
	Vector<std::function<void()>> fixup_hooks_in;

	// On function entry, make sure to copy a constant array into thread addr space to work around
	// the case where we are passing a constant array by value to a function on backends which do not
	// consider arrays value types.
	SmallVector<ID> constant_arrays_needed_on_stack;

	// Compiler bookkeeping flags — confirm exact semantics in the compiler.
	bool active = false;
	bool flush_undeclared = true;
	bool do_combined_parameters = true;

	SPIRV_CROSS_DECLARE_CLONE(SPIRFunction)
};
// An access chain lowered to an explicit byte offset expression.
struct SPIRAccessChain : IVariant
{
	enum
	{
		type = TypeAccessChain
	};

	SPIRAccessChain(TypeID basetype_, spv::StorageClass storage_, std::string base_, std::string dynamic_index_,
	                int32_t static_index_)
	    : basetype(basetype_)
	    , storage(storage_)
	    , base(std::move(base_))
	    , dynamic_index(std::move(dynamic_index_))
	    , static_index(static_index_)
	{
	}

	// The access chain represents an offset into a buffer.
	// Some backends need more complicated handling of access chains to be able to use buffers, like HLSL
	// which has no usable buffer type ala GLSL SSBOs.
	// StructuredBuffer is too limited, so our only option is to deal with ByteAddressBuffer which works with raw addresses.
	TypeID basetype;
	spv::StorageClass storage;
	std::string base;          // Expression for the buffer being addressed.
	std::string dynamic_index; // Run-time computed part of the offset (expression).
	int32_t static_index;      // Compile-time known part of the offset.

	// The variable this chain was loaded from, if any.
	VariableID loaded_from = 0;
	uint32_t matrix_stride = 0;
	uint32_t array_stride = 0;
	bool row_major_matrix = false;
	bool immutable = false;

	// By reading this expression, we implicitly read these expressions as well.
	// Used by access chain Store and Load since we read multiple expressions in this case.
	SmallVector<ID> implied_read_expressions;

	SPIRV_CROSS_DECLARE_CLONE(SPIRAccessChain)
};
// A variable (OpVariable) together with the compiler bookkeeping needed
// to decide how and where it is declared in the generated source.
struct SPIRVariable : IVariant
{
	enum
	{
		type = TypeVariable
	};

	SPIRVariable() = default;
	SPIRVariable(TypeID basetype_, spv::StorageClass storage_, ID initializer_ = 0, VariableID basevariable_ = 0)
	    : basetype(basetype_)
	    , storage(storage_)
	    , initializer(initializer_)
	    , basevariable(basevariable_)
	{
	}

	TypeID basetype = 0;
	spv::StorageClass storage = spv::StorageClassGeneric;
	uint32_t decoration = 0;
	ID initializer = 0;        // Initial value, if any.
	VariableID basevariable = 0; // Variable this one is derived from, if any — confirm usage in the compiler.

	SmallVector<uint32_t> dereference_chain;
	bool compat_builtin = false;

	// If a variable is shadowed, we only statically assign to it
	// and never actually emit a statement for it.
	// When we read the variable as an expression, just forward
	// shadowed_id as the expression.
	bool statically_assigned = false;
	ID static_expression = 0;

	// Temporaries which can remain forwarded as long as this variable is not modified.
	SmallVector<ID> dependees;
	bool forwardable = true;

	bool deferred_declaration = false;
	bool phi_variable = false;

	// Used to deal with Phi variable flushes. See flush_phi().
	bool allocate_temporary_copy = false;

	bool remapped_variable = false;
	uint32_t remapped_components = 0;

	// The block which dominates all access to this variable.
	BlockID dominator = 0;
	// If true, this variable is a loop variable, when accessing the variable
	// outside a loop,
	// we should statically forward it.
	bool loop_variable = false;
	// Set to true while we're inside the for loop.
	bool loop_variable_enable = false;

	// Back-pointer to the function parameter this variable represents, if any.
	SPIRFunction::Parameter *parameter = nullptr;

	SPIRV_CROSS_DECLARE_CLONE(SPIRVariable)
};
struct SPIRConstant : IVariant
{
enum
{
type = TypeConstant
};
2020-02-05 08:38:12 +03:00
union Constant {
2019-01-10 06:23:47 +03:00
uint32_t u32;
int32_t i32;
float f32;
uint64_t u64;
int64_t i64;
double f64;
};
struct ConstantVector
{
Constant r[4];
// If != 0, this element is a specialization constant, and we should keep track of it as such.
2019-09-07 18:07:48 +03:00
ID id[4];
2019-01-10 06:23:47 +03:00
uint32_t vecsize = 1;
ConstantVector()
{
memset(r, 0, sizeof(r));
}
};
struct ConstantMatrix
{
ConstantVector c[4];
// If != 0, this column is a specialization constant, and we should keep track of it as such.
2019-09-07 18:07:48 +03:00
ID id[4];
2019-01-10 06:23:47 +03:00
uint32_t columns = 1;
};
static inline float f16_to_f32(uint16_t u16_value)
{
// Based on the GLM implementation.
int s = (u16_value >> 15) & 0x1;
int e = (u16_value >> 10) & 0x1f;
int m = (u16_value >> 0) & 0x3ff;
2020-02-05 08:38:12 +03:00
union {
2019-01-10 06:23:47 +03:00
float f32;
uint32_t u32;
} u;
if (e == 0)
{
if (m == 0)
{
u.u32 = uint32_t(s) << 31;
return u.f32;
}
else
{
while ((m & 0x400) == 0)
{
m <<= 1;
e--;
}
e++;
m &= ~0x400;
}
}
else if (e == 31)
{
if (m == 0)
{
u.u32 = (uint32_t(s) << 31) | 0x7f800000u;
return u.f32;
}
else
{
u.u32 = (uint32_t(s) << 31) | 0x7f800000u | (m << 13);
return u.f32;
}
}
e += 127 - 15;
m <<= 13;
u.u32 = (uint32_t(s) << 31) | (e << 23) | m;
return u.f32;
}
inline uint32_t specialization_constant_id(uint32_t col, uint32_t row) const
{
return m.c[col].id[row];
}
inline uint32_t specialization_constant_id(uint32_t col) const
{
return m.id[col];
}
inline uint32_t scalar(uint32_t col = 0, uint32_t row = 0) const
{
return m.c[col].r[row].u32;
}
inline int16_t scalar_i16(uint32_t col = 0, uint32_t row = 0) const
{
return int16_t(m.c[col].r[row].u32 & 0xffffu);
}
inline uint16_t scalar_u16(uint32_t col = 0, uint32_t row = 0) const
{
return uint16_t(m.c[col].r[row].u32 & 0xffffu);
}
2019-02-02 08:35:55 +03:00
inline int8_t scalar_i8(uint32_t col = 0, uint32_t row = 0) const
{
return int8_t(m.c[col].r[row].u32 & 0xffu);
}
inline uint8_t scalar_u8(uint32_t col = 0, uint32_t row = 0) const
{
return uint8_t(m.c[col].r[row].u32 & 0xffu);
}
2019-01-10 06:23:47 +03:00
inline float scalar_f16(uint32_t col = 0, uint32_t row = 0) const
{
return f16_to_f32(scalar_u16(col, row));
}
inline float scalar_f32(uint32_t col = 0, uint32_t row = 0) const
{
return m.c[col].r[row].f32;
}
inline int32_t scalar_i32(uint32_t col = 0, uint32_t row = 0) const
{
return m.c[col].r[row].i32;
}
inline double scalar_f64(uint32_t col = 0, uint32_t row = 0) const
{
return m.c[col].r[row].f64;
}
inline int64_t scalar_i64(uint32_t col = 0, uint32_t row = 0) const
{
return m.c[col].r[row].i64;
}
inline uint64_t scalar_u64(uint32_t col = 0, uint32_t row = 0) const
{
return m.c[col].r[row].u64;
}
inline const ConstantVector &vector() const
{
return m.c[0];
}
inline uint32_t vector_size() const
{
return m.c[0].vecsize;
}
inline uint32_t columns() const
{
return m.columns;
}
inline void make_null(const SPIRType &constant_type_)
{
m = {};
m.columns = constant_type_.columns;
for (auto &c : m.c)
c.vecsize = constant_type_.vecsize;
}
inline bool constant_is_null() const
{
if (specialization)
return false;
if (!subconstants.empty())
return false;
for (uint32_t col = 0; col < columns(); col++)
for (uint32_t row = 0; row < vector_size(); row++)
if (scalar_u64(col, row) != 0)
return false;
return true;
}
explicit SPIRConstant(uint32_t constant_type_)
: constant_type(constant_type_)
{
}
SPIRConstant() = default;
2019-09-07 18:07:48 +03:00
SPIRConstant(TypeID constant_type_, const uint32_t *elements, uint32_t num_elements, bool specialized)
2019-01-10 06:23:47 +03:00
: constant_type(constant_type_)
, specialization(specialized)
{
2019-09-07 18:07:48 +03:00
subconstants.reserve(num_elements);
for (uint32_t i = 0; i < num_elements; i++)
subconstants.push_back(elements[i]);
2019-01-10 06:23:47 +03:00
specialization = specialized;
}
// Construct scalar (32-bit).
2019-09-07 18:07:48 +03:00
SPIRConstant(TypeID constant_type_, uint32_t v0, bool specialized)
2019-01-10 06:23:47 +03:00
: constant_type(constant_type_)
, specialization(specialized)
{
m.c[0].r[0].u32 = v0;
m.c[0].vecsize = 1;
m.columns = 1;
}
// Construct scalar (64-bit).
2019-09-07 18:07:48 +03:00
SPIRConstant(TypeID constant_type_, uint64_t v0, bool specialized)
2019-01-10 06:23:47 +03:00
: constant_type(constant_type_)
, specialization(specialized)
{
m.c[0].r[0].u64 = v0;
m.c[0].vecsize = 1;
m.columns = 1;
}
// Construct vectors and matrices.
2019-09-07 18:07:48 +03:00
SPIRConstant(TypeID constant_type_, const SPIRConstant *const *vector_elements, uint32_t num_elements,
2019-01-10 06:23:47 +03:00
bool specialized)
: constant_type(constant_type_)
, specialization(specialized)
{
bool matrix = vector_elements[0]->m.c[0].vecsize > 1;
if (matrix)
{
m.columns = num_elements;
for (uint32_t i = 0; i < num_elements; i++)
{
m.c[i] = vector_elements[i]->m.c[0];
if (vector_elements[i]->specialization)
m.id[i] = vector_elements[i]->self;
}
}
else
{
m.c[0].vecsize = num_elements;
m.columns = 1;
for (uint32_t i = 0; i < num_elements; i++)
{
m.c[0].r[i] = vector_elements[i]->m.c[0].r[0];
if (vector_elements[i]->specialization)
m.c[0].id[i] = vector_elements[i]->self;
}
}
}
2019-09-07 18:07:48 +03:00
	// ID of the SPIRType this constant holds a value of.
	TypeID constant_type = 0;
2019-01-10 06:23:47 +03:00
	// Scalar/vector/matrix value payload for non-composite constants.
	ConstantMatrix m;
	// If this constant is a specialization constant (i.e. created with OpSpecConstant*).
	bool specialization = false;
	// If this constant is used as an array length which creates specialization restrictions on some backends.
	bool is_used_as_array_length = false;
	// If true, this is a LUT, and should always be declared in the outer scope.
	bool is_used_as_lut = false;
	// For composites which are constant arrays, etc.
2019-09-07 18:07:48 +03:00
	SmallVector<ConstantID> subconstants;
2019-01-10 06:23:47 +03:00
	// Non-Vulkan GLSL, HLSL and sometimes MSL emits defines for each specialization constant,
	// and uses them to initialize the constant. This allows the user
	// to still be able to specialize the value by supplying corresponding
	// preprocessor directives before compiling the shader.
	std::string specialization_constant_macro_name;
	SPIRV_CROSS_DECLARE_CLONE(SPIRConstant)
};
2019-04-15 07:53:50 +03:00
// Variants have a very specific allocation scheme.
struct ObjectPoolGroup
{
	// One object pool per IR object type (indexed by the Types enum); Variant
	// allocates and frees its payloads through these.
	std::unique_ptr<ObjectPoolBase> pools[TypeCount];
};
2019-01-10 06:23:47 +03:00
class Variant
{
public:
2019-04-15 07:53:50 +03:00
explicit Variant(ObjectPoolGroup *group_)
: group(group_)
{
}
~Variant()
{
if (holder)
group->pools[type]->free_opaque(holder);
}
2019-01-10 06:23:47 +03:00
// Marking custom move constructor as noexcept is important.
Variant(Variant &&other) SPIRV_CROSS_NOEXCEPT
{
*this = std::move(other);
}
2019-04-15 07:53:50 +03:00
// We cannot copy from other variant without our own pool group.
// Have to explicitly copy.
Variant(const Variant &variant) = delete;
2019-01-10 06:23:47 +03:00
// Marking custom move constructor as noexcept is important.
Variant &operator=(Variant &&other) SPIRV_CROSS_NOEXCEPT
{
if (this != &other)
{
2019-04-15 07:53:50 +03:00
if (holder)
group->pools[type]->free_opaque(holder);
holder = other.holder;
group = other.group;
2019-01-10 06:23:47 +03:00
type = other.type;
allow_type_rewrite = other.allow_type_rewrite;
2019-04-15 07:53:50 +03:00
other.holder = nullptr;
2019-01-10 06:23:47 +03:00
other.type = TypeNone;
}
return *this;
}
// This copy/clone should only be called in the Compiler constructor.
// If this is called inside ::compile(), we invalidate any references we took higher in the stack.
// This should never happen.
Variant &operator=(const Variant &other)
{
2019-04-15 07:53:50 +03:00
//#define SPIRV_CROSS_COPY_CONSTRUCTOR_SANITIZE
2019-01-10 06:23:47 +03:00
#ifdef SPIRV_CROSS_COPY_CONSTRUCTOR_SANITIZE
abort();
#endif
if (this != &other)
{
2019-04-15 07:53:50 +03:00
if (holder)
group->pools[type]->free_opaque(holder);
2019-01-10 06:23:47 +03:00
if (other.holder)
2019-04-15 07:53:50 +03:00
holder = other.holder->clone(group->pools[other.type].get());
else
holder = nullptr;
2019-01-10 06:23:47 +03:00
type = other.type;
allow_type_rewrite = other.allow_type_rewrite;
}
return *this;
}
2019-04-15 07:53:50 +03:00
void set(IVariant *val, Types new_type)
2019-01-10 06:23:47 +03:00
{
2019-04-15 07:53:50 +03:00
if (holder)
group->pools[type]->free_opaque(holder);
holder = nullptr;
2019-01-10 06:23:47 +03:00
if (!allow_type_rewrite && type != TypeNone && type != new_type)
2019-04-15 07:53:50 +03:00
{
if (val)
group->pools[new_type]->free_opaque(val);
2019-01-10 06:23:47 +03:00
SPIRV_CROSS_THROW("Overwriting a variant with new type.");
2019-04-15 07:53:50 +03:00
}
holder = val;
2019-01-10 06:23:47 +03:00
type = new_type;
allow_type_rewrite = false;
}
2019-04-15 07:53:50 +03:00
template <typename T, typename... Ts>
T *allocate_and_set(Types new_type, Ts &&... ts)
{
T *val = static_cast<ObjectPool<T> &>(*group->pools[new_type]).allocate(std::forward<Ts>(ts)...);
set(val, new_type);
return val;
}
2019-01-10 06:23:47 +03:00
template <typename T>
T &get()
{
if (!holder)
SPIRV_CROSS_THROW("nullptr");
2019-01-17 05:50:01 +03:00
if (static_cast<Types>(T::type) != type)
2019-01-10 06:23:47 +03:00
SPIRV_CROSS_THROW("Bad cast");
2019-04-15 07:53:50 +03:00
return *static_cast<T *>(holder);
2019-01-10 06:23:47 +03:00
}
template <typename T>
const T &get() const
{
if (!holder)
SPIRV_CROSS_THROW("nullptr");
2019-01-17 05:50:01 +03:00
if (static_cast<Types>(T::type) != type)
2019-01-10 06:23:47 +03:00
SPIRV_CROSS_THROW("Bad cast");
2019-04-15 07:53:50 +03:00
return *static_cast<const T *>(holder);
2019-01-10 06:23:47 +03:00
}
2019-01-17 05:50:01 +03:00
Types get_type() const
2019-01-10 06:23:47 +03:00
{
return type;
}
2019-09-07 18:07:48 +03:00
ID get_id() const
2019-01-10 06:23:47 +03:00
{
2019-09-07 18:07:48 +03:00
return holder ? holder->self : ID(0);
2019-01-10 06:23:47 +03:00
}
bool empty() const
{
return !holder;
}
void reset()
{
2019-04-15 07:53:50 +03:00
if (holder)
group->pools[type]->free_opaque(holder);
holder = nullptr;
2019-01-10 06:23:47 +03:00
type = TypeNone;
}
void set_allow_type_rewrite()
{
allow_type_rewrite = true;
}
private:
2019-04-15 07:53:50 +03:00
ObjectPoolGroup *group = nullptr;
IVariant *holder = nullptr;
2019-01-17 05:50:01 +03:00
Types type = TypeNone;
2019-01-10 06:23:47 +03:00
bool allow_type_rewrite = false;
};
template <typename T>
T &variant_get(Variant &var)
{
return var.get<T>();
}
template <typename T>
const T &variant_get(const Variant &var)
{
return var.get<T>();
}
template <typename T, typename... P>
T &variant_set(Variant &var, P &&... args)
{
2019-04-15 07:53:50 +03:00
auto *ptr = var.allocate_and_set<T>(static_cast<Types>(T::type), std::forward<P>(args)...);
2019-01-10 06:23:47 +03:00
return *ptr;
}
// Side-band information produced when lowering an OpAccessChain.
struct AccessChainMeta
{
2019-07-25 06:25:58 +03:00
	// PhysicalTypeID of the accessed storage (see SPIRVCrossDecorationPhysicalTypeID); 0 when unused.
	uint32_t storage_physical_type = 0;
2019-01-10 06:23:47 +03:00
	// NOTE(review): presumably set when the accessed matrix needs a transpose on load/store — confirm at call sites.
	bool need_transpose = false;
	// Storage uses tight/packed layout (see SPIRVCrossDecorationPhysicalTypePacked).
	bool storage_is_packed = false;
	// NOTE(review): presumably mirrors the Invariant decoration of the accessed object — confirm at call sites.
	bool storage_is_invariant = false;
};
2019-07-25 06:25:58 +03:00
// SPIRV-Cross internal decorations, stored next to the regular SPIR-V ones.
enum ExtendedDecorations
{
	// Marks if a buffer block is re-packed, i.e. member declaration might be subject to PhysicalTypeID remapping and padding.
	SPIRVCrossDecorationBufferBlockRepacked = 0,
	// A type in a buffer block might be declared with a different physical type than the logical type.
	// If this is not set, PhysicalTypeID == the SPIR-V type as declared.
	SPIRVCrossDecorationPhysicalTypeID,
	// Marks if the physical type is to be declared with tight packing rules, i.e. packed_floatN on MSL and friends.
	// If this is set, PhysicalTypeID might also be set. It can be set to same as logical type if all we're doing
	// is converting float3 to packed_float3 for example.
	// If this is marked on a struct, it means the struct itself must use only Packed types for all its members.
	SPIRVCrossDecorationPhysicalTypePacked,
	// The padding in bytes before declaring this struct member.
	// If used on a struct type, marks the target size of a struct.
	SPIRVCrossDecorationPaddingTarget,
	// NOTE(review): appears to hold the member's index within a flattened interface block — confirm at use sites.
	SPIRVCrossDecorationInterfaceMemberIndex,
	// NOTE(review): appears to hold the original ID of the interface object this was split from — confirm at use sites.
	SPIRVCrossDecorationInterfaceOrigID,
	// Primary resource binding index assigned by the backend.
	SPIRVCrossDecorationResourceIndexPrimary,
	// Used for decorations like resource indices for samplers when part of combined image samplers.
	// A variable might need to hold two resource indices in this case.
	SPIRVCrossDecorationResourceIndexSecondary,
2019-09-07 18:07:48 +03:00
	// Used for resource indices for multiplanar images when part of combined image samplers.
	SPIRVCrossDecorationResourceIndexTertiary,
	SPIRVCrossDecorationResourceIndexQuaternary,
2019-07-25 06:25:58 +03:00
	// Marks a buffer block for using explicit offsets (GLSL/HLSL).
	SPIRVCrossDecorationExplicitOffset,
2019-08-10 06:33:38 +03:00
	// Apply to a variable in the Input storage class; marks it as holding the base group passed to vkCmdDispatchBase().
	// In MSL, this is used to adjust the WorkgroupId and GlobalInvocationId variables.
	SPIRVCrossDecorationBuiltInDispatchBase,
2019-09-07 18:07:48 +03:00
	// Apply to a variable that is a function parameter; marks it as being a "dynamic"
	// combined image-sampler. In MSL, this is used when a function parameter might hold
	// either a regular combined image-sampler or one that has an attached sampler
	// Y'CbCr conversion.
	SPIRVCrossDecorationDynamicImageSampler,
2019-07-25 06:25:58 +03:00
	// Number of extended decorations. Keep last.
	SPIRVCrossDecorationCount
};
2019-01-10 06:23:47 +03:00
struct Meta
{
struct Decoration
{
std::string alias;
std::string qualified_alias;
std::string hlsl_semantic;
Bitset decoration_flags;
spv::BuiltIn builtin_type = spv::BuiltInMax;
uint32_t location = 0;
uint32_t component = 0;
uint32_t set = 0;
uint32_t binding = 0;
uint32_t offset = 0;
2020-02-05 08:38:12 +03:00
uint32_t xfb_buffer = 0;
uint32_t xfb_stride = 0;
2019-01-10 06:23:47 +03:00
uint32_t array_stride = 0;
uint32_t matrix_stride = 0;
uint32_t input_attachment = 0;
uint32_t spec_id = 0;
uint32_t index = 0;
spv::FPRoundingMode fp_rounding_mode = spv::FPRoundingModeMax;
bool builtin = false;
2019-01-17 19:43:47 +03:00
2019-07-25 06:25:58 +03:00
struct Extended
2019-01-17 19:43:47 +03:00
{
2019-07-25 06:25:58 +03:00
Extended()
{
// MSVC 2013 workaround to init like this.
for (auto &v : values)
v = 0;
}
Bitset flags;
uint32_t values[SPIRVCrossDecorationCount];
2019-01-17 19:43:47 +03:00
} extended;
2019-01-10 06:23:47 +03:00
};
Decoration decoration;
2019-04-15 07:53:50 +03:00
// Intentionally not a SmallVector. Decoration is large and somewhat rare.
Vector<Decoration> members;
2019-01-10 06:23:47 +03:00
std::unordered_map<uint32_t, uint32_t> decoration_word_offset;
// For SPV_GOOGLE_hlsl_functionality1.
bool hlsl_is_magic_counter_buffer = false;
// ID for the sibling counter buffer.
uint32_t hlsl_magic_counter_buffer = 0;
};
// A user callback that remaps the type of any variable.
// var_name is the declared name of the variable.
// name_of_type is the textual name of the type which will be used in the code unless written to by the callback.
// The callback may overwrite name_of_type to substitute its own type name in the generated code.
using VariableTypeRemapCallback =
    std::function<void(const SPIRType &type, const std::string &var_name, std::string &name_of_type)>;
// 64-bit FNV-1 style accumulator (prime 0x100000001b3, offset basis
// 0xcbf29ce484222325) used to build deterministic hashes from 32-bit words.
class Hasher
{
public:
	// Fold one 32-bit word into the running hash.
	inline void u32(uint32_t value)
	{
		h *= 0x100000001b3ull;
		h ^= value;
	}

	// Current accumulated hash.
	inline uint64_t get() const
	{
		return h;
	}

private:
	uint64_t h = 0xcbf29ce484222325ull;
};
// True for the floating-point base types: Half, Float, Double.
static inline bool type_is_floating_point(const SPIRType &type)
{
	switch (type.basetype)
	{
	case SPIRType::Half:
	case SPIRType::Float:
	case SPIRType::Double:
		return true;
	default:
		return false;
	}
}
// True for any signed or unsigned integer base type (8 through 64 bits).
static inline bool type_is_integral(const SPIRType &type)
{
	switch (type.basetype)
	{
	case SPIRType::SByte:
	case SPIRType::UByte:
	case SPIRType::Short:
	case SPIRType::UShort:
	case SPIRType::Int:
	case SPIRType::UInt:
	case SPIRType::Int64:
	case SPIRType::UInt64:
		return true;
	default:
		return false;
	}
}
2019-02-02 08:35:55 +03:00
// Map a scalar bit width to the corresponding signed integer base type.
// Throws for widths other than 8/16/32/64.
static inline SPIRType::BaseType to_signed_basetype(uint32_t width)
{
	if (width == 8)
		return SPIRType::SByte;
	if (width == 16)
		return SPIRType::Short;
	if (width == 32)
		return SPIRType::Int;
	if (width == 64)
		return SPIRType::Int64;
	SPIRV_CROSS_THROW("Invalid bit width.");
}
// Map a scalar bit width to the corresponding unsigned integer base type.
// Throws for widths other than 8/16/32/64.
static inline SPIRType::BaseType to_unsigned_basetype(uint32_t width)
{
	if (width == 8)
		return SPIRType::UByte;
	if (width == 16)
		return SPIRType::UShort;
	if (width == 32)
		return SPIRType::UInt;
	if (width == 64)
		return SPIRType::UInt64;
	SPIRV_CROSS_THROW("Invalid bit width.");
}
// Returns true if an arithmetic operation does not change behavior depending on signedness.
// Equality tests, modular add/sub/mul, left shift and bitwise ops all qualify.
static inline bool opcode_is_sign_invariant(spv::Op opcode)
{
	return opcode == spv::OpIEqual || opcode == spv::OpINotEqual || opcode == spv::OpISub ||
	       opcode == spv::OpIAdd || opcode == spv::OpIMul || opcode == spv::OpShiftLeftLogical ||
	       opcode == spv::OpBitwiseOr || opcode == spv::OpBitwiseXor || opcode == spv::OpBitwiseAnd;
}
2020-02-05 08:38:12 +03:00
// A (descriptor set, binding) coordinate, ordered lexicographically.
struct SetBindingPair
{
	uint32_t desc_set;
	uint32_t binding;

	inline bool operator==(const SetBindingPair &other) const
	{
		// Equal only when both coordinates match.
		return binding == other.binding && desc_set == other.desc_set;
	}

	inline bool operator<(const SetBindingPair &other) const
	{
		// Order by descriptor set first, then by binding.
		if (desc_set != other.desc_set)
			return desc_set < other.desc_set;
		return binding < other.binding;
	}
};
// A (shader stage, descriptor set, binding) coordinate.
struct StageSetBinding
{
	spv::ExecutionModel model;
	uint32_t desc_set;
	uint32_t binding;

	inline bool operator==(const StageSetBinding &other) const
	{
		// All three coordinates must match.
		if (model != other.model)
			return false;
		if (desc_set != other.desc_set)
			return false;
		return binding == other.binding;
	}
};
// Hash functor for SetBindingPair / StageSetBinding keys in unordered maps.
struct InternalHasher
{
	inline size_t operator()(const SetBindingPair &value) const
	{
		// Quality of hash doesn't really matter here.
		auto combined = std::hash<uint32_t>()(value.desc_set) * 0x10001b31;
		combined ^= std::hash<uint32_t>()(value.binding);
		return combined;
	}

	inline size_t operator()(const StageSetBinding &value) const
	{
		// Quality of hash doesn't really matter here.
		auto combined = std::hash<uint32_t>()(value.model) * 0x10001b31;
		combined ^= std::hash<uint32_t>()(value.desc_set);
		return (combined * 0x10001b31) ^ value.binding;
	}
};
// Special constant used in a {MSL,HLSL}ResourceBinding desc_set
// element to indicate the bindings for the push constants.
// (Push constants have no real descriptor set; this sentinel stands in for one.)
static const uint32_t ResourceBindingPushConstantDescriptorSet = ~(0u);
// Special constant used in a {MSL,HLSL}ResourceBinding binding
// element to indicate the bindings for the push constants.
static const uint32_t ResourceBindingPushConstantBinding = 0;
2019-04-15 07:53:50 +03:00
} // namespace SPIRV_CROSS_NAMESPACE
2019-01-10 06:23:47 +03:00
2019-09-07 18:07:48 +03:00
namespace std
{
// Make TypedID usable directly as a key in std::unordered_map / unordered_set.
template <SPIRV_CROSS_NAMESPACE::Types type>
struct hash<SPIRV_CROSS_NAMESPACE::TypedID<type>>
{
	size_t operator()(const SPIRV_CROSS_NAMESPACE::TypedID<type> &value) const
	{
		// TypedID converts to its underlying 32-bit ID; hash that value.
		hash<uint32_t> hasher;
		return hasher(value);
	}
};
} // namespace std
2019-01-10 06:23:47 +03:00
#endif