Updated spirv-cross.

Бранимир Караџић 2019-05-05 19:03:54 -07:00
parent f19690fc99
commit 0e1d19e14e
5 changed files with 177 additions and 14 deletions

View File

@ -0,0 +1,31 @@
#version 450
#extension GL_EXT_nonuniform_qualifier : require

layout(set = 0, binding = 2, std140) uniform UBO
{
    vec4 v[64];
} ubos[];

layout(set = 0, binding = 3, std430) readonly buffer SSBO
{
    vec4 v[];
} ssbos[];

layout(set = 0, binding = 0) uniform texture2D uSamplers[];
layout(set = 0, binding = 1) uniform sampler uSamps[];
layout(set = 0, binding = 4) uniform sampler2D uCombinedSamplers[];

layout(location = 0) flat in int vIndex;
layout(location = 0) out vec4 FragColor;
layout(location = 1) in vec2 vUV;

void main()
{
    int _22 = vIndex + 10;
    int _32 = vIndex + 40;
    FragColor = texture(sampler2D(uSamplers[nonuniformEXT(_22)], uSamps[nonuniformEXT(_32)]), vUV);
    FragColor = texture(uCombinedSamplers[nonuniformEXT(_22)], vUV);
    FragColor += ubos[nonuniformEXT(vIndex + 20)].v[_32];
    FragColor += ssbos[nonuniformEXT(vIndex + 50)].v[vIndex + 60];
}

View File

@ -0,0 +1,30 @@
#version 450
#extension GL_EXT_nonuniform_qualifier : require

layout(set = 0, binding = 2, std140) uniform UBO
{
    vec4 v[64];
} ubos[];

layout(set = 0, binding = 3, std430) readonly buffer SSBO
{
    vec4 v[];
} ssbos[];

layout(set = 0, binding = 0) uniform texture2D uSamplers[];
layout(set = 0, binding = 1) uniform sampler uSamps[];
layout(set = 0, binding = 4) uniform sampler2D uCombinedSamplers[];

layout(location = 0) flat in int vIndex;
layout(location = 0) out vec4 FragColor;
layout(location = 1) in vec2 vUV;

void main()
{
    int i = vIndex;
    FragColor = texture(sampler2D(uSamplers[nonuniformEXT(i + 10)], uSamps[nonuniformEXT(i + 40)]), vUV);
    FragColor = texture(uCombinedSamplers[nonuniformEXT(i + 10)], vUV);
    FragColor += ubos[nonuniformEXT(i + 20)].v[i + 40];
    FragColor += ssbos[nonuniformEXT(i + 50)].v[i + 60];
}

View File

@ -0,0 +1,28 @@
#version 450
#extension GL_EXT_nonuniform_qualifier : require

layout(binding = 0) uniform texture2D uSamplers[];
layout(binding = 4) uniform sampler2D uCombinedSamplers[];
layout(binding = 1) uniform sampler uSamps[];

layout(location = 0) flat in int vIndex;
layout(location = 1) in vec2 vUV;
layout(location = 0) out vec4 FragColor;

layout(set = 0, binding = 2) uniform UBO
{
    vec4 v[64];
} ubos[];

layout(set = 0, binding = 3) readonly buffer SSBO
{
    vec4 v[];
} ssbos[];

void main()
{
    int i = vIndex;
    FragColor = texture(sampler2D(uSamplers[nonuniformEXT(i + 10)], uSamps[nonuniformEXT(i + 40)]), vUV);
    FragColor = texture(uCombinedSamplers[nonuniformEXT(i + 10)], vUV);
    FragColor += ubos[nonuniformEXT(i + 20)].v[nonuniformEXT(i + 40)];
    FragColor += ssbos[nonuniformEXT(i + 50)].v[nonuniformEXT(i + 60)];
}
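The three shaders above are test data: the first two read like generated Vulkan GLSL references (the first keeps the hoisted _22/_32 SSA temporaries), while the third appears to be the hand-written input, which additionally marks the vector indices themselves with nonuniformEXT. For context, here is a minimal sketch of how such output is produced through the SPIRV-Cross API; it is not part of this commit, and spirv_words is a placeholder for the compiled test shader:

#include <spirv_glsl.hpp>

#include <string>
#include <vector>

std::string cross_compile_to_vulkan_glsl(const std::vector<uint32_t> &spirv_words)
{
    spirv_cross::CompilerGLSL compiler(spirv_words);

    // vulkan_semantics must be enabled; the new capability check below throws
    // for desktop GL targets when descriptor indexing is used.
    spirv_cross::CompilerGLSL::Options opts = compiler.get_common_options();
    opts.version = 450;
    opts.vulkan_semantics = true;
    compiler.set_common_options(opts);

    return compiler.compile();
}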

View File

@ -461,6 +461,29 @@ void CompilerGLSL::find_static_extensions()
    {
        SPIRV_CROSS_THROW("Only Logical and PhysicalStorageBuffer64EXT addressing models are supported.");
    }

    // Check for nonuniform qualifier.
    // Instead of looping over all decorations to find this, just look at capabilities.
    for (auto &cap : ir.declared_capabilities)
    {
        bool nonuniform_indexing = false;
        switch (cap)
        {
        case CapabilityShaderNonUniformEXT:
        case CapabilityRuntimeDescriptorArrayEXT:
            if (!options.vulkan_semantics)
                SPIRV_CROSS_THROW("GL_EXT_nonuniform_qualifier is only supported in Vulkan GLSL.");
            require_extension_internal("GL_EXT_nonuniform_qualifier");
            nonuniform_indexing = true;
            break;

        default:
            break;
        }

        if (nonuniform_indexing)
            break;
    }
}

string CompilerGLSL::compile()
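Usage note, again with spirv_words as a placeholder: the capability scan above makes the failure mode explicit for desktop GL. Compiling a descriptor-indexing shader without Vulkan semantics now throws instead of emitting invalid GLSL. A hedged sketch:

#include <spirv_glsl.hpp>

#include <cstdio>
#include <vector>

void try_desktop_gl(const std::vector<uint32_t> &spirv_words)
{
    spirv_cross::CompilerGLSL compiler(spirv_words);
    // Default options leave vulkan_semantics == false.
    try
    {
        compiler.compile();
    }
    catch (const spirv_cross::CompilerError &e)
    {
        // Expected: "GL_EXT_nonuniform_qualifier is only supported in Vulkan GLSL."
        std::fprintf(stderr, "%s\n", e.what());
    }
}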
@ -798,6 +821,8 @@ void CompilerGLSL::emit_struct(SPIRType &type)
string CompilerGLSL::to_interpolation_qualifiers(const Bitset &flags)
{
    string res;
    if (flags.get(DecorationNonUniformEXT))
        res += "nonuniformEXT ";
    //if (flags & (1ull << DecorationSmooth))
    //    res += "smooth ";
    if (flags.get(DecorationFlat))
@ -6140,19 +6165,34 @@ string CompilerGLSL::access_chain_internal(uint32_t base, const uint32_t *indice
    bool pending_array_enclose = false;
    bool dimension_flatten = false;

    const auto append_index = [&](uint32_t index) {
        expr += "[";

        // If we are indexing into an array of SSBOs or UBOs, we need to index it with a non-uniform qualifier.
        bool nonuniform_index =
            has_decoration(index, DecorationNonUniformEXT) &&
            (has_decoration(type->self, DecorationBlock) || has_decoration(type->self, DecorationBufferBlock));
        if (nonuniform_index)
        {
            expr += backend.nonuniform_qualifier;
            expr += "(";
        }

        if (index_is_literal)
            expr += convert_to_string(index);
        else
            expr += to_expression(index, register_expression_read);

        if (nonuniform_index)
            expr += ")";

        expr += "]";
    };

    for (uint32_t i = 0; i < count; i++)
    {
        uint32_t index = indices[i];

        const auto append_index = [&]() {
            expr += "[";
            if (index_is_literal)
                expr += convert_to_string(index);
            else
                expr += to_expression(index, register_expression_read);
            expr += "]";
        };

        // Pointer chains
        if (ptr_chain && i == 0)
        {
@ -6190,7 +6230,7 @@ string CompilerGLSL::access_chain_internal(uint32_t base, const uint32_t *indice
        }
        else
        {
            append_index();
            append_index(index);
        }

        if (type->basetype == SPIRType::ControlPointArray)
@ -6237,11 +6277,11 @@ string CompilerGLSL::access_chain_internal(uint32_t base, const uint32_t *indice
            else if (var->storage == StorageClassOutput)
                expr = join("gl_out[", to_expression(index, register_expression_read), "].", expr);
            else
                append_index();
                append_index(index);
            break;

        default:
            append_index();
            append_index(index);
            break;
        }
    }
@ -6271,7 +6311,7 @@ string CompilerGLSL::access_chain_internal(uint32_t base, const uint32_t *indice
        }
        else
        {
            append_index();
            append_index(index);
        }

        type_id = type->parent_type;
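The effect of parameterizing append_index is easiest to see on a concrete chain. A standalone sketch of the emission logic, simplified to plain strings instead of SPIR-V IDs, with a boolean standing in for the decoration and block checks:

#include <cstdio>
#include <string>

// Simplified model of append_index: wrap the index in the backend's
// non-uniform qualifier only when the index is decorated NonUniformEXT
// and the base is an array of UBO/SSBO blocks.
static std::string append_index(std::string expr, const std::string &index, bool nonuniform_index)
{
    expr += "[";
    if (nonuniform_index)
        expr += "nonuniformEXT(";
    expr += index;
    if (nonuniform_index)
        expr += ")";
    expr += "]";
    return expr;
}

int main()
{
    // Mirrors the reference output: ssbos[nonuniformEXT(i + 50)].v[i + 60]
    std::string expr = append_index("ssbos", "i + 50", true);
    expr += ".v";
    expr = append_index(expr, "i + 60", false);
    std::printf("%s\n", expr.c_str());
    return 0;
}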
@ -7412,13 +7452,16 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
        // Similar workarounds are required for input arrays in tessellation.
        unroll_array_from_complex_load(id, ptr, expr);

        auto &type = get<SPIRType>(result_type);
        if (has_decoration(id, DecorationNonUniformEXT))
            convert_non_uniform_expression(type, expr);

        if (ptr_expression)
            ptr_expression->need_transpose = old_need_transpose;

        // By default, suppress usage tracking since using same expression multiple times does not imply any extra work.
        // However, if we try to load a complex, composite object from a flattened buffer,
        // we should avoid emitting the same code over and over and lower the result to a temporary.
        auto &type = get<SPIRType>(result_type);
        bool usage_tracking = ptr_expression && flattened_buffer_blocks.count(ptr_expression->loaded_from) != 0 &&
                              (type.basetype == SPIRType::Struct || (type.columns > 1));
@ -11787,6 +11830,35 @@ void CompilerGLSL::bitcast_to_builtin_store(uint32_t target_id, std::string &exp
    }
}

void CompilerGLSL::convert_non_uniform_expression(const SPIRType &type, std::string &expr)
{
    // Handle SPV_EXT_descriptor_indexing.
    if (type.basetype == SPIRType::Sampler || type.basetype == SPIRType::SampledImage ||
        type.basetype == SPIRType::Image)
    {
        // The image/sampler ID must be declared as non-uniform.
        // However, it is not legal GLSL to have
        // nonuniformEXT(samplers[index]), so we must move the nonuniform qualifier
        // to the array indexing, like
        // samplers[nonuniformEXT(index)].
        // While the access chain will generally be nonuniformEXT, it's not necessarily so,
        // so we might have to fixup the OpLoad-ed expression late.
        auto start_array_index = expr.find_first_of('[');
        auto end_array_index = expr.find_last_of(']');

        // Doesn't really make sense to declare a non-arrayed image with nonuniformEXT, but there's
        // nothing we can do here to express that.
        if (start_array_index == string::npos || end_array_index == string::npos || end_array_index < start_array_index)
            return;

        start_array_index++;

        expr = join(expr.substr(0, start_array_index), backend.nonuniform_qualifier, "(",
                    expr.substr(start_array_index, end_array_index - start_array_index), ")",
                    expr.substr(end_array_index, string::npos));
    }
}

void CompilerGLSL::emit_block_hints(const SPIRBlock &)
{
}
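The string surgery in convert_non_uniform_expression is worth a worked example. A standalone sketch of the same substr arithmetic, with join replaced by plain concatenation and the qualifier hard-coded:

#include <cstdio>
#include <string>

// Move the non-uniform qualifier from the loaded expression onto the array
// index: "uSamplers[_22]" -> "uSamplers[nonuniformEXT(_22)]".
static std::string make_index_nonuniform(std::string expr)
{
    auto start_array_index = expr.find_first_of('[');
    auto end_array_index = expr.find_last_of(']');
    if (start_array_index == std::string::npos || end_array_index == std::string::npos ||
        end_array_index < start_array_index)
        return expr; // not an arrayed expression; nothing to rewrite

    start_array_index++; // keep the '[' in the prefix
    return expr.substr(0, start_array_index) + "nonuniformEXT(" +
           expr.substr(start_array_index, end_array_index - start_array_index) + ")" +
           expr.substr(end_array_index); // ']' and anything after it
}

int main()
{
    // prints: uSamplers[nonuniformEXT(_22)]
    std::printf("%s\n", make_index_nonuniform("uSamplers[_22]").c_str());
    return 0;
}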

View File

@ -372,6 +372,7 @@ protected:
    const char *basic_uint16_type = "uint16_t";
    const char *int16_t_literal_suffix = "s";
    const char *uint16_t_literal_suffix = "us";
    const char *nonuniform_qualifier = "nonuniformEXT";
    bool swizzle_is_function = false;
    bool shared_is_implied = false;
    bool flexible_member_array_supported = true;
@ -641,6 +642,7 @@ protected:
    virtual void bitcast_to_builtin_store(uint32_t target_id, std::string &expr, const SPIRType &expr_type);
    virtual void bitcast_from_builtin_load(uint32_t source_id, std::string &expr, const SPIRType &expr_type);
    void unroll_array_from_complex_load(uint32_t target_id, uint32_t source_id, std::string &expr);
    void convert_non_uniform_expression(const SPIRType &type, std::string &expr);
    void handle_store_to_invariant_variable(uint32_t store_id, uint32_t value_id);
    void disallow_forwarding_in_expression_chain(const SPIRExpression &expr);
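Because nonuniform_qualifier lives in the backend variations struct rather than being hard-coded at the emission sites, subclasses can swap in their own spelling. A hedged sketch: the subclass below is hypothetical, though NonUniformResourceIndex is the real-world HLSL counterpart of this qualifier:

#include <spirv_glsl.hpp>

#include <utility>
#include <vector>

// Hypothetical backend that reuses the GLSL access-chain logic but changes
// how a non-uniform index is spelled in the output.
class CompilerMyBackend : public spirv_cross::CompilerGLSL
{
public:
    explicit CompilerMyBackend(std::vector<uint32_t> spirv)
        : CompilerGLSL(std::move(spirv))
    {
        backend.nonuniform_qualifier = "NonUniformResourceIndex";
    }
};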