Updated glslang.
This commit is contained in:
parent 86bd3ad9ad
commit 2cdcab3215

3rdparty/glslang/SPIRV/CMakeLists.txt (vendored, 1 change)
@@ -98,6 +98,5 @@ if(ENABLE_GLSLANG_INSTALL)
     install(EXPORT SPIRVTargets DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake)
 
-    install(FILES ${HEADERS} ${SPVREMAP_HEADERS} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/SPIRV/)
+    install(FILES ${HEADERS} ${SPVREMAP_HEADERS} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/glslang/SPIRV/)
 endif(ENABLE_GLSLANG_INSTALL)

3rdparty/glslang/SPIRV/GlslangToSpv.cpp (vendored, 58 changes)
@@ -1292,7 +1292,8 @@ bool IsDescriptorResource(const glslang::TType& type)
     // basically samplerXXX/subpass/sampler/texture are all included
     // if they are the global-scope-class, not the function parameter
     // (or local, if they ever exist) class.
-    if (type.getBasicType() == glslang::EbtSampler)
+    if (type.getBasicType() == glslang::EbtSampler ||
+        type.getBasicType() == glslang::EbtAccStruct)
         return type.getQualifier().isUniformOrBuffer();
 
     // None of the above.
@@ -1710,16 +1711,19 @@ void TGlslangToSpvTraverser::visitSymbol(glslang::TIntermSymbol* symbol)
     spv::Id id = getSymbolId(symbol);
 
     if (builder.isPointer(id)) {
-        // Include all "static use" and "linkage only" interface variables on the OpEntryPoint instruction
-        // Consider adding to the OpEntryPoint interface list.
-        // Only looking at structures if they have at least one member.
-        if (!symbol->getType().isStruct() || symbol->getType().getStruct()->size() > 0) {
-            spv::StorageClass sc = builder.getStorageClass(id);
-            // Before SPIR-V 1.4, we only want to include Input and Output.
-            // Starting with SPIR-V 1.4, we want all globals.
-            if ((glslangIntermediate->getSpv().spv >= glslang::EShTargetSpv_1_4 && sc != spv::StorageClassFunction) ||
-                (sc == spv::StorageClassInput || sc == spv::StorageClassOutput)) {
-                iOSet.insert(id);
+        if (!symbol->getType().getQualifier().isParamInput() &&
+            !symbol->getType().getQualifier().isParamOutput()) {
+            // Include all "static use" and "linkage only" interface variables on the OpEntryPoint instruction
+            // Consider adding to the OpEntryPoint interface list.
+            // Only looking at structures if they have at least one member.
+            if (!symbol->getType().isStruct() || symbol->getType().getStruct()->size() > 0) {
+                spv::StorageClass sc = builder.getStorageClass(id);
+                // Before SPIR-V 1.4, we only want to include Input and Output.
+                // Starting with SPIR-V 1.4, we want all globals.
+                if ((glslangIntermediate->getSpv().spv >= glslang::EShTargetSpv_1_4 && sc != spv::StorageClassFunction) ||
+                    (sc == spv::StorageClassInput || sc == spv::StorageClassOutput)) {
+                    iOSet.insert(id);
+                }
             }
         }
     }
@@ -3524,7 +3528,18 @@ spv::Id TGlslangToSpvTraverser::createSpvVariable(const glslang::TIntermSymbol*
     if (glslang::IsAnonymous(name))
         name = "";
 
-    return builder.createVariable(storageClass, spvType, name);
+    spv::Id initializer = spv::NoResult;
+
+    if (node->getType().getQualifier().storage == glslang::EvqUniform &&
+        !node->getConstArray().empty()) {
+        int nextConst = 0;
+        initializer = createSpvConstantFromConstUnionArray(node->getType(),
+                                                           node->getConstArray(),
+                                                           nextConst,
+                                                           false /* specConst */);
+    }
+
+    return builder.createVariable(storageClass, spvType, name, initializer);
 }
 
 // Return type Id of the sampled type.
@@ -4388,8 +4403,10 @@ bool TGlslangToSpvTraverser::writableParam(glslang::TStorageQualifier qualifier)
     assert(qualifier == glslang::EvqIn ||
            qualifier == glslang::EvqOut ||
           qualifier == glslang::EvqInOut ||
+           qualifier == glslang::EvqUniform ||
           qualifier == glslang::EvqConstReadOnly);
-    return qualifier != glslang::EvqConstReadOnly;
+    return qualifier != glslang::EvqConstReadOnly &&
+           qualifier != glslang::EvqUniform;
 }
 
 // Is parameter pass-by-original?
@@ -8270,6 +8287,18 @@ spv::Id TGlslangToSpvTraverser::createSpvConstant(const glslang::TIntermTyped& n
 
     // We now know we have a specialization constant to build
 
+    // Extra capabilities may be needed.
+    if (node.getType().contains8BitInt())
+        builder.addCapability(spv::CapabilityInt8);
+    if (node.getType().contains16BitFloat())
+        builder.addCapability(spv::CapabilityFloat16);
+    if (node.getType().contains16BitInt())
+        builder.addCapability(spv::CapabilityInt16);
+    if (node.getType().contains64BitInt())
+        builder.addCapability(spv::CapabilityInt64);
+    if (node.getType().containsDouble())
+        builder.addCapability(spv::CapabilityFloat64);
+
     // gl_WorkGroupSize is a special case until the front-end handles hierarchical specialization constants,
     // even then, it's specialization ids are handled by special case syntax in GLSL: layout(local_size_x = ...
     if (node.getType().getQualifier().builtIn == glslang::EbvWorkGroupSize) {
@@ -8625,7 +8654,8 @@ int GetSpirvGeneratorVersion()
     // return 6; // revert version 5 change, which makes a different (new) kind of incorrect code,
                  // versions 4 and 6 each generate OpArrayLength as it has long been done
     // return 7; // GLSL volatile keyword maps to both SPIR-V decorations Volatile and Coherent
-    return 8; // switch to new dead block eliminator; use OpUnreachable
+    // return 8; // switch to new dead block eliminator; use OpUnreachable
+    return 9; // don't include opaque function parameters in OpEntryPoint global's operand list
 }
 
 // Write SPIR-V out to a binary file
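
Note that GetSpirvGeneratorVersion() moves from 8 to 9 here, which signals that previously generated SPIR-V may differ from new output. A minimal consumer-side sketch of reacting to that (the cache logic is hypothetical; only glslang::GetSpirvGeneratorVersion() is existing glslang API):

    #include "SPIRV/GlslangToSpv.h"

    // Rebuild cached SPIR-V binaries whenever glslang's generator version changes.
    bool spirvCacheIsStale(int cachedGeneratorVersion) {
        return cachedGeneratorVersion != glslang::GetSpirvGeneratorVersion();
    }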
9
3rdparty/glslang/StandAlone/StandAlone.cpp
vendored
9
3rdparty/glslang/StandAlone/StandAlone.cpp
vendored
@@ -555,6 +555,12 @@ void ProcessArguments(std::vector<std::unique_ptr<glslang::TWorkItem>>& workItem
                 ReflectOptions |= EShReflectionAllBlockVariables;
             } else if (lowerword == "reflect-unwrap-io-blocks") {
                 ReflectOptions |= EShReflectionUnwrapIOBlocks;
+            } else if (lowerword == "reflect-all-io-variables") {
+                ReflectOptions |= EShReflectionAllIOVariables;
+            } else if (lowerword == "reflect-shared-std140-ubo") {
+                ReflectOptions |= EShReflectionSharedStd140UBO;
+            } else if (lowerword == "reflect-shared-std140-ssbo") {
+                ReflectOptions |= EShReflectionSharedStd140SSBO;
             } else if (lowerword == "resource-set-bindings" ||  // synonyms
                        lowerword == "resource-set-binding" ||
                        lowerword == "rsb") {
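
For reference, these option words surface as command-line flags on the standalone tool. Assuming the usual glslangValidator reflection workflow (reflection is dumped with -q; exact companion flags may vary), invocations would look something like:

    glslangValidator -V -q --reflect-all-io-variables shader.vert
    glslangValidator -V -q --reflect-shared-std140-ubo --reflect-shared-std140-ssbo shader.comp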
@@ -1593,7 +1599,8 @@ void usage()
           "  --hlsl-iomap                      perform IO mapping in HLSL register space\n"
           "  --hlsl-enable-16bit-types         allow 16-bit types in SPIR-V for HLSL\n"
           "  --hlsl-dx9-compatible             interprets sampler declarations as a\n"
-          "                                    texture/sampler combo like DirectX9 would.\n"
+          "                                    texture/sampler combo like DirectX9 would,\n"
+          "                                    and recognizes DirectX9-specific semantics\n"
           "  --invert-y | --iy                 invert position.Y output in vertex shader\n"
           "  --keep-uncalled | --ku            don't eliminate uncalled functions\n"
           "  --nan-clamp                       favor non-NaN operand in min, max, and clamp\n"

3rdparty/glslang/glslang/Include/glslang_c_shader_types.h (vendored)

@@ -164,7 +164,9 @@ typedef enum {
     GLSLANG_REFLECTION_SEPARATE_BUFFERS_BIT = (1 << 3),
     GLSLANG_REFLECTION_ALL_BLOCK_VARIABLES_BIT = (1 << 4),
     GLSLANG_REFLECTION_UNWRAP_IO_BLOCKS_BIT = (1 << 5),
-    GLSLANG_REFLECTION_SHARED_STD140_BLOCKS_BIT = (1 << 6),
+    GLSLANG_REFLECTION_ALL_IO_VARIABLES_BIT = (1 << 6),
+    GLSLANG_REFLECTION_SHARED_STD140_SSBO_BIT = (1 << 7),
+    GLSLANG_REFLECTION_SHARED_STD140_UBO_BIT = (1 << 8),
     LAST_ELEMENT_MARKER(GLSLANG_REFLECTION_COUNT),
 } glslang_reflection_options_t;

3rdparty/glslang/glslang/Include/revision.h (vendored, 2 changes)
@@ -1,3 +1,3 @@
 // This header is generated by the make-revision script.
 
-#define GLSLANG_PATCH_LEVEL 3766
+#define GLSLANG_PATCH_LEVEL 3795

3rdparty/glslang/glslang/MachineIndependent/ParseHelper.cpp (vendored)

@@ -5776,6 +5776,8 @@ void TParseContext::layoutTypeCheck(const TSourceLoc& loc, const TType& type)
             int repeated = intermediate.addXfbBufferOffset(type);
             if (repeated >= 0)
                 error(loc, "overlapping offsets at", "xfb_offset", "offset %d in buffer %d", repeated, qualifier.layoutXfbBuffer);
+            if (type.isUnsizedArray())
+                error(loc, "unsized array", "xfb_offset", "in buffer %d", qualifier.layoutXfbBuffer);
 
             // "The offset must be a multiple of the size of the first component of the first
             // qualified variable or block member, or a compile-time error results. Further, if applied to an aggregate
@@ -7461,8 +7463,8 @@ void TParseContext::declareBlock(const TSourceLoc& loc, TTypeList& typeList, con
         arraySizesCheck(memberLoc, currentBlockQualifier, memberType.getArraySizes(), nullptr, member == typeList.size() - 1);
         if (memberQualifier.hasOffset()) {
             if (spvVersion.spv == 0) {
-                requireProfile(memberLoc, ~EEsProfile, "offset on block member");
-                profileRequires(memberLoc, ~EEsProfile, 440, E_GL_ARB_enhanced_layouts, "offset on block member");
+                profileRequires(memberLoc, ~EEsProfile, 440, E_GL_ARB_enhanced_layouts, "\"offset\" on block member");
+                profileRequires(memberLoc, EEsProfile, 300, E_GL_ARB_enhanced_layouts, "\"offset\" on block member");
             }
         }

3rdparty/glslang/glslang/MachineIndependent/Versions.cpp (vendored)

@@ -167,7 +167,7 @@ void TParseVersions::initializeExtensionBehavior()
 
     const extensionData exts[] = { {E_GL_EXT_ray_tracing, EShTargetSpv_1_4} };
 
-    for (int ii = 0; ii < sizeof(exts) / sizeof(exts[0]); ii++) {
+    for (size_t ii = 0; ii < sizeof(exts) / sizeof(exts[0]); ii++) {
         // Add only extensions which require > spv1.0 to save space in map
         if (exts[ii].minSpvVersion > EShTargetSpv_1_0) {
             extensionMinSpv[E_GL_EXT_ray_tracing] = exts[ii].minSpvVersion;

3rdparty/glslang/glslang/MachineIndependent/iomapper.cpp (vendored)

@@ -309,26 +309,43 @@ struct TSymbolValidater
        TIntermSymbol* base = ent1.symbol;
        const TType& type = ent1.symbol->getType();
        const TString& name = entKey.first;
-       TString mangleName1, mangleName2;
-       type.appendMangledName(mangleName1);
        EShLanguage stage = ent1.stage;
+       TString mangleName1, mangleName2;
        if (currentStage != stage) {
            preStage = currentStage;
            currentStage = stage;
            nextStage = EShLangCount;
            for (int i = currentStage + 1; i < EShLangCount; i++) {
-               if (inVarMaps[i] != nullptr)
+               if (inVarMaps[i] != nullptr) {
                    nextStage = static_cast<EShLanguage>(i);
+                   break;
+               }
            }
        }
+
+       if (type.getQualifier().isArrayedIo(stage)) {
+           TType subType(type, 0);
+           subType.appendMangledName(mangleName1);
+       } else {
+           type.appendMangledName(mangleName1);
+       }
+
        if (base->getQualifier().storage == EvqVaryingIn) {
            // validate stage in;
            if (preStage == EShLangCount)
                return;
            if (name == "gl_PerVertex")
                return;
            if (outVarMaps[preStage] != nullptr) {
                auto ent2 = outVarMaps[preStage]->find(name);
                if (ent2 != outVarMaps[preStage]->end()) {
-                   ent2->second.symbol->getType().appendMangledName(mangleName2);
+                   if (ent2->second.symbol->getType().getQualifier().isArrayedIo(preStage)) {
+                       TType subType(ent2->second.symbol->getType(), 0);
+                       subType.appendMangledName(mangleName2);
+                   }
+                   else {
+                       ent2->second.symbol->getType().appendMangledName(mangleName2);
+                   }
                    if (mangleName1 == mangleName2)
                        return;
                    else {
@@ -343,10 +360,18 @@ struct TSymbolValidater
            // validate stage out;
            if (nextStage == EShLangCount)
                return;
            if (name == "gl_PerVertex")
                return;
            if (outVarMaps[nextStage] != nullptr) {
                auto ent2 = inVarMaps[nextStage]->find(name);
                if (ent2 != inVarMaps[nextStage]->end()) {
-                   ent2->second.symbol->getType().appendMangledName(mangleName2);
+                   if (ent2->second.symbol->getType().getQualifier().isArrayedIo(nextStage)) {
+                       TType subType(ent2->second.symbol->getType(), 0);
+                       subType.appendMangledName(mangleName2);
+                   }
+                   else {
+                       ent2->second.symbol->getType().appendMangledName(mangleName2);
+                   }
                    if (mangleName1 == mangleName2)
                        return;
                    else {

3rdparty/glslang/glslang/MachineIndependent/linkValidate.cpp (vendored)

@@ -1362,9 +1362,9 @@ unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains
     // that component's size. Aggregate types are flattened down to the component
     // level to get this sequence of components."
 
-    if (type.isArray()) {
+    if (type.isSizedArray()) {
         // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
-        assert(type.isSizedArray());
+        // Unsized array use to xfb should be a compile error.
         TType elementType(type, 0);
         return type.getOuterArraySize() * computeTypeXfbSize(elementType, contains64BitType, contains16BitType, contains16BitType);
     }
@@ -1550,7 +1550,9 @@ int TIntermediate::getBaseAlignment(const TType& type, int& size, int& stride, T
         RoundToPow2(size, alignment);
         stride = size; // uses full matrix size for stride of an array of matrices (not quite what rule 6/8, but what's expected)
         // uses the assumption for rule 10 in the comment above
-        size = stride * type.getOuterArraySize();
+        // use one element to represent the last member of SSBO which is unsized array
+        int arraySize = (type.isUnsizedArray() && (type.getOuterArraySize() == 0)) ? 1 : type.getOuterArraySize();
+        size = stride * arraySize;
         return alignment;
     }

3rdparty/glslang/glslang/MachineIndependent/reflection.cpp (vendored)

@@ -107,22 +107,13 @@ public:
            else
                baseName = "";
 
-           if (base.getType().isArray()) {
-               TType derefType(base.getType(), 0);
-
-               assert(!anonymous);
-               for (int e = 0; e < base.getType().getCumulativeArraySize(); ++e)
-                   blockIndex = addBlockName(blockName + "[" + String(e) + "]", derefType,
-                                             intermediate.getBlockSize(base.getType()));
-           }
-           else
-               blockIndex = addBlockName(blockName, base.getType(), intermediate.getBlockSize(base.getType()));
+           blockIndex = addBlockName(blockName, base.getType(), intermediate.getBlockSize(base.getType()));
        }
 
        // Use a degenerate (empty) set of dereferences to immediately put as at the end of
        // the dereference change expected by blowUpActiveAggregate.
-       blowUpActiveAggregate(base.getType(), baseName, derefs, derefs.end(), offset, blockIndex, 0, 0,
-                             base.getQualifier().storage, updateStageMasks);
+       blowUpActiveAggregate(base.getType(), baseName, derefs, derefs.end(), offset, blockIndex, 0, -1, 0,
+                             base.getQualifier().storage, updateStageMasks);
    }
@@ -259,7 +250,7 @@ public:
    // A value of 0 for arraySize will mean to use the full array's size.
    void blowUpActiveAggregate(const TType& baseType, const TString& baseName, const TList<TIntermBinary*>& derefs,
                               TList<TIntermBinary*>::const_iterator deref, int offset, int blockIndex, int arraySize,
-                              int topLevelArrayStride, TStorageQualifier baseStorage, bool active)
+                              int topLevelArraySize, int topLevelArrayStride, TStorageQualifier baseStorage, bool active)
    {
        // when strictArraySuffix is enabled, we closely follow the rules from ARB_program_interface_query.
        // Broadly:
@@ -288,14 +279,15 @@ public:
                // Visit all the indices of this array, and for each one add on the remaining dereferencing
                for (int i = 0; i < std::max(visitNode->getLeft()->getType().getOuterArraySize(), 1); ++i) {
                    TString newBaseName = name;
-                   if (strictArraySuffix && blockParent)
+                   if (terminalType->getBasicType() == EbtBlock) {}
+                   else if (strictArraySuffix && blockParent)
                        newBaseName.append(TString("[0]"));
                    else if (strictArraySuffix || baseType.getBasicType() != EbtBlock)
                        newBaseName.append(TString("[") + String(i) + "]");
                    TList<TIntermBinary*>::const_iterator nextDeref = deref;
                    ++nextDeref;
                    blowUpActiveAggregate(*terminalType, newBaseName, derefs, nextDeref, offset, blockIndex, arraySize,
-                                         topLevelArrayStride, baseStorage, active);
+                                         topLevelArraySize, topLevelArrayStride, baseStorage, active);
 
                    if (offset >= 0)
                        offset += stride;
@@ -308,9 +300,10 @@ public:
                int stride = getArrayStride(baseType, visitNode->getLeft()->getType());
 
                index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
-               if (strictArraySuffix && blockParent) {
+               if (terminalType->getBasicType() == EbtBlock) {}
+               else if (strictArraySuffix && blockParent)
                    name.append(TString("[0]"));
-               } else if (strictArraySuffix || baseType.getBasicType() != EbtBlock) {
+               else if (strictArraySuffix || baseType.getBasicType() != EbtBlock) {
                    name.append(TString("[") + String(index) + "]");
 
                    if (offset >= 0)
@@ -320,7 +313,10 @@ public:
                if (topLevelArrayStride == 0)
                    topLevelArrayStride = stride;
 
-               blockParent = false;
+               // expand top-level arrays in blocks with [0] suffix
+               if (topLevelArrayStride != 0 && visitNode->getLeft()->getType().isArray()) {
+                   blockParent = false;
+               }
                break;
            }
        case EOpIndexDirectStruct:
@@ -330,6 +326,12 @@ public:
            if (name.size() > 0)
                name.append(".");
            name.append((*visitNode->getLeft()->getType().getStruct())[index].type->getFieldName());
+
+           // expand non top-level arrays with [x] suffix
+           if (visitNode->getLeft()->getType().getBasicType() != EbtBlock && terminalType->isArray())
+           {
+               blockParent = false;
+           }
            break;
        default:
            break;
@@ -349,14 +351,16 @@ public:
        if (offset >= 0)
            stride = getArrayStride(baseType, *terminalType);
 
-       if (topLevelArrayStride == 0)
-           topLevelArrayStride = stride;
-
        int arrayIterateSize = std::max(terminalType->getOuterArraySize(), 1);
 
        // for top-level arrays in blocks, only expand [0] to avoid explosion of items
-       if (strictArraySuffix && blockParent)
+       if ((strictArraySuffix && blockParent) ||
+           ((topLevelArraySize == arrayIterateSize) && (topLevelArrayStride == 0))) {
            arrayIterateSize = 1;
+       }
+
+       if (topLevelArrayStride == 0)
+           topLevelArrayStride = stride;
 
        for (int i = 0; i < arrayIterateSize; ++i) {
            TString newBaseName = name;
@@ -367,7 +371,7 @@ public:
                offset = baseOffset + stride * i;
 
                blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
-                                     topLevelArrayStride, baseStorage, active);
+                                     topLevelArraySize, topLevelArrayStride, baseStorage, active);
            }
        } else {
            // Visit all members of this aggregate, and for each one,
@@ -396,8 +400,31 @@ public:
                arrayStride = getArrayStride(baseType, derefType);
            }
 
-           blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
-                                 arrayStride, baseStorage, active);
+           if (topLevelArraySize == -1 && arrayStride == 0 && blockParent)
+               topLevelArraySize = 1;
+
+           if (strictArraySuffix && blockParent) {
+               // if this member is an array, store the top-level array stride but start the explosion from
+               // the inner struct type.
+               if (derefType.isArray() && derefType.isStruct()) {
+                   newBaseName.append("[0]");
+                   auto dimSize = derefType.isUnsizedArray() ? 0 : derefType.getArraySizes()->getDimSize(0);
+                   blowUpActiveAggregate(TType(derefType, 0), newBaseName, derefs, derefs.end(), memberOffsets[i],
+                                         blockIndex, 0, dimSize, arrayStride, terminalType->getQualifier().storage, false);
+               }
+               else if (derefType.isArray()) {
+                   auto dimSize = derefType.isUnsizedArray() ? 0 : derefType.getArraySizes()->getDimSize(0);
+                   blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), memberOffsets[i], blockIndex,
+                                         0, dimSize, 0, terminalType->getQualifier().storage, false);
+               }
+               else {
+                   blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), memberOffsets[i], blockIndex,
+                                         0, 1, 0, terminalType->getQualifier().storage, false);
+               }
+           } else {
+               blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
+                                     topLevelArraySize, arrayStride, baseStorage, active);
+           }
        }
    }
 
@@ -433,6 +460,7 @@ public:
        if ((reflection.options & EShReflectionSeparateBuffers) && terminalType->isAtomic())
            reflection.atomicCounterUniformIndices.push_back(uniformIndex);
 
+       variables.back().topLevelArraySize = topLevelArraySize;
        variables.back().topLevelArrayStride = topLevelArrayStride;
 
        if ((reflection.options & EShReflectionAllBlockVariables) && active) {
@@ -564,65 +592,17 @@ public:
        if (! anonymous)
            baseName = blockName;
 
-       if (base->getType().isArray()) {
-           TType derefType(base->getType(), 0);
-
-           assert(! anonymous);
-           for (int e = 0; e < base->getType().getCumulativeArraySize(); ++e)
-               blockIndex = addBlockName(blockName + "[" + String(e) + "]", derefType,
-                                         intermediate.getBlockSize(base->getType()));
-           baseName.append(TString("[0]"));
-       } else
-           blockIndex = addBlockName(blockName, base->getType(), intermediate.getBlockSize(base->getType()));
+       blockIndex = addBlockName(blockName, base->getType(), intermediate.getBlockSize(base->getType()));
 
        if (reflection.options & EShReflectionAllBlockVariables) {
            // Use a degenerate (empty) set of dereferences to immediately put as at the end of
            // the dereference change expected by blowUpActiveAggregate.
            TList<TIntermBinary*> derefs;
 
-           // because we don't have any derefs, the first thing blowUpActiveAggregate will do is iterate over each
-           // member in the struct definition. This will lose any information about whether the parent was a buffer
-           // block. So if we're using strict array rules which don't expand the first child of a buffer block we
-           // instead iterate over the children here.
-           const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
-           bool blockParent = (base->getType().getBasicType() == EbtBlock && base->getQualifier().storage == EvqBuffer);
-
-           if (strictArraySuffix && blockParent) {
-               TType structDerefType(base->getType(), 0);
-
-               const TType &structType = base->getType().isArray() ? structDerefType : base->getType();
-               const TTypeList& typeList = *structType.getStruct();
-
-               TVector<int> memberOffsets;
-
-               memberOffsets.resize(typeList.size());
-               getOffsets(structType, memberOffsets);
-
-               for (int i = 0; i < (int)typeList.size(); ++i) {
-                   TType derefType(structType, i);
-                   TString name = baseName;
-                   if (name.size() > 0)
-                       name.append(".");
-                   name.append(typeList[i].type->getFieldName());
-
-                   // if this member is an array, store the top-level array stride but start the explosion from
-                   // the inner struct type.
-                   if (derefType.isArray() && derefType.isStruct()) {
-                       name.append("[0]");
-                       blowUpActiveAggregate(TType(derefType, 0), name, derefs, derefs.end(), memberOffsets[i],
-                                             blockIndex, 0, getArrayStride(structType, derefType),
-                                             base->getQualifier().storage, false);
-                   } else {
-                       blowUpActiveAggregate(derefType, name, derefs, derefs.end(), memberOffsets[i], blockIndex,
-                                             0, 0, base->getQualifier().storage, false);
-                   }
-               }
-           } else {
-               // otherwise - if we're not using strict array suffix rules, or this isn't a block so we are
-               // expanding root arrays anyway, just start the iteration from the base block type.
-               blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.end(), 0, blockIndex, 0, 0,
-                                     base->getQualifier().storage, false);
-           }
+           // otherwise - if we're not using strict array suffix rules, or this isn't a block so we are
+           // expanding root arrays anyway, just start the iteration from the base block type.
+           blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.end(), 0, blockIndex, 0, -1, 0,
+                                 base->getQualifier().storage, false);
        }
    }
@@ -653,31 +633,37 @@ public:
            else
                baseName = base->getName();
        }
-       blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize, 0,
+       blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize, -1, 0,
                              base->getQualifier().storage, true);
    }
 
    int addBlockName(const TString& name, const TType& type, int size)
    {
-       TReflection::TMapIndexToReflection& blocks = reflection.GetBlockMapForStorage(type.getQualifier().storage);
-
        int blockIndex;
-       TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
-       if (reflection.nameToIndex.find(name.c_str()) == reflection.nameToIndex.end()) {
-           blockIndex = (int)blocks.size();
-           reflection.nameToIndex[name.c_str()] = blockIndex;
-           blocks.push_back(TObjectReflection(name.c_str(), type, -1, -1, size, -1));
-
-           blocks.back().numMembers = countAggregateMembers(type);
-
-       } else {
-           blockIndex = it->second;
+       if (type.isArray()) {
+           TType derefType(type, 0);
+           for (int e = 0; e < type.getOuterArraySize(); ++e) {
+               int memberBlockIndex = addBlockName(name + "[" + String(e) + "]", derefType, size);
+               if (e == 0)
+                   blockIndex = memberBlockIndex;
+           }
+       } else {
+           TReflection::TMapIndexToReflection& blocks = reflection.GetBlockMapForStorage(type.getQualifier().storage);
 
+           TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
+           if (reflection.nameToIndex.find(name.c_str()) == reflection.nameToIndex.end()) {
+               blockIndex = (int)blocks.size();
+               reflection.nameToIndex[name.c_str()] = blockIndex;
+               blocks.push_back(TObjectReflection(name.c_str(), type, -1, -1, size, blockIndex));
+
+               blocks.back().numMembers = countAggregateMembers(type);
+
+               if (updateStageMasks) {
+                   EShLanguageMask& stages = blocks.back().stages;
+                   stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+               }
+           }
+           else {
+               blockIndex = it->second;
 
                if (updateStageMasks) {
                    EShLanguageMask& stages = blocks[blockIndex].stages;
                    stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
                }
@@ -1064,7 +1050,7 @@ void TReflectionTraverser::visitSymbol(TIntermSymbol* base)
 {
     if (base->getQualifier().storage == EvqUniform) {
         if (base->getBasicType() == EbtBlock) {
-            if (reflection.options & EShReflectionSharedStd140Blocks) {
+            if (reflection.options & EShReflectionSharedStd140UBO) {
                 addUniform(*base);
             }
         } else {
@@ -1072,6 +1058,13 @@ void TReflectionTraverser::visitSymbol(TIntermSymbol* base)
         }
     }
 
+    // #TODO add std140/layout active rules for ssbo, same with ubo.
+    // Storage buffer blocks will be collected and expanding in this part.
+    if ((reflection.options & EShReflectionSharedStd140SSBO) &&
+        (base->getQualifier().storage == EvqBuffer && base->getBasicType() == EbtBlock &&
+         (base->getQualifier().layoutPacking == ElpStd140 || base->getQualifier().layoutPacking == ElpShared)))
+        addUniform(*base);
+
     if ((intermediate.getStage() == reflection.firstStage && base->getQualifier().isPipeInput()) ||
         (intermediate.getStage() == reflection.lastStage && base->getQualifier().isPipeOutput()))
         addPipeIOVariable(*base);
@@ -1182,15 +1175,23 @@ bool TReflection::addStage(EShLanguage stage, const TIntermediate& intermediate)
            TIntermAggregate* linkerObjects = sequnence->getAsAggregate();
            for (auto& sequnence : linkerObjects->getSequence()) {
                auto pNode = sequnence->getAsSymbolNode();
-               if (pNode != nullptr && pNode->getQualifier().storage == EvqUniform &&
-                   (options & EShReflectionSharedStd140Blocks)) {
-                   if (pNode->getBasicType() == EbtBlock) {
-                       // collect std140 and shared uniform block form AST
-                       if (pNode->getQualifier().layoutPacking == ElpStd140 ||
-                           pNode->getQualifier().layoutPacking == ElpShared) {
-                           pNode->traverse(&it);
+               if (pNode != nullptr) {
+                   if ((pNode->getQualifier().storage == EvqUniform &&
+                        (options & EShReflectionSharedStd140UBO)) ||
+                       (pNode->getQualifier().storage == EvqBuffer &&
+                        (options & EShReflectionSharedStd140SSBO))) {
+                       // collect std140 and shared uniform block form AST
+                       if ((pNode->getBasicType() == EbtBlock) &&
+                           ((pNode->getQualifier().layoutPacking == ElpStd140) ||
+                            (pNode->getQualifier().layoutPacking == ElpShared))) {
+                           pNode->traverse(&it);
+                       }
                    }
+                   else if ((options & EShReflectionAllIOVariables) &&
+                            (pNode->getQualifier().isPipeInput() || pNode->getQualifier().isPipeOutput()))
+                   {
+                       pNode->traverse(&it);
+                   }
                }
            }
        } else {

3rdparty/glslang/glslang/Public/ShaderLang.h (vendored, 7 changes)
@@ -255,7 +255,7 @@ enum EShMessages : unsigned {
    EShMsgDebugInfo            = (1 << 10), // save debug information
    EShMsgHlslEnable16BitTypes = (1 << 11), // enable use of 16-bit types in SPIR-V for HLSL
    EShMsgHlslLegalization     = (1 << 12), // enable HLSL Legalization messages
-   EShMsgHlslDX9Compatible    = (1 << 13), // enable HLSL DX9 compatible mode (right now only for samplers)
+   EShMsgHlslDX9Compatible    = (1 << 13), // enable HLSL DX9 compatible mode (for samplers and semantics)
    EShMsgBuiltinSymbolTable   = (1 << 14), // print the builtin symbol table
    LAST_ELEMENT_MARKER(EShMsgCount),
 };
@@ -271,7 +271,9 @@ typedef enum {
    EShReflectionSeparateBuffers    = (1 << 3), // buffer variables and buffer blocks are reflected separately
    EShReflectionAllBlockVariables  = (1 << 4), // reflect all variables in blocks, even if they are inactive
    EShReflectionUnwrapIOBlocks     = (1 << 5), // unwrap input/output blocks the same as with uniform blocks
-   EShReflectionSharedStd140Blocks = (1 << 6), // Apply std140/shared rules for ubo to ssbo
+   EShReflectionAllIOVariables     = (1 << 6), // reflect all input/output variables, even if they are inactive
+   EShReflectionSharedStd140SSBO   = (1 << 7), // Apply std140/shared rules for ubo to ssbo
+   EShReflectionSharedStd140UBO    = (1 << 8), // Apply std140/shared rules for ubo to ssbo
    LAST_ELEMENT_MARKER(EShReflectionCount),
 } EShReflectionOptions;
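
These bits are consumed by the existing TProgram::buildReflection(int options) entry point. A minimal sketch of opting into the new behavior (shader setup, parsing, and linking elided):

    glslang::TProgram program;
    // ... program.addShader(...); program.link(messages); ...
    program.buildReflection(EShReflectionSharedStd140UBO |
                            EShReflectionSharedStd140SSBO |
                            EShReflectionAllIOVariables);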
@@ -696,6 +698,7 @@ public:
    int counterIndex;
    int numMembers;
    int arrayStride;         // stride of an array variable
+   int topLevelArraySize;   // size of the top-level variable in a storage buffer member
    int topLevelArrayStride; // stride of the top-level variable in a storage buffer member
    EShLanguageMask stages;
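
The new field is read back through the existing reflection accessors. A hedged sketch, assuming a linked TProgram whose reflection was already built with buffer-variable reflection enabled:

    for (int i = 0; i < program.getNumBufferVariables(); ++i) {
        const glslang::TObjectReflection& v = program.getBufferVariable(i);
        printf("%s: topLevelArraySize=%d, topLevelArrayStride=%d\n",
               v.name.c_str(), v.topLevelArraySize, v.topLevelArrayStride);
    }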

3rdparty/glslang/hlsl/hlslGrammar.cpp (vendored, 14 changes)
@@ -382,6 +382,16 @@ bool HlslGrammar::acceptDeclaration(TIntermNode*& nodeList)
     if (forbidDeclarators)
         return true;
 
+    // Check if there are invalid in/out qualifiers
+    switch (declaredType.getQualifier().storage) {
+    case EvqIn:
+    case EvqOut:
+    case EvqInOut:
+        parseContext.error(token.loc, "in/out qualifiers are only valid on parameters", token.string->c_str(), "");
+    default:
+        break;
+    }
+
     // declarator_list
     //    : declarator
     //         : identifier
@@ -697,7 +707,9 @@ bool HlslGrammar::acceptQualifier(TQualifier& qualifier)
            qualifier.noContraction = true;
            break;
        case EHTokIn:
-           qualifier.storage = (qualifier.storage == EvqOut) ? EvqInOut : EvqIn;
+           if (qualifier.storage != EvqUniform) {
+               qualifier.storage = (qualifier.storage == EvqOut) ? EvqInOut : EvqIn;
+           }
            break;
        case EHTokOut:
            qualifier.storage = (qualifier.storage == EvqIn) ? EvqInOut : EvqOut;

3rdparty/glslang/hlsl/hlslParseHelper.cpp (vendored, 56 changes)
@@ -2111,6 +2111,23 @@ TIntermNode* HlslParseContext::transformEntryPoint(const TSourceLoc& loc, TFunct
        makeVariableInOut(*(*it));
    }
 
+   // Add uniform parameters to the $Global uniform block.
+   TVector<TVariable*> opaque_uniforms;
+   for (int i = 0; i < userFunction.getParamCount(); i++) {
+       TType& paramType = *userFunction[i].type;
+       TString& paramName = *userFunction[i].name;
+       if (paramType.getQualifier().storage == EvqUniform) {
+           if (!paramType.containsOpaque()) {
+               // Add it to the global uniform block.
+               growGlobalUniformBlock(loc, paramType, paramName);
+           } else {
+               // Declare it as a separate variable.
+               TVariable *var = makeInternalVariable(paramName.c_str(), paramType);
+               opaque_uniforms.push_back(var);
+           }
+       }
+   }
+
    // Synthesize the call
 
    pushScope(); // matches the one in handleFunctionBody()
@@ -2131,6 +2148,7 @@ TIntermNode* HlslParseContext::transformEntryPoint(const TSourceLoc& loc, TFunct
    TVector<TVariable*> argVars;
    TIntermAggregate* synthBody = new TIntermAggregate();
    auto inputIt = inputs.begin();
+   auto opaqueUniformIt = opaque_uniforms.begin();
    TIntermTyped* callingArgs = nullptr;
 
    for (int i = 0; i < userFunction.getParamCount(); i++) {
@@ -2149,6 +2167,17 @@ TIntermNode* HlslParseContext::transformEntryPoint(const TSourceLoc& loc, TFunct
                                                             intermediate.addSymbol(**inputIt)));
            inputIt++;
        }
+       if (param.type->getQualifier().storage == EvqUniform) {
+           if (!param.type->containsOpaque()) {
+               // Look it up in the $Global uniform block.
+               intermediate.growAggregate(synthBody, handleAssign(loc, EOpAssign, arg,
+                                                                  handleVariable(loc, param.name)));
+           } else {
+               intermediate.growAggregate(synthBody, handleAssign(loc, EOpAssign, arg,
+                                                                  intermediate.addSymbol(**opaqueUniformIt)));
+               ++opaqueUniformIt;
+           }
+       }
    }
 
    // Call
@@ -6100,6 +6129,32 @@ void HlslParseContext::handleSemantic(TSourceLoc loc, TQualifier& qualifier, TBu
        return semanticNum;
    };
 
+   if (builtIn == EbvNone && hlslDX9Compatible()) {
+       if (language == EShLangVertex) {
+           if (qualifier.isParamOutput()) {
+               if (upperCase == "POSITION") {
+                   builtIn = EbvPosition;
+               }
+               if (upperCase == "PSIZE") {
+                   builtIn = EbvPointSize;
+               }
+           }
+       } else if (language == EShLangFragment) {
+           if (qualifier.isParamInput() && upperCase == "VPOS") {
+               builtIn = EbvFragCoord;
+           }
+           if (qualifier.isParamOutput()) {
+               if (upperCase.compare(0, 5, "COLOR") == 0) {
+                   qualifier.layoutLocation = getSemanticNumber(upperCase, 0, nullptr);
+                   nextOutLocation = std::max(nextOutLocation, qualifier.layoutLocation + 1u);
+               }
+               if (upperCase == "DEPTH") {
+                   builtIn = EbvFragDepth;
+               }
+           }
+       }
+   }
+
    switch(builtIn) {
    case EbvNone:
        // Get location numbers from fragment outputs, instead of
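
This DX9 semantic mapping only runs when the EShMsgHlslDX9Compatible bit is set. A minimal sketch of enabling it through the public API (source string and resource limits are assumed to be set up elsewhere; this is one valid configuration, not the only one):

    glslang::TShader shader(EShLangFragment);
    shader.setStrings(&hlslSource, 1); // hlslSource: const char*, assumed defined
    shader.setEnvInput(glslang::EShSourceHlsl, EShLangFragment, glslang::EShClientVulkan, 100);
    const EShMessages msgs = static_cast<EShMessages>(
        EShMsgSpvRules | EShMsgVulkanRules | EShMsgReadHlsl | EShMsgHlslDX9Compatible);
    bool ok = shader.parse(&resources, 110, false, msgs); // resources: a TBuiltInResource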
@@ -6914,7 +6969,6 @@ void HlslParseContext::paramFix(TType& type)
        type.getQualifier().storage = EvqConstReadOnly;
        break;
    case EvqGlobal:
-   case EvqUniform:
    case EvqTemporary:
        type.getQualifier().storage = EvqIn;
        break;