Updated glslang.

This commit is contained in:
Бранимир Караџић
2020-05-15 23:21:00 -07:00
parent 7676a2f2bd
commit 8848e0dad1
9 changed files with 241 additions and 33 deletions

View File

@@ -164,6 +164,7 @@ typedef enum {
GLSLANG_REFLECTION_SEPARATE_BUFFERS_BIT = (1 << 3),
GLSLANG_REFLECTION_ALL_BLOCK_VARIABLES_BIT = (1 << 4),
GLSLANG_REFLECTION_UNWRAP_IO_BLOCKS_BIT = (1 << 5),
GLSLANG_REFLECTION_SHARED_STD140_BLOCKS_BIT = (1 << 6),
LAST_ELEMENT_MARKER(GLSLANG_REFLECTION_COUNT),
} glslang_reflection_options_t;

View File

@@ -1,3 +1,3 @@
// This header is generated by the make-revision script.
#define GLSLANG_PATCH_LEVEL 3743
#define GLSLANG_PATCH_LEVEL 3763

View File

@@ -146,6 +146,7 @@ EProfile EDesktopProfile = static_cast<EProfile>(ENoProfile | ECoreProfile | ECo
// Declare pointers to put into the table for versioning.
#ifdef GLSLANG_WEB
const Versioning* Es300Desktop130 = nullptr;
const Versioning* Es310Desktop420 = nullptr;
#else
const Versioning Es300Desktop130Version[] = { { EEsProfile, 0, 300, 0, nullptr },
{ EDesktopProfile, 0, 130, 0, nullptr },

View File

@@ -7593,6 +7593,8 @@ void TParseContext::declareBlock(const TSourceLoc& loc, TTypeList& typeList, con
fixBlockLocations(loc, currentBlockQualifier, typeList, memberWithLocation, memberWithoutLocation);
fixXfbOffsets(currentBlockQualifier, typeList);
fixBlockUniformOffsets(currentBlockQualifier, typeList);
fixBlockUniformLayoutMatrix(currentBlockQualifier, &typeList, nullptr);
fixBlockUniformLayoutPacking(currentBlockQualifier, &typeList, nullptr);
for (unsigned int member = 0; member < typeList.size(); ++member)
layoutTypeCheck(typeList[member].loc, *typeList[member].type);
@@ -7963,6 +7965,101 @@ void TParseContext::fixBlockUniformOffsets(TQualifier& qualifier, TTypeList& typ
}
}
//
// Spread LayoutMatrix to uniform block members; if a member is itself a
// struct, spread LayoutMatrix to that struct's members too, recursively.
//
// originTypeList is the member list being fixed. tmpTypeList, when non-null,
// is a deep copy of the same shape (see the clone() below): the copy receives
// the qualifier writes, so that one struct type used by blocks with different
// layout qualifiers does not share a single mutated TTypeList.
//
void TParseContext::fixBlockUniformLayoutMatrix(TQualifier& qualifier, TTypeList* originTypeList,
    TTypeList* tmpTypeList)
{
    // Either we fix the original in place, or the copy mirrors it 1:1.
    assert(tmpTypeList == nullptr || originTypeList->size() == tmpTypeList->size());
    for (unsigned int member = 0; member < originTypeList->size(); ++member) {
        // NOTE(review): this guard tests layoutPacking while propagating
        // layoutMatrix — it mirrors fixBlockUniformLayoutPacking; confirm the
        // packing guard (rather than a layoutMatrix guard) is intended here.
        if (qualifier.layoutPacking != ElpNone) {
            if (tmpTypeList == nullptr) {
                // No copy yet: write the inherited matrix layout into the
                // original member, but only for matrices/structs that did not
                // declare a matrix layout of their own.
                if (((*originTypeList)[member].type->isMatrix() ||
                    (*originTypeList)[member].type->getBasicType() == EbtStruct) &&
                    (*originTypeList)[member].type->getQualifier().layoutMatrix == ElmNone) {
                    (*originTypeList)[member].type->getQualifier().layoutMatrix = qualifier.layoutMatrix;
                }
            } else {
                // A copy exists: apply the same rule to the copy instead.
                if (((*tmpTypeList)[member].type->isMatrix() ||
                    (*tmpTypeList)[member].type->getBasicType() == EbtStruct) &&
                    (*tmpTypeList)[member].type->getQualifier().layoutMatrix == ElmNone) {
                    (*tmpTypeList)[member].type->getQualifier().layoutMatrix = qualifier.layoutMatrix;
                }
            }
        }
        if ((*originTypeList)[member].type->getBasicType() == EbtStruct) {
            TQualifier* memberQualifier = nullptr;
            // A block member may declare its own matrix style; if it did,
            // recurse with the member's qualifier instead of the block's.
            if ((*originTypeList)[member].type->getQualifier().layoutMatrix == ElmNone) {
                memberQualifier = &qualifier;
            } else {
                memberQualifier = &((*originTypeList)[member].type->getQualifier());
            }
            // On the first visit (no copy passed in) clone the member's struct
            // so the shared pool object is not mutated; deeper recursion levels
            // keep working on the copy made at the first level.
            const TType* tmpType = tmpTypeList == nullptr ?
                (*originTypeList)[member].type->clone() : (*tmpTypeList)[member].type;

            fixBlockUniformLayoutMatrix(*memberQualifier, (*originTypeList)[member].type->getWritableStruct(),
                tmpType->getWritableStruct());

            // recordStructCopy returns the canonical struct to use: the cached
            // copy when the fixed qualifiers differ from the original's, or
            // the original struct when nothing changed.
            const TTypeList* structure = recordStructCopy(matrixFixRecord, (*originTypeList)[member].type, tmpType);

            if (tmpTypeList == nullptr) {
                (*originTypeList)[member].type->setStruct(const_cast<TTypeList*>(structure));
            }
            if (tmpTypeList != nullptr) {
                (*tmpTypeList)[member].type->setStruct(const_cast<TTypeList*>(structure));
            }
        }
    }
}
//
// Spread LayoutPacking to block members; if a member is itself a struct,
// spread LayoutPacking to that struct's members too, recursively.
//
// originTypeList is the member list being fixed. tmpTypeList, when non-null,
// is a deep copy of the same shape that receives the qualifier writes instead
// of the shared original (see the clone() below).
//
void TParseContext::fixBlockUniformLayoutPacking(TQualifier& qualifier, TTypeList* originTypeList,
    TTypeList* tmpTypeList)
{
    // Either we fix the original in place, or the copy mirrors it 1:1.
    assert(tmpTypeList == nullptr || originTypeList->size() == tmpTypeList->size());
    for (unsigned int member = 0; member < originTypeList->size(); ++member) {
        if (qualifier.layoutPacking != ElpNone) {
            if (tmpTypeList == nullptr) {
                // Inherit the enclosing packing only where the member declared none.
                if ((*originTypeList)[member].type->getQualifier().layoutPacking == ElpNone) {
                    (*originTypeList)[member].type->getQualifier().layoutPacking = qualifier.layoutPacking;
                }
            } else {
                // A copy exists: apply the same rule to the copy instead.
                if ((*tmpTypeList)[member].type->getQualifier().layoutPacking == ElpNone) {
                    (*tmpTypeList)[member].type->getQualifier().layoutPacking = qualifier.layoutPacking;
                }
            }
        }
        if ((*originTypeList)[member].type->getBasicType() == EbtStruct) {
            // Deep copy the type in the pool: the same struct used in
            // different blocks may carry different layout qualifiers, so a
            // distinct object is needed to tell the uses apart.
            const TType* tmpType = tmpTypeList == nullptr ?
                (*originTypeList)[member].type->clone() : (*tmpTypeList)[member].type;

            fixBlockUniformLayoutPacking(qualifier, (*originTypeList)[member].type->getWritableStruct(),
                tmpType->getWritableStruct());

            // Use the canonical struct: the recorded copy when the fixed
            // qualifiers differ from the original's, else the original.
            const TTypeList* structure = recordStructCopy(packingFixRecord, (*originTypeList)[member].type, tmpType);

            if (tmpTypeList == nullptr) {
                (*originTypeList)[member].type->setStruct(const_cast<TTypeList*>(structure));
            }
            if (tmpTypeList != nullptr) {
                (*tmpTypeList)[member].type->setStruct(const_cast<TTypeList*>(structure));
            }
        }
    }
}
// For an identifier that is already declared, add more qualification to it.
void TParseContext::addQualifierToExisting(const TSourceLoc& loc, TQualifier qualifier, const TString& identifier)
{
@@ -8421,5 +8518,43 @@ TIntermNode* TParseContext::addSwitch(const TSourceLoc& loc, TIntermTyped* expre
return switchNode;
}
//
// When a struct is used in a block and ends up with its own layout packing /
// layout matrix, the struct is deep-copied so qualifiers can differ per use.
// Record that copy in 'record', keyed by the original struct and a hash of the
// copy's member layout qualifiers, so equivalent fixes share one copy.
//
// Returns the canonical TTypeList to use: the recorded copy when the fixed
// member qualifiers differ from the original's, otherwise the original struct.
//
const TTypeList* TParseContext::recordStructCopy(TStructRecord& record, const TType* originType, const TType* tmpType)
{
    const TTypeList* originStruct = originType->getStruct();
    const TTypeList* tmpStruct = tmpType->getStruct();
    const size_t memberCount = tmpStruct->size();
    size_t originHash = 0, tmpHash = 0;
    std::hash<size_t> hasher;
    for (size_t i = 0; i < memberCount; i++) {
        const TQualifier& originQualifier = originStruct->at(i).type->getQualifier();
        const TQualifier& tmpQualifier = tmpStruct->at(i).type->getQualifier();
        // Pack the (packing, matrix) pair into disjoint bit ranges before
        // hashing. The previous code summed the two enums, which made distinct
        // pairs collide, e.g. (packing 1, matrix 2) vs. (packing 3, matrix 0),
        // and a collision could alias two different struct copies.
        const size_t originMemberHash = hasher((static_cast<size_t>(originQualifier.layoutPacking) << 8) |
                                               static_cast<size_t>(originQualifier.layoutMatrix));
        const size_t tmpMemberHash = hasher((static_cast<size_t>(tmpQualifier.layoutPacking) << 8) |
                                            static_cast<size_t>(tmpQualifier.layoutMatrix));
        // Order-dependent fold so member position matters.
        originHash = hasher((originHash ^ originMemberHash) << 1);
        tmpHash = hasher((tmpHash ^ tmpMemberHash) << 1);
    }
    if (originHash != tmpHash) {
        // The fixed qualifiers differ from the original's: cache the copy.
        // operator[] default-constructs the inner map on first use, and
        // emplace keeps an already-recorded equivalent copy, so both the
        // "new origin" and "known origin" cases collapse into one lookup.
        auto result = record[originStruct].emplace(tmpHash, tmpStruct);
        return result.first->second;
    }
    // Qualifiers ended up identical to the original; no copy is needed.
    return originStruct;
}
} // end namespace glslang

View File

@@ -68,6 +68,7 @@ class TScanContext;
class TPpContext;
typedef std::set<int> TIdSetType;
typedef std::map<const TTypeList*, std::map<size_t, const TTypeList*>> TStructRecord;
//
// Sharable code (as well as what's in TParseVersions) across
@@ -418,12 +419,15 @@ public:
void fixBlockLocations(const TSourceLoc&, TQualifier&, TTypeList&, bool memberWithLocation, bool memberWithoutLocation);
void fixXfbOffsets(TQualifier&, TTypeList&);
void fixBlockUniformOffsets(TQualifier&, TTypeList&);
void fixBlockUniformLayoutMatrix(TQualifier&, TTypeList*, TTypeList*);
void fixBlockUniformLayoutPacking(TQualifier&, TTypeList*, TTypeList*);
void addQualifierToExisting(const TSourceLoc&, TQualifier, const TString& identifier);
void addQualifierToExisting(const TSourceLoc&, TQualifier, TIdentifierList&);
void invariantCheck(const TSourceLoc&, const TQualifier&);
void updateStandaloneQualifierDefaults(const TSourceLoc&, const TPublicType&);
void wrapupSwitchSubsequence(TIntermAggregate* statements, TIntermNode* branchNode);
TIntermNode* addSwitch(const TSourceLoc&, TIntermTyped* expression, TIntermAggregate* body);
const TTypeList* recordStructCopy(TStructRecord&, const TType*, const TType*);
#ifndef GLSLANG_WEB
TAttributeType attributeFromName(const TString& name) const;
@@ -484,6 +488,8 @@ protected:
bool anyIndexLimits;
TIdSetType inductiveLoopIds;
TVector<TIntermTyped*> needsIndexLimitationChecking;
TStructRecord matrixFixRecord;
TStructRecord packingFixRecord;
//
// Geometry shader input arrays:

View File

@@ -52,7 +52,7 @@
// Don't maintain an ordinal set of enums (0,1,2,3...) to avoid all possible
// defects from mixing the two different forms.
//
typedef enum {
typedef enum : unsigned {
EBadProfile = 0,
ENoProfile = (1 << 0), // only for desktop, before profiles showed up
ECoreProfile = (1 << 1),

View File

@@ -244,7 +244,7 @@ struct TVarLivePair : std::pair<const TString, TVarEntryInfo> {
second = _Right.second;
return (*this);
}
TVarLivePair(const TVarLivePair& src) { *this = src; }
TVarLivePair(const TVarLivePair& src) : pair(src) { }
};
typedef std::vector<TVarLivePair> TVarLiveVector;

View File

@@ -77,10 +77,10 @@ namespace glslang {
// This is in the glslang namespace directly so it can be a friend of TReflection.
//
class TReflectionTraverser : public TLiveTraverser {
class TReflectionTraverser : public TIntermTraverser {
public:
TReflectionTraverser(const TIntermediate& i, TReflection& r) :
TLiveTraverser(i), reflection(r) { }
TIntermTraverser(), intermediate(i), reflection(r), updateStageMasks(true) { }
virtual bool visitBinary(TVisit, TIntermBinary* node);
virtual void visitSymbol(TIntermSymbol* base);
@@ -92,11 +92,37 @@ public:
if (processedDerefs.find(&base) == processedDerefs.end()) {
processedDerefs.insert(&base);
uint32_t blockIndex = -1;
uint32_t offset = -1;
TList<TIntermBinary*> derefs;
TString baseName = base.getName();
if (base.getType().getBasicType() == EbtBlock) {
offset = 0;
bool anonymous = IsAnonymous(baseName);
const TString& blockName = base.getType().getTypeName();
if (!anonymous)
baseName = blockName;
else
baseName = "";
if (base.getType().isArray()) {
TType derefType(base.getType(), 0);
assert(!anonymous);
for (int e = 0; e < base.getType().getCumulativeArraySize(); ++e)
blockIndex = addBlockName(blockName + "[" + String(e) + "]", derefType,
intermediate.getBlockSize(base.getType()));
}
else
blockIndex = addBlockName(blockName, base.getType(), intermediate.getBlockSize(base.getType()));
}
// Use a degenerate (empty) set of dereferences to immediately put as at the end of
// the dereference change expected by blowUpActiveAggregate.
TList<TIntermBinary*> derefs;
blowUpActiveAggregate(base.getType(), base.getName(), derefs, derefs.end(), -1, -1, 0, 0,
base.getQualifier().storage, true);
blowUpActiveAggregate(base.getType(), baseName, derefs, derefs.end(), offset, blockIndex, 0, 0,
base.getQualifier().storage, updateStageMasks);
}
}
@@ -155,9 +181,9 @@ public:
void getOffsets(const TType& type, TVector<int>& offsets)
{
const TTypeList& memberList = *type.getStruct();
int memberSize = 0;
int offset = 0;
for (size_t m = 0; m < offsets.size(); ++m) {
// if the user supplied an offset, snap to it now
if (memberList[m].type->getQualifier().hasOffset())
@@ -334,7 +360,8 @@ public:
for (int i = 0; i < arrayIterateSize; ++i) {
TString newBaseName = name;
newBaseName.append(TString("[") + String(i) + "]");
if (terminalType->getBasicType() != EbtBlock)
newBaseName.append(TString("[") + String(i) + "]");
TType derefType(*terminalType, 0);
if (offset >= 0)
offset = baseOffset + stride * i;
@@ -643,13 +670,17 @@ public:
blocks.back().numMembers = countAggregateMembers(type);
EShLanguageMask& stages = blocks.back().stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
if (updateStageMasks) {
EShLanguageMask& stages = blocks.back().stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
}
} else {
blockIndex = it->second;
EShLanguageMask& stages = blocks[blockIndex].stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
if (updateStageMasks) {
EShLanguageMask& stages = blocks[blockIndex].stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
}
}
return blockIndex;
@@ -995,8 +1026,10 @@ public:
return type.isArray() ? type.getOuterArraySize() : 1;
}
const TIntermediate& intermediate;
TReflection& reflection;
std::set<const TIntermNode*> processedDerefs;
bool updateStageMasks;
protected:
TReflectionTraverser(TReflectionTraverser&);
@@ -1029,8 +1062,15 @@ bool TReflectionTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
// To reflect non-dereferenced objects.
void TReflectionTraverser::visitSymbol(TIntermSymbol* base)
{
if (base->getQualifier().storage == EvqUniform)
addUniform(*base);
if (base->getQualifier().storage == EvqUniform) {
if (base->getBasicType() == EbtBlock) {
if (reflection.options & EShReflectionSharedStd140Blocks) {
addUniform(*base);
}
} else {
addUniform(*base);
}
}
if ((intermediate.getStage() == reflection.firstStage && base->getQualifier().isPipeInput()) ||
(intermediate.getStage() == reflection.lastStage && base->getQualifier().isPipeOutput()))
@@ -1135,15 +1175,39 @@ bool TReflection::addStage(EShLanguage stage, const TIntermediate& intermediate)
TReflectionTraverser it(intermediate, *this);
// put the entry point on the list of functions to process
it.pushFunction(intermediate.getEntryPointMangledName().c_str());
// process all the functions
while (! it.functions.empty()) {
TIntermNode* function = it.functions.back();
it.functions.pop_back();
function->traverse(&it);
for (auto& sequnence : intermediate.getTreeRoot()->getAsAggregate()->getSequence()) {
if (sequnence->getAsAggregate() != nullptr) {
if (sequnence->getAsAggregate()->getOp() == glslang::EOpLinkerObjects) {
it.updateStageMasks = false;
TIntermAggregate* linkerObjects = sequnence->getAsAggregate();
for (auto& sequnence : linkerObjects->getSequence()) {
auto pNode = sequnence->getAsSymbolNode();
if (pNode != nullptr && pNode->getQualifier().storage == EvqUniform &&
(options & EShReflectionSharedStd140Blocks)) {
if (pNode->getBasicType() == EbtBlock) {
// collect std140 and shared uniform blocks from the AST
if (pNode->getQualifier().layoutPacking == ElpStd140 ||
pNode->getQualifier().layoutPacking == ElpShared) {
pNode->traverse(&it);
}
}
}
}
} else {
// This traverser will traverse every function in the AST.
// To reflect uncalled functions, set the link message EShMsgKeepUncalled.
// When EShMsgKeepUncalled is set, every function is kept in the AST, even uncalled ones.
// That keeps uniform variables in the reflection when they are only used by uncalled functions.
//
// To reflect only live nodes, use the default link messages or leave EShMsgKeepUncalled unset.
// Without EShMsgKeepUncalled, the linker does not keep uncalled functions in the AST,
// so traversing all function nodes is then equivalent to traversing only live functions.
it.updateStageMasks = true;
sequnence->getAsAggregate()->traverse(&it);
}
}
}
it.updateStageMasks = true;
buildCounterIndices(intermediate);
buildUniformStageMask(intermediate);

19
3rdparty/glslang/glslang/Public/ShaderLang.h vendored Executable file → Normal file
View File

@@ -109,7 +109,7 @@ typedef enum {
LAST_ELEMENT_MARKER(EShLangCount),
} EShLanguage; // would be better as stage, but this is ancient now
typedef enum {
typedef enum : unsigned {
EShLangVertexMask = (1 << EShLangVertex),
EShLangTessControlMask = (1 << EShLangTessControl),
EShLangTessEvaluationMask = (1 << EShLangTessEvaluation),
@@ -240,7 +240,7 @@ typedef enum {
//
// Message choices for what errors and warnings are given.
//
enum EShMessages {
enum EShMessages : unsigned {
EShMsgDefault = 0, // default is to give all required errors and extra warnings
EShMsgRelaxedErrors = (1 << 0), // be liberal in accepting input
EShMsgSuppressWarnings = (1 << 1), // suppress all warnings, except those required by the specification
@@ -264,13 +264,14 @@ enum EShMessages {
// Options for building reflection
//
typedef enum {
EShReflectionDefault = 0, // default is original behaviour before options were added
EShReflectionStrictArraySuffix = (1 << 0), // reflection will follow stricter rules for array-of-structs suffixes
EShReflectionBasicArraySuffix = (1 << 1), // arrays of basic types will be appended with [0] as in GL reflection
EShReflectionIntermediateIO = (1 << 2), // reflect inputs and outputs to program, even with no vertex shader
EShReflectionSeparateBuffers = (1 << 3), // buffer variables and buffer blocks are reflected separately
EShReflectionAllBlockVariables = (1 << 4), // reflect all variables in blocks, even if they are inactive
EShReflectionUnwrapIOBlocks = (1 << 5), // unwrap input/output blocks the same as with uniform blocks
EShReflectionDefault = 0, // default is original behaviour before options were added
EShReflectionStrictArraySuffix = (1 << 0), // reflection will follow stricter rules for array-of-structs suffixes
EShReflectionBasicArraySuffix = (1 << 1), // arrays of basic types will be appended with [0] as in GL reflection
EShReflectionIntermediateIO = (1 << 2), // reflect inputs and outputs to program, even with no vertex shader
EShReflectionSeparateBuffers = (1 << 3), // buffer variables and buffer blocks are reflected separately
EShReflectionAllBlockVariables = (1 << 4), // reflect all variables in blocks, even if they are inactive
EShReflectionUnwrapIOBlocks = (1 << 5), // unwrap input/output blocks the same as with uniform blocks
EShReflectionSharedStd140Blocks = (1 << 6), // Apply std140/shared rules for ubo to ssbo
LAST_ELEMENT_MARKER(EShReflectionCount),
} EShReflectionOptions;