Updated spirv-cross.

Author:    Бранимир Караџић
Date:      2025-11-20 09:34:36 -08:00
Committer: Branimir Karadžić
Parent:    a3df232816
Commit:    e4cc183ac7
20 changed files with 900 additions and 597 deletions

View File

@@ -47,7 +47,7 @@
#include "gitversion.h"
#endif
using namespace spv;
using namespace SPIRV_CROSS_SPV_HEADER_NAMESPACE;
using namespace SPIRV_CROSS_NAMESPACE;
using namespace std;
@@ -285,7 +285,7 @@ static bool write_string_to_file(const char *path, const char *string)
#pragma warning(pop)
#endif
static void print_resources(const Compiler &compiler, spv::StorageClass storage,
static void print_resources(const Compiler &compiler, StorageClass storage,
const SmallVector<BuiltInResource> &resources)
{
fprintf(stderr, "%s\n", storage == StorageClassInput ? "builtin inputs" : "builtin outputs");
@@ -326,12 +326,12 @@ static void print_resources(const Compiler &compiler, spv::StorageClass storage,
string builtin_str;
switch (res.builtin)
{
case spv::BuiltInPosition: builtin_str = "Position"; break;
case spv::BuiltInPointSize: builtin_str = "PointSize"; break;
case spv::BuiltInCullDistance: builtin_str = "CullDistance"; break;
case spv::BuiltInClipDistance: builtin_str = "ClipDistance"; break;
case spv::BuiltInTessLevelInner: builtin_str = "TessLevelInner"; break;
case spv::BuiltInTessLevelOuter: builtin_str = "TessLevelOuter"; break;
case BuiltInPosition: builtin_str = "Position"; break;
case BuiltInPointSize: builtin_str = "PointSize"; break;
case BuiltInCullDistance: builtin_str = "CullDistance"; break;
case BuiltInClipDistance: builtin_str = "ClipDistance"; break;
case BuiltInTessLevelInner: builtin_str = "TessLevelInner"; break;
case BuiltInTessLevelOuter: builtin_str = "TessLevelOuter"; break;
default: builtin_str = string("builtin #") + to_string(res.builtin);
}
@@ -421,13 +421,13 @@ static void print_resources(const Compiler &compiler, const char *tag, const Sma
fprintf(stderr, "=============\n\n");
}
static const char *execution_model_to_str(spv::ExecutionModel model)
static const char *execution_model_to_str(ExecutionModel model)
{
switch (model)
{
case spv::ExecutionModelVertex:
case ExecutionModelVertex:
return "vertex";
case spv::ExecutionModelTessellationControl:
case ExecutionModelTessellationControl:
return "tessellation control";
case ExecutionModelTessellationEvaluation:
return "tessellation evaluation";
@@ -538,8 +538,8 @@ static void print_resources(const Compiler &compiler, const ShaderResources &res
print_resources(compiler, "acceleration structures", res.acceleration_structures);
print_resources(compiler, "tensors", res.tensors);
print_resources(compiler, "record buffers", res.shader_record_buffers);
print_resources(compiler, spv::StorageClassInput, res.builtin_inputs);
print_resources(compiler, spv::StorageClassOutput, res.builtin_outputs);
print_resources(compiler, StorageClassInput, res.builtin_inputs);
print_resources(compiler, StorageClassOutput, res.builtin_outputs);
}
static void print_push_constant_resources(const Compiler &compiler, const SmallVector<Resource> &res)
@@ -1175,9 +1175,9 @@ static ExecutionModel stage_to_execution_model(const std::string &stage)
else if (stage == "rcall")
return ExecutionModelCallableKHR;
else if (stage == "mesh")
return spv::ExecutionModelMeshEXT;
return ExecutionModelMeshEXT;
else if (stage == "task")
return spv::ExecutionModelTaskEXT;
return ExecutionModelTaskEXT;
else
SPIRV_CROSS_THROW("Invalid stage.");
}

View File

@@ -642,6 +642,7 @@ typedef enum SpvDecoration_ {
SpvDecorationHostAccessINTEL = 6188,
SpvDecorationInitModeINTEL = 6190,
SpvDecorationImplementInRegisterMapINTEL = 6191,
SpvDecorationConditionalINTEL = 6247,
SpvDecorationCacheControlLoadINTEL = 6442,
SpvDecorationCacheControlStoreINTEL = 6443,
SpvDecorationMax = 0x7fffffff,
@@ -1107,6 +1108,7 @@ typedef enum SpvCapability_ {
SpvCapabilityTextureBoxFilterQCOM = 4485,
SpvCapabilityTextureBlockMatchQCOM = 4486,
SpvCapabilityTileShadingQCOM = 4495,
SpvCapabilityCooperativeMatrixConversionQCOM = 4496,
SpvCapabilityTextureBlockMatch2QCOM = 4498,
SpvCapabilityFloat16ImageAMD = 5008,
SpvCapabilityImageGatherBiasLodAMD = 5009,
@@ -1258,6 +1260,7 @@ typedef enum SpvCapability_ {
SpvCapabilityBitInstructions = 6025,
SpvCapabilityGroupNonUniformRotateKHR = 6026,
SpvCapabilityFloatControls2 = 6029,
SpvCapabilityFMAKHR = 6030,
SpvCapabilityAtomicFloat32AddEXT = 6033,
SpvCapabilityAtomicFloat64AddEXT = 6034,
SpvCapabilityLongCompositesINTEL = 6089,
@@ -1282,6 +1285,9 @@ typedef enum SpvCapability_ {
SpvCapabilitySubgroup2DBlockTransposeINTEL = 6230,
SpvCapabilitySubgroupMatrixMultiplyAccumulateINTEL = 6236,
SpvCapabilityTernaryBitwiseFunctionINTEL = 6241,
SpvCapabilityUntypedVariableLengthArrayINTEL = 6243,
SpvCapabilitySpecConditionalINTEL = 6245,
SpvCapabilityFunctionVariantsINTEL = 6246,
SpvCapabilityGroupUniformArithmeticKHR = 6400,
SpvCapabilityTensorFloat32RoundingINTEL = 6425,
SpvCapabilityMaskedGatherScatterINTEL = 6427,
@@ -1970,12 +1976,14 @@ typedef enum SpvOp_ {
SpvOpUntypedInBoundsPtrAccessChainKHR = 4424,
SpvOpUntypedArrayLengthKHR = 4425,
SpvOpUntypedPrefetchKHR = 4426,
SpvOpFmaKHR = 4427,
SpvOpSubgroupAllKHR = 4428,
SpvOpSubgroupAnyKHR = 4429,
SpvOpSubgroupAllEqualKHR = 4430,
SpvOpGroupNonUniformRotateKHR = 4431,
SpvOpSubgroupReadInvocationKHR = 4432,
SpvOpExtInstWithForwardRefsKHR = 4433,
SpvOpUntypedGroupAsyncCopyKHR = 4434,
SpvOpTraceRayKHR = 4445,
SpvOpExecuteCallableKHR = 4446,
SpvOpConvertUToAccelerationStructureKHR = 4447,
@@ -2012,10 +2020,14 @@ typedef enum SpvOp_ {
SpvOpImageBoxFilterQCOM = 4481,
SpvOpImageBlockMatchSSDQCOM = 4482,
SpvOpImageBlockMatchSADQCOM = 4483,
SpvOpBitCastArrayQCOM = 4497,
SpvOpImageBlockMatchWindowSSDQCOM = 4500,
SpvOpImageBlockMatchWindowSADQCOM = 4501,
SpvOpImageBlockMatchGatherSSDQCOM = 4502,
SpvOpImageBlockMatchGatherSADQCOM = 4503,
SpvOpCompositeConstructCoopMatQCOM = 4540,
SpvOpCompositeExtractCoopMatQCOM = 4541,
SpvOpExtractSubArrayQCOM = 4542,
SpvOpGroupIAddNonUniformAMD = 5000,
SpvOpGroupFAddNonUniformAMD = 5001,
SpvOpGroupFMinNonUniformAMD = 5002,
@@ -2097,6 +2109,7 @@ typedef enum SpvOp_ {
SpvOpTypeAccelerationStructureNV = 5341,
SpvOpExecuteCallableNV = 5344,
SpvOpRayQueryGetClusterIdNV = 5345,
SpvOpRayQueryGetIntersectionClusterIdNV = 5345,
SpvOpHitObjectGetClusterIdNV = 5346,
SpvOpTypeCooperativeMatrixNV = 5358,
SpvOpCooperativeMatrixLoadNV = 5359,
@@ -2406,6 +2419,14 @@ typedef enum SpvOp_ {
SpvOpSubgroup2DBlockStoreINTEL = 6235,
SpvOpSubgroupMatrixMultiplyAccumulateINTEL = 6237,
SpvOpBitwiseFunctionINTEL = 6242,
SpvOpUntypedVariableLengthArrayINTEL = 6244,
SpvOpConditionalExtensionINTEL = 6248,
SpvOpConditionalEntryPointINTEL = 6249,
SpvOpConditionalCapabilityINTEL = 6250,
SpvOpSpecConstantTargetINTEL = 6251,
SpvOpSpecConstantArchitectureINTEL = 6252,
SpvOpSpecConstantCapabilitiesINTEL = 6253,
SpvOpConditionalCopyObjectINTEL = 6254,
SpvOpGroupIMulKHR = 6401,
SpvOpGroupFMulKHR = 6402,
SpvOpGroupBitwiseAndKHR = 6403,
@@ -2800,12 +2821,14 @@ inline void SpvHasResultAndType(SpvOp opcode, bool *hasResult, bool *hasResultTy
case SpvOpUntypedInBoundsPtrAccessChainKHR: *hasResult = true; *hasResultType = true; break;
case SpvOpUntypedArrayLengthKHR: *hasResult = true; *hasResultType = true; break;
case SpvOpUntypedPrefetchKHR: *hasResult = false; *hasResultType = false; break;
case SpvOpFmaKHR: *hasResult = true; *hasResultType = true; break;
case SpvOpSubgroupAllKHR: *hasResult = true; *hasResultType = true; break;
case SpvOpSubgroupAnyKHR: *hasResult = true; *hasResultType = true; break;
case SpvOpSubgroupAllEqualKHR: *hasResult = true; *hasResultType = true; break;
case SpvOpGroupNonUniformRotateKHR: *hasResult = true; *hasResultType = true; break;
case SpvOpSubgroupReadInvocationKHR: *hasResult = true; *hasResultType = true; break;
case SpvOpExtInstWithForwardRefsKHR: *hasResult = true; *hasResultType = true; break;
case SpvOpUntypedGroupAsyncCopyKHR: *hasResult = true; *hasResultType = true; break;
case SpvOpTraceRayKHR: *hasResult = false; *hasResultType = false; break;
case SpvOpExecuteCallableKHR: *hasResult = false; *hasResultType = false; break;
case SpvOpConvertUToAccelerationStructureKHR: *hasResult = true; *hasResultType = true; break;
@@ -2836,10 +2859,14 @@ inline void SpvHasResultAndType(SpvOp opcode, bool *hasResult, bool *hasResultTy
case SpvOpImageBoxFilterQCOM: *hasResult = true; *hasResultType = true; break;
case SpvOpImageBlockMatchSSDQCOM: *hasResult = true; *hasResultType = true; break;
case SpvOpImageBlockMatchSADQCOM: *hasResult = true; *hasResultType = true; break;
case SpvOpBitCastArrayQCOM: *hasResult = true; *hasResultType = true; break;
case SpvOpImageBlockMatchWindowSSDQCOM: *hasResult = true; *hasResultType = true; break;
case SpvOpImageBlockMatchWindowSADQCOM: *hasResult = true; *hasResultType = true; break;
case SpvOpImageBlockMatchGatherSSDQCOM: *hasResult = true; *hasResultType = true; break;
case SpvOpImageBlockMatchGatherSADQCOM: *hasResult = true; *hasResultType = true; break;
case SpvOpCompositeConstructCoopMatQCOM: *hasResult = true; *hasResultType = true; break;
case SpvOpCompositeExtractCoopMatQCOM: *hasResult = true; *hasResultType = true; break;
case SpvOpExtractSubArrayQCOM: *hasResult = true; *hasResultType = true; break;
case SpvOpGroupIAddNonUniformAMD: *hasResult = true; *hasResultType = true; break;
case SpvOpGroupFAddNonUniformAMD: *hasResult = true; *hasResultType = true; break;
case SpvOpGroupFMinNonUniformAMD: *hasResult = true; *hasResultType = true; break;
@@ -2918,7 +2945,7 @@ inline void SpvHasResultAndType(SpvOp opcode, bool *hasResult, bool *hasResultTy
case SpvOpRayQueryGetIntersectionTriangleVertexPositionsKHR: *hasResult = true; *hasResultType = true; break;
case SpvOpTypeAccelerationStructureKHR: *hasResult = true; *hasResultType = false; break;
case SpvOpExecuteCallableNV: *hasResult = false; *hasResultType = false; break;
case SpvOpRayQueryGetClusterIdNV: *hasResult = true; *hasResultType = true; break;
case SpvOpRayQueryGetIntersectionClusterIdNV: *hasResult = true; *hasResultType = true; break;
case SpvOpHitObjectGetClusterIdNV: *hasResult = true; *hasResultType = true; break;
case SpvOpTypeCooperativeMatrixNV: *hasResult = true; *hasResultType = false; break;
case SpvOpCooperativeMatrixLoadNV: *hasResult = true; *hasResultType = true; break;
@@ -3225,6 +3252,14 @@ inline void SpvHasResultAndType(SpvOp opcode, bool *hasResult, bool *hasResultTy
case SpvOpSubgroup2DBlockStoreINTEL: *hasResult = false; *hasResultType = false; break;
case SpvOpSubgroupMatrixMultiplyAccumulateINTEL: *hasResult = true; *hasResultType = true; break;
case SpvOpBitwiseFunctionINTEL: *hasResult = true; *hasResultType = true; break;
case SpvOpUntypedVariableLengthArrayINTEL: *hasResult = true; *hasResultType = true; break;
case SpvOpConditionalExtensionINTEL: *hasResult = false; *hasResultType = false; break;
case SpvOpConditionalEntryPointINTEL: *hasResult = false; *hasResultType = false; break;
case SpvOpConditionalCapabilityINTEL: *hasResult = false; *hasResultType = false; break;
case SpvOpSpecConstantTargetINTEL: *hasResult = true; *hasResultType = true; break;
case SpvOpSpecConstantArchitectureINTEL: *hasResult = true; *hasResultType = true; break;
case SpvOpSpecConstantCapabilitiesINTEL: *hasResult = true; *hasResultType = true; break;
case SpvOpConditionalCopyObjectINTEL: *hasResult = true; *hasResultType = true; break;
case SpvOpGroupIMulKHR: *hasResult = true; *hasResultType = true; break;
case SpvOpGroupFMulKHR: *hasResult = true; *hasResultType = true; break;
case SpvOpGroupBitwiseAndKHR: *hasResult = true; *hasResultType = true; break;
@@ -3765,6 +3800,7 @@ inline const char* SpvDecorationToString(SpvDecoration value) {
case SpvDecorationHostAccessINTEL: return "HostAccessINTEL";
case SpvDecorationInitModeINTEL: return "InitModeINTEL";
case SpvDecorationImplementInRegisterMapINTEL: return "ImplementInRegisterMapINTEL";
case SpvDecorationConditionalINTEL: return "ConditionalINTEL";
case SpvDecorationCacheControlLoadINTEL: return "CacheControlLoadINTEL";
case SpvDecorationCacheControlStoreINTEL: return "CacheControlStoreINTEL";
default: return "Unknown";
@@ -4055,6 +4091,7 @@ inline const char* SpvCapabilityToString(SpvCapability value) {
case SpvCapabilityTextureBoxFilterQCOM: return "TextureBoxFilterQCOM";
case SpvCapabilityTextureBlockMatchQCOM: return "TextureBlockMatchQCOM";
case SpvCapabilityTileShadingQCOM: return "TileShadingQCOM";
case SpvCapabilityCooperativeMatrixConversionQCOM: return "CooperativeMatrixConversionQCOM";
case SpvCapabilityTextureBlockMatch2QCOM: return "TextureBlockMatch2QCOM";
case SpvCapabilityFloat16ImageAMD: return "Float16ImageAMD";
case SpvCapabilityImageGatherBiasLodAMD: return "ImageGatherBiasLodAMD";
@@ -4181,6 +4218,7 @@ inline const char* SpvCapabilityToString(SpvCapability value) {
case SpvCapabilityBitInstructions: return "BitInstructions";
case SpvCapabilityGroupNonUniformRotateKHR: return "GroupNonUniformRotateKHR";
case SpvCapabilityFloatControls2: return "FloatControls2";
case SpvCapabilityFMAKHR: return "FMAKHR";
case SpvCapabilityAtomicFloat32AddEXT: return "AtomicFloat32AddEXT";
case SpvCapabilityAtomicFloat64AddEXT: return "AtomicFloat64AddEXT";
case SpvCapabilityLongCompositesINTEL: return "LongCompositesINTEL";
@@ -4204,6 +4242,9 @@ inline const char* SpvCapabilityToString(SpvCapability value) {
case SpvCapabilitySubgroup2DBlockTransposeINTEL: return "Subgroup2DBlockTransposeINTEL";
case SpvCapabilitySubgroupMatrixMultiplyAccumulateINTEL: return "SubgroupMatrixMultiplyAccumulateINTEL";
case SpvCapabilityTernaryBitwiseFunctionINTEL: return "TernaryBitwiseFunctionINTEL";
case SpvCapabilityUntypedVariableLengthArrayINTEL: return "UntypedVariableLengthArrayINTEL";
case SpvCapabilitySpecConditionalINTEL: return "SpecConditionalINTEL";
case SpvCapabilityFunctionVariantsINTEL: return "FunctionVariantsINTEL";
case SpvCapabilityGroupUniformArithmeticKHR: return "GroupUniformArithmeticKHR";
case SpvCapabilityTensorFloat32RoundingINTEL: return "TensorFloat32RoundingINTEL";
case SpvCapabilityMaskedGatherScatterINTEL: return "MaskedGatherScatterINTEL";
@@ -4773,12 +4814,14 @@ inline const char* SpvOpToString(SpvOp value) {
case SpvOpUntypedInBoundsPtrAccessChainKHR: return "OpUntypedInBoundsPtrAccessChainKHR";
case SpvOpUntypedArrayLengthKHR: return "OpUntypedArrayLengthKHR";
case SpvOpUntypedPrefetchKHR: return "OpUntypedPrefetchKHR";
case SpvOpFmaKHR: return "OpFmaKHR";
case SpvOpSubgroupAllKHR: return "OpSubgroupAllKHR";
case SpvOpSubgroupAnyKHR: return "OpSubgroupAnyKHR";
case SpvOpSubgroupAllEqualKHR: return "OpSubgroupAllEqualKHR";
case SpvOpGroupNonUniformRotateKHR: return "OpGroupNonUniformRotateKHR";
case SpvOpSubgroupReadInvocationKHR: return "OpSubgroupReadInvocationKHR";
case SpvOpExtInstWithForwardRefsKHR: return "OpExtInstWithForwardRefsKHR";
case SpvOpUntypedGroupAsyncCopyKHR: return "OpUntypedGroupAsyncCopyKHR";
case SpvOpTraceRayKHR: return "OpTraceRayKHR";
case SpvOpExecuteCallableKHR: return "OpExecuteCallableKHR";
case SpvOpConvertUToAccelerationStructureKHR: return "OpConvertUToAccelerationStructureKHR";
@@ -4809,10 +4852,14 @@ inline const char* SpvOpToString(SpvOp value) {
case SpvOpImageBoxFilterQCOM: return "OpImageBoxFilterQCOM";
case SpvOpImageBlockMatchSSDQCOM: return "OpImageBlockMatchSSDQCOM";
case SpvOpImageBlockMatchSADQCOM: return "OpImageBlockMatchSADQCOM";
case SpvOpBitCastArrayQCOM: return "OpBitCastArrayQCOM";
case SpvOpImageBlockMatchWindowSSDQCOM: return "OpImageBlockMatchWindowSSDQCOM";
case SpvOpImageBlockMatchWindowSADQCOM: return "OpImageBlockMatchWindowSADQCOM";
case SpvOpImageBlockMatchGatherSSDQCOM: return "OpImageBlockMatchGatherSSDQCOM";
case SpvOpImageBlockMatchGatherSADQCOM: return "OpImageBlockMatchGatherSADQCOM";
case SpvOpCompositeConstructCoopMatQCOM: return "OpCompositeConstructCoopMatQCOM";
case SpvOpCompositeExtractCoopMatQCOM: return "OpCompositeExtractCoopMatQCOM";
case SpvOpExtractSubArrayQCOM: return "OpExtractSubArrayQCOM";
case SpvOpGroupIAddNonUniformAMD: return "OpGroupIAddNonUniformAMD";
case SpvOpGroupFAddNonUniformAMD: return "OpGroupFAddNonUniformAMD";
case SpvOpGroupFMinNonUniformAMD: return "OpGroupFMinNonUniformAMD";
@@ -5198,6 +5245,14 @@ inline const char* SpvOpToString(SpvOp value) {
case SpvOpSubgroup2DBlockStoreINTEL: return "OpSubgroup2DBlockStoreINTEL";
case SpvOpSubgroupMatrixMultiplyAccumulateINTEL: return "OpSubgroupMatrixMultiplyAccumulateINTEL";
case SpvOpBitwiseFunctionINTEL: return "OpBitwiseFunctionINTEL";
case SpvOpUntypedVariableLengthArrayINTEL: return "OpUntypedVariableLengthArrayINTEL";
case SpvOpConditionalExtensionINTEL: return "OpConditionalExtensionINTEL";
case SpvOpConditionalEntryPointINTEL: return "OpConditionalEntryPointINTEL";
case SpvOpConditionalCapabilityINTEL: return "OpConditionalCapabilityINTEL";
case SpvOpSpecConstantTargetINTEL: return "OpSpecConstantTargetINTEL";
case SpvOpSpecConstantArchitectureINTEL: return "OpSpecConstantArchitectureINTEL";
case SpvOpSpecConstantCapabilitiesINTEL: return "OpSpecConstantCapabilitiesINTEL";
case SpvOpConditionalCopyObjectINTEL: return "OpConditionalCopyObjectINTEL";
case SpvOpGroupIMulKHR: return "OpGroupIMulKHR";
case SpvOpGroupFMulKHR: return "OpGroupFMulKHR";
case SpvOpGroupBitwiseAndKHR: return "OpGroupBitwiseAndKHR";
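
As a quick sanity check on the regenerated C header above, the new enumerants are reachable through the existing utility helpers. A minimal, hypothetical C++ snippet (assuming this spirv.h is on the include path and SPV_ENABLE_UTILITY_CODE is defined so the helper functions are available; only symbols visible in this diff are used):

// Hypothetical smoke test, not part of the commit.
#define SPV_ENABLE_UTILITY_CODE
#include "spirv.h"

#include <cstdio>

int main()
{
    bool has_result = false, has_result_type = false;
    SpvHasResultAndType(SpvOpFmaKHR, &has_result, &has_result_type);
    std::printf("%s: result=%d, result type=%d, FMAKHR capability value=%d\n",
                SpvOpToString(SpvOpFmaKHR),
                int(has_result), int(has_result_type), int(SpvCapabilityFMAKHR));
    return 0;
}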

View File

@@ -638,6 +638,7 @@ enum Decoration {
DecorationHostAccessINTEL = 6188,
DecorationInitModeINTEL = 6190,
DecorationImplementInRegisterMapINTEL = 6191,
DecorationConditionalINTEL = 6247,
DecorationCacheControlLoadINTEL = 6442,
DecorationCacheControlStoreINTEL = 6443,
DecorationMax = 0x7fffffff,
@@ -1103,6 +1104,7 @@ enum Capability {
CapabilityTextureBoxFilterQCOM = 4485,
CapabilityTextureBlockMatchQCOM = 4486,
CapabilityTileShadingQCOM = 4495,
CapabilityCooperativeMatrixConversionQCOM = 4496,
CapabilityTextureBlockMatch2QCOM = 4498,
CapabilityFloat16ImageAMD = 5008,
CapabilityImageGatherBiasLodAMD = 5009,
@@ -1254,6 +1256,7 @@ enum Capability {
CapabilityBitInstructions = 6025,
CapabilityGroupNonUniformRotateKHR = 6026,
CapabilityFloatControls2 = 6029,
CapabilityFMAKHR = 6030,
CapabilityAtomicFloat32AddEXT = 6033,
CapabilityAtomicFloat64AddEXT = 6034,
CapabilityLongCompositesINTEL = 6089,
@@ -1278,6 +1281,9 @@ enum Capability {
CapabilitySubgroup2DBlockTransposeINTEL = 6230,
CapabilitySubgroupMatrixMultiplyAccumulateINTEL = 6236,
CapabilityTernaryBitwiseFunctionINTEL = 6241,
CapabilityUntypedVariableLengthArrayINTEL = 6243,
CapabilitySpecConditionalINTEL = 6245,
CapabilityFunctionVariantsINTEL = 6246,
CapabilityGroupUniformArithmeticKHR = 6400,
CapabilityTensorFloat32RoundingINTEL = 6425,
CapabilityMaskedGatherScatterINTEL = 6427,
@@ -1966,12 +1972,14 @@ enum Op {
OpUntypedInBoundsPtrAccessChainKHR = 4424,
OpUntypedArrayLengthKHR = 4425,
OpUntypedPrefetchKHR = 4426,
OpFmaKHR = 4427,
OpSubgroupAllKHR = 4428,
OpSubgroupAnyKHR = 4429,
OpSubgroupAllEqualKHR = 4430,
OpGroupNonUniformRotateKHR = 4431,
OpSubgroupReadInvocationKHR = 4432,
OpExtInstWithForwardRefsKHR = 4433,
OpUntypedGroupAsyncCopyKHR = 4434,
OpTraceRayKHR = 4445,
OpExecuteCallableKHR = 4446,
OpConvertUToAccelerationStructureKHR = 4447,
@@ -2008,10 +2016,14 @@ enum Op {
OpImageBoxFilterQCOM = 4481,
OpImageBlockMatchSSDQCOM = 4482,
OpImageBlockMatchSADQCOM = 4483,
OpBitCastArrayQCOM = 4497,
OpImageBlockMatchWindowSSDQCOM = 4500,
OpImageBlockMatchWindowSADQCOM = 4501,
OpImageBlockMatchGatherSSDQCOM = 4502,
OpImageBlockMatchGatherSADQCOM = 4503,
OpCompositeConstructCoopMatQCOM = 4540,
OpCompositeExtractCoopMatQCOM = 4541,
OpExtractSubArrayQCOM = 4542,
OpGroupIAddNonUniformAMD = 5000,
OpGroupFAddNonUniformAMD = 5001,
OpGroupFMinNonUniformAMD = 5002,
@@ -2093,6 +2105,7 @@ enum Op {
OpTypeAccelerationStructureNV = 5341,
OpExecuteCallableNV = 5344,
OpRayQueryGetClusterIdNV = 5345,
OpRayQueryGetIntersectionClusterIdNV = 5345,
OpHitObjectGetClusterIdNV = 5346,
OpTypeCooperativeMatrixNV = 5358,
OpCooperativeMatrixLoadNV = 5359,
@@ -2402,6 +2415,14 @@ enum Op {
OpSubgroup2DBlockStoreINTEL = 6235,
OpSubgroupMatrixMultiplyAccumulateINTEL = 6237,
OpBitwiseFunctionINTEL = 6242,
OpUntypedVariableLengthArrayINTEL = 6244,
OpConditionalExtensionINTEL = 6248,
OpConditionalEntryPointINTEL = 6249,
OpConditionalCapabilityINTEL = 6250,
OpSpecConstantTargetINTEL = 6251,
OpSpecConstantArchitectureINTEL = 6252,
OpSpecConstantCapabilitiesINTEL = 6253,
OpConditionalCopyObjectINTEL = 6254,
OpGroupIMulKHR = 6401,
OpGroupFMulKHR = 6402,
OpGroupBitwiseAndKHR = 6403,
@@ -2796,12 +2817,14 @@ inline void HasResultAndType(Op opcode, bool *hasResult, bool *hasResultType) {
case OpUntypedInBoundsPtrAccessChainKHR: *hasResult = true; *hasResultType = true; break;
case OpUntypedArrayLengthKHR: *hasResult = true; *hasResultType = true; break;
case OpUntypedPrefetchKHR: *hasResult = false; *hasResultType = false; break;
case OpFmaKHR: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAllKHR: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAnyKHR: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAllEqualKHR: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformRotateKHR: *hasResult = true; *hasResultType = true; break;
case OpSubgroupReadInvocationKHR: *hasResult = true; *hasResultType = true; break;
case OpExtInstWithForwardRefsKHR: *hasResult = true; *hasResultType = true; break;
case OpUntypedGroupAsyncCopyKHR: *hasResult = true; *hasResultType = true; break;
case OpTraceRayKHR: *hasResult = false; *hasResultType = false; break;
case OpExecuteCallableKHR: *hasResult = false; *hasResultType = false; break;
case OpConvertUToAccelerationStructureKHR: *hasResult = true; *hasResultType = true; break;
@@ -2832,10 +2855,14 @@ inline void HasResultAndType(Op opcode, bool *hasResult, bool *hasResultType) {
case OpImageBoxFilterQCOM: *hasResult = true; *hasResultType = true; break;
case OpImageBlockMatchSSDQCOM: *hasResult = true; *hasResultType = true; break;
case OpImageBlockMatchSADQCOM: *hasResult = true; *hasResultType = true; break;
case OpBitCastArrayQCOM: *hasResult = true; *hasResultType = true; break;
case OpImageBlockMatchWindowSSDQCOM: *hasResult = true; *hasResultType = true; break;
case OpImageBlockMatchWindowSADQCOM: *hasResult = true; *hasResultType = true; break;
case OpImageBlockMatchGatherSSDQCOM: *hasResult = true; *hasResultType = true; break;
case OpImageBlockMatchGatherSADQCOM: *hasResult = true; *hasResultType = true; break;
case OpCompositeConstructCoopMatQCOM: *hasResult = true; *hasResultType = true; break;
case OpCompositeExtractCoopMatQCOM: *hasResult = true; *hasResultType = true; break;
case OpExtractSubArrayQCOM: *hasResult = true; *hasResultType = true; break;
case OpGroupIAddNonUniformAMD: *hasResult = true; *hasResultType = true; break;
case OpGroupFAddNonUniformAMD: *hasResult = true; *hasResultType = true; break;
case OpGroupFMinNonUniformAMD: *hasResult = true; *hasResultType = true; break;
@@ -2914,7 +2941,7 @@ inline void HasResultAndType(Op opcode, bool *hasResult, bool *hasResultType) {
case OpRayQueryGetIntersectionTriangleVertexPositionsKHR: *hasResult = true; *hasResultType = true; break;
case OpTypeAccelerationStructureKHR: *hasResult = true; *hasResultType = false; break;
case OpExecuteCallableNV: *hasResult = false; *hasResultType = false; break;
case OpRayQueryGetClusterIdNV: *hasResult = true; *hasResultType = true; break;
case OpRayQueryGetIntersectionClusterIdNV: *hasResult = true; *hasResultType = true; break;
case OpHitObjectGetClusterIdNV: *hasResult = true; *hasResultType = true; break;
case OpTypeCooperativeMatrixNV: *hasResult = true; *hasResultType = false; break;
case OpCooperativeMatrixLoadNV: *hasResult = true; *hasResultType = true; break;
@@ -3221,6 +3248,14 @@ inline void HasResultAndType(Op opcode, bool *hasResult, bool *hasResultType) {
case OpSubgroup2DBlockStoreINTEL: *hasResult = false; *hasResultType = false; break;
case OpSubgroupMatrixMultiplyAccumulateINTEL: *hasResult = true; *hasResultType = true; break;
case OpBitwiseFunctionINTEL: *hasResult = true; *hasResultType = true; break;
case OpUntypedVariableLengthArrayINTEL: *hasResult = true; *hasResultType = true; break;
case OpConditionalExtensionINTEL: *hasResult = false; *hasResultType = false; break;
case OpConditionalEntryPointINTEL: *hasResult = false; *hasResultType = false; break;
case OpConditionalCapabilityINTEL: *hasResult = false; *hasResultType = false; break;
case OpSpecConstantTargetINTEL: *hasResult = true; *hasResultType = true; break;
case OpSpecConstantArchitectureINTEL: *hasResult = true; *hasResultType = true; break;
case OpSpecConstantCapabilitiesINTEL: *hasResult = true; *hasResultType = true; break;
case OpConditionalCopyObjectINTEL: *hasResult = true; *hasResultType = true; break;
case OpGroupIMulKHR: *hasResult = true; *hasResultType = true; break;
case OpGroupFMulKHR: *hasResult = true; *hasResultType = true; break;
case OpGroupBitwiseAndKHR: *hasResult = true; *hasResultType = true; break;
@@ -3761,6 +3796,7 @@ inline const char* DecorationToString(Decoration value) {
case DecorationHostAccessINTEL: return "HostAccessINTEL";
case DecorationInitModeINTEL: return "InitModeINTEL";
case DecorationImplementInRegisterMapINTEL: return "ImplementInRegisterMapINTEL";
case DecorationConditionalINTEL: return "ConditionalINTEL";
case DecorationCacheControlLoadINTEL: return "CacheControlLoadINTEL";
case DecorationCacheControlStoreINTEL: return "CacheControlStoreINTEL";
default: return "Unknown";
@@ -4051,6 +4087,7 @@ inline const char* CapabilityToString(Capability value) {
case CapabilityTextureBoxFilterQCOM: return "TextureBoxFilterQCOM";
case CapabilityTextureBlockMatchQCOM: return "TextureBlockMatchQCOM";
case CapabilityTileShadingQCOM: return "TileShadingQCOM";
case CapabilityCooperativeMatrixConversionQCOM: return "CooperativeMatrixConversionQCOM";
case CapabilityTextureBlockMatch2QCOM: return "TextureBlockMatch2QCOM";
case CapabilityFloat16ImageAMD: return "Float16ImageAMD";
case CapabilityImageGatherBiasLodAMD: return "ImageGatherBiasLodAMD";
@@ -4177,6 +4214,7 @@ inline const char* CapabilityToString(Capability value) {
case CapabilityBitInstructions: return "BitInstructions";
case CapabilityGroupNonUniformRotateKHR: return "GroupNonUniformRotateKHR";
case CapabilityFloatControls2: return "FloatControls2";
case CapabilityFMAKHR: return "FMAKHR";
case CapabilityAtomicFloat32AddEXT: return "AtomicFloat32AddEXT";
case CapabilityAtomicFloat64AddEXT: return "AtomicFloat64AddEXT";
case CapabilityLongCompositesINTEL: return "LongCompositesINTEL";
@@ -4200,6 +4238,9 @@ inline const char* CapabilityToString(Capability value) {
case CapabilitySubgroup2DBlockTransposeINTEL: return "Subgroup2DBlockTransposeINTEL";
case CapabilitySubgroupMatrixMultiplyAccumulateINTEL: return "SubgroupMatrixMultiplyAccumulateINTEL";
case CapabilityTernaryBitwiseFunctionINTEL: return "TernaryBitwiseFunctionINTEL";
case CapabilityUntypedVariableLengthArrayINTEL: return "UntypedVariableLengthArrayINTEL";
case CapabilitySpecConditionalINTEL: return "SpecConditionalINTEL";
case CapabilityFunctionVariantsINTEL: return "FunctionVariantsINTEL";
case CapabilityGroupUniformArithmeticKHR: return "GroupUniformArithmeticKHR";
case CapabilityTensorFloat32RoundingINTEL: return "TensorFloat32RoundingINTEL";
case CapabilityMaskedGatherScatterINTEL: return "MaskedGatherScatterINTEL";
@@ -4769,12 +4810,14 @@ inline const char* OpToString(Op value) {
case OpUntypedInBoundsPtrAccessChainKHR: return "OpUntypedInBoundsPtrAccessChainKHR";
case OpUntypedArrayLengthKHR: return "OpUntypedArrayLengthKHR";
case OpUntypedPrefetchKHR: return "OpUntypedPrefetchKHR";
case OpFmaKHR: return "OpFmaKHR";
case OpSubgroupAllKHR: return "OpSubgroupAllKHR";
case OpSubgroupAnyKHR: return "OpSubgroupAnyKHR";
case OpSubgroupAllEqualKHR: return "OpSubgroupAllEqualKHR";
case OpGroupNonUniformRotateKHR: return "OpGroupNonUniformRotateKHR";
case OpSubgroupReadInvocationKHR: return "OpSubgroupReadInvocationKHR";
case OpExtInstWithForwardRefsKHR: return "OpExtInstWithForwardRefsKHR";
case OpUntypedGroupAsyncCopyKHR: return "OpUntypedGroupAsyncCopyKHR";
case OpTraceRayKHR: return "OpTraceRayKHR";
case OpExecuteCallableKHR: return "OpExecuteCallableKHR";
case OpConvertUToAccelerationStructureKHR: return "OpConvertUToAccelerationStructureKHR";
@@ -4805,10 +4848,14 @@ inline const char* OpToString(Op value) {
case OpImageBoxFilterQCOM: return "OpImageBoxFilterQCOM";
case OpImageBlockMatchSSDQCOM: return "OpImageBlockMatchSSDQCOM";
case OpImageBlockMatchSADQCOM: return "OpImageBlockMatchSADQCOM";
case OpBitCastArrayQCOM: return "OpBitCastArrayQCOM";
case OpImageBlockMatchWindowSSDQCOM: return "OpImageBlockMatchWindowSSDQCOM";
case OpImageBlockMatchWindowSADQCOM: return "OpImageBlockMatchWindowSADQCOM";
case OpImageBlockMatchGatherSSDQCOM: return "OpImageBlockMatchGatherSSDQCOM";
case OpImageBlockMatchGatherSADQCOM: return "OpImageBlockMatchGatherSADQCOM";
case OpCompositeConstructCoopMatQCOM: return "OpCompositeConstructCoopMatQCOM";
case OpCompositeExtractCoopMatQCOM: return "OpCompositeExtractCoopMatQCOM";
case OpExtractSubArrayQCOM: return "OpExtractSubArrayQCOM";
case OpGroupIAddNonUniformAMD: return "OpGroupIAddNonUniformAMD";
case OpGroupFAddNonUniformAMD: return "OpGroupFAddNonUniformAMD";
case OpGroupFMinNonUniformAMD: return "OpGroupFMinNonUniformAMD";
@@ -5194,6 +5241,14 @@ inline const char* OpToString(Op value) {
case OpSubgroup2DBlockStoreINTEL: return "OpSubgroup2DBlockStoreINTEL";
case OpSubgroupMatrixMultiplyAccumulateINTEL: return "OpSubgroupMatrixMultiplyAccumulateINTEL";
case OpBitwiseFunctionINTEL: return "OpBitwiseFunctionINTEL";
case OpUntypedVariableLengthArrayINTEL: return "OpUntypedVariableLengthArrayINTEL";
case OpConditionalExtensionINTEL: return "OpConditionalExtensionINTEL";
case OpConditionalEntryPointINTEL: return "OpConditionalEntryPointINTEL";
case OpConditionalCapabilityINTEL: return "OpConditionalCapabilityINTEL";
case OpSpecConstantTargetINTEL: return "OpSpecConstantTargetINTEL";
case OpSpecConstantArchitectureINTEL: return "OpSpecConstantArchitectureINTEL";
case OpSpecConstantCapabilitiesINTEL: return "OpSpecConstantCapabilitiesINTEL";
case OpConditionalCopyObjectINTEL: return "OpConditionalCopyObjectINTEL";
case OpGroupIMulKHR: return "OpGroupIMulKHR";
case OpGroupFMulKHR: return "OpGroupFMulKHR";
case OpGroupBitwiseAndKHR: return "OpGroupBitwiseAndKHR";

View File

@@ -27,8 +27,17 @@
#ifndef SPV_ENABLE_UTILITY_CODE
#define SPV_ENABLE_UTILITY_CODE
#endif
#include "spirv.hpp"
// Pragmatic hack to avoid symbol conflicts when including both hpp11 and hpp headers in same translation unit.
// This is an unfortunate SPIRV-Headers issue that we cannot easily deal with ourselves.
#ifdef SPIRV_CROSS_SPV_HEADER_NAMESPACE_OVERRIDE
#define spv SPIRV_CROSS_SPV_HEADER_NAMESPACE_OVERRIDE
#define SPIRV_CROSS_SPV_HEADER_NAMESPACE SPIRV_CROSS_SPV_HEADER_NAMESPACE_OVERRIDE
#else
#define SPIRV_CROSS_SPV_HEADER_NAMESPACE spv
#endif
#include "spirv.hpp"
#include "spirv_cross_containers.hpp"
#include "spirv_cross_error_handling.hpp"
#include <functional>
@@ -2041,4 +2050,7 @@ struct hash<SPIRV_CROSS_NAMESPACE::TypedID<type>>
};
} // namespace std
#ifdef SPIRV_CROSS_SPV_HEADER_NAMESPACE_OVERRIDE
#undef spv
#endif
#endif
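
The namespace-override hack above is purely a preprocessor trick: when SPIRV_CROSS_SPV_HEADER_NAMESPACE_OVERRIDE is defined before any SPIRV-Cross header is included, the spv token is rewritten to that name while spirv.hpp is parsed, so the bundled enums land in a differently named namespace and cannot collide with another SPIR-V header that declares the real namespace spv in the same translation unit. A hedged sketch of how a consumer might opt in (the override name below is an illustrative assumption, not something defined by this commit):

// Hypothetical consumer translation unit; demonstrates the override, not an official recipe.
#define SPIRV_CROSS_SPV_HEADER_NAMESPACE_OVERRIDE spv_for_spirv_cross
#include "spirv_cross.hpp"  // the bundled spirv.hpp is now parsed into ::spv_for_spirv_cross

// The "real" SPIRV-Headers spirv.hpp11 (which declares its own namespace spv) can then be
// included elsewhere in the same translation unit without redefinition conflicts.

void example()
{
    // Plain (unscoped) enum from the bundled spirv.hpp, under the overridden namespace name.
    auto model = spv_for_spirv_cross::ExecutionModelVertex;
    (void)model;
}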

View File

@@ -23,7 +23,7 @@
#include "spirv_cpp.hpp"
using namespace spv;
using namespace SPIRV_CROSS_SPV_HEADER_NAMESPACE;
using namespace SPIRV_CROSS_NAMESPACE;
using namespace std;

View File

@@ -31,7 +31,7 @@
#include <utility>
using namespace std;
using namespace spv;
using namespace SPIRV_CROSS_SPV_HEADER_NAMESPACE;
using namespace SPIRV_CROSS_NAMESPACE;
Compiler::Compiler(vector<uint32_t> ir_)
@@ -628,7 +628,7 @@ bool Compiler::is_immutable(uint32_t id) const
return false;
}
static inline bool storage_class_is_interface(spv::StorageClass storage)
static inline bool storage_class_is_interface(StorageClass storage)
{
switch (storage)
{
@@ -661,8 +661,8 @@ bool Compiler::is_hidden_variable(const SPIRVariable &var, bool include_builtins
// In SPIR-V 1.4 and up we must also use the active variable interface to disable global variables
// which are not part of the entry point.
if (ir.get_spirv_version() >= 0x10400 && var.storage != spv::StorageClassGeneric &&
var.storage != spv::StorageClassFunction && !interface_variable_exists_in_entry_point(var.self))
if (ir.get_spirv_version() >= 0x10400 && var.storage != StorageClassGeneric &&
var.storage != StorageClassFunction && !interface_variable_exists_in_entry_point(var.self))
{
return true;
}
@@ -1341,7 +1341,7 @@ const SPIRType &Compiler::get_pointee_type(uint32_t type_id) const
uint32_t Compiler::get_variable_data_type_id(const SPIRVariable &var) const
{
if (var.phi_variable || var.storage == spv::StorageClass::StorageClassAtomicCounter)
if (var.phi_variable || var.storage == StorageClassAtomicCounter)
return var.basetype;
return get_pointee_type_id(var.basetype);
}
@@ -1378,7 +1378,7 @@ bool Compiler::is_sampled_image_type(const SPIRType &type)
type.image.dim != DimBuffer;
}
void Compiler::set_member_decoration_string(TypeID id, uint32_t index, spv::Decoration decoration,
void Compiler::set_member_decoration_string(TypeID id, uint32_t index, Decoration decoration,
const std::string &argument)
{
ir.set_member_decoration_string(id, index, decoration, argument);
@@ -1439,7 +1439,7 @@ void Compiler::unset_member_decoration(TypeID id, uint32_t index, Decoration dec
ir.unset_member_decoration(id, index, decoration);
}
void Compiler::set_decoration_string(ID id, spv::Decoration decoration, const std::string &argument)
void Compiler::set_decoration_string(ID id, Decoration decoration, const std::string &argument)
{
ir.set_decoration_string(id, decoration, argument);
}
@@ -1602,7 +1602,7 @@ void Compiler::unset_decoration(ID id, Decoration decoration)
ir.unset_decoration(id, decoration);
}
bool Compiler::get_binary_offset_for_decoration(VariableID id, spv::Decoration decoration, uint32_t &word_offset) const
bool Compiler::get_binary_offset_for_decoration(VariableID id, Decoration decoration, uint32_t &word_offset) const
{
auto *m = ir.find_meta(id);
if (!m)
@@ -1907,6 +1907,15 @@ bool Compiler::traverse_all_reachable_opcodes(const SPIRBlock &block, OpcodeHand
handler.set_current_block(block);
handler.rearm_current_block(block);
if (handler.enable_result_types)
{
for (auto &phi: block.phi_variables)
{
auto &v = get<SPIRVariable>(phi.function_variable);
handler.result_types[phi.function_variable] = v.basetype;
}
}
// Ideally, perhaps traverse the CFG instead of all blocks in order to eliminate dead blocks,
// but this shouldn't be a problem in practice unless the SPIR-V is doing insane things like recursing
// inside dead blocks ...
@@ -1918,11 +1927,24 @@ bool Compiler::traverse_all_reachable_opcodes(const SPIRBlock &block, OpcodeHand
if (!handler.handle(op, ops, i.length))
return false;
if (handler.enable_result_types)
{
// If it has one, keep track of the instruction's result type, mapped by ID
uint32_t result_type, result_id;
if (instruction_to_result_type(result_type, result_id, op, ops, i.length))
handler.result_types[result_id] = result_type;
}
if (op == OpFunctionCall)
{
auto &func = get<SPIRFunction>(ops[2]);
if (handler.follow_function_call(func))
{
if (handler.enable_result_types)
for (auto &arg : func.arguments)
if (!arg.alias_global_variable)
handler.result_types[arg.id] = arg.type;
if (!handler.begin_function_scope(ops, i.length))
return false;
if (!traverse_all_reachable_opcodes(get<SPIRFunction>(ops[2]), handler))
@@ -2457,7 +2479,7 @@ uint32_t Compiler::get_work_group_size_specialization_constants(SpecializationCo
return execution.workgroup_size.constant;
}
uint32_t Compiler::get_execution_mode_argument(spv::ExecutionMode mode, uint32_t index) const
uint32_t Compiler::get_execution_mode_argument(ExecutionMode mode, uint32_t index) const
{
auto &execution = get_entry_point();
switch (mode)
@@ -2643,14 +2665,14 @@ SmallVector<EntryPoint> Compiler::get_entry_points_and_stages() const
return entries;
}
void Compiler::rename_entry_point(const std::string &old_name, const std::string &new_name, spv::ExecutionModel model)
void Compiler::rename_entry_point(const std::string &old_name, const std::string &new_name, ExecutionModel model)
{
auto &entry = get_entry_point(old_name, model);
entry.orig_name = new_name;
entry.name = new_name;
}
void Compiler::set_entry_point(const std::string &name, spv::ExecutionModel model)
void Compiler::set_entry_point(const std::string &name, ExecutionModel model)
{
auto &entry = get_entry_point(name, model);
ir.default_entry_point = entry.self;
@@ -3346,7 +3368,7 @@ void Compiler::analyze_parameter_preservation(
Compiler::AnalyzeVariableScopeAccessHandler::AnalyzeVariableScopeAccessHandler(Compiler &compiler_,
SPIRFunction &entry_)
: compiler(compiler_)
: OpcodeHandler(compiler_)
, entry(entry_)
{
}
@@ -3464,11 +3486,11 @@ bool Compiler::AnalyzeVariableScopeAccessHandler::handle_terminator(const SPIRBl
return true;
}
bool Compiler::AnalyzeVariableScopeAccessHandler::handle(spv::Op op, const uint32_t *args, uint32_t length)
bool Compiler::AnalyzeVariableScopeAccessHandler::handle(Op op, const uint32_t *args, uint32_t length)
{
// Keep track of the types of temporaries, so we can hoist them out as necessary.
uint32_t result_type = 0, result_id = 0;
if (compiler.instruction_to_result_type(result_type, result_id, op, args, length))
if (instruction_to_result_type(result_type, result_id, op, args, length))
{
// For some opcodes, we will need to override the result id.
// If we need to hoist the temporary, the temporary type is the input, not the result.
@@ -3811,7 +3833,7 @@ bool Compiler::AnalyzeVariableScopeAccessHandler::handle(spv::Op op, const uint3
}
Compiler::StaticExpressionAccessHandler::StaticExpressionAccessHandler(Compiler &compiler_, uint32_t variable_id_)
: compiler(compiler_)
: OpcodeHandler(compiler_)
, variable_id(variable_id_)
{
}
@@ -3821,7 +3843,7 @@ bool Compiler::StaticExpressionAccessHandler::follow_function_call(const SPIRFun
return false;
}
bool Compiler::StaticExpressionAccessHandler::handle(spv::Op op, const uint32_t *args, uint32_t length)
bool Compiler::StaticExpressionAccessHandler::handle(Op op, const uint32_t *args, uint32_t length)
{
switch (op)
{
@@ -4381,7 +4403,7 @@ bool Compiler::may_read_undefined_variable_in_block(const SPIRBlock &block, uint
return true;
}
bool Compiler::GeometryEmitDisocveryHandler::handle(spv::Op opcode, const uint32_t *, uint32_t)
bool Compiler::GeometryEmitDisocveryHandler::handle(Op opcode, const uint32_t *, uint32_t)
{
if (opcode == OpEmitVertex || opcode == OpEndPrimitive)
{
@@ -4399,8 +4421,9 @@ bool Compiler::GeometryEmitDisocveryHandler::begin_function_scope(const uint32_t
return true;
}
bool Compiler::GeometryEmitDisocveryHandler::end_function_scope([[maybe_unused]] const uint32_t *stream, uint32_t)
bool Compiler::GeometryEmitDisocveryHandler::end_function_scope(const uint32_t *stream, uint32_t)
{
(void)stream;
assert(function_stack.back() == &compiler.get<SPIRFunction>(stream[2]));
function_stack.pop_back();
@@ -4521,7 +4544,7 @@ void Compiler::ActiveBuiltinHandler::add_if_builtin_or_block(uint32_t id)
add_if_builtin(id, true);
}
bool Compiler::ActiveBuiltinHandler::handle(spv::Op opcode, const uint32_t *args, uint32_t length)
bool Compiler::ActiveBuiltinHandler::handle(Op opcode, const uint32_t *args, uint32_t length)
{
switch (opcode)
{
@@ -4716,7 +4739,7 @@ void Compiler::analyze_image_and_sampler_usage()
comparison_ids.insert(combined.combined_id);
}
bool Compiler::CombinedImageSamplerDrefHandler::handle(spv::Op opcode, const uint32_t *args, uint32_t)
bool Compiler::CombinedImageSamplerDrefHandler::handle(Op opcode, const uint32_t *args, uint32_t)
{
// Mark all sampled images which are used with Dref.
switch (opcode)
@@ -4825,11 +4848,11 @@ void Compiler::build_function_control_flow_graphs_and_analyze()
}
Compiler::CFGBuilder::CFGBuilder(Compiler &compiler_)
: compiler(compiler_)
: OpcodeHandler(compiler_)
{
}
bool Compiler::CFGBuilder::handle(spv::Op, const uint32_t *, uint32_t)
bool Compiler::CFGBuilder::handle(Op, const uint32_t *, uint32_t)
{
return true;
}
@@ -5005,7 +5028,7 @@ void Compiler::make_constant_null(uint32_t id, uint32_t type)
}
}
const SmallVector<spv::Capability> &Compiler::get_declared_capabilities() const
const SmallVector<Capability> &Compiler::get_declared_capabilities() const
{
return ir.declared_capabilities;
}
@@ -5080,7 +5103,7 @@ bool Compiler::reflection_ssbo_instance_name_is_significant() const
return aliased_ssbo_types;
}
bool Compiler::instruction_to_result_type(uint32_t &result_type, uint32_t &result_id, spv::Op op,
bool Compiler::instruction_to_result_type(uint32_t &result_type, uint32_t &result_id, Op op,
const uint32_t *args, uint32_t length)
{
if (length < 2)
@@ -5127,7 +5150,7 @@ Bitset Compiler::combined_decoration_for_member(const SPIRType &type, uint32_t i
return flags;
}
bool Compiler::is_desktop_only_format(spv::ImageFormat format)
bool Compiler::is_desktop_only_format(ImageFormat format)
{
switch (format)
{
@@ -5197,7 +5220,7 @@ void Compiler::clear_force_recompile()
}
Compiler::PhysicalStorageBufferPointerHandler::PhysicalStorageBufferPointerHandler(Compiler &compiler_)
: compiler(compiler_)
: OpcodeHandler(compiler_)
{
}
@@ -5246,7 +5269,7 @@ bool Compiler::PhysicalStorageBufferPointerHandler::type_is_bda_block_entry(uint
uint32_t Compiler::PhysicalStorageBufferPointerHandler::get_minimum_scalar_alignment(const SPIRType &type) const
{
if (type.storage == spv::StorageClassPhysicalStorageBuffer)
if (type.storage == StorageClassPhysicalStorageBuffer)
return 8;
else if (type.basetype == SPIRType::Struct)
{
@@ -5764,3 +5787,13 @@ void Compiler::add_loop_level()
{
current_loop_level++;
}
const SPIRType *Compiler::OpcodeHandler::get_expression_result_type(uint32_t id) const
{
auto itr = result_types.find(id);
if (itr == result_types.end())
return nullptr;
return &compiler.get<SPIRType>(itr->second);
}
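
With the result_types bookkeeping added above, any traversal handler that sets enable_result_types can query the SPIRType behind an SSA id it sees, including phi variables and forwarded function-call arguments. A hypothetical handler sketch (the handler name and the OpLoad logging are illustrative; OpcodeHandler, enable_result_types, handle() and get_expression_result_type() are the members shown in this diff, and since OpcodeHandler is a protected member of Compiler, a handler like this would live inside the library rather than in client code):

// Illustrative only; mirrors the existing handler pattern in spirv_cross.cpp.
struct LoadTypeLoggingHandler : Compiler::OpcodeHandler
{
    explicit LoadTypeLoggingHandler(Compiler &compiler_)
        : OpcodeHandler(compiler_)
    {
        // Ask traverse_all_reachable_opcodes() to populate result_types while walking.
        enable_result_types = true;
    }

    bool handle(Op opcode, const uint32_t *args, uint32_t length) override
    {
        if (opcode == OpLoad && length >= 3)
        {
            // args[0] = result type, args[1] = result id, args[2] = pointer operand.
            if (const SPIRType *ptr_type = get_expression_result_type(args[2]))
                fprintf(stderr, "OpLoad %%%u reads through a pointer of base type %d\n",
                        args[1], int(ptr_type->basetype));
        }
        return true; // keep traversing
    }
};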

View File

@@ -27,12 +27,20 @@
#ifndef SPV_ENABLE_UTILITY_CODE
#define SPV_ENABLE_UTILITY_CODE
#endif
// Pragmatic hack to avoid symbol conflicts when including both hpp11 and hpp headers in same translation unit.
// This is an unfortunate SPIRV-Headers issue that we cannot easily deal with ourselves.
#ifdef SPIRV_CROSS_SPV_HEADER_NAMESPACE_OVERRIDE
#define spv SPIRV_CROSS_SPV_HEADER_NAMESPACE_OVERRIDE
#endif
#include "spirv.hpp"
#include "spirv_cfg.hpp"
#include "spirv_cross_parsed_ir.hpp"
namespace SPIRV_CROSS_NAMESPACE
{
using namespace SPIRV_CROSS_SPV_HEADER_NAMESPACE;
struct Resource
{
// Resources are identified with their SPIR-V ID.
@@ -69,7 +77,7 @@ struct BuiltInResource
// A builtin present here does not necessarily mean it's considered an active builtin,
// since variable ID "activeness" is only tracked on OpVariable level, not Block members.
// For that, update_active_builtins() -> has_active_builtin() can be used to further refine the reflection.
spv::BuiltIn builtin;
BuiltIn builtin;
// This is the actual value type of the builtin.
// Typically float4, float, array<float, N> for the gl_PerVertex builtins.
@@ -152,7 +160,7 @@ enum BufferPackingStandard
struct EntryPoint
{
std::string name;
spv::ExecutionModel execution_model;
ExecutionModel execution_model;
};
class Compiler
@@ -183,8 +191,8 @@ public:
const std::string &get_name(ID id) const;
// Applies a decoration to an ID. Effectively injects OpDecorate.
void set_decoration(ID id, spv::Decoration decoration, uint32_t argument = 0);
void set_decoration_string(ID id, spv::Decoration decoration, const std::string &argument);
void set_decoration(ID id, Decoration decoration, uint32_t argument = 0);
void set_decoration_string(ID id, Decoration decoration, const std::string &argument);
// Overrides the identifier OpName of an ID.
// Identifiers beginning with underscores or identifiers which contain double underscores
@@ -192,22 +200,22 @@ public:
void set_name(ID id, const std::string &name);
// Gets a bitmask for the decorations which are applied to ID.
// I.e. (1ull << spv::DecorationFoo) | (1ull << spv::DecorationBar)
// I.e. (1ull << DecorationFoo) | (1ull << DecorationBar)
const Bitset &get_decoration_bitset(ID id) const;
// Returns whether the decoration has been applied to the ID.
bool has_decoration(ID id, spv::Decoration decoration) const;
bool has_decoration(ID id, Decoration decoration) const;
// Gets the value for decorations which take arguments.
// If the decoration is a boolean (i.e. spv::DecorationNonWritable),
// If the decoration is a boolean (i.e. DecorationNonWritable),
// 1 will be returned.
// If decoration doesn't exist or decoration is not recognized,
// 0 will be returned.
uint32_t get_decoration(ID id, spv::Decoration decoration) const;
const std::string &get_decoration_string(ID id, spv::Decoration decoration) const;
uint32_t get_decoration(ID id, Decoration decoration) const;
const std::string &get_decoration_string(ID id, Decoration decoration) const;
// Removes the decoration for an ID.
void unset_decoration(ID id, spv::Decoration decoration);
void unset_decoration(ID id, Decoration decoration);
// Gets the SPIR-V type associated with ID.
// Mostly used with Resource::type_id and Resource::base_type_id to parse the underlying type of a resource.
@@ -217,7 +225,7 @@ public:
const SPIRType &get_type_from_variable(VariableID id) const;
// Gets the underlying storage class for an OpVariable.
spv::StorageClass get_storage_class(VariableID id) const;
StorageClass get_storage_class(VariableID id) const;
// If get_name() is an empty string, get the fallback name which will be used
// instead in the disassembled source.
@@ -232,8 +240,8 @@ public:
const std::string &get_member_name(TypeID id, uint32_t index) const;
// Given an OpTypeStruct in ID, obtain the OpMemberDecoration for member number "index".
uint32_t get_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration) const;
const std::string &get_member_decoration_string(TypeID id, uint32_t index, spv::Decoration decoration) const;
uint32_t get_member_decoration(TypeID id, uint32_t index, Decoration decoration) const;
const std::string &get_member_decoration_string(TypeID id, uint32_t index, Decoration decoration) const;
// Sets the member identifier for OpTypeStruct ID, member number "index".
void set_member_name(TypeID id, uint32_t index, const std::string &name);
@@ -246,15 +254,15 @@ public:
const Bitset &get_member_decoration_bitset(TypeID id, uint32_t index) const;
// Returns whether the decoration has been applied to a member of a struct.
bool has_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration) const;
bool has_member_decoration(TypeID id, uint32_t index, Decoration decoration) const;
// Similar to set_decoration, but for struct members.
void set_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration, uint32_t argument = 0);
void set_member_decoration_string(TypeID id, uint32_t index, spv::Decoration decoration,
void set_member_decoration(TypeID id, uint32_t index, Decoration decoration, uint32_t argument = 0);
void set_member_decoration_string(TypeID id, uint32_t index, Decoration decoration,
const std::string &argument);
// Unsets a member decoration, similar to unset_decoration.
void unset_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration);
void unset_member_decoration(TypeID id, uint32_t index, Decoration decoration);
// Gets the fallback name for a member, similar to get_fallback_name.
virtual const std::string get_fallback_member_name(uint32_t index) const
@@ -340,28 +348,28 @@ public:
// Names for entry points in the SPIR-V module may alias if they belong to different execution models.
// To disambiguate, we must pass along with the entry point names the execution model.
SmallVector<EntryPoint> get_entry_points_and_stages() const;
void set_entry_point(const std::string &entry, spv::ExecutionModel execution_model);
void set_entry_point(const std::string &entry, ExecutionModel execution_model);
// Renames an entry point from old_name to new_name.
// If old_name is currently selected as the current entry point, it will continue to be the current entry point,
// albeit with a new name.
// get_entry_points() is essentially invalidated at this point.
void rename_entry_point(const std::string &old_name, const std::string &new_name,
spv::ExecutionModel execution_model);
const SPIREntryPoint &get_entry_point(const std::string &name, spv::ExecutionModel execution_model) const;
SPIREntryPoint &get_entry_point(const std::string &name, spv::ExecutionModel execution_model);
ExecutionModel execution_model);
const SPIREntryPoint &get_entry_point(const std::string &name, ExecutionModel execution_model) const;
SPIREntryPoint &get_entry_point(const std::string &name, ExecutionModel execution_model);
const std::string &get_cleansed_entry_point_name(const std::string &name,
spv::ExecutionModel execution_model) const;
ExecutionModel execution_model) const;
// Traverses all reachable opcodes and sets active_builtins to a bitmask of all builtin variables which are accessed in the shader.
void update_active_builtins();
bool has_active_builtin(spv::BuiltIn builtin, spv::StorageClass storage) const;
bool has_active_builtin(BuiltIn builtin, StorageClass storage) const;
// Query and modify OpExecutionMode.
const Bitset &get_execution_mode_bitset() const;
void unset_execution_mode(spv::ExecutionMode mode);
void set_execution_mode(spv::ExecutionMode mode, uint32_t arg0 = 0, uint32_t arg1 = 0, uint32_t arg2 = 0);
void unset_execution_mode(ExecutionMode mode);
void set_execution_mode(ExecutionMode mode, uint32_t arg0 = 0, uint32_t arg1 = 0, uint32_t arg2 = 0);
// Gets argument for an execution mode (LocalSize, Invocations, OutputVertices).
// For LocalSize or LocalSizeId, the index argument is used to select the dimension (X = 0, Y = 1, Z = 2).
@@ -369,8 +377,8 @@ public:
// LocalSizeId query returns an ID. If LocalSizeId execution mode is not used, it returns 0.
// LocalSize always returns a literal. If execution mode is LocalSizeId,
// the literal (spec constant or not) is still returned.
uint32_t get_execution_mode_argument(spv::ExecutionMode mode, uint32_t index = 0) const;
spv::ExecutionModel get_execution_model() const;
uint32_t get_execution_mode_argument(ExecutionMode mode, uint32_t index = 0) const;
ExecutionModel get_execution_model() const;
bool is_tessellation_shader() const;
bool is_tessellating_triangles() const;
@@ -483,7 +491,7 @@ public:
// If the decoration was declared, sets the word_offset to an offset into the provided SPIR-V binary buffer and returns true,
// otherwise, returns false.
// If the decoration does not have any value attached to it (e.g. DecorationRelaxedPrecision), this function will also return false.
bool get_binary_offset_for_decoration(VariableID id, spv::Decoration decoration, uint32_t &word_offset) const;
bool get_binary_offset_for_decoration(VariableID id, Decoration decoration, uint32_t &word_offset) const;
// HLSL counter buffer reflection interface.
// Append/Consume/Increment/Decrement in HLSL is implemented as two "neighbor" buffer objects where
@@ -509,7 +517,7 @@ public:
bool buffer_get_hlsl_counter_buffer(VariableID id, uint32_t &counter_id) const;
// Gets the list of all SPIR-V Capabilities which were declared in the SPIR-V module.
const SmallVector<spv::Capability> &get_declared_capabilities() const;
const SmallVector<Capability> &get_declared_capabilities() const;
// Gets the list of all SPIR-V extensions which were declared in the SPIR-V module.
const SmallVector<std::string> &get_declared_extensions() const;
@@ -672,14 +680,14 @@ protected:
const SPIREntryPoint &get_entry_point() const;
SPIREntryPoint &get_entry_point();
static bool is_tessellation_shader(spv::ExecutionModel model);
static bool is_tessellation_shader(ExecutionModel model);
virtual std::string to_name(uint32_t id, bool allow_alias = true) const;
bool is_builtin_variable(const SPIRVariable &var) const;
bool is_builtin_type(const SPIRType &type) const;
bool is_hidden_variable(const SPIRVariable &var, bool include_builtins = false) const;
bool is_immutable(uint32_t id) const;
bool is_member_builtin(const SPIRType &type, uint32_t index, spv::BuiltIn *builtin) const;
bool is_member_builtin(const SPIRType &type, uint32_t index, BuiltIn *builtin) const;
bool is_scalar(const SPIRType &type) const;
bool is_vector(const SPIRType &type) const;
bool is_matrix(const SPIRType &type) const;
@@ -788,11 +796,12 @@ protected:
// Used internally to implement various traversals for queries.
struct OpcodeHandler
{
explicit OpcodeHandler(Compiler &compiler_) : compiler(compiler_) {}
virtual ~OpcodeHandler() = default;
// Return true if traversal should continue.
// If false, traversal will end immediately.
virtual bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) = 0;
virtual bool handle(Op opcode, const uint32_t *args, uint32_t length) = 0;
virtual bool handle_terminator(const SPIRBlock &)
{
return true;
@@ -823,20 +832,40 @@ protected:
{
return true;
}
Compiler &compiler;
std::unordered_map<uint32_t, uint32_t> result_types;
const SPIRType *get_expression_result_type(uint32_t id) const;
bool enable_result_types = false;
template <typename T> T &get(uint32_t id)
{
return compiler.get<T>(id);
}
template <typename T> const T &get(uint32_t id) const
{
return compiler.get<T>(id);
}
template <typename T, typename... P>
T &set(uint32_t id, P &&... args)
{
return compiler.set<T>(id, std::forward<P>(args)...);
}
};
struct BufferAccessHandler : OpcodeHandler
{
BufferAccessHandler(const Compiler &compiler_, SmallVector<BufferRange> &ranges_, uint32_t id_)
: compiler(compiler_)
: OpcodeHandler(const_cast<Compiler &>(compiler_))
, ranges(ranges_)
, id(id_)
{
}
bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override;
bool handle(Op opcode, const uint32_t *args, uint32_t length) override;
const Compiler &compiler;
SmallVector<BufferRange> &ranges;
uint32_t id;
@@ -846,29 +875,26 @@ protected:
struct InterfaceVariableAccessHandler : OpcodeHandler
{
InterfaceVariableAccessHandler(const Compiler &compiler_, std::unordered_set<VariableID> &variables_)
: compiler(compiler_)
: OpcodeHandler(const_cast<Compiler &>(compiler_))
, variables(variables_)
{
}
bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override;
bool handle(Op opcode, const uint32_t *args, uint32_t length) override;
const Compiler &compiler;
std::unordered_set<VariableID> &variables;
};
struct CombinedImageSamplerHandler : OpcodeHandler
{
CombinedImageSamplerHandler(Compiler &compiler_)
: compiler(compiler_)
explicit CombinedImageSamplerHandler(Compiler &compiler_)
: OpcodeHandler(compiler_)
{
}
bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override;
bool handle(Op opcode, const uint32_t *args, uint32_t length) override;
bool begin_function_scope(const uint32_t *args, uint32_t length) override;
bool end_function_scope(const uint32_t *args, uint32_t length) override;
Compiler &compiler;
// Each function in the call stack needs its own remapping for parameters so we can deduce which global variable each texture/sampler the parameter is statically bound to.
std::stack<std::unordered_map<uint32_t, uint32_t>> parameter_remapping;
std::stack<SPIRFunction *> functions;
@@ -882,27 +908,24 @@ protected:
struct DummySamplerForCombinedImageHandler : OpcodeHandler
{
DummySamplerForCombinedImageHandler(Compiler &compiler_)
: compiler(compiler_)
explicit DummySamplerForCombinedImageHandler(Compiler &compiler_)
: OpcodeHandler(compiler_)
{
}
bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override;
Compiler &compiler;
bool handle(Op opcode, const uint32_t *args, uint32_t length) override;
bool need_dummy_sampler = false;
};
struct ActiveBuiltinHandler : OpcodeHandler
{
ActiveBuiltinHandler(Compiler &compiler_)
: compiler(compiler_)
explicit ActiveBuiltinHandler(Compiler &compiler_)
: OpcodeHandler(compiler_)
{
}
bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override;
Compiler &compiler;
bool handle(Op opcode, const uint32_t *args, uint32_t length) override;
void handle_builtin(const SPIRType &type, spv::BuiltIn builtin, const Bitset &decoration_flags);
void handle_builtin(const SPIRType &type, BuiltIn builtin, const Bitset &decoration_flags);
void add_if_builtin(uint32_t id);
void add_if_builtin_or_block(uint32_t id);
void add_if_builtin(uint32_t id, bool allow_blocks);
@@ -954,13 +977,12 @@ protected:
struct CombinedImageSamplerDrefHandler : OpcodeHandler
{
CombinedImageSamplerDrefHandler(Compiler &compiler_)
: compiler(compiler_)
explicit CombinedImageSamplerDrefHandler(Compiler &compiler_)
: OpcodeHandler(compiler_)
{
}
bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override;
bool handle(Op opcode, const uint32_t *args, uint32_t length) override;
Compiler &compiler;
std::unordered_set<uint32_t> dref_combined_samplers;
};
@@ -968,14 +990,13 @@ protected:
{
CombinedImageSamplerUsageHandler(Compiler &compiler_,
const std::unordered_set<uint32_t> &dref_combined_samplers_)
: compiler(compiler_)
: OpcodeHandler(compiler_)
, dref_combined_samplers(dref_combined_samplers_)
{
}
bool begin_function_scope(const uint32_t *args, uint32_t length) override;
bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override;
Compiler &compiler;
bool handle(Op opcode, const uint32_t *args, uint32_t length) override;
const std::unordered_set<uint32_t> &dref_combined_samplers;
std::unordered_map<uint32_t, std::unordered_set<uint32_t>> dependency_hierarchy;
@@ -997,8 +1018,7 @@ protected:
explicit CFGBuilder(Compiler &compiler_);
bool follow_function_call(const SPIRFunction &func) override;
bool handle(spv::Op op, const uint32_t *args, uint32_t length) override;
Compiler &compiler;
bool handle(Op op, const uint32_t *args, uint32_t length) override;
std::unordered_map<uint32_t, std::unique_ptr<CFG>> function_cfgs;
};
@@ -1012,10 +1032,9 @@ protected:
void notify_variable_access(uint32_t id, uint32_t block);
bool id_is_phi_variable(uint32_t id) const;
bool id_is_potential_temporary(uint32_t id) const;
bool handle(spv::Op op, const uint32_t *args, uint32_t length) override;
bool handle(Op op, const uint32_t *args, uint32_t length) override;
bool handle_terminator(const SPIRBlock &block) override;
Compiler &compiler;
SPIRFunction &entry;
std::unordered_map<uint32_t, std::unordered_set<uint32_t>> accessed_variables_to_block;
std::unordered_map<uint32_t, std::unordered_set<uint32_t>> accessed_temporaries_to_block;
@@ -1033,9 +1052,8 @@ protected:
{
StaticExpressionAccessHandler(Compiler &compiler_, uint32_t variable_id_);
bool follow_function_call(const SPIRFunction &) override;
bool handle(spv::Op op, const uint32_t *args, uint32_t length) override;
bool handle(Op op, const uint32_t *args, uint32_t length) override;
Compiler &compiler;
uint32_t variable_id;
uint32_t static_expression = 0;
uint32_t write_count = 0;
@@ -1049,8 +1067,7 @@ protected:
struct PhysicalStorageBufferPointerHandler : OpcodeHandler
{
explicit PhysicalStorageBufferPointerHandler(Compiler &compiler_);
bool handle(spv::Op op, const uint32_t *args, uint32_t length) override;
Compiler &compiler;
bool handle(Op op, const uint32_t *args, uint32_t length) override;
std::unordered_set<uint32_t> non_block_types;
std::unordered_map<uint32_t, PhysicalBlockMeta> physical_block_type_meta;
@@ -1077,12 +1094,11 @@ protected:
struct GeometryEmitDisocveryHandler : OpcodeHandler
{
explicit GeometryEmitDisocveryHandler(Compiler &compiler_)
: compiler(compiler_)
: OpcodeHandler(compiler_)
{
}
Compiler &compiler;
bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override;
bool handle(Op opcode, const uint32_t *args, uint32_t length) override;
bool begin_function_scope(const uint32_t *, uint32_t) override;
bool end_function_scope(const uint32_t *, uint32_t) override;
SmallVector<SPIRFunction *> function_stack;
@@ -1097,16 +1113,15 @@ protected:
struct InterlockedResourceAccessHandler : OpcodeHandler
{
InterlockedResourceAccessHandler(Compiler &compiler_, uint32_t entry_point_id)
: compiler(compiler_)
: OpcodeHandler(compiler_)
{
call_stack.push_back(entry_point_id);
}
bool handle(spv::Op op, const uint32_t *args, uint32_t length) override;
bool handle(Op op, const uint32_t *args, uint32_t length) override;
bool begin_function_scope(const uint32_t *args, uint32_t length) override;
bool end_function_scope(const uint32_t *args, uint32_t length) override;
Compiler &compiler;
bool in_crit_sec = false;
uint32_t interlock_function_id = 0;
@@ -1122,17 +1137,16 @@ protected:
struct InterlockedResourceAccessPrepassHandler : OpcodeHandler
{
InterlockedResourceAccessPrepassHandler(Compiler &compiler_, uint32_t entry_point_id)
: compiler(compiler_)
: OpcodeHandler(compiler_)
{
call_stack.push_back(entry_point_id);
}
void rearm_current_block(const SPIRBlock &block) override;
bool handle(spv::Op op, const uint32_t *args, uint32_t length) override;
bool handle(Op op, const uint32_t *args, uint32_t length) override;
bool begin_function_scope(const uint32_t *args, uint32_t length) override;
bool end_function_scope(const uint32_t *args, uint32_t length) override;
Compiler &compiler;
uint32_t interlock_function_id = 0;
uint32_t current_block_id = 0;
bool split_function_case = false;
@@ -1149,11 +1163,11 @@ protected:
std::unordered_map<uint32_t, std::string> declared_block_names;
bool instruction_to_result_type(uint32_t &result_type, uint32_t &result_id, spv::Op op, const uint32_t *args,
uint32_t length);
static bool instruction_to_result_type(
uint32_t &result_type, uint32_t &result_id, Op op, const uint32_t *args, uint32_t length);
Bitset combined_decoration_for_member(const SPIRType &type, uint32_t index) const;
static bool is_desktop_only_format(spv::ImageFormat format);
static bool is_desktop_only_format(ImageFormat format);
bool is_depth_image(const SPIRType &type, uint32_t id) const;
@@ -1198,4 +1212,8 @@ private:
};
} // namespace SPIRV_CROSS_NAMESPACE
#ifdef SPIRV_CROSS_SPV_HEADER_NAMESPACE_OVERRIDE
#undef spv
#endif
#endif
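
The repeated change across these files swaps `using namespace spv;` for `using namespace SPIRV_CROSS_SPV_HEADER_NAMESPACE;` and drops explicit `spv::` qualifiers, so the SPIR-V header enums can come from an embedder-chosen namespace. The actual macro definitions are outside this diff; a hypothetical arrangement consistent with the `#undef spv` guard above might look like this (everything except SPIRV_CROSS_SPV_HEADER_NAMESPACE_OVERRIDE is a guess, not taken from the diff):

#ifdef SPIRV_CROSS_SPV_HEADER_NAMESPACE_OVERRIDE
// Redirect both the convenience macro and the 'spv' token itself to the
// override; the header would then #undef 'spv' again at the end, as above.
#define SPIRV_CROSS_SPV_HEADER_NAMESPACE SPIRV_CROSS_SPV_HEADER_NAMESPACE_OVERRIDE
#define spv SPIRV_CROSS_SPV_HEADER_NAMESPACE_OVERRIDE
#else
#define SPIRV_CROSS_SPV_HEADER_NAMESPACE spv
#endif

using namespace SPIRV_CROSS_SPV_HEADER_NAMESPACE; // BuiltIn, Decoration, ... become available unqualified
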

View File

@@ -75,6 +75,7 @@
#define SPVC_END_SAFE_SCOPE(context, error)
#endif
using namespace SPIRV_CROSS_SPV_HEADER_NAMESPACE;
using namespace std;
using namespace SPIRV_CROSS_NAMESPACE;
@@ -957,7 +958,7 @@ spvc_result spvc_compiler_mask_stage_output_by_builtin(spvc_compiler compiler, S
return SPVC_ERROR_INVALID_ARGUMENT;
}
static_cast<CompilerGLSL *>(compiler->compiler.get())->mask_stage_output_by_builtin(spv::BuiltIn(builtin));
static_cast<CompilerGLSL *>(compiler->compiler.get())->mask_stage_output_by_builtin(BuiltIn(builtin));
return SPVC_SUCCESS;
#else
(void)builtin;
@@ -1080,7 +1081,7 @@ spvc_result spvc_compiler_hlsl_add_resource_binding(spvc_compiler compiler,
HLSLResourceBinding bind;
bind.binding = binding->binding;
bind.desc_set = binding->desc_set;
bind.stage = static_cast<spv::ExecutionModel>(binding->stage);
bind.stage = static_cast<ExecutionModel>(binding->stage);
bind.cbv.register_binding = binding->cbv.register_binding;
bind.cbv.register_space = binding->cbv.register_space;
bind.uav.register_binding = binding->uav.register_binding;
@@ -1109,7 +1110,7 @@ spvc_bool spvc_compiler_hlsl_is_resource_used(spvc_compiler compiler, SpvExecuti
}
auto &hlsl = *static_cast<CompilerHLSL *>(compiler->compiler.get());
return hlsl.is_hlsl_resource_binding_used(static_cast<spv::ExecutionModel>(model), set, binding) ? SPVC_TRUE :
return hlsl.is_hlsl_resource_binding_used(static_cast<ExecutionModel>(model), set, binding) ? SPVC_TRUE :
SPVC_FALSE;
#else
(void)model;
@@ -1240,7 +1241,7 @@ spvc_result spvc_compiler_msl_add_vertex_attribute(spvc_compiler compiler, const
MSLShaderInterfaceVariable attr;
attr.location = va->location;
attr.format = static_cast<MSLShaderVariableFormat>(va->format);
attr.builtin = static_cast<spv::BuiltIn>(va->builtin);
attr.builtin = static_cast<BuiltIn>(va->builtin);
msl.add_msl_shader_input(attr);
return SPVC_SUCCESS;
#else
@@ -1263,7 +1264,7 @@ spvc_result spvc_compiler_msl_add_shader_input(spvc_compiler compiler, const spv
MSLShaderInterfaceVariable input;
input.location = si->location;
input.format = static_cast<MSLShaderVariableFormat>(si->format);
input.builtin = static_cast<spv::BuiltIn>(si->builtin);
input.builtin = static_cast<BuiltIn>(si->builtin);
input.vecsize = si->vecsize;
msl.add_msl_shader_input(input);
return SPVC_SUCCESS;
@@ -1287,7 +1288,7 @@ spvc_result spvc_compiler_msl_add_shader_input_2(spvc_compiler compiler, const s
MSLShaderInterfaceVariable input;
input.location = si->location;
input.format = static_cast<MSLShaderVariableFormat>(si->format);
input.builtin = static_cast<spv::BuiltIn>(si->builtin);
input.builtin = static_cast<BuiltIn>(si->builtin);
input.vecsize = si->vecsize;
input.rate = static_cast<MSLShaderVariableRate>(si->rate);
msl.add_msl_shader_input(input);
@@ -1312,7 +1313,7 @@ spvc_result spvc_compiler_msl_add_shader_output(spvc_compiler compiler, const sp
MSLShaderInterfaceVariable output;
output.location = so->location;
output.format = static_cast<MSLShaderVariableFormat>(so->format);
output.builtin = static_cast<spv::BuiltIn>(so->builtin);
output.builtin = static_cast<BuiltIn>(so->builtin);
output.vecsize = so->vecsize;
msl.add_msl_shader_output(output);
return SPVC_SUCCESS;
@@ -1336,7 +1337,7 @@ spvc_result spvc_compiler_msl_add_shader_output_2(spvc_compiler compiler, const
MSLShaderInterfaceVariable output;
output.location = so->location;
output.format = static_cast<MSLShaderVariableFormat>(so->format);
output.builtin = static_cast<spv::BuiltIn>(so->builtin);
output.builtin = static_cast<BuiltIn>(so->builtin);
output.vecsize = so->vecsize;
output.rate = static_cast<MSLShaderVariableRate>(so->rate);
msl.add_msl_shader_output(output);
@@ -1362,7 +1363,7 @@ spvc_result spvc_compiler_msl_add_resource_binding(spvc_compiler compiler,
MSLResourceBinding bind;
bind.binding = binding->binding;
bind.desc_set = binding->desc_set;
bind.stage = static_cast<spv::ExecutionModel>(binding->stage);
bind.stage = static_cast<ExecutionModel>(binding->stage);
bind.msl_buffer = binding->msl_buffer;
bind.msl_texture = binding->msl_texture;
bind.msl_sampler = binding->msl_sampler;
@@ -1389,7 +1390,7 @@ spvc_result spvc_compiler_msl_add_resource_binding_2(spvc_compiler compiler,
MSLResourceBinding bind;
bind.binding = binding->binding;
bind.desc_set = binding->desc_set;
bind.stage = static_cast<spv::ExecutionModel>(binding->stage);
bind.stage = static_cast<ExecutionModel>(binding->stage);
bind.msl_buffer = binding->msl_buffer;
bind.msl_texture = binding->msl_texture;
bind.msl_sampler = binding->msl_sampler;
@@ -1535,7 +1536,7 @@ spvc_bool spvc_compiler_msl_is_resource_used(spvc_compiler compiler, SpvExecutio
}
auto &msl = *static_cast<CompilerMSL *>(compiler->compiler.get());
return msl.is_msl_resource_binding_used(static_cast<spv::ExecutionModel>(model), set, binding) ? SPVC_TRUE :
return msl.is_msl_resource_binding_used(static_cast<ExecutionModel>(model), set, binding) ? SPVC_TRUE :
SPVC_FALSE;
#else
(void)model;
@@ -2082,13 +2083,13 @@ spvc_result spvc_resources_get_builtin_resource_list_for_type(
void spvc_compiler_set_decoration(spvc_compiler compiler, SpvId id, SpvDecoration decoration, unsigned argument)
{
compiler->compiler->set_decoration(id, static_cast<spv::Decoration>(decoration), argument);
compiler->compiler->set_decoration(id, static_cast<Decoration>(decoration), argument);
}
void spvc_compiler_set_decoration_string(spvc_compiler compiler, SpvId id, SpvDecoration decoration,
const char *argument)
{
compiler->compiler->set_decoration_string(id, static_cast<spv::Decoration>(decoration), argument);
compiler->compiler->set_decoration_string(id, static_cast<Decoration>(decoration), argument);
}
void spvc_compiler_set_name(spvc_compiler compiler, SpvId id, const char *argument)
@@ -2099,13 +2100,13 @@ void spvc_compiler_set_name(spvc_compiler compiler, SpvId id, const char *argume
void spvc_compiler_set_member_decoration(spvc_compiler compiler, spvc_type_id id, unsigned member_index,
SpvDecoration decoration, unsigned argument)
{
compiler->compiler->set_member_decoration(id, member_index, static_cast<spv::Decoration>(decoration), argument);
compiler->compiler->set_member_decoration(id, member_index, static_cast<Decoration>(decoration), argument);
}
void spvc_compiler_set_member_decoration_string(spvc_compiler compiler, spvc_type_id id, unsigned member_index,
SpvDecoration decoration, const char *argument)
{
compiler->compiler->set_member_decoration_string(id, member_index, static_cast<spv::Decoration>(decoration),
compiler->compiler->set_member_decoration_string(id, member_index, static_cast<Decoration>(decoration),
argument);
}
@@ -2116,24 +2117,24 @@ void spvc_compiler_set_member_name(spvc_compiler compiler, spvc_type_id id, unsi
void spvc_compiler_unset_decoration(spvc_compiler compiler, SpvId id, SpvDecoration decoration)
{
compiler->compiler->unset_decoration(id, static_cast<spv::Decoration>(decoration));
compiler->compiler->unset_decoration(id, static_cast<Decoration>(decoration));
}
void spvc_compiler_unset_member_decoration(spvc_compiler compiler, spvc_type_id id, unsigned member_index,
SpvDecoration decoration)
{
compiler->compiler->unset_member_decoration(id, member_index, static_cast<spv::Decoration>(decoration));
compiler->compiler->unset_member_decoration(id, member_index, static_cast<Decoration>(decoration));
}
spvc_bool spvc_compiler_has_decoration(spvc_compiler compiler, SpvId id, SpvDecoration decoration)
{
return compiler->compiler->has_decoration(id, static_cast<spv::Decoration>(decoration)) ? SPVC_TRUE : SPVC_FALSE;
return compiler->compiler->has_decoration(id, static_cast<Decoration>(decoration)) ? SPVC_TRUE : SPVC_FALSE;
}
spvc_bool spvc_compiler_has_member_decoration(spvc_compiler compiler, spvc_type_id id, unsigned member_index,
SpvDecoration decoration)
{
return compiler->compiler->has_member_decoration(id, member_index, static_cast<spv::Decoration>(decoration)) ?
return compiler->compiler->has_member_decoration(id, member_index, static_cast<Decoration>(decoration)) ?
SPVC_TRUE :
SPVC_FALSE;
}
@@ -2145,24 +2146,24 @@ const char *spvc_compiler_get_name(spvc_compiler compiler, SpvId id)
unsigned spvc_compiler_get_decoration(spvc_compiler compiler, SpvId id, SpvDecoration decoration)
{
return compiler->compiler->get_decoration(id, static_cast<spv::Decoration>(decoration));
return compiler->compiler->get_decoration(id, static_cast<Decoration>(decoration));
}
const char *spvc_compiler_get_decoration_string(spvc_compiler compiler, SpvId id, SpvDecoration decoration)
{
return compiler->compiler->get_decoration_string(id, static_cast<spv::Decoration>(decoration)).c_str();
return compiler->compiler->get_decoration_string(id, static_cast<Decoration>(decoration)).c_str();
}
unsigned spvc_compiler_get_member_decoration(spvc_compiler compiler, spvc_type_id id, unsigned member_index,
SpvDecoration decoration)
{
return compiler->compiler->get_member_decoration(id, member_index, static_cast<spv::Decoration>(decoration));
return compiler->compiler->get_member_decoration(id, member_index, static_cast<Decoration>(decoration));
}
const char *spvc_compiler_get_member_decoration_string(spvc_compiler compiler, spvc_type_id id, unsigned member_index,
SpvDecoration decoration)
{
return compiler->compiler->get_member_decoration_string(id, member_index, static_cast<spv::Decoration>(decoration))
return compiler->compiler->get_member_decoration_string(id, member_index, static_cast<Decoration>(decoration))
.c_str();
}
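
These wrappers only cast the C enums (SpvDecoration, SpvBuiltIn, ...) to the internal unqualified types before forwarding, so the public C API is unchanged. For context, a caller-side sketch of the decoration accessors using the standard SPIRV-Cross C API; the function name and the `spirv_words`/`word_count` inputs are assumptions for the example:

#include <spirv_cross_c.h>
#include <stdio.h>

void reflect_uniform_buffers(const SpvId *spirv_words, size_t word_count)
{
	spvc_context context = NULL;
	spvc_parsed_ir ir = NULL;
	spvc_compiler compiler = NULL;
	spvc_resources resources = NULL;
	const spvc_reflected_resource *list = NULL;
	size_t count = 0;

	spvc_context_create(&context);
	spvc_context_parse_spirv(context, spirv_words, word_count, &ir);
	spvc_context_create_compiler(context, SPVC_BACKEND_GLSL, ir, SPVC_CAPTURE_MODE_TAKE_OWNERSHIP, &compiler);

	spvc_compiler_create_shader_resources(compiler, &resources);
	spvc_resources_get_resource_list_for_type(resources, SPVC_RESOURCE_TYPE_UNIFORM_BUFFER, &list, &count);

	for (size_t i = 0; i < count; i++)
	{
		// The C-side SpvDecoration values are cast to the internal enum as shown above.
		unsigned set = spvc_compiler_get_decoration(compiler, list[i].id, SpvDecorationDescriptorSet);
		unsigned binding = spvc_compiler_get_decoration(compiler, list[i].id, SpvDecorationBinding);
		printf("%s: set = %u, binding = %u\n", list[i].name, set, binding);
	}

	spvc_context_destroy(context); // frees everything allocated through the context
}
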
@@ -2207,7 +2208,7 @@ spvc_result spvc_compiler_set_entry_point(spvc_compiler compiler, const char *na
{
SPVC_BEGIN_SAFE_SCOPE
{
compiler->compiler->set_entry_point(name, static_cast<spv::ExecutionModel>(model));
compiler->compiler->set_entry_point(name, static_cast<ExecutionModel>(model));
}
SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_INVALID_ARGUMENT)
return SPVC_SUCCESS;
@@ -2218,7 +2219,7 @@ spvc_result spvc_compiler_rename_entry_point(spvc_compiler compiler, const char
{
SPVC_BEGIN_SAFE_SCOPE
{
compiler->compiler->rename_entry_point(old_name, new_name, static_cast<spv::ExecutionModel>(model));
compiler->compiler->rename_entry_point(old_name, new_name, static_cast<ExecutionModel>(model));
}
SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_INVALID_ARGUMENT)
return SPVC_SUCCESS;
@@ -2230,7 +2231,7 @@ const char *spvc_compiler_get_cleansed_entry_point_name(spvc_compiler compiler,
SPVC_BEGIN_SAFE_SCOPE
{
auto cleansed_name =
compiler->compiler->get_cleansed_entry_point_name(name, static_cast<spv::ExecutionModel>(model));
compiler->compiler->get_cleansed_entry_point_name(name, static_cast<ExecutionModel>(model));
return compiler->context->allocate_name(cleansed_name);
}
SPVC_END_SAFE_SCOPE(compiler->context, nullptr)
@@ -2238,19 +2239,19 @@ const char *spvc_compiler_get_cleansed_entry_point_name(spvc_compiler compiler,
void spvc_compiler_set_execution_mode(spvc_compiler compiler, SpvExecutionMode mode)
{
compiler->compiler->set_execution_mode(static_cast<spv::ExecutionMode>(mode));
compiler->compiler->set_execution_mode(static_cast<ExecutionMode>(mode));
}
void spvc_compiler_set_execution_mode_with_arguments(spvc_compiler compiler, SpvExecutionMode mode, unsigned arg0,
unsigned arg1,
unsigned arg2)
{
compiler->compiler->set_execution_mode(static_cast<spv::ExecutionMode>(mode), arg0, arg1, arg2);
compiler->compiler->set_execution_mode(static_cast<ExecutionMode>(mode), arg0, arg1, arg2);
}
void spvc_compiler_unset_execution_mode(spvc_compiler compiler, SpvExecutionMode mode)
{
compiler->compiler->unset_execution_mode(static_cast<spv::ExecutionMode>(mode));
compiler->compiler->unset_execution_mode(static_cast<ExecutionMode>(mode));
}
spvc_result spvc_compiler_get_execution_modes(spvc_compiler compiler, const SpvExecutionMode **modes, size_t *num_modes)
@@ -2272,13 +2273,13 @@ spvc_result spvc_compiler_get_execution_modes(spvc_compiler compiler, const SpvE
unsigned spvc_compiler_get_execution_mode_argument(spvc_compiler compiler, SpvExecutionMode mode)
{
return compiler->compiler->get_execution_mode_argument(static_cast<spv::ExecutionMode>(mode));
return compiler->compiler->get_execution_mode_argument(static_cast<ExecutionMode>(mode));
}
unsigned spvc_compiler_get_execution_mode_argument_by_index(spvc_compiler compiler, SpvExecutionMode mode,
unsigned index)
{
return compiler->compiler->get_execution_mode_argument(static_cast<spv::ExecutionMode>(mode), index);
return compiler->compiler->get_execution_mode_argument(static_cast<ExecutionMode>(mode), index);
}
SpvExecutionModel spvc_compiler_get_execution_model(spvc_compiler compiler)
@@ -2293,7 +2294,7 @@ void spvc_compiler_update_active_builtins(spvc_compiler compiler)
spvc_bool spvc_compiler_has_active_builtin(spvc_compiler compiler, SpvBuiltIn builtin, SpvStorageClass storage)
{
return compiler->compiler->has_active_builtin(static_cast<spv::BuiltIn>(builtin), static_cast<spv::StorageClass>(storage)) ?
return compiler->compiler->has_active_builtin(static_cast<BuiltIn>(builtin), static_cast<StorageClass>(storage)) ?
SPVC_TRUE :
SPVC_FALSE;
}
@@ -2722,7 +2723,7 @@ spvc_bool spvc_compiler_get_binary_offset_for_decoration(spvc_compiler compiler,
unsigned *word_offset)
{
uint32_t off = 0;
bool ret = compiler->compiler->get_binary_offset_for_decoration(id, static_cast<spv::Decoration>(decoration), off);
bool ret = compiler->compiler->get_binary_offset_for_decoration(id, static_cast<Decoration>(decoration), off);
if (ret)
{
*word_offset = off;
@@ -2755,7 +2756,7 @@ spvc_result spvc_compiler_get_declared_capabilities(spvc_compiler compiler, cons
size_t *num_capabilities)
{
auto &caps = compiler->compiler->get_declared_capabilities();
static_assert(sizeof(SpvCapability) == sizeof(spv::Capability), "Enum size mismatch.");
static_assert(sizeof(SpvCapability) == sizeof(Capability), "Enum size mismatch.");
*capabilities = reinterpret_cast<const SpvCapability *>(caps.data());
*num_capabilities = caps.size();
return SPVC_SUCCESS;

View File

@@ -26,7 +26,7 @@
#include <assert.h>
using namespace std;
using namespace spv;
using namespace SPIRV_CROSS_SPV_HEADER_NAMESPACE;
namespace SPIRV_CROSS_NAMESPACE
{

View File

@@ -30,6 +30,7 @@
namespace SPIRV_CROSS_NAMESPACE
{
using namespace SPIRV_CROSS_SPV_HEADER_NAMESPACE;
// This data structure holds all information needed to perform cross-compilation and reflection.
// It is the output of the Parser, but any implementation could create this structure.
@@ -87,7 +88,7 @@ public:
// Declared capabilities and extensions in the SPIR-V module.
// Not really used except for reflection at the moment.
SmallVector<spv::Capability> declared_capabilities;
SmallVector<Capability> declared_capabilities;
SmallVector<std::string> declared_extensions;
// Meta data about blocks. The cross-compiler needs to query if a block is either of these types.
@@ -111,7 +112,7 @@ public:
struct Source
{
spv::SourceLanguage lang = spv::SourceLanguageUnknown;
SourceLanguage lang = SourceLanguageUnknown;
uint32_t version = 0;
bool es = false;
bool known = false;
@@ -122,8 +123,8 @@ public:
Source source;
spv::AddressingModel addressing_model = spv::AddressingModelMax;
spv::MemoryModel memory_model = spv::MemoryModelMax;
AddressingModel addressing_model = AddressingModelMax;
MemoryModel memory_model = MemoryModelMax;
// Decoration handling methods.
// Can be useful for simple "raw" reflection.
@@ -131,25 +132,25 @@ public:
// and might as well just have the whole suite of decoration/name handling in one place.
void set_name(ID id, const std::string &name);
const std::string &get_name(ID id) const;
void set_decoration(ID id, spv::Decoration decoration, uint32_t argument = 0);
void set_decoration_string(ID id, spv::Decoration decoration, const std::string &argument);
bool has_decoration(ID id, spv::Decoration decoration) const;
uint32_t get_decoration(ID id, spv::Decoration decoration) const;
const std::string &get_decoration_string(ID id, spv::Decoration decoration) const;
void set_decoration(ID id, Decoration decoration, uint32_t argument = 0);
void set_decoration_string(ID id, Decoration decoration, const std::string &argument);
bool has_decoration(ID id, Decoration decoration) const;
uint32_t get_decoration(ID id, Decoration decoration) const;
const std::string &get_decoration_string(ID id, Decoration decoration) const;
const Bitset &get_decoration_bitset(ID id) const;
void unset_decoration(ID id, spv::Decoration decoration);
void unset_decoration(ID id, Decoration decoration);
// Decoration handling methods (for members of a struct).
void set_member_name(TypeID id, uint32_t index, const std::string &name);
const std::string &get_member_name(TypeID id, uint32_t index) const;
void set_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration, uint32_t argument = 0);
void set_member_decoration_string(TypeID id, uint32_t index, spv::Decoration decoration,
void set_member_decoration(TypeID id, uint32_t index, Decoration decoration, uint32_t argument = 0);
void set_member_decoration_string(TypeID id, uint32_t index, Decoration decoration,
const std::string &argument);
uint32_t get_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration) const;
const std::string &get_member_decoration_string(TypeID id, uint32_t index, spv::Decoration decoration) const;
bool has_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration) const;
uint32_t get_member_decoration(TypeID id, uint32_t index, Decoration decoration) const;
const std::string &get_member_decoration_string(TypeID id, uint32_t index, Decoration decoration) const;
bool has_member_decoration(TypeID id, uint32_t index, Decoration decoration) const;
const Bitset &get_member_decoration_bitset(TypeID id, uint32_t index) const;
void unset_member_decoration(TypeID id, uint32_t index, spv::Decoration decoration);
void unset_member_decoration(TypeID id, uint32_t index, Decoration decoration);
void mark_used_as_array_length(ID id);
uint32_t increase_bound_by(uint32_t count);

View File

@@ -24,7 +24,7 @@
#include "spirv_cross_util.hpp"
#include "spirv_common.hpp"
using namespace spv;
using namespace SPIRV_CROSS_SPV_HEADER_NAMESPACE;
using namespace SPIRV_CROSS_NAMESPACE;
namespace spirv_cross_util
@@ -34,10 +34,10 @@ void rename_interface_variable(Compiler &compiler, const SmallVector<Resource> &
{
for (auto &v : resources)
{
if (!compiler.has_decoration(v.id, spv::DecorationLocation))
if (!compiler.has_decoration(v.id, DecorationLocation))
continue;
auto loc = compiler.get_decoration(v.id, spv::DecorationLocation);
auto loc = compiler.get_decoration(v.id, DecorationLocation);
if (loc != location)
continue;
@@ -61,16 +61,16 @@ void inherit_combined_sampler_bindings(Compiler &compiler)
auto &samplers = compiler.get_combined_image_samplers();
for (auto &s : samplers)
{
if (compiler.has_decoration(s.image_id, spv::DecorationDescriptorSet))
if (compiler.has_decoration(s.image_id, DecorationDescriptorSet))
{
uint32_t set = compiler.get_decoration(s.image_id, spv::DecorationDescriptorSet);
compiler.set_decoration(s.combined_id, spv::DecorationDescriptorSet, set);
uint32_t set = compiler.get_decoration(s.image_id, DecorationDescriptorSet);
compiler.set_decoration(s.combined_id, DecorationDescriptorSet, set);
}
if (compiler.has_decoration(s.image_id, spv::DecorationBinding))
if (compiler.has_decoration(s.image_id, DecorationBinding))
{
uint32_t binding = compiler.get_decoration(s.image_id, spv::DecorationBinding);
compiler.set_decoration(s.combined_id, spv::DecorationBinding, binding);
uint32_t binding = compiler.get_decoration(s.image_id, DecorationBinding);
compiler.set_decoration(s.combined_id, DecorationBinding, binding);
}
}
}
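
inherit_combined_sampler_bindings() simply copies the DescriptorSet/Binding decorations from each source image onto the synthesized combined sampler. A caller-side sketch of the workflow it belongs to; the calls are standard SPIRV-Cross API, while `compiler` (any Compiler subclass, e.g. CompilerGLSL) and the naming scheme are assumptions:

// Build combined image-samplers for separate image/sampler pairs, give them
// stable names, then let them inherit set/binding from the source image.
compiler.build_combined_image_samplers();

for (auto &remap : compiler.get_combined_image_samplers())
{
	compiler.set_name(remap.combined_id,
	                  "SPIRV_Cross_Combined" + compiler.get_name(remap.image_id) +
	                      compiler.get_name(remap.sampler_id));
}

spirv_cross_util::inherit_combined_sampler_bindings(compiler);
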

View File

@@ -37,7 +37,7 @@
#endif
#include <locale.h>
using namespace spv;
using namespace SPIRV_CROSS_SPV_HEADER_NAMESPACE;
using namespace SPIRV_CROSS_NAMESPACE;
using namespace std;
@@ -267,7 +267,7 @@ static const char *to_pls_layout(PlsFormat format)
}
}
static std::pair<spv::Op, SPIRType::BaseType> pls_format_to_basetype(PlsFormat format)
static std::pair<Op, SPIRType::BaseType> pls_format_to_basetype(PlsFormat format)
{
switch (format)
{
@@ -278,17 +278,17 @@ static std::pair<spv::Op, SPIRType::BaseType> pls_format_to_basetype(PlsFormat f
case PlsRGB10A2:
case PlsRGBA8:
case PlsRG16:
return std::make_pair(spv::OpTypeFloat, SPIRType::Float);
return std::make_pair(OpTypeFloat, SPIRType::Float);
case PlsRGBA8I:
case PlsRG16I:
return std::make_pair(spv::OpTypeInt, SPIRType::Int);
return std::make_pair(OpTypeInt, SPIRType::Int);
case PlsRGB10A2UI:
case PlsRGBA8UI:
case PlsRG16UI:
case PlsR32UI:
return std::make_pair(spv::OpTypeInt, SPIRType::UInt);
return std::make_pair(OpTypeInt, SPIRType::UInt);
}
}
@@ -654,6 +654,20 @@ void CompilerGLSL::find_static_extensions()
ray_tracing_is_khr = true;
break;
case CapabilityRayQueryPositionFetchKHR:
if (options.es || options.version < 460 || !options.vulkan_semantics)
SPIRV_CROSS_THROW("RayQuery Position Fetch requires Vulkan GLSL 460.");
require_extension_internal("GL_EXT_ray_tracing_position_fetch");
ray_tracing_is_khr = true;
break;
case CapabilityRayTracingPositionFetchKHR:
if (options.es || options.version < 460 || !options.vulkan_semantics)
SPIRV_CROSS_THROW("Ray Tracing Position Fetch requires Vulkan GLSL 460.");
require_extension_internal("GL_EXT_ray_tracing_position_fetch");
ray_tracing_is_khr = true;
break;
case CapabilityRayTraversalPrimitiveCullingKHR:
if (options.es || options.version < 460 || !options.vulkan_semantics)
SPIRV_CROSS_THROW("RayQuery requires Vulkan GLSL 460.");
@@ -1476,7 +1490,7 @@ string CompilerGLSL::layout_for_member(const SPIRType &type, uint32_t index)
return res;
}
const char *CompilerGLSL::format_to_glsl(spv::ImageFormat format)
const char *CompilerGLSL::format_to_glsl(ImageFormat format)
{
if (options.es && is_desktop_only_format(format))
SPIRV_CROSS_THROW("Attempting to use image format not supported in ES profile.");
@@ -4324,7 +4338,7 @@ void CompilerGLSL::emit_subgroup_arithmetic_workaround(const std::string &func,
}
}
void CompilerGLSL::emit_extension_workarounds(spv::ExecutionModel model)
void CompilerGLSL::emit_extension_workarounds(ExecutionModel model)
{
static const char *workaround_types[] = { "int", "ivec2", "ivec3", "ivec4", "uint", "uvec2", "uvec3", "uvec4",
"float", "vec2", "vec3", "vec4", "double", "dvec2", "dvec3", "dvec4" };
@@ -4643,7 +4657,7 @@ void CompilerGLSL::emit_extension_workarounds(spv::ExecutionModel model)
if (shader_subgroup_supporter.is_feature_requested(Supp::SubgroupMemBarrier))
{
if (model == spv::ExecutionModelGLCompute)
if (model == ExecutionModelGLCompute)
{
statement("#ifndef GL_KHR_shader_subgroup_basic");
statement("void subgroupMemoryBarrier() { groupMemoryBarrier(); }");
@@ -4729,7 +4743,7 @@ void CompilerGLSL::emit_extension_workarounds(spv::ExecutionModel model)
}
auto arithmetic_feature_helper =
[&](Supp::Feature feat, std::string func_name, spv::Op op, spv::GroupOperation group_op)
[&](Supp::Feature feat, std::string func_name, Op op, GroupOperation group_op)
{
if (shader_subgroup_supporter.is_feature_requested(feat))
{
@@ -6014,9 +6028,9 @@ string CompilerGLSL::constant_expression(const SPIRConstant &c,
require_extension_internal("GL_EXT_null_initializer");
return backend.constant_null_initializer;
}
else if (c.replicated && type.op != spv::OpTypeArray)
else if (c.replicated && type.op != OpTypeArray)
{
if (type.op == spv::OpTypeMatrix)
if (type.op == OpTypeMatrix)
{
uint32_t num_elements = type.columns;
// GLSL does not allow the replication constructor for matrices
@@ -6151,7 +6165,7 @@ string CompilerGLSL::constant_expression(const SPIRConstant &c,
else
return join(type_to_glsl(type), "(0)");
}
else if (c.columns() == 1 && type.op != spv::OpTypeCooperativeMatrixKHR)
else if (c.columns() == 1 && type.op != OpTypeCooperativeMatrixKHR)
{
auto res = constant_expression_vector(c, 0);
@@ -7470,29 +7484,29 @@ string CompilerGLSL::legacy_tex_op(const std::string &op, const SPIRType &imgtyp
const char *type;
switch (imgtype.image.dim)
{
case spv::Dim1D:
case Dim1D:
// Force 2D path for ES.
if (options.es)
type = (imgtype.image.arrayed && !options.es) ? "2DArray" : "2D";
else
type = (imgtype.image.arrayed && !options.es) ? "1DArray" : "1D";
break;
case spv::Dim2D:
case Dim2D:
type = (imgtype.image.arrayed && !options.es) ? "2DArray" : "2D";
break;
case spv::Dim3D:
case Dim3D:
type = "3D";
break;
case spv::DimCube:
case DimCube:
type = "Cube";
break;
case spv::DimRect:
case DimRect:
type = "2DRect";
break;
case spv::DimBuffer:
case DimBuffer:
type = "Buffer";
break;
case spv::DimSubpassData:
case DimSubpassData:
type = "2D";
break;
default:
@@ -7535,7 +7549,7 @@ string CompilerGLSL::legacy_tex_op(const std::string &op, const SPIRType &imgtyp
else
SPIRV_CROSS_THROW(join(op, " not allowed on depth samplers in legacy ES"));
if (imgtype.image.dim == spv::DimCube)
if (imgtype.image.dim == DimCube)
return "shadowCubeNV";
}
@@ -7802,7 +7816,7 @@ string CompilerGLSL::to_combined_image_sampler(VariableID image_id, VariableID s
}
}
bool CompilerGLSL::is_supported_subgroup_op_in_opengl(spv::Op op, const uint32_t *ops)
bool CompilerGLSL::is_supported_subgroup_op_in_opengl(Op op, const uint32_t *ops)
{
switch (op)
{
@@ -8064,19 +8078,19 @@ std::string CompilerGLSL::to_texture_op(const Instruction &i, bool sparse, bool
uint32_t coord_components = 0;
switch (imgtype.image.dim)
{
case spv::Dim1D:
case Dim1D:
coord_components = 1;
break;
case spv::Dim2D:
case Dim2D:
coord_components = 2;
break;
case spv::Dim3D:
case Dim3D:
coord_components = 3;
break;
case spv::DimCube:
case DimCube:
coord_components = 3;
break;
case spv::DimBuffer:
case DimBuffer:
coord_components = 1;
break;
default:
@@ -9517,6 +9531,35 @@ void CompilerGLSL::emit_subgroup_op(const Instruction &i)
auto int_type = to_signed_basetype(integer_width);
auto uint_type = to_unsigned_basetype(integer_width);
if (options.vulkan_semantics)
{
auto &return_type = get<SPIRType>(ops[0]);
switch (return_type.basetype)
{
case SPIRType::SByte:
case SPIRType::UByte:
require_extension_internal("GL_EXT_shader_subgroup_extended_types_int8");
break;
case SPIRType::Short:
case SPIRType::UShort:
require_extension_internal("GL_EXT_shader_subgroup_extended_types_int16");
break;
case SPIRType::Half:
require_extension_internal("GL_EXT_shader_subgroup_extended_types_float16");
break;
case SPIRType::Int64:
case SPIRType::UInt64:
require_extension_internal("GL_EXT_shader_subgroup_extended_types_int64");
break;
default:
break;
}
}
switch (op)
{
case OpGroupNonUniformElect:
@@ -10036,9 +10079,9 @@ string CompilerGLSL::builtin_to_glsl(BuiltIn builtin, StorageClass storage)
auto model = get_entry_point().model;
switch (model)
{
case spv::ExecutionModelIntersectionKHR:
case spv::ExecutionModelAnyHitKHR:
case spv::ExecutionModelClosestHitKHR:
case ExecutionModelIntersectionKHR:
case ExecutionModelAnyHitKHR:
case ExecutionModelClosestHitKHR:
// gl_InstanceID is allowed in these shaders.
break;
@@ -10370,6 +10413,14 @@ string CompilerGLSL::builtin_to_glsl(BuiltIn builtin, StorageClass storage)
case BuiltInCullPrimitiveEXT:
return "gl_CullPrimitiveEXT";
case BuiltInHitTriangleVertexPositionsKHR:
{
if (!options.vulkan_semantics)
SPIRV_CROSS_THROW("Need Vulkan semantics for EXT_ray_tracing_position_fetch.");
require_extension_internal("GL_EXT_ray_tracing_position_fetch");
return "gl_HitTriangleVertexPositionsEXT";
}
case BuiltInClusterIDNV:
{
if (!options.vulkan_semantics)
@@ -10490,7 +10541,7 @@ string CompilerGLSL::access_chain_internal(uint32_t base, const uint32_t *indice
bool dimension_flatten = false;
bool access_meshlet_position_y = false;
bool chain_is_builtin = false;
spv::BuiltIn chained_builtin = {};
BuiltIn chained_builtin = {};
if (auto *base_expr = maybe_get<SPIRExpression>(base))
{
@@ -10622,7 +10673,7 @@ string CompilerGLSL::access_chain_internal(uint32_t base, const uint32_t *indice
expr = join("(", expr, ")");
}
// Arrays and OpTypeCooperativeVectorNV (aka fancy arrays)
else if (!type->array.empty() || type->op == spv::OpTypeCooperativeVectorNV)
else if (!type->array.empty() || type->op == OpTypeCooperativeVectorNV)
{
// If we are flattening multidimensional arrays, only create opening bracket on first
// array index.
@@ -11021,7 +11072,7 @@ bool CompilerGLSL::check_physical_type_cast(std::string &, const SPIRType *, uin
return false;
}
bool CompilerGLSL::prepare_access_chain_for_scalar_access(std::string &, const SPIRType &, spv::StorageClass, bool &)
bool CompilerGLSL::prepare_access_chain_for_scalar_access(std::string &, const SPIRType &, StorageClass, bool &)
{
return false;
}
@@ -11868,7 +11919,7 @@ string CompilerGLSL::build_composite_combiner(uint32_t return_type, const uint32
// Can only merge swizzles for vectors.
auto &type = get<SPIRType>(return_type);
bool can_apply_swizzle_opt = type.basetype != SPIRType::Struct && type.array.empty() && type.columns == 1 &&
type.op != spv::OpTypeCooperativeMatrixKHR;
type.op != OpTypeCooperativeMatrixKHR;
bool swizzle_optimization = false;
for (uint32_t i = 0; i < length; i++)
@@ -12995,7 +13046,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
if (composite_type_is_complex)
allow_base_expression = false;
if (composite_type.op == spv::OpTypeCooperativeMatrixKHR)
if (composite_type.op == OpTypeCooperativeMatrixKHR)
allow_base_expression = false;
// Packed expressions or physical ID mapped expressions cannot be split up.
@@ -13967,7 +14018,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
}
if ((type.basetype == SPIRType::FloatE4M3 || type.basetype == SPIRType::FloatE5M2) &&
has_decoration(id, spv::DecorationSaturatedToLargestFloat8NormalConversionEXT))
has_decoration(id, DecorationSaturatedToLargestFloat8NormalConversionEXT))
{
emit_uninitialized_temporary_expression(result_type, id);
statement("saturatedConvertEXT(", to_expression(id), ", ", to_unpacked_expression(ops[2]), ");");
@@ -15272,7 +15323,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
uint32_t result_type = ops[0];
uint32_t id = ops[1];
if (type.image.dim == spv::DimSubpassData)
if (type.image.dim == DimSubpassData)
{
emit_unary_func_op(result_type, id, ops[2], "fragmentMaskFetchAMD");
}
@@ -15291,7 +15342,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
uint32_t result_type = ops[0];
uint32_t id = ops[1];
if (type.image.dim == spv::DimSubpassData)
if (type.image.dim == DimSubpassData)
{
emit_binary_func_op(result_type, id, ops[2], ops[4], "fragmentFetchAMD");
}
@@ -15501,6 +15552,11 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
flush_variable_declaration(ops[0]);
statement("rayQueryConfirmIntersectionEXT(", to_expression(ops[0]), ");");
break;
case OpRayQueryGetIntersectionTriangleVertexPositionsKHR:
flush_variable_declaration(ops[1]);
emit_uninitialized_temporary_expression(ops[0], ops[1]);
statement("rayQueryGetIntersectionTriangleVertexPositionsEXT(", to_expression(ops[2]), ", bool(", to_expression(ops[3]), "), ", to_expression(ops[1]), ");");
break;
#define GLSL_RAY_QUERY_GET_OP(op) \
case OpRayQueryGet##op##KHR: \
flush_variable_declaration(ops[2]); \
@@ -15555,21 +15611,21 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
std::string tensor_operands;
if (ops[4] == 0)
tensor_operands = "0x0u";
else if (ops[4] == spv::TensorOperandsNontemporalARMMask)
else if (ops[4] == TensorOperandsNontemporalARMMask)
tensor_operands = "gl_TensorOperandsNonTemporalARM";
else if (ops[4] == spv::TensorOperandsOutOfBoundsValueARMMask)
else if (ops[4] == TensorOperandsOutOfBoundsValueARMMask)
tensor_operands = "gl_TensorOperandsOutOfBoundsValueARM";
else if (ops[4] == (spv::TensorOperandsNontemporalARMMask | spv::TensorOperandsOutOfBoundsValueARMMask))
else if (ops[4] == (TensorOperandsNontemporalARMMask | TensorOperandsOutOfBoundsValueARMMask))
tensor_operands = "gl_TensorOperandsNonTemporalARM | gl_TensorOperandsOutOfBoundsValueARM";
else
SPIRV_CROSS_THROW("Invalid tensorOperands for tensorReadARM.");
if ((ops[4] & spv::TensorOperandsOutOfBoundsValueARMMask) && length != 6)
if ((ops[4] & TensorOperandsOutOfBoundsValueARMMask) && length != 6)
SPIRV_CROSS_THROW("gl_TensorOperandsOutOfBoundsValueARM requires an outOfBoundsValue argument.");
args.push_back(tensor_operands); // tensorOperands
}
if (length >= 6)
{
if ((length > 6) || (ops[4] & spv::TensorOperandsOutOfBoundsValueARMMask) == 0)
if ((length > 6) || (ops[4] & TensorOperandsOutOfBoundsValueARMMask) == 0)
SPIRV_CROSS_THROW("Too many arguments to tensorReadARM.");
args.push_back(to_expression(ops[5])); // outOfBoundsValue
}
@@ -15593,7 +15649,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
std::string tensor_operands;
if (ops[3] == 0)
tensor_operands = "0x0u";
else if (ops[3] == spv::TensorOperandsNontemporalARMMask)
else if (ops[3] == TensorOperandsNontemporalARMMask)
tensor_operands = "gl_TensorOperandsNonTemporalARM";
else
SPIRV_CROSS_THROW("Invalid tensorOperands for tensorWriteARM.");
@@ -15943,15 +15999,15 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
auto value_to_replicate = to_expression(ops[2]);
std::string rhs;
// Matrices don't have a replicating constructor for vectors. Need to manually replicate
if (type.op == spv::OpTypeMatrix || type.op == spv::OpTypeArray)
if (type.op == OpTypeMatrix || type.op == OpTypeArray)
{
if (type.op == spv::OpTypeArray && type.array.size() != 1)
if (type.op == OpTypeArray && type.array.size() != 1)
{
SPIRV_CROSS_THROW(
"Multi-dimensional arrays currently not supported for OpCompositeConstructReplicateEXT");
}
uint32_t num_elements = type.op == spv::OpTypeMatrix ? type.columns : type.array[0];
if (backend.use_initializer_list && type.op == spv::OpTypeArray)
uint32_t num_elements = type.op == OpTypeMatrix ? type.columns : type.array[0];
if (backend.use_initializer_list && type.op == OpTypeArray)
{
rhs += "{";
}
@@ -15966,7 +16022,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
if (i < num_elements - 1)
rhs += ", ";
}
if (backend.use_initializer_list && type.op == spv::OpTypeArray)
if (backend.use_initializer_list && type.op == OpTypeArray)
rhs += "}";
else
rhs += ")";
@@ -16845,7 +16901,7 @@ string CompilerGLSL::type_to_glsl(const SPIRType &type, uint32_t id)
require_extension_internal("GL_ARB_shader_atomic_counters");
}
if (type.op == spv::OpTypeCooperativeVectorNV)
if (type.op == OpTypeCooperativeVectorNV)
{
require_extension_internal("GL_NV_cooperative_vector");
if (!options.vulkan_semantics)
@@ -16860,7 +16916,7 @@ string CompilerGLSL::type_to_glsl(const SPIRType &type, uint32_t id)
while (is_pointer(*coop_type) || is_array(*coop_type))
coop_type = &get<SPIRType>(coop_type->parent_type);
if (coop_type->op == spv::OpTypeCooperativeMatrixKHR)
if (coop_type->op == OpTypeCooperativeMatrixKHR)
{
require_extension_internal("GL_KHR_cooperative_matrix");
if (!options.vulkan_semantics)
@@ -16893,9 +16949,9 @@ string CompilerGLSL::type_to_glsl(const SPIRType &type, uint32_t id)
if (!scope->specialization)
{
require_extension_internal("GL_KHR_memory_scope_semantics");
if (scope->scalar() == spv::ScopeSubgroup)
if (scope->scalar() == ScopeSubgroup)
scope_expr = "gl_ScopeSubgroup";
else if (scope->scalar() == spv::ScopeWorkgroup)
else if (scope->scalar() == ScopeWorkgroup)
scope_expr = "gl_ScopeWorkgroup";
else
SPIRV_CROSS_THROW("Invalid scope for cooperative matrix.");
@@ -17127,7 +17183,7 @@ void CompilerGLSL::flatten_buffer_block(VariableID id)
flattened_buffer_blocks.insert(id);
}
bool CompilerGLSL::builtin_translates_to_nonarray(spv::BuiltIn /*builtin*/) const
bool CompilerGLSL::builtin_translates_to_nonarray(BuiltIn /*builtin*/) const
{
return false; // GLSL itself does not need to translate array builtin types to non-array builtin types
}
@@ -17172,7 +17228,7 @@ void CompilerGLSL::add_function_overload(const SPIRFunction &func)
uint32_t type_id = get_pointee_type_id(arg.type);
// Workaround glslang bug. It seems to only consider the base type when resolving overloads.
if (get<SPIRType>(type_id).op == spv::OpTypeCooperativeMatrixKHR)
if (get<SPIRType>(type_id).op == OpTypeCooperativeMatrixKHR)
type_id = get<SPIRType>(type_id).parent_type;
auto &type = get<SPIRType>(type_id);
@@ -19993,7 +20049,7 @@ bool CompilerGLSL::is_stage_output_location_masked(uint32_t location, uint32_t c
return masked_output_locations.count({ location, component }) != 0;
}
bool CompilerGLSL::is_stage_output_builtin_masked(spv::BuiltIn builtin) const
bool CompilerGLSL::is_stage_output_builtin_masked(BuiltIn builtin) const
{
return masked_output_builtins.count(builtin) != 0;
}

View File

@@ -32,6 +32,7 @@
namespace SPIRV_CROSS_NAMESPACE
{
using namespace SPIRV_CROSS_SPV_HEADER_NAMESPACE;
struct GlslConstantNameMapping;
enum PlsFormat
@@ -289,7 +290,7 @@ public:
// This option is only meaningful for MSL and HLSL, since GLSL matches by location directly.
// Masking builtins only takes effect if the builtin in question is part of the stage output interface.
void mask_stage_output_by_location(uint32_t location, uint32_t component);
void mask_stage_output_by_builtin(spv::BuiltIn builtin);
void mask_stage_output_by_builtin(BuiltIn builtin);
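
A brief caller-side sketch of the masking API declared here; the location/component values are illustrative and `compiler` is assumed to be a CompilerMSL or CompilerHLSL instance:

// Strip gl_PointSize and the output at location 3, component 0 from the
// stage output interface before compiling. External callers typically still
// qualify the builtin as spv::BuiltInPointSize.
compiler.mask_stage_output_by_builtin(spv::BuiltInPointSize);
compiler.mask_stage_output_by_location(3, 0);
std::string source = compiler.compile();
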
// Allows controlling how float literals are formatted in the output.
// Set to "nullptr" to use the default "convert_to_string" function.
@@ -389,7 +390,7 @@ protected:
};
// TODO remove this function when all subgroup ops are supported (or make it always return true)
static bool is_supported_subgroup_op_in_opengl(spv::Op op, const uint32_t *ops);
static bool is_supported_subgroup_op_in_opengl(Op op, const uint32_t *ops);
void reset(uint32_t iteration_count);
void emit_function(SPIRFunction &func, const Bitset &return_flags);
@@ -416,7 +417,7 @@ protected:
// For relax_nan_checks.
GLSLstd450 get_remapped_glsl_op(GLSLstd450 std450_op) const;
spv::Op get_remapped_spirv_op(spv::Op op) const;
Op get_remapped_spirv_op(Op op) const;
virtual void emit_glsl_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args,
uint32_t count);
@@ -443,7 +444,7 @@ protected:
SmallVector<uint32_t> &inherited_expressions);
virtual void emit_subgroup_op(const Instruction &i);
virtual std::string type_to_glsl(const SPIRType &type, uint32_t id = 0);
virtual std::string builtin_to_glsl(spv::BuiltIn builtin, spv::StorageClass storage);
virtual std::string builtin_to_glsl(BuiltIn builtin, StorageClass storage);
virtual void emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index,
const std::string &qualifier = "", uint32_t base_offset = 0);
virtual void emit_struct_padding_target(const SPIRType &type);
@@ -455,7 +456,7 @@ protected:
virtual std::string constant_expression_vector(const SPIRConstant &c, uint32_t vector);
virtual void emit_fixup();
virtual std::string variable_decl(const SPIRType &type, const std::string &name, uint32_t id = 0);
virtual bool variable_decl_is_remapped_storage(const SPIRVariable &var, spv::StorageClass storage) const;
virtual bool variable_decl_is_remapped_storage(const SPIRVariable &var, StorageClass storage) const;
virtual std::string to_func_call_arg(const SPIRFunction::Parameter &arg, uint32_t id);
virtual void emit_workgroup_initialization(const SPIRVariable &var);
@@ -500,7 +501,7 @@ protected:
virtual std::string unpack_expression_type(std::string expr_str, const SPIRType &type, uint32_t physical_type_id,
bool packed_type, bool row_major);
virtual bool builtin_translates_to_nonarray(spv::BuiltIn builtin) const;
virtual bool builtin_translates_to_nonarray(BuiltIn builtin) const;
virtual bool is_user_type_structured(uint32_t id) const;
@@ -671,16 +672,16 @@ protected:
void emit_struct(SPIRType &type);
void emit_resources();
void emit_extension_workarounds(spv::ExecutionModel model);
void emit_subgroup_arithmetic_workaround(const std::string &func, spv::Op op, spv::GroupOperation group_op);
void emit_extension_workarounds(ExecutionModel model);
void emit_subgroup_arithmetic_workaround(const std::string &func, Op op, GroupOperation group_op);
void emit_polyfills(uint32_t polyfills, bool relaxed);
void emit_buffer_block_native(const SPIRVariable &var);
void emit_buffer_reference_block(uint32_t type_id, bool forward_declaration);
void emit_buffer_block_legacy(const SPIRVariable &var);
void emit_buffer_block_flattened(const SPIRVariable &type);
void fixup_implicit_builtin_block_names(spv::ExecutionModel model);
void emit_declared_builtin_block(spv::StorageClass storage, spv::ExecutionModel model);
bool should_force_emit_builtin_block(spv::StorageClass storage);
void fixup_implicit_builtin_block_names(ExecutionModel model);
void emit_declared_builtin_block(StorageClass storage, ExecutionModel model);
bool should_force_emit_builtin_block(StorageClass storage);
void emit_push_constant_block_vulkan(const SPIRVariable &var);
void emit_push_constant_block_glsl(const SPIRVariable &var);
void emit_interface_block(const SPIRVariable &type);
@@ -773,12 +774,12 @@ protected:
// Relevant for PtrAccessChain / BDA.
virtual uint32_t get_physical_type_stride(const SPIRType &type) const;
spv::StorageClass get_expression_effective_storage_class(uint32_t ptr);
StorageClass get_expression_effective_storage_class(uint32_t ptr);
virtual bool access_chain_needs_stage_io_builtin_translation(uint32_t base);
virtual bool check_physical_type_cast(std::string &expr, const SPIRType *type, uint32_t physical_type);
virtual bool prepare_access_chain_for_scalar_access(std::string &expr, const SPIRType &type,
spv::StorageClass storage, bool &is_packed);
StorageClass storage, bool &is_packed);
std::string access_chain(uint32_t base, const uint32_t *indices, uint32_t count, const SPIRType &target_type,
AccessChainMeta *meta = nullptr, bool ptr_chain = false);
@@ -842,14 +843,14 @@ protected:
std::string to_precision_qualifiers_glsl(uint32_t id);
virtual const char *to_storage_qualifiers_glsl(const SPIRVariable &var);
std::string flags_to_qualifiers_glsl(const SPIRType &type, uint32_t id, const Bitset &flags);
const char *format_to_glsl(spv::ImageFormat format);
const char *format_to_glsl(ImageFormat format);
virtual std::string layout_for_member(const SPIRType &type, uint32_t index);
virtual std::string to_interpolation_qualifiers(const Bitset &flags);
std::string layout_for_variable(const SPIRVariable &variable);
std::string to_combined_image_sampler(VariableID image_id, VariableID samp_id);
virtual bool skip_argument(uint32_t id) const;
virtual bool emit_array_copy(const char *expr, uint32_t lhs_id, uint32_t rhs_id,
spv::StorageClass lhs_storage, spv::StorageClass rhs_storage);
StorageClass lhs_storage, StorageClass rhs_storage);
virtual void emit_block_hints(const SPIRBlock &block);
virtual std::string to_initializer_expression(const SPIRVariable &var);
virtual std::string to_zero_initialized_expression(uint32_t type_id);
@@ -1024,7 +1025,7 @@ protected:
bool type_is_empty(const SPIRType &type);
bool can_use_io_location(spv::StorageClass storage, bool block);
bool can_use_io_location(StorageClass storage, bool block);
const Instruction *get_next_instruction_in_block(const Instruction &instr);
static uint32_t mask_relevant_memory_semantics(uint32_t semantics);
@@ -1039,7 +1040,7 @@ protected:
// Builtins in GLSL always have a specific signedness, but the SPIR-V can declare them
// as either unsigned or signed.
// Sometimes we will need to automatically perform casts on load and store to make this work.
virtual SPIRType::BaseType get_builtin_basetype(spv::BuiltIn builtin, SPIRType::BaseType default_type);
virtual SPIRType::BaseType get_builtin_basetype(BuiltIn builtin, SPIRType::BaseType default_type);
virtual void cast_to_variable_store(uint32_t target_id, std::string &expr, const SPIRType &expr_type);
virtual void cast_from_variable_load(uint32_t source_id, std::string &expr, const SPIRType &expr_type);
void unroll_array_from_complex_load(uint32_t target_id, uint32_t source_id, std::string &expr);
@@ -1068,7 +1069,7 @@ protected:
static const char *vector_swizzle(int vecsize, int index);
bool is_stage_output_location_masked(uint32_t location, uint32_t component) const;
bool is_stage_output_builtin_masked(spv::BuiltIn builtin) const;
bool is_stage_output_builtin_masked(BuiltIn builtin) const;
bool is_stage_output_variable_masked(const SPIRVariable &var) const;
bool is_stage_output_block_member_masked(const SPIRVariable &var, uint32_t index, bool strip_array) const;
bool is_per_primitive_variable(const SPIRVariable &var) const;

View File

@@ -27,7 +27,7 @@
#include <algorithm>
#include <assert.h>
using namespace spv;
using namespace SPIRV_CROSS_SPV_HEADER_NAMESPACE;
using namespace SPIRV_CROSS_NAMESPACE;
using namespace std;
@@ -1139,7 +1139,7 @@ void CompilerHLSL::emit_interface_block_in_struct(const SPIRVariable &var, unord
}
}
std::string CompilerHLSL::builtin_to_glsl(spv::BuiltIn builtin, spv::StorageClass storage)
std::string CompilerHLSL::builtin_to_glsl(BuiltIn builtin, StorageClass storage)
{
switch (builtin)
{
@@ -3760,19 +3760,19 @@ void CompilerHLSL::emit_texture_op(const Instruction &i, bool sparse)
uint32_t coord_components = 0;
switch (imgtype.image.dim)
{
case spv::Dim1D:
case Dim1D:
coord_components = 1;
break;
case spv::Dim2D:
case Dim2D:
coord_components = 2;
break;
case spv::Dim3D:
case Dim3D:
coord_components = 3;
break;
case spv::DimCube:
case DimCube:
coord_components = 3;
break;
case spv::DimBuffer:
case DimBuffer:
coord_components = 1;
break;
default:
@@ -3783,7 +3783,7 @@ void CompilerHLSL::emit_texture_op(const Instruction &i, bool sparse)
if (dref)
inherited_expressions.push_back(dref);
if (imgtype.image.arrayed)
if (imgtype.image.arrayed && op != OpImageQueryLod)
coord_components++;
uint32_t bias = 0;
@@ -4001,7 +4001,7 @@ void CompilerHLSL::emit_texture_op(const Instruction &i, bool sparse)
{
if (dref)
{
if (imgtype.image.dim != spv::Dim1D && imgtype.image.dim != spv::Dim2D)
if (imgtype.image.dim != Dim1D && imgtype.image.dim != Dim2D)
{
SPIRV_CROSS_THROW(
"Depth comparison is only supported for 1D and 2D textures in HLSL shader model 2/3.");
@@ -5436,7 +5436,7 @@ void CompilerHLSL::emit_access_chain(const Instruction &instruction)
}
}
void CompilerHLSL::emit_atomic(const uint32_t *ops, uint32_t length, spv::Op op)
void CompilerHLSL::emit_atomic(const uint32_t *ops, uint32_t length, Op op)
{
const char *atomic_op = nullptr;
@@ -7113,8 +7113,8 @@ bool CompilerHLSL::is_hlsl_force_storage_buffer_as_uav(ID id) const
return true;
}
const uint32_t desc_set = get_decoration(id, spv::DecorationDescriptorSet);
const uint32_t binding = get_decoration(id, spv::DecorationBinding);
const uint32_t desc_set = get_decoration(id, DecorationDescriptorSet);
const uint32_t binding = get_decoration(id, DecorationBinding);
return (force_uav_buffer_bindings.find({ desc_set, binding }) != force_uav_buffer_bindings.end());
}
@@ -7134,6 +7134,7 @@ bool CompilerHLSL::is_user_type_structured(uint32_t id) const
const std::string &user_type = get_decoration_string(id, DecorationUserTypeGOOGLE);
return user_type.compare(0, 16, "structuredbuffer") == 0 ||
user_type.compare(0, 18, "rwstructuredbuffer") == 0 ||
user_type.compare(0, 35, "globallycoherent rwstructuredbuffer") == 0 ||
user_type.compare(0, 33, "rasterizerorderedstructuredbuffer") == 0;
}
return false;

View File

@@ -29,6 +29,7 @@
namespace SPIRV_CROSS_NAMESPACE
{
using namespace SPIRV_CROSS_SPV_HEADER_NAMESPACE;
// Interface which remaps vertex inputs to a fixed semantic name to make linking easier.
struct HLSLVertexAttributeRemap
{
@@ -87,7 +88,7 @@ using HLSLBindingFlags = uint32_t;
// For deeper control of push constants, set_root_constant_layouts() can be used instead.
struct HLSLResourceBinding
{
spv::ExecutionModel stage = spv::ExecutionModelMax;
ExecutionModel stage = ExecutionModelMax;
uint32_t desc_set = 0;
uint32_t binding = 0;
@@ -216,7 +217,7 @@ public:
// is_hlsl_resource_binding_used() will return true after calling ::compile() if
// the set/binding combination was used by the HLSL code.
void add_hlsl_resource_binding(const HLSLResourceBinding &resource);
bool is_hlsl_resource_binding_used(spv::ExecutionModel model, uint32_t set, uint32_t binding) const;
bool is_hlsl_resource_binding_used(ExecutionModel model, uint32_t set, uint32_t binding) const;
// Controls which storage buffer bindings will be forced to be declared as UAVs.
void set_hlsl_force_storage_buffer_as_uav(uint32_t desc_set, uint32_t binding);
@@ -255,7 +256,7 @@ private:
void emit_specialization_constants_and_structs();
void emit_composite_constants();
void emit_fixup() override;
std::string builtin_to_glsl(spv::BuiltIn builtin, spv::StorageClass storage) override;
std::string builtin_to_glsl(BuiltIn builtin, StorageClass storage) override;
std::string layout_for_member(const SPIRType &type, uint32_t index) override;
std::string to_interpolation_qualifiers(const Bitset &flags) override;
std::string bitcast_glsl_op(const SPIRType &result_type, const SPIRType &argument_type) override;
@@ -281,7 +282,7 @@ private:
const SmallVector<uint32_t> &composite_chain);
std::string write_access_chain_value(uint32_t value, const SmallVector<uint32_t> &composite_chain, bool enclose);
void emit_store(const Instruction &instruction);
void emit_atomic(const uint32_t *ops, uint32_t length, spv::Op op);
void emit_atomic(const uint32_t *ops, uint32_t length, Op op);
void emit_subgroup_op(const Instruction &i) override;
void emit_block_hints(const SPIRBlock &block) override;
@@ -294,7 +295,7 @@ private:
const char *to_storage_qualifiers_glsl(const SPIRVariable &var) override;
void replace_illegal_names() override;
SPIRType::BaseType get_builtin_basetype(spv::BuiltIn builtin, SPIRType::BaseType default_type) override;
SPIRType::BaseType get_builtin_basetype(BuiltIn builtin, SPIRType::BaseType default_type) override;
bool is_hlsl_force_storage_buffer_as_uav(ID id) const;
@@ -379,7 +380,7 @@ private:
uint32_t type_to_consumed_locations(const SPIRType &type) const;
std::string to_semantic(uint32_t location, spv::ExecutionModel em, spv::StorageClass sc);
std::string to_semantic(uint32_t location, ExecutionModel em, StorageClass sc);
uint32_t num_workgroups_builtin = 0;
HLSLBindingFlags resource_binding_flags = 0;

File diff suppressed because it is too large

View File

@@ -33,6 +33,7 @@
namespace SPIRV_CROSS_NAMESPACE
{
using namespace SPIRV_CROSS_SPV_HEADER_NAMESPACE;
// Indicates the format of a shader interface variable. Currently limited to specifying
// if the input is an 8-bit unsigned integer, 16-bit unsigned integer, or
@@ -78,7 +79,7 @@ struct MSLShaderInterfaceVariable
uint32_t location = 0;
uint32_t component = 0;
MSLShaderVariableFormat format = MSL_SHADER_VARIABLE_FORMAT_OTHER;
spv::BuiltIn builtin = spv::BuiltInMax;
BuiltIn builtin = BuiltInMax;
uint32_t vecsize = 0;
MSLShaderVariableRate rate = MSL_SHADER_VARIABLE_RATE_PER_VERTEX;
};
@@ -104,7 +105,7 @@ struct MSLShaderInterfaceVariable
// become a [[buffer(N)]], [[texture(N)]] or [[sampler(N)]] depending on the resource types used.
struct MSLResourceBinding
{
spv::ExecutionModel stage = spv::ExecutionModelMax;
ExecutionModel stage = ExecutionModelMax;
SPIRType::BaseType basetype = SPIRType::Unknown;
uint32_t desc_set = 0;
uint32_t binding = 0;
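
For reference, a caller-side sketch of filling in this struct through the C++ API; the slot numbers are arbitrary and `msl` is assumed to be a CompilerMSL instance (external callers would qualify the types as spirv_cross::MSLResourceBinding and spv::ExecutionModelFragment):

MSLResourceBinding bind;
bind.stage = ExecutionModelFragment;
bind.desc_set = 0;
bind.binding = 1;
bind.msl_texture = 2; // [[texture(2)]]
bind.msl_sampler = 2; // [[sampler(2)]]
msl.add_msl_resource_binding(bind);

// After compile(), the remap can be verified:
bool used = msl.is_msl_resource_binding_used(ExecutionModelFragment, 0, 1);
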
@@ -591,9 +592,9 @@ public:
// rasterization if vertex shader requires rasterization to be disabled.
bool get_is_rasterization_disabled() const
{
return is_rasterization_disabled && (get_entry_point().model == spv::ExecutionModelVertex ||
get_entry_point().model == spv::ExecutionModelTessellationControl ||
get_entry_point().model == spv::ExecutionModelTessellationEvaluation);
return is_rasterization_disabled && (get_entry_point().model == ExecutionModelVertex ||
get_entry_point().model == ExecutionModelTessellationControl ||
get_entry_point().model == ExecutionModelTessellationEvaluation);
}
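
A short usage sketch for the query above; the input vector and the pipeline-setup helper are hypothetical:

CompilerMSL msl(std::move(spirv_words)); // assumed std::vector<uint32_t> with the SPIR-V module
std::string msl_source = msl.compile();

// If the vertex stage had to disable rasterization (e.g. it captures its
// output to a buffer), the caller must reflect that in its pipeline state.
if (msl.get_is_rasterization_disabled())
	disable_rasterization_in_pipeline(); // hypothetical caller-side helper
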
// Provide feedback to calling API to allow it to pass an auxiliary
@@ -706,20 +707,20 @@ public:
// This is typical for tessellation builtin inputs such as tess levels, gl_Position, etc.
// This returns k_unknown_location if the location was explicitly assigned with
// add_msl_shader_input or the builtin is not used, otherwise returns N in [[attribute(N)]].
uint32_t get_automatic_builtin_input_location(spv::BuiltIn builtin) const;
uint32_t get_automatic_builtin_input_location(BuiltIn builtin) const;
// If not using add_msl_shader_output, it's possible
// that certain builtin attributes need to be automatically assigned locations.
// This is typical for tessellation builtin outputs such as tess levels, gl_Position, etc.
// This returns k_unknown_location if the location were explicitly assigned with
// add_msl_shader_output or the builtin were not used, otherwise returns N in [[attribute(N)]].
uint32_t get_automatic_builtin_output_location(spv::BuiltIn builtin) const;
uint32_t get_automatic_builtin_output_location(BuiltIn builtin) const;
// NOTE: Only resources which are remapped using add_msl_resource_binding will be reported here.
// Constexpr samplers are always assumed to be emitted.
// No specific MSLResourceBinding remapping is required for constexpr samplers as long as they are remapped
// by remap_constexpr_sampler(_by_binding).
bool is_msl_resource_binding_used(spv::ExecutionModel model, uint32_t set, uint32_t binding) const;
bool is_msl_resource_binding_used(ExecutionModel model, uint32_t set, uint32_t binding) const;
// This must only be called after a successful call to CompilerMSL::compile().
// For a variable resource ID obtained through reflection API, report the automatically assigned resource index.
@@ -931,14 +932,14 @@ protected:
std::string type_to_array_glsl(const SPIRType &type, uint32_t variable_id) override;
std::string constant_op_expression(const SPIRConstantOp &cop) override;
bool variable_decl_is_remapped_storage(const SPIRVariable &variable, spv::StorageClass storage) const override;
bool variable_decl_is_remapped_storage(const SPIRVariable &variable, StorageClass storage) const override;
// GCC workaround of lambdas calling protected functions (for older GCC versions)
std::string variable_decl(const SPIRType &type, const std::string &name, uint32_t id = 0) override;
std::string image_type_glsl(const SPIRType &type, uint32_t id, bool member) override;
std::string sampler_type(const SPIRType &type, uint32_t id, bool member);
std::string builtin_to_glsl(spv::BuiltIn builtin, spv::StorageClass storage) override;
std::string builtin_to_glsl(BuiltIn builtin, StorageClass storage) override;
std::string to_func_call_arg(const SPIRFunction::Parameter &arg, uint32_t id) override;
std::string to_name(uint32_t id, bool allow_alias = true) const override;
std::string to_function_name(const TextureFunctionNameArguments &args) override;
@@ -950,7 +951,7 @@ protected:
bool is_packed, bool row_major) override;
// Returns true for BuiltInSampleMask because gl_SampleMask[] is an array in SPIR-V, but [[sample_mask]] is a scalar in Metal.
bool builtin_translates_to_nonarray(spv::BuiltIn builtin) const override;
bool builtin_translates_to_nonarray(BuiltIn builtin) const override;
std::string bitcast_glsl_op(const SPIRType &result_type, const SPIRType &argument_type) override;
bool emit_complex_bitcast(uint32_t result_id, uint32_t id, uint32_t op0) override;
@@ -990,8 +991,8 @@ protected:
void extract_global_variables_from_function(uint32_t func_id, std::set<uint32_t> &added_arg_ids,
std::unordered_set<uint32_t> &global_var_ids,
std::unordered_set<uint32_t> &processed_func_ids);
uint32_t add_interface_block(spv::StorageClass storage, bool patch = false);
uint32_t add_interface_block_pointer(uint32_t ib_var_id, spv::StorageClass storage);
uint32_t add_interface_block(StorageClass storage, bool patch = false);
uint32_t add_interface_block_pointer(uint32_t ib_var_id, StorageClass storage);
uint32_t add_meshlet_block(bool per_primitive);
struct InterfaceBlockMeta
@@ -1012,23 +1013,23 @@ protected:
std::string to_tesc_invocation_id();
void emit_local_masked_variable(const SPIRVariable &masked_var, bool strip_array);
void add_variable_to_interface_block(spv::StorageClass storage, const std::string &ib_var_ref, SPIRType &ib_type,
void add_variable_to_interface_block(StorageClass storage, const std::string &ib_var_ref, SPIRType &ib_type,
SPIRVariable &var, InterfaceBlockMeta &meta);
void add_composite_variable_to_interface_block(spv::StorageClass storage, const std::string &ib_var_ref,
void add_composite_variable_to_interface_block(StorageClass storage, const std::string &ib_var_ref,
SPIRType &ib_type, SPIRVariable &var, InterfaceBlockMeta &meta);
void add_plain_variable_to_interface_block(spv::StorageClass storage, const std::string &ib_var_ref,
void add_plain_variable_to_interface_block(StorageClass storage, const std::string &ib_var_ref,
SPIRType &ib_type, SPIRVariable &var, InterfaceBlockMeta &meta);
bool add_component_variable_to_interface_block(spv::StorageClass storage, const std::string &ib_var_ref,
bool add_component_variable_to_interface_block(StorageClass storage, const std::string &ib_var_ref,
SPIRVariable &var, const SPIRType &type,
InterfaceBlockMeta &meta);
void add_plain_member_variable_to_interface_block(spv::StorageClass storage,
void add_plain_member_variable_to_interface_block(StorageClass storage,
const std::string &ib_var_ref, SPIRType &ib_type,
SPIRVariable &var, SPIRType &var_type,
uint32_t mbr_idx, InterfaceBlockMeta &meta,
const std::string &mbr_name_qual,
const std::string &var_chain_qual,
uint32_t &location, uint32_t &var_mbr_idx);
void add_composite_member_variable_to_interface_block(spv::StorageClass storage,
void add_composite_member_variable_to_interface_block(StorageClass storage,
const std::string &ib_var_ref, SPIRType &ib_type,
SPIRVariable &var, SPIRType &var_type,
uint32_t mbr_idx, InterfaceBlockMeta &meta,
@@ -1040,11 +1041,11 @@ protected:
void add_tess_level_input(const std::string &base_ref, const std::string &mbr_name, SPIRVariable &var);
void ensure_struct_members_valid_vecsizes(SPIRType &struct_type, uint32_t &location);
void fix_up_interface_member_indices(spv::StorageClass storage, uint32_t ib_type_id);
void fix_up_interface_member_indices(StorageClass storage, uint32_t ib_type_id);
void mark_location_as_used_by_shader(uint32_t location, const SPIRType &type,
spv::StorageClass storage, bool fallback = false);
uint32_t ensure_correct_builtin_type(uint32_t type_id, spv::BuiltIn builtin);
StorageClass storage, bool fallback = false);
uint32_t ensure_correct_builtin_type(uint32_t type_id, BuiltIn builtin);
uint32_t ensure_correct_input_type(uint32_t type_id, uint32_t location, uint32_t component,
uint32_t num_components, bool strip_array);
@@ -1059,6 +1060,7 @@ protected:
void fix_up_shader_inputs_outputs();
bool entry_point_is_vertex() const;
bool entry_point_returns_stage_output() const;
bool entry_point_requires_const_device_buffers() const;
std::string func_type_decl(SPIRType &type);
@@ -1074,23 +1076,23 @@ protected:
std::string to_buffer_size_expression(uint32_t id);
bool is_sample_rate() const;
bool is_intersection_query() const;
bool is_direct_input_builtin(spv::BuiltIn builtin);
std::string builtin_qualifier(spv::BuiltIn builtin);
std::string builtin_type_decl(spv::BuiltIn builtin, uint32_t id = 0);
std::string built_in_func_arg(spv::BuiltIn builtin, bool prefix_comma);
bool is_direct_input_builtin(BuiltIn builtin);
std::string builtin_qualifier(BuiltIn builtin);
std::string builtin_type_decl(BuiltIn builtin, uint32_t id = 0);
std::string built_in_func_arg(BuiltIn builtin, bool prefix_comma);
std::string member_attribute_qualifier(const SPIRType &type, uint32_t index);
std::string member_location_attribute_qualifier(const SPIRType &type, uint32_t index);
std::string argument_decl(const SPIRFunction::Parameter &arg);
const char *descriptor_address_space(uint32_t id, spv::StorageClass storage, const char *plain_address_space) const;
const char *descriptor_address_space(uint32_t id, StorageClass storage, const char *plain_address_space) const;
std::string round_fp_tex_coords(std::string tex_coords, bool coord_is_fp);
uint32_t get_metal_resource_index(SPIRVariable &var, SPIRType::BaseType basetype, uint32_t plane = 0);
uint32_t get_member_location(uint32_t type_id, uint32_t index, uint32_t *comp = nullptr) const;
uint32_t get_or_allocate_builtin_input_member_location(spv::BuiltIn builtin,
uint32_t get_or_allocate_builtin_input_member_location(BuiltIn builtin,
uint32_t type_id, uint32_t index, uint32_t *comp = nullptr);
uint32_t get_or_allocate_builtin_output_member_location(spv::BuiltIn builtin,
uint32_t get_or_allocate_builtin_output_member_location(BuiltIn builtin,
uint32_t type_id, uint32_t index, uint32_t *comp = nullptr);
uint32_t get_physical_tess_level_array_size(spv::BuiltIn builtin) const;
uint32_t get_physical_tess_level_array_size(BuiltIn builtin) const;
uint32_t get_physical_type_stride(const SPIRType &type) const override;
@@ -1136,7 +1138,7 @@ protected:
std::string get_tess_factor_struct_name();
SPIRType &get_uint_type();
uint32_t get_uint_type_id();
void emit_atomic_func_op(uint32_t result_type, uint32_t result_id, const char *op, spv::Op opcode,
void emit_atomic_func_op(uint32_t result_type, uint32_t result_id, const char *op, Op opcode,
uint32_t mem_order_1, uint32_t mem_order_2, bool has_mem_order_2, uint32_t op0, uint32_t op1 = 0,
bool op1_is_pointer = false, bool op1_is_literal = false, uint32_t op2 = 0);
const char *get_memory_order(uint32_t spv_mem_sem);
@@ -1144,7 +1146,7 @@ protected:
void add_typedef_line(const std::string &line);
void emit_barrier(uint32_t id_exe_scope, uint32_t id_mem_scope, uint32_t id_mem_sem);
bool emit_array_copy(const char *expr, uint32_t lhs_id, uint32_t rhs_id,
spv::StorageClass lhs_storage, spv::StorageClass rhs_storage) override;
StorageClass lhs_storage, StorageClass rhs_storage) override;
void build_implicit_builtins();
uint32_t build_constant_uint_array_pointer();
void emit_entry_point_declarations() override;
@@ -1194,7 +1196,7 @@ protected:
void analyze_workgroup_variables();
bool access_chain_needs_stage_io_builtin_translation(uint32_t base) override;
bool prepare_access_chain_for_scalar_access(std::string &expr, const SPIRType &type, spv::StorageClass storage,
bool prepare_access_chain_for_scalar_access(std::string &expr, const SPIRType &type, StorageClass storage,
bool &is_packed) override;
void fix_up_interpolant_access_chain(const uint32_t *ops, uint32_t length);
bool check_physical_type_cast(std::string &expr, const SPIRType *type, uint32_t physical_type) override;
@@ -1203,9 +1205,9 @@ protected:
bool emit_tessellation_io_load(uint32_t result_type, uint32_t id, uint32_t ptr);
bool is_out_of_bounds_tessellation_level(uint32_t id_lhs);
void ensure_builtin(spv::StorageClass storage, spv::BuiltIn builtin);
void ensure_builtin(StorageClass storage, BuiltIn builtin);
void mark_implicit_builtin(spv::StorageClass storage, spv::BuiltIn builtin, uint32_t id);
void mark_implicit_builtin(StorageClass storage, BuiltIn builtin, uint32_t id);
std::string convert_to_f32(const std::string &expr, uint32_t components);
@@ -1266,7 +1268,7 @@ protected:
bool using_builtin_array() const;
bool is_rasterization_disabled = false;
bool has_descriptor_side_effects = false;
bool has_descriptor_side_effects_buffer = false;
bool capture_output_to_buffer = false;
bool needs_swizzle_buffer_def = false;
bool used_swizzle_buffer = false;
@@ -1277,6 +1279,7 @@ protected:
bool needs_sample_id = false;
bool needs_helper_invocation = false;
bool needs_workgroup_zero_init = false;
bool needs_point_size_output = false;
bool writes_to_depth = false;
bool writes_to_point_size = false;
std::string qual_pos_var_name;
@@ -1295,7 +1298,7 @@ protected:
std::string patch_output_buffer_var_name = "spvPatchOut";
std::string tess_factor_buffer_var_name = "spvTessLevel";
std::string index_buffer_var_name = "spvIndices";
spv::Op previous_instruction_opcode = spv::OpNop;
Op previous_instruction_opcode = OpNop;
// Must be ordered since declaration is in a specific order.
std::map<uint32_t, MSLConstexprSampler> constexpr_samplers_by_id;
@@ -1303,7 +1306,6 @@ protected:
const MSLConstexprSampler *find_constexpr_sampler(uint32_t id) const;
std::unordered_set<uint32_t> buffers_requiring_array_length;
SmallVector<std::pair<uint32_t, uint32_t>> buffer_aliases_argument;
SmallVector<uint32_t> buffer_aliases_discrete;
std::unordered_set<uint32_t> atomic_image_vars_emulated; // Emulate texture2D atomic operations
std::unordered_set<uint32_t> pull_model_inputs;
@@ -1312,7 +1314,13 @@ protected:
SmallVector<SPIRVariable *> entry_point_bindings;
// Must be ordered since array is in a specific order.
std::map<SetBindingPair, std::pair<uint32_t, uint32_t>> buffers_requiring_dynamic_offset;
struct DynamicBuffer
{
uint32_t base_index;
uint32_t var_id;
std::string mbr_name;
};
std::map<SetBindingPair, DynamicBuffer> buffers_requiring_dynamic_offset;
SmallVector<uint32_t> disabled_frag_outputs;
@@ -1349,7 +1357,7 @@ protected:
bool type_is_msl_framebuffer_fetch(const SPIRType &type) const;
bool is_supported_argument_buffer_type(const SPIRType &type) const;
bool variable_storage_requires_stage_io(spv::StorageClass storage) const;
bool variable_storage_requires_stage_io(StorageClass storage) const;
bool needs_manual_helper_invocation_updates() const
{
@@ -1357,7 +1365,7 @@ protected:
}
bool needs_frag_discard_checks() const
{
return get_execution_model() == spv::ExecutionModelFragment && msl_options.supports_msl_version(2, 3) &&
return get_execution_model() == ExecutionModelFragment && msl_options.supports_msl_version(2, 3) &&
msl_options.check_discarded_frag_stores && frag_shader_needs_discard_checks;
}
@@ -1367,17 +1375,17 @@ protected:
// OpcodeHandler that handles several MSL preprocessing operations.
struct OpCodePreprocessor : OpcodeHandler
{
OpCodePreprocessor(CompilerMSL &compiler_)
: compiler(compiler_)
explicit OpCodePreprocessor(CompilerMSL &compiler_)
: OpcodeHandler(compiler_), self(compiler_)
{
enable_result_types = true;
}
bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override;
CompilerMSL::SPVFuncImpl get_spv_func_impl(spv::Op opcode, const uint32_t *args, uint32_t length);
bool handle(Op opcode, const uint32_t *args, uint32_t length) override;
CompilerMSL::SPVFuncImpl get_spv_func_impl(Op opcode, const uint32_t *args, uint32_t length);
void check_resource_write(uint32_t var_id);
CompilerMSL &compiler;
std::unordered_map<uint32_t, uint32_t> result_types;
CompilerMSL &self;
std::unordered_map<uint32_t, uint32_t> image_pointers_emulated; // Emulate texture2D atomic operations
bool suppress_missing_prototypes = false;
bool uses_atomics = false;
@@ -1394,14 +1402,13 @@ protected:
// OpcodeHandler that scans for uses of sampled images
struct SampledImageScanner : OpcodeHandler
{
SampledImageScanner(CompilerMSL &compiler_)
: compiler(compiler_)
explicit SampledImageScanner(CompilerMSL &compiler_)
: OpcodeHandler(compiler_), self(compiler_)
{
}
bool handle(spv::Op opcode, const uint32_t *args, uint32_t) override;
CompilerMSL &compiler;
CompilerMSL &self;
bool handle(Op opcode, const uint32_t *args, uint32_t) override;
};
// Sorts the members of a SPIRType and associated Meta info based on a settable sorting
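
The comments in this header describe the binding-remap contract: only resources registered through add_msl_resource_binding are reported by is_msl_resource_binding_used, and the query is meaningful once compile() has run. A minimal usage sketch under those assumptions, using the default spirv_cross namespace; the function name, the fragment stage, the set/binding numbers, and the MSL slot indices are placeholders, other MSLResourceBinding fields are left at their defaults, and exact matching rules may vary between spirv-cross versions:

#include "spirv_msl.hpp"

#include <cstdint>
#include <string>
#include <utility>
#include <vector>

// Sketch: remap one fragment resource (set 0, binding 1) onto explicit MSL
// slots, cross-compile, then check whether the shader actually used it.
std::string cross_compile_to_msl(std::vector<uint32_t> spirv_words)
{
	spirv_cross::CompilerMSL msl(std::move(spirv_words));

	spirv_cross::MSLResourceBinding binding;
	// The SPIR-V enums are re-exported into the spirv_cross namespace by the
	// using-directive added in this header.
	binding.stage = spirv_cross::ExecutionModelFragment;
	binding.desc_set = 0;
	binding.binding = 1;
	binding.msl_texture = 0; // [[texture(0)]]
	binding.msl_sampler = 0; // [[sampler(0)]]
	binding.msl_buffer = 0;  // [[buffer(0)]]; only the slot matching the resource type is consumed
	msl.add_msl_resource_binding(binding);

	std::string source = msl.compile();

	// Only bindings registered above are reported; query after compile().
	if (!msl.is_msl_resource_binding_used(spirv_cross::ExecutionModelFragment, 0, 1))
	{
		// Set 0 / binding 1 was never referenced by the shader.
	}
	return source;
}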

View File

@@ -25,7 +25,7 @@
#include <assert.h>
using namespace std;
using namespace spv;
using namespace SPIRV_CROSS_SPV_HEADER_NAMESPACE;
namespace SPIRV_CROSS_NAMESPACE
{
@@ -594,7 +594,7 @@ void Parser::parse(const Instruction &instruction)
{
if (length > 2)
{
if (ops[2] == spv::FPEncodingBFloat16KHR)
if (ops[2] == FPEncodingBFloat16KHR)
type.basetype = SPIRType::BFloat16;
else
SPIRV_CROSS_THROW("Unrecognized encoding for OpTypeFloat 16.");
@@ -606,9 +606,9 @@ void Parser::parse(const Instruction &instruction)
{
if (length < 2)
SPIRV_CROSS_THROW("Missing encoding for OpTypeFloat 8.");
else if (ops[2] == spv::FPEncodingFloat8E4M3EXT)
else if (ops[2] == FPEncodingFloat8E4M3EXT)
type.basetype = SPIRType::FloatE4M3;
else if (ops[2] == spv::FPEncodingFloat8E5M2EXT)
else if (ops[2] == FPEncodingFloat8E5M2EXT)
type.basetype = SPIRType::FloatE5M2;
else
SPIRV_CROSS_THROW("Invalid encoding for OpTypeFloat 8.");
@@ -944,7 +944,7 @@ void Parser::parse(const Instruction &instruction)
uint32_t id = ops[1];
// Instead of a temporary, create a new function-wide temporary with this ID instead.
auto &var = set<SPIRVariable>(id, result_type, spv::StorageClassFunction);
auto &var = set<SPIRVariable>(id, result_type, StorageClassFunction);
var.phi_variable = true;
current_function->add_local_variable(id);

View File

@@ -25,7 +25,7 @@
#include "spirv_glsl.hpp"
#include <iomanip>
using namespace spv;
using namespace SPIRV_CROSS_SPV_HEADER_NAMESPACE;
using namespace SPIRV_CROSS_NAMESPACE;
using namespace std;
@@ -449,7 +449,7 @@ void CompilerReflection::emit_type_member_qualifiers(const SPIRType &type, uint3
}
}
string CompilerReflection::execution_model_to_str(spv::ExecutionModel model)
string CompilerReflection::execution_model_to_str(ExecutionModel model)
{
switch (model)
{
@@ -510,9 +510,9 @@ void CompilerReflection::emit_entry_points()
json_stream->begin_json_object();
json_stream->emit_json_key_value("name", e.name);
json_stream->emit_json_key_value("mode", execution_model_to_str(e.execution_model));
if (e.execution_model == ExecutionModelGLCompute || e.execution_model == spv::ExecutionModelMeshEXT ||
e.execution_model == spv::ExecutionModelMeshNV || e.execution_model == spv::ExecutionModelTaskEXT ||
e.execution_model == spv::ExecutionModelTaskNV)
if (e.execution_model == ExecutionModelGLCompute || e.execution_model == ExecutionModelMeshEXT ||
e.execution_model == ExecutionModelMeshNV || e.execution_model == ExecutionModelTaskEXT ||
e.execution_model == ExecutionModelTaskNV)
{
const auto &spv_entry = get_entry_point(e.name, e.execution_model);

View File

@@ -34,6 +34,7 @@ class Stream;
namespace SPIRV_CROSS_NAMESPACE
{
using namespace SPIRV_CROSS_SPV_HEADER_NAMESPACE;
class CompilerReflection : public CompilerGLSL
{
using Parent = CompilerGLSL;
@@ -67,7 +68,7 @@ public:
std::string compile() override;
private:
static std::string execution_model_to_str(spv::ExecutionModel model);
static std::string execution_model_to_str(ExecutionModel model);
void emit_entry_points();
void emit_types();
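
The reflection backend's compile() produces JSON rather than shader source; entry points are written with their "name" and a "mode" string from execution_model_to_str, as the hunks above show. A minimal sketch, assuming the default spirv_cross namespace and the spirv_reflect.hpp header that declares CompilerReflection; the wrapper function name is illustrative:

#include "spirv_reflect.hpp"

#include <cstdint>
#include <string>
#include <utility>
#include <vector>

// Sketch: CompilerReflection::compile() returns a JSON reflection document;
// each entry point record carries "name" and "mode" (the execution-model
// string), matching the emit_entry_points() code above.
std::string reflect_to_json(std::vector<uint32_t> spirv_words)
{
	spirv_cross::CompilerReflection refl(std::move(spirv_words));
	return refl.compile();
}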