mirror of https://github.com/bkaradzic/bgfx.git
Updated spirv-tools.
@@ -1 +1 @@
"v2024.4", "SPIRV-Tools v2024.4 v2024.4.rc2-55-g2fc78cee"
"v2025.1", "SPIRV-Tools v2025.1 v2025.1.rc1-22-gabb6ee0e"
@@ -878,17 +878,17 @@ static const spv_opcode_desc_t kOpcodeTableEntries[] = {
|
||||
{"AliasDomainDeclINTEL", spv::Op::OpAliasDomainDeclINTEL, 0, nullptr, 1, pygen_variable_caps_MemoryAccessAliasingINTEL, 2, {SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_OPTIONAL_ID}, 1, 0, 1, pygen_variable_exts_SPV_INTEL_memory_access_aliasing, 0xffffffffu, 0xffffffffu},
|
||||
{"AliasScopeDeclINTEL", spv::Op::OpAliasScopeDeclINTEL, 0, nullptr, 1, pygen_variable_caps_MemoryAccessAliasingINTEL, 3, {SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_OPTIONAL_ID}, 1, 0, 1, pygen_variable_exts_SPV_INTEL_memory_access_aliasing, 0xffffffffu, 0xffffffffu},
|
||||
{"AliasScopeListDeclINTEL", spv::Op::OpAliasScopeListDeclINTEL, 0, nullptr, 1, pygen_variable_caps_MemoryAccessAliasingINTEL, 2, {SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_VARIABLE_ID}, 1, 0, 1, pygen_variable_exts_SPV_INTEL_memory_access_aliasing, 0xffffffffu, 0xffffffffu},
|
||||
{"FixedSqrtINTEL", spv::Op::OpFixedSqrtINTEL, 0, nullptr, 1, pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL, 9, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"FixedRecipINTEL", spv::Op::OpFixedRecipINTEL, 0, nullptr, 1, pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL, 9, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"FixedRsqrtINTEL", spv::Op::OpFixedRsqrtINTEL, 0, nullptr, 1, pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL, 9, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"FixedSinINTEL", spv::Op::OpFixedSinINTEL, 0, nullptr, 1, pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL, 9, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"FixedCosINTEL", spv::Op::OpFixedCosINTEL, 0, nullptr, 1, pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL, 9, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"FixedSinCosINTEL", spv::Op::OpFixedSinCosINTEL, 0, nullptr, 1, pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL, 9, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"FixedSinPiINTEL", spv::Op::OpFixedSinPiINTEL, 0, nullptr, 1, pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL, 9, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"FixedCosPiINTEL", spv::Op::OpFixedCosPiINTEL, 0, nullptr, 1, pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL, 9, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"FixedSinCosPiINTEL", spv::Op::OpFixedSinCosPiINTEL, 0, nullptr, 1, pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL, 9, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"FixedLogINTEL", spv::Op::OpFixedLogINTEL, 0, nullptr, 1, pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL, 9, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"FixedExpINTEL", spv::Op::OpFixedExpINTEL, 0, nullptr, 1, pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL, 9, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"FixedSqrtINTEL", spv::Op::OpFixedSqrtINTEL, 0, nullptr, 1, pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL, 8, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"FixedRecipINTEL", spv::Op::OpFixedRecipINTEL, 0, nullptr, 1, pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL, 8, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"FixedRsqrtINTEL", spv::Op::OpFixedRsqrtINTEL, 0, nullptr, 1, pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL, 8, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"FixedSinINTEL", spv::Op::OpFixedSinINTEL, 0, nullptr, 1, pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL, 8, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"FixedCosINTEL", spv::Op::OpFixedCosINTEL, 0, nullptr, 1, pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL, 8, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"FixedSinCosINTEL", spv::Op::OpFixedSinCosINTEL, 0, nullptr, 1, pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL, 8, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"FixedSinPiINTEL", spv::Op::OpFixedSinPiINTEL, 0, nullptr, 1, pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL, 8, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"FixedCosPiINTEL", spv::Op::OpFixedCosPiINTEL, 0, nullptr, 1, pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL, 8, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"FixedSinCosPiINTEL", spv::Op::OpFixedSinCosPiINTEL, 0, nullptr, 1, pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL, 8, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"FixedLogINTEL", spv::Op::OpFixedLogINTEL, 0, nullptr, 1, pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL, 8, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"FixedExpINTEL", spv::Op::OpFixedExpINTEL, 0, nullptr, 1, pygen_variable_caps_ArbitraryPrecisionFixedPointINTEL, 8, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER, SPV_OPERAND_TYPE_LITERAL_INTEGER}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"PtrCastToCrossWorkgroupINTEL", spv::Op::OpPtrCastToCrossWorkgroupINTEL, 0, nullptr, 1, pygen_variable_caps_USMStorageClassesINTEL, 3, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"CrossWorkgroupCastToPtrINTEL", spv::Op::OpCrossWorkgroupCastToPtrINTEL, 0, nullptr, 1, pygen_variable_caps_USMStorageClassesINTEL, 3, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID}, 1, 1, 0, nullptr, 0xffffffffu, 0xffffffffu},
|
||||
{"ReadPipeBlockingINTEL", spv::Op::OpReadPipeBlockingINTEL, 0, nullptr, 1, pygen_variable_caps_BlockingPipesINTEL, 4, {SPV_OPERAND_TYPE_TYPE_ID, SPV_OPERAND_TYPE_RESULT_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID}, 1, 1, 1, pygen_variable_exts_SPV_INTEL_blocking_pipes, 0xffffffffu, 0xffffffffu},
|
||||
|
||||
@@ -38,10 +38,11 @@
{37, "heroseh", "Hero C Compiler", "heroseh Hero C Compiler"},
{38, "Meta", "SparkSL", "Meta SparkSL"},
{39, "SirLynix", "Nazara ShaderLang Compiler", "SirLynix Nazara ShaderLang Compiler"},
{40, "NVIDIA", "Slang Compiler", "NVIDIA Slang Compiler"},
{40, "Khronos", "Slang Compiler", "Khronos Slang Compiler"},
{41, "Zig Software Foundation", "Zig Compiler", "Zig Software Foundation Zig Compiler"},
{42, "Rendong Liang", "spq", "Rendong Liang spq"},
{43, "LLVM", "LLVM SPIR-V Backend", "LLVM LLVM SPIR-V Backend"},
{44, "Robert Konrad", "Kongruent", "Robert Konrad Kongruent"},
{45, "Kitsunebi Games", "Nuvk SPIR-V Emitter and DLSL compiler", "Kitsunebi Games Nuvk SPIR-V Emitter and DLSL compiler"},
{46, "Nintendo", "", "Nintendo"},
{47, "ARM", "", "ARM"},
@@ -41,5 +41,6 @@ static const spv_ext_inst_desc_t nonsemantic_clspvreflection_entries[] = {
{"PrintfInfo", 38, 0, nullptr, {SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_VARIABLE_ID, SPV_OPERAND_TYPE_NONE}},
{"PrintfBufferStorageBuffer", 39, 0, nullptr, {SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_NONE}},
{"PrintfBufferPointerPushConstant", 40, 0, nullptr, {SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_NONE}},
{"NormalizedSamplerMaskPushConstant", 41, 0, nullptr, {SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_NONE}}
{"NormalizedSamplerMaskPushConstant", 41, 0, nullptr, {SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_NONE}},
{"WorkgroupVariableSize", 42, 0, nullptr, {SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_ID, SPV_OPERAND_TYPE_NONE}}
};
@@ -1263,7 +1263,7 @@ static const spv_operand_desc_t pygen_variable_CapabilityEntries[] = {
{"MultiView", 4439, 0, nullptr, 1, pygen_variable_caps_Shader, 1, pygen_variable_exts_SPV_KHR_multiview, {}, SPV_SPIRV_VERSION_WORD(1,3), 0xffffffffu},
{"VariablePointersStorageBuffer", 4441, 0, nullptr, 1, pygen_variable_caps_Shader, 1, pygen_variable_exts_SPV_KHR_variable_pointers, {}, SPV_SPIRV_VERSION_WORD(1,3), 0xffffffffu},
{"VariablePointers", 4442, 0, nullptr, 1, pygen_variable_caps_VariablePointersStorageBuffer, 1, pygen_variable_exts_SPV_KHR_variable_pointers, {}, SPV_SPIRV_VERSION_WORD(1,3), 0xffffffffu},
{"AtomicStorageOps", 4445, 0, nullptr, 0, nullptr, 1, pygen_variable_exts_SPV_KHR_shader_atomic_counter_ops, {}, 0xffffffffu, 0xffffffffu},
{"AtomicStorageOps", 4445, 0, nullptr, 1, pygen_variable_caps_AtomicStorage, 1, pygen_variable_exts_SPV_KHR_shader_atomic_counter_ops, {}, 0xffffffffu, 0xffffffffu},
{"SampleMaskPostDepthCoverage", 4447, 0, nullptr, 0, nullptr, 1, pygen_variable_exts_SPV_KHR_post_depth_coverage, {}, 0xffffffffu, 0xffffffffu},
{"StorageBuffer8BitAccess", 4448, 0, nullptr, 0, nullptr, 1, pygen_variable_exts_SPV_KHR_8bit_storage, {}, SPV_SPIRV_VERSION_WORD(1,5), 0xffffffffu},
{"UniformAndStorageBuffer8BitAccess", 4449, 0, nullptr, 1, pygen_variable_caps_StorageBuffer8BitAccess, 1, pygen_variable_exts_SPV_KHR_8bit_storage, {}, SPV_SPIRV_VERSION_WORD(1,5), 0xffffffffu},
@@ -741,6 +741,10 @@ SPIRV_TOOLS_EXPORT void spvValidatorOptionsSetAllowLocalSizeId(
SPIRV_TOOLS_EXPORT void spvValidatorOptionsSetAllowOffsetTextureOperand(
spv_validator_options options, bool val);

// Allow base operands of some bit operations to be non-32-bit wide.
SPIRV_TOOLS_EXPORT void spvValidatorOptionsSetAllowVulkan32BitBitwise(
spv_validator_options options, bool val);

// Whether friendly names should be used in validation error messages.
SPIRV_TOOLS_EXPORT void spvValidatorOptionsSetFriendlyNames(
spv_validator_options options, bool val);
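
For reference, a minimal hedged sketch of toggling the newly added option through the C API; the create/validate/destroy calls are the usual libspirv.h entry points, and `context` (an spv_context) and `binary` (an spv_const_binary holding the module) are assumed to exist already:

spv_validator_options options = spvValidatorOptionsCreate();
spvValidatorOptionsSetAllowVulkan32BitBitwise(options, true);
spv_diagnostic diagnostic = nullptr;
// Validate with the relaxed bitwise-width rule enabled.
spv_result_t result = spvValidateWithOptions(context, options, binary, &diagnostic);
spvValidatorOptionsDestroy(options);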
@@ -132,6 +132,11 @@ class SPIRV_TOOLS_EXPORT ValidatorOptions {
spvValidatorOptionsSetAllowOffsetTextureOperand(options_, val);
}

// Allow base operands of some bit operations to be non-32-bit wide.
void SetAllowVulkan32BitBitwise(bool val) {
spvValidatorOptionsSetAllowVulkan32BitBitwise(options_, val);
}

// Records whether or not the validator should relax the rules on pointer
// usage in logical addressing mode.
//
@@ -240,7 +240,7 @@ class SPIRV_TOOLS_EXPORT Optimizer {

private:
struct SPIRV_TOOLS_LOCAL Impl; // Opaque struct for holding internal data.
std::unique_ptr<Impl> impl_; // Unique pointer to internal data.
};

// Creates a null pass.
@@ -655,7 +655,7 @@ Optimizer::PassToken CreateRedundancyEliminationPass();
// element if those elements are accessed individually. The parameter is a
// limit on the number of members in the composite variable that the pass will
// consider replacing.
Optimizer::PassToken CreateScalarReplacementPass(uint32_t size_limit = 100);
Optimizer::PassToken CreateScalarReplacementPass(uint32_t size_limit = 0);
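
For context, a hedged sketch of how this pass is typically registered through the public Optimizer interface; the target environment and the `binary` vector of module words are placeholders:

spvtools::Optimizer opt(SPV_ENV_VULKAN_1_2);
// A size_limit of 0 places no cap on the number of members considered,
// matching the new default; pass a positive value to restrict the pass.
opt.RegisterPass(spvtools::CreateScalarReplacementPass(0));
std::vector<uint32_t> optimized;
opt.Run(binary.data(), binary.size(), &optimized);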

// Create a private to local pass.
// This pass looks for variables declared in the private storage class that are
@@ -968,6 +968,11 @@ Optimizer::PassToken CreateInvocationInterlockPlacementPass();
// Creates a pass to add/remove maximal reconvergence execution mode.
// This pass either adds or removes maximal reconvergence from all entry points.
Optimizer::PassToken CreateModifyMaximalReconvergencePass(bool add);

// Creates a pass to split combined image+sampler variables and function
// parameters into separate image and sampler parts. Binding numbers and
// other decorations are copied.
Optimizer::PassToken CreateSplitCombinedImageSamplerPass();
} // namespace spvtools

#endif // INCLUDE_SPIRV_TOOLS_OPTIMIZER_HPP_
62 3rdparty/spirv-tools/source/diff/diff.cpp vendored
@@ -67,7 +67,9 @@ void CompactIds(std::vector<uint32_t>& ids) {
|
||||
ids.resize(write_index);
|
||||
}
|
||||
|
||||
// A mapping between src and dst ids.
|
||||
// A mapping from ids in one module to ids in the other.
|
||||
//
|
||||
// Differ contains two of these, for src->dst and dst->src.
|
||||
class IdMap {
|
||||
public:
|
||||
IdMap(size_t id_bound) { id_map_.resize(id_bound, 0); }
|
||||
@@ -190,6 +192,7 @@ class SrcDstIdMap {
|
||||
IdMap dst_to_src_;
|
||||
};
|
||||
|
||||
// Mappings from ids to instructions and metadata, for a single module's ids.
|
||||
struct IdInstructions {
|
||||
IdInstructions(const opt::Module* module)
|
||||
: inst_map_(module->IdBound(), nullptr),
|
||||
@@ -198,6 +201,10 @@ struct IdInstructions {
|
||||
forward_pointer_map_(module->IdBound()) {
|
||||
// Map ids from all sections to instructions that define them.
|
||||
MapIdsToInstruction(module->ext_inst_imports());
|
||||
MapIdsToInstruction(module->debugs1());
|
||||
MapIdsToInstruction(module->debugs2());
|
||||
MapIdsToInstruction(module->debugs3());
|
||||
MapIdsToInstruction(module->ext_inst_debuginfo());
|
||||
MapIdsToInstruction(module->types_values());
|
||||
for (const opt::Function& function : *module) {
|
||||
function.ForEachInst(
|
||||
@@ -321,6 +328,8 @@ class Differ {
|
||||
// Get various properties from an id. These Helper functions are passed to
|
||||
// `GroupIds` and `GroupIdsAndMatch` below (as the `get_group` argument).
|
||||
uint32_t GroupIdsHelperGetTypeId(const IdInstructions& id_to, uint32_t id);
|
||||
uint32_t GroupIdsHelperGetFunctionTypeId(const IdInstructions& id_to,
|
||||
uint32_t id);
|
||||
spv::StorageClass GroupIdsHelperGetTypePointerStorageClass(
|
||||
const IdInstructions& id_to, uint32_t id);
|
||||
spv::Op GroupIdsHelperGetTypePointerTypeOp(const IdInstructions& id_to,
|
||||
@@ -883,6 +892,17 @@ uint32_t Differ::GroupIdsHelperGetTypeId(const IdInstructions& id_to,
|
||||
return GetInst(id_to, id)->type_id();
|
||||
}
|
||||
|
||||
// Return an `OpFunction` instruction's full `OpTypeFunction` type,
|
||||
// which includes parameter types.
|
||||
//
|
||||
// `GroupIdsHelperGetTypeId` applied to an `OpFunction` only gets the
|
||||
// function's return type, so this is a slightly more precise way to
|
||||
// match up functions by signature.
|
||||
uint32_t Differ::GroupIdsHelperGetFunctionTypeId(const IdInstructions& id_to,
|
||||
uint32_t id) {
|
||||
return GetInst(id_to, id)->GetSingleWordOperand(3);
|
||||
}
|
||||
|
||||
spv::StorageClass Differ::GroupIdsHelperGetTypePointerStorageClass(
|
||||
const IdInstructions& id_to, uint32_t id) {
|
||||
const opt::Instruction* inst = GetInst(id_to, id);
|
||||
@@ -902,6 +922,24 @@ spv::Op Differ::GroupIdsHelperGetTypePointerTypeOp(const IdInstructions& id_to,
|
||||
return type_inst->opcode();
|
||||
}
|
||||
|
||||
// Group unmatched ids in `ids` according to some characteristic,
|
||||
// determined by `get_group`.
|
||||
//
|
||||
// Using `get_group` to compute some sort of key for each id, set
|
||||
// `groups` to map each key to all the ids that have that key.
|
||||
//
|
||||
// For example, to group ids by name, pass `Differ::GetName` as
|
||||
// `get_group`. This will fill `groups` with a map from each name to
|
||||
// all the ids with that name.
|
||||
//
|
||||
// Under the assumption that we're trying to establish new pairings,
|
||||
// ids that are already paired are omitted from `groups`.
|
||||
//
|
||||
// The `is_src` parameter indicates whether `ids` are drawn from the
|
||||
// source module or the destination module.
|
||||
//
|
||||
// The template parameter `T` is the key type, like `std::string` or
|
||||
// `uint32_t`.
|
||||
template <typename T>
|
||||
void Differ::GroupIds(const IdGroup& ids, bool is_src,
|
||||
std::map<T, IdGroup>* groups,
|
||||
@@ -924,6 +962,10 @@ void Differ::GroupIds(const IdGroup& ids, bool is_src,
|
||||
}
|
||||
}
|
||||
|
||||
// Group `src_ids` and `dst_ids` according to `get_group`, and then use
|
||||
// `match_group` to pair up ids in corresponding groups.
|
||||
//
|
||||
// Don't try to pair ids in groups whose key is `invalid_group_key`.
|
||||
template <typename T>
|
||||
void Differ::GroupIdsAndMatch(
|
||||
const IdGroup& src_ids, const IdGroup& dst_ids, T invalid_group_key,
|
||||
@@ -2483,7 +2525,7 @@ void Differ::MatchFunctions() {
|
||||
|
||||
// If there are multiple functions with the same name, group them by
|
||||
// type, and match only if the types match (and are unique).
|
||||
GroupIdsAndMatch<uint32_t>(src_group, dst_group, 0,
|
||||
GroupIdsAndMatchByMappedId(src_group, dst_group,
|
||||
&Differ::GroupIdsHelperGetTypeId,
|
||||
[this](const IdGroup& src_group_by_type_id,
|
||||
const IdGroup& dst_group_by_type_id) {
|
||||
@@ -2526,9 +2568,19 @@ void Differ::MatchFunctions() {
|
||||
dst_match_result, 0);
|
||||
}
|
||||
|
||||
// Best effort match functions with matching type.
|
||||
GroupIdsAndMatch<uint32_t>(
|
||||
src_func_ids, dst_func_ids, 0, &Differ::GroupIdsHelperGetTypeId,
|
||||
// Best effort match functions with matching return and argument types.
|
||||
GroupIdsAndMatchByMappedId(
|
||||
src_func_ids, dst_func_ids, &Differ::GroupIdsHelperGetFunctionTypeId,
|
||||
[this](const IdGroup& src_group_by_func_type_id,
|
||||
const IdGroup& dst_group_by_func_type_id) {
|
||||
BestEffortMatchFunctions(src_group_by_func_type_id,
|
||||
dst_group_by_func_type_id, src_func_insts_,
|
||||
dst_func_insts_);
|
||||
});
|
||||
|
||||
// Best effort match functions with matching return types.
|
||||
GroupIdsAndMatchByMappedId(
|
||||
src_func_ids, dst_func_ids, &Differ::GroupIdsHelperGetTypeId,
|
||||
[this](const IdGroup& src_group_by_type_id,
|
||||
const IdGroup& dst_group_by_type_id) {
|
||||
BestEffortMatchFunctions(src_group_by_type_id, dst_group_by_type_id,
|
||||
|
||||
@@ -276,6 +276,7 @@ CopyPropagateArrays::GetSourceObjectIfAny(uint32_t result) {
|
||||
case spv::Op::OpCompositeConstruct:
|
||||
return BuildMemoryObjectFromCompositeConstruct(result_inst);
|
||||
case spv::Op::OpCopyObject:
|
||||
case spv::Op::OpCopyLogical:
|
||||
return GetSourceObjectIfAny(result_inst->GetSingleWordInOperand(0));
|
||||
case spv::Op::OpCompositeInsert:
|
||||
return BuildMemoryObjectFromInsert(result_inst);
|
||||
|
||||
186 3rdparty/spirv-tools/source/opt/folding_rules.cpp vendored
@@ -2454,6 +2454,31 @@ FoldingRule RedundantFDiv() {
|
||||
FloatConstantKind kind0 = getFloatConstantKind(constants[0]);
|
||||
FloatConstantKind kind1 = getFloatConstantKind(constants[1]);
|
||||
|
||||
if (kind0 == FloatConstantKind::Zero || kind1 == FloatConstantKind::One) {
|
||||
inst->SetOpcode(spv::Op::OpCopyObject);
|
||||
inst->SetInOperands(
|
||||
{{SPV_OPERAND_TYPE_ID, {inst->GetSingleWordInOperand(0)}}});
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
};
|
||||
}
|
||||
|
||||
FoldingRule RedundantFMod() {
|
||||
return [](IRContext* context, Instruction* inst,
|
||||
const std::vector<const analysis::Constant*>& constants) {
|
||||
assert(inst->opcode() == spv::Op::OpFMod &&
|
||||
"Wrong opcode. Should be OpFMod.");
|
||||
assert(constants.size() == 2);
|
||||
|
||||
if (!inst->IsFloatingPointFoldingAllowed()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
FloatConstantKind kind0 = getFloatConstantKind(constants[0]);
|
||||
FloatConstantKind kind1 = getFloatConstantKind(constants[1]);
|
||||
|
||||
if (kind0 == FloatConstantKind::Zero) {
|
||||
inst->SetOpcode(spv::Op::OpCopyObject);
|
||||
inst->SetInOperands(
|
||||
@@ -2462,9 +2487,15 @@ FoldingRule RedundantFDiv() {
|
||||
}
|
||||
|
||||
if (kind1 == FloatConstantKind::One) {
|
||||
auto type = context->get_type_mgr()->GetType(inst->type_id());
|
||||
std::vector<uint32_t> zero_words;
|
||||
zero_words.resize(ElementWidth(type) / 32);
|
||||
auto const_mgr = context->get_constant_mgr();
|
||||
auto zero = const_mgr->GetConstant(type, std::move(zero_words));
|
||||
auto zero_id = const_mgr->GetDefiningInstruction(zero)->result_id();
|
||||
|
||||
inst->SetOpcode(spv::Op::OpCopyObject);
|
||||
inst->SetInOperands(
|
||||
{{SPV_OPERAND_TYPE_ID, {inst->GetSingleWordInOperand(0)}}});
|
||||
inst->SetInOperands({{SPV_OPERAND_TYPE_ID, {zero_id}}});
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -2507,24 +2538,18 @@ FoldingRule RedundantFMix() {
|
||||
};
|
||||
}
|
||||
|
||||
// This rule handles addition of zero for integers.
|
||||
FoldingRule RedundantIAdd() {
|
||||
return [](IRContext* context, Instruction* inst,
|
||||
const std::vector<const analysis::Constant*>& constants) {
|
||||
assert(inst->opcode() == spv::Op::OpIAdd &&
|
||||
"Wrong opcode. Should be OpIAdd.");
|
||||
// Returns a folding rule that folds the instruction to operand |foldToArg|
|
||||
// (0 or 1) if operand |arg| (0 or 1) is a zero constant.
|
||||
FoldingRule RedundantBinaryOpWithZeroOperand(uint32_t arg, uint32_t foldToArg) {
|
||||
return [arg, foldToArg](
|
||||
IRContext* context, Instruction* inst,
|
||||
const std::vector<const analysis::Constant*>& constants) {
|
||||
assert(constants.size() == 2);
|
||||
|
||||
uint32_t operand = std::numeric_limits<uint32_t>::max();
|
||||
const analysis::Type* operand_type = nullptr;
|
||||
if (constants[0] && constants[0]->IsZero()) {
|
||||
operand = inst->GetSingleWordInOperand(1);
|
||||
operand_type = constants[0]->type();
|
||||
} else if (constants[1] && constants[1]->IsZero()) {
|
||||
operand = inst->GetSingleWordInOperand(0);
|
||||
operand_type = constants[1]->type();
|
||||
}
|
||||
if (constants[arg] && constants[arg]->IsZero()) {
|
||||
auto operand = inst->GetSingleWordInOperand(foldToArg);
|
||||
auto operand_type = constants[arg]->type();
|
||||
|
||||
if (operand != std::numeric_limits<uint32_t>::max()) {
|
||||
const analysis::Type* inst_type =
|
||||
context->get_type_mgr()->GetType(inst->type_id());
|
||||
if (inst_type->IsSame(operand_type)) {
|
||||
@@ -2539,6 +2564,117 @@ FoldingRule RedundantIAdd() {
|
||||
};
|
||||
}
|
||||
|
||||
// This rule handles any of RedundantBinaryRhs0Ops with a 0 or vector 0 on the
|
||||
// right-hand side (a | 0 => a).
|
||||
static const constexpr spv::Op RedundantBinaryRhs0Ops[] = {
|
||||
spv::Op::OpBitwiseOr,
|
||||
spv::Op::OpBitwiseXor,
|
||||
spv::Op::OpShiftRightLogical,
|
||||
spv::Op::OpShiftRightArithmetic,
|
||||
spv::Op::OpShiftLeftLogical,
|
||||
spv::Op::OpIAdd,
|
||||
spv::Op::OpISub};
|
||||
FoldingRule RedundantBinaryRhs0(spv::Op op) {
|
||||
assert(std::find(std::begin(RedundantBinaryRhs0Ops),
|
||||
std::end(RedundantBinaryRhs0Ops),
|
||||
op) != std::end(RedundantBinaryRhs0Ops) &&
|
||||
"Wrong opcode.");
|
||||
(void)op;
|
||||
return RedundantBinaryOpWithZeroOperand(1, 0);
|
||||
}
|
||||
|
||||
// This rule handles any of RedundantBinaryLhs0Ops with a 0 or vector 0 on the
|
||||
// left-hand side (0 | a => a).
|
||||
static const constexpr spv::Op RedundantBinaryLhs0Ops[] = {
|
||||
spv::Op::OpBitwiseOr, spv::Op::OpBitwiseXor, spv::Op::OpIAdd};
|
||||
FoldingRule RedundantBinaryLhs0(spv::Op op) {
|
||||
assert(std::find(std::begin(RedundantBinaryLhs0Ops),
|
||||
std::end(RedundantBinaryLhs0Ops),
|
||||
op) != std::end(RedundantBinaryLhs0Ops) &&
|
||||
"Wrong opcode.");
|
||||
(void)op;
|
||||
return RedundantBinaryOpWithZeroOperand(0, 1);
|
||||
}
|
||||
|
||||
// This rule handles shifts and divisions of 0 or vector 0 by any amount
|
||||
// (0 >> a => 0).
|
||||
static const constexpr spv::Op RedundantBinaryLhs0To0Ops[] = {
|
||||
spv::Op::OpShiftRightLogical,
|
||||
spv::Op::OpShiftRightArithmetic,
|
||||
spv::Op::OpShiftLeftLogical,
|
||||
spv::Op::OpSDiv,
|
||||
spv::Op::OpUDiv,
|
||||
spv::Op::OpSMod,
|
||||
spv::Op::OpUMod};
|
||||
FoldingRule RedundantBinaryLhs0To0(spv::Op op) {
|
||||
assert(std::find(std::begin(RedundantBinaryLhs0To0Ops),
|
||||
std::end(RedundantBinaryLhs0To0Ops),
|
||||
op) != std::end(RedundantBinaryLhs0To0Ops) &&
|
||||
"Wrong opcode.");
|
||||
(void)op;
|
||||
return RedundantBinaryOpWithZeroOperand(0, 0);
|
||||
}
|
||||
|
||||
// Returns true if all elements in |c| are 1.
|
||||
bool IsAllInt1(const analysis::Constant* c) {
|
||||
if (auto composite = c->AsCompositeConstant()) {
|
||||
auto& components = composite->GetComponents();
|
||||
return std::all_of(std::begin(components), std::end(components), IsAllInt1);
|
||||
} else if (c->AsIntConstant()) {
|
||||
return c->GetSignExtendedValue() == 1;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
// This rule handles divisions by 1 or vector 1 (a / 1 => a).
|
||||
FoldingRule RedundantSUDiv() {
|
||||
return [](IRContext* context, Instruction* inst,
|
||||
const std::vector<const analysis::Constant*>& constants) {
|
||||
assert(constants.size() == 2);
|
||||
assert((inst->opcode() == spv::Op::OpUDiv ||
|
||||
inst->opcode() == spv::Op::OpSDiv) &&
|
||||
"Wrong opcode.");
|
||||
|
||||
if (constants[1] && IsAllInt1(constants[1])) {
|
||||
auto operand = inst->GetSingleWordInOperand(0);
|
||||
auto operand_type = constants[1]->type();
|
||||
|
||||
const analysis::Type* inst_type =
|
||||
context->get_type_mgr()->GetType(inst->type_id());
|
||||
if (inst_type->IsSame(operand_type)) {
|
||||
inst->SetOpcode(spv::Op::OpCopyObject);
|
||||
} else {
|
||||
inst->SetOpcode(spv::Op::OpBitcast);
|
||||
}
|
||||
inst->SetInOperands({{SPV_OPERAND_TYPE_ID, {operand}}});
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
};
|
||||
}
|
||||
|
||||
// This rule handles modulo from division by 1 or vector 1 (a % 1 => 0).
|
||||
FoldingRule RedundantSUMod() {
|
||||
return [](IRContext* context, Instruction* inst,
|
||||
const std::vector<const analysis::Constant*>& constants) {
|
||||
assert(constants.size() == 2);
|
||||
assert((inst->opcode() == spv::Op::OpUMod ||
|
||||
inst->opcode() == spv::Op::OpSMod) &&
|
||||
"Wrong opcode.");
|
||||
|
||||
if (constants[1] && IsAllInt1(constants[1])) {
|
||||
auto type = context->get_type_mgr()->GetType(inst->type_id());
|
||||
auto zero_id = context->get_constant_mgr()->GetNullConstId(type);
|
||||
|
||||
inst->SetOpcode(spv::Op::OpCopyObject);
|
||||
inst->SetInOperands({{SPV_OPERAND_TYPE_ID, {zero_id}}});
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
};
|
||||
}
|
||||
|
||||
// This rule looks for a dot with a constant vector containing a single 1 and
|
||||
// the rest 0s. This is the same as doing an extract.
|
||||
FoldingRule DotProductDoingExtract() {
|
||||
@@ -2876,6 +3012,17 @@ void FoldingRules::AddFoldingRules() {
|
||||
// Note that the order in which rules are added to the list matters. If a rule
|
||||
// applies to the instruction, the rest of the rules will not be attempted.
|
||||
// Take that into consideration.
|
||||
for (auto op : RedundantBinaryRhs0Ops)
|
||||
rules_[op].push_back(RedundantBinaryRhs0(op));
|
||||
for (auto op : RedundantBinaryLhs0Ops)
|
||||
rules_[op].push_back(RedundantBinaryLhs0(op));
|
||||
for (auto op : RedundantBinaryLhs0To0Ops)
|
||||
rules_[op].push_back(RedundantBinaryLhs0To0(op));
|
||||
rules_[spv::Op::OpSDiv].push_back(RedundantSUDiv());
|
||||
rules_[spv::Op::OpUDiv].push_back(RedundantSUDiv());
|
||||
rules_[spv::Op::OpSMod].push_back(RedundantSUMod());
|
||||
rules_[spv::Op::OpUMod].push_back(RedundantSUMod());
|
||||
|
||||
rules_[spv::Op::OpBitcast].push_back(BitCastScalarOrVector());
|
||||
|
||||
rules_[spv::Op::OpCompositeConstruct].push_back(
|
||||
@@ -2907,6 +3054,8 @@ void FoldingRules::AddFoldingRules() {
|
||||
rules_[spv::Op::OpFDiv].push_back(MergeDivMulArithmetic());
|
||||
rules_[spv::Op::OpFDiv].push_back(MergeDivNegateArithmetic());
|
||||
|
||||
rules_[spv::Op::OpFMod].push_back(RedundantFMod());
|
||||
|
||||
rules_[spv::Op::OpFMul].push_back(RedundantFMul());
|
||||
rules_[spv::Op::OpFMul].push_back(MergeMulMulArithmetic());
|
||||
rules_[spv::Op::OpFMul].push_back(MergeMulDivArithmetic());
|
||||
@@ -2921,7 +3070,6 @@ void FoldingRules::AddFoldingRules() {
|
||||
rules_[spv::Op::OpFSub].push_back(MergeSubAddArithmetic());
|
||||
rules_[spv::Op::OpFSub].push_back(MergeSubSubArithmetic());
|
||||
|
||||
rules_[spv::Op::OpIAdd].push_back(RedundantIAdd());
|
||||
rules_[spv::Op::OpIAdd].push_back(MergeAddNegateArithmetic());
|
||||
rules_[spv::Op::OpIAdd].push_back(MergeAddAddArithmetic());
|
||||
rules_[spv::Op::OpIAdd].push_back(MergeAddSubArithmetic());
|
||||
|
||||
21 3rdparty/spirv-tools/source/opt/function.h vendored
@@ -17,6 +17,7 @@
|
||||
|
||||
#include <algorithm>
|
||||
#include <functional>
|
||||
#include <iterator>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <unordered_set>
|
||||
@@ -39,6 +40,7 @@ class Function {
|
||||
public:
|
||||
using iterator = UptrVectorIterator<BasicBlock>;
|
||||
using const_iterator = UptrVectorIterator<BasicBlock, true>;
|
||||
using ParamList = std::vector<std::unique_ptr<Instruction>>;
|
||||
|
||||
// Creates a function instance declared by the given OpFunction instruction
|
||||
// |def_inst|.
|
||||
@@ -77,6 +79,23 @@ class Function {
|
||||
// Does nothing if the function doesn't have such a parameter.
|
||||
inline void RemoveParameter(uint32_t id);
|
||||
|
||||
// Rewrites the function parameters by calling a replacer callback.
|
||||
// The replacer takes two parameters: an expiring unique pointer to a current
|
||||
// instruction, and a back-inserter into a new list of unique pointers to
|
||||
// instructions. The replacer is called for each current parameter, in order.
|
||||
// Not valid to call while also iterating through the parameter list, e.g.
|
||||
// within the ForEachParam method.
|
||||
using RewriteParamFn = std::function<void(
|
||||
std::unique_ptr<Instruction>&&, std::back_insert_iterator<ParamList>&)>;
|
||||
void RewriteParams(RewriteParamFn& replacer) {
|
||||
ParamList new_params;
|
||||
auto appender = std::back_inserter(new_params);
|
||||
for (auto& param : params_) {
|
||||
replacer(std::move(param), appender);
|
||||
}
|
||||
params_ = std::move(new_params);
|
||||
}
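
A small illustrative sketch (not part of the change itself) of a replacer that drops one parameter by result id and keeps the rest, using the RewriteParamFn and ParamList aliases declared above; `func` and `id_to_drop` are hypothetical:

Function::RewriteParamFn replacer =
    [&](std::unique_ptr<Instruction>&& param,
        std::back_insert_iterator<Function::ParamList>& appender) {
      if (param->result_id() != id_to_drop) {
        *appender++ = std::move(param);  // keep this parameter
      }
      // Otherwise the unique_ptr expires here and the parameter is removed.
    };
func->RewriteParams(replacer);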
|
||||
|
||||
// Saves the given function end instruction.
|
||||
inline void SetFunctionEnd(std::unique_ptr<Instruction> end_inst);
|
||||
|
||||
@@ -197,7 +216,7 @@ class Function {
|
||||
// The OpFunction instruction that begins the definition of this function.
|
||||
std::unique_ptr<Instruction> def_inst_;
|
||||
// All parameters to this function.
|
||||
std::vector<std::unique_ptr<Instruction>> params_;
|
||||
ParamList params_;
|
||||
// All debug instructions in this function's header.
|
||||
InstructionList debug_insts_in_header_;
|
||||
// All basic blocks inside this function in specification order
|
||||
|
||||
@@ -55,6 +55,11 @@ Pass::Status InlineExhaustivePass::InlineExhaustive(Function* func) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (modified) {
|
||||
FixDebugDeclares(func);
|
||||
}
|
||||
|
||||
return (modified ? Status::SuccessWithChange : Status::SuccessWithoutChange);
|
||||
}
|
||||
|
||||
|
||||
65 3rdparty/spirv-tools/source/opt/inline_pass.cpp vendored
@@ -30,6 +30,8 @@ namespace {
|
||||
constexpr int kSpvFunctionCallFunctionId = 2;
|
||||
constexpr int kSpvFunctionCallArgumentId = 3;
|
||||
constexpr int kSpvReturnValueId = 0;
|
||||
constexpr int kSpvDebugDeclareVarInIdx = 3;
|
||||
constexpr int kSpvAccessChainBaseInIdx = 0;
|
||||
} // namespace
|
||||
|
||||
uint32_t InlinePass::AddPointerToType(uint32_t type_id,
|
||||
@@ -858,5 +860,68 @@ void InlinePass::InitializeInline() {
|
||||
|
||||
InlinePass::InlinePass() {}
|
||||
|
||||
void InlinePass::FixDebugDeclares(Function* func) {
|
||||
std::map<uint32_t, Instruction*> access_chains;
|
||||
std::vector<Instruction*> debug_declare_insts;
|
||||
|
||||
func->ForEachInst([&access_chains, &debug_declare_insts](Instruction* inst) {
|
||||
if (inst->opcode() == spv::Op::OpAccessChain) {
|
||||
access_chains[inst->result_id()] = inst;
|
||||
}
|
||||
if (inst->GetCommonDebugOpcode() == CommonDebugInfoDebugDeclare) {
|
||||
debug_declare_insts.push_back(inst);
|
||||
}
|
||||
});
|
||||
|
||||
for (auto& inst : debug_declare_insts) {
|
||||
FixDebugDeclare(inst, access_chains);
|
||||
}
|
||||
}
|
||||
|
||||
void InlinePass::FixDebugDeclare(
|
||||
Instruction* dbg_declare_inst,
|
||||
const std::map<uint32_t, Instruction*>& access_chains) {
|
||||
do {
|
||||
uint32_t var_id =
|
||||
dbg_declare_inst->GetSingleWordInOperand(kSpvDebugDeclareVarInIdx);
|
||||
|
||||
// The def-use chains are not kept up to date while inlining, so we need to
|
||||
// get the variable by traversing the functions.
|
||||
auto it = access_chains.find(var_id);
|
||||
if (it == access_chains.end()) {
|
||||
return;
|
||||
}
|
||||
Instruction* access_chain = it->second;
|
||||
|
||||
// If the variable id in the debug declare is an access chain, it is
|
||||
// invalid. It needs to be fixed up. The debug declare will be updated so
|
||||
// that its Var operand becomes the base of the access chain. The indexes of
|
||||
// the access chain are prepended before the indexes of the debug declare.
|
||||
|
||||
std::vector<Operand> operands;
|
||||
for (int i = 0; i < kSpvDebugDeclareVarInIdx; i++) {
|
||||
operands.push_back(dbg_declare_inst->GetInOperand(i));
|
||||
}
|
||||
|
||||
uint32_t access_chain_base =
|
||||
access_chain->GetSingleWordInOperand(kSpvAccessChainBaseInIdx);
|
||||
operands.push_back(Operand(SPV_OPERAND_TYPE_ID, {access_chain_base}));
|
||||
operands.push_back(
|
||||
dbg_declare_inst->GetInOperand(kSpvDebugDeclareVarInIdx + 1));
|
||||
|
||||
for (uint32_t i = kSpvAccessChainBaseInIdx + 1;
|
||||
i < access_chain->NumInOperands(); ++i) {
|
||||
operands.push_back(access_chain->GetInOperand(i));
|
||||
}
|
||||
|
||||
for (uint32_t i = kSpvDebugDeclareVarInIdx + 2;
|
||||
i < dbg_declare_inst->NumInOperands(); ++i) {
|
||||
operands.push_back(dbg_declare_inst->GetInOperand(i));
|
||||
}
|
||||
|
||||
dbg_declare_inst->SetInOperands(std::move(operands));
|
||||
} while (true);
|
||||
}
|
||||
|
||||
} // namespace opt
|
||||
} // namespace spvtools
|
||||
|
||||
11 3rdparty/spirv-tools/source/opt/inline_pass.h vendored
@@ -150,6 +150,12 @@ class InlinePass : public Pass {
|
||||
// Initialize state for optimization of |module|
|
||||
void InitializeInline();
|
||||
|
||||
// Fixes invalid debug declare functions in `func` that were caused by
|
||||
// inlining. This function cannot be called while in the middle of inlining
|
||||
// because it needs to be able to find the instructions that define an
|
||||
// id.
|
||||
void FixDebugDeclares(Function* func);
|
||||
|
||||
// Map from function's result id to function.
|
||||
std::unordered_map<uint32_t, Function*> id2function_;
|
||||
|
||||
@@ -241,6 +247,11 @@ class InlinePass : public Pass {
|
||||
// structural dominance.
|
||||
void UpdateSingleBlockLoopContinueTarget(
|
||||
uint32_t new_id, std::vector<std::unique_ptr<BasicBlock>>* new_blocks);
|
||||
|
||||
// Replaces the `var` operand of `dbg_declare_inst` and updates the indexes
|
||||
// accordingly, if it is the id of an access chain in `access_chains`.
|
||||
void FixDebugDeclare(Instruction* dbg_declare_inst,
|
||||
const std::map<uint32_t, Instruction*>& access_chains);
|
||||
};
|
||||
|
||||
} // namespace opt
|
||||
|
||||
73 3rdparty/spirv-tools/source/opt/ir_builder.h vendored
@@ -15,6 +15,7 @@
|
||||
#ifndef SOURCE_OPT_IR_BUILDER_H_
|
||||
#define SOURCE_OPT_IR_BUILDER_H_
|
||||
|
||||
#include <cassert>
|
||||
#include <limits>
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
@@ -480,8 +481,11 @@ class InstructionBuilder {
|
||||
return AddInstruction(std::move(select));
|
||||
}
|
||||
|
||||
Instruction* AddAccessChain(uint32_t type_id, uint32_t base_ptr_id,
|
||||
std::vector<uint32_t> ids) {
|
||||
Instruction* AddOpcodeAccessChain(spv::Op opcode, uint32_t type_id,
|
||||
uint32_t base_ptr_id,
|
||||
const std::vector<uint32_t>& ids) {
|
||||
assert(opcode == spv::Op::OpAccessChain ||
|
||||
opcode == spv::Op::OpInBoundsAccessChain);
|
||||
std::vector<Operand> operands;
|
||||
operands.push_back({SPV_OPERAND_TYPE_ID, {base_ptr_id}});
|
||||
|
||||
@@ -490,12 +494,22 @@ class InstructionBuilder {
|
||||
}
|
||||
|
||||
// TODO(1841): Handle id overflow.
|
||||
std::unique_ptr<Instruction> new_inst(
|
||||
new Instruction(GetContext(), spv::Op::OpAccessChain, type_id,
|
||||
GetContext()->TakeNextId(), operands));
|
||||
std::unique_ptr<Instruction> new_inst(new Instruction(
|
||||
GetContext(), opcode, type_id, GetContext()->TakeNextId(), operands));
|
||||
return AddInstruction(std::move(new_inst));
|
||||
}
|
||||
|
||||
Instruction* AddAccessChain(uint32_t type_id, uint32_t base_ptr_id,
|
||||
const std::vector<uint32_t>& ids) {
|
||||
return AddOpcodeAccessChain(spv::Op::OpAccessChain, type_id, base_ptr_id,
|
||||
ids);
|
||||
}
|
||||
Instruction* AddInBoundsAccessChain(uint32_t type_id, uint32_t base_ptr_id,
|
||||
const std::vector<uint32_t>& ids) {
|
||||
return AddOpcodeAccessChain(spv::Op::OpInBoundsAccessChain, type_id,
|
||||
base_ptr_id, ids);
|
||||
}
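
A brief hedged example of the new helpers; it assumes an InstructionBuilder named `builder` already positioned at the desired insertion point, and the ids are placeholders:

// Emits %ptr = OpInBoundsAccessChain %ptr_type %base %index, then loads from it.
Instruction* ptr = builder.AddInBoundsAccessChain(ptr_type_id, base_ptr_id, {index_id});
Instruction* value = builder.AddLoad(element_type_id, ptr->result_id());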
|
||||
|
||||
Instruction* AddLoad(uint32_t type_id, uint32_t base_ptr_id,
|
||||
uint32_t alignment = 0) {
|
||||
std::vector<Operand> operands;
|
||||
@@ -514,9 +528,19 @@ class InstructionBuilder {
|
||||
return AddInstruction(std::move(new_inst));
|
||||
}
|
||||
|
||||
Instruction* AddCopyObject(uint32_t type_id, uint32_t value_id) {
|
||||
std::vector<Operand> operands{{SPV_OPERAND_TYPE_ID, {value_id}}};
|
||||
|
||||
// TODO(1841): Handle id overflow.
|
||||
std::unique_ptr<Instruction> new_inst(
|
||||
new Instruction(GetContext(), spv::Op::OpCopyObject, type_id,
|
||||
GetContext()->TakeNextId(), operands));
|
||||
return AddInstruction(std::move(new_inst));
|
||||
}
|
||||
|
||||
Instruction* AddVariable(uint32_t type_id, uint32_t storage_class) {
|
||||
std::vector<Operand> operands;
|
||||
operands.push_back({SPV_OPERAND_TYPE_ID, {storage_class}});
|
||||
operands.push_back({SPV_OPERAND_TYPE_STORAGE_CLASS, {storage_class}});
|
||||
std::unique_ptr<Instruction> new_inst(
|
||||
new Instruction(GetContext(), spv::Op::OpVariable, type_id,
|
||||
GetContext()->TakeNextId(), operands));
|
||||
@@ -572,6 +596,26 @@ class InstructionBuilder {
|
||||
return AddInstruction(std::move(new_inst));
|
||||
}
|
||||
|
||||
Instruction* AddDecoration(uint32_t target_id, spv::Decoration d,
|
||||
const std::vector<uint32_t>& literals) {
|
||||
std::vector<Operand> operands;
|
||||
operands.push_back({SPV_OPERAND_TYPE_ID, {target_id}});
|
||||
operands.push_back({SPV_OPERAND_TYPE_DECORATION, {uint32_t(d)}});
|
||||
for (uint32_t literal : literals) {
|
||||
operands.push_back({SPV_OPERAND_TYPE_LITERAL_INTEGER, {literal}});
|
||||
}
|
||||
|
||||
std::unique_ptr<Instruction> new_inst(
|
||||
new Instruction(GetContext(), spv::Op::OpDecorate, 0, 0, operands));
|
||||
// Decorations are annotation instructions. Add it via the IR context,
|
||||
// so the decoration manager will be updated.
|
||||
// Decorations don't belong to basic blocks, so there is no need
|
||||
// to update the instruction to block mapping.
|
||||
Instruction* result = new_inst.get();
|
||||
GetContext()->AddAnnotationInst(std::move(new_inst));
|
||||
return result;
|
||||
}
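
A short hedged example of the new AddDecoration helper with placeholder ids; as the comments above note, the instruction is attached as an annotation through the IR context rather than into a basic block:

// Equivalent to emitting: OpDecorate %new_var DescriptorSet 0
//                         OpDecorate %new_var Binding 3
builder.AddDecoration(new_var_id, spv::Decoration::DescriptorSet, {0});
builder.AddDecoration(new_var_id, spv::Decoration::Binding, {3});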
|
||||
|
||||
Instruction* AddNaryExtendedInstruction(
|
||||
uint32_t result_type, uint32_t set, uint32_t instruction,
|
||||
const std::vector<uint32_t>& ext_operands) {
|
||||
@@ -593,6 +637,23 @@ class InstructionBuilder {
|
||||
return AddInstruction(std::move(new_inst));
|
||||
}
|
||||
|
||||
Instruction* AddSampledImage(uint32_t sampled_image_type_id,
|
||||
uint32_t image_id, uint32_t sampler_id) {
|
||||
std::vector<Operand> operands;
|
||||
operands.push_back({SPV_OPERAND_TYPE_ID, {image_id}});
|
||||
operands.push_back({SPV_OPERAND_TYPE_ID, {sampler_id}});
|
||||
|
||||
uint32_t result_id = GetContext()->TakeNextId();
|
||||
if (result_id == 0) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
std::unique_ptr<Instruction> new_inst(
|
||||
new Instruction(GetContext(), spv::Op::OpSampledImage,
|
||||
sampled_image_type_id, result_id, operands));
|
||||
return AddInstruction(std::move(new_inst));
|
||||
}
|
||||
|
||||
// Inserts the new instruction before the insertion point.
|
||||
Instruction* AddInstruction(std::unique_ptr<Instruction>&& insn) {
|
||||
Instruction* insn_ptr = &*insert_before_.InsertBefore(std::move(insn));
|
||||
|
||||
@@ -564,6 +564,7 @@ void IRContext::AddCombinatorsForCapability(uint32_t capability) {
|
||||
(uint32_t)spv::Op::OpCompositeConstruct,
|
||||
(uint32_t)spv::Op::OpCompositeExtract,
|
||||
(uint32_t)spv::Op::OpCompositeInsert,
|
||||
(uint32_t)spv::Op::OpCopyLogical,
|
||||
(uint32_t)spv::Op::OpCopyObject,
|
||||
(uint32_t)spv::Op::OpTranspose,
|
||||
(uint32_t)spv::Op::OpSampledImage,
|
||||
|
||||
13 3rdparty/spirv-tools/source/opt/optimizer.cpp vendored
@@ -189,7 +189,7 @@ Optimizer& Optimizer::RegisterPerformancePasses(bool preserve_interface) {
|
||||
.RegisterPass(CreateLocalSingleBlockLoadStoreElimPass())
|
||||
.RegisterPass(CreateLocalSingleStoreElimPass())
|
||||
.RegisterPass(CreateAggressiveDCEPass(preserve_interface))
|
||||
.RegisterPass(CreateScalarReplacementPass())
|
||||
.RegisterPass(CreateScalarReplacementPass(0))
|
||||
.RegisterPass(CreateLocalAccessChainConvertPass())
|
||||
.RegisterPass(CreateLocalSingleBlockLoadStoreElimPass())
|
||||
.RegisterPass(CreateLocalSingleStoreElimPass())
|
||||
@@ -203,7 +203,7 @@ Optimizer& Optimizer::RegisterPerformancePasses(bool preserve_interface) {
|
||||
.RegisterPass(CreateRedundancyEliminationPass())
|
||||
.RegisterPass(CreateCombineAccessChainsPass())
|
||||
.RegisterPass(CreateSimplificationPass())
|
||||
.RegisterPass(CreateScalarReplacementPass())
|
||||
.RegisterPass(CreateScalarReplacementPass(0))
|
||||
.RegisterPass(CreateLocalAccessChainConvertPass())
|
||||
.RegisterPass(CreateLocalSingleBlockLoadStoreElimPass())
|
||||
.RegisterPass(CreateLocalSingleStoreElimPass())
|
||||
@@ -401,7 +401,7 @@ bool Optimizer::RegisterPassFromFlag(const std::string& flag,
|
||||
RegisterPass(CreateLoopUnswitchPass());
|
||||
} else if (pass_name == "scalar-replacement") {
|
||||
if (pass_args.size() == 0) {
|
||||
RegisterPass(CreateScalarReplacementPass());
|
||||
RegisterPass(CreateScalarReplacementPass(0));
|
||||
} else {
|
||||
int limit = -1;
|
||||
if (pass_args.find_first_not_of("0123456789") == std::string::npos) {
|
||||
@@ -637,6 +637,8 @@ bool Optimizer::RegisterPassFromFlag(const std::string& flag,
|
||||
}
|
||||
} else if (pass_name == "trim-capabilities") {
|
||||
RegisterPass(CreateTrimCapabilitiesPass());
|
||||
} else if (pass_name == "split-combined-image-sampler") {
|
||||
RegisterPass(CreateSplitCombinedImageSamplerPass());
|
||||
} else {
|
||||
Errorf(consumer(), nullptr, {},
|
||||
"Unknown flag '--%s'. Use --help for a list of valid flags",
|
||||
@@ -1188,6 +1190,11 @@ Optimizer::PassToken CreateOpExtInstWithForwardReferenceFixupPass() {
|
||||
MakeUnique<opt::OpExtInstWithForwardReferenceFixupPass>());
|
||||
}
|
||||
|
||||
Optimizer::PassToken CreateSplitCombinedImageSamplerPass() {
|
||||
return MakeUnique<Optimizer::PassToken::Impl>(
|
||||
MakeUnique<opt::SplitCombinedImageSamplerPass>());
|
||||
}
|
||||
|
||||
} // namespace spvtools
|
||||
|
||||
extern "C" {
|
||||
|
||||
1 3rdparty/spirv-tools/source/opt/passes.h vendored
@@ -77,6 +77,7 @@
|
||||
#include "source/opt/scalar_replacement_pass.h"
|
||||
#include "source/opt/set_spec_constant_default_value_pass.h"
|
||||
#include "source/opt/simplification_pass.h"
|
||||
#include "source/opt/split_combined_image_sampler_pass.h"
|
||||
#include "source/opt/spread_volatile_semantics.h"
|
||||
#include "source/opt/ssa_rewrite_pass.h"
|
||||
#include "source/opt/strength_reduction_pass.h"
|
||||
|
||||
@@ -33,7 +33,7 @@ namespace opt {
|
||||
// Documented in optimizer.hpp
|
||||
class ScalarReplacementPass : public MemPass {
|
||||
private:
|
||||
static constexpr uint32_t kDefaultLimit = 100;
|
||||
static constexpr uint32_t kDefaultLimit = 0;
|
||||
|
||||
public:
|
||||
ScalarReplacementPass(uint32_t limit = kDefaultLimit)
|
||||
|
||||
622 3rdparty/spirv-tools/source/opt/split_combined_image_sampler_pass.cpp vendored (new file)
@@ -0,0 +1,622 @@
|
||||
// Copyright (c) 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "source/opt/split_combined_image_sampler_pass.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <cassert>
|
||||
#include <memory>
|
||||
|
||||
#include "source/opt/instruction.h"
|
||||
#include "source/opt/ir_builder.h"
|
||||
#include "source/opt/ir_context.h"
|
||||
#include "source/opt/type_manager.h"
|
||||
#include "source/opt/types.h"
|
||||
#include "source/util/make_unique.h"
|
||||
#include "spirv/unified1/spirv.h"
|
||||
|
||||
namespace spvtools {
|
||||
namespace opt {
|
||||
|
||||
#define CHECK(cond) \
|
||||
{ \
|
||||
if ((cond) != SPV_SUCCESS) return Pass::Status::Failure; \
|
||||
}
|
||||
|
||||
#define CHECK_STATUS(cond) \
|
||||
{ \
|
||||
if (auto c = (cond); c != SPV_SUCCESS) return c; \
|
||||
}
|
||||
|
||||
IRContext::Analysis SplitCombinedImageSamplerPass::GetPreservedAnalyses() {
|
||||
return
|
||||
// def use manager is updated
|
||||
IRContext::kAnalysisDefUse
|
||||
|
||||
// decorations are updated
|
||||
| IRContext::kAnalysisDecorations
|
||||
|
||||
// control flow is not changed
|
||||
| IRContext::kAnalysisCFG //
|
||||
| IRContext::kAnalysisLoopAnalysis //
|
||||
| IRContext::kAnalysisStructuredCFG
|
||||
|
||||
// type manager is updated
|
||||
| IRContext::kAnalysisTypes;
|
||||
}
|
||||
|
||||
Pass::Status SplitCombinedImageSamplerPass::Process() {
|
||||
def_use_mgr_ = context()->get_def_use_mgr();
|
||||
type_mgr_ = context()->get_type_mgr();
|
||||
|
||||
FindCombinedTextureSamplers();
|
||||
if (combined_types_to_remove_.empty() && !sampled_image_used_as_param_) {
|
||||
return Ok();
|
||||
}
|
||||
|
||||
CHECK(RemapFunctions());
|
||||
CHECK(RemapVars());
|
||||
CHECK(RemoveDeadTypes());
|
||||
|
||||
def_use_mgr_ = nullptr;
|
||||
type_mgr_ = nullptr;
|
||||
|
||||
return Ok();
|
||||
}
|
||||
|
||||
spvtools::DiagnosticStream SplitCombinedImageSamplerPass::Fail() {
|
||||
return std::move(
|
||||
spvtools::DiagnosticStream({}, consumer(), "", SPV_ERROR_INVALID_BINARY)
|
||||
<< "split-combined-image-sampler: ");
|
||||
}
|
||||
|
||||
void SplitCombinedImageSamplerPass::FindCombinedTextureSamplers() {
|
||||
for (auto& inst : context()->types_values()) {
|
||||
RegisterGlobal(inst.result_id());
|
||||
switch (inst.opcode()) {
|
||||
case spv::Op::OpTypeSampler:
|
||||
// Modules can't have duplicate sampler types.
|
||||
assert(!sampler_type_);
|
||||
sampler_type_ = &inst;
|
||||
break;
|
||||
|
||||
case spv::Op::OpTypeSampledImage:
|
||||
if (!first_sampled_image_type_) {
|
||||
first_sampled_image_type_ = &inst;
|
||||
}
|
||||
combined_types_.insert(inst.result_id());
|
||||
def_use_mgr_->WhileEachUser(inst.result_id(), [&](Instruction* i) {
|
||||
sampled_image_used_as_param_ |=
|
||||
i->opcode() == spv::Op::OpTypeFunction;
|
||||
return !sampled_image_used_as_param_;
|
||||
});
|
||||
break;
|
||||
|
||||
case spv::Op::OpTypeArray:
|
||||
case spv::Op::OpTypeRuntimeArray: {
|
||||
auto pointee_id = inst.GetSingleWordInOperand(0);
|
||||
if (combined_types_.find(pointee_id) != combined_types_.end()) {
|
||||
combined_types_.insert(inst.result_id());
|
||||
combined_types_to_remove_.push_back(inst.result_id());
|
||||
}
|
||||
} break;
|
||||
|
||||
case spv::Op::OpTypePointer: {
|
||||
auto sc =
|
||||
static_cast<spv::StorageClass>(inst.GetSingleWordInOperand(0));
|
||||
if (sc == spv::StorageClass::UniformConstant) {
|
||||
auto pointee_id = inst.GetSingleWordInOperand(1);
|
||||
if (combined_types_.find(pointee_id) != combined_types_.end()) {
|
||||
combined_types_.insert(inst.result_id());
|
||||
combined_types_to_remove_.push_back(inst.result_id());
|
||||
}
|
||||
}
|
||||
} break;
|
||||
|
||||
case spv::Op::OpVariable:
|
||||
if (combined_types_.find(inst.type_id()) != combined_types_.end()) {
|
||||
ordered_vars_.push_back(&inst);
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Instruction* SplitCombinedImageSamplerPass::GetSamplerType() {
|
||||
if (!sampler_type_) {
|
||||
analysis::Sampler s;
|
||||
uint32_t sampler_type_id = type_mgr_->GetTypeInstruction(&s);
|
||||
sampler_type_ = def_use_mgr_->GetDef(sampler_type_id);
|
||||
assert(first_sampled_image_type_);
|
||||
sampler_type_->InsertBefore(first_sampled_image_type_);
|
||||
RegisterNewGlobal(sampler_type_->result_id());
|
||||
}
|
||||
return sampler_type_;
|
||||
}
|
||||
|
||||
spv_result_t SplitCombinedImageSamplerPass::RemapVars() {
|
||||
for (Instruction* var : ordered_vars_) {
|
||||
CHECK_STATUS(RemapVar(var));
|
||||
}
|
||||
return SPV_SUCCESS;
|
||||
}
|
||||
|
||||
std::pair<Instruction*, Instruction*> SplitCombinedImageSamplerPass::SplitType(
|
||||
Instruction& combined_kind_type) {
|
||||
if (auto where = type_remap_.find(combined_kind_type.result_id());
|
||||
where != type_remap_.end()) {
|
||||
auto& type_remap = where->second;
|
||||
return {type_remap.image_kind_type, type_remap.sampler_kind_type};
|
||||
}
|
||||
|
||||
switch (combined_kind_type.opcode()) {
|
||||
case spv::Op::OpTypeSampledImage: {
|
||||
auto* image_type =
|
||||
def_use_mgr_->GetDef(combined_kind_type.GetSingleWordInOperand(0));
|
||||
auto* sampler_type = GetSamplerType();
|
||||
type_remap_[combined_kind_type.result_id()] = {&combined_kind_type,
|
||||
image_type, sampler_type};
|
||||
return {image_type, sampler_type};
|
||||
break;
|
||||
}
|
||||
case spv::Op::OpTypePointer: {
|
||||
auto sc = static_cast<spv::StorageClass>(
|
||||
combined_kind_type.GetSingleWordInOperand(0));
|
||||
if (sc == spv::StorageClass::UniformConstant) {
|
||||
auto* pointee =
|
||||
def_use_mgr_->GetDef(combined_kind_type.GetSingleWordInOperand(1));
|
||||
auto [image_pointee, sampler_pointee] = SplitType(*pointee);
|
||||
// These would be null if the pointee is an image type or a sampler
// type. Don't decompose them. Currently this method does not check
// the assumption that it is only called on combined types, so code
// defensively here.
|
||||
if (image_pointee && sampler_pointee) {
|
||||
auto* ptr_image = MakeUniformConstantPointer(image_pointee);
|
||||
auto* ptr_sampler = MakeUniformConstantPointer(sampler_pointee);
|
||||
type_remap_[combined_kind_type.result_id()] = {
|
||||
&combined_kind_type, ptr_image, ptr_sampler};
|
||||
return {ptr_image, ptr_sampler};
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
case spv::Op::OpTypeArray: {
|
||||
const auto* array_ty =
|
||||
type_mgr_->GetType(combined_kind_type.result_id())->AsArray();
|
||||
assert(array_ty);
|
||||
const auto* sampled_image_ty = array_ty->element_type()->AsSampledImage();
|
||||
assert(sampled_image_ty);
|
||||
|
||||
const analysis::Type* image_ty = sampled_image_ty->image_type();
|
||||
assert(image_ty);
|
||||
analysis::Array array_image_ty(image_ty, array_ty->length_info());
|
||||
const uint32_t array_image_ty_id =
|
||||
type_mgr_->GetTypeInstruction(&array_image_ty);
|
||||
auto* array_image_ty_inst = def_use_mgr_->GetDef(array_image_ty_id);
|
||||
if (!IsKnownGlobal(array_image_ty_id)) {
|
||||
array_image_ty_inst->InsertBefore(&combined_kind_type);
|
||||
RegisterNewGlobal(array_image_ty_id);
|
||||
// GetTypeInstruction also updated the def-use manager.
|
||||
}
|
||||
|
||||
analysis::Array sampler_array_ty(
|
||||
type_mgr_->GetType(GetSamplerType()->result_id()),
|
||||
array_ty->length_info());
|
||||
const uint32_t array_sampler_ty_id =
|
||||
type_mgr_->GetTypeInstruction(&sampler_array_ty);
|
||||
auto* array_sampler_ty_inst = def_use_mgr_->GetDef(array_sampler_ty_id);
|
||||
if (!IsKnownGlobal(array_sampler_ty_id)) {
|
||||
array_sampler_ty_inst->InsertBefore(&combined_kind_type);
|
||||
RegisterNewGlobal(array_sampler_ty_id);
|
||||
// GetTypeInstruction also updated the def-use manager.
|
||||
}
|
||||
return {array_image_ty_inst, array_sampler_ty_inst};
|
||||
}
|
||||
case spv::Op::OpTypeRuntimeArray: {
|
||||
// This is like the sized-array case, but there is no length parameter.
|
||||
auto* array_ty =
|
||||
type_mgr_->GetType(combined_kind_type.result_id())->AsRuntimeArray();
|
||||
assert(array_ty);
|
||||
auto* sampled_image_ty = array_ty->element_type()->AsSampledImage();
|
||||
assert(sampled_image_ty);
|
||||
|
||||
const analysis::Type* image_ty = sampled_image_ty->image_type();
|
||||
assert(image_ty);
|
||||
analysis::RuntimeArray array_image_ty(image_ty);
|
||||
const uint32_t array_image_ty_id =
|
||||
type_mgr_->GetTypeInstruction(&array_image_ty);
|
||||
auto* array_image_ty_inst = def_use_mgr_->GetDef(array_image_ty_id);
|
||||
if (!IsKnownGlobal(array_image_ty_id)) {
|
||||
array_image_ty_inst->InsertBefore(&combined_kind_type);
|
||||
RegisterNewGlobal(array_image_ty_id);
|
||||
// GetTypeInstruction also updated the def-use manager.
|
||||
}
|
||||
|
||||
analysis::RuntimeArray sampler_array_ty(
|
||||
type_mgr_->GetType(GetSamplerType()->result_id()));
|
||||
const uint32_t array_sampler_ty_id =
|
||||
type_mgr_->GetTypeInstruction(&sampler_array_ty);
|
||||
auto* array_sampler_ty_inst = def_use_mgr_->GetDef(array_sampler_ty_id);
|
||||
if (!IsKnownGlobal(array_sampler_ty_id)) {
|
||||
array_sampler_ty_inst->InsertBefore(&combined_kind_type);
|
||||
RegisterNewGlobal(array_sampler_ty_id);
|
||||
// GetTypeInstruction also updated the def-use manager.
|
||||
}
|
||||
return {array_image_ty_inst, array_sampler_ty_inst};
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return {nullptr, nullptr};
|
||||
}
|
||||
|
||||
spv_result_t SplitCombinedImageSamplerPass::RemapVar(
|
||||
Instruction* combined_var) {
|
||||
InstructionBuilder builder(context(), combined_var,
|
||||
IRContext::kAnalysisDefUse);
|
||||
|
||||
// Create an image variable, and a sampler variable.
|
||||
auto* combined_var_type = def_use_mgr_->GetDef(combined_var->type_id());
|
||||
auto [ptr_image_ty, ptr_sampler_ty] = SplitType(*combined_var_type);
|
||||
assert(ptr_image_ty);
|
||||
assert(ptr_sampler_ty);
|
||||
Instruction* sampler_var = builder.AddVariable(
|
||||
ptr_sampler_ty->result_id(), SpvStorageClassUniformConstant);
|
||||
Instruction* image_var = builder.AddVariable(ptr_image_ty->result_id(),
|
||||
SpvStorageClassUniformConstant);
|
||||
modified_ = true;
|
||||
return RemapUses(combined_var, image_var, sampler_var);
|
||||
}
|
||||
|
||||
spv_result_t SplitCombinedImageSamplerPass::RemapUses(
|
||||
Instruction* combined, Instruction* image_part, Instruction* sampler_part) {
|
||||
// The instructions to delete.
|
||||
std::unordered_set<Instruction*> dead_insts;
|
||||
// The insertion point should be updated before using this builder.
|
||||
// We needed *something* here.
|
||||
InstructionBuilder builder(context(), combined, IRContext::kAnalysisDefUse);
|
||||
|
||||
// This code must maintain the SPIR-V "Data rule" about sampled image values:
|
||||
// > All OpSampledImage instructions, or instructions that load an image or
|
||||
// > sampler reference, must be in the same block in which their Result <id>
|
||||
// > are consumed.
|
||||
//
|
||||
// When the code below inserts OpSampledImage instructions, it is always
|
||||
// either:
|
||||
// - in the same block as the previous OpSampledImage instruction it is
|
||||
// replacing, or
|
||||
// - in the same block as the instruction using the sampled image value it is
|
||||
// replacing.
|
||||
//
|
||||
// Assuming that rule is already honoured by the module, these updates will
|
||||
// continue to honour the rule.
|
||||
|
||||
// Represents a single use of a value to be remapped.
|
||||
struct RemapUse {
|
||||
uint32_t used_id; // The ID that is being used.
|
||||
Instruction* user;  // The instruction that uses that ID.
uint32_t index;     // Operand index of the use within the user.
|
||||
Instruction* image_part; // The image part of the replacement.
|
||||
Instruction* sampler_part; // The sampler part of the replacement.
|
||||
};
|
||||
// The work list of uses to be remapped.
|
||||
std::vector<RemapUse> uses;
|
||||
|
||||
// Adds remap records for each use of a value to be remapped.
|
||||
// Also schedules the original value for deletion.
|
||||
auto add_remap = [this, &dead_insts, &uses](Instruction* combined_arg,
|
||||
Instruction* image_part_arg,
|
||||
Instruction* sampler_part_arg) {
|
||||
const uint32_t used_combined_id = combined_arg->result_id();
|
||||
|
||||
def_use_mgr_->ForEachUse(
|
||||
combined_arg, [&](Instruction* user, uint32_t use_index) {
|
||||
uses.push_back({used_combined_id, user, use_index, image_part_arg,
|
||||
sampler_part_arg});
|
||||
});
|
||||
dead_insts.insert(combined_arg);
|
||||
};
|
||||
|
||||
add_remap(combined, image_part, sampler_part);
|
||||
|
||||
// Use index-based iteration because we can add to the work list as we go
|
||||
// along, and reallocation would invalidate ordinary iterators.
|
||||
for (size_t use_index = 0; use_index < uses.size(); ++use_index) {
|
||||
auto& use = uses[use_index];
|
||||
switch (use.user->opcode()) {
|
||||
case spv::Op::OpCopyObject: {
|
||||
// Append the uses of this OpCopyObject to the work list.
|
||||
add_remap(use.user, image_part, sampler_part);
|
||||
break;
|
||||
}
|
||||
case spv::Op::OpLoad: {
|
||||
assert(use.index == 2 && "variable used as non-pointer index on load");
|
||||
Instruction* load = use.user;
|
||||
|
||||
// Assume the loaded value is a sampled image.
|
||||
assert(def_use_mgr_->GetDef(load->type_id())->opcode() ==
|
||||
spv::Op::OpTypeSampledImage);
|
||||
|
||||
// Create loads for the image part and sampler part.
|
||||
builder.SetInsertPoint(load);
|
||||
auto* image = builder.AddLoad(PointeeTypeId(use.image_part),
|
||||
use.image_part->result_id());
|
||||
auto* sampler = builder.AddLoad(PointeeTypeId(use.sampler_part),
|
||||
use.sampler_part->result_id());
|
||||
// Create a sampled image from the loads of the two parts.
|
||||
auto* sampled_image = builder.AddSampledImage(
|
||||
load->type_id(), image->result_id(), sampler->result_id());
|
||||
// Replace the original sampled image value with the new one.
|
||||
std::unordered_set<Instruction*> users;
|
||||
def_use_mgr_->ForEachUse(
|
||||
load, [&users, sampled_image](Instruction* user, uint32_t index) {
|
||||
user->SetOperand(index, {sampled_image->result_id()});
|
||||
users.insert(user);
|
||||
});
|
||||
for (auto* user : users) {
|
||||
def_use_mgr_->AnalyzeInstUse(user);
|
||||
}
|
||||
dead_insts.insert(load);
|
||||
break;
|
||||
}
|
||||
case spv::Op::OpDecorate: {
|
||||
assert(use.index == 0 && "variable used as non-target index");
|
||||
builder.SetInsertPoint(use.user);
|
||||
spv::Decoration deco{use.user->GetSingleWordInOperand(1)};
|
||||
std::vector<uint32_t> literals;
|
||||
for (uint32_t i = 2; i < use.user->NumInOperands(); i++) {
|
||||
literals.push_back(use.user->GetSingleWordInOperand(i));
|
||||
}
|
||||
builder.AddDecoration(use.image_part->result_id(), deco, literals);
|
||||
builder.AddDecoration(use.sampler_part->result_id(), deco, literals);
|
||||
// KillInst will delete names and decorations, so don't schedule a
|
||||
// deletion of this instruction.
|
||||
break;
|
||||
}
|
||||
case spv::Op::OpEntryPoint: {
|
||||
// The entry point lists variables in the shader interface, i.e.
|
||||
// module-scope variables referenced by the static call tree rooted
|
||||
// at the entry point. (It can be a proper superset). Before SPIR-V
|
||||
// 1.4, only Input and Output variables are listed; in 1.4 and later,
|
||||
// module-scope variables in all storage classes are listed.
|
||||
// If a combined image+sampler is listed by the entry point, then
|
||||
// the separated image and sampler variables should be listed too.
|
||||
assert(use.index >= 3 &&
|
||||
"variable used in OpEntryPoint but not as an interface ID");
|
||||
use.user->SetOperand(use.index, {use.image_part->result_id()});
|
||||
use.user->InsertOperand(
|
||||
use.user->NumOperands(),
|
||||
{SPV_OPERAND_TYPE_ID, {use.sampler_part->result_id()}});
|
||||
def_use_mgr_->AnalyzeInstUse(use.user);
|
||||
break;
|
||||
}
|
||||
case spv::Op::OpName:
|
||||
// TODO(dneto): Maybe we should synthesize names for the remapped vars.
|
||||
|
||||
// KillInst will delete names and decorations, so don't schedule a
|
||||
// deletion of this instruction.
|
||||
break;
|
||||
case spv::Op::OpFunctionCall: {
|
||||
// Replace each combined arg with two args: the image part, then the
|
||||
// sampler part.
|
||||
// The combined value could have been used twice in the argument list.
|
||||
// Moving things around now will invalidate the 'use' list above.
|
||||
// So don't trust the use index value.
|
||||
auto& call = *use.user;
|
||||
// The insert API only takes absolute arg IDs, not "in" arg IDs.
|
||||
const auto first_arg_operand_index = 3; // Skip the callee ID
|
||||
for (uint32_t i = first_arg_operand_index; i < call.NumOperands();
|
||||
++i) {
|
||||
if (use.used_id == call.GetSingleWordOperand(i)) {
|
||||
call.SetOperand(i, {use.sampler_part->result_id()});
|
||||
call.InsertOperand(
|
||||
i, {SPV_OPERAND_TYPE_ID, {use.image_part->result_id()}});
|
||||
++i;
|
||||
}
|
||||
}
|
||||
def_use_mgr_->AnalyzeInstUse(&call);
|
||||
break;
|
||||
}
|
||||
case spv::Op::OpAccessChain:
|
||||
case spv::Op::OpInBoundsAccessChain: {
|
||||
auto* original_access_chain = use.user;
|
||||
builder.SetInsertPoint(original_access_chain);
|
||||
// It can only be the base pointer
|
||||
assert(use.index == 2);
|
||||
|
||||
// Replace the original access chain with access chains for the image
|
||||
// part and the sampler part.
|
||||
std::vector<uint32_t> indices;
|
||||
for (uint32_t i = 3; i < original_access_chain->NumOperands(); i++) {
|
||||
indices.push_back(original_access_chain->GetSingleWordOperand(i));
|
||||
}
|
||||
|
||||
auto [result_image_part_ty, result_sampler_part_ty] =
|
||||
SplitType(*def_use_mgr_->GetDef(original_access_chain->type_id()));
|
||||
auto* result_image_part = builder.AddOpcodeAccessChain(
|
||||
use.user->opcode(), result_image_part_ty->result_id(),
|
||||
use.image_part->result_id(), indices);
|
||||
auto* result_sampler_part = builder.AddOpcodeAccessChain(
|
||||
use.user->opcode(), result_sampler_part_ty->result_id(),
|
||||
use.sampler_part->result_id(), indices);
|
||||
|
||||
// Remap uses of the original access chain.
|
||||
add_remap(original_access_chain, result_image_part,
|
||||
result_sampler_part);
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
uint32_t used_type_id = def_use_mgr_->GetDef(use.used_id)->type_id();
|
||||
auto* used_type = def_use_mgr_->GetDef(used_type_id);
|
||||
if (used_type->opcode() == spv::Op::OpTypeSampledImage) {
|
||||
// This value being used is a sampled image value. But it's
|
||||
// being replaced, so recreate it here.
|
||||
// Example: used by OpImage, OpImageSampleExplicitLod, etc.
|
||||
builder.SetInsertPoint(use.user);
|
||||
auto* sampled_image =
|
||||
builder.AddSampledImage(used_type_id, use.image_part->result_id(),
|
||||
use.sampler_part->result_id());
|
||||
use.user->SetOperand(use.index, {sampled_image->result_id()});
|
||||
def_use_mgr_->AnalyzeInstUse(use.user);
|
||||
break;
|
||||
}
|
||||
return Fail() << "unhandled user: " << *use.user;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (auto* inst : dead_insts) {
|
||||
KillInst(inst);
|
||||
}
|
||||
|
||||
return SPV_SUCCESS;
|
||||
}
|
||||
|
||||
spv_result_t SplitCombinedImageSamplerPass::RemapFunctions() {
|
||||
// Remap function types. A combined type can appear as a parameter, but not as
|
||||
// the return type.
|
||||
{
|
||||
std::unordered_set<Instruction*> dead_insts;
|
||||
for (auto& inst : context()->types_values()) {
|
||||
if (inst.opcode() != spv::Op::OpTypeFunction) {
|
||||
continue;
|
||||
}
|
||||
analysis::Function* f_ty =
|
||||
type_mgr_->GetType(inst.result_id())->AsFunction();
|
||||
std::vector<const analysis::Type*> new_params;
|
||||
for (const auto* param_ty : f_ty->param_types()) {
|
||||
const auto param_ty_id = type_mgr_->GetId(param_ty);
|
||||
if (combined_types_.find(param_ty_id) != combined_types_.end()) {
|
||||
auto* param_type = def_use_mgr_->GetDef(param_ty_id);
|
||||
auto [image_type, sampler_type] = SplitType(*param_type);
|
||||
assert(image_type);
|
||||
assert(sampler_type);
|
||||
// The image and sampler types must already exist, so there is no
|
||||
// need to move them to the right spot.
|
||||
new_params.push_back(type_mgr_->GetType(image_type->result_id()));
|
||||
new_params.push_back(type_mgr_->GetType(sampler_type->result_id()));
|
||||
} else {
|
||||
new_params.push_back(param_ty);
|
||||
}
|
||||
}
|
||||
if (new_params.size() != f_ty->param_types().size()) {
|
||||
// Replace this type.
|
||||
analysis::Function new_f_ty(f_ty->return_type(), new_params);
|
||||
const uint32_t new_f_ty_id = type_mgr_->GetTypeInstruction(&new_f_ty);
|
||||
std::unordered_set<Instruction*> users;
|
||||
def_use_mgr_->ForEachUse(
|
||||
&inst,
|
||||
[&users, new_f_ty_id](Instruction* user, uint32_t use_index) {
|
||||
user->SetOperand(use_index, {new_f_ty_id});
|
||||
users.insert(user);
|
||||
});
|
||||
for (auto* user : users) {
|
||||
def_use_mgr_->AnalyzeInstUse(user);
|
||||
}
|
||||
dead_insts.insert(&inst);
|
||||
}
|
||||
}
|
||||
for (auto* inst : dead_insts) {
|
||||
KillInst(inst);
|
||||
}
|
||||
}
|
||||
|
||||
// Rewrite OpFunctionParameter in function definitions.
|
||||
for (Function& fn : *context()->module()) {
|
||||
// Rewrite the function parameters and record their replacements.
|
||||
struct Replacement {
|
||||
Instruction* combined;
|
||||
Instruction* image;
|
||||
Instruction* sampler;
|
||||
};
|
||||
std::vector<Replacement> replacements;
|
||||
|
||||
Function::RewriteParamFn rewriter =
|
||||
[&](std::unique_ptr<Instruction>&& param,
|
||||
std::back_insert_iterator<Function::ParamList>& appender) {
|
||||
if (combined_types_.count(param->type_id()) == 0) {
|
||||
appender = std::move(param);
|
||||
return;
|
||||
}
|
||||
|
||||
// Replace this parameter with two new parameters.
|
||||
auto* combined_inst = param.release();
|
||||
auto* combined_type = def_use_mgr_->GetDef(combined_inst->type_id());
|
||||
auto [image_type, sampler_type] = SplitType(*combined_type);
|
||||
auto image_param = MakeUnique<Instruction>(
|
||||
context(), spv::Op::OpFunctionParameter, image_type->result_id(),
|
||||
context()->TakeNextId(), Instruction::OperandList{});
|
||||
auto sampler_param = MakeUnique<Instruction>(
|
||||
context(), spv::Op::OpFunctionParameter,
|
||||
sampler_type->result_id(), context()->TakeNextId(),
|
||||
Instruction::OperandList{});
|
||||
replacements.push_back(
|
||||
{combined_inst, image_param.get(), sampler_param.get()});
|
||||
appender = std::move(image_param);
|
||||
appender = std::move(sampler_param);
|
||||
};
|
||||
fn.RewriteParams(rewriter);
|
||||
|
||||
for (auto& r : replacements) {
|
||||
modified_ = true;
|
||||
def_use_mgr_->AnalyzeInstDefUse(r.image);
|
||||
def_use_mgr_->AnalyzeInstDefUse(r.sampler);
|
||||
CHECK_STATUS(RemapUses(r.combined, r.image, r.sampler));
|
||||
}
|
||||
}
|
||||
return SPV_SUCCESS;
|
||||
}
|
||||
|
||||
Instruction* SplitCombinedImageSamplerPass::MakeUniformConstantPointer(
    Instruction* pointee) {
  uint32_t ptr_id = type_mgr_->FindPointerToType(
      pointee->result_id(), spv::StorageClass::UniformConstant);
  auto* ptr = def_use_mgr_->GetDef(ptr_id);
  if (!IsKnownGlobal(ptr_id)) {
    // The pointer type was created at the end. Put it right after the
    // pointee.
    ptr->InsertBefore(pointee);
    pointee->InsertBefore(ptr);
    RegisterNewGlobal(ptr_id);
    // FindPointerToType also updated the def-use manager.
  }
  return ptr;
}

spv_result_t SplitCombinedImageSamplerPass::RemoveDeadTypes() {
  for (auto dead_type_id : combined_types_to_remove_) {
    if (auto* ty = def_use_mgr_->GetDef(dead_type_id)) {
      KillInst(ty);
    }
  }
  return SPV_SUCCESS;
}

void SplitCombinedImageSamplerPass::KillInst(Instruction* inst) {
  // IRContext::KillInst will remove associated debug instructions and
  // decorations. It will delete the object only if it is already in a list.
  const bool was_in_list = inst->IsInAList();
  context()->KillInst(inst);
  if (!was_in_list) {
    // Avoid leaking
    delete inst;
  }
  modified_ = true;
}
|
||||
|
||||
} // namespace opt
|
||||
} // namespace spvtools
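For orientation, here is a minimal before/after sketch of the rewrite that RemapVar and RemapUses perform on a single combined binding; the SPIR-V ids and names are illustrative only, not taken from the pass's tests.

// Before: one combined image+sampler variable, loaded and sampled directly.
//   %combined_ptr = OpTypePointer UniformConstant %sampled_image_ty
//   %combined     = OpVariable %combined_ptr UniformConstant
//   %value        = OpLoad %sampled_image_ty %combined
//   %result       = OpImageSampleImplicitLod %v4float %value %coord
//
// After: separate image and sampler variables, recombined at the use site.
//   %image_var    = OpVariable %image_ptr UniformConstant
//   %sampler_var  = OpVariable %sampler_ptr UniformConstant
//   %image        = OpLoad %image_ty %image_var
//   %sampler      = OpLoad %sampler_ty %sampler_var
//   %value        = OpSampledImage %sampled_image_ty %image %sampler
//   %result       = OpImageSampleImplicitLod %v4float %value %coord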
|
||||
164  3rdparty/spirv-tools/source/opt/split_combined_image_sampler_pass.h (vendored, new file)
@@ -0,0 +1,164 @@
|
||||
// Copyright (c) 2025 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#ifndef LIBSPIRV_OPT_SPLIT_COMBINED_IMAGE_SAMPLER_PASS_H_
|
||||
#define LIBSPIRV_OPT_SPLIT_COMBINED_IMAGE_SAMPLER_PASS_H_
|
||||
|
||||
#include <unordered_map>
#include <unordered_set>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "source/diagnostic.h"
|
||||
#include "source/opt/decoration_manager.h"
|
||||
#include "source/opt/def_use_manager.h"
|
||||
#include "source/opt/pass.h"
|
||||
#include "source/opt/type_manager.h"
|
||||
|
||||
namespace spvtools {
|
||||
namespace opt {
|
||||
|
||||
// Replaces each combined-image sampler variable with an image variable
|
||||
// and a sampler variable. Similar for function parameters.
|
||||
//
|
||||
// Copy the descriptor set and binding number. Vulkan allows this, surprisingly.
|
||||
class SplitCombinedImageSamplerPass : public Pass {
|
||||
public:
|
||||
virtual ~SplitCombinedImageSamplerPass() override = default;
|
||||
const char* name() const override { return "split-combined-image-sampler"; }
|
||||
IRContext::Analysis GetPreservedAnalyses() override;
|
||||
Status Process() override;
|
||||
|
||||
private:
|
||||
// Records failure for the current module, and returns a stream
|
||||
// that can be used to provide user error information to the message
|
||||
// consumer.
|
||||
spvtools::DiagnosticStream Fail();
|
||||
|
||||
// Find variables that contain combined texture-samplers, or arrays of them.
|
||||
// Also populate known_globals_.
|
||||
void FindCombinedTextureSamplers();
|
||||
|
||||
// Returns the sampler type. If it does not yet exist, then it is created
|
||||
// and placed before the first sampled image type.
|
||||
Instruction* GetSamplerType();
|
||||
|
||||
// Remaps function types and function declarations. Each
|
||||
// pointer-to-sampled-image-type operand is replaced with a pair of
|
||||
// pointer-to-image-type and pointer-to-sampler-type pair.
|
||||
// Updates the def-use manager and type manager.
|
||||
spv_result_t RemapFunctions();
|
||||
// Remap resource variables.
|
||||
// Updates the def-use manager.
|
||||
spv_result_t RemapVars();
|
||||
// Remap a single resource variable for combined var.
|
||||
// Updates the def-use manager and the decorations manager.
|
||||
spv_result_t RemapVar(Instruction* combined_var);
|
||||
// Transitively remaps uses of the combined object with uses of the
|
||||
// decomposed image and sampler parts. The combined object can be a sampled
|
||||
// image value, a pointer to one, an array of one, or a pointer to an array
|
||||
// of one. The image and sampler parts have corresponding shapes.
|
||||
// Updates the def-use manager and the decorations manager.
|
||||
spv_result_t RemapUses(Instruction* combined, Instruction* image_part,
|
||||
Instruction* sampler_part);
|
||||
// Removes types that are no longer referenced.
|
||||
spv_result_t RemoveDeadTypes();
|
||||
|
||||
// Returns the type instruction for a UniformConstant pointer to the given
|
||||
// pointee type. If it does not yet exist, the new type instruction is created
|
||||
// and placed immediately after the pointee type instruction. Updates def-use
|
||||
// and type managers, and the set of known globals.
|
||||
Instruction* MakeUniformConstantPointer(Instruction* pointee);
|
||||
|
||||
// Returns the ID of the pointee type for a pointer value instruction.
|
||||
uint32_t PointeeTypeId(Instruction* ptr_value) {
|
||||
auto* ptr_ty = def_use_mgr_->GetDef(ptr_value->type_id());
|
||||
assert(ptr_ty->opcode() == spv::Op::OpTypePointer);
|
||||
return ptr_ty->GetSingleWordInOperand(1);
|
||||
}
|
||||
|
||||
// Cached from the IRContext. Valid while Process() is running.
|
||||
analysis::DefUseManager* def_use_mgr_ = nullptr;
|
||||
// Cached from the IRContext. Valid while Process() is running.
|
||||
analysis::TypeManager* type_mgr_ = nullptr;
|
||||
|
||||
// Did processing modify the module?
|
||||
bool modified_ = false;
|
||||
Pass::Status Ok() {
|
||||
return modified_ ? Pass::Status::SuccessWithChange
|
||||
: Pass::Status::SuccessWithoutChange;
|
||||
}
|
||||
|
||||
// The first OpTypeSampledImage instruction in the module, if one exists.
|
||||
Instruction* first_sampled_image_type_ = nullptr;
|
||||
// An OpTypeSampler instruction, if one existed already, or if we created one.
|
||||
Instruction* sampler_type_ = nullptr;
|
||||
|
||||
// The known types and module-scope values.
|
||||
// We use this to know when a new such value was created.
|
||||
std::unordered_set<uint32_t> known_globals_;
|
||||
bool IsKnownGlobal(uint32_t id) const {
|
||||
return known_globals_.find(id) != known_globals_.end();
|
||||
}
|
||||
void RegisterGlobal(uint32_t id) { known_globals_.insert(id); }
|
||||
void RegisterNewGlobal(uint32_t id) {
|
||||
modified_ = true;
|
||||
RegisterGlobal(id);
|
||||
}
|
||||
|
||||
// Deletes an instruction and associated debug and decoration instructions.
|
||||
// Updates the def-use manager.
|
||||
void KillInst(Instruction* inst);
|
||||
|
||||
// Combined types. The known combined sampled-image type,
|
||||
// and recursively pointers or arrays of them.
|
||||
std::unordered_set<uint32_t> combined_types_;
|
||||
// The pre-existing types this pass should remove: pointer to
|
||||
// combined type, array of combined type, pointer to array of combined type.
|
||||
std::vector<uint32_t> combined_types_to_remove_;
|
||||
// Is an OpTypeSampledImage used as a function parameter? Those should be
|
||||
// transformed.
|
||||
bool sampled_image_used_as_param_ = false;
|
||||
|
||||
// Remaps a combined-kind type to the corresponding sampler-kind and image-kind
// types.
|
||||
struct TypeRemapInfo {
|
||||
// The instruction for the combined type, pointer to combined type,
|
||||
// or pointer to an array of combined type.
|
||||
Instruction* combined_kind_type;
|
||||
// The corresponding image type, with the same shape of indirection as the
|
||||
// combined_kind_type.
|
||||
Instruction* image_kind_type;
|
||||
// The corresponding sampler type, with the same shape of indirection as the
|
||||
// combined_kind_type.
|
||||
Instruction* sampler_kind_type;
|
||||
};
|
||||
// Maps the ID of a combined-image-sampler type kind to its corresponding
|
||||
// split parts.
|
||||
std::unordered_map<uint32_t, TypeRemapInfo> type_remap_;
|
||||
|
||||
// Returns the image-like and sampler-like types of the same indirection shape
|
||||
// as the given combined-like type. If combined_kind_type is not a combined
|
||||
// type or a pointer to one, or an array of one or a pointer to an array of
|
||||
// one, then returns a pair of null pointers. Either both components are
|
||||
// non-null, or both components are null. Updates the def-use manager and the
|
||||
// type manager if new instructions are created.
|
||||
std::pair<Instruction*, Instruction*> SplitType(
|
||||
Instruction& combined_kind_type);
|
||||
|
||||
// The combined-image-sampler variables to be replaced.
|
||||
std::vector<Instruction*> ordered_vars_;
|
||||
};
|
||||
} // namespace opt
|
||||
} // namespace spvtools
|
||||
#endif // LIBSPIRV_OPT_SPLIT_COMBINED_IMAGE_SAMPLER_PASS_H_
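A rough sketch of how a client might run the new pass through the C++ optimizer wrapper. It assumes a CreateSplitCombinedImageSamplerPass() factory is exposed in include/spirv-tools/optimizer.hpp, which is the usual convention for spirv-opt passes but is not shown in this diff:

#include "spirv-tools/optimizer.hpp"

#include <cstdint>
#include <vector>

// Splits combined image+sampler bindings in `binary` in place.
// Returns false if the optimizer reports an error.
bool SplitCombinedBindings(std::vector<uint32_t>& binary) {
  spvtools::Optimizer opt(SPV_ENV_VULKAN_1_3);
  opt.RegisterPass(spvtools::CreateSplitCombinedImageSamplerPass());  // assumed factory
  return opt.Run(binary.data(), binary.size(), &binary);
}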
|
||||
@@ -241,6 +241,37 @@ Handler_OpTypePointer_StorageUniformBufferBlock16(
|
||||
: std::nullopt;
|
||||
}
|
||||
|
||||
static std::optional<spv::Capability>
|
||||
Handler_OpTypePointer_StorageBuffer16BitAccess(const Instruction* instruction) {
|
||||
assert(instruction->opcode() == spv::Op::OpTypePointer &&
|
||||
"This handler only support OpTypePointer opcodes.");
|
||||
|
||||
// Requires StorageBuffer, ShaderRecordBufferKHR or PhysicalStorageBuffer
|
||||
// storage classes.
|
||||
spv::StorageClass storage_class = spv::StorageClass(
|
||||
instruction->GetSingleWordInOperand(kOpTypePointerStorageClassIndex));
|
||||
if (storage_class != spv::StorageClass::StorageBuffer &&
|
||||
storage_class != spv::StorageClass::ShaderRecordBufferKHR &&
|
||||
storage_class != spv::StorageClass::PhysicalStorageBuffer) {
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
const auto* decoration_mgr = instruction->context()->get_decoration_mgr();
|
||||
const bool matchesCondition =
|
||||
AnyTypeOf(instruction, [decoration_mgr](const Instruction* item) {
|
||||
if (!decoration_mgr->HasDecoration(item->result_id(),
|
||||
spv::Decoration::Block)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return AnyTypeOf(item, is16bitType);
|
||||
});
|
||||
|
||||
return matchesCondition
|
||||
? std::optional(spv::Capability::StorageBuffer16BitAccess)
|
||||
: std::nullopt;
|
||||
}
|
||||
|
||||
static std::optional<spv::Capability> Handler_OpTypePointer_StorageUniform16(
|
||||
const Instruction* instruction) {
|
||||
assert(instruction->opcode() == spv::Op::OpTypePointer &&
|
||||
@@ -388,7 +419,7 @@ Handler_OpImageSparseRead_StorageImageReadWithoutFormat(
|
||||
}
|
||||
|
||||
// Opcode of interest to determine capabilities requirements.
|
||||
constexpr std::array<std::pair<spv::Op, OpcodeHandler>, 13> kOpcodeHandlers{{
|
||||
constexpr std::array<std::pair<spv::Op, OpcodeHandler>, 14> kOpcodeHandlers{{
|
||||
// clang-format off
|
||||
{spv::Op::OpImageRead, Handler_OpImageRead_StorageImageReadWithoutFormat},
|
||||
{spv::Op::OpImageWrite, Handler_OpImageWrite_StorageImageWriteWithoutFormat},
|
||||
@@ -403,6 +434,7 @@ constexpr std::array<std::pair<spv::Op, OpcodeHandler>, 13> kOpcodeHandlers{{
|
||||
{spv::Op::OpTypePointer, Handler_OpTypePointer_StorageUniform16},
|
||||
{spv::Op::OpTypePointer, Handler_OpTypePointer_StorageUniform16},
|
||||
{spv::Op::OpTypePointer, Handler_OpTypePointer_StorageUniformBufferBlock16},
|
||||
{spv::Op::OpTypePointer, Handler_OpTypePointer_StorageBuffer16BitAccess},
|
||||
// clang-format on
|
||||
}};
|
||||
|
||||
|
||||
@@ -99,6 +99,7 @@ class TrimCapabilitiesPass : public Pass {
|
||||
spv::Capability::RayTraversalPrimitiveCullingKHR,
|
||||
spv::Capability::Shader,
|
||||
spv::Capability::ShaderClockKHR,
|
||||
spv::Capability::StorageBuffer16BitAccess,
|
||||
spv::Capability::StorageImageReadWithoutFormat,
|
||||
spv::Capability::StorageImageWriteWithoutFormat,
|
||||
spv::Capability::StorageInputOutput16,
|
||||
|
||||
@@ -495,6 +495,8 @@ uint32_t TypeManager::GetTypeInstruction(const Type* type) {
|
||||
break;
|
||||
}
|
||||
context()->AddType(std::move(typeInst));
|
||||
// TODO(dneto): This next call to AnalyzeDefUse is redundant because
|
||||
// IRContext::AddType already does it.
|
||||
context()->AnalyzeDefUse(&*--context()->types_values_end());
|
||||
AttachDecorations(id, type);
|
||||
return id;
|
||||
|
||||
@@ -131,6 +131,11 @@ void spvValidatorOptionsSetAllowOffsetTextureOperand(
|
||||
options->allow_offset_texture_operand = val;
|
||||
}
|
||||
|
||||
void spvValidatorOptionsSetAllowVulkan32BitBitwise(
|
||||
spv_validator_options options, bool val) {
|
||||
options->allow_vulkan_32_bit_bitwise = val;
|
||||
}
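A minimal sketch of opting into the new relaxation through the stable C API; apart from the shader words, which are assumed to be loaded elsewhere, every call here is existing public API plus the setter defined above:

#include "spirv-tools/libspirv.h"

// `words`/`word_count` are assumed to hold a SPIR-V module loaded elsewhere.
spv_result_t ValidateWithRelaxedBitwiseBase(const uint32_t* words,
                                            size_t word_count) {
  spv_context context = spvContextCreate(SPV_ENV_VULKAN_1_3);
  spv_validator_options options = spvValidatorOptionsCreate();
  spvValidatorOptionsSetAllowVulkan32BitBitwise(options, true);

  spv_const_binary_t binary = {words, word_count};
  spv_diagnostic diagnostic = nullptr;
  spv_result_t result =
      spvValidateWithOptions(context, options, &binary, &diagnostic);
  if (diagnostic) spvDiagnosticPrint(diagnostic);

  spvDiagnosticDestroy(diagnostic);
  spvValidatorOptionsDestroy(options);
  spvContextDestroy(context);
  return result;
}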
|
||||
|
||||
void spvValidatorOptionsSetFriendlyNames(spv_validator_options options,
|
||||
bool val) {
|
||||
options->use_friendly_names = val;
|
||||
|
||||
@@ -49,6 +49,7 @@ struct spv_validator_options_t {
|
||||
skip_block_layout(false),
|
||||
allow_localsizeid(false),
|
||||
allow_offset_texture_operand(false),
|
||||
allow_vulkan_32_bit_bitwise(false),
|
||||
before_hlsl_legalization(false),
|
||||
use_friendly_names(true) {}
|
||||
|
||||
@@ -62,6 +63,7 @@ struct spv_validator_options_t {
|
||||
bool skip_block_layout;
|
||||
bool allow_localsizeid;
|
||||
bool allow_offset_texture_operand;
|
||||
bool allow_vulkan_32_bit_bitwise;
|
||||
bool before_hlsl_legalization;
|
||||
bool use_friendly_names;
|
||||
};
|
||||
|
||||
128  3rdparty/spirv-tools/source/text.cpp (vendored)
@@ -515,6 +515,124 @@ spv_result_t encodeInstructionStartingWithImmediate(
|
||||
return SPV_SUCCESS;
|
||||
}
|
||||
|
||||
/// @brief Translate an instruction started by OpUnknown and the following
|
||||
/// operands to binary form
|
||||
///
|
||||
/// @param[in] grammar the grammar to use for compilation
|
||||
/// @param[in, out] context the dynamic compilation info
|
||||
/// @param[out] pInst returned binary Opcode
|
||||
///
|
||||
/// @return result code
|
||||
spv_result_t encodeInstructionStartingWithOpUnknown(
|
||||
const spvtools::AssemblyGrammar& grammar,
|
||||
spvtools::AssemblyContext* context, spv_instruction_t* pInst) {
|
||||
spv_position_t nextPosition = {};
|
||||
|
||||
uint16_t opcode;
|
||||
uint16_t wordCount;
|
||||
|
||||
// The '(' character.
|
||||
if (context->advance())
|
||||
return context->diagnostic() << "Expected '(', found end of stream.";
|
||||
if ('(' != context->peek()) {
|
||||
return context->diagnostic() << "'(' expected after OpUnknown but found '"
|
||||
<< context->peek() << "'.";
|
||||
}
|
||||
context->seekForward(1);
|
||||
|
||||
// The opcode enumerant.
|
||||
if (context->advance())
|
||||
return context->diagnostic()
|
||||
<< "Expected opcode enumerant, found end of stream.";
|
||||
std::string opcodeString;
|
||||
spv_result_t error = context->getWord(&opcodeString, &nextPosition);
|
||||
if (error) return context->diagnostic(error) << "Internal Error";
|
||||
|
||||
if (!spvtools::utils::ParseNumber(opcodeString.c_str(), &opcode)) {
|
||||
return context->diagnostic()
|
||||
<< "Invalid opcode enumerant: \"" << opcodeString << "\".";
|
||||
}
|
||||
|
||||
context->setPosition(nextPosition);
|
||||
|
||||
// The ',' character.
|
||||
if (context->advance())
|
||||
return context->diagnostic() << "Expected ',', found end of stream.";
|
||||
if (',' != context->peek()) {
|
||||
return context->diagnostic()
|
||||
<< "',' expected after opcode enumerant but found '"
|
||||
<< context->peek() << "'.";
|
||||
}
|
||||
context->seekForward(1);
|
||||
|
||||
// The number of words.
|
||||
if (context->advance())
|
||||
return context->diagnostic()
|
||||
<< "Expected number of words, found end of stream.";
|
||||
std::string wordCountString;
|
||||
error = context->getWord(&wordCountString, &nextPosition);
|
||||
if (error) return context->diagnostic(error) << "Internal Error";
|
||||
|
||||
if (!spvtools::utils::ParseNumber(wordCountString.c_str(), &wordCount)) {
|
||||
return context->diagnostic()
|
||||
<< "Invalid number of words: \"" << wordCountString << "\".";
|
||||
}
|
||||
|
||||
if (wordCount == 0) {
|
||||
return context->diagnostic() << "Number of words (which includes the "
|
||||
"opcode) must be greater than zero.";
|
||||
}
|
||||
|
||||
context->setPosition(nextPosition);
|
||||
|
||||
// The ')' character.
|
||||
if (context->advance())
|
||||
return context->diagnostic() << "Expected ')', found end of stream.";
|
||||
if (')' != context->peek()) {
|
||||
return context->diagnostic()
|
||||
<< "')' expected after number of words but found '"
|
||||
<< context->peek() << "'.";
|
||||
}
|
||||
context->seekForward(1);
|
||||
|
||||
pInst->opcode = static_cast<spv::Op>(opcode);
|
||||
context->binaryEncodeU32(spvOpcodeMake(wordCount, pInst->opcode), pInst);
|
||||
|
||||
wordCount--; // Subtract the opcode from the number of words left to read.
|
||||
|
||||
while (wordCount-- > 0) {
|
||||
if (context->advance() == SPV_END_OF_STREAM) {
|
||||
return context->diagnostic() << "Expected " << wordCount + 1
|
||||
<< " more operands, found end of stream.";
|
||||
}
|
||||
if (context->isStartOfNewInst()) {
|
||||
std::string invalid;
|
||||
context->getWord(&invalid, &nextPosition);
|
||||
return context->diagnostic()
|
||||
<< "Unexpected start of new instruction: \"" << invalid
|
||||
<< "\". Expected " << wordCount + 1 << " more operands";
|
||||
}
|
||||
|
||||
std::string operandValue;
|
||||
if ((error = context->getWord(&operandValue, &nextPosition)))
|
||||
return context->diagnostic(error) << "Internal Error";
|
||||
|
||||
if (operandValue == "=")
|
||||
return context->diagnostic() << "OpUnknown not allowed before =.";
|
||||
|
||||
// Needed to pass to spvTextEncodeOperand(), but it shouldn't ever be
|
||||
// expanded.
|
||||
spv_operand_pattern_t dummyExpectedOperands;
|
||||
error = spvTextEncodeOperand(
|
||||
grammar, context, SPV_OPERAND_TYPE_OPTIONAL_CIV, operandValue.c_str(),
|
||||
pInst, &dummyExpectedOperands);
|
||||
if (error) return error;
|
||||
context->setPosition(nextPosition);
|
||||
}
|
||||
|
||||
return SPV_SUCCESS;
|
||||
}
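Given the grammar parsed above — an opening '(', the opcode enumerant, a ',', the total word count, a closing ')', then the remaining operands — here is a hedged sketch of what the accepted assembly text looks like, driven through the public C++ assembler wrapper; the opcode and operand values are arbitrary placeholders:

#include "spirv-tools/libspirv.hpp"

#include <cstdint>
#include <vector>

// Assembles a lone OpUnknown instruction: opcode enumerant 4242, two words in
// total, so exactly one literal operand follows the opcode word.
bool AssembleOpUnknownExample(std::vector<uint32_t>* binary) {
  spvtools::SpirvTools tools(SPV_ENV_UNIVERSAL_1_6);
  return tools.Assemble("OpUnknown(4242, 2) 7", binary);
}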
|
||||
|
||||
/// @brief Translate single Opcode and operands to binary form
|
||||
///
|
||||
/// @param[in] grammar the grammar to use for compilation
|
||||
@@ -574,6 +692,16 @@ spv_result_t spvTextEncodeOpcode(const spvtools::AssemblyGrammar& grammar,
|
||||
}
|
||||
}
|
||||
|
||||
if (opcodeName == "OpUnknown") {
|
||||
if (!result_id.empty()) {
|
||||
return context->diagnostic()
|
||||
<< "OpUnknown not allowed in assignment. Use an explicit result "
|
||||
"id operand instead.";
|
||||
}
|
||||
context->setPosition(nextPosition);
|
||||
return encodeInstructionStartingWithOpUnknown(grammar, context, pInst);
|
||||
}
|
||||
|
||||
// NOTE: The table contains Opcode names without the "Op" prefix.
|
||||
const char* pInstName = opcodeName.data() + 2;
|
||||
|
||||
|
||||
3  3rdparty/spirv-tools/source/text_handler.cpp (vendored)
@@ -118,6 +118,9 @@ spv_result_t getWord(spv_text text, spv_position position, std::string* word) {
|
||||
break;
|
||||
case ' ':
|
||||
case ';':
|
||||
case ',':
|
||||
case '(':
|
||||
case ')':
|
||||
case '\t':
|
||||
case '\n':
|
||||
case '\r':
|
||||
|
||||
@@ -30,14 +30,14 @@ spv_result_t ValidateBaseType(ValidationState_t& _, const Instruction* inst,
|
||||
|
||||
if (!_.IsIntScalarType(base_type) && !_.IsIntVectorType(base_type)) {
|
||||
return _.diag(SPV_ERROR_INVALID_DATA, inst)
|
||||
<< _.VkErrorID(4781)
|
||||
<< "Expected int scalar or vector type for Base operand: "
|
||||
<< spvOpcodeString(opcode);
|
||||
}
|
||||
|
||||
// Vulkan has a restriction to 32 bit for base
|
||||
if (spvIsVulkanEnv(_.context()->target_env)) {
|
||||
if (_.GetBitWidth(base_type) != 32) {
|
||||
if (_.GetBitWidth(base_type) != 32 &&
|
||||
!_.options()->allow_vulkan_32_bit_bitwise) {
|
||||
return _.diag(SPV_ERROR_INVALID_DATA, inst)
|
||||
<< _.VkErrorID(4781)
|
||||
<< "Expected 32-bit int type for Base operand: "
|
||||
|
||||
@@ -1916,7 +1916,7 @@ spv_result_t CheckComponentDecoration(ValidationState_t& vstate,
|
||||
if (!vstate.IsIntScalarOrVectorType(type_id) &&
|
||||
!vstate.IsFloatScalarOrVectorType(type_id)) {
|
||||
return vstate.diag(SPV_ERROR_INVALID_ID, &inst)
|
||||
<< vstate.VkErrorID(4924)
|
||||
<< vstate.VkErrorID(10583)
|
||||
<< "Component decoration specified for type "
|
||||
<< vstate.getIdName(type_id) << " that is not a scalar or vector";
|
||||
}
|
||||
@@ -2083,6 +2083,204 @@ spv_result_t CheckDecorationsFromDecoration(ValidationState_t& vstate) {
|
||||
return SPV_SUCCESS;
|
||||
}
|
||||
|
||||
bool AllowsLayout(ValidationState_t& vstate, const spv::StorageClass sc) {
|
||||
switch (sc) {
|
||||
case spv::StorageClass::StorageBuffer:
|
||||
case spv::StorageClass::Uniform:
|
||||
case spv::StorageClass::PhysicalStorageBuffer:
|
||||
case spv::StorageClass::PushConstant:
|
||||
// Always explicitly laid out.
|
||||
return true;
|
||||
case spv::StorageClass::UniformConstant:
|
||||
return false;
|
||||
case spv::StorageClass::Workgroup:
|
||||
return vstate.HasCapability(
|
||||
spv::Capability::WorkgroupMemoryExplicitLayoutKHR);
|
||||
case spv::StorageClass::Function:
|
||||
case spv::StorageClass::Private:
|
||||
return vstate.version() <= SPV_SPIRV_VERSION_WORD(1, 4);
|
||||
case spv::StorageClass::Input:
|
||||
case spv::StorageClass::Output:
|
||||
// Block is used generally and mesh shaders use Offset.
|
||||
return true;
|
||||
default:
|
||||
// TODO: Some storage classes in ray tracing use explicit layout
|
||||
// decorations, but it is not well documented which. For now treat other
|
||||
// storage classes as allowed to be laid out. See Vulkan internal issue
|
||||
// 4192.
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
bool UsesExplicitLayout(ValidationState_t& vstate, uint32_t type_id,
|
||||
std::unordered_map<uint32_t, bool>& cache) {
|
||||
if (type_id == 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (cache.count(type_id)) {
|
||||
return cache[type_id];
|
||||
}
|
||||
|
||||
bool res = false;
|
||||
const auto type_inst = vstate.FindDef(type_id);
|
||||
if (type_inst->opcode() == spv::Op::OpTypeStruct ||
|
||||
type_inst->opcode() == spv::Op::OpTypeArray ||
|
||||
type_inst->opcode() == spv::Op::OpTypeRuntimeArray ||
|
||||
type_inst->opcode() == spv::Op::OpTypePointer ||
|
||||
type_inst->opcode() == spv::Op::OpTypeUntypedPointerKHR) {
|
||||
const auto& id_decs = vstate.id_decorations();
|
||||
const auto iter = id_decs.find(type_id);
|
||||
if (iter != id_decs.end()) {
|
||||
bool allowLayoutDecorations = false;
|
||||
if (type_inst->opcode() == spv::Op::OpTypePointer) {
|
||||
const auto sc = type_inst->GetOperandAs<spv::StorageClass>(1);
|
||||
allowLayoutDecorations = AllowsLayout(vstate, sc);
|
||||
}
|
||||
if (!allowLayoutDecorations) {
|
||||
res = std::any_of(
|
||||
iter->second.begin(), iter->second.end(), [](const Decoration& d) {
|
||||
return d.dec_type() == spv::Decoration::Block ||
|
||||
d.dec_type() == spv::Decoration::BufferBlock ||
|
||||
d.dec_type() == spv::Decoration::Offset ||
|
||||
d.dec_type() == spv::Decoration::ArrayStride ||
|
||||
d.dec_type() == spv::Decoration::MatrixStride;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if (!res) {
|
||||
switch (type_inst->opcode()) {
|
||||
case spv::Op::OpTypeStruct:
|
||||
for (uint32_t i = 1; !res && i < type_inst->operands().size(); i++) {
|
||||
res = UsesExplicitLayout(
|
||||
vstate, type_inst->GetOperandAs<uint32_t>(i), cache);
|
||||
}
|
||||
break;
|
||||
case spv::Op::OpTypeArray:
|
||||
case spv::Op::OpTypeRuntimeArray:
|
||||
res = UsesExplicitLayout(vstate, type_inst->GetOperandAs<uint32_t>(1),
|
||||
cache);
|
||||
break;
|
||||
case spv::Op::OpTypePointer: {
|
||||
const auto sc = type_inst->GetOperandAs<spv::StorageClass>(1);
|
||||
if (!AllowsLayout(vstate, sc)) {
|
||||
res = UsesExplicitLayout(
|
||||
vstate, type_inst->GetOperandAs<uint32_t>(2), cache);
|
||||
}
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cache[type_id] = res;
|
||||
return res;
|
||||
}
|
||||
|
||||
spv_result_t CheckInvalidVulkanExplicitLayout(ValidationState_t& vstate) {
|
||||
if (!spvIsVulkanEnv(vstate.context()->target_env)) {
|
||||
return SPV_SUCCESS;
|
||||
}
|
||||
|
||||
std::unordered_map<uint32_t, bool> cache;
|
||||
for (const auto& inst : vstate.ordered_instructions()) {
|
||||
const auto type_id = inst.type_id();
|
||||
const auto type_inst = vstate.FindDef(type_id);
|
||||
uint32_t fail_id = 0;
|
||||
// Variables are the main place to check for improper decorations, but some
|
||||
// untyped pointer instructions must also be checked since those types may
|
||||
// never be instantiated by a variable. Unlike verifying a valid layout,
|
||||
// physical storage buffer does not need to be checked here since it is always
|
||||
// explicitly laid out.
|
||||
switch (inst.opcode()) {
|
||||
case spv::Op::OpVariable:
|
||||
case spv::Op::OpUntypedVariableKHR: {
|
||||
const auto sc = inst.GetOperandAs<spv::StorageClass>(2);
|
||||
auto check_id = type_id;
|
||||
if (inst.opcode() == spv::Op::OpUntypedVariableKHR) {
|
||||
if (inst.operands().size() > 3) {
|
||||
check_id = inst.GetOperandAs<uint32_t>(3);
|
||||
}
|
||||
}
|
||||
if (!AllowsLayout(vstate, sc) &&
|
||||
UsesExplicitLayout(vstate, check_id, cache)) {
|
||||
fail_id = check_id;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case spv::Op::OpUntypedAccessChainKHR:
|
||||
case spv::Op::OpUntypedInBoundsAccessChainKHR:
|
||||
case spv::Op::OpUntypedPtrAccessChainKHR:
|
||||
case spv::Op::OpUntypedInBoundsPtrAccessChainKHR: {
|
||||
// Check both the base type and return type. The return type may have an
|
||||
// invalid array stride.
|
||||
const auto sc = type_inst->GetOperandAs<spv::StorageClass>(1);
|
||||
const auto base_type_id = inst.GetOperandAs<uint32_t>(2);
|
||||
if (!AllowsLayout(vstate, sc)) {
|
||||
if (UsesExplicitLayout(vstate, base_type_id, cache)) {
|
||||
fail_id = base_type_id;
|
||||
} else if (UsesExplicitLayout(vstate, type_id, cache)) {
|
||||
fail_id = type_id;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
case spv::Op::OpUntypedArrayLengthKHR: {
|
||||
// Check the data type.
|
||||
const auto ptr_ty_id =
|
||||
vstate.FindDef(inst.GetOperandAs<uint32_t>(3))->type_id();
|
||||
const auto ptr_ty = vstate.FindDef(ptr_ty_id);
|
||||
const auto sc = ptr_ty->GetOperandAs<spv::StorageClass>(1);
|
||||
const auto base_type_id = inst.GetOperandAs<uint32_t>(2);
|
||||
if (!AllowsLayout(vstate, sc) &&
|
||||
UsesExplicitLayout(vstate, base_type_id, cache)) {
|
||||
fail_id = base_type_id;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case spv::Op::OpLoad: {
|
||||
const auto ptr_id = inst.GetOperandAs<uint32_t>(2);
|
||||
const auto ptr_type = vstate.FindDef(vstate.FindDef(ptr_id)->type_id());
|
||||
if (ptr_type->opcode() == spv::Op::OpTypeUntypedPointerKHR) {
|
||||
// For untyped pointers check the return type for an invalid layout.
|
||||
const auto sc = ptr_type->GetOperandAs<spv::StorageClass>(1);
|
||||
if (!AllowsLayout(vstate, sc) &&
|
||||
UsesExplicitLayout(vstate, type_id, cache)) {
|
||||
fail_id = type_id;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
case spv::Op::OpStore: {
|
||||
const auto ptr_id = inst.GetOperandAs<uint32_t>(1);
|
||||
const auto ptr_type = vstate.FindDef(vstate.FindDef(ptr_id)->type_id());
|
||||
if (ptr_type->opcode() == spv::Op::OpTypeUntypedPointerKHR) {
|
||||
// For untyped pointers, check the type of the data operand for an
|
||||
// invalid layout.
|
||||
const auto sc = ptr_type->GetOperandAs<spv::StorageClass>(1);
|
||||
const auto data_type_id = vstate.GetOperandTypeId(&inst, 2);
|
||||
if (!AllowsLayout(vstate, sc) &&
|
||||
UsesExplicitLayout(vstate, data_type_id, cache)) {
|
||||
fail_id = inst.GetOperandAs<uint32_t>(2);
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
if (fail_id != 0) {
|
||||
return vstate.diag(SPV_ERROR_INVALID_ID, &inst)
|
||||
<< "Invalid explicit layout decorations on type for operand "
|
||||
<< vstate.getIdName(fail_id);
|
||||
}
|
||||
}
|
||||
|
||||
return SPV_SUCCESS;
|
||||
}
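As a concrete illustration of what this new check rejects (a hypothetical fragment, not from the test suite): in a Vulkan environment targeting SPIR-V 1.5 or later, AllowsLayout() above returns false for the Private storage class, so a Private variable whose struct type carries an explicit Offset decoration now fails validation.

// Rejected: explicit layout decoration on a type reached only through Private.
// Ids are illustrative.
//   OpMemberDecorate %block 0 Offset 0
//   %uint  = OpTypeInt 32 0
//   %block = OpTypeStruct %uint
//   %ptr   = OpTypePointer Private %block
//   %var   = OpVariable %ptr Private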
|
||||
|
||||
} // namespace
|
||||
|
||||
spv_result_t ValidateDecorations(ValidationState_t& vstate) {
|
||||
@@ -2094,6 +2292,7 @@ spv_result_t ValidateDecorations(ValidationState_t& vstate) {
|
||||
if (auto error = CheckVulkanMemoryModelDeprecatedDecorations(vstate))
|
||||
return error;
|
||||
if (auto error = CheckDecorationsFromDecoration(vstate)) return error;
|
||||
if (auto error = CheckInvalidVulkanExplicitLayout(vstate)) return error;
|
||||
return SPV_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
@@ -258,7 +258,8 @@ spv_result_t ValidateFunctionCall(ValidationState_t& _,
|
||||
_.HasCapability(spv::Capability::VariablePointers) &&
|
||||
sc == spv::StorageClass::Workgroup;
|
||||
const bool uc_ptr = sc == spv::StorageClass::UniformConstant;
|
||||
if (!ssbo_vptr && !wg_vptr && !uc_ptr) {
|
||||
if (!_.options()->before_hlsl_legalization && !ssbo_vptr &&
|
||||
!wg_vptr && !uc_ptr) {
|
||||
return _.diag(SPV_ERROR_INVALID_ID, inst)
|
||||
<< "Pointer operand " << _.getIdName(argument_id)
|
||||
<< " must be a memory object declaration";
|
||||
|
||||
@@ -48,6 +48,29 @@ bool is_interface_variable(const Instruction* inst, bool is_spv_1_4) {
|
||||
}
|
||||
}
|
||||
|
||||
// Special validation for variables that are passed between shader stages
|
||||
spv_result_t ValidateInputOutputInterfaceVariables(ValidationState_t& _,
|
||||
const Instruction* var) {
|
||||
auto var_pointer = _.FindDef(var->GetOperandAs<uint32_t>(0));
|
||||
uint32_t pointer_id = var_pointer->GetOperandAs<uint32_t>(2);
|
||||
|
||||
const auto isPhysicalStorageBuffer = [](const Instruction* insn) {
|
||||
return insn->opcode() == spv::Op::OpTypePointer &&
|
||||
insn->GetOperandAs<spv::StorageClass>(1) ==
|
||||
spv::StorageClass::PhysicalStorageBuffer;
|
||||
};
|
||||
|
||||
if (_.ContainsType(pointer_id, isPhysicalStorageBuffer)) {
|
||||
return _.diag(SPV_ERROR_INVALID_ID, var)
|
||||
<< _.VkErrorID(9557) << "Input/Output interface variable id <"
|
||||
<< var->id()
|
||||
<< "> contains a PhysicalStorageBuffer pointer, which is not "
|
||||
"allowed. If you want to interface shader stages with a "
|
||||
"PhysicalStorageBuffer, cast to a uint64 or uvec2 instead.";
|
||||
}
|
||||
return SPV_SUCCESS;
|
||||
}
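A hedged illustration of the shape of module this rejects; the ids are invented for the example. An Output interface variable whose pointee type contains a PhysicalStorageBuffer pointer triggers the new VUID 9557 diagnostic; interfacing through a 64-bit integer (or uvec2) address value is the suggested workaround.

//   %float   = OpTypeFloat 32
//   %pss_ptr = OpTypePointer PhysicalStorageBuffer %float
//   %out_ptr = OpTypePointer Output %pss_ptr
//   %out_var = OpVariable %out_ptr Output   ; rejected by the check above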
|
||||
|
||||
// Checks that \c var is listed as an interface in all the entry points that use
|
||||
// it.
|
||||
spv_result_t check_interface_variable(ValidationState_t& _,
|
||||
@@ -107,6 +130,12 @@ spv_result_t check_interface_variable(ValidationState_t& _,
|
||||
}
|
||||
}
|
||||
|
||||
if (var->GetOperandAs<spv::StorageClass>(2) == spv::StorageClass::Input ||
|
||||
var->GetOperandAs<spv::StorageClass>(2) == spv::StorageClass::Output) {
|
||||
if (auto error = ValidateInputOutputInterfaceVariables(_, var))
|
||||
return error;
|
||||
}
|
||||
|
||||
return SPV_SUCCESS;
|
||||
}
|
||||
|
||||
@@ -135,17 +164,22 @@ spv_result_t NumConsumedLocations(ValidationState_t& _, const Instruction* type,
|
||||
}
|
||||
break;
|
||||
case spv::Op::OpTypeMatrix:
|
||||
// Matrices consume locations equal to the underlying vector type for
|
||||
// each column.
|
||||
NumConsumedLocations(_, _.FindDef(type->GetOperandAs<uint32_t>(1)),
|
||||
num_locations);
|
||||
// Matrices consume locations equivalent to arrays of 4-component vectors.
|
||||
if (_.ContainsSizedIntOrFloatType(type->id(), spv::Op::OpTypeInt, 64) ||
|
||||
_.ContainsSizedIntOrFloatType(type->id(), spv::Op::OpTypeFloat, 64)) {
|
||||
*num_locations = 2;
|
||||
} else {
|
||||
*num_locations = 1;
|
||||
}
|
||||
*num_locations *= type->GetOperandAs<uint32_t>(2);
|
||||
break;
|
||||
case spv::Op::OpTypeArray: {
|
||||
// Arrays consume locations equal to the underlying type times the number
|
||||
// of elements in the vector.
|
||||
NumConsumedLocations(_, _.FindDef(type->GetOperandAs<uint32_t>(1)),
|
||||
num_locations);
|
||||
if (auto error = NumConsumedLocations(
|
||||
_, _.FindDef(type->GetOperandAs<uint32_t>(1)), num_locations)) {
|
||||
return error;
|
||||
}
|
||||
bool is_int = false;
|
||||
bool is_const = false;
|
||||
uint32_t value = 0;
|
||||
@@ -215,10 +249,31 @@ uint32_t NumConsumedComponents(ValidationState_t& _, const Instruction* type) {
|
||||
NumConsumedComponents(_, _.FindDef(type->GetOperandAs<uint32_t>(1)));
|
||||
num_components *= type->GetOperandAs<uint32_t>(2);
|
||||
break;
|
||||
case spv::Op::OpTypeArray:
|
||||
// Skip the array.
|
||||
return NumConsumedComponents(_,
|
||||
_.FindDef(type->GetOperandAs<uint32_t>(1)));
|
||||
case spv::Op::OpTypeMatrix:
|
||||
// Matrices consume all components of the location.
|
||||
// Round up to next multiple of 4.
|
||||
num_components =
|
||||
NumConsumedComponents(_, _.FindDef(type->GetOperandAs<uint32_t>(1)));
|
||||
num_components *= type->GetOperandAs<uint32_t>(2);
|
||||
num_components = ((num_components + 3) / 4) * 4;
|
||||
break;
|
||||
case spv::Op::OpTypeArray: {
|
||||
// Arrays consume all components of the location.
|
||||
// Round up to next multiple of 4.
|
||||
num_components =
|
||||
NumConsumedComponents(_, _.FindDef(type->GetOperandAs<uint32_t>(1)));
|
||||
|
||||
bool is_int = false;
|
||||
bool is_const = false;
|
||||
uint32_t value = 0;
|
||||
// Attempt to evaluate the number of array elements.
|
||||
std::tie(is_int, is_const, value) =
|
||||
_.EvalInt32IfConst(type->GetOperandAs<uint32_t>(2));
|
||||
if (is_int && is_const) num_components *= value;
|
||||
|
||||
num_components = ((num_components + 3) / 4) * 4;
|
||||
return num_components;
|
||||
}
|
||||
case spv::Op::OpTypePointer:
|
||||
if (_.addressing_model() ==
|
||||
spv::AddressingModel::PhysicalStorageBuffer64 &&
|
||||
@@ -301,9 +356,10 @@ spv_result_t GetLocationsForVariable(
|
||||
}
|
||||
}
|
||||
|
||||
// Vulkan 14.1.3: Tessellation control and mesh per-vertex outputs and
|
||||
// tessellation control, evaluation and geometry per-vertex inputs have a
|
||||
// layer of arraying that is not included in interface matching.
|
||||
// Vulkan 15.1.3 (Interface Matching): Tessellation control and mesh
|
||||
// per-vertex outputs and tessellation control, evaluation and geometry
|
||||
// per-vertex inputs have a layer of arraying that is not included in
|
||||
// interface matching.
|
||||
bool is_arrayed = false;
|
||||
switch (entry_point->GetOperandAs<spv::ExecutionModel>(0)) {
|
||||
case spv::ExecutionModel::TessellationControl:
|
||||
@@ -357,51 +413,33 @@ spv_result_t GetLocationsForVariable(
|
||||
|
||||
const std::string storage_class = is_output ? "output" : "input";
|
||||
if (has_location) {
|
||||
auto sub_type = type;
|
||||
bool is_int = false;
|
||||
bool is_const = false;
|
||||
uint32_t array_size = 1;
|
||||
// If the variable is still arrayed, mark the locations/components per
|
||||
// index.
|
||||
if (type->opcode() == spv::Op::OpTypeArray) {
|
||||
// Determine the array size if possible and get the element type.
|
||||
std::tie(is_int, is_const, array_size) =
|
||||
_.EvalInt32IfConst(type->GetOperandAs<uint32_t>(2));
|
||||
if (!is_int || !is_const) array_size = 1;
|
||||
auto sub_type_id = type->GetOperandAs<uint32_t>(1);
|
||||
sub_type = _.FindDef(sub_type_id);
|
||||
uint32_t num_locations = 0;
|
||||
if (auto error = NumConsumedLocations(_, type, &num_locations))
|
||||
return error;
|
||||
uint32_t num_components = NumConsumedComponents(_, type);
|
||||
|
||||
uint32_t start = location * 4;
|
||||
uint32_t end = (location + num_locations) * 4;
|
||||
if (num_components % 4 != 0) {
|
||||
start += component;
|
||||
end = start + num_components;
|
||||
}
|
||||
|
||||
uint32_t num_locations = 0;
|
||||
if (auto error = NumConsumedLocations(_, sub_type, &num_locations))
|
||||
return error;
|
||||
uint32_t num_components = NumConsumedComponents(_, sub_type);
|
||||
if (kMaxLocations <= start) {
|
||||
// Too many locations, give up.
|
||||
return SPV_SUCCESS;
|
||||
}
|
||||
|
||||
for (uint32_t array_idx = 0; array_idx < array_size; ++array_idx) {
|
||||
uint32_t array_location = location + (num_locations * array_idx);
|
||||
uint32_t start = array_location * 4;
|
||||
if (kMaxLocations <= start) {
|
||||
// Too many locations, give up.
|
||||
break;
|
||||
}
|
||||
auto locs = locations;
|
||||
if (has_index && index == 1) locs = output_index1_locations;
|
||||
|
||||
uint32_t end = (array_location + num_locations) * 4;
|
||||
if (num_components != 0) {
|
||||
start += component;
|
||||
end = array_location * 4 + component + num_components;
|
||||
}
|
||||
|
||||
auto locs = locations;
|
||||
if (has_index && index == 1) locs = output_index1_locations;
|
||||
|
||||
for (uint32_t i = start; i < end; ++i) {
|
||||
if (!locs->insert(i).second) {
|
||||
return _.diag(SPV_ERROR_INVALID_DATA, entry_point)
|
||||
<< (is_output ? _.VkErrorID(8722) : _.VkErrorID(8721))
|
||||
<< "Entry-point has conflicting " << storage_class
|
||||
<< " location assignment at location " << i / 4
|
||||
<< ", component " << i % 4;
|
||||
}
|
||||
for (uint32_t i = start; i < end; ++i) {
|
||||
if (!locs->insert(i).second) {
|
||||
return _.diag(SPV_ERROR_INVALID_DATA, entry_point)
|
||||
<< (is_output ? _.VkErrorID(8722) : _.VkErrorID(8721))
|
||||
<< "Entry-point has conflicting " << storage_class
|
||||
<< " location assignment at location " << i / 4 << ", component "
|
||||
<< i % 4;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -460,38 +498,19 @@ spv_result_t GetLocationsForVariable(
|
||||
continue;
|
||||
}
|
||||
|
||||
if (member->opcode() == spv::Op::OpTypeArray && num_components >= 1 &&
|
||||
num_components < 4) {
|
||||
// When an array has an element that takes less than a location in
|
||||
// size, calculate the used locations in a strided manner.
|
||||
for (uint32_t l = location; l < num_locations + location; ++l) {
|
||||
for (uint32_t c = component; c < component + num_components; ++c) {
|
||||
uint32_t check = 4 * l + c;
|
||||
if (!locations->insert(check).second) {
|
||||
return _.diag(SPV_ERROR_INVALID_DATA, entry_point)
|
||||
<< (is_output ? _.VkErrorID(8722) : _.VkErrorID(8721))
|
||||
<< "Entry-point has conflicting " << storage_class
|
||||
<< " location assignment at location " << l
|
||||
<< ", component " << c;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// TODO: There is a hole here if the member is an array of 3- or
|
||||
// 4-element vectors of 64-bit types.
|
||||
uint32_t end = (location + num_locations) * 4;
|
||||
if (num_components != 0) {
|
||||
start += component;
|
||||
end = location * 4 + component + num_components;
|
||||
}
|
||||
for (uint32_t l = start; l < end; ++l) {
|
||||
if (!locations->insert(l).second) {
|
||||
return _.diag(SPV_ERROR_INVALID_DATA, entry_point)
|
||||
<< (is_output ? _.VkErrorID(8722) : _.VkErrorID(8721))
|
||||
<< "Entry-point has conflicting " << storage_class
|
||||
<< " location assignment at location " << l / 4
|
||||
<< ", component " << l % 4;
|
||||
}
|
||||
uint32_t end = (location + num_locations) * 4;
|
||||
if (num_components % 4 != 0) {
|
||||
start += component;
|
||||
end = location * 4 + component + num_components;
|
||||
}
|
||||
|
||||
for (uint32_t l = start; l < end; ++l) {
|
||||
if (!locations->insert(l).second) {
|
||||
return _.diag(SPV_ERROR_INVALID_DATA, entry_point)
|
||||
<< (is_output ? _.VkErrorID(8722) : _.VkErrorID(8721))
|
||||
<< "Entry-point has conflicting " << storage_class
|
||||
<< " location assignment at location " << l / 4
|
||||
<< ", component " << l % 4;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
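In the strided branch above, an array member whose element uses fewer than four components claims the same component range in every location it spans. A rough worked example, not taken from the source: the values below model a two-element array of two-component vectors decorated Location 3, Component 0, and simply print the slots the strided loop would claim (components 0 and 1 of locations 3 and 4).

#include <cstdio>

int main() {
  const unsigned location = 3, num_locations = 2;    // the whole array spans 2 locations
  const unsigned component = 0, num_components = 2;  // each element uses components 0..1
  for (unsigned l = location; l < location + num_locations; ++l)
    for (unsigned c = component; c < component + num_components; ++c)
      std::printf("slot %u (location %u, component %u)\n", 4 * l + c, l, c);
  return 0;
}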
@@ -380,6 +380,16 @@ spv_result_t CheckMemoryAccess(ValidationState_t& _, const Instruction* inst,
               << _.VkErrorID(4708)
               << "Memory accesses with PhysicalStorageBuffer must use Aligned.";
      }
    } else {
      // even if there are other masks, the Aligned operand will be next
      const uint32_t aligned_value = inst->GetOperandAs<uint32_t>(index + 1);
      const bool is_power_of_two =
          aligned_value && !(aligned_value & (aligned_value - 1));
      if (!is_power_of_two) {
        return _.diag(SPV_ERROR_INVALID_ID, inst)
               << "Memory accesses Aligned operand value " << aligned_value
               << " is not a power of two.";
      }
    }
  }

  return SPV_SUCCESS;
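The Aligned check above uses the standard bit trick: a non-zero value x is a power of two exactly when x & (x - 1) is zero, because subtracting one flips the single set bit and everything below it. A small standalone illustration, not SPIRV-Tools API:

#include <cstdint>

// True only for 1, 2, 4, 8, ...; zero is rejected explicitly.
bool IsPowerOfTwo(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; }

// e.g. IsPowerOfTwo(16) is true and IsPowerOfTwo(24) is false, so an
// Aligned operand of 24 would be diagnosed as invalid by the check above.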
@@ -2781,17 +2791,42 @@ spv_result_t ValidatePtrComparison(ValidationState_t& _,
  const auto op1 = _.FindDef(inst->GetOperandAs<uint32_t>(2u));
  const auto op2 = _.FindDef(inst->GetOperandAs<uint32_t>(3u));
  if (!op1 || !op2 || op1->type_id() != op2->type_id()) {
    return _.diag(SPV_ERROR_INVALID_ID, inst)
           << "The types of Operand 1 and Operand 2 must match";
  }
  const auto op1_type = _.FindDef(op1->type_id());
  const auto op2_type = _.FindDef(op2->type_id());
  if (!op1_type || (op1_type->opcode() != spv::Op::OpTypePointer &&
                    op1_type->opcode() != spv::Op::OpTypeUntypedPointerKHR)) {
    return _.diag(SPV_ERROR_INVALID_ID, inst)
           << "Operand type must be a pointer";
  }

  if (!op2_type || (op2_type->opcode() != spv::Op::OpTypePointer &&
                    op2_type->opcode() != spv::Op::OpTypeUntypedPointerKHR)) {
    return _.diag(SPV_ERROR_INVALID_ID, inst)
           << "Operand type must be a pointer";
  }

  if (inst->opcode() == spv::Op::OpPtrDiff) {
    if (op1->type_id() != op2->type_id()) {
      return _.diag(SPV_ERROR_INVALID_ID, inst)
             << "The types of Operand 1 and Operand 2 must match";
    }
  } else {
    const auto either_untyped =
        op1_type->opcode() == spv::Op::OpTypeUntypedPointerKHR ||
        op2_type->opcode() == spv::Op::OpTypeUntypedPointerKHR;
    if (either_untyped) {
      const auto sc1 = op1_type->GetOperandAs<spv::StorageClass>(1);
      const auto sc2 = op2_type->GetOperandAs<spv::StorageClass>(1);
      if (sc1 != sc2) {
        return _.diag(SPV_ERROR_INVALID_ID, inst)
               << "Pointer storage classes must match";
      }
    } else if (op1->type_id() != op2->type_id()) {
      return _.diag(SPV_ERROR_INVALID_ID, inst)
             << "The types of Operand 1 and Operand 2 must match";
    }
  }

  spv::StorageClass sc = op1_type->GetOperandAs<spv::StorageClass>(1u);
  if (_.addressing_model() == spv::AddressingModel::Logical) {
    if (sc != spv::StorageClass::Workgroup &&
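The branch above relaxes the type-match requirement for pointer equality comparisons when either operand is an untyped pointer: only the storage classes have to agree, while OpPtrDiff still requires identical operand types. A condensed sketch of that decision, using made-up stand-in types rather than the validator's own classes:

#include <cstdint>

enum class PtrKind { Typed, Untyped };  // stand-in, not a SPIRV-Tools type
enum class StorageClass { Workgroup, StorageBuffer, PhysicalStorageBuffer };

struct PtrType {
  PtrKind kind;
  StorageClass sc;
  uint32_t type_id;  // unique id of the full pointer type
};

// Mirrors the rule sketched above: OpPtrDiff needs identical types; the
// equality opcodes accept differing untyped pointer types when the storage
// classes match, and otherwise fall back to the identical-type rule.
bool ComparablePointers(bool is_ptr_diff, const PtrType& a, const PtrType& b) {
  if (is_ptr_diff) return a.type_id == b.type_id;
  const bool either_untyped =
      a.kind == PtrKind::Untyped || b.kind == PtrKind::Untyped;
  if (either_untyped) return a.sc == b.sc;
  return a.type_id == b.type_id;
}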
@@ -2415,8 +2415,6 @@ std::string ValidationState_t::VkErrorID(uint32_t id,
      return VUID_WRAP(VUID-StandaloneSpirv-Component-04922);
    case 4923:
      return VUID_WRAP(VUID-StandaloneSpirv-Component-04923);
    case 4924:
      return VUID_WRAP(VUID-StandaloneSpirv-Component-04924);
    case 6201:
      return VUID_WRAP(VUID-StandaloneSpirv-Flat-06201);
    case 6202:
@@ -2525,6 +2523,8 @@ std::string ValidationState_t::VkErrorID(uint32_t id,
      return VUID_WRAP(VUID-StandaloneSpirv-OpEntryPoint-08722);
    case 8973:
      return VUID_WRAP(VUID-StandaloneSpirv-Pointer-08973);
    case 9557:
      return VUID_WRAP(VUID-StandaloneSpirv-Input-09557);
    case 9638:
      return VUID_WRAP(VUID-StandaloneSpirv-OpTypeImage-09638);
    case 9658:
@@ -2534,6 +2534,8 @@ std::string ValidationState_t::VkErrorID(uint32_t id,
    case 10213:
      // This used to be a standalone VUID, but maintenance8 will set
      // allow_offset_texture_operand now.
      return VUID_WRAP(VUID-RuntimeSpirv-Offset-10213);
    case 10583:
      return VUID_WRAP(VUID-StandaloneSpirv-Component-10583);
    default:
      return "";  // unknown id
  }
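The switch above is just a lookup from a numeric id to the corresponding Vulkan VUID string, with VUID_WRAP presumably stringizing the identifier. A minimal sketch of the same pattern, using a hypothetical macro (the exact framing in SPIRV-Tools may differ) and two entries copied from the cases shown above:

#include <cstdint>
#include <string>

// Hypothetical stand-in for the real macro; only the shape matters here.
#define MY_VUID_WRAP(vuid) "[" #vuid "] "

std::string LookupVuid(uint32_t id) {
  switch (id) {
    case 8722:  return MY_VUID_WRAP(VUID-StandaloneSpirv-OpEntryPoint-08722);
    case 10583: return MY_VUID_WRAP(VUID-StandaloneSpirv-Component-10583);
    default:    return "";  // unknown id
  }
}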