Updated spirv-tools.

Бранимир Караџић
2023-07-14 17:41:59 -07:00
parent 9ef1db93d9
commit 9ba15cd473
18 changed files with 829 additions and 194 deletions

View File

@@ -1 +1 @@
"v2023.3", "SPIRV-Tools v2023.3 v2022.4-253-gb18e55e0"
"v2023.3", "SPIRV-Tools v2023.3 v2022.4-269-g34399abb"

View File

@@ -36,4 +36,5 @@
{35, "Saarland University", "Shady", "Saarland University Shady"},
{36, "Taichi Graphics", "Taichi", "Taichi Graphics Taichi"},
{37, "heroseh", "Hero C Compiler", "heroseh Hero C Compiler"},
{38, "Meta", "SparkSL", "Meta SparkSL"},
{38, "Meta", "SparkSL", "Meta SparkSL"},
{39, "SirLynix", "Nazara ShaderLang Compiler", "SirLynix Nazara ShaderLang Compiler"},

View File

@@ -393,7 +393,9 @@ static const spv_operand_desc_t pygen_variable_SourceLanguageEntries[] = {
{"HLSL", 5, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1, 0), 0xffffffffu},
{"CPP_for_OpenCL", 6, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1, 0), 0xffffffffu},
{"SYCL", 7, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1, 0), 0xffffffffu},
{"HERO_C", 8, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1, 0), 0xffffffffu}
{"HERO_C", 8, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1, 0), 0xffffffffu},
{"NZSL", 9, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1, 0), 0xffffffffu},
{"WGSL", 10, 0, nullptr, 0, nullptr, {}, SPV_SPIRV_VERSION_WORD(1, 0), 0xffffffffu}
};
static const spv_operand_desc_t pygen_variable_ExecutionModelEntries[] = {

View File

@@ -184,7 +184,7 @@ CapabilitySet AssemblyGrammar::filterCapsAgainstTargetEnv(
// spvOperandTableValueLookup() filters capabilities internally
// according to the current target environment by itself. So we
// should be safe to add this capability if the lookup succeeds.
cap_set.Add(cap_array[i]);
cap_set.insert(cap_array[i]);
}
}
return cap_set;

View File

@@ -626,7 +626,6 @@ spv_result_t Parser::parseOperand(size_t inst_offset,
} break;
case SPV_OPERAND_TYPE_CAPABILITY:
case SPV_OPERAND_TYPE_SOURCE_LANGUAGE:
case SPV_OPERAND_TYPE_EXECUTION_MODEL:
case SPV_OPERAND_TYPE_ADDRESSING_MODEL:
case SPV_OPERAND_TYPE_MEMORY_MODEL:
@@ -683,6 +682,21 @@ spv_result_t Parser::parseOperand(size_t inst_offset,
spvPushOperandTypes(entry->operandTypes, expected_operands);
} break;
case SPV_OPERAND_TYPE_SOURCE_LANGUAGE: {
spv_operand_desc entry;
if (grammar_.lookupOperand(type, word, &entry)) {
return diagnostic()
<< "Invalid " << spvOperandTypeStr(parsed_operand.type)
<< " operand: " << word
<< ", if you are creating a new source language please use "
"value 0 "
"(Unknown) and when ready, add your source language to "
"SPRIV-Headers";
}
// Prepare to accept operands to this operand, if needed.
spvPushOperandTypes(entry->operandTypes, expected_operands);
} break;
case SPV_OPERAND_TYPE_FP_FAST_MATH_MODE:
case SPV_OPERAND_TYPE_FUNCTION_CONTROL:
case SPV_OPERAND_TYPE_LOOP_CONTROL:
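
A note for readers of the hunk above: AssemblyGrammar::lookupOperand() returns SPV_SUCCESS (0) on a hit, so the "if (grammar_.lookupOperand(...))" branch fires only when the lookup fails, i.e. for SourceLanguage values that are not registered in the operand table. A minimal standalone sketch of that pattern (hypothetical helper name, not part of this commit):

#include "source/table.h"  // AssemblyGrammar, spv_operand_desc

// Hypothetical helper mirroring the check added to Parser::parseOperand():
// a failed grammar lookup means the word is not a known SourceLanguage value.
bool IsKnownSourceLanguage(const spvtools::AssemblyGrammar& grammar,
                           uint32_t word) {
  spv_operand_desc entry = nullptr;
  // lookupOperand() returns SPV_SUCCESS (0) when the value is found.
  return grammar.lookupOperand(SPV_OPERAND_TYPE_SOURCE_LANGUAGE, word,
                               &entry) == SPV_SUCCESS;
}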

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2016 Google Inc.
// Copyright (c) 2023 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,195 +12,438 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cassert>
#include <cstdint>
#include <functional>
#include <initializer_list>
#include <limits>
#include <type_traits>
#include <vector>
#ifndef SOURCE_ENUM_SET_H_
#define SOURCE_ENUM_SET_H_
#include <cstdint>
#include <functional>
#include <memory>
#include <set>
#include <utility>
#include "source/latest_version_spirv_header.h"
#include "source/util/make_unique.h"
namespace spvtools {
// A set of values of a 32-bit enum type.
// It is fast and compact for the common case, where enum values
// are at most 63. But it can represent enums with larger values,
// as may appear in extensions.
template <typename EnumType>
// This container is optimized to store and retrieve unsigned enum values.
// The base model for this implementation is an open-addressing hashtable with
// linear probing. For small enums (max index < 64), all operations are O(1).
//
// - Enums are stored in buckets (64 contiguous values max per bucket)
// - Bucket ranges don't overlap, but don't have to be contiguous.
// - Enums are packed into 64-bit buckets, using 1 bit per enum value.
//
// Example:
// - MyEnum { A = 0, B = 1, C = 64, D = 65 }
// - 2 buckets are required:
// - bucket 0, storing values in the range [ 0; 64[
// - bucket 1, storing values in the range [64; 128[
//
// - Buckets are stored in a sorted vector (sorted by bucket range).
// - Retrieval is done by computing the theoretical bucket index using the enum
// value, and
// doing a linear scan from this position.
// - Insertion is done by retrieving the bucket and either:
// - inserting a new bucket in the sorted vector when no bucket has a
// compatible range.
// - setting the corresponding bit in the bucket.
// This means insertion in the middle/beginning can cause a memmove when no
// bucket is available. In our case, this happens at most 23 times for the
// largest enum we have (Opcodes).
template <typename T>
class EnumSet {
private:
// The ForEach method will call the functor on enum values in
// enum value order (lowest to highest). To make that easier, use
// an ordered set for the overflow values.
using OverflowSetType = std::set<uint32_t>;
using BucketType = uint64_t;
using ElementType = std::underlying_type_t<T>;
static_assert(std::is_enum_v<T>, "EnumSet only works with enums.");
static_assert(std::is_signed_v<ElementType> == false,
"EnumSet doesn't support signed enums.");
// Each bucket can hold up to `kBucketSize` distinct, contiguous enum values.
// The first value a bucket can hold must be aligned on `kBucketSize`.
struct Bucket {
// bit mask to store `kBucketSize` enums.
BucketType data;
// 1st enum this bucket can represent.
T start;
friend bool operator==(const Bucket& lhs, const Bucket& rhs) {
return lhs.start == rhs.start && lhs.data == rhs.data;
}
};
// How many distinct values can a bucket hold? 1 bit per value.
static constexpr size_t kBucketSize = sizeof(BucketType) * 8ULL;
public:
// Construct an empty set.
EnumSet() {}
// Construct an set with just the given enum value.
explicit EnumSet(EnumType c) { Add(c); }
// Construct an set from an initializer list of enum values.
EnumSet(std::initializer_list<EnumType> cs) {
for (auto c : cs) Add(c);
}
EnumSet(uint32_t count, const EnumType* ptr) {
for (uint32_t i = 0; i < count; ++i) Add(ptr[i]);
}
// Copy constructor.
EnumSet(const EnumSet& other) { *this = other; }
// Move constructor. The moved-from set is emptied.
EnumSet(EnumSet&& other) {
mask_ = other.mask_;
overflow_ = std::move(other.overflow_);
other.mask_ = 0;
other.overflow_.reset(nullptr);
}
// Assignment operator.
EnumSet& operator=(const EnumSet& other) {
if (&other != this) {
mask_ = other.mask_;
overflow_.reset(other.overflow_ ? new OverflowSetType(*other.overflow_)
: nullptr);
class Iterator {
public:
typedef Iterator self_type;
typedef T value_type;
typedef T& reference;
typedef T* pointer;
typedef std::forward_iterator_tag iterator_category;
typedef size_t difference_type;
Iterator(const Iterator& other)
: set_(other.set_),
bucketIndex_(other.bucketIndex_),
bucketOffset_(other.bucketOffset_) {}
Iterator& operator++() {
do {
if (bucketIndex_ >= set_->buckets_.size()) {
bucketIndex_ = set_->buckets_.size();
bucketOffset_ = 0;
break;
}
if (bucketOffset_ + 1 == kBucketSize) {
bucketOffset_ = 0;
++bucketIndex_;
} else {
++bucketOffset_;
}
} while (bucketIndex_ < set_->buckets_.size() &&
!set_->HasEnumAt(bucketIndex_, bucketOffset_));
return *this;
}
Iterator operator++(int) {
Iterator old = *this;
operator++();
return old;
}
T operator*() const {
assert(set_->HasEnumAt(bucketIndex_, bucketOffset_) &&
"operator*() called on an invalid iterator.");
return GetValueFromBucket(set_->buckets_[bucketIndex_], bucketOffset_);
}
bool operator!=(const Iterator& other) const {
return set_ != other.set_ || bucketOffset_ != other.bucketOffset_ ||
bucketIndex_ != other.bucketIndex_;
}
bool operator==(const Iterator& other) const {
return !(operator!=(other));
}
Iterator& operator=(const Iterator& other) {
set_ = other.set_;
bucketIndex_ = other.bucketIndex_;
bucketOffset_ = other.bucketOffset_;
return *this;
}
private:
Iterator(const EnumSet* set, size_t bucketIndex, ElementType bucketOffset)
: set_(set), bucketIndex_(bucketIndex), bucketOffset_(bucketOffset) {}
private:
const EnumSet* set_ = nullptr;
// Index of the bucket in the vector.
size_t bucketIndex_ = 0;
// Offset in bits in the current bucket.
ElementType bucketOffset_ = 0;
friend class EnumSet;
};
// Required to allow the use of std::inserter.
using value_type = T;
using const_iterator = Iterator;
using iterator = Iterator;
public:
iterator cbegin() const noexcept {
auto it = iterator(this, /* bucketIndex= */ 0, /* bucketOffset= */ 0);
if (buckets_.size() == 0) {
return it;
}
// The iterator has the logic to find the next valid bit. If the value 0
// is not stored, use it to find the next valid bit.
if (!HasEnumAt(it.bucketIndex_, it.bucketOffset_)) {
++it;
}
return it;
}
iterator begin() const noexcept { return cbegin(); }
iterator cend() const noexcept {
return iterator(this, buckets_.size(), /* bucketOffset= */ 0);
}
iterator end() const noexcept { return cend(); }
// Creates an empty set.
EnumSet() : buckets_(0), size_(0) {}
// Creates a set and store `value` in it.
EnumSet(T value) : EnumSet() { insert(value); }
// Creates a set and stores each `values` in it.
EnumSet(std::initializer_list<T> values) : EnumSet() {
for (auto item : values) {
insert(item);
}
}
// Creates a set and inserts the `count` enum values pointed to by `array`.
EnumSet(ElementType count, const T* array) : EnumSet() {
for (ElementType i = 0; i < count; i++) {
insert(array[i]);
}
}
// Copies the EnumSet `other` into a new EnumSet.
EnumSet(const EnumSet& other)
: buckets_(other.buckets_), size_(other.size_) {}
// Moves the EnumSet `other` into a new EnumSet.
EnumSet(EnumSet&& other)
: buckets_(std::move(other.buckets_)), size_(other.size_) {}
// Deep-copies the EnumSet `other` into this EnumSet.
EnumSet& operator=(const EnumSet& other) {
buckets_ = other.buckets_;
size_ = other.size_;
return *this;
}
friend bool operator==(const EnumSet& a, const EnumSet& b) {
if (a.mask_ != b.mask_) {
return false;
// Matches std::unordered_set::insert behavior.
std::pair<iterator, bool> insert(const T& value) {
const size_t index = FindBucketForValue(value);
const ElementType offset = ComputeBucketOffset(value);
if (index >= buckets_.size() ||
buckets_[index].start != ComputeBucketStart(value)) {
size_ += 1;
InsertBucketFor(index, value);
return std::make_pair(Iterator(this, index, offset), true);
}
if (a.overflow_ == nullptr && b.overflow_ == nullptr) {
auto& bucket = buckets_[index];
const auto mask = ComputeMaskForValue(value);
if (bucket.data & mask) {
return std::make_pair(Iterator(this, index, offset), false);
}
size_ += 1;
bucket.data |= ComputeMaskForValue(value);
return std::make_pair(Iterator(this, index, offset), true);
}
// Inserts `value` in the set if possible.
// Similar to `std::unordered_set::insert`, except the hint is ignored.
// Returns an iterator to the inserted element, or the element preventing
// insertion.
iterator insert(const_iterator, const T& value) {
return insert(value).first;
}
// Inserts `value` in the set if possible.
// Similar to `std::unordered_set::insert`, except the hint is ignored.
// Returns an iterator to the inserted element, or the element preventing
// insertion.
iterator insert(const_iterator, T&& value) { return insert(value).first; }
// Removes the value `value` from the set.
// Similar to `std::unordered_set::erase`.
// Returns the number of erased elements.
size_t erase(const T& value) {
const size_t index = FindBucketForValue(value);
if (index >= buckets_.size() ||
buckets_[index].start != ComputeBucketStart(value)) {
return 0;
}
auto& bucket = buckets_[index];
const auto mask = ComputeMaskForValue(value);
if (!(bucket.data & mask)) {
return 0;
}
size_ -= 1;
bucket.data &= ~mask;
if (bucket.data == 0) {
buckets_.erase(buckets_.cbegin() + index);
}
return 1;
}
// Returns true if `value` is present in the set.
bool contains(T value) const {
const size_t index = FindBucketForValue(value);
if (index >= buckets_.size() ||
buckets_[index].start != ComputeBucketStart(value)) {
return false;
}
auto& bucket = buckets_[index];
return bucket.data & ComputeMaskForValue(value);
}
// Returns `1` if `value` is present in the set, `0` otherwise.
inline size_t count(T value) const { return contains(value) ? 1 : 0; }
// Returns true if the set holds no values.
inline bool empty() const { return size_ == 0; }
// Returns the number of enums stored in this set.
size_t size() const { return size_; }
// Returns true if this set contains at least one value contained in `in_set`.
// Note: If `in_set` is empty, this function returns true.
bool HasAnyOf(const EnumSet<T>& in_set) const {
if (in_set.empty()) {
return true;
}
if (a.overflow_ == nullptr || b.overflow_ == nullptr) {
return false;
}
auto lhs = buckets_.cbegin();
auto rhs = in_set.buckets_.cbegin();
return *a.overflow_ == *b.overflow_;
}
while (lhs != buckets_.cend() && rhs != in_set.buckets_.cend()) {
if (lhs->start == rhs->start) {
if (lhs->data & rhs->data) {
// At least 1 bit is shared. Early return.
return true;
}
friend bool operator!=(const EnumSet& a, const EnumSet& b) {
return !(a == b);
}
lhs++;
rhs++;
continue;
}
// Adds the given enum value to the set. This has no effect if the
// enum value is already in the set.
void Add(EnumType c) { AddWord(ToWord(c)); }
// LHS bucket is smaller than the current RHS bucket. Catching up on RHS.
if (lhs->start < rhs->start) {
lhs++;
continue;
}
// Removes the given enum value from the set. This has no effect if the
// enum value is not in the set.
void Remove(EnumType c) { RemoveWord(ToWord(c)); }
// Returns true if this enum value is in the set.
bool Contains(EnumType c) const { return ContainsWord(ToWord(c)); }
// Applies f to each enum in the set, in order from smallest enum
// value to largest.
void ForEach(std::function<void(EnumType)> f) const {
for (uint32_t i = 0; i < 64; ++i) {
if (mask_ & AsMask(i)) f(static_cast<EnumType>(i));
}
if (overflow_) {
for (uint32_t c : *overflow_) f(static_cast<EnumType>(c));
}
}
// Returns true if the set is empty.
bool IsEmpty() const {
if (mask_) return false;
if (overflow_ && !overflow_->empty()) return false;
return true;
}
// Returns true if the set contains ANY of the elements of |in_set|,
// or if |in_set| is empty.
bool HasAnyOf(const EnumSet<EnumType>& in_set) const {
if (in_set.IsEmpty()) return true;
if (mask_ & in_set.mask_) return true;
if (!overflow_ || !in_set.overflow_) return false;
for (uint32_t item : *in_set.overflow_) {
if (overflow_->find(item) != overflow_->end()) return true;
// Otherwise, RHS needs to catch up on LHS.
rhs++;
}
return false;
}
private:
// Adds the given enum value (as a 32-bit word) to the set. This has no
// effect if the enum value is already in the set.
void AddWord(uint32_t word) {
if (auto new_bits = AsMask(word)) {
mask_ |= new_bits;
} else {
Overflow().insert(word);
// Returns the index of the last bucket in which `value` could be stored.
static constexpr inline size_t ComputeLargestPossibleBucketIndexFor(T value) {
return static_cast<size_t>(value) / kBucketSize;
}
// Returns the smallest enum value that could be contained in the same bucket
// as `value`.
static constexpr inline T ComputeBucketStart(T value) {
return static_cast<T>(kBucketSize *
ComputeLargestPossibleBucketIndexFor(value));
}
// Returns the index of the bit that corresponds to `value` in the bucket.
static constexpr inline ElementType ComputeBucketOffset(T value) {
return static_cast<ElementType>(value) % kBucketSize;
}
// Returns the bitmask used to represent the enum `value` in its bucket.
static constexpr inline BucketType ComputeMaskForValue(T value) {
return 1ULL << ComputeBucketOffset(value);
}
// Returns the `enum` stored in `bucket` at `offset`.
// `offset` is the bit-offset in the bucket storage.
static constexpr inline T GetValueFromBucket(const Bucket& bucket,
BucketType offset) {
return static_cast<T>(static_cast<ElementType>(bucket.start) + offset);
}
// For a given enum `value`, finds the bucket index that could contain this
// value. If no such bucket is found, the index at which the new bucket should
// be inserted is returned.
size_t FindBucketForValue(T value) const {
// Set is empty, insert at 0.
if (buckets_.size() == 0) {
return 0;
}
}
// Removes the given enum value (as a 32-bit word) from the set. This has no
// effect if the enum value is not in the set.
void RemoveWord(uint32_t word) {
if (auto new_bits = AsMask(word)) {
mask_ &= ~new_bits;
} else {
auto itr = Overflow().find(word);
if (itr != Overflow().end()) Overflow().erase(itr);
const T wanted_start = ComputeBucketStart(value);
assert(buckets_.size() > 0 &&
"Size must not be 0 here. Has the code above changed?");
size_t index = std::min(buckets_.size() - 1,
ComputeLargestPossibleBucketIndexFor(value));
// This loop behaves like std::upper_bound with a reverse iterator.
// Buckets are sorted. 3 main cases:
// - The bucket matches
// => returns the bucket index.
// - The found bucket is larger
// => scans left until it finds the correct bucket, or insertion point.
// - The found bucket is smaller
// => We are at the end, so we return past-end index for insertion.
for (; buckets_[index].start >= wanted_start; index--) {
if (index == 0) {
return 0;
}
}
return index + 1;
}
// Returns true if the enum represented as a 32-bit word is in the set.
bool ContainsWord(uint32_t word) const {
// We shouldn't call Overflow() since this is a const method.
if (auto bits = AsMask(word)) {
return (mask_ & bits) != 0;
} else if (auto overflow = overflow_.get()) {
return overflow->find(word) != overflow->end();
// Creates a new bucket to store `value` and inserts it at `index`.
// If the `index` is past the end, the bucket is inserted at the end of the
// vector.
void InsertBucketFor(size_t index, T value) {
const T bucket_start = ComputeBucketStart(value);
Bucket bucket = {1ULL << ComputeBucketOffset(value), bucket_start};
auto it = buckets_.emplace(buckets_.begin() + index, std::move(bucket));
#if defined(NDEBUG)
(void)it; // Silencing unused variable warning.
#else
assert(std::next(it) == buckets_.end() ||
std::next(it)->start > bucket_start);
assert(it == buckets_.begin() || std::prev(it)->start < bucket_start);
#endif
}
// Returns true if the bucket at `bucketIndex` stores the enum at
// `bucketOffset`, false otherwise.
bool HasEnumAt(size_t bucketIndex, BucketType bucketOffset) const {
assert(bucketIndex < buckets_.size());
assert(bucketOffset < kBucketSize);
return buckets_[bucketIndex].data & (1ULL << bucketOffset);
}
// Returns true if `lhs` and `rhs` hold the exact same values.
friend bool operator==(const EnumSet& lhs, const EnumSet& rhs) {
if (lhs.size_ != rhs.size_) {
return false;
}
// The word is large, but the set doesn't have large members, so
// it doesn't have an overflow set.
return false;
}
// Returns the enum value as a uint32_t.
uint32_t ToWord(EnumType value) const {
static_assert(sizeof(EnumType) <= sizeof(uint32_t),
"EnumType must statically castable to uint32_t");
return static_cast<uint32_t>(value);
}
// Determines whether the given enum value can be represented
// as a bit in a uint64_t mask. If so, then returns that mask bit.
// Otherwise, returns 0.
uint64_t AsMask(uint32_t word) const {
if (word > 63) return 0;
return uint64_t(1) << word;
}
// Ensures that overflow_set_ references a set. A new empty set is
// allocated if one doesn't exist yet. Returns overflow_set_.
OverflowSetType& Overflow() {
if (overflow_.get() == nullptr) {
overflow_ = MakeUnique<OverflowSetType>();
if (lhs.buckets_.size() != rhs.buckets_.size()) {
return false;
}
return *overflow_;
return lhs.buckets_ == rhs.buckets_;
}
// Enums with values up to 63 are stored as bits in this mask.
uint64_t mask_ = 0;
// Enums with values larger than 63 are stored in this set.
// This set should normally be empty or very small.
std::unique_ptr<OverflowSetType> overflow_ = {};
// Returns true if `lhs` and `rhs` hold at least 1 different value.
friend bool operator!=(const EnumSet& lhs, const EnumSet& rhs) {
return !(lhs == rhs);
}
// Storage for the buckets.
std::vector<Bucket> buckets_;
// How many enums this set is storing.
size_t size_ = 0;
};
// A set of spv::Capability, optimized for small capability values.
// A set of spv::Capability.
using CapabilitySet = EnumSet<spv::Capability>;
} // namespace spvtools
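
For orientation, a small hypothetical usage sketch of the rewritten container (all names come from the header above; the snippet and the function name are not part of the commit). Its std-style surface (insert, contains, erase, size, and iterators) is what lets the ForEach-based call sites later in this commit become plain range-for loops:

#include <cassert>

#include "source/enum_set.h"

void EnumSetExample() {
  // CapabilitySet is EnumSet<spv::Capability>, declared in enum_set.h above.
  spvtools::CapabilitySet caps{spv::Capability::Shader, spv::Capability::Int64};
  caps.insert(spv::Capability::Float64);           // replaces Add()
  assert(caps.contains(spv::Capability::Shader));  // replaces Contains()
  caps.erase(spv::Capability::Int64);              // replaces Remove()
  assert(caps.size() == 2);
  for (spv::Capability c : caps) {  // replaces ForEach(); values are visited
    (void)c;                        // in increasing enum-value order
  }
  // HasAnyOf() keeps its name and still returns true for an empty argument.
  assert(caps.HasAnyOf({spv::Capability::Float64, spv::Capability::Geometry}));
}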

View File

@@ -40,8 +40,9 @@ std::string GetExtensionString(const spv_parsed_instruction_t* inst) {
std::string ExtensionSetToString(const ExtensionSet& extensions) {
std::stringstream ss;
extensions.ForEach(
[&ss](Extension ext) { ss << ExtensionToString(ext) << " "; });
for (auto extension : extensions) {
ss << ExtensionToString(extension) << " ";
}
return ss.str();
}

View File

@@ -15,6 +15,7 @@
#ifndef SOURCE_EXTENSIONS_H_
#define SOURCE_EXTENSIONS_H_
#include <cstdint>
#include <string>
#include "source/enum_set.h"
@@ -23,7 +24,7 @@
namespace spvtools {
// The known SPIR-V extensions.
enum Extension {
enum Extension : uint32_t {
#include "extension_enum.inc"
};

View File

@@ -40,31 +40,33 @@ void FeatureManager::AddExtension(Instruction* ext) {
const std::string name = ext->GetInOperand(0u).AsString();
Extension extension;
if (GetExtensionFromString(name.c_str(), &extension)) {
extensions_.Add(extension);
extensions_.insert(extension);
}
}
void FeatureManager::RemoveExtension(Extension ext) {
if (!extensions_.Contains(ext)) return;
extensions_.Remove(ext);
if (!extensions_.contains(ext)) return;
extensions_.erase(ext);
}
void FeatureManager::AddCapability(spv::Capability cap) {
if (capabilities_.Contains(cap)) return;
if (capabilities_.contains(cap)) return;
capabilities_.Add(cap);
capabilities_.insert(cap);
spv_operand_desc desc = {};
if (SPV_SUCCESS == grammar_.lookupOperand(SPV_OPERAND_TYPE_CAPABILITY,
uint32_t(cap), &desc)) {
CapabilitySet(desc->numCapabilities, desc->capabilities)
.ForEach([this](spv::Capability c) { AddCapability(c); });
for (auto capability :
CapabilitySet(desc->numCapabilities, desc->capabilities)) {
AddCapability(capability);
}
}
}
void FeatureManager::RemoveCapability(spv::Capability cap) {
if (!capabilities_.Contains(cap)) return;
capabilities_.Remove(cap);
if (!capabilities_.contains(cap)) return;
capabilities_.erase(cap);
}
void FeatureManager::AddCapabilities(Module* module) {

View File

@@ -28,14 +28,14 @@ class FeatureManager {
explicit FeatureManager(const AssemblyGrammar& grammar) : grammar_(grammar) {}
// Returns true if |ext| is an enabled extension in the module.
bool HasExtension(Extension ext) const { return extensions_.Contains(ext); }
bool HasExtension(Extension ext) const { return extensions_.contains(ext); }
// Removes the given |extension| from the current FeatureManager.
void RemoveExtension(Extension extension);
// Returns true if |cap| is an enabled capability in the module.
bool HasCapability(spv::Capability cap) const {
return capabilities_.Contains(cap);
return capabilities_.contains(cap);
}
// Removes the given |capability| from the current FeatureManager.

View File

@@ -233,11 +233,12 @@ uint32_t InstrumentPass::GenStageInfo(uint32_t stage_idx,
uint32_t load_id = GenVarLoad(
context()->GetBuiltinInputVarId(uint32_t(spv::BuiltIn::VertexIndex)),
builder);
ids[1] = load_id;
ids[1] = GenUintCastCode(load_id, builder);
load_id = GenVarLoad(context()->GetBuiltinInputVarId(
uint32_t(spv::BuiltIn::InstanceIndex)),
builder);
ids[2] = load_id;
ids[2] = GenUintCastCode(load_id, builder);
} break;
case spv::ExecutionModel::GLCompute:
case spv::ExecutionModel::TaskNV:

View File

@@ -718,9 +718,9 @@ void IRContext::AddCombinatorsForExtension(Instruction* extension) {
}
void IRContext::InitializeCombinators() {
get_feature_mgr()->GetCapabilities()->ForEach([this](spv::Capability cap) {
AddCombinatorsForCapability(uint32_t(cap));
});
for (auto capability : *get_feature_mgr()->GetCapabilities()) {
AddCombinatorsForCapability(uint32_t(capability));
}
for (auto& extension : module()->ext_inst_imports()) {
AddCombinatorsForExtension(&extension);

View File

@@ -240,7 +240,7 @@ bool IsEnabledByExtension(ValidationState_t& _, uint32_t capability) {
ExtensionSet operand_exts(operand_desc->numExtensions,
operand_desc->extensions);
if (operand_exts.IsEmpty()) return false;
if (operand_exts.empty()) return false;
return _.HasAnyOfExtensions(operand_exts);
}

View File

@@ -38,14 +38,14 @@ namespace {
std::string ToString(const CapabilitySet& capabilities,
const AssemblyGrammar& grammar) {
std::stringstream ss;
capabilities.ForEach([&grammar, &ss](spv::Capability cap) {
for (auto capability : capabilities) {
spv_operand_desc desc;
if (SPV_SUCCESS == grammar.lookupOperand(SPV_OPERAND_TYPE_CAPABILITY,
uint32_t(cap), &desc))
uint32_t(capability), &desc))
ss << desc->name << " ";
else
ss << uint32_t(cap) << " ";
});
ss << uint32_t(capability) << " ";
}
return ss.str();
}
@@ -178,10 +178,11 @@ spv_result_t CheckRequiredCapabilities(ValidationState_t& state,
// Vulkan API requires more capabilities on rounding mode.
if (spvIsVulkanEnv(state.context()->target_env)) {
enabling_capabilities.Add(spv::Capability::StorageUniformBufferBlock16);
enabling_capabilities.Add(spv::Capability::StorageUniform16);
enabling_capabilities.Add(spv::Capability::StoragePushConstant16);
enabling_capabilities.Add(spv::Capability::StorageInputOutput16);
enabling_capabilities.insert(
spv::Capability::StorageUniformBufferBlock16);
enabling_capabilities.insert(spv::Capability::StorageUniform16);
enabling_capabilities.insert(spv::Capability::StoragePushConstant16);
enabling_capabilities.insert(spv::Capability::StorageInputOutput16);
}
} else {
enabling_capabilities = state.grammar().filterCapsAgainstTargetEnv(
@@ -195,7 +196,7 @@ spv_result_t CheckRequiredCapabilities(ValidationState_t& state,
if (inst->opcode() != spv::Op::OpCapability) {
const bool enabled_by_cap =
state.HasAnyOfCapabilities(enabling_capabilities);
if (!enabling_capabilities.IsEmpty() && !enabled_by_cap) {
if (!enabling_capabilities.empty() && !enabled_by_cap) {
return state.diag(SPV_ERROR_INVALID_CAPABILITY, inst)
<< "Operand " << which_operand << " of "
<< spvOpcodeString(inst->opcode())
@@ -303,7 +304,7 @@ spv_result_t VersionCheck(ValidationState_t& _, const Instruction* inst) {
}
ExtensionSet exts(inst_desc->numExtensions, inst_desc->extensions);
if (exts.IsEmpty()) {
if (exts.empty()) {
// If no extensions can enable this instruction, then emit error
// messages only concerning core SPIR-V versions if errors happen.
if (min_version == ~0u) {

View File

@@ -1737,9 +1737,10 @@ spv_result_t ValidateCooperativeMatrixLoadStoreKHR(ValidationState_t& _,
storage_class != spv::StorageClass::StorageBuffer &&
storage_class != spv::StorageClass::PhysicalStorageBuffer) {
return _.diag(SPV_ERROR_INVALID_ID, inst)
<< opname << " storage class for pointer type <id> "
<< _.VkErrorID(8973) << opname
<< " storage class for pointer type <id> "
<< _.getIdName(pointer_type_id)
<< " is not Workgroup or StorageBuffer.";
<< " is not Workgroup, StorageBuffer, or PhysicalStorageBuffer.";
}
const auto pointee_id = pointer_type->GetOperandAs<uint32_t>(2);

View File

@@ -26,6 +26,207 @@ namespace spvtools {
namespace val {
namespace {
spv_result_t ValidateGroupNonUniformElect(ValidationState_t& _,
const Instruction* inst) {
if (!_.IsBoolScalarType(inst->type_id())) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Result must be a boolean scalar type";
}
return SPV_SUCCESS;
}
spv_result_t ValidateGroupNonUniformAnyAll(ValidationState_t& _,
const Instruction* inst) {
if (!_.IsBoolScalarType(inst->type_id())) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Result must be a boolean scalar type";
}
if (!_.IsBoolScalarType(_.GetOperandTypeId(inst, 3))) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Predicate must be a boolean scalar type";
}
return SPV_SUCCESS;
}
spv_result_t ValidateGroupNonUniformAllEqual(ValidationState_t& _,
const Instruction* inst) {
if (!_.IsBoolScalarType(inst->type_id())) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Result must be a boolean scalar type";
}
const auto value_type = _.GetOperandTypeId(inst, 3);
if (!_.IsFloatScalarOrVectorType(value_type) &&
!_.IsIntScalarOrVectorType(value_type) &&
!_.IsBoolScalarOrVectorType(value_type)) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Value must be a scalar or vector of integer, floating-point, or "
"boolean type";
}
return SPV_SUCCESS;
}
spv_result_t ValidateGroupNonUniformBroadcastShuffle(ValidationState_t& _,
const Instruction* inst) {
const auto type_id = inst->type_id();
if (!_.IsFloatScalarOrVectorType(type_id) &&
!_.IsIntScalarOrVectorType(type_id) &&
!_.IsBoolScalarOrVectorType(type_id)) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Result must be a scalar or vector of integer, floating-point, "
"or boolean type";
}
const auto value_type_id = _.GetOperandTypeId(inst, 3);
if (value_type_id != type_id) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "The type of Value must match the Result type";
}
const auto GetOperandName = [](const spv::Op opcode) {
std::string operand;
switch (opcode) {
case spv::Op::OpGroupNonUniformBroadcast:
case spv::Op::OpGroupNonUniformShuffle:
operand = "Id";
break;
case spv::Op::OpGroupNonUniformShuffleXor:
operand = "Mask";
break;
case spv::Op::OpGroupNonUniformQuadBroadcast:
operand = "Index";
break;
case spv::Op::OpGroupNonUniformQuadSwap:
operand = "Direction";
break;
case spv::Op::OpGroupNonUniformShuffleUp:
case spv::Op::OpGroupNonUniformShuffleDown:
default:
operand = "Delta";
break;
}
return operand;
};
const auto id_type_id = _.GetOperandTypeId(inst, 4);
if (!_.IsUnsignedIntScalarType(id_type_id)) {
std::string operand = GetOperandName(inst->opcode());
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< operand << " must be an unsigned integer scalar";
}
const bool should_be_constant =
inst->opcode() == spv::Op::OpGroupNonUniformQuadSwap ||
((inst->opcode() == spv::Op::OpGroupNonUniformBroadcast ||
inst->opcode() == spv::Op::OpGroupNonUniformQuadBroadcast) &&
_.version() < SPV_SPIRV_VERSION_WORD(1, 5));
if (should_be_constant) {
const auto id_id = inst->GetOperandAs<uint32_t>(4);
const auto id_op = _.GetIdOpcode(id_id);
if (!spvOpcodeIsConstant(id_op)) {
std::string operand = GetOperandName(inst->opcode());
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Before SPIR-V 1.5, " << operand
<< " must be a constant instruction";
}
}
return SPV_SUCCESS;
}
spv_result_t ValidateGroupNonUniformBroadcastFirst(ValidationState_t& _,
const Instruction* inst) {
const auto type_id = inst->type_id();
if (!_.IsFloatScalarOrVectorType(type_id) &&
!_.IsIntScalarOrVectorType(type_id) &&
!_.IsBoolScalarOrVectorType(type_id)) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Result must be a scalar or vector of integer, floating-point, "
"or boolean type";
}
const auto value_type_id = _.GetOperandTypeId(inst, 3);
if (value_type_id != type_id) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "The type of Value must match the Result type";
}
return SPV_SUCCESS;
}
spv_result_t ValidateGroupNonUniformBallot(ValidationState_t& _,
const Instruction* inst) {
if (!_.IsUnsignedIntVectorType(inst->type_id())) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Result must be a 4-component unsigned integer vector";
}
if (_.GetDimension(inst->type_id()) != 4) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Result must be a 4-component unsigned integer vector";
}
const auto pred_type_id = _.GetOperandTypeId(inst, 3);
if (!_.IsBoolScalarType(pred_type_id)) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Predicate must be a boolean scalar";
}
return SPV_SUCCESS;
}
spv_result_t ValidateGroupNonUniformInverseBallot(ValidationState_t& _,
const Instruction* inst) {
if (!_.IsBoolScalarType(inst->type_id())) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Result must be a boolean scalar";
}
const auto value_type_id = _.GetOperandTypeId(inst, 3);
if (!_.IsUnsignedIntVectorType(value_type_id)) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Value must be a 4-component unsigned integer vector";
}
if (_.GetDimension(value_type_id) != 4) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Value must be a 4-component unsigned integer vector";
}
return SPV_SUCCESS;
}
spv_result_t ValidateGroupNonUniformBallotBitExtract(ValidationState_t& _,
const Instruction* inst) {
if (!_.IsBoolScalarType(inst->type_id())) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Result must be a boolean scalar";
}
const auto value_type_id = _.GetOperandTypeId(inst, 3);
if (!_.IsUnsignedIntVectorType(value_type_id)) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Value must be a 4-component unsigned integer vector";
}
if (_.GetDimension(value_type_id) != 4) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Value must be a 4-component unsigned integer vector";
}
const auto id_type_id = _.GetOperandTypeId(inst, 4);
if (!_.IsUnsignedIntScalarType(id_type_id)) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Id must be an unsigned integer scalar";
}
return SPV_SUCCESS;
}
spv_result_t ValidateGroupNonUniformBallotBitCount(ValidationState_t& _,
const Instruction* inst) {
// Scope is already checked by ValidateExecutionScope() above.
@@ -60,6 +261,107 @@ spv_result_t ValidateGroupNonUniformBallotBitCount(ValidationState_t& _,
return SPV_SUCCESS;
}
spv_result_t ValidateGroupNonUniformBallotFind(ValidationState_t& _,
const Instruction* inst) {
if (!_.IsUnsignedIntScalarType(inst->type_id())) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Result must be an unsigned integer scalar";
}
const auto value_type_id = _.GetOperandTypeId(inst, 3);
if (!_.IsUnsignedIntVectorType(value_type_id)) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Value must be a 4-component unsigned integer vector";
}
if (_.GetDimension(value_type_id) != 4) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Value must be a 4-component unsigned integer vector";
}
return SPV_SUCCESS;
}
spv_result_t ValidateGroupNonUniformArithmetic(ValidationState_t& _,
const Instruction* inst) {
const bool is_unsigned = inst->opcode() == spv::Op::OpGroupNonUniformUMin ||
inst->opcode() == spv::Op::OpGroupNonUniformUMax;
const bool is_float = inst->opcode() == spv::Op::OpGroupNonUniformFAdd ||
inst->opcode() == spv::Op::OpGroupNonUniformFMul ||
inst->opcode() == spv::Op::OpGroupNonUniformFMin ||
inst->opcode() == spv::Op::OpGroupNonUniformFMax;
const bool is_bool = inst->opcode() == spv::Op::OpGroupNonUniformLogicalAnd ||
inst->opcode() == spv::Op::OpGroupNonUniformLogicalOr ||
inst->opcode() == spv::Op::OpGroupNonUniformLogicalXor;
if (is_float) {
if (!_.IsFloatScalarOrVectorType(inst->type_id())) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Result must be a floating-point scalar or vector";
}
} else if (is_bool) {
if (!_.IsBoolScalarOrVectorType(inst->type_id())) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Result must be a boolean scalar or vector";
}
} else if (is_unsigned) {
if (!_.IsUnsignedIntScalarOrVectorType(inst->type_id())) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Result must be an unsigned integer scalar or vector";
}
} else if (!_.IsIntScalarOrVectorType(inst->type_id())) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Result must be an integer scalar or vector";
}
const auto value_type_id = _.GetOperandTypeId(inst, 4);
if (value_type_id != inst->type_id()) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "The type of Value must match the Result type";
}
const auto group_op = inst->GetOperandAs<spv::GroupOperation>(3);
bool is_clustered_reduce = group_op == spv::GroupOperation::ClusteredReduce;
bool is_partitioned_nv =
group_op == spv::GroupOperation::PartitionedReduceNV ||
group_op == spv::GroupOperation::PartitionedInclusiveScanNV ||
group_op == spv::GroupOperation::PartitionedExclusiveScanNV;
if (inst->operands().size() <= 5) {
if (is_clustered_reduce) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "ClusterSize must be present when Operation is ClusteredReduce";
} else if (is_partitioned_nv) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Ballot must be present when Operation is PartitionedReduceNV, "
"PartitionedInclusiveScanNV, or PartitionedExclusiveScanNV";
}
} else {
const auto operand_id = inst->GetOperandAs<uint32_t>(5);
const auto* operand = _.FindDef(operand_id);
if (is_partitioned_nv) {
if (!operand || !_.IsIntScalarOrVectorType(operand->type_id())) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Ballot must be a 4-component integer vector";
}
if (_.GetDimension(operand->type_id()) != 4) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "Ballot must be a 4-component integer vector";
}
} else {
if (!operand || !_.IsUnsignedIntScalarType(operand->type_id())) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "ClusterSize must be an unsigned integer scalar";
}
if (!spvOpcodeIsConstant(operand->opcode())) {
return _.diag(SPV_ERROR_INVALID_DATA, inst)
<< "ClusterSize must be a constant instruction";
}
}
}
return SPV_SUCCESS;
}
spv_result_t ValidateGroupNonUniformRotateKHR(ValidationState_t& _,
const Instruction* inst) {
// Scope is already checked by ValidateExecutionScope() above.
@@ -120,15 +422,58 @@ spv_result_t NonUniformPass(ValidationState_t& _, const Instruction* inst) {
const spv::Op opcode = inst->opcode();
if (spvOpcodeIsNonUniformGroupOperation(opcode)) {
const uint32_t execution_scope = inst->word(3);
const uint32_t execution_scope = inst->GetOperandAs<uint32_t>(2);
if (auto error = ValidateExecutionScope(_, inst, execution_scope)) {
return error;
}
}
switch (opcode) {
case spv::Op::OpGroupNonUniformElect:
return ValidateGroupNonUniformElect(_, inst);
case spv::Op::OpGroupNonUniformAny:
case spv::Op::OpGroupNonUniformAll:
return ValidateGroupNonUniformAnyAll(_, inst);
case spv::Op::OpGroupNonUniformAllEqual:
return ValidateGroupNonUniformAllEqual(_, inst);
case spv::Op::OpGroupNonUniformBroadcast:
case spv::Op::OpGroupNonUniformShuffle:
case spv::Op::OpGroupNonUniformShuffleXor:
case spv::Op::OpGroupNonUniformShuffleUp:
case spv::Op::OpGroupNonUniformShuffleDown:
case spv::Op::OpGroupNonUniformQuadBroadcast:
case spv::Op::OpGroupNonUniformQuadSwap:
return ValidateGroupNonUniformBroadcastShuffle(_, inst);
case spv::Op::OpGroupNonUniformBroadcastFirst:
return ValidateGroupNonUniformBroadcastFirst(_, inst);
case spv::Op::OpGroupNonUniformBallot:
return ValidateGroupNonUniformBallot(_, inst);
case spv::Op::OpGroupNonUniformInverseBallot:
return ValidateGroupNonUniformInverseBallot(_, inst);
case spv::Op::OpGroupNonUniformBallotBitExtract:
return ValidateGroupNonUniformBallotBitExtract(_, inst);
case spv::Op::OpGroupNonUniformBallotBitCount:
return ValidateGroupNonUniformBallotBitCount(_, inst);
case spv::Op::OpGroupNonUniformBallotFindLSB:
case spv::Op::OpGroupNonUniformBallotFindMSB:
return ValidateGroupNonUniformBallotFind(_, inst);
case spv::Op::OpGroupNonUniformIAdd:
case spv::Op::OpGroupNonUniformFAdd:
case spv::Op::OpGroupNonUniformIMul:
case spv::Op::OpGroupNonUniformFMul:
case spv::Op::OpGroupNonUniformSMin:
case spv::Op::OpGroupNonUniformUMin:
case spv::Op::OpGroupNonUniformFMin:
case spv::Op::OpGroupNonUniformSMax:
case spv::Op::OpGroupNonUniformUMax:
case spv::Op::OpGroupNonUniformFMax:
case spv::Op::OpGroupNonUniformBitwiseAnd:
case spv::Op::OpGroupNonUniformBitwiseOr:
case spv::Op::OpGroupNonUniformBitwiseXor:
case spv::Op::OpGroupNonUniformLogicalAnd:
case spv::Op::OpGroupNonUniformLogicalOr:
case spv::Op::OpGroupNonUniformLogicalXor:
return ValidateGroupNonUniformArithmetic(_, inst);
case spv::Op::OpGroupNonUniformRotateKHR:
return ValidateGroupNonUniformRotateKHR(_, inst);
default:

View File

@@ -21,6 +21,7 @@
#include "source/opcode.h"
#include "source/spirv_constant.h"
#include "source/spirv_target_env.h"
#include "source/util/make_unique.h"
#include "source/val/basic_block.h"
#include "source/val/construct.h"
#include "source/val/function.h"
@@ -359,14 +360,16 @@ void ValidationState_t::RegisterCapability(spv::Capability cap) {
// Avoid redundant work. Otherwise the recursion could induce work
quadratic in the capability dependency depth. (Ok, not much, but
// it's something.)
if (module_capabilities_.Contains(cap)) return;
if (module_capabilities_.contains(cap)) return;
module_capabilities_.Add(cap);
module_capabilities_.insert(cap);
spv_operand_desc desc;
if (SPV_SUCCESS == grammar_.lookupOperand(SPV_OPERAND_TYPE_CAPABILITY,
uint32_t(cap), &desc)) {
CapabilitySet(desc->numCapabilities, desc->capabilities)
.ForEach([this](spv::Capability c) { RegisterCapability(c); });
for (auto capability :
CapabilitySet(desc->numCapabilities, desc->capabilities)) {
RegisterCapability(capability);
}
}
switch (cap) {
@@ -418,9 +421,9 @@ void ValidationState_t::RegisterCapability(spv::Capability cap) {
}
void ValidationState_t::RegisterExtension(Extension ext) {
if (module_extensions_.Contains(ext)) return;
if (module_extensions_.contains(ext)) return;
module_extensions_.Add(ext);
module_extensions_.insert(ext);
switch (ext) {
case kSPV_AMD_gpu_shader_half_float:
@@ -1009,6 +1012,23 @@ bool ValidationState_t::IsUnsignedIntVectorType(uint32_t id) const {
return false;
}
bool ValidationState_t::IsUnsignedIntScalarOrVectorType(uint32_t id) const {
const Instruction* inst = FindDef(id);
if (!inst) {
return false;
}
if (inst->opcode() == spv::Op::OpTypeInt) {
return inst->GetOperandAs<uint32_t>(2) == 0;
}
if (inst->opcode() == spv::Op::OpTypeVector) {
return IsUnsignedIntScalarType(GetComponentType(id));
}
return false;
}
bool ValidationState_t::IsSignedIntScalarType(uint32_t id) const {
const Instruction* inst = FindDef(id);
return inst && inst->opcode() == spv::Op::OpTypeInt && inst->word(3) == 1;
@@ -2245,6 +2265,8 @@ std::string ValidationState_t::VkErrorID(uint32_t id,
return VUID_WRAP(VUID-StandaloneSpirv-OpEntryPoint-08721);
case 8722:
return VUID_WRAP(VUID-StandaloneSpirv-OpEntryPoint-08722);
case 8973:
return VUID_WRAP(VUID-StandaloneSpirv-Pointer-08973);
default:
return ""; // unknown id
}

View File

@@ -317,7 +317,7 @@ class ValidationState_t {
/// Returns true if the capability is enabled in the module.
bool HasCapability(spv::Capability cap) const {
return module_capabilities_.Contains(cap);
return module_capabilities_.contains(cap);
}
/// Returns a reference to the set of capabilities in the module.
@@ -328,7 +328,7 @@ class ValidationState_t {
/// Returns true if the extension is enabled in the module.
bool HasExtension(Extension ext) const {
return module_extensions_.Contains(ext);
return module_extensions_.contains(ext);
}
/// Returns true if any of the capabilities is enabled, or if |capabilities|
@@ -602,6 +602,7 @@ class ValidationState_t {
bool IsIntScalarOrVectorType(uint32_t id) const;
bool IsUnsignedIntScalarType(uint32_t id) const;
bool IsUnsignedIntVectorType(uint32_t id) const;
bool IsUnsignedIntScalarOrVectorType(uint32_t id) const;
bool IsSignedIntScalarType(uint32_t id) const;
bool IsSignedIntVectorType(uint32_t id) const;
bool IsBoolScalarType(uint32_t id) const;