commit 17c1ae1638bb4e47f35972f7a72612cae35bea6c Author: User Date: Thu Jan 29 17:06:34 2026 +0100 Init diff --git a/.drone.yml b/.drone.yml new file mode 100644 index 0000000..3827bfa --- /dev/null +++ b/.drone.yml @@ -0,0 +1,134 @@ +--- +kind: pipeline +type: docker +name: default + +steps: +- name: build_image_base + pull: never + image: bash_n_docker + volumes: + - name: dockersock + path: /var/run/docker.sock + - name: docker_config + path: /docker_config/config.json + commands: + - export DOCKER_CONFIG=/docker_config + - docker build -f docker/Dockerfile_Base -t spacegame_base . + +- name: build_image_linux + pull: never + image: bash_n_docker + depends_on: + - build_image_base + volumes: + - name: dockersock + path: /var/run/docker.sock + - name: docker_config + path: /docker_config/config.json + commands: + - export DOCKER_CONFIG=/docker_config + - docker build -f docker/Dockerfile_Linux -t spacegame_linux . + +- name: build_image_windows + pull: never + image: bash_n_docker + depends_on: + - build_image_base + volumes: + - name: dockersock + path: /var/run/docker.sock + - name: docker_config + path: /docker_config/config.json + commands: + - export DOCKER_CONFIG=/docker_config + - docker build -f docker/Dockerfile_Windows -t spacegame_windows . + + +- name: build_game_linux + pull: never + image: spacegame_linux + depends_on: + - build_image_linux + commands: + - > + cmake -S . -B build_linux -G Ninja + -DCMAKE_TOOLCHAIN_FILE="cmake/clang_toolchain.cmake" + -DCMAKE_BUILD_TYPE=Release + -DSPACEGAME_BUILD_SHADERS=ON + - cmake --build build_linux --verbose -- -l $(($(nproc)+4)) + +- name: build_game_windows + pull: never + image: spacegame_windows + depends_on: + - build_image_windows + commands: + - > + cmake -S . 
-B build_windows -G Ninja + -DCMAKE_TOOLCHAIN_FILE="cmake/clang_mingw_toolchain.cmake" + -DCMAKE_BUILD_TYPE=Release + -DSPACEGAME_BUILD_SHADERS=OFF + - cmake --build build_windows --verbose -- -l $(($(nproc)+4)) + + +- name: package_game + pull: never + image: spacegame_linux + depends_on: + - build_game_linux + - build_game_windows + when: + event: + - tag + commands: + # We need to fetch the release tag in order to get its message + - git fetch origin tag $DRONE_TAG --no-tags + # Get short version from tag name and long version from tag message + # - VERSION_SHORT=$DRONE_TAG + - VERSION_LONG=$(git for-each-ref refs/tags/$DRONE_TAG --format='%(contents)') + # Package linux + - LINUX_VERSION="$VERSION_LONG.linux" + - mkdir spacegame_linux + - echo $LINUX_VERSION > spacegame_linux/version.txt + - cp build_linux/bin/x86_64/Release/spacegame spacegame_linux/spacegame + - cp -r assets spacegame_linux/assets + - zip -r spacegame_linux.zip spacegame_linux + # Package windows + - WINDOWS_VERSION="$VERSION_LONG.windows" + - mkdir spacegame_windows + - echo $WINDOWS_VERSION > spacegame_windows/version.txt + - cp build_windows/bin/x86_64/Release/spacegame.exe spacegame_windows/spacegame.exe + - cp -r assets spacegame_windows/assets + - zip -r spacegame_windows.zip spacegame_windows + + +- name: release + image: plugins/gitea-release + depends_on: + - package_game + when: + event: + - tag + settings: + api_key: + from_secret: api_key + base_url: https://git.lph.zone + prerelease: true + files: + - spacegame_linux.zip + - spacegame_windows.zip + + +volumes: +- name: dockersock + host: + path: /var/run/docker.sock + +- name: docker_config + host: + path: /home/crydsch/.docker/config.json + + +image_pull_secrets: +- dockerconfig diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..338124f --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +.vscode +build* +assets/shaders/*.spv +atlas diff --git a/3rdparty/.gitignore b/3rdparty/.gitignore new file mode 100644 
index 0000000..0c93ede --- /dev/null +++ b/3rdparty/.gitignore @@ -0,0 +1,5 @@ +glfw +bx +bimg +bgfx +bgfx.cmake diff --git a/3rdparty/README.md b/3rdparty/README.md new file mode 100644 index 0000000..2e771f9 --- /dev/null +++ b/3rdparty/README.md @@ -0,0 +1,20 @@ + +## emilib: Loose collection of misc C++ libs + +Repo.: https://github.com/emilk/emilib + + +## stb: single-file public domain libraries for C/C++ + +Repo.: https://github.com/nothings/stb + + +## bgfx for rendering + +Repos.: +- bx +- bimg +- bgfx +- bgfx.cmake + +## glfw for window creation diff --git a/3rdparty/emilib/hash_map.hpp b/3rdparty/emilib/hash_map.hpp new file mode 100644 index 0000000..f53f0d8 --- /dev/null +++ b/3rdparty/emilib/hash_map.hpp @@ -0,0 +1,671 @@ +// By Emil Ernerfeldt 2014-2017 +// LICENSE: +// This software is dual-licensed to the public domain and under the following +// license: you are granted a perpetual, irrevocable license to copy, modify, +// publish, and distribute this file as you see fit. 
+ +#pragma once + +#include +#include +#include +#include + +namespace emilib { + +// (Crydsch) fixes for standalone usage +#define DCHECK_NE_F(x,y) assert((x) != (y)) +#define DCHECK_F(x) assert(x) +#define DCHECK_EQ_F(x,y) assert((x) == (y)) + +/// like std::equal_to but no need to #include +template +struct HashMapEqualTo +{ + constexpr bool operator()(const T& lhs, const T& rhs) const + { + return lhs == rhs; + } +}; + +/// A cache-friendly hash table with open addressing, linear probing and power-of-two capacity +template , typename EqT = HashMapEqualTo> +class HashMap +{ +private: + using MyType = HashMap; + + using PairT = std::pair; +public: + using size_type = size_t; + using value_type = PairT; + using reference = PairT&; + using const_reference = const PairT&; + + class iterator + { + public: + using iterator_category = std::forward_iterator_tag; + using difference_type = size_t; + using distance_type = size_t; + using value_type = std::pair; + using pointer = value_type*; + using reference = value_type&; + + iterator() { } + + iterator(MyType* hash_map, size_t bucket) : _map(hash_map), _bucket(bucket) + { + } + + iterator& operator++() + { + this->goto_next_element(); + return *this; + } + + iterator operator++(int) + { + size_t old_index = _bucket; + this->goto_next_element(); + return iterator(_map, old_index); + } + + reference operator*() const + { + return _map->_pairs[_bucket]; + } + + pointer operator->() const + { + return _map->_pairs + _bucket; + } + + bool operator==(const iterator& rhs) const + { + DCHECK_EQ_F(_map, rhs._map); + return this->_bucket == rhs._bucket; + } + + bool operator!=(const iterator& rhs) const + { + DCHECK_EQ_F(_map, rhs._map); + return this->_bucket != rhs._bucket; + } + + private: + void goto_next_element() + { + DCHECK_LT_F(_bucket, _map->_num_buckets); + do { + _bucket++; + } while (_bucket < _map->_num_buckets && _map->_states[_bucket] != State::FILLED); + } + + //private: + // friend class MyType; + public: + 
MyType* _map; + size_t _bucket; + }; + + class const_iterator + { + public: + using iterator_category = std::forward_iterator_tag; + using difference_type = size_t; + using distance_type = size_t; + using value_type = const std::pair; + using pointer = value_type*; + using reference = value_type&; + + const_iterator() { } + + const_iterator(iterator proto) : _map(proto._map), _bucket(proto._bucket) + { + } + + const_iterator(const MyType* hash_map, size_t bucket) : _map(hash_map), _bucket(bucket) + { + } + + const_iterator& operator++() + { + this->goto_next_element(); + return *this; + } + + const_iterator operator++(int) + { + size_t old_index = _bucket; + this->goto_next_element(); + return const_iterator(_map, old_index); + } + + reference operator*() const + { + return _map->_pairs[_bucket]; + } + + pointer operator->() const + { + return _map->_pairs + _bucket; + } + + bool operator==(const const_iterator& rhs) const + { + DCHECK_EQ_F(_map, rhs._map); + return this->_bucket == rhs._bucket; + } + + bool operator!=(const const_iterator& rhs) const + { + DCHECK_EQ_F(_map, rhs._map); + return this->_bucket != rhs._bucket; + } + + private: + void goto_next_element() + { + DCHECK_LT_F(_bucket, _map->_num_buckets); + do { + _bucket++; + } while (_bucket < _map->_num_buckets && _map->_states[_bucket] != State::FILLED); + } + + //private: + // friend class MyType; + public: + const MyType* _map; + size_t _bucket; + }; + + // ------------------------------------------------------------------------ + + HashMap() = default; + + HashMap(const HashMap& other) + { + reserve(other.size()); + insert(other.cbegin(), other.cend()); + } + + HashMap(HashMap&& other) + { + *this = std::move(other); + } + + HashMap& operator=(const HashMap& other) + { + clear(); + reserve(other.size()); + insert(other.cbegin(), other.cend()); + return *this; + } + + void operator=(HashMap&& other) + { + this->swap(other); + } + + ~HashMap() + { + for (size_t bucket=0; bucket<_num_buckets; ++bucket) 
{ + if (_states[bucket] == State::FILLED) { + _pairs[bucket].~PairT(); + } + } + free(_states); + free(_pairs); + } + + void swap(HashMap& other) + { + std::swap(_hasher, other._hasher); + std::swap(_eq, other._eq); + std::swap(_states, other._states); + std::swap(_pairs, other._pairs); + std::swap(_num_buckets, other._num_buckets); + std::swap(_num_filled, other._num_filled); + std::swap(_max_probe_length, other._max_probe_length); + std::swap(_mask, other._mask); + } + + // ------------------------------------------------------------- + + iterator begin() + { + size_t bucket = 0; + while (bucket<_num_buckets && _states[bucket] != State::FILLED) { + ++bucket; + } + return iterator(this, bucket); + } + + const_iterator cbegin() const + { + size_t bucket = 0; + while (bucket<_num_buckets && _states[bucket] != State::FILLED) { + ++bucket; + } + return const_iterator(this, bucket); + } + + const_iterator begin() const + { + return cbegin(); + } + + iterator end() + { + return iterator(this, _num_buckets); + } + + const_iterator cend() const + { + return const_iterator(this, _num_buckets); + } + + const_iterator end() const + { + return cend(); + } + + size_t size() const + { + return _num_filled; + } + + bool empty() const + { + return _num_filled==0; + } + + // Returns the number of buckets. + size_t bucket_count() const + { + return _num_buckets; + } + + /// Returns average number of elements per bucket. 
+ float load_factor() const + { + return static_cast(_num_filled) / static_cast(_num_buckets); + } + + // ------------------------------------------------------------ + + template + iterator find(const KeyLike& key) + { + auto bucket = this->find_filled_bucket(key); + if (bucket == (size_t)-1) { + return this->end(); + } + return iterator(this, bucket); + } + + template + const_iterator find(const KeyLike& key) const + { + auto bucket = this->find_filled_bucket(key); + if (bucket == (size_t)-1) + { + return this->end(); + } + return const_iterator(this, bucket); + } + + template + bool contains(const KeyLike& k) const + { + return find_filled_bucket(k) != (size_t)-1; + } + + template + size_t count(const KeyLike& k) const + { + return find_filled_bucket(k) != (size_t)-1 ? 1 : 0; + } + + /// Returns the matching ValueT or nullptr if k isn't found. + template + ValueT* try_get(const KeyLike& k) + { + auto bucket = find_filled_bucket(k); + if (bucket != (size_t)-1) { + return &_pairs[bucket].second; + } else { + return nullptr; + } + } + + /// Const version of the above + template + const ValueT* try_get(const KeyLike& k) const + { + auto bucket = find_filled_bucket(k); + if (bucket != (size_t)-1) { + return &_pairs[bucket].second; + } else { + return nullptr; + } + } + + /// Convenience function. + template + const ValueT get_or_return_default(const KeyLike& k) const + { + const ValueT* ret = try_get(k); + if (ret) { + return *ret; + } else { + return ValueT(); + } + } + + // ----------------------------------------------------- + + /// Returns a pair consisting of an iterator to the inserted element + /// (or to the element that prevented the insertion) + /// and a bool denoting whether the insertion took place. 
+ std::pair insert(const KeyT& key, const ValueT& value) + { + check_expand_need(); + + auto bucket = find_or_allocate(key); + + if (_states[bucket] == State::FILLED) { + return { iterator(this, bucket), false }; + } else { + _states[bucket] = State::FILLED; + new(_pairs + bucket) PairT(key, value); + _num_filled++; + return { iterator(this, bucket), true }; + } + } + + std::pair insert(const std::pair& p) + { + return insert(p.first, p.second); + } + + void insert(const_iterator begin, const_iterator end) + { + // TODO: reserve space exactly once. + for (; begin != end; ++begin) { + insert(begin->first, begin->second); + } + } + + /// Same as above, but contains(key) MUST be false + void insert_unique(KeyT&& key, ValueT&& value) + { + DCHECK_F(!contains(key)); + check_expand_need(); + auto bucket = find_empty_bucket(key); + _states[bucket] = State::FILLED; + new(_pairs + bucket) PairT(std::move(key), std::move(value)); + _num_filled++; + } + + void insert_unique(std::pair&& p) + { + insert_unique(std::move(p.first), std::move(p.second)); + } + + void insert_or_assign(const KeyT& key, ValueT&& value) + { + check_expand_need(); + + auto bucket = find_or_allocate(key); + + // Check if inserting a new value rather than overwriting an old entry + if (_states[bucket] == State::FILLED) { + _pairs[bucket].second = value; + } else { + _states[bucket] = State::FILLED; + new(_pairs + bucket) PairT(key, value); + _num_filled++; + } + } + + /// Return the old value or ValueT() if it didn't exist. 
+ ValueT set_get(const KeyT& key, const ValueT& new_value) + { + check_expand_need(); + + auto bucket = find_or_allocate(key); + + // Check if inserting a new value rather than overwriting an old entry + if (_states[bucket] == State::FILLED) { + ValueT old_value = _pairs[bucket].second; + _pairs[bucket] = new_value.second; + return old_value; + } else { + _states[bucket] = State::FILLED; + new(_pairs + bucket) PairT(key, new_value); + _num_filled++; + return ValueT(); + } + } + + /// Like std::map::operator[]. + ValueT& operator[](const KeyT& key) + { + check_expand_need(); + + auto bucket = find_or_allocate(key); + + /* Check if inserting a new value rather than overwriting an old entry */ + if (_states[bucket] != State::FILLED) { + _states[bucket] = State::FILLED; + new(_pairs + bucket) PairT(key, ValueT()); + _num_filled++; + } + + return _pairs[bucket].second; + } + + // ------------------------------------------------------- + + /// Erase an element from the hash table. + /// return false if element was not found + bool erase(const KeyT& key) + { + auto bucket = find_filled_bucket(key); + if (bucket != (size_t)-1) { + _states[bucket] = State::ACTIVE; + _pairs[bucket].~PairT(); + _num_filled -= 1; + return true; + } else { + return false; + } + } + + /// Erase an element using an iterator. + /// Returns an iterator to the next element (or end()). + iterator erase(iterator it) + { + DCHECK_EQ_F(it._map, this); + DCHECK_LT_F(it._bucket, _num_buckets); + _states[it._bucket] = State::ACTIVE; + _pairs[it._bucket].~PairT(); + _num_filled -= 1; + return ++it; + } + + /// Remove all elements, keeping full capacity. 
+ void clear() + { + for (size_t bucket=0; bucket<_num_buckets; ++bucket) { + if (_states[bucket] == State::FILLED) { + _states[bucket] = State::INACTIVE; + _pairs[bucket].~PairT(); + } + } + _num_filled = 0; + _max_probe_length = -1; + } + + /// Make room for this many elements + void reserve(size_t num_elems) + { + size_t required_buckets = num_elems + num_elems/2 + 1; + if (required_buckets <= _num_buckets) { + return; + } + size_t num_buckets = 4; + while (num_buckets < required_buckets) { num_buckets *= 2; } + + auto new_states = (State*)malloc(num_buckets * sizeof(State)); + auto new_pairs = (PairT*)malloc(num_buckets * sizeof(PairT)); + + if (!new_states || !new_pairs) { + free(new_states); + free(new_pairs); + // throw std::bad_alloc(); + std::abort(); + } + + //auto old_num_filled = _num_filled; + auto old_num_buckets = _num_buckets; + auto old_states = _states; + auto old_pairs = _pairs; + + _num_filled = 0; + _num_buckets = num_buckets; + _mask = _num_buckets - 1; + _states = new_states; + _pairs = new_pairs; + + std::fill_n(_states, num_buckets, State::INACTIVE); + + _max_probe_length = -1; + + for (size_t src_bucket=0; src_bucket + size_t find_filled_bucket(const KeyLike& key) const + { + if (empty()) { return (size_t)-1; } // Optimization + + auto hash_value = _hasher(key); + for (int offset=0; offset<=_max_probe_length; ++offset) { + auto bucket = (hash_value + offset) & _mask; + if (_states[bucket] == State::FILLED) { + if (_eq(_pairs[bucket].first, key)) { + return bucket; + } + } else if (_states[bucket] == State::INACTIVE) { + return (size_t)-1; // End of the chain! + } + } + return (size_t)-1; + } + + // Find the bucket with this key, or return a good empty bucket to place the key in. + // In the latter case, the bucket is expected to be filled. 
+ size_t find_or_allocate(const KeyT& key) + { + auto hash_value = _hasher(key); + size_t hole = (size_t)-1; + int offset=0; + for (; offset<=_max_probe_length; ++offset) { + auto bucket = (hash_value + offset) & _mask; + + if (_states[bucket] == State::FILLED) { + if (_eq(_pairs[bucket].first, key)) { + return bucket; + } + } else if (_states[bucket] == State::INACTIVE) { + return bucket; + } else { + // ACTIVE: keep searching + if (hole == (size_t)-1) { + hole = bucket; + } + } + } + + // No key found - but maybe a hole for it + + assert(offset == _max_probe_length+1); + + if (hole != (size_t)-1) { + return hole; + } + + // No hole found within _max_probe_length + for (; ; ++offset) { + auto bucket = (hash_value + offset) & _mask; + + if (_states[bucket] != State::FILLED) { + _max_probe_length = offset; + return bucket; + } + } + } + + // key is not in this map. Find a place to put it. + size_t find_empty_bucket(const KeyT& key) + { + auto hash_value = _hasher(key); + for (int offset=0; ; ++offset) { + auto bucket = (hash_value + offset) & _mask; + if (_states[bucket] != State::FILLED) { + if (offset > _max_probe_length) { + _max_probe_length = offset; + } + return bucket; + } + } + } + +private: + enum class State : uint8_t + { + INACTIVE, // Never been touched + ACTIVE, // Is inside a search-chain, but is empty + FILLED // Is set with key/value + }; + + HashT _hasher; + EqT _eq; + State* _states = nullptr; + PairT* _pairs = nullptr; + size_t _num_buckets = 0; + size_t _num_filled = 0; + int _max_probe_length = -1; // Our longest bucket-brigade is this long. ONLY when we have zero elements is this ever negative (-1). 
+ size_t _mask = 0; // _num_buckets minus one +}; + +} // namespace emilib diff --git a/3rdparty/emilib/hash_set.hpp b/3rdparty/emilib/hash_set.hpp new file mode 100644 index 0000000..a59682f --- /dev/null +++ b/3rdparty/emilib/hash_set.hpp @@ -0,0 +1,586 @@ +// By Emil Ernerfeldt 2014-2016 +// LICENSE: +// This software is dual-licensed to the public domain and under the following +// license: you are granted a perpetual, irrevocable license to copy, modify, +// publish, and distribute this file as you see fit. + +#pragma once + +#include // malloc +#include +#include +#include + +namespace emilib { + +/// like std::equal_to but no need to `#include ` +template +struct HashSetEqualTo +{ + constexpr bool operator()(const T& lhs, const T& rhs) const + { + return lhs == rhs; + } +}; + +/// A cache-friendly hash set with open addressing, linear probing and power-of-two capacity +template , typename EqT = HashSetEqualTo> +class HashSet +{ +private: + using MyType = HashSet; + +public: + using size_type = size_t; + using value_type = KeyT; + using reference = KeyT&; + using const_reference = const KeyT&; + + class iterator + { + public: + using iterator_category = std::forward_iterator_tag; + using difference_type = size_t; + using distance_type = size_t; + using value_type = KeyT; + using pointer = value_type*; + using reference = value_type&; + + iterator() { } + + iterator(MyType* hash_set, size_t bucket) : _set(hash_set), _bucket(bucket) + { + } + + iterator& operator++() + { + this->goto_next_element(); + return *this; + } + + iterator operator++(int) + { + size_t old_index = _bucket; + this->goto_next_element(); + return iterator(_set, old_index); + } + + reference operator*() const + { + return _set->_keys[_bucket]; + } + + pointer operator->() const + { + return _set->_keys + _bucket; + } + + bool operator==(const iterator& rhs) const + { + DCHECK_EQ_F(_set, rhs._set); + return this->_bucket == rhs._bucket; + } + + bool operator!=(const iterator& rhs) const + { 
+ DCHECK_EQ_F(_set, rhs._set); + return this->_bucket != rhs._bucket; + } + + private: + void goto_next_element() + { + DCHECK_LT_F(_bucket, _set->_num_buckets); + do { + _bucket++; + } while (_bucket < _set->_num_buckets && _set->_states[_bucket] != State::FILLED); + } + + //private: + // friend class MyType; + public: + MyType* _set; + size_t _bucket; + }; + + class const_iterator + { + public: + using iterator_category = std::forward_iterator_tag; + using difference_type = size_t; + using distance_type = size_t; + using value_type = const KeyT; + using pointer = value_type*; + using reference = value_type&; + + const_iterator() { } + + const_iterator(iterator proto) : _set(proto._set), _bucket(proto._bucket) + { + } + + const_iterator(const MyType* hash_set, size_t bucket) : _set(hash_set), _bucket(bucket) + { + } + + const_iterator& operator++() + { + this->goto_next_element(); + return *this; + } + + const_iterator operator++(int) + { + size_t old_index = _bucket; + this->goto_next_element(); + return const_iterator(_set, old_index); + } + + reference operator*() const + { + return _set->_keys[_bucket]; + } + + pointer operator->() const + { + return _set->_keys + _bucket; + } + + bool operator==(const const_iterator& rhs) const + { + DCHECK_EQ_F(_set, rhs._set); + return this->_bucket == rhs._bucket; + } + + bool operator!=(const const_iterator& rhs) const + { + DCHECK_EQ_F(_set, rhs._set); + return this->_bucket != rhs._bucket; + } + + private: + void goto_next_element() + { + DCHECK_LT_F(_bucket, _set->_num_buckets); + do { + _bucket++; + } while (_bucket < _set->_num_buckets && _set->_states[_bucket] != State::FILLED); + } + + //private: + // friend class MyType; + public: + const MyType* _set; + size_t _bucket; + }; + + // ------------------------------------------------------------------------ + + HashSet() = default; + + HashSet(const HashSet& other) + { + reserve(other.size()); + insert(other.cbegin(), other.cend()); + } + + HashSet(HashSet&& other) + 
{ + *this = std::move(other); + } + + HashSet& operator=(const HashSet& other) + { + clear(); + reserve(other.size()); + insert(other.cbegin(), other.cend()); + return *this; + } + + void operator=(HashSet&& other) + { + this->swap(other); + } + + ~HashSet() + { + for (size_t bucket=0; bucket<_num_buckets; ++bucket) { + if (_states[bucket] == State::FILLED) { + _keys[bucket].~KeyT(); + } + } + free(_states); + free(_keys); + } + + void swap(HashSet& other) + { + std::swap(_hasher, other._hasher); + std::swap(_eq, other._eq); + std::swap(_states, other._states); + std::swap(_keys, other._keys); + std::swap(_num_buckets, other._num_buckets); + std::swap(_num_filled, other._num_filled); + std::swap(_max_probe_length, other._max_probe_length); + std::swap(_mask, other._mask); + } + + // ------------------------------------------------------------- + + iterator begin() + { + size_t bucket = 0; + while (bucket<_num_buckets && _states[bucket] != State::FILLED) { + ++bucket; + } + return iterator(this, bucket); + } + + const_iterator cbegin() const + { + size_t bucket = 0; + while (bucket<_num_buckets && _states[bucket] != State::FILLED) { + ++bucket; + } + return const_iterator(this, bucket); + } + + const_iterator begin() const + { + return cbegin(); + } + + iterator end() + { + return iterator(this, _num_buckets); + } + + const_iterator cend() const + { + return const_iterator(this, _num_buckets); + } + + const_iterator end() const + { + return cend(); + } + + size_t size() const + { + return _num_filled; + } + + bool empty() const + { + return _num_filled==0; + } + + // Returns the number of buckets. + size_t bucket_count() const + { + return _num_buckets; + } + + /// Returns average number of elements per bucket. 
+ float load_factor() const + { + return static_cast(_num_filled) / static_cast(_num_buckets); + } + + // ------------------------------------------------------------ + + iterator find(const KeyT& key) + { + auto bucket = this->find_filled_bucket(key); + if (bucket == (size_t)-1) { + return this->end(); + } + return iterator(this, bucket); + } + + const_iterator find(const KeyT& key) const + { + auto bucket = this->find_filled_bucket(key); + if (bucket == (size_t)-1) { + return this->end(); + } + return const_iterator(this, bucket); + } + + bool contains(const KeyT& k) const + { + return find_filled_bucket(k) != (size_t)-1; + } + + size_t count(const KeyT& k) const + { + return find_filled_bucket(k) != (size_t)-1 ? 1 : 0; + } + + // ----------------------------------------------------- + + /// Insert an element, unless it already exists. + /// Returns a pair consisting of an iterator to the inserted element + /// (or to the element that prevented the insertion) + /// and a bool denoting whether the insertion took place. + std::pair insert(const KeyT& key) + { + check_expand_need(); + + auto bucket = find_or_allocate(key); + + if (_states[bucket] == State::FILLED) { + return { iterator(this, bucket), false }; + } else { + _states[bucket] = State::FILLED; + new(_keys + bucket) KeyT(key); + _num_filled++; + return { iterator(this, bucket), true }; + } + } + + /// Insert an element, unless it already exists. + /// Returns a pair consisting of an iterator to the inserted element + /// (or to the element that prevented the insertion) + /// and a bool denoting whether the insertion took place. 
+ std::pair insert(KeyT&& key) + { + check_expand_need(); + + auto bucket = find_or_allocate(key); + + if (_states[bucket] == State::FILLED) { + return { iterator(this, bucket), false }; + } else { + _states[bucket] = State::FILLED; + new(_keys + bucket) KeyT(std::move(key)); + _num_filled++; + return { iterator(this, bucket), true }; + } + } + + template + std::pair emplace(Args&&... args) + { + return insert(KeyT(std::forward(args)...)); + } + + void insert(const_iterator begin, const_iterator end) + { + // TODO: reserve space exactly once. + for (; begin != end; ++begin) { + insert(*begin); + } + } + + /// Same as above, but contains(key) MUST be false + void insert_unique(KeyT key) + { + DCHECK_F(!contains(key)); + check_expand_need(); + auto bucket = find_empty_bucket(key); + _states[bucket] = State::FILLED; + new(_keys + bucket) KeyT(std::move(key)); + _num_filled++; + } + + // ------------------------------------------------------- + + /// Erase an element from the hash set. + /// return false if element was not found. + bool erase(const KeyT& key) + { + auto bucket = find_filled_bucket(key); + if (bucket != (size_t)-1) { + _states[bucket] = State::ACTIVE; + _keys[bucket].~KeyT(); + _num_filled -= 1; + return true; + } else { + return false; + } + } + + /// Erase an element using an iterator. + /// Returns an iterator to the next element (or end()). + iterator erase(iterator it) + { + DCHECK_EQ_F(it._set, this); + DCHECK_LT_F(it._bucket, _num_buckets); + _states[it._bucket] = State::ACTIVE; + _keys[it._bucket].~KeyT(); + _num_filled -= 1; + return ++it; + } + + /// Remove all elements, keeping full capacity. 
+ void clear() + { + for (size_t bucket=0; bucket<_num_buckets; ++bucket) { + if (_states[bucket] == State::FILLED) { + _states[bucket] = State::INACTIVE; + _keys[bucket].~KeyT(); + } + } + _num_filled = 0; + _max_probe_length = -1; + } + + /// Make room for this many elements + void reserve(size_t num_elems) + { + size_t required_buckets = num_elems + num_elems/2 + 1; + if (required_buckets <= _num_buckets) { + return; + } + size_t num_buckets = 4; + while (num_buckets < required_buckets) { num_buckets *= 2; } + + auto new_states = (State*)malloc(num_buckets * sizeof(State)); + auto new_keys = (KeyT*)malloc(num_buckets * sizeof(KeyT)); + + if (!new_states || !new_keys) { + free(new_states); + free(new_keys); + // throw std::bad_alloc(); + std::abort(); + } + + // auto old_num_filled = _num_filled; + auto old_num_buckets = _num_buckets; + auto old_states = _states; + auto old_keys = _keys; + + _num_filled = 0; + _num_buckets = num_buckets; + _mask = _num_buckets - 1; + _states = new_states; + _keys = new_keys; + + std::fill_n(_states, num_buckets, State::INACTIVE); + + _max_probe_length = -1; + + for (size_t src_bucket=0; src_bucket _max_probe_length) { + _max_probe_length = offset; + } + return bucket; + } + } + } + +private: + enum class State : uint8_t + { + INACTIVE, // Never been touched + ACTIVE, // Is inside a search-chain, but is empty + FILLED // Is set with key/value + }; + + HashT _hasher; + EqT _eq; + State* _states = nullptr; + KeyT* _keys = nullptr; + size_t _num_buckets = 0; + size_t _num_filled = 0; + int _max_probe_length = -1; // Our longest bucket-brigade is this long. ONLY when we have zero elements is this ever negative (-1). 
+ size_t _mask = 0; // _num_buckets minus one +}; + +} // namespace emilib diff --git a/3rdparty/stb/stb_image.h b/3rdparty/stb/stb_image.h new file mode 100644 index 0000000..a632d54 --- /dev/null +++ b/3rdparty/stb/stb_image.h @@ -0,0 +1,7985 @@ +/* stb_image - v2.29 - public domain image loader - http://nothings.org/stb + no warranty implied; use at your own risk + + Do this: + #define STB_IMAGE_IMPLEMENTATION + before you include this file in *one* C or C++ file to create the implementation. + + // i.e. it should look like this: + #include ... + #include ... + #include ... + #define STB_IMAGE_IMPLEMENTATION + #include "stb_image.h" + + You can #define STBI_ASSERT(x) before the #include to avoid using assert.h. + And #define STBI_MALLOC, STBI_REALLOC, and STBI_FREE to avoid using malloc,realloc,free + + + QUICK NOTES: + Primarily of interest to game developers and other people who can + avoid problematic images and only need the trivial interface + + JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib) + PNG 1/2/4/8/16-bit-per-channel + + TGA (not sure what subset, if a subset) + BMP non-1bpp, non-RLE + PSD (composited view only, no extra channels, 8/16 bit-per-channel) + + GIF (*comp always reports as 4-channel) + HDR (radiance rgbE format) + PIC (Softimage PIC) + PNM (PPM and PGM binary only) + + Animated GIF still needs a proper API, but here's one way to do it: + http://gist.github.com/urraka/685d9a6340b26b830d49 + + - decode from memory or through FILE (define STBI_NO_STDIO to remove code) + - decode from arbitrary I/O callbacks + - SIMD acceleration on x86/x64 (SSE2) and ARM (NEON) + + Full documentation under "DOCUMENTATION" below. + + +LICENSE + + See end of file for license information. 
+ +RECENT REVISION HISTORY: + + 2.29 (2023-05-xx) optimizations + 2.28 (2023-01-29) many error fixes, security errors, just tons of stuff + 2.27 (2021-07-11) document stbi_info better, 16-bit PNM support, bug fixes + 2.26 (2020-07-13) many minor fixes + 2.25 (2020-02-02) fix warnings + 2.24 (2020-02-02) fix warnings; thread-local failure_reason and flip_vertically + 2.23 (2019-08-11) fix clang static analysis warning + 2.22 (2019-03-04) gif fixes, fix warnings + 2.21 (2019-02-25) fix typo in comment + 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) bugfix, 1-bit BMP, 16-bitness query, fix warnings + 2.16 (2017-07-23) all functions have 16-bit variants; optimizations; bugfixes + 2.15 (2017-03-18) fix png-1,2,4; all Imagenet JPGs; no runtime SSE detection on GCC + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-12-04) experimental 16-bit API, only for PNG so far; fixes + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64 + RGB-format JPEG; remove white matting in PSD; + allocate large structures on the stack; + correct channel count for PNG & BMP + 2.10 (2016-01-22) avoid warning introduced in 2.09 + 2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED + + See end of file for full revision history. 
+ + + ============================ Contributors ========================= + + Image formats Extensions, features + Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info) + Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info) + Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG) + Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks) + Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG) + Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip) + Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD) + github:urraka (animated gif) Junggon Kim (PNM comments) + Christopher Forseth (animated gif) Daniel Gibson (16-bit TGA) + socks-the-fox (16-bit PNG) + Jeremy Sawicki (handle all ImageNet JPGs) + Optimizations & bugfixes Mikhail Morozov (1-bit BMP) + Fabian "ryg" Giesen Anael Seghezzi (is-16-bit query) + Arseny Kapoulkine Simon Breuss (16-bit PNM) + John-Mark Allen + Carmelo J Fdez-Aguera + + Bug & warning fixes + Marc LeBlanc David Woo Guillaume George Martins Mozeiko + Christpher Lloyd Jerry Jansson Joseph Thomson Blazej Dariusz Roszkowski + Phil Jordan Dave Moore Roy Eltham + Hayaki Saito Nathan Reed Won Chun + Luke Graham Johan Duparc Nick Verigakis the Horde3D community + Thomas Ruf Ronny Chevalier github:rlyeh + Janez Zemva John Bartholomew Michal Cichon github:romigrou + Jonathan Blow Ken Hamada Tero Hanninen github:svdijk + Eugene Golushkov Laurent Gomila Cort Stratton github:snagar + Aruelien Pocheville Sergio Gonzalez Thibault Reuille github:Zelex + Cass Everitt Ryamond Barbiero github:grim210 + Paul Du Bois Engin Manap Aldo Culquicondor github:sammyhw + Philipp Wiesemann Dale Weiler Oriol Ferrer Mesia github:phprus + Josh Tobin Neil Bickford Matthew Gregan github:poppolopoppo + Julian Raschke Gregory Mullen Christian Floisand github:darealshinji + Baldur Karlsson Kevin Schmidt JR Smith github:Michaelangel007 + Brad Weinberger Matvey Cherevko github:mosra + Luca Sas Alexander Veselov Zack Middleton [reserved] + Ryan C. 
Gordon [reserved] [reserved] + DO NOT ADD YOUR NAME HERE + + Jacko Dirks + + To add your name to the credits, pick a random blank space in the middle and fill it. + 80% of merge conflicts on stb PRs are due to people adding their name at the end + of the credits. +*/ + +#ifndef STBI_INCLUDE_STB_IMAGE_H +#define STBI_INCLUDE_STB_IMAGE_H + +// DOCUMENTATION +// +// Limitations: +// - no 12-bit-per-channel JPEG +// - no JPEGs with arithmetic coding +// - GIF always returns *comp=4 +// +// Basic usage (see HDR discussion below for HDR usage): +// int x,y,n; +// unsigned char *data = stbi_load(filename, &x, &y, &n, 0); +// // ... process data if not NULL ... +// // ... x = width, y = height, n = # 8-bit components per pixel ... +// // ... replace '0' with '1'..'4' to force that many components per pixel +// // ... but 'n' will always be the number that it would have been if you said 0 +// stbi_image_free(data); +// +// Standard parameters: +// int *x -- outputs image width in pixels +// int *y -- outputs image height in pixels +// int *channels_in_file -- outputs # of image components in image file +// int desired_channels -- if non-zero, # of image components requested in result +// +// The return value from an image loader is an 'unsigned char *' which points +// to the pixel data, or NULL on an allocation failure or if the image is +// corrupt or invalid. The pixel data consists of *y scanlines of *x pixels, +// with each pixel consisting of N interleaved 8-bit components; the first +// pixel pointed to is top-left-most in the image. There is no padding between +// image scanlines or between pixels, regardless of format. The number of +// components N is 'desired_channels' if desired_channels is non-zero, or +// *channels_in_file otherwise. If desired_channels is non-zero, +// *channels_in_file has the number of components that _would_ have been +// output otherwise. E.g. 
if you set desired_channels to 4, you will always +// get RGBA output, but you can check *channels_in_file to see if it's trivially +// opaque because e.g. there were only 3 channels in the source image. +// +// An output image with N components has the following components interleaved +// in this order in each pixel: +// +// N=#comp components +// 1 grey +// 2 grey, alpha +// 3 red, green, blue +// 4 red, green, blue, alpha +// +// If image loading fails for any reason, the return value will be NULL, +// and *x, *y, *channels_in_file will be unchanged. The function +// stbi_failure_reason() can be queried for an extremely brief, end-user +// unfriendly explanation of why the load failed. Define STBI_NO_FAILURE_STRINGS +// to avoid compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly +// more user-friendly ones. +// +// Paletted PNG, BMP, GIF, and PIC images are automatically depalettized. +// +// To query the width, height and component count of an image without having to +// decode the full file, you can use the stbi_info family of functions: +// +// int x,y,n,ok; +// ok = stbi_info(filename, &x, &y, &n); +// // returns ok=1 and sets x, y, n if image is a supported format, +// // 0 otherwise. +// +// Note that stb_image pervasively uses ints in its public API for sizes, +// including sizes of memory buffers. This is now part of the API and thus +// hard to change without causing breakage. As a result, the various image +// loaders all have certain limits on image size; these differ somewhat +// by format but generally boil down to either just under 2GB or just under +// 1GB. When the decoded image would be larger than this, stb_image decoding +// will fail. +// +// Additionally, stb_image will reject image files that have any of their +// dimensions set to a larger value than the configurable STBI_MAX_DIMENSIONS, +// which defaults to 2**24 = 16777216 pixels. 
Due to the above memory limit, +// the only way to have an image with such dimensions load correctly +// is for it to have a rather extreme aspect ratio. Either way, the +// assumption here is that such larger images are likely to be malformed +// or malicious. If you do need to load an image with individual dimensions +// larger than that, and it still fits in the overall size limit, you can +// #define STBI_MAX_DIMENSIONS on your own to be something larger. +// +// =========================================================================== +// +// UNICODE: +// +// If compiling for Windows and you wish to use Unicode filenames, compile +// with +// #define STBI_WINDOWS_UTF8 +// and pass utf8-encoded filenames. Call stbi_convert_wchar_to_utf8 to convert +// Windows wchar_t filenames to utf8. +// +// =========================================================================== +// +// Philosophy +// +// stb libraries are designed with the following priorities: +// +// 1. easy to use +// 2. easy to maintain +// 3. good performance +// +// Sometimes I let "good performance" creep up in priority over "easy to maintain", +// and for best performance I may provide less-easy-to-use APIs that give higher +// performance, in addition to the easy-to-use ones. Nevertheless, it's important +// to keep in mind that from the standpoint of you, a client of this library, +// all you care about is #1 and #3, and stb libraries DO NOT emphasize #3 above all. +// +// Some secondary priorities arise directly from the first two, some of which +// provide more explicit reasons why performance can't be emphasized. +// +// - Portable ("ease of use") +// - Small source code footprint ("easy to maintain") +// - No dependencies ("ease of use") +// +// =========================================================================== +// +// I/O callbacks +// +// I/O callbacks allow you to read from arbitrary sources, like packaged +// files or some other source. 
Data read from callbacks are processed +// through a small internal buffer (currently 128 bytes) to try to reduce +// overhead. +// +// The three functions you must define are "read" (reads some bytes of data), +// "skip" (skips some bytes of data), "eof" (reports if the stream is at the end). +// +// =========================================================================== +// +// SIMD support +// +// The JPEG decoder will try to automatically use SIMD kernels on x86 when +// supported by the compiler. For ARM Neon support, you must explicitly +// request it. +// +// (The old do-it-yourself SIMD API is no longer supported in the current +// code.) +// +// On x86, SSE2 will automatically be used when available based on a run-time +// test; if not, the generic C versions are used as a fall-back. On ARM targets, +// the typical path is to have separate builds for NEON and non-NEON devices +// (at least this is true for iOS and Android). Therefore, the NEON support is +// toggled by a build flag: define STBI_NEON to get NEON loops. +// +// If for some reason you do not want to use any of SIMD code, or if +// you have issues compiling it, you can disable it entirely by +// defining STBI_NO_SIMD. +// +// =========================================================================== +// +// HDR image support (disable by defining STBI_NO_HDR) +// +// stb_image supports loading HDR images in general, and currently the Radiance +// .HDR file format specifically. You can still load any file through the existing +// interface; if you attempt to load an HDR file, it will be automatically remapped +// to LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1; +// both of these constants can be reconfigured through this interface: +// +// stbi_hdr_to_ldr_gamma(2.2f); +// stbi_hdr_to_ldr_scale(1.0f); +// +// (note, do not use _inverse_ constants; stbi_image will invert them +// appropriately). 
+// +// Additionally, there is a new, parallel interface for loading files as +// (linear) floats to preserve the full dynamic range: +// +// float *data = stbi_loadf(filename, &x, &y, &n, 0); +// +// If you load LDR images through this interface, those images will +// be promoted to floating point values, run through the inverse of +// constants corresponding to the above: +// +// stbi_ldr_to_hdr_scale(1.0f); +// stbi_ldr_to_hdr_gamma(2.2f); +// +// Finally, given a filename (or an open file or memory block--see header +// file for details) containing image data, you can query for the "most +// appropriate" interface to use (that is, whether the image is HDR or +// not), using: +// +// stbi_is_hdr(char *filename); +// +// =========================================================================== +// +// iPhone PNG support: +// +// We optionally support converting iPhone-formatted PNGs (which store +// premultiplied BGRA) back to RGB, even though they're internally encoded +// differently. To enable this conversion, call +// stbi_convert_iphone_png_to_rgb(1). +// +// Call stbi_set_unpremultiply_on_load(1) as well to force a divide per +// pixel to remove any premultiplied alpha *only* if the image file explicitly +// says there's premultiplied data (currently only happens in iPhone images, +// and only if iPhone convert-to-rgb processing is on). +// +// =========================================================================== +// +// ADDITIONAL CONFIGURATION +// +// - You can suppress implementation of any of the decoders to reduce +// your code footprint by #defining one or more of the following +// symbols before creating the implementation. 
+// +// STBI_NO_JPEG +// STBI_NO_PNG +// STBI_NO_BMP +// STBI_NO_PSD +// STBI_NO_TGA +// STBI_NO_GIF +// STBI_NO_HDR +// STBI_NO_PIC +// STBI_NO_PNM (.ppm and .pgm) +// +// - You can request *only* certain decoders and suppress all other ones +// (this will be more forward-compatible, as addition of new decoders +// doesn't require you to disable them explicitly): +// +// STBI_ONLY_JPEG +// STBI_ONLY_PNG +// STBI_ONLY_BMP +// STBI_ONLY_PSD +// STBI_ONLY_TGA +// STBI_ONLY_GIF +// STBI_ONLY_HDR +// STBI_ONLY_PIC +// STBI_ONLY_PNM (.ppm and .pgm) +// +// - If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still +// want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB +// +// - If you define STBI_MAX_DIMENSIONS, stb_image will reject images greater +// than that size (in either width or height) without further processing. +// This is to let programs in the wild set an upper bound to prevent +// denial-of-service attacks on untrusted data, as one could generate a +// valid image of gigantic dimensions and force stb_image to allocate a +// huge block of memory and spend disproportionate time decoding it. By +// default this is set to (1 << 24), which is 16777216, but that's still +// very big. + +#ifndef STBI_NO_STDIO +#include +#endif // STBI_NO_STDIO + +#define STBI_VERSION 1 + +enum +{ + STBI_default = 0, // only used for desired_channels + + STBI_grey = 1, + STBI_grey_alpha = 2, + STBI_rgb = 3, + STBI_rgb_alpha = 4 +}; + +#include +typedef unsigned char stbi_uc; +typedef unsigned short stbi_us; + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef STBIDEF +#ifdef STB_IMAGE_STATIC +#define STBIDEF static +#else +#define STBIDEF extern +#endif +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// PRIMARY API - works on images of any type +// + +// +// load image by filename, open file, or memory buffer +// + +typedef struct +{ + int (*read) (void *user,char *data,int size); // fill 'data' with 'size' bytes. 
return number of bytes actually read + void (*skip) (void *user,int n); // skip the next 'n' bytes, or 'unget' the last -n bytes if negative + int (*eof) (void *user); // returns nonzero if we are at end of file/data +} stbi_io_callbacks; + +//////////////////////////////////// +// +// 8-bits-per-channel interface +// + +STBIDEF stbi_uc *stbi_load_from_memory (stbi_uc const *buffer, int len , int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk , void *user, int *x, int *y, int *channels_in_file, int desired_channels); + +#ifndef STBI_NO_STDIO +STBIDEF stbi_uc *stbi_load (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_uc *stbi_load_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); +// for stbi_load_from_file, file pointer is left pointing immediately after image +#endif + +#ifndef STBI_NO_GIF +STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp); +#endif + +#ifdef STBI_WINDOWS_UTF8 +STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input); +#endif + +//////////////////////////////////// +// +// 16-bits-per-channel interface +// + +STBIDEF stbi_us *stbi_load_16_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + +#ifndef STBI_NO_STDIO +STBIDEF stbi_us *stbi_load_16 (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_us *stbi_load_from_file_16(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); +#endif + +//////////////////////////////////// +// +// float-per-channel interface +// +#ifndef STBI_NO_LINEAR + STBIDEF 
float *stbi_loadf_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_callbacks (stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + + #ifndef STBI_NO_STDIO + STBIDEF float *stbi_loadf (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); + #endif +#endif + +#ifndef STBI_NO_HDR + STBIDEF void stbi_hdr_to_ldr_gamma(float gamma); + STBIDEF void stbi_hdr_to_ldr_scale(float scale); +#endif // STBI_NO_HDR + +#ifndef STBI_NO_LINEAR + STBIDEF void stbi_ldr_to_hdr_gamma(float gamma); + STBIDEF void stbi_ldr_to_hdr_scale(float scale); +#endif // STBI_NO_LINEAR + +// stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user); +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len); +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename); +STBIDEF int stbi_is_hdr_from_file(FILE *f); +#endif // STBI_NO_STDIO + + +// get a VERY brief reason for failure +// on most compilers (and ALL modern mainstream compilers) this is threadsafe +STBIDEF const char *stbi_failure_reason (void); + +// free the loaded image -- this is just free() +STBIDEF void stbi_image_free (void *retval_from_stbi_load); + +// get image dimensions & components without fully decoding +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp); +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len); +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *clbk, void *user); + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info (char const *filename, 
int *x, int *y, int *comp); +STBIDEF int stbi_info_from_file (FILE *f, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit (char const *filename); +STBIDEF int stbi_is_16_bit_from_file(FILE *f); +#endif + + + +// for image formats that explicitly notate that they have premultiplied alpha, +// we just return the colors as stored in the file. set this flag to force +// unpremultiplication. results are undefined if the unpremultiply overflow. +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply); + +// indicate whether we should process iphone images back to canonical format, +// or just pass them through "as-is" +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert); + +// flip the image vertically, so the first pixel in the output array is the bottom left +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip); + +// as above, but only applies to images loaded on the thread that calls the function +// this function is only available if your compiler supports thread-local variables; +// calling it will fail to link if your compiler doesn't +STBIDEF void stbi_set_unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply); +STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert); +STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip); + +// ZLIB client - used by PNG, available for other purposes + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen); +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header); +STBIDEF char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen); +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + +STBIDEF char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen); +STBIDEF int 
stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + + +#ifdef __cplusplus +} +#endif + +// +// +//// end header file ///////////////////////////////////////////////////// +#endif // STBI_INCLUDE_STB_IMAGE_H + +#ifdef STB_IMAGE_IMPLEMENTATION + +#if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || defined(STBI_ONLY_BMP) \ + || defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) \ + || defined(STBI_ONLY_HDR) || defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) \ + || defined(STBI_ONLY_ZLIB) + #ifndef STBI_ONLY_JPEG + #define STBI_NO_JPEG + #endif + #ifndef STBI_ONLY_PNG + #define STBI_NO_PNG + #endif + #ifndef STBI_ONLY_BMP + #define STBI_NO_BMP + #endif + #ifndef STBI_ONLY_PSD + #define STBI_NO_PSD + #endif + #ifndef STBI_ONLY_TGA + #define STBI_NO_TGA + #endif + #ifndef STBI_ONLY_GIF + #define STBI_NO_GIF + #endif + #ifndef STBI_ONLY_HDR + #define STBI_NO_HDR + #endif + #ifndef STBI_ONLY_PIC + #define STBI_NO_PIC + #endif + #ifndef STBI_ONLY_PNM + #define STBI_NO_PNM + #endif +#endif + +#if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB) +#define STBI_NO_ZLIB +#endif + + +#include +#include // ptrdiff_t on osx +#include +#include +#include + +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +#include // ldexp, pow +#endif + +#ifndef STBI_NO_STDIO +#include +#endif + +#ifndef STBI_ASSERT +#include +#define STBI_ASSERT(x) assert(x) +#endif + +#ifdef __cplusplus +#define STBI_EXTERN extern "C" +#else +#define STBI_EXTERN extern +#endif + + +#ifndef _MSC_VER + #ifdef __cplusplus + #define stbi_inline inline + #else + #define stbi_inline + #endif +#else + #define stbi_inline __forceinline +#endif + +#ifndef STBI_NO_THREAD_LOCALS + #if defined(__cplusplus) && __cplusplus >= 201103L + #define STBI_THREAD_LOCAL thread_local + #elif defined(__GNUC__) && __GNUC__ < 5 + #define STBI_THREAD_LOCAL __thread + #elif defined(_MSC_VER) + #define STBI_THREAD_LOCAL __declspec(thread) 
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_THREADS__) + #define STBI_THREAD_LOCAL _Thread_local + #endif + + #ifndef STBI_THREAD_LOCAL + #if defined(__GNUC__) + #define STBI_THREAD_LOCAL __thread + #endif + #endif +#endif + +#if defined(_MSC_VER) || defined(__SYMBIAN32__) +typedef unsigned short stbi__uint16; +typedef signed short stbi__int16; +typedef unsigned int stbi__uint32; +typedef signed int stbi__int32; +#else +#include +typedef uint16_t stbi__uint16; +typedef int16_t stbi__int16; +typedef uint32_t stbi__uint32; +typedef int32_t stbi__int32; +#endif + +// should produce compiler error if size is wrong +typedef unsigned char validate_uint32[sizeof(stbi__uint32)==4 ? 1 : -1]; + +#ifdef _MSC_VER +#define STBI_NOTUSED(v) (void)(v) +#else +#define STBI_NOTUSED(v) (void)sizeof(v) +#endif + +#ifdef _MSC_VER +#define STBI_HAS_LROTL +#endif + +#ifdef STBI_HAS_LROTL + #define stbi_lrot(x,y) _lrotl(x,y) +#else + #define stbi_lrot(x,y) (((x) << (y)) | ((x) >> (-(y) & 31))) +#endif + +#if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED)) +// ok +#elif !defined(STBI_MALLOC) && !defined(STBI_FREE) && !defined(STBI_REALLOC) && !defined(STBI_REALLOC_SIZED) +// ok +#else +#error "Must define all or none of STBI_MALLOC, STBI_FREE, and STBI_REALLOC (or STBI_REALLOC_SIZED)." 
+#endif + +#ifndef STBI_MALLOC +#define STBI_MALLOC(sz) malloc(sz) +#define STBI_REALLOC(p,newsz) realloc(p,newsz) +#define STBI_FREE(p) free(p) +#endif + +#ifndef STBI_REALLOC_SIZED +#define STBI_REALLOC_SIZED(p,oldsz,newsz) STBI_REALLOC(p,newsz) +#endif + +// x86/x64 detection +#if defined(__x86_64__) || defined(_M_X64) +#define STBI__X64_TARGET +#elif defined(__i386) || defined(_M_IX86) +#define STBI__X86_TARGET +#endif + +#if defined(__GNUC__) && defined(STBI__X86_TARGET) && !defined(__SSE2__) && !defined(STBI_NO_SIMD) +// gcc doesn't support sse2 intrinsics unless you compile with -msse2, +// which in turn means it gets to use SSE2 everywhere. This is unfortunate, +// but previous attempts to provide the SSE2 functions with runtime +// detection caused numerous issues. The way architecture extensions are +// exposed in GCC/Clang is, sadly, not really suited for one-file libs. +// New behavior: if compiled with -msse2, we use SSE2 without any +// detection; if not, we don't use it at all. +#define STBI_NO_SIMD +#endif + +#if defined(__MINGW32__) && defined(STBI__X86_TARGET) && !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD) +// Note that __MINGW32__ doesn't actually mean 32-bit, so we have to avoid STBI__X64_TARGET +// +// 32-bit MinGW wants ESP to be 16-byte aligned, but this is not in the +// Windows ABI and VC++ as well as Windows DLLs don't maintain that invariant. +// As a result, enabling SSE2 on 32-bit MinGW is dangerous when not +// simultaneously enabling "-mstackrealign". +// +// See https://github.com/nothings/stb/issues/81 for more information. +// +// So default to no SSE2 on 32-bit MinGW. If you've read this far and added +// -mstackrealign to your build settings, feel free to #define STBI_MINGW_ENABLE_SSE2. 
+#define STBI_NO_SIMD +#endif + +#if !defined(STBI_NO_SIMD) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET)) +#define STBI_SSE2 +#include + +#ifdef _MSC_VER + +#if _MSC_VER >= 1400 // not VC6 +#include // __cpuid +static int stbi__cpuid3(void) +{ + int info[4]; + __cpuid(info,1); + return info[3]; +} +#else +static int stbi__cpuid3(void) +{ + int res; + __asm { + mov eax,1 + cpuid + mov res,edx + } + return res; +} +#endif + +#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name + +#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2) +static int stbi__sse2_available(void) +{ + int info3 = stbi__cpuid3(); + return ((info3 >> 26) & 1) != 0; +} +#endif + +#else // assume GCC-style if not VC++ +#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) + +#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2) +static int stbi__sse2_available(void) +{ + // If we're even attempting to compile this on GCC/Clang, that means + // -msse2 is on, which means the compiler is allowed to use SSE2 + // instructions at will, and so are we. 
+ return 1; +} +#endif + +#endif +#endif + +// ARM NEON +#if defined(STBI_NO_SIMD) && defined(STBI_NEON) +#undef STBI_NEON +#endif + +#ifdef STBI_NEON +#include +#ifdef _MSC_VER +#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name +#else +#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) +#endif +#endif + +#ifndef STBI_SIMD_ALIGN +#define STBI_SIMD_ALIGN(type, name) type name +#endif + +#ifndef STBI_MAX_DIMENSIONS +#define STBI_MAX_DIMENSIONS (1 << 24) +#endif + +/////////////////////////////////////////////// +// +// stbi__context struct and start_xxx functions + +// stbi__context structure is our basic context used by all images, so it +// contains all the IO context, plus some basic image information +typedef struct +{ + stbi__uint32 img_x, img_y; + int img_n, img_out_n; + + stbi_io_callbacks io; + void *io_user_data; + + int read_from_callbacks; + int buflen; + stbi_uc buffer_start[128]; + int callback_already_read; + + stbi_uc *img_buffer, *img_buffer_end; + stbi_uc *img_buffer_original, *img_buffer_original_end; +} stbi__context; + + +static void stbi__refill_buffer(stbi__context *s); + +// initialize a memory-decode context +static void stbi__start_mem(stbi__context *s, stbi_uc const *buffer, int len) +{ + s->io.read = NULL; + s->read_from_callbacks = 0; + s->callback_already_read = 0; + s->img_buffer = s->img_buffer_original = (stbi_uc *) buffer; + s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *) buffer+len; +} + +// initialize a callback-based context +static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user) +{ + s->io = *c; + s->io_user_data = user; + s->buflen = sizeof(s->buffer_start); + s->read_from_callbacks = 1; + s->callback_already_read = 0; + s->img_buffer = s->img_buffer_original = s->buffer_start; + stbi__refill_buffer(s); + s->img_buffer_original_end = s->img_buffer_end; +} + +#ifndef STBI_NO_STDIO + +static int stbi__stdio_read(void *user, char *data, int size) 
+{ + return (int) fread(data,1,size,(FILE*) user); +} + +static void stbi__stdio_skip(void *user, int n) +{ + int ch; + fseek((FILE*) user, n, SEEK_CUR); + ch = fgetc((FILE*) user); /* have to read a byte to reset feof()'s flag */ + if (ch != EOF) { + ungetc(ch, (FILE *) user); /* push byte back onto stream if valid. */ + } +} + +static int stbi__stdio_eof(void *user) +{ + return feof((FILE*) user) || ferror((FILE *) user); +} + +static stbi_io_callbacks stbi__stdio_callbacks = +{ + stbi__stdio_read, + stbi__stdio_skip, + stbi__stdio_eof, +}; + +static void stbi__start_file(stbi__context *s, FILE *f) +{ + stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *) f); +} + +//static void stop_file(stbi__context *s) { } + +#endif // !STBI_NO_STDIO + +static void stbi__rewind(stbi__context *s) +{ + // conceptually rewind SHOULD rewind to the beginning of the stream, + // but we just rewind to the beginning of the initial buffer, because + // we only use it after doing 'test', which only ever looks at at most 92 bytes + s->img_buffer = s->img_buffer_original; + s->img_buffer_end = s->img_buffer_original_end; +} + +enum +{ + STBI_ORDER_RGB, + STBI_ORDER_BGR +}; + +typedef struct +{ + int bits_per_channel; + int num_channels; + int channel_order; +} stbi__result_info; + +#ifndef STBI_NO_JPEG +static int stbi__jpeg_test(stbi__context *s); +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNG +static int stbi__png_test(stbi__context *s); +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__png_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_BMP +static int stbi__bmp_test(stbi__context *s); +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, 
int req_comp, stbi__result_info *ri); +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_TGA +static int stbi__tga_test(stbi__context *s); +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s); +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc); +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__psd_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_HDR +static int stbi__hdr_test(stbi__context *s); +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_test(stbi__context *s); +static void *stbi__pic_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_GIF +static int stbi__gif_test(stbi__context *s); +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp); +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNM +static int stbi__pnm_test(stbi__context *s); +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__pnm_is16(stbi__context *s); +#endif + +static +#ifdef STBI_THREAD_LOCAL +STBI_THREAD_LOCAL +#endif +const char 
*stbi__g_failure_reason; + +STBIDEF const char *stbi_failure_reason(void) +{ + return stbi__g_failure_reason; +} + +#ifndef STBI_NO_FAILURE_STRINGS +static int stbi__err(const char *str) +{ + stbi__g_failure_reason = str; + return 0; +} +#endif + +static void *stbi__malloc(size_t size) +{ + return STBI_MALLOC(size); +} + +// stb_image uses ints pervasively, including for offset calculations. +// therefore the largest decoded image size we can support with the +// current code, even on 64-bit targets, is INT_MAX. this is not a +// significant limitation for the intended use case. +// +// we do, however, need to make sure our size calculations don't +// overflow. hence a few helper functions for size calculations that +// multiply integers together, making sure that they're non-negative +// and no overflow occurs. + +// return 1 if the sum is valid, 0 on overflow. +// negative terms are considered invalid. +static int stbi__addsizes_valid(int a, int b) +{ + if (b < 0) return 0; + // now 0 <= b <= INT_MAX, hence also + // 0 <= INT_MAX - b <= INTMAX. + // And "a + b <= INT_MAX" (which might overflow) is the + // same as a <= INT_MAX - b (no overflow) + return a <= INT_MAX - b; +} + +// returns 1 if the product is valid, 0 on overflow. +// negative factors are considered invalid. 
static int stbi__mul2sizes_valid(int a, int b)
{
   // negative factors are invalid; a zero factor can never overflow;
   // otherwise, since both operands are non-negative, "a*b <= INT_MAX"
   // is exactly "a <= INT_MAX/b", and that division cannot overflow.
   if (a < 0 || b < 0) return 0;
   return (b == 0) || (a <= INT_MAX/b);
}

#if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR)
// nonzero if "a*b + add" has no negative terms/factors and stays within int range
static int stbi__mad2sizes_valid(int a, int b, int add)
{
   if (!stbi__mul2sizes_valid(a, b)) return 0;
   return stbi__addsizes_valid(a*b, add);
}
#endif

// nonzero if "a*b*c + add" has no negative terms/factors and stays within int range;
// each partial product is validated before it is formed
static int stbi__mad3sizes_valid(int a, int b, int c, int add)
{
   if (!stbi__mul2sizes_valid(a, b))   return 0;
   if (!stbi__mul2sizes_valid(a*b, c)) return 0;
   return stbi__addsizes_valid(a*b*c, add);
}

// nonzero if "a*b*c*d + add" has no negative terms/factors and stays within int range
#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) || !defined(STBI_NO_PNM)
static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add)
{
   if (!stbi__mul2sizes_valid(a, b))     return 0;
   if (!stbi__mul2sizes_valid(a*b, c))   return 0;
   if (!stbi__mul2sizes_valid(a*b*c, d)) return 0;
   return stbi__addsizes_valid(a*b*c*d, add);
}
#endif

#if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR)
// malloc wrappers that reject any size whose computation would overflow int
static void *stbi__malloc_mad2(int a, int b, int add)
{
   return stbi__mad2sizes_valid(a, b, add) ? stbi__malloc(a*b + add) : NULL;
}
#endif

static void *stbi__malloc_mad3(int a, int b, int c, int add)
{
   return stbi__mad3sizes_valid(a, b, c, add) ? stbi__malloc(a*b*c + add) : NULL;
}

#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) || !defined(STBI_NO_PNM)
static void *stbi__malloc_mad4(int a, int b, int c, int d, int add)
{
   return stbi__mad4sizes_valid(a, b, c, d, add) ? stbi__malloc(a*b*c*d + add) : NULL;
}
#endif

// returns 1 if the sum of two signed ints is valid (between -2^31
and 2^31-1 inclusive), 0 on overflow. +static int stbi__addints_valid(int a, int b) +{ + if ((a >= 0) != (b >= 0)) return 1; // a and b have different signs, so no overflow + if (a < 0 && b < 0) return a >= INT_MIN - b; // same as a + b >= INT_MIN; INT_MIN - b cannot overflow since b < 0. + return a <= INT_MAX - b; +} + +// returns 1 if the product of two ints fits in a signed short, 0 on overflow. +static int stbi__mul2shorts_valid(int a, int b) +{ + if (b == 0 || b == -1) return 1; // multiplication by 0 is always 0; check for -1 so SHRT_MIN/b doesn't overflow + if ((a >= 0) == (b >= 0)) return a <= SHRT_MAX/b; // product is positive, so similar to mul2sizes_valid + if (b < 0) return a <= SHRT_MIN / b; // same as a * b >= SHRT_MIN + return a >= SHRT_MIN / b; +} + +// stbi__err - error +// stbi__errpf - error returning pointer to float +// stbi__errpuc - error returning pointer to unsigned char + +#ifdef STBI_NO_FAILURE_STRINGS + #define stbi__err(x,y) 0 +#elif defined(STBI_FAILURE_USERMSG) + #define stbi__err(x,y) stbi__err(y) +#else + #define stbi__err(x,y) stbi__err(x) +#endif + +#define stbi__errpf(x,y) ((float *)(size_t) (stbi__err(x,y)?NULL:NULL)) +#define stbi__errpuc(x,y) ((unsigned char *)(size_t) (stbi__err(x,y)?NULL:NULL)) + +STBIDEF void stbi_image_free(void *retval_from_stbi_load) +{ + STBI_FREE(retval_from_stbi_load); +} + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp); +#endif + +#ifndef STBI_NO_HDR +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp); +#endif + +static int stbi__vertically_flip_on_load_global = 0; + +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load_global = flag_true_if_should_flip; +} + +#ifndef STBI_THREAD_LOCAL +#define stbi__vertically_flip_on_load stbi__vertically_flip_on_load_global +#else +static STBI_THREAD_LOCAL int stbi__vertically_flip_on_load_local, stbi__vertically_flip_on_load_set; + +STBIDEF 
void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load_local = flag_true_if_should_flip; + stbi__vertically_flip_on_load_set = 1; +} + +#define stbi__vertically_flip_on_load (stbi__vertically_flip_on_load_set \ + ? stbi__vertically_flip_on_load_local \ + : stbi__vertically_flip_on_load_global) +#endif // STBI_THREAD_LOCAL + +static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields + ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed + ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order + ri->num_channels = 0; + + // test the formats with a very explicit header first (at least a FOURCC + // or distinctive magic number first) + #ifndef STBI_NO_PNG + if (stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_BMP + if (stbi__bmp_test(s)) return stbi__bmp_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_GIF + if (stbi__gif_test(s)) return stbi__gif_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PSD + if (stbi__psd_test(s)) return stbi__psd_load(s,x,y,comp,req_comp, ri, bpc); + #else + STBI_NOTUSED(bpc); + #endif + #ifndef STBI_NO_PIC + if (stbi__pic_test(s)) return stbi__pic_load(s,x,y,comp,req_comp, ri); + #endif + + // then the formats that can end up attempting to load with just 1 or 2 + // bytes matching expectations; these are prone to false positives, so + // try them later + #ifndef STBI_NO_JPEG + if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PNM + if (stbi__pnm_test(s)) return stbi__pnm_load(s,x,y,comp,req_comp, ri); + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + float *hdr = stbi__hdr_load(s, x,y,comp,req_comp, ri); + return 
stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp); + } + #endif + + #ifndef STBI_NO_TGA + // test tga last because it's a crappy test! + if (stbi__tga_test(s)) + return stbi__tga_load(s,x,y,comp,req_comp, ri); + #endif + + return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt"); +} + +static stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi_uc *reduced; + + reduced = (stbi_uc *) stbi__malloc(img_len); + if (reduced == NULL) return stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16->8 bit scaling + + STBI_FREE(orig); + return reduced; +} + +static stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi__uint16 *enlarged; + + enlarged = (stbi__uint16 *) stbi__malloc(img_len*2); + if (enlarged == NULL) return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high and low byte, maps 0->0, 255->0xffff + + STBI_FREE(orig); + return enlarged; +} + +static void stbi__vertical_flip(void *image, int w, int h, int bytes_per_pixel) +{ + int row; + size_t bytes_per_row = (size_t)w * bytes_per_pixel; + stbi_uc temp[2048]; + stbi_uc *bytes = (stbi_uc *)image; + + for (row = 0; row < (h>>1); row++) { + stbi_uc *row0 = bytes + row*bytes_per_row; + stbi_uc *row1 = bytes + (h - row - 1)*bytes_per_row; + // swap row0 with row1 + size_t bytes_left = bytes_per_row; + while (bytes_left) { + size_t bytes_copy = (bytes_left < sizeof(temp)) ? 
bytes_left : sizeof(temp); + memcpy(temp, row0, bytes_copy); + memcpy(row0, row1, bytes_copy); + memcpy(row1, temp, bytes_copy); + row0 += bytes_copy; + row1 += bytes_copy; + bytes_left -= bytes_copy; + } + } +} + +#ifndef STBI_NO_GIF +static void stbi__vertical_flip_slices(void *image, int w, int h, int z, int bytes_per_pixel) +{ + int slice; + int slice_size = w * h * bytes_per_pixel; + + stbi_uc *bytes = (stbi_uc *)image; + for (slice = 0; slice < z; ++slice) { + stbi__vertical_flip(bytes, w, h, bytes_per_pixel); + bytes += slice_size; + } +} +#endif + +static unsigned char *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8); + + if (result == NULL) + return NULL; + + // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. + STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); + + if (ri.bits_per_channel != 8) { + result = stbi__convert_16_to_8((stbi__uint16 *) result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 8; + } + + // @TODO: move stbi__convert_format to here + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi_uc)); + } + + return (unsigned char *) result; +} + +static stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16); + + if (result == NULL) + return NULL; + + // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. + STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); + + if (ri.bits_per_channel != 16) { + result = stbi__convert_8_to_16((stbi_uc *) result, *x, *y, req_comp == 0 ? 
*comp : req_comp); + ri.bits_per_channel = 16; + } + + // @TODO: move stbi__convert_format16 to here + // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi__uint16)); + } + + return (stbi__uint16 *) result; +} + +#if !defined(STBI_NO_HDR) && !defined(STBI_NO_LINEAR) +static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp) +{ + if (stbi__vertically_flip_on_load && result != NULL) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(float)); + } +} +#endif + +#ifndef STBI_NO_STDIO + +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) +STBI_EXTERN __declspec(dllimport) int __stdcall MultiByteToWideChar(unsigned int cp, unsigned long flags, const char *str, int cbmb, wchar_t *widestr, int cchwide); +STBI_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int cp, unsigned long flags, const wchar_t *widestr, int cchwide, char *str, int cbmb, const char *defchar, int *used_default); +#endif + +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) +STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input) +{ + return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, (int) bufferlen, NULL, NULL); +} +#endif + +static FILE *stbi__fopen(char const *filename, char const *mode) +{ + FILE *f; +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) + wchar_t wMode[64]; + wchar_t wFilename[1024]; + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename)/sizeof(*wFilename))) + return 0; + + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode)/sizeof(*wMode))) + return 0; + +#if defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != _wfopen_s(&f, wFilename, wMode)) + f = 0; +#else + f = 
_wfopen(wFilename, wMode);
#endif

#elif defined(_MSC_VER) && _MSC_VER >= 1400
   if (0 != fopen_s(&f, filename, mode))
      f=0;
#else
   f = fopen(filename, mode);
#endif
   return f;
}


// Load an image from a named file, 8 bits per channel. Returns NULL on
// failure (use stbi_failure_reason() for details).
STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp)
{
   FILE *f = stbi__fopen(filename, "rb");
   unsigned char *result;
   if (!f) return stbi__errpuc("can't fopen", "Unable to open file");
   result = stbi_load_from_file(f,x,y,comp,req_comp);
   fclose(f);
   return result;
}

// Load from an already-open FILE*. On success the file position is seeked
// back so it sits just past the image data that was actually consumed.
STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
{
   unsigned char *result;
   stbi__context s;
   stbi__start_file(&s,f);
   result = stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp);
   if (result) {
      // need to 'unget' all the characters in the IO buffer
      fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR);
   }
   return result;
}

// 16-bit-per-channel variant of stbi_load_from_file.
STBIDEF stbi__uint16 *stbi_load_from_file_16(FILE *f, int *x, int *y, int *comp, int req_comp)
{
   stbi__uint16 *result;
   stbi__context s;
   stbi__start_file(&s,f);
   result = stbi__load_and_postprocess_16bit(&s,x,y,comp,req_comp);
   if (result) {
      // need to 'unget' all the characters in the IO buffer
      fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR);
   }
   return result;
}

// 16-bit-per-channel variant of stbi_load.
STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *comp, int req_comp)
{
   FILE *f = stbi__fopen(filename, "rb");
   stbi__uint16 *result;
   if (!f) return (stbi_us *) stbi__errpuc("can't fopen", "Unable to open file");
   result = stbi_load_from_file_16(f,x,y,comp,req_comp);
   fclose(f);
   return result;
}


#endif //!STBI_NO_STDIO

// 16-bit-per-channel load from a memory buffer.
STBIDEF stbi_us *stbi_load_16_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels)
{
   stbi__context s;
   stbi__start_mem(&s,buffer,len);
   return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels);
}

// 16-bit-per-channel load driven by user-supplied IO callbacks.
STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels)
{
   stbi__context s;
   stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user);
   return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels);
}

// 8-bit load from a memory buffer.
STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp)
{
   stbi__context s;
   stbi__start_mem(&s,buffer,len);
   return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp);
}

// 8-bit load driven by user-supplied IO callbacks.
STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp)
{
   stbi__context s;
   stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user);
   return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp);
}

#ifndef STBI_NO_GIF
// Load all frames of a GIF from memory as one x*y*z image stack; *delays
// receives a malloc'd array of per-frame delays. Frames are flipped
// slice-by-slice when vertical flipping is enabled.
STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp)
{
   unsigned char *result;
   stbi__context s;
   stbi__start_mem(&s,buffer,len);

   result = (unsigned char*) stbi__load_gif_main(&s, delays, x, y, z, comp, req_comp);
   if (stbi__vertically_flip_on_load) {
      stbi__vertical_flip_slices( result, *x, *y, *z, *comp );
   }

   return result;
}
#endif

#ifndef STBI_NO_LINEAR
// Decode to float data: HDR files are decoded natively (when enabled);
// everything else goes through the 8-bit decoder plus stbi__ldr_to_hdr.
static float *stbi__loadf_main(stbi__context *s, int *x, int *y, int *comp, int req_comp)
{
   unsigned char *data;
   #ifndef STBI_NO_HDR
   if (stbi__hdr_test(s)) {
      stbi__result_info ri;
      float *hdr_data = stbi__hdr_load(s,x,y,comp,req_comp, &ri);
      if (hdr_data)
         stbi__float_postprocess(hdr_data,x,y,comp,req_comp);
      return hdr_data;
   }
   #endif
   data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp);
   if (data)
      return stbi__ldr_to_hdr(data, *x, *y, req_comp ? req_comp : *comp);
   return stbi__errpf("unknown image type", "Image not of any known type, or corrupt");
}

STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp)
{
   stbi__context s;
   stbi__start_mem(&s,buffer,len);
   return stbi__loadf_main(&s,x,y,comp,req_comp);
}

STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp)
{
   stbi__context s;
   stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user);
   return stbi__loadf_main(&s,x,y,comp,req_comp);
}

#ifndef STBI_NO_STDIO
STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *comp, int req_comp)
{
   float *result;
   FILE *f = stbi__fopen(filename, "rb");
   if (!f) return stbi__errpf("can't fopen", "Unable to open file");
   result = stbi_loadf_from_file(f,x,y,comp,req_comp);
   fclose(f);
   return result;
}

STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
{
   stbi__context s;
   stbi__start_file(&s,f);
   return stbi__loadf_main(&s,x,y,comp,req_comp);
}
#endif // !STBI_NO_STDIO

#endif // !STBI_NO_LINEAR

// these is-hdr-or-not is defined independent of whether STBI_NO_LINEAR is
// defined, for API simplicity; if STBI_NO_LINEAR is defined, it always
// reports false!

// Probe a memory buffer: nonzero if it tests as a Radiance HDR image.
// Compiled to a constant 0 when STBI_NO_HDR is defined.
STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len)
{
   #ifndef STBI_NO_HDR
   stbi__context s;
   stbi__start_mem(&s,buffer,len);
   return stbi__hdr_test(&s);
   #else
   STBI_NOTUSED(buffer);
   STBI_NOTUSED(len);
   return 0;
   #endif
}

#ifndef STBI_NO_STDIO
// Probe a file by name; returns 0 if the file cannot be opened.
STBIDEF int stbi_is_hdr(char const *filename)
{
   FILE *f = stbi__fopen(filename, "rb");
   int result=0;
   if (f) {
      result = stbi_is_hdr_from_file(f);
      fclose(f);
   }
   return result;
}

// Probe an open FILE*; the file position is saved and restored so the
// caller can still load the image afterwards.
STBIDEF int stbi_is_hdr_from_file(FILE *f)
{
   #ifndef STBI_NO_HDR
   long pos = ftell(f);
   int res;
   stbi__context s;
   stbi__start_file(&s,f);
   res = stbi__hdr_test(&s);
   fseek(f, pos, SEEK_SET);
   return res;
   #else
   STBI_NOTUSED(f);
   return 0;
   #endif
}
#endif // !STBI_NO_STDIO

// Probe via user-supplied IO callbacks.
STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user)
{
   #ifndef STBI_NO_HDR
   stbi__context s;
   stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user);
   return stbi__hdr_test(&s);
   #else
   STBI_NOTUSED(clbk);
   STBI_NOTUSED(user);
   return 0;
   #endif
}

#ifndef STBI_NO_LINEAR
// Tunables for LDR->HDR conversion (consumed by stbi__ldr_to_hdr).
static float stbi__l2h_gamma=2.2f, stbi__l2h_scale=1.0f;

STBIDEF void stbi_ldr_to_hdr_gamma(float gamma) { stbi__l2h_gamma = gamma; }
STBIDEF void stbi_ldr_to_hdr_scale(float scale) { stbi__l2h_scale = scale; }
#endif

// Tunables for HDR->LDR conversion, stored pre-inverted so the per-pixel
// loop in stbi__hdr_to_ldr multiplies instead of divides.
static float stbi__h2l_gamma_i=1.0f/2.2f, stbi__h2l_scale_i=1.0f;

STBIDEF void stbi_hdr_to_ldr_gamma(float gamma) { stbi__h2l_gamma_i = 1/gamma; }
STBIDEF void stbi_hdr_to_ldr_scale(float scale) { stbi__h2l_scale_i = 1/scale; }


//////////////////////////////////////////////////////////////////////////////
//
// Common code used by all image loaders
//

// How much of the stream a decoder pass intends to consume.
enum
{
   STBI__SCAN_load=0,
   STBI__SCAN_type,
   STBI__SCAN_header
};

// Refill the context's internal buffer from the user read callback.
// A zero-byte read switches the context to memory mode over a one-byte
// zero sentinel so later stbi__get8 calls return 0 instead of reading
// unsafe memory.
static void stbi__refill_buffer(stbi__context *s)
{
   int n = (s->io.read)(s->io_user_data,(char*)s->buffer_start,s->buflen);
   s->callback_already_read += (int) (s->img_buffer - s->img_buffer_original);
   if (n == 0) {
      // at end of file, treat same as if from memory, but need to handle case
      // where s->img_buffer isn't pointing to safe memory, e.g. 0-byte file
      s->read_from_callbacks = 0;
      s->img_buffer = s->buffer_start;
      s->img_buffer_end = s->buffer_start+1;
      *s->img_buffer = 0;
   } else {
      s->img_buffer = s->buffer_start;
      s->img_buffer_end = s->buffer_start + n;
   }
}

// Read one byte; refills from callbacks when the buffer runs dry and
// returns 0 past end-of-stream.
stbi_inline static stbi_uc stbi__get8(stbi__context *s)
{
   if (s->img_buffer < s->img_buffer_end)
      return *s->img_buffer++;
   if (s->read_from_callbacks) {
      stbi__refill_buffer(s);
      return *s->img_buffer++;
   }
   return 0;
}

#if defined(STBI_NO_JPEG) && defined(STBI_NO_HDR) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM)
// nothing
#else
// Nonzero once the stream (memory- or callback-backed) is exhausted.
stbi_inline static int stbi__at_eof(stbi__context *s)
{
   if (s->io.read) {
      if (!(s->io.eof)(s->io_user_data)) return 0;
      // if feof() is true, check if buffer = end
      // special case: we've only got the special 0 character at the end
      if (s->read_from_callbacks == 0) return 1;
   }

   return s->img_buffer >= s->img_buffer_end;
}
#endif

#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC)
// nothing
#else
// Advance the stream by n bytes. A negative n (corrupt header arithmetic)
// clamps to the end of the buffer rather than seeking backwards.
static void stbi__skip(stbi__context *s, int n)
{
   if (n == 0) return; // already there!
   if (n < 0) {
      s->img_buffer = s->img_buffer_end;
      return;
   }
   if (s->io.read) {
      int blen = (int) (s->img_buffer_end - s->img_buffer);
      if (blen < n) {
         // not enough buffered: drain the buffer, delegate the rest to the callback
         s->img_buffer = s->img_buffer_end;
         (s->io.skip)(s->io_user_data, n - blen);
         return;
      }
   }
   s->img_buffer += n;
}
#endif

#if defined(STBI_NO_PNG) && defined(STBI_NO_TGA) && defined(STBI_NO_HDR) && defined(STBI_NO_PNM)
// nothing
#else
// Copy exactly n bytes into 'buffer'; returns 1 on success, 0 if the
// stream ends early.
static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n)
{
   if (s->io.read) {
      int blen = (int) (s->img_buffer_end - s->img_buffer);
      if (blen < n) {
         int res, count;

         // take what's buffered, then read the remainder straight from the callback
         memcpy(buffer, s->img_buffer, blen);

         count = (s->io.read)(s->io_user_data, (char*) buffer + blen, n - blen);
         res = (count == (n-blen));
         s->img_buffer = s->img_buffer_end;
         return res;
      }
   }

   if (s->img_buffer+n <= s->img_buffer_end) {
      memcpy(buffer, s->img_buffer, n);
      s->img_buffer += n;
      return 1;
   } else
      return 0;
}
#endif

#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC)
// nothing
#else
// 16-bit big-endian read.
static int stbi__get16be(stbi__context *s)
{
   int z = stbi__get8(s);
   return (z << 8) + stbi__get8(s);
}
#endif

#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC)
// nothing
#else
// 32-bit big-endian read.
static stbi__uint32 stbi__get32be(stbi__context *s)
{
   stbi__uint32 z = stbi__get16be(s);
   return (z << 16) + stbi__get16be(s);
}
#endif

#if defined(STBI_NO_BMP) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF)
// nothing
#else
// 16-bit little-endian read.
static int stbi__get16le(stbi__context *s)
{
   int z = stbi__get8(s);
   return z + (stbi__get8(s) << 8);
}
#endif

#ifndef STBI_NO_BMP
// 32-bit little-endian read.
static stbi__uint32 stbi__get32le(stbi__context *s)
{
   stbi__uint32 z = stbi__get16le(s);
   z += (stbi__uint32)stbi__get16le(s) << 16;
   return z;
}
#endif

#define STBI__BYTECAST(x)  ((stbi_uc) ((x) & 255))  // truncate int to byte without warnings

#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) &&
defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +////////////////////////////////////////////////////////////////////////////// +// +// generic converter from built-in img_n to req_comp +// individual types do this automatically as much as possible (e.g. jpeg +// does all cases internally since it needs to colorspace convert anyway, +// and it never has alpha, so very few cases ). png can automatically +// interleave an alpha=255 channel, but falls back to this for other cases +// +// assume data buffer is malloced, so malloc a new one and free that one +// only failure mode is malloc failing + +static stbi_uc stbi__compute_y(int r, int g, int b) +{ + return (stbi_uc) (((r*77) + (g*150) + (29*b)) >> 8); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + unsigned char *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (unsigned char *) stbi__malloc_mad3(req_comp, x, y, 0); + if (good == NULL) { + STBI_FREE(data); + return stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + unsigned char *src = data + j * x * img_n ; + unsigned char *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=255; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + 
STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=255; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=255; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = 255; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break; + default: STBI_ASSERT(0); STBI_FREE(data); STBI_FREE(good); return stbi__errpuc("unsupported", "Unsupported format conversion"); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) +// nothing +#else +static stbi__uint16 stbi__compute_y_16(int r, int g, int b) +{ + return (stbi__uint16) (((r*77) + (g*150) + (29*b)) >> 8); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) +// nothing +#else +static stbi__uint16 *stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + stbi__uint16 *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (stbi__uint16 *) stbi__malloc(req_comp * x * y * 2); + if (good == NULL) { + STBI_FREE(data); + return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + stbi__uint16 *src = data + j * x * img_n ; + stbi__uint16 *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp 
components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=0xffff; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=0xffff; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=0xffff; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = 0xffff; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break; + default: STBI_ASSERT(0); STBI_FREE(data); STBI_FREE(good); return (stbi__uint16*) stbi__errpuc("unsupported", "Unsupported format conversion"); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} +#endif + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp) +{ + int i,k,n; + float *output; + if (!data) return NULL; + output = (float *) stbi__malloc_mad4(x, y, comp, sizeof(float), 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpf("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + output[i*comp + k] = (float) (pow(data[i*comp+k]/255.0f, stbi__l2h_gamma) * stbi__l2h_scale); + } + } + if (n < comp) { + for (i=0; i < x*y; ++i) { + output[i*comp + n] = data[i*comp + n]/255.0f; + } + } + STBI_FREE(data); + return output; +} +#endif + +#ifndef STBI_NO_HDR +#define 
stbi__float2int(x) ((int) (x)) +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp) +{ + int i,k,n; + stbi_uc *output; + if (!data) return NULL; + output = (stbi_uc *) stbi__malloc_mad3(x, y, comp, 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpuc("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + float z = (float) pow(data[i*comp+k]*stbi__h2l_scale_i, stbi__h2l_gamma_i) * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + if (k < comp) { + float z = data[i*comp+k] * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + } + STBI_FREE(data); + return output; +} +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// "baseline" JPEG/JFIF decoder +// +// simple implementation +// - doesn't support delayed output of y-dimension +// - simple interface (only one output format: 8-bit interleaved RGB) +// - doesn't try to recover corrupt jpegs +// - doesn't allow partial loading, loading multiple at once +// - still fast on x86 (copying globals into locals doesn't help x86) +// - allocates lots of intermediate memory (full size of all components) +// - non-interleaved case requires this anyway +// - allows good upsampling (see next) +// high-quality +// - upsampled channels are bilinearly interpolated, even across blocks +// - quality integer IDCT derived from IJG's 'slow' +// performance +// - fast huffman; reasonable integer IDCT +// - some SIMD kernels for common paths on targets with SSE2/NEON +// - uses a lot of intermediate memory, could cache poorly + +#ifndef STBI_NO_JPEG + +// huffman decoding acceleration +#define FAST_BITS 9 // larger handles more cases; smaller stomps less cache + +typedef struct +{ + stbi_uc fast[1 << FAST_BITS]; + // 
weirdly, repacking this into AoS is a 10% speed loss, instead of a win + stbi__uint16 code[256]; + stbi_uc values[256]; + stbi_uc size[257]; + unsigned int maxcode[18]; + int delta[17]; // old 'firstsymbol' - old 'firstcode' +} stbi__huffman; + +typedef struct +{ + stbi__context *s; + stbi__huffman huff_dc[4]; + stbi__huffman huff_ac[4]; + stbi__uint16 dequant[4][64]; + stbi__int16 fast_ac[4][1 << FAST_BITS]; + +// sizes for components, interleaved MCUs + int img_h_max, img_v_max; + int img_mcu_x, img_mcu_y; + int img_mcu_w, img_mcu_h; + +// definition of jpeg image component + struct + { + int id; + int h,v; + int tq; + int hd,ha; + int dc_pred; + + int x,y,w2,h2; + stbi_uc *data; + void *raw_data, *raw_coeff; + stbi_uc *linebuf; + short *coeff; // progressive only + int coeff_w, coeff_h; // number of 8x8 coefficient blocks + } img_comp[4]; + + stbi__uint32 code_buffer; // jpeg entropy-coded buffer + int code_bits; // number of valid bits + unsigned char marker; // marker seen while filling entropy buffer + int nomore; // flag if we saw a marker so must stop + + int progressive; + int spec_start; + int spec_end; + int succ_high; + int succ_low; + int eob_run; + int jfif; + int app14_color_transform; // Adobe APP14 tag + int rgb; + + int scan_n, order[4]; + int restart_interval, todo; + +// kernels + void (*idct_block_kernel)(stbi_uc *out, int out_stride, short data[64]); + void (*YCbCr_to_RGB_kernel)(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step); + stbi_uc *(*resample_row_hv_2_kernel)(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs); +} stbi__jpeg; + +static int stbi__build_huffman(stbi__huffman *h, int *count) +{ + int i,j,k=0; + unsigned int code; + // build size list for each symbol (from JPEG spec) + for (i=0; i < 16; ++i) { + for (j=0; j < count[i]; ++j) { + h->size[k++] = (stbi_uc) (i+1); + if(k >= 257) return stbi__err("bad size list","Corrupt JPEG"); + } + } + h->size[k] = 0; + + // compute 
actual symbols (from jpeg spec) + code = 0; + k = 0; + for(j=1; j <= 16; ++j) { + // compute delta to add to code to compute symbol id + h->delta[j] = k - code; + if (h->size[k] == j) { + while (h->size[k] == j) + h->code[k++] = (stbi__uint16) (code++); + if (code-1 >= (1u << j)) return stbi__err("bad code lengths","Corrupt JPEG"); + } + // compute largest code + 1 for this size, preshifted as needed later + h->maxcode[j] = code << (16-j); + code <<= 1; + } + h->maxcode[j] = 0xffffffff; + + // build non-spec acceleration table; 255 is flag for not-accelerated + memset(h->fast, 255, 1 << FAST_BITS); + for (i=0; i < k; ++i) { + int s = h->size[i]; + if (s <= FAST_BITS) { + int c = h->code[i] << (FAST_BITS-s); + int m = 1 << (FAST_BITS-s); + for (j=0; j < m; ++j) { + h->fast[c+j] = (stbi_uc) i; + } + } + } + return 1; +} + +// build a table that decodes both magnitude and value of small ACs in +// one go. +static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h) +{ + int i; + for (i=0; i < (1 << FAST_BITS); ++i) { + stbi_uc fast = h->fast[i]; + fast_ac[i] = 0; + if (fast < 255) { + int rs = h->values[fast]; + int run = (rs >> 4) & 15; + int magbits = rs & 15; + int len = h->size[fast]; + + if (magbits && len + magbits <= FAST_BITS) { + // magnitude code followed by receive_extend code + int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits); + int m = 1 << (magbits - 1); + if (k < m) k += (~0U << magbits) + 1; + // if the result is small enough, we can fit it in fast_ac table + if (k >= -128 && k <= 127) + fast_ac[i] = (stbi__int16) ((k * 256) + (run * 16) + (len + magbits)); + } + } + } +} + +static void stbi__grow_buffer_unsafe(stbi__jpeg *j) +{ + do { + unsigned int b = j->nomore ? 
0 : stbi__get8(j->s); + if (b == 0xff) { + int c = stbi__get8(j->s); + while (c == 0xff) c = stbi__get8(j->s); // consume fill bytes + if (c != 0) { + j->marker = (unsigned char) c; + j->nomore = 1; + return; + } + } + j->code_buffer |= b << (24 - j->code_bits); + j->code_bits += 8; + } while (j->code_bits <= 24); +} + +// (1 << n) - 1 +static const stbi__uint32 stbi__bmask[17]={0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535}; + +// decode a jpeg huffman value from the bitstream +stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h) +{ + unsigned int temp; + int c,k; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + // look at the top FAST_BITS and determine what symbol ID it is, + // if the code is <= FAST_BITS + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + k = h->fast[c]; + if (k < 255) { + int s = h->size[k]; + if (s > j->code_bits) + return -1; + j->code_buffer <<= s; + j->code_bits -= s; + return h->values[k]; + } + + // naive test is to shift the code_buffer down so k bits are + // valid, then test against maxcode. To speed this up, we've + // preshifted maxcode left so that it has (16-k) 0s at the + // end; in other words, regardless of the number of bits, it + // wants to be compared against something shifted to have 16; + // that way we don't need to shift inside the loop. + temp = j->code_buffer >> 16; + for (k=FAST_BITS+1 ; ; ++k) + if (temp < h->maxcode[k]) + break; + if (k == 17) { + // error! code not found + j->code_bits -= 16; + return -1; + } + + if (k > j->code_bits) + return -1; + + // convert the huffman code to the symbol id + c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k]; + if(c < 0 || c >= 256) // symbol id out of bounds! 
+ return -1; + STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]); + + // convert the id to a symbol + j->code_bits -= k; + j->code_buffer <<= k; + return h->values[c]; +} + +// bias[n] = (-1<code_bits < n) stbi__grow_buffer_unsafe(j); + if (j->code_bits < n) return 0; // ran out of bits from stream, return 0s intead of continuing + + sgn = j->code_buffer >> 31; // sign bit always in MSB; 0 if MSB clear (positive), 1 if MSB set (negative) + k = stbi_lrot(j->code_buffer, n); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k + (stbi__jbias[n] & (sgn - 1)); +} + +// get some unsigned bits +stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n) +{ + unsigned int k; + if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + if (j->code_bits < n) return 0; // ran out of bits from stream, return 0s intead of continuing + k = stbi_lrot(j->code_buffer, n); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k; +} + +stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j) +{ + unsigned int k; + if (j->code_bits < 1) stbi__grow_buffer_unsafe(j); + if (j->code_bits < 1) return 0; // ran out of bits from stream, return 0s intead of continuing + k = j->code_buffer; + j->code_buffer <<= 1; + --j->code_bits; + return k & 0x80000000; +} + +// given a value that's at position X in the zigzag stream, +// where does it appear in the 8x8 matrix coded as row-major? 
+static const stbi_uc stbi__jpeg_dezigzag[64+15] = +{ + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63, + // let corrupt input sample past end + 63, 63, 63, 63, 63, 63, 63, 63, + 63, 63, 63, 63, 63, 63, 63 +}; + +// decode one 64-entry block-- +static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi__uint16 *dequant) +{ + int diff,dc,k; + int t; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + t = stbi__jpeg_huff_decode(j, hdc); + if (t < 0 || t > 15) return stbi__err("bad huffman code","Corrupt JPEG"); + + // 0 all the ac values now so we can do it 32-bits at a time + memset(data,0,64*sizeof(data[0])); + + diff = t ? stbi__extend_receive(j, t) : 0; + if (!stbi__addints_valid(j->img_comp[b].dc_pred, diff)) return stbi__err("bad delta","Corrupt JPEG"); + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + if (!stbi__mul2shorts_valid(dc, dequant[0])) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + data[0] = (short) (dc * dequant[0]); + + // decode AC components, see JPEG spec + k = 1; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + if (s > j->code_bits) return stbi__err("bad huffman code", "Combined length longer than code bits available"); + j->code_buffer <<= s; + j->code_bits -= s; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) * dequant[zig]); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = 
rs & 15; + r = rs >> 4; + if (s == 0) { + if (rs != 0xf0) break; // end block + k += 16; + } else { + k += r; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) * dequant[zig]); + } + } + } while (k < 64); + return 1; +} + +static int stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__huffman *hdc, int b) +{ + int diff,dc; + int t; + if (j->spec_end != 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + if (j->succ_high == 0) { + // first scan for DC coefficient, must be first + memset(data,0,64*sizeof(data[0])); // 0 all the ac values now + t = stbi__jpeg_huff_decode(j, hdc); + if (t < 0 || t > 15) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + diff = t ? stbi__extend_receive(j, t) : 0; + + if (!stbi__addints_valid(j->img_comp[b].dc_pred, diff)) return stbi__err("bad delta", "Corrupt JPEG"); + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + if (!stbi__mul2shorts_valid(dc, 1 << j->succ_low)) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + data[0] = (short) (dc * (1 << j->succ_low)); + } else { + // refinement scan for DC coefficient + if (stbi__jpeg_get_bit(j)) + data[0] += (short) (1 << j->succ_low); + } + return 1; +} + +// @OPTIMIZE: store non-zigzagged during the decode passes, +// and only de-zigzag when dequantizing +static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__huffman *hac, stbi__int16 *fac) +{ + int k; + if (j->spec_start == 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->succ_high == 0) { + int shift = j->succ_low; + + if (j->eob_run) { + --j->eob_run; + return 1; + } + + k = j->spec_start; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += 
(r >> 4) & 15; // run + s = r & 15; // combined length + if (s > j->code_bits) return stbi__err("bad huffman code", "Combined length longer than code bits available"); + j->code_buffer <<= s; + j->code_bits -= s; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) * (1 << shift)); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r); + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + --j->eob_run; + break; + } + k += 16; + } else { + k += r; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) * (1 << shift)); + } + } + } while (k <= j->spec_end); + } else { + // refinement scan for these AC coefficients + + short bit = (short) (1 << j->succ_low); + + if (j->eob_run) { + --j->eob_run; + for (k = j->spec_start; k <= j->spec_end; ++k) { + short *p = &data[stbi__jpeg_dezigzag[k]]; + if (*p != 0) + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } + } else { + k = j->spec_start; + do { + int r,s; + int rs = stbi__jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use the fast path here, advance-by-r is so slow, eh + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r) - 1; + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + r = 64; // force end of block + } else { + // r=15 s=0 should write 16 0s, so we just do + // a run of 15 0s and then write s (which is 0), + // so we don't have to do anything special here + } + } else { + if (s != 1) return stbi__err("bad huffman code", "Corrupt JPEG"); + // sign bit + if (stbi__jpeg_get_bit(j)) + s = bit; + else + s = -bit; + } + + // advance by r + while (k <= j->spec_end) { + short *p = &data[stbi__jpeg_dezigzag[k++]]; + if (*p != 0) { + if (stbi__jpeg_get_bit(j)) + if ((*p & 
bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } else { + if (r == 0) { + *p = (short) s; + break; + } + --r; + } + } + } while (k <= j->spec_end); + } + } + return 1; +} + +// take a -128..127 value and stbi__clamp it and convert to 0..255 +stbi_inline static stbi_uc stbi__clamp(int x) +{ + // trick to use a single test to catch both cases + if ((unsigned int) x > 255) { + if (x < 0) return 0; + if (x > 255) return 255; + } + return (stbi_uc) x; +} + +#define stbi__f2f(x) ((int) (((x) * 4096 + 0.5))) +#define stbi__fsh(x) ((x) * 4096) + +// derived from jidctint -- DCT_ISLOW +#define STBI__IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \ + int t0,t1,t2,t3,p1,p2,p3,p4,p5,x0,x1,x2,x3; \ + p2 = s2; \ + p3 = s6; \ + p1 = (p2+p3) * stbi__f2f(0.5411961f); \ + t2 = p1 + p3*stbi__f2f(-1.847759065f); \ + t3 = p1 + p2*stbi__f2f( 0.765366865f); \ + p2 = s0; \ + p3 = s4; \ + t0 = stbi__fsh(p2+p3); \ + t1 = stbi__fsh(p2-p3); \ + x0 = t0+t3; \ + x3 = t0-t3; \ + x1 = t1+t2; \ + x2 = t1-t2; \ + t0 = s7; \ + t1 = s5; \ + t2 = s3; \ + t3 = s1; \ + p3 = t0+t2; \ + p4 = t1+t3; \ + p1 = t0+t3; \ + p2 = t1+t2; \ + p5 = (p3+p4)*stbi__f2f( 1.175875602f); \ + t0 = t0*stbi__f2f( 0.298631336f); \ + t1 = t1*stbi__f2f( 2.053119869f); \ + t2 = t2*stbi__f2f( 3.072711026f); \ + t3 = t3*stbi__f2f( 1.501321110f); \ + p1 = p5 + p1*stbi__f2f(-0.899976223f); \ + p2 = p5 + p2*stbi__f2f(-2.562915447f); \ + p3 = p3*stbi__f2f(-1.961570560f); \ + p4 = p4*stbi__f2f(-0.390180644f); \ + t3 += p1+p4; \ + t2 += p2+p3; \ + t1 += p2+p4; \ + t0 += p1+p3; + +static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64]) +{ + int i,val[64],*v=val; + stbi_uc *o; + short *d = data; + + // columns + for (i=0; i < 8; ++i,++d, ++v) { + // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing + if (d[ 8]==0 && d[16]==0 && d[24]==0 && d[32]==0 + && d[40]==0 && d[48]==0 && d[56]==0) { + // no shortcut 0 seconds + // (1|2|3|4|5|6|7)==0 0 seconds + // all separate -0.047 seconds + // 1 && 2|3 && 
4|5 && 6|7: -0.047 seconds + int dcterm = d[0]*4; + v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm; + } else { + STBI__IDCT_1D(d[ 0],d[ 8],d[16],d[24],d[32],d[40],d[48],d[56]) + // constants scaled things up by 1<<12; let's bring them back + // down, but keep 2 extra bits of precision + x0 += 512; x1 += 512; x2 += 512; x3 += 512; + v[ 0] = (x0+t3) >> 10; + v[56] = (x0-t3) >> 10; + v[ 8] = (x1+t2) >> 10; + v[48] = (x1-t2) >> 10; + v[16] = (x2+t1) >> 10; + v[40] = (x2-t1) >> 10; + v[24] = (x3+t0) >> 10; + v[32] = (x3-t0) >> 10; + } + } + + for (i=0, v=val, o=out; i < 8; ++i,v+=8,o+=out_stride) { + // no fast case since the first 1D IDCT spread components out + STBI__IDCT_1D(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7]) + // constants scaled things up by 1<<12, plus we had 1<<2 from first + // loop, plus horizontal and vertical each scale by sqrt(8) so together + // we've got an extra 1<<3, so 1<<17 total we need to remove. + // so we want to round that, which means adding 0.5 * 1<<17, + // aka 65536. Also, we'll end up with -128 to 127 that we want + // to encode as 0..255 by adding 128, so we'll add that before the shift + x0 += 65536 + (128<<17); + x1 += 65536 + (128<<17); + x2 += 65536 + (128<<17); + x3 += 65536 + (128<<17); + // tried computing the shifts into temps, or'ing the temps to see + // if any were out of range, but that was slower + o[0] = stbi__clamp((x0+t3) >> 17); + o[7] = stbi__clamp((x0-t3) >> 17); + o[1] = stbi__clamp((x1+t2) >> 17); + o[6] = stbi__clamp((x1-t2) >> 17); + o[2] = stbi__clamp((x2+t1) >> 17); + o[5] = stbi__clamp((x2-t1) >> 17); + o[3] = stbi__clamp((x3+t0) >> 17); + o[4] = stbi__clamp((x3-t0) >> 17); + } +} + +#ifdef STBI_SSE2 +// sse2 integer IDCT. not the fastest possible implementation but it +// produces bit-identical results to the generic C version so it's +// fully "transparent". 
+static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + // This is constructed to match our regular (generic) integer IDCT exactly. + __m128i row0, row1, row2, row3, row4, row5, row6, row7; + __m128i tmp; + + // dot product constant: even elems=x, odd elems=y + #define dct_const(x,y) _mm_setr_epi16((x),(y),(x),(y),(x),(y),(x),(y)) + + // out(0) = c0[even]*x + c0[odd]*y (c0, x, y 16-bit, out 32-bit) + // out(1) = c1[even]*x + c1[odd]*y + #define dct_rot(out0,out1, x,y,c0,c1) \ + __m128i c0##lo = _mm_unpacklo_epi16((x),(y)); \ + __m128i c0##hi = _mm_unpackhi_epi16((x),(y)); \ + __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \ + __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \ + __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \ + __m128i out1##_h = _mm_madd_epi16(c0##hi, c1) + + // out = in << 12 (in 16-bit, out 32-bit) + #define dct_widen(out, in) \ + __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \ + __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4) + + // wide add + #define dct_wadd(out, a, b) \ + __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_add_epi32(a##_h, b##_h) + + // wide sub + #define dct_wsub(out, a, b) \ + __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_sub_epi32(a##_h, b##_h) + + // butterfly a/b, add bias, then shift by "s" and pack + #define dct_bfly32o(out0, out1, a,b,bias,s) \ + { \ + __m128i abiased_l = _mm_add_epi32(a##_l, bias); \ + __m128i abiased_h = _mm_add_epi32(a##_h, bias); \ + dct_wadd(sum, abiased, b); \ + dct_wsub(dif, abiased, b); \ + out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \ + out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \ + } + + // 8-bit interleave step (for transposes) + #define dct_interleave8(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi8(a, b); \ + b = _mm_unpackhi_epi8(tmp, b) + + // 16-bit interleave step (for transposes) + 
#define dct_interleave16(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi16(a, b); \ + b = _mm_unpackhi_epi16(tmp, b) + + #define dct_pass(bias,shift) \ + { \ + /* even part */ \ + dct_rot(t2e,t3e, row2,row6, rot0_0,rot0_1); \ + __m128i sum04 = _mm_add_epi16(row0, row4); \ + __m128i dif04 = _mm_sub_epi16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + dct_rot(y0o,y2o, row7,row3, rot2_0,rot2_1); \ + dct_rot(y1o,y3o, row5,row1, rot3_0,rot3_1); \ + __m128i sum17 = _mm_add_epi16(row1, row7); \ + __m128i sum35 = _mm_add_epi16(row3, row5); \ + dct_rot(y4o,y5o, sum17,sum35, rot1_0,rot1_1); \ + dct_wadd(x4, y0o, y4o); \ + dct_wadd(x5, y1o, y5o); \ + dct_wadd(x6, y2o, y5o); \ + dct_wadd(x7, y3o, y4o); \ + dct_bfly32o(row0,row7, x0,x7,bias,shift); \ + dct_bfly32o(row1,row6, x1,x6,bias,shift); \ + dct_bfly32o(row2,row5, x2,x5,bias,shift); \ + dct_bfly32o(row3,row4, x3,x4,bias,shift); \ + } + + __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f)); + __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f( 0.765366865f), stbi__f2f(0.5411961f)); + __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f)); + __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f)); + __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f( 0.298631336f), stbi__f2f(-1.961570560f)); + __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f( 3.072711026f)); + __m128i rot3_0 = dct_const(stbi__f2f(-0.390180644f) + stbi__f2f( 2.053119869f), stbi__f2f(-0.390180644f)); + __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f( 1.501321110f)); + + // rounding biases in column/row passes, see stbi__idct_block for explanation. 
+ __m128i bias_0 = _mm_set1_epi32(512); + __m128i bias_1 = _mm_set1_epi32(65536 + (128<<17)); + + // load + row0 = _mm_load_si128((const __m128i *) (data + 0*8)); + row1 = _mm_load_si128((const __m128i *) (data + 1*8)); + row2 = _mm_load_si128((const __m128i *) (data + 2*8)); + row3 = _mm_load_si128((const __m128i *) (data + 3*8)); + row4 = _mm_load_si128((const __m128i *) (data + 4*8)); + row5 = _mm_load_si128((const __m128i *) (data + 5*8)); + row6 = _mm_load_si128((const __m128i *) (data + 6*8)); + row7 = _mm_load_si128((const __m128i *) (data + 7*8)); + + // column pass + dct_pass(bias_0, 10); + + { + // 16bit 8x8 transpose pass 1 + dct_interleave16(row0, row4); + dct_interleave16(row1, row5); + dct_interleave16(row2, row6); + dct_interleave16(row3, row7); + + // transpose pass 2 + dct_interleave16(row0, row2); + dct_interleave16(row1, row3); + dct_interleave16(row4, row6); + dct_interleave16(row5, row7); + + // transpose pass 3 + dct_interleave16(row0, row1); + dct_interleave16(row2, row3); + dct_interleave16(row4, row5); + dct_interleave16(row6, row7); + } + + // row pass + dct_pass(bias_1, 17); + + { + // pack + __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7 + __m128i p1 = _mm_packus_epi16(row2, row3); + __m128i p2 = _mm_packus_epi16(row4, row5); + __m128i p3 = _mm_packus_epi16(row6, row7); + + // 8bit 8x8 transpose pass 1 + dct_interleave8(p0, p2); // a0e0a1e1... + dct_interleave8(p1, p3); // c0g0c1g1... + + // transpose pass 2 + dct_interleave8(p0, p1); // a0c0e0g0... + dct_interleave8(p2, p3); // b0d0f0h0... + + // transpose pass 3 + dct_interleave8(p0, p2); // a0b0c0d0... + dct_interleave8(p1, p3); // a4b4c4d4... 
+ + // store + _mm_storel_epi64((__m128i *) out, p0); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p0, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p2); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p2, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p1); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p1, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p3); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p3, 0x4e)); + } + +#undef dct_const +#undef dct_rot +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_interleave8 +#undef dct_interleave16 +#undef dct_pass +} + +#endif // STBI_SSE2 + +#ifdef STBI_NEON + +// NEON integer IDCT. should produce bit-identical +// results to the generic C version. +static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + int16x8_t row0, row1, row2, row3, row4, row5, row6, row7; + + int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f)); + int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f)); + int16x4_t rot0_2 = vdup_n_s16(stbi__f2f( 0.765366865f)); + int16x4_t rot1_0 = vdup_n_s16(stbi__f2f( 1.175875602f)); + int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f)); + int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f)); + int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f)); + int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f)); + int16x4_t rot3_0 = vdup_n_s16(stbi__f2f( 0.298631336f)); + int16x4_t rot3_1 = vdup_n_s16(stbi__f2f( 2.053119869f)); + int16x4_t rot3_2 = vdup_n_s16(stbi__f2f( 3.072711026f)); + int16x4_t rot3_3 = vdup_n_s16(stbi__f2f( 1.501321110f)); + +#define dct_long_mul(out, inq, coeff) \ + int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff) + +#define dct_long_mac(out, acc, inq, coeff) \ + int32x4_t out##_l = vmlal_s16(acc##_l, 
vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff) + +#define dct_widen(out, inq) \ + int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \ + int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12) + +// wide add +#define dct_wadd(out, a, b) \ + int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vaddq_s32(a##_h, b##_h) + +// wide sub +#define dct_wsub(out, a, b) \ + int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vsubq_s32(a##_h, b##_h) + +// butterfly a/b, then shift using "shiftop" by "s" and pack +#define dct_bfly32o(out0,out1, a,b,shiftop,s) \ + { \ + dct_wadd(sum, a, b); \ + dct_wsub(dif, a, b); \ + out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \ + out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \ + } + +#define dct_pass(shiftop, shift) \ + { \ + /* even part */ \ + int16x8_t sum26 = vaddq_s16(row2, row6); \ + dct_long_mul(p1e, sum26, rot0_0); \ + dct_long_mac(t2e, p1e, row6, rot0_1); \ + dct_long_mac(t3e, p1e, row2, rot0_2); \ + int16x8_t sum04 = vaddq_s16(row0, row4); \ + int16x8_t dif04 = vsubq_s16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + int16x8_t sum15 = vaddq_s16(row1, row5); \ + int16x8_t sum17 = vaddq_s16(row1, row7); \ + int16x8_t sum35 = vaddq_s16(row3, row5); \ + int16x8_t sum37 = vaddq_s16(row3, row7); \ + int16x8_t sumodd = vaddq_s16(sum17, sum35); \ + dct_long_mul(p5o, sumodd, rot1_0); \ + dct_long_mac(p1o, p5o, sum17, rot1_1); \ + dct_long_mac(p2o, p5o, sum35, rot1_2); \ + dct_long_mul(p3o, sum37, rot2_0); \ + dct_long_mul(p4o, sum15, rot2_1); \ + dct_wadd(sump13o, p1o, p3o); \ + dct_wadd(sump24o, p2o, p4o); \ + dct_wadd(sump23o, p2o, p3o); \ + dct_wadd(sump14o, p1o, p4o); \ + dct_long_mac(x4, sump13o, row7, rot3_0); \ + dct_long_mac(x5, sump24o, row5, rot3_1); \ + 
dct_long_mac(x6, sump23o, row3, rot3_2); \ + dct_long_mac(x7, sump14o, row1, rot3_3); \ + dct_bfly32o(row0,row7, x0,x7,shiftop,shift); \ + dct_bfly32o(row1,row6, x1,x6,shiftop,shift); \ + dct_bfly32o(row2,row5, x2,x5,shiftop,shift); \ + dct_bfly32o(row3,row4, x3,x4,shiftop,shift); \ + } + + // load + row0 = vld1q_s16(data + 0*8); + row1 = vld1q_s16(data + 1*8); + row2 = vld1q_s16(data + 2*8); + row3 = vld1q_s16(data + 3*8); + row4 = vld1q_s16(data + 4*8); + row5 = vld1q_s16(data + 5*8); + row6 = vld1q_s16(data + 6*8); + row7 = vld1q_s16(data + 7*8); + + // add DC bias + row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0)); + + // column pass + dct_pass(vrshrn_n_s32, 10); + + // 16bit 8x8 transpose + { +// these three map to a single VTRN.16, VTRN.32, and VSWP, respectively. +// whether compilers actually get this is another story, sadly. +#define dct_trn16(x, y) { int16x8x2_t t = vtrnq_s16(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn32(x, y) { int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); x = vreinterpretq_s16_s32(t.val[0]); y = vreinterpretq_s16_s32(t.val[1]); } +#define dct_trn64(x, y) { int16x8_t x0 = x; int16x8_t y0 = y; x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); } + + // pass 1 + dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6 + dct_trn16(row2, row3); + dct_trn16(row4, row5); + dct_trn16(row6, row7); + + // pass 2 + dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4 + dct_trn32(row1, row3); + dct_trn32(row4, row6); + dct_trn32(row5, row7); + + // pass 3 + dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0 + dct_trn64(row1, row5); + dct_trn64(row2, row6); + dct_trn64(row3, row7); + +#undef dct_trn16 +#undef dct_trn32 +#undef dct_trn64 + } + + // row pass + // vrshrn_n_s32 only supports shifts up to 16, we need + // 17. so do a non-rounding shift of 16 first then follow + // up with a rounding shift by 1. 
+ dct_pass(vshrn_n_s32, 16); + + { + // pack and round + uint8x8_t p0 = vqrshrun_n_s16(row0, 1); + uint8x8_t p1 = vqrshrun_n_s16(row1, 1); + uint8x8_t p2 = vqrshrun_n_s16(row2, 1); + uint8x8_t p3 = vqrshrun_n_s16(row3, 1); + uint8x8_t p4 = vqrshrun_n_s16(row4, 1); + uint8x8_t p5 = vqrshrun_n_s16(row5, 1); + uint8x8_t p6 = vqrshrun_n_s16(row6, 1); + uint8x8_t p7 = vqrshrun_n_s16(row7, 1); + + // again, these can translate into one instruction, but often don't. +#define dct_trn8_8(x, y) { uint8x8x2_t t = vtrn_u8(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn8_16(x, y) { uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); x = vreinterpret_u8_u16(t.val[0]); y = vreinterpret_u8_u16(t.val[1]); } +#define dct_trn8_32(x, y) { uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); x = vreinterpret_u8_u32(t.val[0]); y = vreinterpret_u8_u32(t.val[1]); } + + // sadly can't use interleaved stores here since we only write + // 8 bytes to each scan line! 
+ + // 8x8 8-bit transpose pass 1 + dct_trn8_8(p0, p1); + dct_trn8_8(p2, p3); + dct_trn8_8(p4, p5); + dct_trn8_8(p6, p7); + + // pass 2 + dct_trn8_16(p0, p2); + dct_trn8_16(p1, p3); + dct_trn8_16(p4, p6); + dct_trn8_16(p5, p7); + + // pass 3 + dct_trn8_32(p0, p4); + dct_trn8_32(p1, p5); + dct_trn8_32(p2, p6); + dct_trn8_32(p3, p7); + + // store + vst1_u8(out, p0); out += out_stride; + vst1_u8(out, p1); out += out_stride; + vst1_u8(out, p2); out += out_stride; + vst1_u8(out, p3); out += out_stride; + vst1_u8(out, p4); out += out_stride; + vst1_u8(out, p5); out += out_stride; + vst1_u8(out, p6); out += out_stride; + vst1_u8(out, p7); + +#undef dct_trn8_8 +#undef dct_trn8_16 +#undef dct_trn8_32 + } + +#undef dct_long_mul +#undef dct_long_mac +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_pass +} + +#endif // STBI_NEON + +#define STBI__MARKER_none 0xff +// if there's a pending marker from the entropy stream, return that +// otherwise, fetch from the stream and get a marker. if there's no +// marker, return 0xff, which is never a valid marker value +static stbi_uc stbi__get_marker(stbi__jpeg *j) +{ + stbi_uc x; + if (j->marker != STBI__MARKER_none) { x = j->marker; j->marker = STBI__MARKER_none; return x; } + x = stbi__get8(j->s); + if (x != 0xff) return STBI__MARKER_none; + while (x == 0xff) + x = stbi__get8(j->s); // consume repeated 0xff fill bytes + return x; +} + +// in each scan, we'll have scan_n components, and the order +// of the components is specified by order[] +#define STBI__RESTART(x) ((x) >= 0xd0 && (x) <= 0xd7) + +// after a restart interval, stbi__jpeg_reset the entropy decoder and +// the dc prediction +static void stbi__jpeg_reset(stbi__jpeg *j) +{ + j->code_bits = 0; + j->code_buffer = 0; + j->nomore = 0; + j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = j->img_comp[3].dc_pred = 0; + j->marker = STBI__MARKER_none; + j->todo = j->restart_interval ? 
j->restart_interval : 0x7fffffff; + j->eob_run = 0; + // no more than 1<<31 MCUs if no restart_interal? that's plenty safe, + // since we don't even allow 1<<30 pixels +} + +static int stbi__parse_entropy_coded_data(stbi__jpeg *z) +{ + stbi__jpeg_reset(z); + if (!z->progressive) { + if (z->scan_n == 1) { + int i,j; + STBI_SIMD_ALIGN(short, data[64]); + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + // if it's NOT a restart, then just bail, so we get corrupt data + // rather than no data + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int i,j,k,x,y; + STBI_SIMD_ALIGN(short, data[64]); + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... 
process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x)*8; + int y2 = (j*z->img_comp[n].v + y)*8; + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*y2+x2, z->img_comp[n].w2, data); + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } else { + if (z->scan_n == 1) { + int i,j; + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + if (z->spec_start == 0) { + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } else { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha])) + return 0; + } + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int 
i,j,k,x,y; + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x); + int y2 = (j*z->img_comp[n].v + y); + short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w); + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } +} + +static void stbi__jpeg_dequantize(short *data, stbi__uint16 *dequant) +{ + int i; + for (i=0; i < 64; ++i) + data[i] *= dequant[i]; +} + +static void stbi__jpeg_finish(stbi__jpeg *z) +{ + if (z->progressive) { + // dequantize and idct the data + int i,j,n; + for (n=0; n < z->s->img_n; ++n) { + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]); + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + } + } + } + } +} + +static int stbi__process_marker(stbi__jpeg *z, int m) +{ + int L; + switch (m) { + case STBI__MARKER_none: // no marker found + return stbi__err("expected marker","Corrupt JPEG"); + + case 0xDD: // DRI - specify restart interval + if (stbi__get16be(z->s) != 4) return stbi__err("bad DRI len","Corrupt JPEG"); + z->restart_interval = 
stbi__get16be(z->s); + return 1; + + case 0xDB: // DQT - define quantization table + L = stbi__get16be(z->s)-2; + while (L > 0) { + int q = stbi__get8(z->s); + int p = q >> 4, sixteen = (p != 0); + int t = q & 15,i; + if (p != 0 && p != 1) return stbi__err("bad DQT type","Corrupt JPEG"); + if (t > 3) return stbi__err("bad DQT table","Corrupt JPEG"); + + for (i=0; i < 64; ++i) + z->dequant[t][stbi__jpeg_dezigzag[i]] = (stbi__uint16)(sixteen ? stbi__get16be(z->s) : stbi__get8(z->s)); + L -= (sixteen ? 129 : 65); + } + return L==0; + + case 0xC4: // DHT - define huffman table + L = stbi__get16be(z->s)-2; + while (L > 0) { + stbi_uc *v; + int sizes[16],i,n=0; + int q = stbi__get8(z->s); + int tc = q >> 4; + int th = q & 15; + if (tc > 1 || th > 3) return stbi__err("bad DHT header","Corrupt JPEG"); + for (i=0; i < 16; ++i) { + sizes[i] = stbi__get8(z->s); + n += sizes[i]; + } + if(n > 256) return stbi__err("bad DHT header","Corrupt JPEG"); // Loop over i < n would write past end of values! + L -= 17; + if (tc == 0) { + if (!stbi__build_huffman(z->huff_dc+th, sizes)) return 0; + v = z->huff_dc[th].values; + } else { + if (!stbi__build_huffman(z->huff_ac+th, sizes)) return 0; + v = z->huff_ac[th].values; + } + for (i=0; i < n; ++i) + v[i] = stbi__get8(z->s); + if (tc != 0) + stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th); + L -= n; + } + return L==0; + } + + // check for comment block or APP blocks + if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) { + L = stbi__get16be(z->s); + if (L < 2) { + if (m == 0xFE) + return stbi__err("bad COM len","Corrupt JPEG"); + else + return stbi__err("bad APP len","Corrupt JPEG"); + } + L -= 2; + + if (m == 0xE0 && L >= 5) { // JFIF APP0 segment + static const unsigned char tag[5] = {'J','F','I','F','\0'}; + int ok = 1; + int i; + for (i=0; i < 5; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 5; + if (ok) + z->jfif = 1; + } else if (m == 0xEE && L >= 12) { // Adobe APP14 segment + static const unsigned char tag[6] = 
{'A','d','o','b','e','\0'}; + int ok = 1; + int i; + for (i=0; i < 6; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 6; + if (ok) { + stbi__get8(z->s); // version + stbi__get16be(z->s); // flags0 + stbi__get16be(z->s); // flags1 + z->app14_color_transform = stbi__get8(z->s); // color transform + L -= 6; + } + } + + stbi__skip(z->s, L); + return 1; + } + + return stbi__err("unknown marker","Corrupt JPEG"); +} + +// after we see SOS +static int stbi__process_scan_header(stbi__jpeg *z) +{ + int i; + int Ls = stbi__get16be(z->s); + z->scan_n = stbi__get8(z->s); + if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int) z->s->img_n) return stbi__err("bad SOS component count","Corrupt JPEG"); + if (Ls != 6+2*z->scan_n) return stbi__err("bad SOS len","Corrupt JPEG"); + for (i=0; i < z->scan_n; ++i) { + int id = stbi__get8(z->s), which; + int q = stbi__get8(z->s); + for (which = 0; which < z->s->img_n; ++which) + if (z->img_comp[which].id == id) + break; + if (which == z->s->img_n) return 0; // no match + z->img_comp[which].hd = q >> 4; if (z->img_comp[which].hd > 3) return stbi__err("bad DC huff","Corrupt JPEG"); + z->img_comp[which].ha = q & 15; if (z->img_comp[which].ha > 3) return stbi__err("bad AC huff","Corrupt JPEG"); + z->order[i] = which; + } + + { + int aa; + z->spec_start = stbi__get8(z->s); + z->spec_end = stbi__get8(z->s); // should be 63, but might be 0 + aa = stbi__get8(z->s); + z->succ_high = (aa >> 4); + z->succ_low = (aa & 15); + if (z->progressive) { + if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13) + return stbi__err("bad SOS", "Corrupt JPEG"); + } else { + if (z->spec_start != 0) return stbi__err("bad SOS","Corrupt JPEG"); + if (z->succ_high != 0 || z->succ_low != 0) return stbi__err("bad SOS","Corrupt JPEG"); + z->spec_end = 63; + } + } + + return 1; +} + +static int stbi__free_jpeg_components(stbi__jpeg *z, int ncomp, int why) +{ + int i; + for (i=0; i < ncomp; ++i) { + if 
(z->img_comp[i].raw_data) { + STBI_FREE(z->img_comp[i].raw_data); + z->img_comp[i].raw_data = NULL; + z->img_comp[i].data = NULL; + } + if (z->img_comp[i].raw_coeff) { + STBI_FREE(z->img_comp[i].raw_coeff); + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].coeff = 0; + } + if (z->img_comp[i].linebuf) { + STBI_FREE(z->img_comp[i].linebuf); + z->img_comp[i].linebuf = NULL; + } + } + return why; +} + +static int stbi__process_frame_header(stbi__jpeg *z, int scan) +{ + stbi__context *s = z->s; + int Lf,p,i,q, h_max=1,v_max=1,c; + Lf = stbi__get16be(s); if (Lf < 11) return stbi__err("bad SOF len","Corrupt JPEG"); // JPEG + p = stbi__get8(s); if (p != 8) return stbi__err("only 8-bit","JPEG format not supported: 8-bit only"); // JPEG baseline + s->img_y = stbi__get16be(s); if (s->img_y == 0) return stbi__err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG + s->img_x = stbi__get16be(s); if (s->img_x == 0) return stbi__err("0 width","Corrupt JPEG"); // JPEG requires + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + c = stbi__get8(s); + if (c != 3 && c != 1 && c != 4) return stbi__err("bad component count","Corrupt JPEG"); + s->img_n = c; + for (i=0; i < c; ++i) { + z->img_comp[i].data = NULL; + z->img_comp[i].linebuf = NULL; + } + + if (Lf != 8+3*s->img_n) return stbi__err("bad SOF len","Corrupt JPEG"); + + z->rgb = 0; + for (i=0; i < s->img_n; ++i) { + static const unsigned char rgb[3] = { 'R', 'G', 'B' }; + z->img_comp[i].id = stbi__get8(s); + if (s->img_n == 3 && z->img_comp[i].id == rgb[i]) + ++z->rgb; + q = stbi__get8(s); + z->img_comp[i].h = (q >> 4); if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi__err("bad H","Corrupt JPEG"); + z->img_comp[i].v = q & 15; if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi__err("bad V","Corrupt 
JPEG"); + z->img_comp[i].tq = stbi__get8(s); if (z->img_comp[i].tq > 3) return stbi__err("bad TQ","Corrupt JPEG"); + } + + if (scan != STBI__SCAN_load) return 1; + + if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) return stbi__err("too large", "Image too large to decode"); + + for (i=0; i < s->img_n; ++i) { + if (z->img_comp[i].h > h_max) h_max = z->img_comp[i].h; + if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v; + } + + // check that plane subsampling factors are integer ratios; our resamplers can't deal with fractional ratios + // and I've never seen a non-corrupted JPEG file actually use them + for (i=0; i < s->img_n; ++i) { + if (h_max % z->img_comp[i].h != 0) return stbi__err("bad H","Corrupt JPEG"); + if (v_max % z->img_comp[i].v != 0) return stbi__err("bad V","Corrupt JPEG"); + } + + // compute interleaved mcu info + z->img_h_max = h_max; + z->img_v_max = v_max; + z->img_mcu_w = h_max * 8; + z->img_mcu_h = v_max * 8; + // these sizes can't be more than 17 bits + z->img_mcu_x = (s->img_x + z->img_mcu_w-1) / z->img_mcu_w; + z->img_mcu_y = (s->img_y + z->img_mcu_h-1) / z->img_mcu_h; + + for (i=0; i < s->img_n; ++i) { + // number of effective pixels (e.g. for non-interleaved MCU) + z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max-1) / h_max; + z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max-1) / v_max; + // to simplify generation, we'll allocate enough memory to decode + // the bogus oversized data from using interleaved MCUs and their + // big blocks (e.g. 
a 16x16 iMCU on an image of width 33); we won't + // discard the extra data until colorspace conversion + // + // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier) + // so these muls can't overflow with 32-bit ints (which we require) + z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8; + z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8; + z->img_comp[i].coeff = 0; + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].linebuf = NULL; + z->img_comp[i].raw_data = stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15); + if (z->img_comp[i].raw_data == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); + // align blocks for idct using mmx/sse + z->img_comp[i].data = (stbi_uc*) (((size_t) z->img_comp[i].raw_data + 15) & ~15); + if (z->progressive) { + // w2, h2 are multiples of 8 (see above) + z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8; + z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8; + z->img_comp[i].raw_coeff = stbi__malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15); + if (z->img_comp[i].raw_coeff == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); + z->img_comp[i].coeff = (short*) (((size_t) z->img_comp[i].raw_coeff + 15) & ~15); + } + } + + return 1; +} + +// use comparisons since in some cases we handle more than one case (e.g. 
SOF) +#define stbi__DNL(x) ((x) == 0xdc) +#define stbi__SOI(x) ((x) == 0xd8) +#define stbi__EOI(x) ((x) == 0xd9) +#define stbi__SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2) +#define stbi__SOS(x) ((x) == 0xda) + +#define stbi__SOF_progressive(x) ((x) == 0xc2) + +static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan) +{ + int m; + z->jfif = 0; + z->app14_color_transform = -1; // valid values are 0,1,2 + z->marker = STBI__MARKER_none; // initialize cached marker to empty + m = stbi__get_marker(z); + if (!stbi__SOI(m)) return stbi__err("no SOI","Corrupt JPEG"); + if (scan == STBI__SCAN_type) return 1; + m = stbi__get_marker(z); + while (!stbi__SOF(m)) { + if (!stbi__process_marker(z,m)) return 0; + m = stbi__get_marker(z); + while (m == STBI__MARKER_none) { + // some files have extra padding after their blocks, so ok, we'll scan + if (stbi__at_eof(z->s)) return stbi__err("no SOF", "Corrupt JPEG"); + m = stbi__get_marker(z); + } + } + z->progressive = stbi__SOF_progressive(m); + if (!stbi__process_frame_header(z, scan)) return 0; + return 1; +} + +static stbi_uc stbi__skip_jpeg_junk_at_end(stbi__jpeg *j) +{ + // some JPEGs have junk at end, skip over it but if we find what looks + // like a valid marker, resume there + while (!stbi__at_eof(j->s)) { + stbi_uc x = stbi__get8(j->s); + while (x == 0xff) { // might be a marker + if (stbi__at_eof(j->s)) return STBI__MARKER_none; + x = stbi__get8(j->s); + if (x != 0x00 && x != 0xff) { + // not a stuffed zero or lead-in to another marker, looks + // like an actual marker, return it + return x; + } + // stuffed zero has x=0 now which ends the loop, meaning we go + // back to regular scan loop. + // repeated 0xff keeps trying to read the next byte of the marker. 
+ } + } + return STBI__MARKER_none; +} + +// decode image to YCbCr format +static int stbi__decode_jpeg_image(stbi__jpeg *j) +{ + int m; + for (m = 0; m < 4; m++) { + j->img_comp[m].raw_data = NULL; + j->img_comp[m].raw_coeff = NULL; + } + j->restart_interval = 0; + if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) return 0; + m = stbi__get_marker(j); + while (!stbi__EOI(m)) { + if (stbi__SOS(m)) { + if (!stbi__process_scan_header(j)) return 0; + if (!stbi__parse_entropy_coded_data(j)) return 0; + if (j->marker == STBI__MARKER_none ) { + j->marker = stbi__skip_jpeg_junk_at_end(j); + // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0 + } + m = stbi__get_marker(j); + if (STBI__RESTART(m)) + m = stbi__get_marker(j); + } else if (stbi__DNL(m)) { + int Ld = stbi__get16be(j->s); + stbi__uint32 NL = stbi__get16be(j->s); + if (Ld != 4) return stbi__err("bad DNL len", "Corrupt JPEG"); + if (NL != j->s->img_y) return stbi__err("bad DNL height", "Corrupt JPEG"); + m = stbi__get_marker(j); + } else { + if (!stbi__process_marker(j, m)) return 1; + m = stbi__get_marker(j); + } + } + if (j->progressive) + stbi__jpeg_finish(j); + return 1; +} + +// static jfif-centered resampling (across block boundaries) + +typedef stbi_uc *(*resample_row_func)(stbi_uc *out, stbi_uc *in0, stbi_uc *in1, + int w, int hs); + +#define stbi__div4(x) ((stbi_uc) ((x) >> 2)) + +static stbi_uc *resample_row_1(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + STBI_NOTUSED(out); + STBI_NOTUSED(in_far); + STBI_NOTUSED(w); + STBI_NOTUSED(hs); + return in_near; +} + +static stbi_uc* stbi__resample_row_v_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples vertically for every one in input + int i; + STBI_NOTUSED(hs); + for (i=0; i < w; ++i) + out[i] = stbi__div4(3*in_near[i] + in_far[i] + 2); + return out; +} + +static stbi_uc* stbi__resample_row_h_2(stbi_uc *out, stbi_uc *in_near, 
stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples horizontally for every one in input + int i; + stbi_uc *input = in_near; + + if (w == 1) { + // if only one sample, can't do any interpolation + out[0] = out[1] = input[0]; + return out; + } + + out[0] = input[0]; + out[1] = stbi__div4(input[0]*3 + input[1] + 2); + for (i=1; i < w-1; ++i) { + int n = 3*input[i]+2; + out[i*2+0] = stbi__div4(n+input[i-1]); + out[i*2+1] = stbi__div4(n+input[i+1]); + } + out[i*2+0] = stbi__div4(input[w-2]*3 + input[w-1] + 2); + out[i*2+1] = input[w-1]; + + STBI_NOTUSED(in_far); + STBI_NOTUSED(hs); + + return out; +} + +#define stbi__div16(x) ((stbi_uc) ((x) >> 4)) + +static stbi_uc *stbi__resample_row_hv_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i,t0,t1; + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + out[0] = stbi__div4(t1+2); + for (i=1; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static stbi_uc *stbi__resample_row_hv_2_simd(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i=0,t0,t1; + + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + // process groups of 8 pixels for as long as we can. + // note we can't handle the last pixel in a row in this loop + // because we need to handle the filter boundary conditions. 
+ for (; i < ((w-1) & ~7); i += 8) { +#if defined(STBI_SSE2) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + __m128i zero = _mm_setzero_si128(); + __m128i farb = _mm_loadl_epi64((__m128i *) (in_far + i)); + __m128i nearb = _mm_loadl_epi64((__m128i *) (in_near + i)); + __m128i farw = _mm_unpacklo_epi8(farb, zero); + __m128i nearw = _mm_unpacklo_epi8(nearb, zero); + __m128i diff = _mm_sub_epi16(farw, nearw); + __m128i nears = _mm_slli_epi16(nearw, 2); + __m128i curr = _mm_add_epi16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. + __m128i prv0 = _mm_slli_si128(curr, 2); + __m128i nxt0 = _mm_srli_si128(curr, 2); + __m128i prev = _mm_insert_epi16(prv0, t1, 0); + __m128i next = _mm_insert_epi16(nxt0, 3*in_near[i+8] + in_far[i+8], 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. + __m128i bias = _mm_set1_epi16(8); + __m128i curs = _mm_slli_epi16(curr, 2); + __m128i prvd = _mm_sub_epi16(prev, curr); + __m128i nxtd = _mm_sub_epi16(next, curr); + __m128i curb = _mm_add_epi16(curs, bias); + __m128i even = _mm_add_epi16(prvd, curb); + __m128i odd = _mm_add_epi16(nxtd, curb); + + // interleave even and odd pixels, then undo scaling. 
+ __m128i int0 = _mm_unpacklo_epi16(even, odd); + __m128i int1 = _mm_unpackhi_epi16(even, odd); + __m128i de0 = _mm_srli_epi16(int0, 4); + __m128i de1 = _mm_srli_epi16(int1, 4); + + // pack and write output + __m128i outv = _mm_packus_epi16(de0, de1); + _mm_storeu_si128((__m128i *) (out + i*2), outv); +#elif defined(STBI_NEON) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + uint8x8_t farb = vld1_u8(in_far + i); + uint8x8_t nearb = vld1_u8(in_near + i); + int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb)); + int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2)); + int16x8_t curr = vaddq_s16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. + int16x8_t prv0 = vextq_s16(curr, curr, 7); + int16x8_t nxt0 = vextq_s16(curr, curr, 1); + int16x8_t prev = vsetq_lane_s16(t1, prv0, 0); + int16x8_t next = vsetq_lane_s16(3*in_near[i+8] + in_far[i+8], nxt0, 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. 
+ int16x8_t curs = vshlq_n_s16(curr, 2); + int16x8_t prvd = vsubq_s16(prev, curr); + int16x8_t nxtd = vsubq_s16(next, curr); + int16x8_t even = vaddq_s16(curs, prvd); + int16x8_t odd = vaddq_s16(curs, nxtd); + + // undo scaling and round, then store with even/odd phases interleaved + uint8x8x2_t o; + o.val[0] = vqrshrun_n_s16(even, 4); + o.val[1] = vqrshrun_n_s16(odd, 4); + vst2_u8(out + i*2, o); +#endif + + // "previous" value for next iter + t1 = 3*in_near[i+7] + in_far[i+7]; + } + + t0 = t1; + t1 = 3*in_near[i] + in_far[i]; + out[i*2] = stbi__div16(3*t1 + t0 + 8); + + for (++i; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} +#endif + +static stbi_uc *stbi__resample_row_generic(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // resample with nearest-neighbor + int i,j; + STBI_NOTUSED(in_far); + for (i=0; i < w; ++i) + for (j=0; j < hs; ++j) + out[i*hs+j] = in_near[i]; + return out; +} + +// this is a reduced-precision calculation of YCbCr-to-RGB introduced +// to make sure the code produces the same results in both SIMD and scalar +#define stbi__float2fixed(x) (((int) ((x) * 4096.0f + 0.5f)) << 8) +static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step) +{ + int i; + for (i=0; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + (cr*-stbi__float2fixed(0.71414f)) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 
0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc const *pcb, stbi_uc const *pcr, int count, int step) +{ + int i = 0; + +#ifdef STBI_SSE2 + // step == 3 is pretty ugly on the final interleave, and i'm not convinced + // it's useful in practice (you wouldn't use it for textures, for example). + // so just accelerate step == 4 case. + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. + __m128i signflip = _mm_set1_epi8(-0x80); + __m128i cr_const0 = _mm_set1_epi16( (short) ( 1.40200f*4096.0f+0.5f)); + __m128i cr_const1 = _mm_set1_epi16( - (short) ( 0.71414f*4096.0f+0.5f)); + __m128i cb_const0 = _mm_set1_epi16( - (short) ( 0.34414f*4096.0f+0.5f)); + __m128i cb_const1 = _mm_set1_epi16( (short) ( 1.77200f*4096.0f+0.5f)); + __m128i y_bias = _mm_set1_epi8((char) (unsigned char) 128); + __m128i xw = _mm_set1_epi16(255); // alpha channel + + for (; i+7 < count; i += 8) { + // load + __m128i y_bytes = _mm_loadl_epi64((__m128i *) (y+i)); + __m128i cr_bytes = _mm_loadl_epi64((__m128i *) (pcr+i)); + __m128i cb_bytes = _mm_loadl_epi64((__m128i *) (pcb+i)); + __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128 + __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128 + + // unpack to short (and left-shift cr, cb by 8) + __m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes); + __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased); + __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased); + + // color transform + __m128i yws = _mm_srli_epi16(yw, 4); + __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw); + __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw); + __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1); + __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1); + __m128i rws = _mm_add_epi16(cr0, yws); + __m128i gwt = 
_mm_add_epi16(cb0, yws); + __m128i bws = _mm_add_epi16(yws, cb1); + __m128i gws = _mm_add_epi16(gwt, cr1); + + // descale + __m128i rw = _mm_srai_epi16(rws, 4); + __m128i bw = _mm_srai_epi16(bws, 4); + __m128i gw = _mm_srai_epi16(gws, 4); + + // back to byte, set up for transpose + __m128i brb = _mm_packus_epi16(rw, bw); + __m128i gxb = _mm_packus_epi16(gw, xw); + + // transpose to interleave channels + __m128i t0 = _mm_unpacklo_epi8(brb, gxb); + __m128i t1 = _mm_unpackhi_epi8(brb, gxb); + __m128i o0 = _mm_unpacklo_epi16(t0, t1); + __m128i o1 = _mm_unpackhi_epi16(t0, t1); + + // store + _mm_storeu_si128((__m128i *) (out + 0), o0); + _mm_storeu_si128((__m128i *) (out + 16), o1); + out += 32; + } + } +#endif + +#ifdef STBI_NEON + // in this version, step=3 support would be easy to add. but is there demand? + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. + uint8x8_t signflip = vdup_n_u8(0x80); + int16x8_t cr_const0 = vdupq_n_s16( (short) ( 1.40200f*4096.0f+0.5f)); + int16x8_t cr_const1 = vdupq_n_s16( - (short) ( 0.71414f*4096.0f+0.5f)); + int16x8_t cb_const0 = vdupq_n_s16( - (short) ( 0.34414f*4096.0f+0.5f)); + int16x8_t cb_const1 = vdupq_n_s16( (short) ( 1.77200f*4096.0f+0.5f)); + + for (; i+7 < count; i += 8) { + // load + uint8x8_t y_bytes = vld1_u8(y + i); + uint8x8_t cr_bytes = vld1_u8(pcr + i); + uint8x8_t cb_bytes = vld1_u8(pcb + i); + int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip)); + int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip)); + + // expand to s16 + int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4)); + int16x8_t crw = vshll_n_s8(cr_biased, 7); + int16x8_t cbw = vshll_n_s8(cb_biased, 7); + + // color transform + int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0); + int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0); + int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1); + int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1); + int16x8_t rws = vaddq_s16(yws, cr0); + int16x8_t gws = 
vaddq_s16(vaddq_s16(yws, cb0), cr1); + int16x8_t bws = vaddq_s16(yws, cb1); + + // undo scaling, round, convert to byte + uint8x8x4_t o; + o.val[0] = vqrshrun_n_s16(rws, 4); + o.val[1] = vqrshrun_n_s16(gws, 4); + o.val[2] = vqrshrun_n_s16(bws, 4); + o.val[3] = vdup_n_u8(255); + + // store, interleaving r/g/b/a + vst4_u8(out, o); + out += 8*4; + } + } +#endif + + for (; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + cr*-stbi__float2fixed(0.71414f) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} +#endif + +// set up the kernels +static void stbi__setup_jpeg(stbi__jpeg *j) +{ + j->idct_block_kernel = stbi__idct_block; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2; + +#ifdef STBI_SSE2 + if (stbi__sse2_available()) { + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; + } +#endif + +#ifdef STBI_NEON + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; +#endif +} + +// clean up the temporary component buffers +static void stbi__cleanup_jpeg(stbi__jpeg *j) +{ + stbi__free_jpeg_components(j, j->s->img_n, 0); +} + +typedef struct +{ + resample_row_func resample; + stbi_uc *line0,*line1; + int hs,vs; // expansion factor in each axis + int w_lores; // horizontal pixels pre-expansion + int 
ystep; // how far through vertical expansion we are + int ypos; // which pre-expansion row we're on +} stbi__resample; + +// fast 0..255 * 0..255 => 0..255 rounded multiplication +static stbi_uc stbi__blinn_8x8(stbi_uc x, stbi_uc y) +{ + unsigned int t = x*y + 128; + return (stbi_uc) ((t + (t >>8)) >> 8); +} + +static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp) +{ + int n, decode_n, is_rgb; + z->s->img_n = 0; // make stbi__cleanup_jpeg safe + + // validate req_comp + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + + // load a jpeg image from whichever source, but leave in YCbCr format + if (!stbi__decode_jpeg_image(z)) { stbi__cleanup_jpeg(z); return NULL; } + + // determine actual number of components to generate + n = req_comp ? req_comp : z->s->img_n >= 3 ? 3 : 1; + + is_rgb = z->s->img_n == 3 && (z->rgb == 3 || (z->app14_color_transform == 0 && !z->jfif)); + + if (z->s->img_n == 3 && n < 3 && !is_rgb) + decode_n = 1; + else + decode_n = z->s->img_n; + + // nothing to do if no components requested; check this now to avoid + // accessing uninitialized coutput[0] later + if (decode_n <= 0) { stbi__cleanup_jpeg(z); return NULL; } + + // resample and color-convert + { + int k; + unsigned int i,j; + stbi_uc *output; + stbi_uc *coutput[4] = { NULL, NULL, NULL, NULL }; + + stbi__resample res_comp[4]; + + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + + // allocate line buffer big enough for upsampling off the edges + // with upsample factor of 4 + z->img_comp[k].linebuf = (stbi_uc *) stbi__malloc(z->s->img_x + 3); + if (!z->img_comp[k].linebuf) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + r->hs = z->img_h_max / z->img_comp[k].h; + r->vs = z->img_v_max / z->img_comp[k].v; + r->ystep = r->vs >> 1; + r->w_lores = (z->s->img_x + r->hs-1) / r->hs; + r->ypos = 0; + r->line0 = r->line1 = z->img_comp[k].data; + + if (r->hs == 1 && 
r->vs == 1) r->resample = resample_row_1; + else if (r->hs == 1 && r->vs == 2) r->resample = stbi__resample_row_v_2; + else if (r->hs == 2 && r->vs == 1) r->resample = stbi__resample_row_h_2; + else if (r->hs == 2 && r->vs == 2) r->resample = z->resample_row_hv_2_kernel; + else r->resample = stbi__resample_row_generic; + } + + // can't error after this so, this is safe + output = (stbi_uc *) stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1); + if (!output) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + // now go ahead and resample + for (j=0; j < z->s->img_y; ++j) { + stbi_uc *out = output + n * z->s->img_x * j; + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + int y_bot = r->ystep >= (r->vs >> 1); + coutput[k] = r->resample(z->img_comp[k].linebuf, + y_bot ? r->line1 : r->line0, + y_bot ? r->line0 : r->line1, + r->w_lores, r->hs); + if (++r->ystep >= r->vs) { + r->ystep = 0; + r->line0 = r->line1; + if (++r->ypos < z->img_comp[k].y) + r->line1 += z->img_comp[k].w2; + } + } + if (n >= 3) { + stbi_uc *y = coutput[0]; + if (z->s->img_n == 3) { + if (is_rgb) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = y[i]; + out[1] = coutput[1][i]; + out[2] = coutput[2][i]; + out[3] = 255; + out += n; + } + } else { + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else if (z->s->img_n == 4) { + if (z->app14_color_transform == 0) { // CMYK + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(coutput[0][i], m); + out[1] = stbi__blinn_8x8(coutput[1][i], m); + out[2] = stbi__blinn_8x8(coutput[2][i], m); + out[3] = 255; + out += n; + } + } else if (z->app14_color_transform == 2) { // YCCK + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(255 - out[0], m); + out[1] = stbi__blinn_8x8(255 - out[1], m); + out[2] = stbi__blinn_8x8(255 - out[2], m); + 
out += n; + } + } else { // YCbCr + alpha? Ignore the fourth channel for now + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else + for (i=0; i < z->s->img_x; ++i) { + out[0] = out[1] = out[2] = y[i]; + out[3] = 255; // not used if n==3 + out += n; + } + } else { + if (is_rgb) { + if (n == 1) + for (i=0; i < z->s->img_x; ++i) + *out++ = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + else { + for (i=0; i < z->s->img_x; ++i, out += 2) { + out[0] = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + out[1] = 255; + } + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 0) { + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + stbi_uc r = stbi__blinn_8x8(coutput[0][i], m); + stbi_uc g = stbi__blinn_8x8(coutput[1][i], m); + stbi_uc b = stbi__blinn_8x8(coutput[2][i], m); + out[0] = stbi__compute_y(r, g, b); + out[1] = 255; + out += n; + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 2) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = stbi__blinn_8x8(255 - coutput[0][i], coutput[3][i]); + out[1] = 255; + out += n; + } + } else { + stbi_uc *y = coutput[0]; + if (n == 1) + for (i=0; i < z->s->img_x; ++i) out[i] = y[i]; + else + for (i=0; i < z->s->img_x; ++i) { *out++ = y[i]; *out++ = 255; } + } + } + } + stbi__cleanup_jpeg(z); + *out_x = z->s->img_x; + *out_y = z->s->img_y; + if (comp) *comp = z->s->img_n >= 3 ? 
3 : 1; // report original components, not output + return output; + } +} + +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + unsigned char* result; + stbi__jpeg* j = (stbi__jpeg*) stbi__malloc(sizeof(stbi__jpeg)); + if (!j) return stbi__errpuc("outofmem", "Out of memory"); + memset(j, 0, sizeof(stbi__jpeg)); + STBI_NOTUSED(ri); + j->s = s; + stbi__setup_jpeg(j); + result = load_jpeg_image(j, x,y,comp,req_comp); + STBI_FREE(j); + return result; +} + +static int stbi__jpeg_test(stbi__context *s) +{ + int r; + stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg)); + if (!j) return stbi__err("outofmem", "Out of memory"); + memset(j, 0, sizeof(stbi__jpeg)); + j->s = s; + stbi__setup_jpeg(j); + r = stbi__decode_jpeg_header(j, STBI__SCAN_type); + stbi__rewind(s); + STBI_FREE(j); + return r; +} + +static int stbi__jpeg_info_raw(stbi__jpeg *j, int *x, int *y, int *comp) +{ + if (!stbi__decode_jpeg_header(j, STBI__SCAN_header)) { + stbi__rewind( j->s ); + return 0; + } + if (x) *x = j->s->img_x; + if (y) *y = j->s->img_y; + if (comp) *comp = j->s->img_n >= 3 ? 
3 : 1; + return 1; +} + +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp) +{ + int result; + stbi__jpeg* j = (stbi__jpeg*) (stbi__malloc(sizeof(stbi__jpeg))); + if (!j) return stbi__err("outofmem", "Out of memory"); + memset(j, 0, sizeof(stbi__jpeg)); + j->s = s; + result = stbi__jpeg_info_raw(j, x, y, comp); + STBI_FREE(j); + return result; +} +#endif + +// public domain zlib decode v0.2 Sean Barrett 2006-11-18 +// simple implementation +// - all input must be provided in an upfront buffer +// - all output is written to a single output buffer (can malloc/realloc) +// performance +// - fast huffman + +#ifndef STBI_NO_ZLIB + +// fast-way is faster to check than jpeg huffman, but slow way is slower +#define STBI__ZFAST_BITS 9 // accelerate all cases in default tables +#define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1) +#define STBI__ZNSYMS 288 // number of symbols in literal/length alphabet + +// zlib-style huffman encoding +// (jpegs packs from left, zlib from right, so can't share code) +typedef struct +{ + stbi__uint16 fast[1 << STBI__ZFAST_BITS]; + stbi__uint16 firstcode[16]; + int maxcode[17]; + stbi__uint16 firstsymbol[16]; + stbi_uc size[STBI__ZNSYMS]; + stbi__uint16 value[STBI__ZNSYMS]; +} stbi__zhuffman; + +stbi_inline static int stbi__bitreverse16(int n) +{ + n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1); + n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2); + n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4); + n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8); + return n; +} + +stbi_inline static int stbi__bit_reverse(int v, int bits) +{ + STBI_ASSERT(bits <= 16); + // to bit reverse n bits, reverse 16 and shift + // e.g. 
11 bits, bit reverse and shift away 5 + return stbi__bitreverse16(v) >> (16-bits); +} + +static int stbi__zbuild_huffman(stbi__zhuffman *z, const stbi_uc *sizelist, int num) +{ + int i,k=0; + int code, next_code[16], sizes[17]; + + // DEFLATE spec for generating codes + memset(sizes, 0, sizeof(sizes)); + memset(z->fast, 0, sizeof(z->fast)); + for (i=0; i < num; ++i) + ++sizes[sizelist[i]]; + sizes[0] = 0; + for (i=1; i < 16; ++i) + if (sizes[i] > (1 << i)) + return stbi__err("bad sizes", "Corrupt PNG"); + code = 0; + for (i=1; i < 16; ++i) { + next_code[i] = code; + z->firstcode[i] = (stbi__uint16) code; + z->firstsymbol[i] = (stbi__uint16) k; + code = (code + sizes[i]); + if (sizes[i]) + if (code-1 >= (1 << i)) return stbi__err("bad codelengths","Corrupt PNG"); + z->maxcode[i] = code << (16-i); // preshift for inner loop + code <<= 1; + k += sizes[i]; + } + z->maxcode[16] = 0x10000; // sentinel + for (i=0; i < num; ++i) { + int s = sizelist[i]; + if (s) { + int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s]; + stbi__uint16 fastv = (stbi__uint16) ((s << 9) | i); + z->size [c] = (stbi_uc ) s; + z->value[c] = (stbi__uint16) i; + if (s <= STBI__ZFAST_BITS) { + int j = stbi__bit_reverse(next_code[s],s); + while (j < (1 << STBI__ZFAST_BITS)) { + z->fast[j] = fastv; + j += (1 << s); + } + } + ++next_code[s]; + } + } + return 1; +} + +// zlib-from-memory implementation for PNG reading +// because PNG allows splitting the zlib stream arbitrarily, +// and it's annoying structurally to have PNG call ZLIB call PNG, +// we require PNG read all the IDATs and combine them into a single +// memory buffer + +typedef struct +{ + stbi_uc *zbuffer, *zbuffer_end; + int num_bits; + int hit_zeof_once; + stbi__uint32 code_buffer; + + char *zout; + char *zout_start; + char *zout_end; + int z_expandable; + + stbi__zhuffman z_length, z_distance; +} stbi__zbuf; + +stbi_inline static int stbi__zeof(stbi__zbuf *z) +{ + return (z->zbuffer >= z->zbuffer_end); +} + +stbi_inline static 
stbi_uc stbi__zget8(stbi__zbuf *z) +{ + return stbi__zeof(z) ? 0 : *z->zbuffer++; +} + +static void stbi__fill_bits(stbi__zbuf *z) +{ + do { + if (z->code_buffer >= (1U << z->num_bits)) { + z->zbuffer = z->zbuffer_end; /* treat this as EOF so we fail. */ + return; + } + z->code_buffer |= (unsigned int) stbi__zget8(z) << z->num_bits; + z->num_bits += 8; + } while (z->num_bits <= 24); +} + +stbi_inline static unsigned int stbi__zreceive(stbi__zbuf *z, int n) +{ + unsigned int k; + if (z->num_bits < n) stbi__fill_bits(z); + k = z->code_buffer & ((1 << n) - 1); + z->code_buffer >>= n; + z->num_bits -= n; + return k; +} + +static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s,k; + // not resolved by fast table, so compute it the slow way + // use jpeg approach, which requires MSbits at top + k = stbi__bit_reverse(a->code_buffer, 16); + for (s=STBI__ZFAST_BITS+1; ; ++s) + if (k < z->maxcode[s]) + break; + if (s >= 16) return -1; // invalid code! + // code size is s, so: + b = (k >> (16-s)) - z->firstcode[s] + z->firstsymbol[s]; + if (b >= STBI__ZNSYMS) return -1; // some data was corrupt somewhere! + if (z->size[b] != s) return -1; // was originally an assert, but report failure instead. + a->code_buffer >>= s; + a->num_bits -= s; + return z->value[b]; +} + +stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s; + if (a->num_bits < 16) { + if (stbi__zeof(a)) { + if (!a->hit_zeof_once) { + // This is the first time we hit eof, insert 16 extra padding btis + // to allow us to keep going; if we actually consume any of them + // though, that is invalid data. This is caught later. + a->hit_zeof_once = 1; + a->num_bits += 16; // add 16 implicit zero bits + } else { + // We already inserted our extra 16 padding bits and are again + // out, this stream is actually prematurely terminated. 
+ return -1; + } + } else { + stbi__fill_bits(a); + } + } + b = z->fast[a->code_buffer & STBI__ZFAST_MASK]; + if (b) { + s = b >> 9; + a->code_buffer >>= s; + a->num_bits -= s; + return b & 511; + } + return stbi__zhuffman_decode_slowpath(a, z); +} + +static int stbi__zexpand(stbi__zbuf *z, char *zout, int n) // need to make room for n bytes +{ + char *q; + unsigned int cur, limit, old_limit; + z->zout = zout; + if (!z->z_expandable) return stbi__err("output buffer limit","Corrupt PNG"); + cur = (unsigned int) (z->zout - z->zout_start); + limit = old_limit = (unsigned) (z->zout_end - z->zout_start); + if (UINT_MAX - cur < (unsigned) n) return stbi__err("outofmem", "Out of memory"); + while (cur + n > limit) { + if(limit > UINT_MAX / 2) return stbi__err("outofmem", "Out of memory"); + limit *= 2; + } + q = (char *) STBI_REALLOC_SIZED(z->zout_start, old_limit, limit); + STBI_NOTUSED(old_limit); + if (q == NULL) return stbi__err("outofmem", "Out of memory"); + z->zout_start = q; + z->zout = q + cur; + z->zout_end = q + limit; + return 1; +} + +static const int stbi__zlength_base[31] = { + 3,4,5,6,7,8,9,10,11,13, + 15,17,19,23,27,31,35,43,51,59, + 67,83,99,115,131,163,195,227,258,0,0 }; + +static const int stbi__zlength_extra[31]= +{ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 }; + +static const int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193, +257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0}; + +static const int stbi__zdist_extra[32] = +{ 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; + +static int stbi__parse_huffman_block(stbi__zbuf *a) +{ + char *zout = a->zout; + for(;;) { + int z = stbi__zhuffman_decode(a, &a->z_length); + if (z < 256) { + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); // error in huffman codes + if (zout >= a->zout_end) { + if (!stbi__zexpand(a, zout, 1)) return 0; + zout = a->zout; + } + *zout++ = (char) z; + } else { + stbi_uc *p; + int 
len,dist; + if (z == 256) { + a->zout = zout; + if (a->hit_zeof_once && a->num_bits < 16) { + // The first time we hit zeof, we inserted 16 extra zero bits into our bit + // buffer so the decoder can just do its speculative decoding. But if we + // actually consumed any of those bits (which is the case when num_bits < 16), + // the stream actually read past the end so it is malformed. + return stbi__err("unexpected end","Corrupt PNG"); + } + return 1; + } + if (z >= 286) return stbi__err("bad huffman code","Corrupt PNG"); // per DEFLATE, length codes 286 and 287 must not appear in compressed data + z -= 257; + len = stbi__zlength_base[z]; + if (stbi__zlength_extra[z]) len += stbi__zreceive(a, stbi__zlength_extra[z]); + z = stbi__zhuffman_decode(a, &a->z_distance); + if (z < 0 || z >= 30) return stbi__err("bad huffman code","Corrupt PNG"); // per DEFLATE, distance codes 30 and 31 must not appear in compressed data + dist = stbi__zdist_base[z]; + if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]); + if (zout - a->zout_start < dist) return stbi__err("bad dist","Corrupt PNG"); + if (len > a->zout_end - zout) { + if (!stbi__zexpand(a, zout, len)) return 0; + zout = a->zout; + } + p = (stbi_uc *) (zout - dist); + if (dist == 1) { // run of one byte; common in images. 
+ stbi_uc v = *p; + if (len) { do *zout++ = v; while (--len); } + } else { + if (len) { do *zout++ = *p++; while (--len); } + } + } + } +} + +static int stbi__compute_huffman_codes(stbi__zbuf *a) +{ + static const stbi_uc length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 }; + stbi__zhuffman z_codelength; + stbi_uc lencodes[286+32+137];//padding for maximum single op + stbi_uc codelength_sizes[19]; + int i,n; + + int hlit = stbi__zreceive(a,5) + 257; + int hdist = stbi__zreceive(a,5) + 1; + int hclen = stbi__zreceive(a,4) + 4; + int ntot = hlit + hdist; + + memset(codelength_sizes, 0, sizeof(codelength_sizes)); + for (i=0; i < hclen; ++i) { + int s = stbi__zreceive(a,3); + codelength_sizes[length_dezigzag[i]] = (stbi_uc) s; + } + if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0; + + n = 0; + while (n < ntot) { + int c = stbi__zhuffman_decode(a, &z_codelength); + if (c < 0 || c >= 19) return stbi__err("bad codelengths", "Corrupt PNG"); + if (c < 16) + lencodes[n++] = (stbi_uc) c; + else { + stbi_uc fill = 0; + if (c == 16) { + c = stbi__zreceive(a,2)+3; + if (n == 0) return stbi__err("bad codelengths", "Corrupt PNG"); + fill = lencodes[n-1]; + } else if (c == 17) { + c = stbi__zreceive(a,3)+3; + } else if (c == 18) { + c = stbi__zreceive(a,7)+11; + } else { + return stbi__err("bad codelengths", "Corrupt PNG"); + } + if (ntot - n < c) return stbi__err("bad codelengths", "Corrupt PNG"); + memset(lencodes+n, fill, c); + n += c; + } + } + if (n != ntot) return stbi__err("bad codelengths","Corrupt PNG"); + if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, lencodes+hlit, hdist)) return 0; + return 1; +} + +static int stbi__parse_uncompressed_block(stbi__zbuf *a) +{ + stbi_uc header[4]; + int len,nlen,k; + if (a->num_bits & 7) + stbi__zreceive(a, a->num_bits & 7); // discard + // drain the bit-packed data into header + k = 0; + while (a->num_bits > 0) { + header[k++] = 
(stbi_uc) (a->code_buffer & 255); // suppress MSVC run-time check + a->code_buffer >>= 8; + a->num_bits -= 8; + } + if (a->num_bits < 0) return stbi__err("zlib corrupt","Corrupt PNG"); + // now fill header the normal way + while (k < 4) + header[k++] = stbi__zget8(a); + len = header[1] * 256 + header[0]; + nlen = header[3] * 256 + header[2]; + if (nlen != (len ^ 0xffff)) return stbi__err("zlib corrupt","Corrupt PNG"); + if (a->zbuffer + len > a->zbuffer_end) return stbi__err("read past buffer","Corrupt PNG"); + if (a->zout + len > a->zout_end) + if (!stbi__zexpand(a, a->zout, len)) return 0; + memcpy(a->zout, a->zbuffer, len); + a->zbuffer += len; + a->zout += len; + return 1; +} + +static int stbi__parse_zlib_header(stbi__zbuf *a) +{ + int cmf = stbi__zget8(a); + int cm = cmf & 15; + /* int cinfo = cmf >> 4; */ + int flg = stbi__zget8(a); + if (stbi__zeof(a)) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec + if ((cmf*256+flg) % 31 != 0) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec + if (flg & 32) return stbi__err("no preset dict","Corrupt PNG"); // preset dictionary not allowed in png + if (cm != 8) return stbi__err("bad compression","Corrupt PNG"); // DEFLATE required for png + // window = 1 << (8 + cinfo)... 
but who cares, we fully buffer output + return 1; +} + +static const stbi_uc stbi__zdefault_length[STBI__ZNSYMS] = +{ + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8 +}; +static const stbi_uc stbi__zdefault_distance[32] = +{ + 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5 +}; +/* +Init algorithm: +{ + int i; // use <= to match clearly with spec + for (i=0; i <= 143; ++i) stbi__zdefault_length[i] = 8; + for ( ; i <= 255; ++i) stbi__zdefault_length[i] = 9; + for ( ; i <= 279; ++i) stbi__zdefault_length[i] = 7; + for ( ; i <= 287; ++i) stbi__zdefault_length[i] = 8; + + for (i=0; i <= 31; ++i) stbi__zdefault_distance[i] = 5; +} +*/ + +static int stbi__parse_zlib(stbi__zbuf *a, int parse_header) +{ + int final, type; + if (parse_header) + if (!stbi__parse_zlib_header(a)) return 0; + a->num_bits = 0; + a->code_buffer = 0; + a->hit_zeof_once = 0; + do { + final = stbi__zreceive(a,1); + type = stbi__zreceive(a,2); + if (type == 0) { + if (!stbi__parse_uncompressed_block(a)) return 0; + } else if (type == 3) { + return 0; + } else { + if (type == 1) { + // use fixed code lengths + if (!stbi__zbuild_huffman(&a->z_length , stbi__zdefault_length , STBI__ZNSYMS)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) return 0; + } else { + if (!stbi__compute_huffman_codes(a)) return 0; + } + if (!stbi__parse_huffman_block(a)) return 0; + } + } while (!final); + return 1; +} + 
+static int stbi__do_zlib(stbi__zbuf *a, char *obuf, int olen, int exp, int parse_header) +{ + a->zout_start = obuf; + a->zout = obuf; + a->zout_end = obuf + olen; + a->z_expandable = exp; + + return stbi__parse_zlib(a, parse_header); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, 1)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF char *stbi_zlib_decode_malloc(char const *buffer, int len, int *outlen) +{ + return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, char const *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 1)) + return (int) (a.zout - a.zout_start); + else + return -1; +} + +STBIDEF char *stbi_zlib_decode_noheader_malloc(char const *buffer, int len, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(16384); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer+len; + if (stbi__do_zlib(&a, p, 16384, 1, 0)) { + if 
(outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 0)) + return (int) (a.zout - a.zout_start); + else + return -1; +} +#endif + +// public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18 +// simple implementation +// - only 8-bit samples +// - no CRC checking +// - allocates lots of intermediate memory +// - avoids problem of streaming data between subsystems +// - avoids explicit window management +// performance +// - uses stb_zlib, a PD zlib implementation with fast huffman decoding + +#ifndef STBI_NO_PNG +typedef struct +{ + stbi__uint32 length; + stbi__uint32 type; +} stbi__pngchunk; + +static stbi__pngchunk stbi__get_chunk_header(stbi__context *s) +{ + stbi__pngchunk c; + c.length = stbi__get32be(s); + c.type = stbi__get32be(s); + return c; +} + +static int stbi__check_png_header(stbi__context *s) +{ + static const stbi_uc png_sig[8] = { 137,80,78,71,13,10,26,10 }; + int i; + for (i=0; i < 8; ++i) + if (stbi__get8(s) != png_sig[i]) return stbi__err("bad png sig","Not a PNG"); + return 1; +} + +typedef struct +{ + stbi__context *s; + stbi_uc *idata, *expanded, *out; + int depth; +} stbi__png; + + +enum { + STBI__F_none=0, + STBI__F_sub=1, + STBI__F_up=2, + STBI__F_avg=3, + STBI__F_paeth=4, + // synthetic filter used for first scanline to avoid needing a dummy row of 0s + STBI__F_avg_first +}; + +static stbi_uc first_row_filter[5] = +{ + STBI__F_none, + STBI__F_sub, + STBI__F_none, + STBI__F_avg_first, + STBI__F_sub // Paeth with b=c=0 turns out to be equivalent to sub +}; + +static int stbi__paeth(int a, int b, int c) +{ + // This formulation looks very different from the reference in the PNG spec, but is + // actually 
equivalent and has favorable data dependencies and admits straightforward + // generation of branch-free code, which helps performance significantly. + int thresh = c*3 - (a + b); + int lo = a < b ? a : b; + int hi = a < b ? b : a; + int t0 = (hi <= thresh) ? lo : c; + int t1 = (thresh <= lo) ? hi : t0; + return t1; +} + +static const stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 }; + +// adds an extra all-255 alpha channel +// dest == src is legal +// img_n must be 1 or 3 +static void stbi__create_png_alpha_expand8(stbi_uc *dest, stbi_uc *src, stbi__uint32 x, int img_n) +{ + int i; + // must process data backwards since we allow dest==src + if (img_n == 1) { + for (i=x-1; i >= 0; --i) { + dest[i*2+1] = 255; + dest[i*2+0] = src[i]; + } + } else { + STBI_ASSERT(img_n == 3); + for (i=x-1; i >= 0; --i) { + dest[i*4+3] = 255; + dest[i*4+2] = src[i*3+2]; + dest[i*4+1] = src[i*3+1]; + dest[i*4+0] = src[i*3+0]; + } + } +} + +// create the png data from post-deflated data +static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color) +{ + int bytes = (depth == 16 ? 2 : 1); + stbi__context *s = a->s; + stbi__uint32 i,j,stride = x*out_n*bytes; + stbi__uint32 img_len, img_width_bytes; + stbi_uc *filter_buf; + int all_ok = 1; + int k; + int img_n = s->img_n; // copy it into a local for later + + int output_bytes = out_n*bytes; + int filter_bytes = img_n*bytes; + int width = x; + + STBI_ASSERT(out_n == s->img_n || out_n == s->img_n+1); + a->out = (stbi_uc *) stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into + if (!a->out) return stbi__err("outofmem", "Out of memory"); + + // note: error exits here don't need to clean up a->out individually, + // stbi__do_png always does on error. 
+ if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) return stbi__err("too large", "Corrupt PNG"); + img_width_bytes = (((img_n * x * depth) + 7) >> 3); + if (!stbi__mad2sizes_valid(img_width_bytes, y, img_width_bytes)) return stbi__err("too large", "Corrupt PNG"); + img_len = (img_width_bytes + 1) * y; + + // we used to check for exact match between raw_len and img_len on non-interlaced PNGs, + // but issue #276 reported a PNG in the wild that had extra data at the end (all zeros), + // so just check for raw_len < img_len always. + if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG"); + + // Allocate two scan lines worth of filter workspace buffer. + filter_buf = (stbi_uc *) stbi__malloc_mad2(img_width_bytes, 2, 0); + if (!filter_buf) return stbi__err("outofmem", "Out of memory"); + + // Filtering for low-bit-depth images + if (depth < 8) { + filter_bytes = 1; + width = img_width_bytes; + } + + for (j=0; j < y; ++j) { + // cur/prior filter buffers alternate + stbi_uc *cur = filter_buf + (j & 1)*img_width_bytes; + stbi_uc *prior = filter_buf + (~j & 1)*img_width_bytes; + stbi_uc *dest = a->out + stride*j; + int nk = width * filter_bytes; + int filter = *raw++; + + // check filter type + if (filter > 4) { + all_ok = stbi__err("invalid filter","Corrupt PNG"); + break; + } + + // if first row, use special filter that doesn't sample previous row + if (j == 0) filter = first_row_filter[filter]; + + // perform actual filtering + switch (filter) { + case STBI__F_none: + memcpy(cur, raw, nk); + break; + case STBI__F_sub: + memcpy(cur, raw, filter_bytes); + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); + break; + case STBI__F_up: + for (k = 0; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + prior[k]); + break; + case STBI__F_avg: + for (k = 0; k < filter_bytes; ++k) + cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + 
cur[k-filter_bytes])>>1)); + break; + case STBI__F_paeth: + for (k = 0; k < filter_bytes; ++k) + cur[k] = STBI__BYTECAST(raw[k] + prior[k]); // prior[k] == stbi__paeth(0,prior[k],0) + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes], prior[k], prior[k-filter_bytes])); + break; + case STBI__F_avg_first: + memcpy(cur, raw, filter_bytes); + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); + break; + } + + raw += nk; + + // expand decoded bits in cur to dest, also adding an extra alpha channel if desired + if (depth < 8) { + stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range + stbi_uc *in = cur; + stbi_uc *out = dest; + stbi_uc inb = 0; + stbi__uint32 nsmp = x*img_n; + + // expand bits to bytes first + if (depth == 4) { + for (i=0; i < nsmp; ++i) { + if ((i & 1) == 0) inb = *in++; + *out++ = scale * (inb >> 4); + inb <<= 4; + } + } else if (depth == 2) { + for (i=0; i < nsmp; ++i) { + if ((i & 3) == 0) inb = *in++; + *out++ = scale * (inb >> 6); + inb <<= 2; + } + } else { + STBI_ASSERT(depth == 1); + for (i=0; i < nsmp; ++i) { + if ((i & 7) == 0) inb = *in++; + *out++ = scale * (inb >> 7); + inb <<= 1; + } + } + + // insert alpha=255 values if desired + if (img_n != out_n) + stbi__create_png_alpha_expand8(dest, dest, x, img_n); + } else if (depth == 8) { + if (img_n == out_n) + memcpy(dest, cur, x*img_n); + else + stbi__create_png_alpha_expand8(dest, cur, x, img_n); + } else if (depth == 16) { + // convert the image data from big-endian to platform-native + stbi__uint16 *dest16 = (stbi__uint16*)dest; + stbi__uint32 nsmp = x*img_n; + + if (img_n == out_n) { + for (i = 0; i < nsmp; ++i, ++dest16, cur += 2) + *dest16 = (cur[0] << 8) | cur[1]; + } else { + STBI_ASSERT(img_n+1 == out_n); + if (img_n == 1) { + for (i = 0; i < x; ++i, dest16 += 2, cur += 2) { + dest16[0] = (cur[0] << 8) | cur[1]; + dest16[1] = 
0xffff; + } + } else { + STBI_ASSERT(img_n == 3); + for (i = 0; i < x; ++i, dest16 += 4, cur += 6) { + dest16[0] = (cur[0] << 8) | cur[1]; + dest16[1] = (cur[2] << 8) | cur[3]; + dest16[2] = (cur[4] << 8) | cur[5]; + dest16[3] = 0xffff; + } + } + } + } + } + + STBI_FREE(filter_buf); + if (!all_ok) return 0; + + return 1; +} + +static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced) +{ + int bytes = (depth == 16 ? 2 : 1); + int out_bytes = out_n * bytes; + stbi_uc *final; + int p; + if (!interlaced) + return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color); + + // de-interlacing + final = (stbi_uc *) stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0); + if (!final) return stbi__err("outofmem", "Out of memory"); + for (p=0; p < 7; ++p) { + int xorig[] = { 0,4,0,2,0,1,0 }; + int yorig[] = { 0,0,4,0,2,0,1 }; + int xspc[] = { 8,8,4,4,2,2,1 }; + int yspc[] = { 8,8,8,4,4,2,2 }; + int i,j,x,y; + // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1 + x = (a->s->img_x - xorig[p] + xspc[p]-1) / xspc[p]; + y = (a->s->img_y - yorig[p] + yspc[p]-1) / yspc[p]; + if (x && y) { + stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y; + if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) { + STBI_FREE(final); + return 0; + } + for (j=0; j < y; ++j) { + for (i=0; i < x; ++i) { + int out_y = j*yspc[p]+yorig[p]; + int out_x = i*xspc[p]+xorig[p]; + memcpy(final + out_y*a->s->img_x*out_bytes + out_x*out_bytes, + a->out + (j*x+i)*out_bytes, out_bytes); + } + } + STBI_FREE(a->out); + image_data += img_len; + image_data_len -= img_len; + } + } + a->out = final; + + return 1; +} + +static int stbi__compute_transparency(stbi__png *z, stbi_uc tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + // compute 
color-based transparency, assuming we've + // already got 255 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i=0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 0 : 255); + p += 2; + } + } else { + for (i=0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__compute_transparency16(stbi__png *z, stbi__uint16 tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi__uint16 *p = (stbi__uint16*) z->out; + + // compute color-based transparency, assuming we've + // already got 65535 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i = 0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 0 : 65535); + p += 2; + } + } else { + for (i = 0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int pal_img_n) +{ + stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y; + stbi_uc *p, *temp_out, *orig = a->out; + + p = (stbi_uc *) stbi__malloc_mad2(pixel_count, pal_img_n, 0); + if (p == NULL) return stbi__err("outofmem", "Out of memory"); + + // between here and free(out) below, exitting would leak + temp_out = p; + + if (pal_img_n == 3) { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p += 3; + } + } else { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p[3] = palette[n+3]; + p += 4; + } + } + STBI_FREE(a->out); + a->out = temp_out; + + STBI_NOTUSED(len); + + return 1; +} + +static int stbi__unpremultiply_on_load_global = 0; +static int stbi__de_iphone_flag_global = 0; + +STBIDEF void stbi_set_unpremultiply_on_load(int 
flag_true_if_should_unpremultiply) +{ + stbi__unpremultiply_on_load_global = flag_true_if_should_unpremultiply; +} + +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert) +{ + stbi__de_iphone_flag_global = flag_true_if_should_convert; +} + +#ifndef STBI_THREAD_LOCAL +#define stbi__unpremultiply_on_load stbi__unpremultiply_on_load_global +#define stbi__de_iphone_flag stbi__de_iphone_flag_global +#else +static STBI_THREAD_LOCAL int stbi__unpremultiply_on_load_local, stbi__unpremultiply_on_load_set; +static STBI_THREAD_LOCAL int stbi__de_iphone_flag_local, stbi__de_iphone_flag_set; + +STBIDEF void stbi_set_unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply) +{ + stbi__unpremultiply_on_load_local = flag_true_if_should_unpremultiply; + stbi__unpremultiply_on_load_set = 1; +} + +STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert) +{ + stbi__de_iphone_flag_local = flag_true_if_should_convert; + stbi__de_iphone_flag_set = 1; +} + +#define stbi__unpremultiply_on_load (stbi__unpremultiply_on_load_set \ + ? stbi__unpremultiply_on_load_local \ + : stbi__unpremultiply_on_load_global) +#define stbi__de_iphone_flag (stbi__de_iphone_flag_set \ + ? 
stbi__de_iphone_flag_local \ + : stbi__de_iphone_flag_global) +#endif // STBI_THREAD_LOCAL + +static void stbi__de_iphone(stbi__png *z) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + if (s->img_out_n == 3) { // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 3; + } + } else { + STBI_ASSERT(s->img_out_n == 4); + if (stbi__unpremultiply_on_load) { + // convert bgr to rgb and unpremultiply + for (i=0; i < pixel_count; ++i) { + stbi_uc a = p[3]; + stbi_uc t = p[0]; + if (a) { + stbi_uc half = a / 2; + p[0] = (p[2] * 255 + half) / a; + p[1] = (p[1] * 255 + half) / a; + p[2] = ( t * 255 + half) / a; + } else { + p[0] = p[2]; + p[2] = t; + } + p += 4; + } + } else { + // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 4; + } + } + } +} + +#define STBI__PNG_TYPE(a,b,c,d) (((unsigned) (a) << 24) + ((unsigned) (b) << 16) + ((unsigned) (c) << 8) + (unsigned) (d)) + +static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) +{ + stbi_uc palette[1024], pal_img_n=0; + stbi_uc has_trans=0, tc[3]={0}; + stbi__uint16 tc16[3]; + stbi__uint32 ioff=0, idata_limit=0, i, pal_len=0; + int first=1,k,interlace=0, color=0, is_iphone=0; + stbi__context *s = z->s; + + z->expanded = NULL; + z->idata = NULL; + z->out = NULL; + + if (!stbi__check_png_header(s)) return 0; + + if (scan == STBI__SCAN_type) return 1; + + for (;;) { + stbi__pngchunk c = stbi__get_chunk_header(s); + switch (c.type) { + case STBI__PNG_TYPE('C','g','B','I'): + is_iphone = 1; + stbi__skip(s, c.length); + break; + case STBI__PNG_TYPE('I','H','D','R'): { + int comp,filter; + if (!first) return stbi__err("multiple IHDR","Corrupt PNG"); + first = 0; + if (c.length != 13) return stbi__err("bad IHDR len","Corrupt PNG"); + s->img_x = stbi__get32be(s); + s->img_y = stbi__get32be(s); + if (s->img_y > STBI_MAX_DIMENSIONS) return 
stbi__err("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + z->depth = stbi__get8(s); if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 16) return stbi__err("1/2/4/8/16-bit only","PNG not supported: 1/2/4/8/16-bit only"); + color = stbi__get8(s); if (color > 6) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3 && z->depth == 16) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3) pal_img_n = 3; else if (color & 1) return stbi__err("bad ctype","Corrupt PNG"); + comp = stbi__get8(s); if (comp) return stbi__err("bad comp method","Corrupt PNG"); + filter= stbi__get8(s); if (filter) return stbi__err("bad filter method","Corrupt PNG"); + interlace = stbi__get8(s); if (interlace>1) return stbi__err("bad interlace method","Corrupt PNG"); + if (!s->img_x || !s->img_y) return stbi__err("0-pixel image","Corrupt PNG"); + if (!pal_img_n) { + s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0); + if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode"); + } else { + // if paletted, then pal_n is our final components, and + // img_n is # components to decompress/filter. 
+ s->img_n = 1; + if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large","Corrupt PNG"); + } + // even with SCAN_header, have to scan to see if we have a tRNS + break; + } + + case STBI__PNG_TYPE('P','L','T','E'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (c.length > 256*3) return stbi__err("invalid PLTE","Corrupt PNG"); + pal_len = c.length / 3; + if (pal_len * 3 != c.length) return stbi__err("invalid PLTE","Corrupt PNG"); + for (i=0; i < pal_len; ++i) { + palette[i*4+0] = stbi__get8(s); + palette[i*4+1] = stbi__get8(s); + palette[i*4+2] = stbi__get8(s); + palette[i*4+3] = 255; + } + break; + } + + case STBI__PNG_TYPE('t','R','N','S'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (z->idata) return stbi__err("tRNS after IDAT","Corrupt PNG"); + if (pal_img_n) { + if (scan == STBI__SCAN_header) { s->img_n = 4; return 1; } + if (pal_len == 0) return stbi__err("tRNS before PLTE","Corrupt PNG"); + if (c.length > pal_len) return stbi__err("bad tRNS len","Corrupt PNG"); + pal_img_n = 4; + for (i=0; i < c.length; ++i) + palette[i*4+3] = stbi__get8(s); + } else { + if (!(s->img_n & 1)) return stbi__err("tRNS with alpha","Corrupt PNG"); + if (c.length != (stbi__uint32) s->img_n*2) return stbi__err("bad tRNS len","Corrupt PNG"); + has_trans = 1; + // non-paletted with tRNS = constant alpha. if header-scanning, we can stop now. 
+ if (scan == STBI__SCAN_header) { ++s->img_n; return 1; } + if (z->depth == 16) { + for (k = 0; k < s->img_n; ++k) tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is + } else { + for (k = 0; k < s->img_n; ++k) tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger + } + } + break; + } + + case STBI__PNG_TYPE('I','D','A','T'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (pal_img_n && !pal_len) return stbi__err("no PLTE","Corrupt PNG"); + if (scan == STBI__SCAN_header) { + // header scan definitely stops at first IDAT + if (pal_img_n) + s->img_n = pal_img_n; + return 1; + } + if (c.length > (1u << 30)) return stbi__err("IDAT size limit", "IDAT section larger than 2^30 bytes"); + if ((int)(ioff + c.length) < (int)ioff) return 0; + if (ioff + c.length > idata_limit) { + stbi__uint32 idata_limit_old = idata_limit; + stbi_uc *p; + if (idata_limit == 0) idata_limit = c.length > 4096 ? c.length : 4096; + while (ioff + c.length > idata_limit) + idata_limit *= 2; + STBI_NOTUSED(idata_limit_old); + p = (stbi_uc *) STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit); if (p == NULL) return stbi__err("outofmem", "Out of memory"); + z->idata = p; + } + if (!stbi__getn(s, z->idata+ioff,c.length)) return stbi__err("outofdata","Corrupt PNG"); + ioff += c.length; + break; + } + + case STBI__PNG_TYPE('I','E','N','D'): { + stbi__uint32 raw_len, bpl; + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (scan != STBI__SCAN_load) return 1; + if (z->idata == NULL) return stbi__err("no IDAT","Corrupt PNG"); + // initial guess for decoded data size to avoid unnecessary reallocs + bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component + raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */; + z->expanded = (stbi_uc *) stbi_zlib_decode_malloc_guesssize_headerflag((char *) z->idata, ioff, raw_len, (int *) &raw_len, 
!is_iphone); + if (z->expanded == NULL) return 0; // zlib should set error + STBI_FREE(z->idata); z->idata = NULL; + if ((req_comp == s->img_n+1 && req_comp != 3 && !pal_img_n) || has_trans) + s->img_out_n = s->img_n+1; + else + s->img_out_n = s->img_n; + if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, z->depth, color, interlace)) return 0; + if (has_trans) { + if (z->depth == 16) { + if (!stbi__compute_transparency16(z, tc16, s->img_out_n)) return 0; + } else { + if (!stbi__compute_transparency(z, tc, s->img_out_n)) return 0; + } + } + if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2) + stbi__de_iphone(z); + if (pal_img_n) { + // pal_img_n == 3 or 4 + s->img_n = pal_img_n; // record the actual colors we had + s->img_out_n = pal_img_n; + if (req_comp >= 3) s->img_out_n = req_comp; + if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n)) + return 0; + } else if (has_trans) { + // non-paletted image with tRNS -> source image has (constant) alpha + ++s->img_n; + } + STBI_FREE(z->expanded); z->expanded = NULL; + // end of PNG chunk, read and skip CRC + stbi__get32be(s); + return 1; + } + + default: + // if critical, fail + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if ((c.type & (1 << 29)) == 0) { + #ifndef STBI_NO_FAILURE_STRINGS + // not threadsafe + static char invalid_chunk[] = "XXXX PNG chunk not known"; + invalid_chunk[0] = STBI__BYTECAST(c.type >> 24); + invalid_chunk[1] = STBI__BYTECAST(c.type >> 16); + invalid_chunk[2] = STBI__BYTECAST(c.type >> 8); + invalid_chunk[3] = STBI__BYTECAST(c.type >> 0); + #endif + return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type"); + } + stbi__skip(s, c.length); + break; + } + // end of PNG chunk, read and skip CRC + stbi__get32be(s); + } +} + +static void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri) +{ + void *result=NULL; + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal 
error"); + if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) { + if (p->depth <= 8) + ri->bits_per_channel = 8; + else if (p->depth == 16) + ri->bits_per_channel = 16; + else + return stbi__errpuc("bad bits_per_channel", "PNG not supported: unsupported color depth"); + result = p->out; + p->out = NULL; + if (req_comp && req_comp != p->s->img_out_n) { + if (ri->bits_per_channel == 8) + result = stbi__convert_format((unsigned char *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + else + result = stbi__convert_format16((stbi__uint16 *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + p->s->img_out_n = req_comp; + if (result == NULL) return result; + } + *x = p->s->img_x; + *y = p->s->img_y; + if (n) *n = p->s->img_n; + } + STBI_FREE(p->out); p->out = NULL; + STBI_FREE(p->expanded); p->expanded = NULL; + STBI_FREE(p->idata); p->idata = NULL; + + return result; +} + +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi__png p; + p.s = s; + return stbi__do_png(&p, x,y,comp,req_comp, ri); +} + +static int stbi__png_test(stbi__context *s) +{ + int r; + r = stbi__check_png_header(s); + stbi__rewind(s); + return r; +} + +static int stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp) +{ + if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) { + stbi__rewind( p->s ); + return 0; + } + if (x) *x = p->s->img_x; + if (y) *y = p->s->img_y; + if (comp) *comp = p->s->img_n; + return 1; +} + +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__png p; + p.s = s; + return stbi__png_info_raw(&p, x, y, comp); +} + +static int stbi__png_is16(stbi__context *s) +{ + stbi__png p; + p.s = s; + if (!stbi__png_info_raw(&p, NULL, NULL, NULL)) + return 0; + if (p.depth != 16) { + stbi__rewind(p.s); + return 0; + } + return 1; +} +#endif + +// Microsoft/Windows BMP image + +#ifndef STBI_NO_BMP +static int stbi__bmp_test_raw(stbi__context *s) +{ + int r; + 
int sz; + if (stbi__get8(s) != 'B') return 0; + if (stbi__get8(s) != 'M') return 0; + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + stbi__get32le(s); // discard data offset + sz = stbi__get32le(s); + r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124); + return r; +} + +static int stbi__bmp_test(stbi__context *s) +{ + int r = stbi__bmp_test_raw(s); + stbi__rewind(s); + return r; +} + + +// returns 0..31 for the highest set bit +static int stbi__high_bit(unsigned int z) +{ + int n=0; + if (z == 0) return -1; + if (z >= 0x10000) { n += 16; z >>= 16; } + if (z >= 0x00100) { n += 8; z >>= 8; } + if (z >= 0x00010) { n += 4; z >>= 4; } + if (z >= 0x00004) { n += 2; z >>= 2; } + if (z >= 0x00002) { n += 1;/* >>= 1;*/ } + return n; +} + +static int stbi__bitcount(unsigned int a) +{ + a = (a & 0x55555555) + ((a >> 1) & 0x55555555); // max 2 + a = (a & 0x33333333) + ((a >> 2) & 0x33333333); // max 4 + a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits + a = (a + (a >> 8)); // max 16 per 8 bits + a = (a + (a >> 16)); // max 32 per 8 bits + return a & 0xff; +} + +// extract an arbitrarily-aligned N-bit value (N=bits) +// from v, and then make it 8-bits long and fractionally +// extend it to full full range. 
+static int stbi__shiftsigned(unsigned int v, int shift, int bits) +{ + static unsigned int mul_table[9] = { + 0, + 0xff/*0b11111111*/, 0x55/*0b01010101*/, 0x49/*0b01001001*/, 0x11/*0b00010001*/, + 0x21/*0b00100001*/, 0x41/*0b01000001*/, 0x81/*0b10000001*/, 0x01/*0b00000001*/, + }; + static unsigned int shift_table[9] = { + 0, 0,0,1,0,2,4,6,0, + }; + if (shift < 0) + v <<= -shift; + else + v >>= shift; + STBI_ASSERT(v < 256); + v >>= (8-bits); + STBI_ASSERT(bits >= 0 && bits <= 8); + return (int) ((unsigned) v * mul_table[bits]) >> shift_table[bits]; +} + +typedef struct +{ + int bpp, offset, hsz; + unsigned int mr,mg,mb,ma, all_a; + int extra_read; +} stbi__bmp_data; + +static int stbi__bmp_set_mask_defaults(stbi__bmp_data *info, int compress) +{ + // BI_BITFIELDS specifies masks explicitly, don't override + if (compress == 3) + return 1; + + if (compress == 0) { + if (info->bpp == 16) { + info->mr = 31u << 10; + info->mg = 31u << 5; + info->mb = 31u << 0; + } else if (info->bpp == 32) { + info->mr = 0xffu << 16; + info->mg = 0xffu << 8; + info->mb = 0xffu << 0; + info->ma = 0xffu << 24; + info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0 + } else { + // otherwise, use defaults, which is all-0 + info->mr = info->mg = info->mb = info->ma = 0; + } + return 1; + } + return 0; // error +} + +static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) +{ + int hsz; + if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') return stbi__errpuc("not BMP", "Corrupt BMP"); + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + info->offset = stbi__get32le(s); + info->hsz = hsz = stbi__get32le(s); + info->mr = info->mg = info->mb = info->ma = 0; + info->extra_read = 14; + + if (info->offset < 0) return stbi__errpuc("bad BMP", "bad BMP"); + + if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return stbi__errpuc("unknown BMP", "BMP type not 
supported: unknown"); + if (hsz == 12) { + s->img_x = stbi__get16le(s); + s->img_y = stbi__get16le(s); + } else { + s->img_x = stbi__get32le(s); + s->img_y = stbi__get32le(s); + } + if (stbi__get16le(s) != 1) return stbi__errpuc("bad BMP", "bad BMP"); + info->bpp = stbi__get16le(s); + if (hsz != 12) { + int compress = stbi__get32le(s); + if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE"); + if (compress >= 4) return stbi__errpuc("BMP JPEG/PNG", "BMP type not supported: unsupported compression"); // this includes PNG/JPEG modes + if (compress == 3 && info->bpp != 16 && info->bpp != 32) return stbi__errpuc("bad BMP", "bad BMP"); // bitfields requires 16 or 32 bits/pixel + stbi__get32le(s); // discard sizeof + stbi__get32le(s); // discard hres + stbi__get32le(s); // discard vres + stbi__get32le(s); // discard colorsused + stbi__get32le(s); // discard max important + if (hsz == 40 || hsz == 56) { + if (hsz == 56) { + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + } + if (info->bpp == 16 || info->bpp == 32) { + if (compress == 0) { + stbi__bmp_set_mask_defaults(info, compress); + } else if (compress == 3) { + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + info->extra_read += 12; + // not documented, but generated by photoshop and handled by mspaint + if (info->mr == info->mg && info->mg == info->mb) { + // ?!?!? 
+ return stbi__errpuc("bad BMP", "bad BMP"); + } + } else + return stbi__errpuc("bad BMP", "bad BMP"); + } + } else { + // V4/V5 header + int i; + if (hsz != 108 && hsz != 124) + return stbi__errpuc("bad BMP", "bad BMP"); + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + info->ma = stbi__get32le(s); + if (compress != 3) // override mr/mg/mb unless in BI_BITFIELDS mode, as per docs + stbi__bmp_set_mask_defaults(info, compress); + stbi__get32le(s); // discard color space + for (i=0; i < 12; ++i) + stbi__get32le(s); // discard color space parameters + if (hsz == 124) { + stbi__get32le(s); // discard rendering intent + stbi__get32le(s); // discard offset of profile data + stbi__get32le(s); // discard size of profile data + stbi__get32le(s); // discard reserved + } + } + } + return (void *) 1; +} + + +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + unsigned int mr=0,mg=0,mb=0,ma=0, all_a; + stbi_uc pal[256][4]; + int psize=0,i,j,width; + int flip_vertically, pad, target; + stbi__bmp_data info; + STBI_NOTUSED(ri); + + info.all_a = 255; + if (stbi__bmp_parse_header(s, &info) == NULL) + return NULL; // error code already set + + flip_vertically = ((int) s->img_y) > 0; + s->img_y = abs((int) s->img_y); + + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + mr = info.mr; + mg = info.mg; + mb = info.mb; + ma = info.ma; + all_a = info.all_a; + + if (info.hsz == 12) { + if (info.bpp < 24) + psize = (info.offset - info.extra_read - 24) / 3; + } else { + if (info.bpp < 16) + psize = (info.offset - info.extra_read - info.hsz) >> 2; + } + if (psize == 0) { + // accept some number of extra bytes after the header, but if the offset points either to before + // the header ends or implies a large amount of 
extra data, reject the file as malformed + int bytes_read_so_far = s->callback_already_read + (int)(s->img_buffer - s->img_buffer_original); + int header_limit = 1024; // max we actually read is below 256 bytes currently. + int extra_data_limit = 256*4; // what ordinarily goes here is a palette; 256 entries*4 bytes is its max size. + if (bytes_read_so_far <= 0 || bytes_read_so_far > header_limit) { + return stbi__errpuc("bad header", "Corrupt BMP"); + } + // we established that bytes_read_so_far is positive and sensible. + // the first half of this test rejects offsets that are either too small positives, or + // negative, and guarantees that info.offset >= bytes_read_so_far > 0. this in turn + // ensures the number computed in the second half of the test can't overflow. + if (info.offset < bytes_read_so_far || info.offset - bytes_read_so_far > extra_data_limit) { + return stbi__errpuc("bad offset", "Corrupt BMP"); + } else { + stbi__skip(s, info.offset - bytes_read_so_far); + } + } + + if (info.bpp == 24 && ma == 0xff000000) + s->img_n = 3; + else + s->img_n = ma ? 4 : 3; + if (req_comp && req_comp >= 3) // we can directly decode 3 or 4 + target = req_comp; + else + target = s->img_n; // if they want monochrome, we'll post-convert + + // sanity-check size + if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "Corrupt BMP"); + + out = (stbi_uc *) stbi__malloc_mad3(target, s->img_x, s->img_y, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + if (info.bpp < 16) { + int z=0; + if (psize == 0 || psize > 256) { STBI_FREE(out); return stbi__errpuc("invalid", "Corrupt BMP"); } + for (i=0; i < psize; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + if (info.hsz != 12) stbi__get8(s); + pal[i][3] = 255; + } + stbi__skip(s, info.offset - info.extra_read - info.hsz - psize * (info.hsz == 12 ? 
3 : 4)); + if (info.bpp == 1) width = (s->img_x + 7) >> 3; + else if (info.bpp == 4) width = (s->img_x + 1) >> 1; + else if (info.bpp == 8) width = s->img_x; + else { STBI_FREE(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); } + pad = (-width)&3; + if (info.bpp == 1) { + for (j=0; j < (int) s->img_y; ++j) { + int bit_offset = 7, v = stbi__get8(s); + for (i=0; i < (int) s->img_x; ++i) { + int color = (v>>bit_offset)&0x1; + out[z++] = pal[color][0]; + out[z++] = pal[color][1]; + out[z++] = pal[color][2]; + if (target == 4) out[z++] = 255; + if (i+1 == (int) s->img_x) break; + if((--bit_offset) < 0) { + bit_offset = 7; + v = stbi__get8(s); + } + } + stbi__skip(s, pad); + } + } else { + for (j=0; j < (int) s->img_y; ++j) { + for (i=0; i < (int) s->img_x; i += 2) { + int v=stbi__get8(s),v2=0; + if (info.bpp == 4) { + v2 = v & 15; + v >>= 4; + } + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + if (i+1 == (int) s->img_x) break; + v = (info.bpp == 8) ? 
stbi__get8(s) : v2; + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + } + stbi__skip(s, pad); + } + } + } else { + int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0; + int z = 0; + int easy=0; + stbi__skip(s, info.offset - info.extra_read - info.hsz); + if (info.bpp == 24) width = 3 * s->img_x; + else if (info.bpp == 16) width = 2*s->img_x; + else /* bpp = 32 and pad = 0 */ width=0; + pad = (-width) & 3; + if (info.bpp == 24) { + easy = 1; + } else if (info.bpp == 32) { + if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000) + easy = 2; + } + if (!easy) { + if (!mr || !mg || !mb) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } + // right shift amt to put high bit in position #7 + rshift = stbi__high_bit(mr)-7; rcount = stbi__bitcount(mr); + gshift = stbi__high_bit(mg)-7; gcount = stbi__bitcount(mg); + bshift = stbi__high_bit(mb)-7; bcount = stbi__bitcount(mb); + ashift = stbi__high_bit(ma)-7; acount = stbi__bitcount(ma); + if (rcount > 8 || gcount > 8 || bcount > 8 || acount > 8) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } + } + for (j=0; j < (int) s->img_y; ++j) { + if (easy) { + for (i=0; i < (int) s->img_x; ++i) { + unsigned char a; + out[z+2] = stbi__get8(s); + out[z+1] = stbi__get8(s); + out[z+0] = stbi__get8(s); + z += 3; + a = (easy == 2 ? stbi__get8(s) : 255); + all_a |= a; + if (target == 4) out[z++] = a; + } + } else { + int bpp = info.bpp; + for (i=0; i < (int) s->img_x; ++i) { + stbi__uint32 v = (bpp == 16 ? (stbi__uint32) stbi__get16le(s) : stbi__get32le(s)); + unsigned int a; + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount)); + a = (ma ? 
stbi__shiftsigned(v & ma, ashift, acount) : 255); + all_a |= a; + if (target == 4) out[z++] = STBI__BYTECAST(a); + } + } + stbi__skip(s, pad); + } + } + + // if alpha channel is all 0s, replace with all 255s + if (target == 4 && all_a == 0) + for (i=4*s->img_x*s->img_y-1; i >= 0; i -= 4) + out[i] = 255; + + if (flip_vertically) { + stbi_uc t; + for (j=0; j < (int) s->img_y>>1; ++j) { + stbi_uc *p1 = out + j *s->img_x*target; + stbi_uc *p2 = out + (s->img_y-1-j)*s->img_x*target; + for (i=0; i < (int) s->img_x*target; ++i) { + t = p1[i]; p1[i] = p2[i]; p2[i] = t; + } + } + } + + if (req_comp && req_comp != target) { + out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + *x = s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + return out; +} +#endif + +// Targa Truevision - TGA +// by Jonathan Dummer +#ifndef STBI_NO_TGA +// returns STBI_rgb or whatever, 0 on error +static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16) +{ + // only RGB or RGBA (incl. 
16bit) or grey allowed + if (is_rgb16) *is_rgb16 = 0; + switch(bits_per_pixel) { + case 8: return STBI_grey; + case 16: if(is_grey) return STBI_grey_alpha; + // fallthrough + case 15: if(is_rgb16) *is_rgb16 = 1; + return STBI_rgb; + case 24: // fallthrough + case 32: return bits_per_pixel/8; + default: return 0; + } +} + +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp) +{ + int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp; + int sz, tga_colormap_type; + stbi__get8(s); // discard Offset + tga_colormap_type = stbi__get8(s); // colormap type + if( tga_colormap_type > 1 ) { + stbi__rewind(s); + return 0; // only RGB or indexed allowed + } + tga_image_type = stbi__get8(s); // image type + if ( tga_colormap_type == 1 ) { // colormapped (paletted) image + if (tga_image_type != 1 && tga_image_type != 9) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip image x and y origin + tga_colormap_bpp = sz; + } else { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE + if ( (tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11) ) { + stbi__rewind(s); + return 0; // only RGB or grey allowed, +/- RLE + } + stbi__skip(s,9); // skip colormap specification and image x/y origin + tga_colormap_bpp = 0; + } + tga_w = stbi__get16le(s); + if( tga_w < 1 ) { + stbi__rewind(s); + return 0; // test width + } + tga_h = stbi__get16le(s); + if( tga_h < 1 ) { + stbi__rewind(s); + return 0; // test height + } + tga_bits_per_pixel = stbi__get8(s); // bits per pixel + stbi__get8(s); // ignore alpha bits + if (tga_colormap_bpp != 0) { + if((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16)) { + // when using a colormap, 
tga_bits_per_pixel is the size of the indexes + // I don't think anything but 8 or 16bit indexes makes sense + stbi__rewind(s); + return 0; + } + tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL); + } else { + tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL); + } + if(!tga_comp) { + stbi__rewind(s); + return 0; + } + if (x) *x = tga_w; + if (y) *y = tga_h; + if (comp) *comp = tga_comp; + return 1; // seems to have passed everything +} + +static int stbi__tga_test(stbi__context *s) +{ + int res = 0; + int sz, tga_color_type; + stbi__get8(s); // discard Offset + tga_color_type = stbi__get8(s); // color type + if ( tga_color_type > 1 ) goto errorEnd; // only RGB or indexed allowed + sz = stbi__get8(s); // image type + if ( tga_color_type == 1 ) { // colormapped (paletted) image + if (sz != 1 && sz != 9) goto errorEnd; // colortype 1 demands image type 1 or 9 + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + stbi__skip(s,4); // skip image x and y origin + } else { // "normal" image w/o colormap + if ( (sz != 2) && (sz != 3) && (sz != 10) && (sz != 11) ) goto errorEnd; // only RGB or grey allowed, +/- RLE + stbi__skip(s,9); // skip colormap specification and image x/y origin + } + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test width + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test height + sz = stbi__get8(s); // bits per pixel + if ( (tga_color_type == 1) && (sz != 8) && (sz != 16) ) goto errorEnd; // for colormapped images, bpp is size of an index + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + + res = 1; // if we got this far, everything's good and we can return 1 instead of 0 + +errorEnd: + stbi__rewind(s); + return res; +} + +// read 16bit value and convert to 24bit RGB 
+static void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out) +{ + stbi__uint16 px = (stbi__uint16)stbi__get16le(s); + stbi__uint16 fiveBitMask = 31; + // we have 3 channels with 5bits each + int r = (px >> 10) & fiveBitMask; + int g = (px >> 5) & fiveBitMask; + int b = px & fiveBitMask; + // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later + out[0] = (stbi_uc)((r * 255)/31); + out[1] = (stbi_uc)((g * 255)/31); + out[2] = (stbi_uc)((b * 255)/31); + + // some people claim that the most significant bit might be used for alpha + // (possibly if an alpha-bit is set in the "image descriptor byte") + // but that only made 16bit test images completely translucent.. + // so let's treat all 15 and 16bit TGAs as RGB with no alpha. +} + +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + // read in the TGA header stuff + int tga_offset = stbi__get8(s); + int tga_indexed = stbi__get8(s); + int tga_image_type = stbi__get8(s); + int tga_is_RLE = 0; + int tga_palette_start = stbi__get16le(s); + int tga_palette_len = stbi__get16le(s); + int tga_palette_bits = stbi__get8(s); + int tga_x_origin = stbi__get16le(s); + int tga_y_origin = stbi__get16le(s); + int tga_width = stbi__get16le(s); + int tga_height = stbi__get16le(s); + int tga_bits_per_pixel = stbi__get8(s); + int tga_comp, tga_rgb16=0; + int tga_inverted = stbi__get8(s); + // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?) 
+ // image data + unsigned char *tga_data; + unsigned char *tga_palette = NULL; + int i, j; + unsigned char raw_data[4] = {0}; + int RLE_count = 0; + int RLE_repeating = 0; + int read_next_pixel = 1; + STBI_NOTUSED(ri); + STBI_NOTUSED(tga_x_origin); // @TODO + STBI_NOTUSED(tga_y_origin); // @TODO + + if (tga_height > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (tga_width > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + // do a tiny bit of precessing + if ( tga_image_type >= 8 ) + { + tga_image_type -= 8; + tga_is_RLE = 1; + } + tga_inverted = 1 - ((tga_inverted >> 5) & 1); + + // If I'm paletted, then I'll use the number of bits from the palette + if ( tga_indexed ) tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16); + else tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16); + + if(!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured basic consistency + return stbi__errpuc("bad format", "Can't find out TGA pixelformat"); + + // tga info + *x = tga_width; + *y = tga_height; + if (comp) *comp = tga_comp; + + if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0)) + return stbi__errpuc("too large", "Corrupt TGA"); + + tga_data = (unsigned char*)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0); + if (!tga_data) return stbi__errpuc("outofmem", "Out of memory"); + + // skip to the data's starting position (offset usually = 0) + stbi__skip(s, tga_offset ); + + if ( !tga_indexed && !tga_is_RLE && !tga_rgb16 ) { + for (i=0; i < tga_height; ++i) { + int row = tga_inverted ? tga_height -i - 1 : i; + stbi_uc *tga_row = tga_data + row*tga_width*tga_comp; + stbi__getn(s, tga_row, tga_width * tga_comp); + } + } else { + // do I need to load a palette? + if ( tga_indexed) + { + if (tga_palette_len == 0) { /* you have to have at least one entry! 
*/ + STBI_FREE(tga_data); + return stbi__errpuc("bad palette", "Corrupt TGA"); + } + + // any data to skip? (offset usually = 0) + stbi__skip(s, tga_palette_start ); + // load the palette + tga_palette = (unsigned char*)stbi__malloc_mad2(tga_palette_len, tga_comp, 0); + if (!tga_palette) { + STBI_FREE(tga_data); + return stbi__errpuc("outofmem", "Out of memory"); + } + if (tga_rgb16) { + stbi_uc *pal_entry = tga_palette; + STBI_ASSERT(tga_comp == STBI_rgb); + for (i=0; i < tga_palette_len; ++i) { + stbi__tga_read_rgb16(s, pal_entry); + pal_entry += tga_comp; + } + } else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp)) { + STBI_FREE(tga_data); + STBI_FREE(tga_palette); + return stbi__errpuc("bad palette", "Corrupt TGA"); + } + } + // load the data + for (i=0; i < tga_width * tga_height; ++i) + { + // if I'm in RLE mode, do I need to get a RLE stbi__pngchunk? + if ( tga_is_RLE ) + { + if ( RLE_count == 0 ) + { + // yep, get the next byte as a RLE command + int RLE_cmd = stbi__get8(s); + RLE_count = 1 + (RLE_cmd & 127); + RLE_repeating = RLE_cmd >> 7; + read_next_pixel = 1; + } else if ( !RLE_repeating ) + { + read_next_pixel = 1; + } + } else + { + read_next_pixel = 1; + } + // OK, if I need to read a pixel, do it now + if ( read_next_pixel ) + { + // load however much data we did have + if ( tga_indexed ) + { + // read in index, then perform the lookup + int pal_idx = (tga_bits_per_pixel == 8) ? 
stbi__get8(s) : stbi__get16le(s); + if ( pal_idx >= tga_palette_len ) { + // invalid index + pal_idx = 0; + } + pal_idx *= tga_comp; + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = tga_palette[pal_idx+j]; + } + } else if(tga_rgb16) { + STBI_ASSERT(tga_comp == STBI_rgb); + stbi__tga_read_rgb16(s, raw_data); + } else { + // read in the data raw + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = stbi__get8(s); + } + } + // clear the reading flag for the next pixel + read_next_pixel = 0; + } // end of reading a pixel + + // copy data + for (j = 0; j < tga_comp; ++j) + tga_data[i*tga_comp+j] = raw_data[j]; + + // in case we're in RLE mode, keep counting down + --RLE_count; + } + // do I need to invert the image? + if ( tga_inverted ) + { + for (j = 0; j*2 < tga_height; ++j) + { + int index1 = j * tga_width * tga_comp; + int index2 = (tga_height - 1 - j) * tga_width * tga_comp; + for (i = tga_width * tga_comp; i > 0; --i) + { + unsigned char temp = tga_data[index1]; + tga_data[index1] = tga_data[index2]; + tga_data[index2] = temp; + ++index1; + ++index2; + } + } + } + // clear my palette, if I had one + if ( tga_palette != NULL ) + { + STBI_FREE( tga_palette ); + } + } + + // swap RGB - if the source data was RGB16, it already is in the right order + if (tga_comp >= 3 && !tga_rgb16) + { + unsigned char* tga_pixel = tga_data; + for (i=0; i < tga_width * tga_height; ++i) + { + unsigned char temp = tga_pixel[0]; + tga_pixel[0] = tga_pixel[2]; + tga_pixel[2] = temp; + tga_pixel += tga_comp; + } + } + + // convert to target component count + if (req_comp && req_comp != tga_comp) + tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height); + + // the things I do to get rid of an error message, and yet keep + // Microsoft's C compilers happy... 
[8^( + tga_palette_start = tga_palette_len = tga_palette_bits = + tga_x_origin = tga_y_origin = 0; + STBI_NOTUSED(tga_palette_start); + // OK, done + return tga_data; +} +#endif + +// ************************************************************************************************* +// Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked by STB + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s) +{ + int r = (stbi__get32be(s) == 0x38425053); + stbi__rewind(s); + return r; +} + +static int stbi__psd_decode_rle(stbi__context *s, stbi_uc *p, int pixelCount) +{ + int count, nleft, len; + + count = 0; + while ((nleft = pixelCount - count) > 0) { + len = stbi__get8(s); + if (len == 128) { + // No-op. + } else if (len < 128) { + // Copy next len+1 bytes literally. + len++; + if (len > nleft) return 0; // corrupt data + count += len; + while (len) { + *p = stbi__get8(s); + p += 4; + len--; + } + } else if (len > 128) { + stbi_uc val; + // Next -len+1 bytes in the dest are replicated from next source byte. + // (Interpret len as a negative 8-bit int.) + len = 257 - len; + if (len > nleft) return 0; // corrupt data + val = stbi__get8(s); + count += len; + while (len) { + *p = val; + p += 4; + len--; + } + } + } + + return 1; +} + +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + int pixelCount; + int channelCount, compression; + int channel, i; + int bitdepth; + int w,h; + stbi_uc *out; + STBI_NOTUSED(ri); + + // Check identifier + if (stbi__get32be(s) != 0x38425053) // "8BPS" + return stbi__errpuc("not PSD", "Corrupt PSD image"); + + // Check file type version. + if (stbi__get16be(s) != 1) + return stbi__errpuc("wrong version", "Unsupported version of PSD image"); + + // Skip 6 reserved bytes. + stbi__skip(s, 6 ); + + // Read the number of channels (R, G, B, A, etc). 
+ channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) + return stbi__errpuc("wrong channel count", "Unsupported number of channels in PSD image"); + + // Read the rows and columns of the image. + h = stbi__get32be(s); + w = stbi__get32be(s); + + if (h > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (w > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + // Make sure the depth is 8 bits. + bitdepth = stbi__get16be(s); + if (bitdepth != 8 && bitdepth != 16) + return stbi__errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit"); + + // Make sure the color mode is RGB. + // Valid options are: + // 0: Bitmap + // 1: Grayscale + // 2: Indexed color + // 3: RGB color + // 4: CMYK color + // 7: Multichannel + // 8: Duotone + // 9: Lab color + if (stbi__get16be(s) != 3) + return stbi__errpuc("wrong color format", "PSD is not in RGB color format"); + + // Skip the Mode Data. (It's the palette for indexed color; other info for other modes.) + stbi__skip(s,stbi__get32be(s) ); + + // Skip the image resources. (resolution, pen tool paths, etc) + stbi__skip(s, stbi__get32be(s) ); + + // Skip the reserved data. + stbi__skip(s, stbi__get32be(s) ); + + // Find out if the data is compressed. + // Known values: + // 0: no compression + // 1: RLE compressed + compression = stbi__get16be(s); + if (compression > 1) + return stbi__errpuc("bad compression", "PSD has an unknown compression format"); + + // Check size + if (!stbi__mad3sizes_valid(4, w, h, 0)) + return stbi__errpuc("too large", "Corrupt PSD"); + + // Create the destination image. + + if (!compression && bitdepth == 16 && bpc == 16) { + out = (stbi_uc *) stbi__malloc_mad3(8, w, h, 0); + ri->bits_per_channel = 16; + } else + out = (stbi_uc *) stbi__malloc(4 * w*h); + + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + pixelCount = w*h; + + // Initialize the data to zero. 
+ //memset( out, 0, pixelCount * 4 ); + + // Finally, the image data. + if (compression) { + // RLE as used by .PSD and .TIFF + // Loop until you get the number of unpacked bytes you are expecting: + // Read the next source byte into n. + // If n is between 0 and 127 inclusive, copy the next n+1 bytes literally. + // Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times. + // Else if n is 128, noop. + // Endloop + + // The RLE-compressed data is preceded by a 2-byte data count for each row in the data, + // which we're going to just skip. + stbi__skip(s, h * channelCount * 2 ); + + // Read the RLE data by channel. + for (channel = 0; channel < 4; channel++) { + stbi_uc *p; + + p = out+channel; + if (channel >= channelCount) { + // Fill this channel with default data. + for (i = 0; i < pixelCount; i++, p += 4) + *p = (channel == 3 ? 255 : 0); + } else { + // Read the RLE data. + if (!stbi__psd_decode_rle(s, p, pixelCount)) { + STBI_FREE(out); + return stbi__errpuc("corrupt", "bad RLE data"); + } + } + } + + } else { + // We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...) + // where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image. + + // Read the data by channel. + for (channel = 0; channel < 4; channel++) { + if (channel >= channelCount) { + // Fill this channel with default data. + if (bitdepth == 16 && bpc == 16) { + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + stbi__uint16 val = channel == 3 ? 65535 : 0; + for (i = 0; i < pixelCount; i++, q += 4) + *q = val; + } else { + stbi_uc *p = out+channel; + stbi_uc val = channel == 3 ? 
255 : 0; + for (i = 0; i < pixelCount; i++, p += 4) + *p = val; + } + } else { + if (ri->bits_per_channel == 16) { // output bpc + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + for (i = 0; i < pixelCount; i++, q += 4) + *q = (stbi__uint16) stbi__get16be(s); + } else { + stbi_uc *p = out+channel; + if (bitdepth == 16) { // input bpc + for (i = 0; i < pixelCount; i++, p += 4) + *p = (stbi_uc) (stbi__get16be(s) >> 8); + } else { + for (i = 0; i < pixelCount; i++, p += 4) + *p = stbi__get8(s); + } + } + } + } + } + + // remove weird white matte from PSD + if (channelCount >= 4) { + if (ri->bits_per_channel == 16) { + for (i=0; i < w*h; ++i) { + stbi__uint16 *pixel = (stbi__uint16 *) out + 4*i; + if (pixel[3] != 0 && pixel[3] != 65535) { + float a = pixel[3] / 65535.0f; + float ra = 1.0f / a; + float inv_a = 65535.0f * (1 - ra); + pixel[0] = (stbi__uint16) (pixel[0]*ra + inv_a); + pixel[1] = (stbi__uint16) (pixel[1]*ra + inv_a); + pixel[2] = (stbi__uint16) (pixel[2]*ra + inv_a); + } + } + } else { + for (i=0; i < w*h; ++i) { + unsigned char *pixel = out + 4*i; + if (pixel[3] != 0 && pixel[3] != 255) { + float a = pixel[3] / 255.0f; + float ra = 1.0f / a; + float inv_a = 255.0f * (1 - ra); + pixel[0] = (unsigned char) (pixel[0]*ra + inv_a); + pixel[1] = (unsigned char) (pixel[1]*ra + inv_a); + pixel[2] = (unsigned char) (pixel[2]*ra + inv_a); + } + } + } + } + + // convert to desired output format + if (req_comp && req_comp != 4) { + if (ri->bits_per_channel == 16) + out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, 4, req_comp, w, h); + else + out = stbi__convert_format(out, 4, req_comp, w, h); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + if (comp) *comp = 4; + *y = h; + *x = w; + + return out; +} +#endif + +// ************************************************************************************************* +// Softimage PIC loader +// by Tom Seddon +// +// See 
http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format +// See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/ + +#ifndef STBI_NO_PIC +static int stbi__pic_is4(stbi__context *s,const char *str) +{ + int i; + for (i=0; i<4; ++i) + if (stbi__get8(s) != (stbi_uc)str[i]) + return 0; + + return 1; +} + +static int stbi__pic_test_core(stbi__context *s) +{ + int i; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) + return 0; + + for(i=0;i<84;++i) + stbi__get8(s); + + if (!stbi__pic_is4(s,"PICT")) + return 0; + + return 1; +} + +typedef struct +{ + stbi_uc size,type,channel; +} stbi__pic_packet; + +static stbi_uc *stbi__readval(stbi__context *s, int channel, stbi_uc *dest) +{ + int mask=0x80, i; + + for (i=0; i<4; ++i, mask>>=1) { + if (channel & mask) { + if (stbi__at_eof(s)) return stbi__errpuc("bad file","PIC file too short"); + dest[i]=stbi__get8(s); + } + } + + return dest; +} + +static void stbi__copyval(int channel,stbi_uc *dest,const stbi_uc *src) +{ + int mask=0x80,i; + + for (i=0;i<4; ++i, mask>>=1) + if (channel&mask) + dest[i]=src[i]; +} + +static stbi_uc *stbi__pic_load_core(stbi__context *s,int width,int height,int *comp, stbi_uc *result) +{ + int act_comp=0,num_packets=0,y,chained; + stbi__pic_packet packets[10]; + + // this will (should...) cater for even some bizarre stuff like having data + // for the same channel in multiple packets. + do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return stbi__errpuc("bad format","too many packets"); + + packet = &packets[num_packets++]; + + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + + act_comp |= packet->channel; + + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (reading packets)"); + if (packet->size != 8) return stbi__errpuc("bad format","packet isn't 8bpp"); + } while (chained); + + *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel? 
+ + for(y=0; ytype) { + default: + return stbi__errpuc("bad format","packet has bad compression type"); + + case 0: {//uncompressed + int x; + + for(x=0;xchannel,dest)) + return 0; + break; + } + + case 1://Pure RLE + { + int left=width, i; + + while (left>0) { + stbi_uc count,value[4]; + + count=stbi__get8(s); + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pure read count)"); + + if (count > left) + count = (stbi_uc) left; + + if (!stbi__readval(s,packet->channel,value)) return 0; + + for(i=0; ichannel,dest,value); + left -= count; + } + } + break; + + case 2: {//Mixed RLE + int left=width; + while (left>0) { + int count = stbi__get8(s), i; + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (mixed read count)"); + + if (count >= 128) { // Repeated + stbi_uc value[4]; + + if (count==128) + count = stbi__get16be(s); + else + count -= 127; + if (count > left) + return stbi__errpuc("bad file","scanline overrun"); + + if (!stbi__readval(s,packet->channel,value)) + return 0; + + for(i=0;ichannel,dest,value); + } else { // Raw + ++count; + if (count>left) return stbi__errpuc("bad file","scanline overrun"); + + for(i=0;ichannel,dest)) + return 0; + } + left-=count; + } + break; + } + } + } + } + + return result; +} + +static void *stbi__pic_load(stbi__context *s,int *px,int *py,int *comp,int req_comp, stbi__result_info *ri) +{ + stbi_uc *result; + int i, x,y, internal_comp; + STBI_NOTUSED(ri); + + if (!comp) comp = &internal_comp; + + for (i=0; i<92; ++i) + stbi__get8(s); + + x = stbi__get16be(s); + y = stbi__get16be(s); + + if (y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pic header)"); + if (!stbi__mad3sizes_valid(x, y, 4, 0)) return stbi__errpuc("too large", "PIC image too large to decode"); + + stbi__get32be(s); 
//skip `ratio'
   stbi__get16be(s); //skip `fields'
   stbi__get16be(s); //skip `pad'

   // intermediate buffer is RGBA
   result = (stbi_uc *) stbi__malloc_mad3(x, y, 4, 0);
   if (!result) return stbi__errpuc("outofmem", "Out of memory");
   memset(result, 0xff, x*y*4);

   if (!stbi__pic_load_core(s,x,y,comp, result)) {
      STBI_FREE(result);
      result=0;
   }
   *px = x;
   *py = y;
   // req_comp == 0 means "give me the file's natural component count"
   if (req_comp == 0) req_comp = *comp;
   result=stbi__convert_format(result,4,req_comp,x,y);

   return result;
}

// Non-destructive PIC probe: runs the header test, then rewinds the stream.
static int stbi__pic_test(stbi__context *s)
{
   int r = stbi__pic_test_core(s);
   stbi__rewind(s);
   return r;
}
#endif

// *************************************************************************************************
// GIF loader -- public domain by Jean-Marc Lienher -- simplified/shrunk by stb

#ifndef STBI_NO_GIF
// One LZW dictionary entry: a back-pointer chain plus the byte it appends.
typedef struct
{
   stbi__int16 prefix;
   stbi_uc first;
   stbi_uc suffix;
} stbi__gif_lzw;

// All decoder state for one GIF stream (shared across animation frames).
typedef struct
{
   int w,h;
   stbi_uc *out;                 // output buffer (always 4 components)
   stbi_uc *background;          // The current "background" as far as a gif is concerned
   stbi_uc *history;             // per-pixel flag: written during the current frame
   int flags, bgindex, ratio, transparent, eflags;
   stbi_uc  pal[256][4];         // global color table (RGBA)
   stbi_uc lpal[256][4];         // local color table (RGBA)
   stbi__gif_lzw codes[8192];    // LZW dictionary (12-bit code space)
   stbi_uc *color_table;         // points at pal or lpal for the current frame
   int parse, step;              // interlace pass bookkeeping
   int lflags;                   // local image descriptor flags
   int start_x, start_y;         // frame rectangle, in byte offsets into out
   int max_x, max_y;
   int cur_x, cur_y;             // write cursor, in byte offsets into out
   int line_size;                // bytes per output row (w * 4)
   int delay;                    // frame delay in 1/1000ths of a second
} stbi__gif;

// Raw signature check: "GIF87a" or "GIF89a". Does not rewind.
static int stbi__gif_test_raw(stbi__context *s)
{
   int sz;
   if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') return 0;
   sz = stbi__get8(s);
   if (sz != '9' && sz != '7') return 0;
   if (stbi__get8(s) != 'a') return 0;
   return 1;
}

// Non-destructive GIF probe: signature check followed by a rewind.
static int stbi__gif_test(stbi__context *s)
{
   int r = stbi__gif_test_raw(s);
   stbi__rewind(s);
   return r;
}

// Read num_entries BGR triples into an RGBA palette; entry 'transp'
// (if >= 0) gets alpha 0, all others alpha 255.
static void stbi__gif_parse_colortable(stbi__context *s, stbi_uc pal[256][4], int num_entries, int transp)
{
   int i;
   for (i=0; i < num_entries; ++i) {
      pal[i][2] = stbi__get8(s);
      pal[i][1] = stbi__get8(s);
      pal[i][0] = stbi__get8(s);
      pal[i][3] = transp == i ? 0 : 255;
   }
}

// Parse the logical screen descriptor (and global color table unless
// is_info, which stops after the dimensions). Returns 1 on success.
static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_info)
{
   stbi_uc version;
   if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8')
      return stbi__err("not GIF", "Corrupt GIF");

   version = stbi__get8(s);
   if (version != '7' && version != '9')    return stbi__err("not GIF", "Corrupt GIF");
   if (stbi__get8(s) != 'a')                return stbi__err("not GIF", "Corrupt GIF");

   stbi__g_failure_reason = "";
   g->w = stbi__get16le(s);
   g->h = stbi__get16le(s);
   g->flags = stbi__get8(s);
   g->bgindex = stbi__get8(s);
   g->ratio = stbi__get8(s);
   g->transparent = -1;

   if (g->w > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)");
   if (g->h > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)");

   if (comp != 0) *comp = 4;  // can't actually tell whether it's 3 or 4 until we parse the comments

   if (is_info) return 1;

   // flags bit 7: global color table present; low 3 bits: size exponent
   if (g->flags & 0x80)
      stbi__gif_parse_colortable(s,g->pal, 2 << (g->flags & 7), -1);

   return 1;
}

// Header-only query (x/y/comp) for stbi_info; rewinds on failure.
static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp)
{
   stbi__gif* g = (stbi__gif*) stbi__malloc(sizeof(stbi__gif));
   if (!g) return stbi__err("outofmem", "Out of memory");
   if (!stbi__gif_header(s, g, comp, 1)) {
      STBI_FREE(g);
      stbi__rewind( s );
      return 0;
   }
   if (x) *x = g->w;
   if (y) *y = g->h;
   STBI_FREE(g);
   return 1;
}

// Emit the pixel string for one LZW code at the current cursor, advancing
// the cursor (including interlace stepping).
static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code)
{
   stbi_uc *p, *c;
   int idx;

   // recurse to decode the prefixes, since the linked-list is backwards,
   // and working backwards through an interleaved image would be nasty
   if (g->codes[code].prefix >= 0)
      stbi__out_gif_code(g, g->codes[code].prefix);

   if (g->cur_y >= g->max_y) return;

   idx = g->cur_x + g->cur_y;
   p = &g->out[idx];
   g->history[idx / 4] = 1;

   c =
&g->color_table[g->codes[code].suffix * 4]; + if (c[3] > 128) { // don't render transparent pixels; + p[0] = c[2]; + p[1] = c[1]; + p[2] = c[0]; + p[3] = c[3]; + } + g->cur_x += 4; + + if (g->cur_x >= g->max_x) { + g->cur_x = g->start_x; + g->cur_y += g->step; + + while (g->cur_y >= g->max_y && g->parse > 0) { + g->step = (1 << g->parse) * g->line_size; + g->cur_y = g->start_y + (g->step >> 1); + --g->parse; + } + } +} + +static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g) +{ + stbi_uc lzw_cs; + stbi__int32 len, init_code; + stbi__uint32 first; + stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear; + stbi__gif_lzw *p; + + lzw_cs = stbi__get8(s); + if (lzw_cs > 12) return NULL; + clear = 1 << lzw_cs; + first = 1; + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + bits = 0; + valid_bits = 0; + for (init_code = 0; init_code < clear; init_code++) { + g->codes[init_code].prefix = -1; + g->codes[init_code].first = (stbi_uc) init_code; + g->codes[init_code].suffix = (stbi_uc) init_code; + } + + // support no starting clear code + avail = clear+2; + oldcode = -1; + + len = 0; + for(;;) { + if (valid_bits < codesize) { + if (len == 0) { + len = stbi__get8(s); // start new block + if (len == 0) + return g->out; + } + --len; + bits |= (stbi__int32) stbi__get8(s) << valid_bits; + valid_bits += 8; + } else { + stbi__int32 code = bits & codemask; + bits >>= codesize; + valid_bits -= codesize; + // @OPTIMIZE: is there some way we can accelerate the non-clear path? 
+ if (code == clear) { // clear code + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + avail = clear + 2; + oldcode = -1; + first = 0; + } else if (code == clear + 1) { // end of stream code + stbi__skip(s, len); + while ((len = stbi__get8(s)) > 0) + stbi__skip(s,len); + return g->out; + } else if (code <= avail) { + if (first) { + return stbi__errpuc("no clear code", "Corrupt GIF"); + } + + if (oldcode >= 0) { + p = &g->codes[avail++]; + if (avail > 8192) { + return stbi__errpuc("too many codes", "Corrupt GIF"); + } + + p->prefix = (stbi__int16) oldcode; + p->first = g->codes[oldcode].first; + p->suffix = (code == avail) ? p->first : g->codes[code].first; + } else if (code == avail) + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + + stbi__out_gif_code(g, (stbi__uint16) code); + + if ((avail & codemask) == 0 && avail <= 0x0FFF) { + codesize++; + codemask = (1 << codesize) - 1; + } + + oldcode = code; + } else { + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + } + } + } +} + +// this function is designed to support animated gifs, although stb_image doesn't support it +// two back is the image from two frames ago, used for a very specific disposal format +static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp, stbi_uc *two_back) +{ + int dispose; + int first_frame; + int pi; + int pcount; + STBI_NOTUSED(req_comp); + + // on first frame, any non-written pixels get the background colour (non-transparent) + first_frame = 0; + if (g->out == 0) { + if (!stbi__gif_header(s, g, comp,0)) return 0; // stbi__g_failure_reason set by stbi__gif_header + if (!stbi__mad3sizes_valid(4, g->w, g->h, 0)) + return stbi__errpuc("too large", "GIF image is too large"); + pcount = g->w * g->h; + g->out = (stbi_uc *) stbi__malloc(4 * pcount); + g->background = (stbi_uc *) stbi__malloc(4 * pcount); + g->history = (stbi_uc *) stbi__malloc(pcount); + if (!g->out || !g->background || !g->history) + return 
stbi__errpuc("outofmem", "Out of memory"); + + // image is treated as "transparent" at the start - ie, nothing overwrites the current background; + // background colour is only used for pixels that are not rendered first frame, after that "background" + // color refers to the color that was there the previous frame. + memset(g->out, 0x00, 4 * pcount); + memset(g->background, 0x00, 4 * pcount); // state of the background (starts transparent) + memset(g->history, 0x00, pcount); // pixels that were affected previous frame + first_frame = 1; + } else { + // second frame - how do we dispose of the previous one? + dispose = (g->eflags & 0x1C) >> 2; + pcount = g->w * g->h; + + if ((dispose == 3) && (two_back == 0)) { + dispose = 2; // if I don't have an image to revert back to, default to the old background + } + + if (dispose == 3) { // use previous graphic + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &two_back[pi * 4], 4 ); + } + } + } else if (dispose == 2) { + // restore what was changed last frame to background before that frame; + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &g->background[pi * 4], 4 ); + } + } + } else { + // This is a non-disposal case eithe way, so just + // leave the pixels as is, and they will become the new background + // 1: do not dispose + // 0: not specified. 
+ } + + // background is what out is after the undoing of the previou frame; + memcpy( g->background, g->out, 4 * g->w * g->h ); + } + + // clear my history; + memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame + + for (;;) { + int tag = stbi__get8(s); + switch (tag) { + case 0x2C: /* Image Descriptor */ + { + stbi__int32 x, y, w, h; + stbi_uc *o; + + x = stbi__get16le(s); + y = stbi__get16le(s); + w = stbi__get16le(s); + h = stbi__get16le(s); + if (((x + w) > (g->w)) || ((y + h) > (g->h))) + return stbi__errpuc("bad Image Descriptor", "Corrupt GIF"); + + g->line_size = g->w * 4; + g->start_x = x * 4; + g->start_y = y * g->line_size; + g->max_x = g->start_x + w * 4; + g->max_y = g->start_y + h * g->line_size; + g->cur_x = g->start_x; + g->cur_y = g->start_y; + + // if the width of the specified rectangle is 0, that means + // we may not see *any* pixels or the image is malformed; + // to make sure this is caught, move the current y down to + // max_y (which is what out_gif_code checks). + if (w == 0) + g->cur_y = g->max_y; + + g->lflags = stbi__get8(s); + + if (g->lflags & 0x40) { + g->step = 8 * g->line_size; // first interlaced spacing + g->parse = 3; + } else { + g->step = g->line_size; + g->parse = 0; + } + + if (g->lflags & 0x80) { + stbi__gif_parse_colortable(s,g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? 
g->transparent : -1); + g->color_table = (stbi_uc *) g->lpal; + } else if (g->flags & 0x80) { + g->color_table = (stbi_uc *) g->pal; + } else + return stbi__errpuc("missing color table", "Corrupt GIF"); + + o = stbi__process_gif_raster(s, g); + if (!o) return NULL; + + // if this was the first frame, + pcount = g->w * g->h; + if (first_frame && (g->bgindex > 0)) { + // if first frame, any pixel not drawn to gets the background color + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi] == 0) { + g->pal[g->bgindex][3] = 255; // just in case it was made transparent, undo that; It will be reset next frame if need be; + memcpy( &g->out[pi * 4], &g->pal[g->bgindex], 4 ); + } + } + } + + return o; + } + + case 0x21: // Comment Extension. + { + int len; + int ext = stbi__get8(s); + if (ext == 0xF9) { // Graphic Control Extension. + len = stbi__get8(s); + if (len == 4) { + g->eflags = stbi__get8(s); + g->delay = 10 * stbi__get16le(s); // delay - 1/100th of a second, saving as 1/1000ths. + + // unset old transparent + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 255; + } + if (g->eflags & 0x01) { + g->transparent = stbi__get8(s); + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 0; + } + } else { + // don't need transparent + stbi__skip(s, 1); + g->transparent = -1; + } + } else { + stbi__skip(s, len); + break; + } + } + while ((len = stbi__get8(s)) != 0) { + stbi__skip(s, len); + } + break; + } + + case 0x3B: // gif stream termination code + return (stbi_uc *) s; // using '1' causes warning on some compilers + + default: + return stbi__errpuc("unknown code", "Corrupt GIF"); + } + } +} + +static void *stbi__load_gif_main_outofmem(stbi__gif *g, stbi_uc *out, int **delays) +{ + STBI_FREE(g->out); + STBI_FREE(g->history); + STBI_FREE(g->background); + + if (out) STBI_FREE(out); + if (delays && *delays) STBI_FREE(*delays); + return stbi__errpuc("outofmem", "Out of memory"); +} + +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, 
int *y, int *z, int *comp, int req_comp) +{ + if (stbi__gif_test(s)) { + int layers = 0; + stbi_uc *u = 0; + stbi_uc *out = 0; + stbi_uc *two_back = 0; + stbi__gif g; + int stride; + int out_size = 0; + int delays_size = 0; + + STBI_NOTUSED(out_size); + STBI_NOTUSED(delays_size); + + memset(&g, 0, sizeof(g)); + if (delays) { + *delays = 0; + } + + do { + u = stbi__gif_load_next(s, &g, comp, req_comp, two_back); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + + if (u) { + *x = g.w; + *y = g.h; + ++layers; + stride = g.w * g.h * 4; + + if (out) { + void *tmp = (stbi_uc*) STBI_REALLOC_SIZED( out, out_size, layers * stride ); + if (!tmp) + return stbi__load_gif_main_outofmem(&g, out, delays); + else { + out = (stbi_uc*) tmp; + out_size = layers * stride; + } + + if (delays) { + int *new_delays = (int*) STBI_REALLOC_SIZED( *delays, delays_size, sizeof(int) * layers ); + if (!new_delays) + return stbi__load_gif_main_outofmem(&g, out, delays); + *delays = new_delays; + delays_size = layers * sizeof(int); + } + } else { + out = (stbi_uc*)stbi__malloc( layers * stride ); + if (!out) + return stbi__load_gif_main_outofmem(&g, out, delays); + out_size = layers * stride; + if (delays) { + *delays = (int*) stbi__malloc( layers * sizeof(int) ); + if (!*delays) + return stbi__load_gif_main_outofmem(&g, out, delays); + delays_size = layers * sizeof(int); + } + } + memcpy( out + ((layers - 1) * stride), u, stride ); + if (layers >= 2) { + two_back = out - 2 * stride; + } + + if (delays) { + (*delays)[layers - 1U] = g.delay; + } + } + } while (u != 0); + + // free temp buffer; + STBI_FREE(g.out); + STBI_FREE(g.history); + STBI_FREE(g.background); + + // do the final conversion after loading everything; + if (req_comp && req_comp != 4) + out = stbi__convert_format(out, 4, req_comp, layers * g.w, g.h); + + *z = layers; + return out; + } else { + return stbi__errpuc("not GIF", "Image was not as a gif type."); + } +} + +static void *stbi__gif_load(stbi__context *s, int 
*x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *u = 0; + stbi__gif g; + memset(&g, 0, sizeof(g)); + STBI_NOTUSED(ri); + + u = stbi__gif_load_next(s, &g, comp, req_comp, 0); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + if (u) { + *x = g.w; + *y = g.h; + + // moved conversion to after successful load so that the same + // can be done for multiple frames. + if (req_comp && req_comp != 4) + u = stbi__convert_format(u, 4, req_comp, g.w, g.h); + } else if (g.out) { + // if there was an error and we allocated an image buffer, free it! + STBI_FREE(g.out); + } + + // free buffers needed for multiple frame loading; + STBI_FREE(g.history); + STBI_FREE(g.background); + + return u; +} + +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp) +{ + return stbi__gif_info_raw(s,x,y,comp); +} +#endif + +// ************************************************************************************************* +// Radiance RGBE HDR loader +// originally by Nicolas Schulz +#ifndef STBI_NO_HDR +static int stbi__hdr_test_core(stbi__context *s, const char *signature) +{ + int i; + for (i=0; signature[i]; ++i) + if (stbi__get8(s) != signature[i]) + return 0; + stbi__rewind(s); + return 1; +} + +static int stbi__hdr_test(stbi__context* s) +{ + int r = stbi__hdr_test_core(s, "#?RADIANCE\n"); + stbi__rewind(s); + if(!r) { + r = stbi__hdr_test_core(s, "#?RGBE\n"); + stbi__rewind(s); + } + return r; +} + +#define STBI__HDR_BUFLEN 1024 +static char *stbi__hdr_gettoken(stbi__context *z, char *buffer) +{ + int len=0; + char c = '\0'; + + c = (char) stbi__get8(z); + + while (!stbi__at_eof(z) && c != '\n') { + buffer[len++] = c; + if (len == STBI__HDR_BUFLEN-1) { + // flush to end of line + while (!stbi__at_eof(z) && stbi__get8(z) != '\n') + ; + break; + } + c = (char) stbi__get8(z); + } + + buffer[len] = 0; + return buffer; +} + +static void stbi__hdr_convert(float *output, stbi_uc *input, int req_comp) +{ + if ( input[3] != 0 ) { + float f1; 
+ // Exponent + f1 = (float) ldexp(1.0f, input[3] - (int)(128 + 8)); + if (req_comp <= 2) + output[0] = (input[0] + input[1] + input[2]) * f1 / 3; + else { + output[0] = input[0] * f1; + output[1] = input[1] * f1; + output[2] = input[2] * f1; + } + if (req_comp == 2) output[1] = 1; + if (req_comp == 4) output[3] = 1; + } else { + switch (req_comp) { + case 4: output[3] = 1; /* fallthrough */ + case 3: output[0] = output[1] = output[2] = 0; + break; + case 2: output[1] = 1; /* fallthrough */ + case 1: output[0] = 0; + break; + } + } +} + +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int width, height; + stbi_uc *scanline; + float *hdr_data; + int len; + unsigned char count, value; + int i, j, k, c1,c2, z; + const char *headerToken; + STBI_NOTUSED(ri); + + // Check identifier + headerToken = stbi__hdr_gettoken(s,buffer); + if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 0) + return stbi__errpf("not HDR", "Corrupt HDR image"); + + // Parse header + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) return stbi__errpf("unsupported format", "Unsupported HDR format"); + + // Parse width and height + // can't use sscanf() if we're not using stdio! 
+ token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + height = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + width = (int) strtol(token, NULL, 10); + + if (height > STBI_MAX_DIMENSIONS) return stbi__errpf("too large","Very large image (corrupt?)"); + if (width > STBI_MAX_DIMENSIONS) return stbi__errpf("too large","Very large image (corrupt?)"); + + *x = width; + *y = height; + + if (comp) *comp = 3; + if (req_comp == 0) req_comp = 3; + + if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0)) + return stbi__errpf("too large", "HDR image is too large"); + + // Read data + hdr_data = (float *) stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0); + if (!hdr_data) + return stbi__errpf("outofmem", "Out of memory"); + + // Load image data + // image data is stored as some number of sca + if ( width < 8 || width >= 32768) { + // Read flat data + for (j=0; j < height; ++j) { + for (i=0; i < width; ++i) { + stbi_uc rgbe[4]; + main_decode_loop: + stbi__getn(s, rgbe, 4); + stbi__hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp); + } + } + } else { + // Read RLE-encoded data + scanline = NULL; + + for (j = 0; j < height; ++j) { + c1 = stbi__get8(s); + c2 = stbi__get8(s); + len = stbi__get8(s); + if (c1 != 2 || c2 != 2 || (len & 0x80)) { + // not run-length encoded, so we have to actually use THIS data as a decoded + // pixel (note this can't be a valid pixel--one of RGB must be >= 128) + stbi_uc rgbe[4]; + rgbe[0] = (stbi_uc) c1; + rgbe[1] = (stbi_uc) c2; + rgbe[2] = (stbi_uc) len; + rgbe[3] = (stbi_uc) stbi__get8(s); + stbi__hdr_convert(hdr_data, rgbe, req_comp); + i = 1; + j = 0; + STBI_FREE(scanline); + goto main_decode_loop; // yes, this makes no sense + } + len <<= 8; + len |= 
stbi__get8(s); + if (len != width) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); } + if (scanline == NULL) { + scanline = (stbi_uc *) stbi__malloc_mad2(width, 4, 0); + if (!scanline) { + STBI_FREE(hdr_data); + return stbi__errpf("outofmem", "Out of memory"); + } + } + + for (k = 0; k < 4; ++k) { + int nleft; + i = 0; + while ((nleft = width - i) > 0) { + count = stbi__get8(s); + if (count > 128) { + // Run + value = stbi__get8(s); + count -= 128; + if ((count == 0) || (count > nleft)) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = value; + } else { + // Dump + if ((count == 0) || (count > nleft)) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = stbi__get8(s); + } + } + } + for (i=0; i < width; ++i) + stbi__hdr_convert(hdr_data+(j*width + i)*req_comp, scanline + i*4, req_comp); + } + if (scanline) + STBI_FREE(scanline); + } + + return hdr_data; +} + +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int dummy; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if (stbi__hdr_test(s) == 0) { + stbi__rewind( s ); + return 0; + } + + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) { + stbi__rewind( s ); + return 0; + } + token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *y = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *x = (int) strtol(token, NULL, 10); + *comp = 3; + return 1; +} 
+#endif // STBI_NO_HDR + +#ifndef STBI_NO_BMP +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp) +{ + void *p; + stbi__bmp_data info; + + info.all_a = 255; + p = stbi__bmp_parse_header(s, &info); + if (p == NULL) { + stbi__rewind( s ); + return 0; + } + if (x) *x = s->img_x; + if (y) *y = s->img_y; + if (comp) { + if (info.bpp == 24 && info.ma == 0xff000000) + *comp = 3; + else + *comp = info.ma ? 4 : 3; + } + return 1; +} +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp) +{ + int channelCount, dummy, depth; + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + *y = stbi__get32be(s); + *x = stbi__get32be(s); + depth = stbi__get16be(s); + if (depth != 8 && depth != 16) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 3) { + stbi__rewind( s ); + return 0; + } + *comp = 4; + return 1; +} + +static int stbi__psd_is16(stbi__context *s) +{ + int channelCount, depth; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + STBI_NOTUSED(stbi__get32be(s)); + STBI_NOTUSED(stbi__get32be(s)); + depth = stbi__get16be(s); + if (depth != 16) { + stbi__rewind( s ); + return 0; + } + return 1; +} +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp) +{ + int act_comp=0,num_packets=0,chained,dummy; + stbi__pic_packet packets[10]; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if 
(!stbi__pic_is4(s,"\x53\x80\xF6\x34")) { + stbi__rewind(s); + return 0; + } + + stbi__skip(s, 88); + + *x = stbi__get16be(s); + *y = stbi__get16be(s); + if (stbi__at_eof(s)) { + stbi__rewind( s); + return 0; + } + if ( (*x) != 0 && (1 << 28) / (*x) < (*y)) { + stbi__rewind( s ); + return 0; + } + + stbi__skip(s, 8); + + do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return 0; + + packet = &packets[num_packets++]; + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + act_comp |= packet->channel; + + if (stbi__at_eof(s)) { + stbi__rewind( s ); + return 0; + } + if (packet->size != 8) { + stbi__rewind( s ); + return 0; + } + } while (chained); + + *comp = (act_comp & 0x10 ? 4 : 3); + + return 1; +} +#endif + +// ************************************************************************************************* +// Portable Gray Map and Portable Pixel Map loader +// by Ken Miller +// +// PGM: http://netpbm.sourceforge.net/doc/pgm.html +// PPM: http://netpbm.sourceforge.net/doc/ppm.html +// +// Known limitations: +// Does not support comments in the header section +// Does not support ASCII image data (formats P2 and P3) + +#ifndef STBI_NO_PNM + +static int stbi__pnm_test(stbi__context *s) +{ + char p, t; + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind( s ); + return 0; + } + return 1; +} + +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + STBI_NOTUSED(ri); + + ri->bits_per_channel = stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n); + if (ri->bits_per_channel == 0) + return 0; + + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + *x 
= s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + + if (!stbi__mad4sizes_valid(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0)) + return stbi__errpuc("too large", "PNM too large"); + + out = (stbi_uc *) stbi__malloc_mad4(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + if (!stbi__getn(s, out, s->img_n * s->img_x * s->img_y * (ri->bits_per_channel / 8))) { + STBI_FREE(out); + return stbi__errpuc("bad PNM", "PNM file truncated"); + } + + if (req_comp && req_comp != s->img_n) { + if (ri->bits_per_channel == 16) { + out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, s->img_n, req_comp, s->img_x, s->img_y); + } else { + out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); + } + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + return out; +} + +static int stbi__pnm_isspace(char c) +{ + return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r'; +} + +static void stbi__pnm_skip_whitespace(stbi__context *s, char *c) +{ + for (;;) { + while (!stbi__at_eof(s) && stbi__pnm_isspace(*c)) + *c = (char) stbi__get8(s); + + if (stbi__at_eof(s) || *c != '#') + break; + + while (!stbi__at_eof(s) && *c != '\n' && *c != '\r' ) + *c = (char) stbi__get8(s); + } +} + +static int stbi__pnm_isdigit(char c) +{ + return c >= '0' && c <= '9'; +} + +static int stbi__pnm_getinteger(stbi__context *s, char *c) +{ + int value = 0; + + while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) { + value = value*10 + (*c - '0'); + *c = (char) stbi__get8(s); + if((value > 214748364) || (value == 214748364 && *c > '7')) + return stbi__err("integer parse overflow", "Parsing an integer in the PPM header overflowed a 32-bit int"); + } + + return value; +} + +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp) +{ + int maxv, dummy; + char c, p, t; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = 
&dummy; + + stbi__rewind(s); + + // Get identifier + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind(s); + return 0; + } + + *comp = (t == '6') ? 3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm + + c = (char) stbi__get8(s); + stbi__pnm_skip_whitespace(s, &c); + + *x = stbi__pnm_getinteger(s, &c); // read width + if(*x == 0) + return stbi__err("invalid width", "PPM image header had zero or overflowing width"); + stbi__pnm_skip_whitespace(s, &c); + + *y = stbi__pnm_getinteger(s, &c); // read height + if (*y == 0) + return stbi__err("invalid width", "PPM image header had zero or overflowing width"); + stbi__pnm_skip_whitespace(s, &c); + + maxv = stbi__pnm_getinteger(s, &c); // read max value + if (maxv > 65535) + return stbi__err("max value > 65535", "PPM image supports only 8-bit and 16-bit images"); + else if (maxv > 255) + return 16; + else + return 8; +} + +static int stbi__pnm_is16(stbi__context *s) +{ + if (stbi__pnm_info(s, NULL, NULL, NULL) == 16) + return 1; + return 0; +} +#endif + +static int stbi__info_main(stbi__context *s, int *x, int *y, int *comp) +{ + #ifndef STBI_NO_JPEG + if (stbi__jpeg_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNG + if (stbi__png_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_GIF + if (stbi__gif_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_BMP + if (stbi__bmp_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PIC + if (stbi__pic_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNM + if (stbi__pnm_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_info(s, x, y, comp)) return 1; + #endif + + // test tga last because it's a crappy test! 
+ #ifndef STBI_NO_TGA + if (stbi__tga_info(s, x, y, comp)) + return 1; + #endif + return stbi__err("unknown image type", "Image not of any known type, or corrupt"); +} + +static int stbi__is_16_main(stbi__context *s) +{ + #ifndef STBI_NO_PNG + if (stbi__png_is16(s)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_is16(s)) return 1; + #endif + + #ifndef STBI_NO_PNM + if (stbi__pnm_is16(s)) return 1; + #endif + return 0; +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_info_from_file(f, x, y, comp); + fclose(f); + return result; +} + +STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__info_main(&s,x,y,comp); + fseek(f,pos,SEEK_SET); + return r; +} + +STBIDEF int stbi_is_16_bit(char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_is_16_bit_from_file(f); + fclose(f); + return result; +} + +STBIDEF int stbi_is_16_bit_from_file(FILE *f) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__is_16_main(&s); + fseek(f,pos,SEEK_SET); + return r; +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + 
return stbi__is_16_main(&s); +} + +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *c, void *user) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__is_16_main(&s); +} + +#endif // STB_IMAGE_IMPLEMENTATION + +/* + revision history: + 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) change sbti__shiftsigned to avoid clang -O2 bug + 1-bit BMP + *_is_16_bit api + avoid warnings + 2.16 (2017-07-23) all functions have 16-bit variants; + STBI_NO_STDIO works again; + compilation fixes; + fix rounding in unpremultiply; + optimize vertical flip; + disable raw_len validation; + documentation fixes + 2.15 (2017-03-18) fix png-1,2,4 bug; now all Imagenet JPGs decode; + warning fixes; disable run-time SSE detection on gcc; + uniform handling of optional "return" values; + thread-safe initialization of zlib tables + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-11-29) add 16-bit API, only supported for PNG right now + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) allocate large structures on the stack + remove white matting for transparent PSD + fix reported channel count for PNG & BMP + re-enable SSE2 in non-gcc 64-bit + support RGB-formatted JPEG + read 16-bit PNGs (only as 8-bit) + 2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED + 2.09 (2016-01-16) allow comments in PNM files + 16-bit-per-pixel TGA (not bit-per-component) + info() for TGA could break due to .hdr handling + info() for BMP to shares code instead of sloppy parse + can use STBI_REALLOC_SIZED if allocator doesn't support realloc + code cleanup + 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA + 2.07 (2015-09-13) fix compiler warnings + partial animated GIF support + limited 16-bpc PSD support + #ifdef unused 
functions + bug with < 92 byte PIC,PNM,HDR,TGA + 2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value + 2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning + 2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit + 2.03 (2015-04-12) extra corruption checking (mmozeiko) + stbi_set_flip_vertically_on_load (nguillemot) + fix NEON support; fix mingw support + 2.02 (2015-01-19) fix incorrect assert, fix warning + 2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2 + 2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG + 2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg) + progressive JPEG (stb) + PGM/PPM support (Ken Miller) + STBI_MALLOC,STBI_REALLOC,STBI_FREE + GIF bugfix -- seemingly never worked + STBI_NO_*, STBI_ONLY_* + 1.48 (2014-12-14) fix incorrectly-named assert() + 1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb) + optimize PNG (ryg) + fix bug in interlaced PNG with user-specified channel count (stb) + 1.46 (2014-08-26) + fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG + 1.45 (2014-08-16) + fix MSVC-ARM internal compiler error by wrapping malloc + 1.44 (2014-08-07) + various warning fixes from Ronny Chevalier + 1.43 (2014-07-15) + fix MSVC-only compiler problem in code changed in 1.42 + 1.42 (2014-07-09) + don't define _CRT_SECURE_NO_WARNINGS (affects user code) + fixes to stbi__cleanup_jpeg path + added STBI_ASSERT to avoid requiring assert.h + 1.41 (2014-06-25) + fix search&replace from 1.36 that messed up comments/error messages + 1.40 (2014-06-22) + fix gcc struct-initialization warning + 1.39 (2014-06-15) + fix to TGA optimization when req_comp != number of components in TGA; + fix to GIF loading because BMP wasn't rewinding (whoops, no GIFs in my test suite) + add support for BMP version 5 (more ignored fields) + 1.38 (2014-06-06) + suppress MSVC warnings on integer casts truncating values + fix accidental rename of 
'skip' field of I/O + 1.37 (2014-06-04) + remove duplicate typedef + 1.36 (2014-06-03) + convert to header file single-file library + if de-iphone isn't set, load iphone images color-swapped instead of returning NULL + 1.35 (2014-05-27) + various warnings + fix broken STBI_SIMD path + fix bug where stbi_load_from_file no longer left file pointer in correct place + fix broken non-easy path for 32-bit BMP (possibly never used) + TGA optimization by Arseny Kapoulkine + 1.34 (unknown) + use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case + 1.33 (2011-07-14) + make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements + 1.32 (2011-07-13) + support for "info" function for all supported filetypes (SpartanJ) + 1.31 (2011-06-20) + a few more leak fixes, bug in PNG handling (SpartanJ) + 1.30 (2011-06-11) + added ability to load files via callbacks to accomidate custom input streams (Ben Wenger) + removed deprecated format-specific test/load functions + removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway + error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha) + fix inefficiency in decoding 32-bit BMP (David Woo) + 1.29 (2010-08-16) + various warning fixes from Aurelien Pocheville + 1.28 (2010-08-01) + fix bug in GIF palette transparency (SpartanJ) + 1.27 (2010-08-01) + cast-to-stbi_uc to fix warnings + 1.26 (2010-07-24) + fix bug in file buffering for PNG reported by SpartanJ + 1.25 (2010-07-17) + refix trans_data warning (Won Chun) + 1.24 (2010-07-12) + perf improvements reading from files on platforms with lock-heavy fgetc() + minor perf improvements for jpeg + deprecated type-specific functions so we'll get feedback if they're needed + attempt to fix trans_data warning (Won Chun) + 1.23 fixed bug in iPhone support + 1.22 (2010-07-10) + removed image *writing* support + stbi_info support from Jetro Lauha + GIF support from 
Jean-Marc Lienher + iPhone PNG-extensions from James Brown + warning-fixes from Nicolas Schulz and Janez Zemva (i.stbi__err. Janez (U+017D)emva) + 1.21 fix use of 'stbi_uc' in header (reported by jon blow) + 1.20 added support for Softimage PIC, by Tom Seddon + 1.19 bug in interlaced PNG corruption check (found by ryg) + 1.18 (2008-08-02) + fix a threading bug (local mutable static) + 1.17 support interlaced PNG + 1.16 major bugfix - stbi__convert_format converted one too many pixels + 1.15 initialize some fields for thread safety + 1.14 fix threadsafe conversion bug + header-file-only version (#define STBI_HEADER_FILE_ONLY before including) + 1.13 threadsafe + 1.12 const qualifiers in the API + 1.11 Support installable IDCT, colorspace conversion routines + 1.10 Fixes for 64-bit (don't use "unsigned long") + optimized upsampling by Fabian "ryg" Giesen + 1.09 Fix format-conversion for PSD code (bad global variables!) + 1.08 Thatcher Ulrich's PSD code integrated by Nicolas Schulz + 1.07 attempt to fix C++ warning/errors again + 1.06 attempt to fix C++ warning/errors again + 1.05 fix TGA loading to return correct *comp and use good luminance calc + 1.04 default float alpha is 1, not 255; use 'void *' for stbi_image_free + 1.03 bugfixes to STBI_NO_STDIO, STBI_NO_HDR + 1.02 support for (subset of) HDR files, float interface for preferred access to them + 1.01 fix bug: possible bug in handling right-side up bmps... 
not sure + fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all + 1.00 interface to zlib that skips zlib header + 0.99 correct handling of alpha in palette + 0.98 TGA loader by lonesock; dynamically add loaders (untested) + 0.97 jpeg errors on too large a file; also catch another malloc failure + 0.96 fix detection of invalid v value - particleman@mollyrocket forum + 0.95 during header scan, seek to markers in case of padding + 0.94 STBI_NO_STDIO to disable stdio usage; rename all #defines the same + 0.93 handle jpegtran output; verbose errors + 0.92 read 4,8,16,24,32-bit BMP files of several formats + 0.91 output 24-bit Windows 3.0 BMP files + 0.90 fix a few more warnings; bump version number to approach 1.0 + 0.61 bugfixes due to Marc LeBlanc, Christopher Lloyd + 0.60 fix compiling as c++ + 0.59 fix warnings: merge Dave Moore's -Wall fixes + 0.58 fix bug: zlib uncompressed mode len/nlen was wrong endian + 0.57 fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available + 0.56 fix bug: zlib uncompressed mode len vs. nlen + 0.55 fix bug: restart_interval not initialized to 0 + 0.54 allow NULL for 'int *comp' + 0.53 fix bug in png 3->4; speedup png decoding + 0.52 png handles req_comp=3,4 directly; minor cleanup; jpeg comments + 0.51 obey req_comp requests, 1-component jpegs return as 1-component, + on 'test' only check type, not whether we support this variant + 0.50 (2006-11-19) + first released version +*/ + + +/* +------------------------------------------------------------------------------ +This software is available under 2 licenses -- choose whichever you prefer. 
+------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. 
We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ diff --git a/3rdparty/stb/stb_image_write.h b/3rdparty/stb/stb_image_write.h new file mode 100644 index 0000000..e4b32ed --- /dev/null +++ b/3rdparty/stb/stb_image_write.h @@ -0,0 +1,1724 @@ +/* stb_image_write - v1.16 - public domain - http://nothings.org/stb + writes out PNG/BMP/TGA/JPEG/HDR images to C stdio - Sean Barrett 2010-2015 + no warranty implied; use at your own risk + + Before #including, + + #define STB_IMAGE_WRITE_IMPLEMENTATION + + in the file that you want to have the implementation. + + Will probably not work correctly with strict-aliasing optimizations. + +ABOUT: + + This header file is a library for writing images to C stdio or a callback. + + The PNG output is not optimal; it is 20-50% larger than the file + written by a decent optimizing implementation; though providing a custom + zlib compress function (see STBIW_ZLIB_COMPRESS) can mitigate that. + This library is designed for source code compactness and simplicity, + not optimal image file size or run-time performance. + +BUILDING: + + You can #define STBIW_ASSERT(x) before the #include to avoid using assert.h. + You can #define STBIW_MALLOC(), STBIW_REALLOC(), and STBIW_FREE() to replace + malloc,realloc,free. 
+ You can #define STBIW_MEMMOVE() to replace memmove() + You can #define STBIW_ZLIB_COMPRESS to use a custom zlib-style compress function + for PNG compression (instead of the builtin one), it must have the following signature: + unsigned char * my_compress(unsigned char *data, int data_len, int *out_len, int quality); + The returned data will be freed with STBIW_FREE() (free() by default), + so it must be heap allocated with STBIW_MALLOC() (malloc() by default), + +UNICODE: + + If compiling for Windows and you wish to use Unicode filenames, compile + with + #define STBIW_WINDOWS_UTF8 + and pass utf8-encoded filenames. Call stbiw_convert_wchar_to_utf8 to convert + Windows wchar_t filenames to utf8. + +USAGE: + + There are five functions, one for each image file format: + + int stbi_write_png(char const *filename, int w, int h, int comp, const void *data, int stride_in_bytes); + int stbi_write_bmp(char const *filename, int w, int h, int comp, const void *data); + int stbi_write_tga(char const *filename, int w, int h, int comp, const void *data); + int stbi_write_jpg(char const *filename, int w, int h, int comp, const void *data, int quality); + int stbi_write_hdr(char const *filename, int w, int h, int comp, const float *data); + + void stbi_flip_vertically_on_write(int flag); // flag is non-zero to flip data vertically + + There are also five equivalent functions that use an arbitrary write function. 
You are + expected to open/close your file-equivalent before and after calling these: + + int stbi_write_png_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data, int stride_in_bytes); + int stbi_write_bmp_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data); + int stbi_write_tga_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data); + int stbi_write_hdr_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const float *data); + int stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int quality); + + where the callback is: + void stbi_write_func(void *context, void *data, int size); + + You can configure it with these global variables: + int stbi_write_tga_with_rle; // defaults to true; set to 0 to disable RLE + int stbi_write_png_compression_level; // defaults to 8; set to higher for more compression + int stbi_write_force_png_filter; // defaults to -1; set to 0..5 to force a filter mode + + + You can define STBI_WRITE_NO_STDIO to disable the file variant of these + functions, so the library will not use stdio.h at all. However, this will + also disable HDR writing, because it requires stdio for formatted output. + + Each function returns 0 on failure and non-0 on success. + + The functions create an image file defined by the parameters. The image + is a rectangle of pixels stored from left-to-right, top-to-bottom. + Each pixel contains 'comp' channels of data stored interleaved with 8-bits + per channel, in the following order: 1=Y, 2=YA, 3=RGB, 4=RGBA. (Y is + monochrome color.) The rectangle is 'w' pixels wide and 'h' pixels tall. + The *data pointer points to the first byte of the top-left-most pixel. + For PNG, "stride_in_bytes" is the distance in bytes from the first byte of + a row of pixels to the first byte of the next row of pixels. 
+ + PNG creates output files with the same number of components as the input. + The BMP format expands Y to RGB in the file format and does not + output alpha. + + PNG supports writing rectangles of data even when the bytes storing rows of + data are not consecutive in memory (e.g. sub-rectangles of a larger image), + by supplying the stride between the beginning of adjacent rows. The other + formats do not. (Thus you cannot write a native-format BMP through the BMP + writer, both because it is in BGR order and because it may have padding + at the end of the line.) + + PNG allows you to set the deflate compression level by setting the global + variable 'stbi_write_png_compression_level' (it defaults to 8). + + HDR expects linear float data. Since the format is always 32-bit rgb(e) + data, alpha (if provided) is discarded, and for monochrome data it is + replicated across all three channels. + + TGA supports RLE or non-RLE compressed data. To use non-RLE-compressed + data, set the global variable 'stbi_write_tga_with_rle' to 0. + + JPEG does ignore alpha channels in input data; quality is between 1 and 100. + Higher quality looks better but results in a bigger image. + JPEG baseline (no JPEG progressive). + +CREDITS: + + + Sean Barrett - PNG/BMP/TGA + Baldur Karlsson - HDR + Jean-Sebastien Guay - TGA monochrome + Tim Kelsey - misc enhancements + Alan Hickman - TGA RLE + Emmanuel Julien - initial file IO callback implementation + Jon Olick - original jo_jpeg.cpp code + Daniel Gibson - integrate JPEG, allow external zlib + Aarni Koskela - allow choosing PNG filter + + bugfixes: + github:Chribba + Guillaume Chereau + github:jry2 + github:romigrou + Sergio Gonzalez + Jonas Karlsson + Filip Wasil + Thatcher Ulrich + github:poppolopoppo + Patrick Boettcher + github:xeekworx + Cap Petschulat + Simon Rodriguez + Ivan Tikhonov + github:ignotion + Adam Schackart + Andrew Kensler + +LICENSE + + See end of file for license information. 
+ +*/ + +#ifndef INCLUDE_STB_IMAGE_WRITE_H +#define INCLUDE_STB_IMAGE_WRITE_H + +#include + +// if STB_IMAGE_WRITE_STATIC causes problems, try defining STBIWDEF to 'inline' or 'static inline' +#ifndef STBIWDEF +#ifdef STB_IMAGE_WRITE_STATIC +#define STBIWDEF static +#else +#ifdef __cplusplus +#define STBIWDEF extern "C" +#else +#define STBIWDEF extern +#endif +#endif +#endif + +#ifndef STB_IMAGE_WRITE_STATIC // C++ forbids static forward declarations +STBIWDEF int stbi_write_tga_with_rle; +STBIWDEF int stbi_write_png_compression_level; +STBIWDEF int stbi_write_force_png_filter; +#endif + +#ifndef STBI_WRITE_NO_STDIO +STBIWDEF int stbi_write_png(char const *filename, int w, int h, int comp, const void *data, int stride_in_bytes); +STBIWDEF int stbi_write_bmp(char const *filename, int w, int h, int comp, const void *data); +STBIWDEF int stbi_write_tga(char const *filename, int w, int h, int comp, const void *data); +STBIWDEF int stbi_write_hdr(char const *filename, int w, int h, int comp, const float *data); +STBIWDEF int stbi_write_jpg(char const *filename, int x, int y, int comp, const void *data, int quality); + +#ifdef STBIW_WINDOWS_UTF8 +STBIWDEF int stbiw_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input); +#endif +#endif + +typedef void stbi_write_func(void *context, void *data, int size); + +STBIWDEF int stbi_write_png_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data, int stride_in_bytes); +STBIWDEF int stbi_write_bmp_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data); +STBIWDEF int stbi_write_tga_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data); +STBIWDEF int stbi_write_hdr_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const float *data); +STBIWDEF int stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int quality); + +STBIWDEF void 
stbi_flip_vertically_on_write(int flip_boolean); + +#endif//INCLUDE_STB_IMAGE_WRITE_H + +#ifdef STB_IMAGE_WRITE_IMPLEMENTATION + +#ifdef _WIN32 + #ifndef _CRT_SECURE_NO_WARNINGS + #define _CRT_SECURE_NO_WARNINGS + #endif + #ifndef _CRT_NONSTDC_NO_DEPRECATE + #define _CRT_NONSTDC_NO_DEPRECATE + #endif +#endif + +#ifndef STBI_WRITE_NO_STDIO +#include +#endif // STBI_WRITE_NO_STDIO + +#include +#include +#include +#include + +#if defined(STBIW_MALLOC) && defined(STBIW_FREE) && (defined(STBIW_REALLOC) || defined(STBIW_REALLOC_SIZED)) +// ok +#elif !defined(STBIW_MALLOC) && !defined(STBIW_FREE) && !defined(STBIW_REALLOC) && !defined(STBIW_REALLOC_SIZED) +// ok +#else +#error "Must define all or none of STBIW_MALLOC, STBIW_FREE, and STBIW_REALLOC (or STBIW_REALLOC_SIZED)." +#endif + +#ifndef STBIW_MALLOC +#define STBIW_MALLOC(sz) malloc(sz) +#define STBIW_REALLOC(p,newsz) realloc(p,newsz) +#define STBIW_FREE(p) free(p) +#endif + +#ifndef STBIW_REALLOC_SIZED +#define STBIW_REALLOC_SIZED(p,oldsz,newsz) STBIW_REALLOC(p,newsz) +#endif + + +#ifndef STBIW_MEMMOVE +#define STBIW_MEMMOVE(a,b,sz) memmove(a,b,sz) +#endif + + +#ifndef STBIW_ASSERT +#include +#define STBIW_ASSERT(x) assert(x) +#endif + +#define STBIW_UCHAR(x) (unsigned char) ((x) & 0xff) + +#ifdef STB_IMAGE_WRITE_STATIC +static int stbi_write_png_compression_level = 8; +static int stbi_write_tga_with_rle = 1; +static int stbi_write_force_png_filter = -1; +#else +int stbi_write_png_compression_level = 8; +int stbi_write_tga_with_rle = 1; +int stbi_write_force_png_filter = -1; +#endif + +static int stbi__flip_vertically_on_write = 0; + +STBIWDEF void stbi_flip_vertically_on_write(int flag) +{ + stbi__flip_vertically_on_write = flag; +} + +typedef struct +{ + stbi_write_func *func; + void *context; + unsigned char buffer[64]; + int buf_used; +} stbi__write_context; + +// initialize a callback-based context +static void stbi__start_write_callbacks(stbi__write_context *s, stbi_write_func *c, void *context) +{ + s->func = 
c; + s->context = context; +} + +#ifndef STBI_WRITE_NO_STDIO + +static void stbi__stdio_write(void *context, void *data, int size) +{ + fwrite(data,1,size,(FILE*) context); +} + +#if defined(_WIN32) && defined(STBIW_WINDOWS_UTF8) +#ifdef __cplusplus +#define STBIW_EXTERN extern "C" +#else +#define STBIW_EXTERN extern +#endif +STBIW_EXTERN __declspec(dllimport) int __stdcall MultiByteToWideChar(unsigned int cp, unsigned long flags, const char *str, int cbmb, wchar_t *widestr, int cchwide); +STBIW_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int cp, unsigned long flags, const wchar_t *widestr, int cchwide, char *str, int cbmb, const char *defchar, int *used_default); + +STBIWDEF int stbiw_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input) +{ + return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, (int) bufferlen, NULL, NULL); +} +#endif + +static FILE *stbiw__fopen(char const *filename, char const *mode) +{ + FILE *f; +#if defined(_WIN32) && defined(STBIW_WINDOWS_UTF8) + wchar_t wMode[64]; + wchar_t wFilename[1024]; + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename)/sizeof(*wFilename))) + return 0; + + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode)/sizeof(*wMode))) + return 0; + +#if defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != _wfopen_s(&f, wFilename, wMode)) + f = 0; +#else + f = _wfopen(wFilename, wMode); +#endif + +#elif defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != fopen_s(&f, filename, mode)) + f=0; +#else + f = fopen(filename, mode); +#endif + return f; +} + +static int stbi__start_write_file(stbi__write_context *s, const char *filename) +{ + FILE *f = stbiw__fopen(filename, "wb"); + stbi__start_write_callbacks(s, stbi__stdio_write, (void *) f); + return f != NULL; +} + +static void stbi__end_write_file(stbi__write_context *s) +{ + fclose((FILE *)s->context); +} + +#endif // !STBI_WRITE_NO_STDIO + +typedef 
unsigned int stbiw_uint32; +typedef int stb_image_write_test[sizeof(stbiw_uint32)==4 ? 1 : -1]; + +static void stbiw__writefv(stbi__write_context *s, const char *fmt, va_list v) +{ + while (*fmt) { + switch (*fmt++) { + case ' ': break; + case '1': { unsigned char x = STBIW_UCHAR(va_arg(v, int)); + s->func(s->context,&x,1); + break; } + case '2': { int x = va_arg(v,int); + unsigned char b[2]; + b[0] = STBIW_UCHAR(x); + b[1] = STBIW_UCHAR(x>>8); + s->func(s->context,b,2); + break; } + case '4': { stbiw_uint32 x = va_arg(v,int); + unsigned char b[4]; + b[0]=STBIW_UCHAR(x); + b[1]=STBIW_UCHAR(x>>8); + b[2]=STBIW_UCHAR(x>>16); + b[3]=STBIW_UCHAR(x>>24); + s->func(s->context,b,4); + break; } + default: + STBIW_ASSERT(0); + return; + } + } +} + +static void stbiw__writef(stbi__write_context *s, const char *fmt, ...) +{ + va_list v; + va_start(v, fmt); + stbiw__writefv(s, fmt, v); + va_end(v); +} + +static void stbiw__write_flush(stbi__write_context *s) +{ + if (s->buf_used) { + s->func(s->context, &s->buffer, s->buf_used); + s->buf_used = 0; + } +} + +static void stbiw__putc(stbi__write_context *s, unsigned char c) +{ + s->func(s->context, &c, 1); +} + +static void stbiw__write1(stbi__write_context *s, unsigned char a) +{ + if ((size_t)s->buf_used + 1 > sizeof(s->buffer)) + stbiw__write_flush(s); + s->buffer[s->buf_used++] = a; +} + +static void stbiw__write3(stbi__write_context *s, unsigned char a, unsigned char b, unsigned char c) +{ + int n; + if ((size_t)s->buf_used + 3 > sizeof(s->buffer)) + stbiw__write_flush(s); + n = s->buf_used; + s->buf_used = n+3; + s->buffer[n+0] = a; + s->buffer[n+1] = b; + s->buffer[n+2] = c; +} + +static void stbiw__write_pixel(stbi__write_context *s, int rgb_dir, int comp, int write_alpha, int expand_mono, unsigned char *d) +{ + unsigned char bg[3] = { 255, 0, 255}, px[3]; + int k; + + if (write_alpha < 0) + stbiw__write1(s, d[comp - 1]); + + switch (comp) { + case 2: // 2 pixels = mono + alpha, alpha is written separately, so same as 
1-channel case + case 1: + if (expand_mono) + stbiw__write3(s, d[0], d[0], d[0]); // monochrome bmp + else + stbiw__write1(s, d[0]); // monochrome TGA + break; + case 4: + if (!write_alpha) { + // composite against pink background + for (k = 0; k < 3; ++k) + px[k] = bg[k] + ((d[k] - bg[k]) * d[3]) / 255; + stbiw__write3(s, px[1 - rgb_dir], px[1], px[1 + rgb_dir]); + break; + } + /* FALLTHROUGH */ + case 3: + stbiw__write3(s, d[1 - rgb_dir], d[1], d[1 + rgb_dir]); + break; + } + if (write_alpha > 0) + stbiw__write1(s, d[comp - 1]); +} + +static void stbiw__write_pixels(stbi__write_context *s, int rgb_dir, int vdir, int x, int y, int comp, void *data, int write_alpha, int scanline_pad, int expand_mono) +{ + stbiw_uint32 zero = 0; + int i,j, j_end; + + if (y <= 0) + return; + + if (stbi__flip_vertically_on_write) + vdir *= -1; + + if (vdir < 0) { + j_end = -1; j = y-1; + } else { + j_end = y; j = 0; + } + + for (; j != j_end; j += vdir) { + for (i=0; i < x; ++i) { + unsigned char *d = (unsigned char *) data + (j*x+i)*comp; + stbiw__write_pixel(s, rgb_dir, comp, write_alpha, expand_mono, d); + } + stbiw__write_flush(s); + s->func(s->context, &zero, scanline_pad); + } +} + +static int stbiw__outfile(stbi__write_context *s, int rgb_dir, int vdir, int x, int y, int comp, int expand_mono, void *data, int alpha, int pad, const char *fmt, ...) 
+{ + if (y < 0 || x < 0) { + return 0; + } else { + va_list v; + va_start(v, fmt); + stbiw__writefv(s, fmt, v); + va_end(v); + stbiw__write_pixels(s,rgb_dir,vdir,x,y,comp,data,alpha,pad, expand_mono); + return 1; + } +} + +static int stbi_write_bmp_core(stbi__write_context *s, int x, int y, int comp, const void *data) +{ + if (comp != 4) { + // write RGB bitmap + int pad = (-x*3) & 3; + return stbiw__outfile(s,-1,-1,x,y,comp,1,(void *) data,0,pad, + "11 4 22 4" "4 44 22 444444", + 'B', 'M', 14+40+(x*3+pad)*y, 0,0, 14+40, // file header + 40, x,y, 1,24, 0,0,0,0,0,0); // bitmap header + } else { + // RGBA bitmaps need a v4 header + // use BI_BITFIELDS mode with 32bpp and alpha mask + // (straight BI_RGB with alpha mask doesn't work in most readers) + return stbiw__outfile(s,-1,-1,x,y,comp,1,(void *)data,1,0, + "11 4 22 4" "4 44 22 444444 4444 4 444 444 444 444", + 'B', 'M', 14+108+x*y*4, 0, 0, 14+108, // file header + 108, x,y, 1,32, 3,0,0,0,0,0, 0xff0000,0xff00,0xff,0xff000000u, 0, 0,0,0, 0,0,0, 0,0,0, 0,0,0); // bitmap V4 header + } +} + +STBIWDEF int stbi_write_bmp_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data) +{ + stbi__write_context s = { 0 }; + stbi__start_write_callbacks(&s, func, context); + return stbi_write_bmp_core(&s, x, y, comp, data); +} + +#ifndef STBI_WRITE_NO_STDIO +STBIWDEF int stbi_write_bmp(char const *filename, int x, int y, int comp, const void *data) +{ + stbi__write_context s = { 0 }; + if (stbi__start_write_file(&s,filename)) { + int r = stbi_write_bmp_core(&s, x, y, comp, data); + stbi__end_write_file(&s); + return r; + } else + return 0; +} +#endif //!STBI_WRITE_NO_STDIO + +static int stbi_write_tga_core(stbi__write_context *s, int x, int y, int comp, void *data) +{ + int has_alpha = (comp == 2 || comp == 4); + int colorbytes = has_alpha ? comp-1 : comp; + int format = colorbytes < 2 ? 
3 : 2; // 3 color channels (RGB/RGBA) = 2, 1 color channel (Y/YA) = 3 + + if (y < 0 || x < 0) + return 0; + + if (!stbi_write_tga_with_rle) { + return stbiw__outfile(s, -1, -1, x, y, comp, 0, (void *) data, has_alpha, 0, + "111 221 2222 11", 0, 0, format, 0, 0, 0, 0, 0, x, y, (colorbytes + has_alpha) * 8, has_alpha * 8); + } else { + int i,j,k; + int jend, jdir; + + stbiw__writef(s, "111 221 2222 11", 0,0,format+8, 0,0,0, 0,0,x,y, (colorbytes + has_alpha) * 8, has_alpha * 8); + + if (stbi__flip_vertically_on_write) { + j = 0; + jend = y; + jdir = 1; + } else { + j = y-1; + jend = -1; + jdir = -1; + } + for (; j != jend; j += jdir) { + unsigned char *row = (unsigned char *) data + j * x * comp; + int len; + + for (i = 0; i < x; i += len) { + unsigned char *begin = row + i * comp; + int diff = 1; + len = 1; + + if (i < x - 1) { + ++len; + diff = memcmp(begin, row + (i + 1) * comp, comp); + if (diff) { + const unsigned char *prev = begin; + for (k = i + 2; k < x && len < 128; ++k) { + if (memcmp(prev, row + k * comp, comp)) { + prev += comp; + ++len; + } else { + --len; + break; + } + } + } else { + for (k = i + 2; k < x && len < 128; ++k) { + if (!memcmp(begin, row + k * comp, comp)) { + ++len; + } else { + break; + } + } + } + } + + if (diff) { + unsigned char header = STBIW_UCHAR(len - 1); + stbiw__write1(s, header); + for (k = 0; k < len; ++k) { + stbiw__write_pixel(s, -1, comp, has_alpha, 0, begin + k * comp); + } + } else { + unsigned char header = STBIW_UCHAR(len - 129); + stbiw__write1(s, header); + stbiw__write_pixel(s, -1, comp, has_alpha, 0, begin); + } + } + } + stbiw__write_flush(s); + } + return 1; +} + +STBIWDEF int stbi_write_tga_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data) +{ + stbi__write_context s = { 0 }; + stbi__start_write_callbacks(&s, func, context); + return stbi_write_tga_core(&s, x, y, comp, (void *) data); +} + +#ifndef STBI_WRITE_NO_STDIO +STBIWDEF int stbi_write_tga(char const *filename, int x, 
int y, int comp, const void *data) +{ + stbi__write_context s = { 0 }; + if (stbi__start_write_file(&s,filename)) { + int r = stbi_write_tga_core(&s, x, y, comp, (void *) data); + stbi__end_write_file(&s); + return r; + } else + return 0; +} +#endif + +// ************************************************************************************************* +// Radiance RGBE HDR writer +// by Baldur Karlsson + +#define stbiw__max(a, b) ((a) > (b) ? (a) : (b)) + +#ifndef STBI_WRITE_NO_STDIO + +static void stbiw__linear_to_rgbe(unsigned char *rgbe, float *linear) +{ + int exponent; + float maxcomp = stbiw__max(linear[0], stbiw__max(linear[1], linear[2])); + + if (maxcomp < 1e-32f) { + rgbe[0] = rgbe[1] = rgbe[2] = rgbe[3] = 0; + } else { + float normalize = (float) frexp(maxcomp, &exponent) * 256.0f/maxcomp; + + rgbe[0] = (unsigned char)(linear[0] * normalize); + rgbe[1] = (unsigned char)(linear[1] * normalize); + rgbe[2] = (unsigned char)(linear[2] * normalize); + rgbe[3] = (unsigned char)(exponent + 128); + } +} + +static void stbiw__write_run_data(stbi__write_context *s, int length, unsigned char databyte) +{ + unsigned char lengthbyte = STBIW_UCHAR(length+128); + STBIW_ASSERT(length+128 <= 255); + s->func(s->context, &lengthbyte, 1); + s->func(s->context, &databyte, 1); +} + +static void stbiw__write_dump_data(stbi__write_context *s, int length, unsigned char *data) +{ + unsigned char lengthbyte = STBIW_UCHAR(length); + STBIW_ASSERT(length <= 128); // inconsistent with spec but consistent with official code + s->func(s->context, &lengthbyte, 1); + s->func(s->context, data, length); +} + +static void stbiw__write_hdr_scanline(stbi__write_context *s, int width, int ncomp, unsigned char *scratch, float *scanline) +{ + unsigned char scanlineheader[4] = { 2, 2, 0, 0 }; + unsigned char rgbe[4]; + float linear[3]; + int x; + + scanlineheader[2] = (width&0xff00)>>8; + scanlineheader[3] = (width&0x00ff); + + /* skip RLE for images too small or large */ + if (width < 8 || width 
>= 32768) { + for (x=0; x < width; x++) { + switch (ncomp) { + case 4: /* fallthrough */ + case 3: linear[2] = scanline[x*ncomp + 2]; + linear[1] = scanline[x*ncomp + 1]; + linear[0] = scanline[x*ncomp + 0]; + break; + default: + linear[0] = linear[1] = linear[2] = scanline[x*ncomp + 0]; + break; + } + stbiw__linear_to_rgbe(rgbe, linear); + s->func(s->context, rgbe, 4); + } + } else { + int c,r; + /* encode into scratch buffer */ + for (x=0; x < width; x++) { + switch(ncomp) { + case 4: /* fallthrough */ + case 3: linear[2] = scanline[x*ncomp + 2]; + linear[1] = scanline[x*ncomp + 1]; + linear[0] = scanline[x*ncomp + 0]; + break; + default: + linear[0] = linear[1] = linear[2] = scanline[x*ncomp + 0]; + break; + } + stbiw__linear_to_rgbe(rgbe, linear); + scratch[x + width*0] = rgbe[0]; + scratch[x + width*1] = rgbe[1]; + scratch[x + width*2] = rgbe[2]; + scratch[x + width*3] = rgbe[3]; + } + + s->func(s->context, scanlineheader, 4); + + /* RLE each component separately */ + for (c=0; c < 4; c++) { + unsigned char *comp = &scratch[width*c]; + + x = 0; + while (x < width) { + // find first run + r = x; + while (r+2 < width) { + if (comp[r] == comp[r+1] && comp[r] == comp[r+2]) + break; + ++r; + } + if (r+2 >= width) + r = width; + // dump up to first run + while (x < r) { + int len = r-x; + if (len > 128) len = 128; + stbiw__write_dump_data(s, len, &comp[x]); + x += len; + } + // if there's a run, output it + if (r+2 < width) { // same test as what we break out of in search loop, so only true if we break'd + // find next byte after run + while (r < width && comp[r] == comp[x]) + ++r; + // output run up to r + while (x < r) { + int len = r-x; + if (len > 127) len = 127; + stbiw__write_run_data(s, len, comp[x]); + x += len; + } + } + } + } + } +} + +static int stbi_write_hdr_core(stbi__write_context *s, int x, int y, int comp, float *data) +{ + if (y <= 0 || x <= 0 || data == NULL) + return 0; + else { + // Each component is stored separately. 
Allocate scratch space for full output scanline. + unsigned char *scratch = (unsigned char *) STBIW_MALLOC(x*4); + int i, len; + char buffer[128]; + char header[] = "#?RADIANCE\n# Written by stb_image_write.h\nFORMAT=32-bit_rle_rgbe\n"; + s->func(s->context, header, sizeof(header)-1); + +#ifdef __STDC_LIB_EXT1__ + len = sprintf_s(buffer, sizeof(buffer), "EXPOSURE= 1.0000000000000\n\n-Y %d +X %d\n", y, x); +#else + len = sprintf(buffer, "EXPOSURE= 1.0000000000000\n\n-Y %d +X %d\n", y, x); +#endif + s->func(s->context, buffer, len); + + for(i=0; i < y; i++) + stbiw__write_hdr_scanline(s, x, comp, scratch, data + comp*x*(stbi__flip_vertically_on_write ? y-1-i : i)); + STBIW_FREE(scratch); + return 1; + } +} + +STBIWDEF int stbi_write_hdr_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const float *data) +{ + stbi__write_context s = { 0 }; + stbi__start_write_callbacks(&s, func, context); + return stbi_write_hdr_core(&s, x, y, comp, (float *) data); +} + +STBIWDEF int stbi_write_hdr(char const *filename, int x, int y, int comp, const float *data) +{ + stbi__write_context s = { 0 }; + if (stbi__start_write_file(&s,filename)) { + int r = stbi_write_hdr_core(&s, x, y, comp, (float *) data); + stbi__end_write_file(&s); + return r; + } else + return 0; +} +#endif // STBI_WRITE_NO_STDIO + + +////////////////////////////////////////////////////////////////////////////// +// +// PNG writer +// + +#ifndef STBIW_ZLIB_COMPRESS +// stretchy buffer; stbiw__sbpush() == vector<>::push_back() -- stbiw__sbcount() == vector<>::size() +#define stbiw__sbraw(a) ((int *) (void *) (a) - 2) +#define stbiw__sbm(a) stbiw__sbraw(a)[0] +#define stbiw__sbn(a) stbiw__sbraw(a)[1] + +#define stbiw__sbneedgrow(a,n) ((a)==0 || stbiw__sbn(a)+n >= stbiw__sbm(a)) +#define stbiw__sbmaybegrow(a,n) (stbiw__sbneedgrow(a,(n)) ? 
stbiw__sbgrow(a,n) : 0) +#define stbiw__sbgrow(a,n) stbiw__sbgrowf((void **) &(a), (n), sizeof(*(a))) + +#define stbiw__sbpush(a, v) (stbiw__sbmaybegrow(a,1), (a)[stbiw__sbn(a)++] = (v)) +#define stbiw__sbcount(a) ((a) ? stbiw__sbn(a) : 0) +#define stbiw__sbfree(a) ((a) ? STBIW_FREE(stbiw__sbraw(a)),0 : 0) + +static void *stbiw__sbgrowf(void **arr, int increment, int itemsize) +{ + int m = *arr ? 2*stbiw__sbm(*arr)+increment : increment+1; + void *p = STBIW_REALLOC_SIZED(*arr ? stbiw__sbraw(*arr) : 0, *arr ? (stbiw__sbm(*arr)*itemsize + sizeof(int)*2) : 0, itemsize * m + sizeof(int)*2); + STBIW_ASSERT(p); + if (p) { + if (!*arr) ((int *) p)[1] = 0; + *arr = (void *) ((int *) p + 2); + stbiw__sbm(*arr) = m; + } + return *arr; +} + +static unsigned char *stbiw__zlib_flushf(unsigned char *data, unsigned int *bitbuffer, int *bitcount) +{ + while (*bitcount >= 8) { + stbiw__sbpush(data, STBIW_UCHAR(*bitbuffer)); + *bitbuffer >>= 8; + *bitcount -= 8; + } + return data; +} + +static int stbiw__zlib_bitrev(int code, int codebits) +{ + int res=0; + while (codebits--) { + res = (res << 1) | (code & 1); + code >>= 1; + } + return res; +} + +static unsigned int stbiw__zlib_countm(unsigned char *a, unsigned char *b, int limit) +{ + int i; + for (i=0; i < limit && i < 258; ++i) + if (a[i] != b[i]) break; + return i; +} + +static unsigned int stbiw__zhash(unsigned char *data) +{ + stbiw_uint32 hash = data[0] + (data[1] << 8) + (data[2] << 16); + hash ^= hash << 3; + hash += hash >> 5; + hash ^= hash << 4; + hash += hash >> 17; + hash ^= hash << 25; + hash += hash >> 6; + return hash; +} + +#define stbiw__zlib_flush() (out = stbiw__zlib_flushf(out, &bitbuf, &bitcount)) +#define stbiw__zlib_add(code,codebits) \ + (bitbuf |= (code) << bitcount, bitcount += (codebits), stbiw__zlib_flush()) +#define stbiw__zlib_huffa(b,c) stbiw__zlib_add(stbiw__zlib_bitrev(b,c),c) +// default huffman tables +#define stbiw__zlib_huff1(n) stbiw__zlib_huffa(0x30 + (n), 8) +#define stbiw__zlib_huff2(n) 
stbiw__zlib_huffa(0x190 + (n)-144, 9) +#define stbiw__zlib_huff3(n) stbiw__zlib_huffa(0 + (n)-256,7) +#define stbiw__zlib_huff4(n) stbiw__zlib_huffa(0xc0 + (n)-280,8) +#define stbiw__zlib_huff(n) ((n) <= 143 ? stbiw__zlib_huff1(n) : (n) <= 255 ? stbiw__zlib_huff2(n) : (n) <= 279 ? stbiw__zlib_huff3(n) : stbiw__zlib_huff4(n)) +#define stbiw__zlib_huffb(n) ((n) <= 143 ? stbiw__zlib_huff1(n) : stbiw__zlib_huff2(n)) + +#define stbiw__ZHASH 16384 + +#endif // STBIW_ZLIB_COMPRESS + +STBIWDEF unsigned char * stbi_zlib_compress(unsigned char *data, int data_len, int *out_len, int quality) +{ +#ifdef STBIW_ZLIB_COMPRESS + // user provided a zlib compress implementation, use that + return STBIW_ZLIB_COMPRESS(data, data_len, out_len, quality); +#else // use builtin + static unsigned short lengthc[] = { 3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258, 259 }; + static unsigned char lengtheb[]= { 0,0,0,0,0,0,0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0 }; + static unsigned short distc[] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577, 32768 }; + static unsigned char disteb[] = { 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13 }; + unsigned int bitbuf=0; + int i,j, bitcount=0; + unsigned char *out = NULL; + unsigned char ***hash_table = (unsigned char***) STBIW_MALLOC(stbiw__ZHASH * sizeof(unsigned char**)); + if (hash_table == NULL) + return NULL; + if (quality < 5) quality = 5; + + stbiw__sbpush(out, 0x78); // DEFLATE 32K window + stbiw__sbpush(out, 0x5e); // FLEVEL = 1 + stbiw__zlib_add(1,1); // BFINAL = 1 + stbiw__zlib_add(1,2); // BTYPE = 1 -- fixed huffman + + for (i=0; i < stbiw__ZHASH; ++i) + hash_table[i] = NULL; + + i=0; + while (i < data_len-3) { + // hash next 3 bytes of data to be compressed + int h = stbiw__zhash(data+i)&(stbiw__ZHASH-1), best=3; + unsigned char *bestloc = 0; + unsigned char **hlist = hash_table[h]; + 
int n = stbiw__sbcount(hlist); + for (j=0; j < n; ++j) { + if (hlist[j]-data > i-32768) { // if entry lies within window + int d = stbiw__zlib_countm(hlist[j], data+i, data_len-i); + if (d >= best) { best=d; bestloc=hlist[j]; } + } + } + // when hash table entry is too long, delete half the entries + if (hash_table[h] && stbiw__sbn(hash_table[h]) == 2*quality) { + STBIW_MEMMOVE(hash_table[h], hash_table[h]+quality, sizeof(hash_table[h][0])*quality); + stbiw__sbn(hash_table[h]) = quality; + } + stbiw__sbpush(hash_table[h],data+i); + + if (bestloc) { + // "lazy matching" - check match at *next* byte, and if it's better, do cur byte as literal + h = stbiw__zhash(data+i+1)&(stbiw__ZHASH-1); + hlist = hash_table[h]; + n = stbiw__sbcount(hlist); + for (j=0; j < n; ++j) { + if (hlist[j]-data > i-32767) { + int e = stbiw__zlib_countm(hlist[j], data+i+1, data_len-i-1); + if (e > best) { // if next match is better, bail on current match + bestloc = NULL; + break; + } + } + } + } + + if (bestloc) { + int d = (int) (data+i - bestloc); // distance back + STBIW_ASSERT(d <= 32767 && best <= 258); + for (j=0; best > lengthc[j+1]-1; ++j); + stbiw__zlib_huff(j+257); + if (lengtheb[j]) stbiw__zlib_add(best - lengthc[j], lengtheb[j]); + for (j=0; d > distc[j+1]-1; ++j); + stbiw__zlib_add(stbiw__zlib_bitrev(j,5),5); + if (disteb[j]) stbiw__zlib_add(d - distc[j], disteb[j]); + i += best; + } else { + stbiw__zlib_huffb(data[i]); + ++i; + } + } + // write out final bytes + for (;i < data_len; ++i) + stbiw__zlib_huffb(data[i]); + stbiw__zlib_huff(256); // end of block + // pad with 0 bits to byte boundary + while (bitcount) + stbiw__zlib_add(0,1); + + for (i=0; i < stbiw__ZHASH; ++i) + (void) stbiw__sbfree(hash_table[i]); + STBIW_FREE(hash_table); + + // store uncompressed instead if compression was worse + if (stbiw__sbn(out) > data_len + 2 + ((data_len+32766)/32767)*5) { + stbiw__sbn(out) = 2; // truncate to DEFLATE 32K window and FLEVEL = 1 + for (j = 0; j < data_len;) { + int blocklen 
= data_len - j; + if (blocklen > 32767) blocklen = 32767; + stbiw__sbpush(out, data_len - j == blocklen); // BFINAL = ?, BTYPE = 0 -- no compression + stbiw__sbpush(out, STBIW_UCHAR(blocklen)); // LEN + stbiw__sbpush(out, STBIW_UCHAR(blocklen >> 8)); + stbiw__sbpush(out, STBIW_UCHAR(~blocklen)); // NLEN + stbiw__sbpush(out, STBIW_UCHAR(~blocklen >> 8)); + memcpy(out+stbiw__sbn(out), data+j, blocklen); + stbiw__sbn(out) += blocklen; + j += blocklen; + } + } + + { + // compute adler32 on input + unsigned int s1=1, s2=0; + int blocklen = (int) (data_len % 5552); + j=0; + while (j < data_len) { + for (i=0; i < blocklen; ++i) { s1 += data[j+i]; s2 += s1; } + s1 %= 65521; s2 %= 65521; + j += blocklen; + blocklen = 5552; + } + stbiw__sbpush(out, STBIW_UCHAR(s2 >> 8)); + stbiw__sbpush(out, STBIW_UCHAR(s2)); + stbiw__sbpush(out, STBIW_UCHAR(s1 >> 8)); + stbiw__sbpush(out, STBIW_UCHAR(s1)); + } + *out_len = stbiw__sbn(out); + // make returned pointer freeable + STBIW_MEMMOVE(stbiw__sbraw(out), out, *out_len); + return (unsigned char *) stbiw__sbraw(out); +#endif // STBIW_ZLIB_COMPRESS +} + +static unsigned int stbiw__crc32(unsigned char *buffer, int len) +{ +#ifdef STBIW_CRC32 + return STBIW_CRC32(buffer, len); +#else + static unsigned int crc_table[256] = + { + 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, + 0x0eDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, + 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, + 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, + 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, + 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, + 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, + 0x2802B89E, 0x5F058808, 0xC60CD9B2, 
0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, + 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, + 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, + 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, + 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, + 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, + 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, + 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, + 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, + 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, + 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, + 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, + 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, + 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, + 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, + 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, + 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, + 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, + 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, + 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, + 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 
0x616BFFD3, 0x166CCF45, + 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, + 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, + 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, + 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D + }; + + unsigned int crc = ~0u; + int i; + for (i=0; i < len; ++i) + crc = (crc >> 8) ^ crc_table[buffer[i] ^ (crc & 0xff)]; + return ~crc; +#endif +} + +#define stbiw__wpng4(o,a,b,c,d) ((o)[0]=STBIW_UCHAR(a),(o)[1]=STBIW_UCHAR(b),(o)[2]=STBIW_UCHAR(c),(o)[3]=STBIW_UCHAR(d),(o)+=4) +#define stbiw__wp32(data,v) stbiw__wpng4(data, (v)>>24,(v)>>16,(v)>>8,(v)); +#define stbiw__wptag(data,s) stbiw__wpng4(data, s[0],s[1],s[2],s[3]) + +static void stbiw__wpcrc(unsigned char **data, int len) +{ + unsigned int crc = stbiw__crc32(*data - len - 4, len+4); + stbiw__wp32(*data, crc); +} + +static unsigned char stbiw__paeth(int a, int b, int c) +{ + int p = a + b - c, pa = abs(p-a), pb = abs(p-b), pc = abs(p-c); + if (pa <= pb && pa <= pc) return STBIW_UCHAR(a); + if (pb <= pc) return STBIW_UCHAR(b); + return STBIW_UCHAR(c); +} + +// @OPTIMIZE: provide an option that always forces left-predict or paeth predict +static void stbiw__encode_png_line(unsigned char *pixels, int stride_bytes, int width, int height, int y, int n, int filter_type, signed char *line_buffer) +{ + static int mapping[] = { 0,1,2,3,4 }; + static int firstmap[] = { 0,1,0,5,6 }; + int *mymap = (y != 0) ? mapping : firstmap; + int i; + int type = mymap[filter_type]; + unsigned char *z = pixels + stride_bytes * (stbi__flip_vertically_on_write ? height-1-y : y); + int signed_stride = stbi__flip_vertically_on_write ? 
-stride_bytes : stride_bytes; + + if (type==0) { + memcpy(line_buffer, z, width*n); + return; + } + + // first loop isn't optimized since it's just one pixel + for (i = 0; i < n; ++i) { + switch (type) { + case 1: line_buffer[i] = z[i]; break; + case 2: line_buffer[i] = z[i] - z[i-signed_stride]; break; + case 3: line_buffer[i] = z[i] - (z[i-signed_stride]>>1); break; + case 4: line_buffer[i] = (signed char) (z[i] - stbiw__paeth(0,z[i-signed_stride],0)); break; + case 5: line_buffer[i] = z[i]; break; + case 6: line_buffer[i] = z[i]; break; + } + } + switch (type) { + case 1: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - z[i-n]; break; + case 2: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - z[i-signed_stride]; break; + case 3: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - ((z[i-n] + z[i-signed_stride])>>1); break; + case 4: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - stbiw__paeth(z[i-n], z[i-signed_stride], z[i-signed_stride-n]); break; + case 5: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - (z[i-n]>>1); break; + case 6: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - stbiw__paeth(z[i-n], 0,0); break; + } +} + +STBIWDEF unsigned char *stbi_write_png_to_mem(const unsigned char *pixels, int stride_bytes, int x, int y, int n, int *out_len) +{ + int force_filter = stbi_write_force_png_filter; + int ctype[5] = { -1, 0, 4, 2, 6 }; + unsigned char sig[8] = { 137,80,78,71,13,10,26,10 }; + unsigned char *out,*o, *filt, *zlib; + signed char *line_buffer; + int j,zlen; + + if (stride_bytes == 0) + stride_bytes = x * n; + + if (force_filter >= 5) { + force_filter = -1; + } + + filt = (unsigned char *) STBIW_MALLOC((x*n+1) * y); if (!filt) return 0; + line_buffer = (signed char *) STBIW_MALLOC(x * n); if (!line_buffer) { STBIW_FREE(filt); return 0; } + for (j=0; j < y; ++j) { + int filter_type; + if (force_filter > -1) { + filter_type = force_filter; + stbiw__encode_png_line((unsigned char*)(pixels), stride_bytes, x, y, j, n, force_filter, 
line_buffer); + } else { // Estimate the best filter by running through all of them: + int best_filter = 0, best_filter_val = 0x7fffffff, est, i; + for (filter_type = 0; filter_type < 5; filter_type++) { + stbiw__encode_png_line((unsigned char*)(pixels), stride_bytes, x, y, j, n, filter_type, line_buffer); + + // Estimate the entropy of the line using this filter; the less, the better. + est = 0; + for (i = 0; i < x*n; ++i) { + est += abs((signed char) line_buffer[i]); + } + if (est < best_filter_val) { + best_filter_val = est; + best_filter = filter_type; + } + } + if (filter_type != best_filter) { // If the last iteration already got us the best filter, don't redo it + stbiw__encode_png_line((unsigned char*)(pixels), stride_bytes, x, y, j, n, best_filter, line_buffer); + filter_type = best_filter; + } + } + // when we get here, filter_type contains the filter type, and line_buffer contains the data + filt[j*(x*n+1)] = (unsigned char) filter_type; + STBIW_MEMMOVE(filt+j*(x*n+1)+1, line_buffer, x*n); + } + STBIW_FREE(line_buffer); + zlib = stbi_zlib_compress(filt, y*( x*n+1), &zlen, stbi_write_png_compression_level); + STBIW_FREE(filt); + if (!zlib) return 0; + + // each tag requires 12 bytes of overhead + out = (unsigned char *) STBIW_MALLOC(8 + 12+13 + 12+zlen + 12); + if (!out) return 0; + *out_len = 8 + 12+13 + 12+zlen + 12; + + o=out; + STBIW_MEMMOVE(o,sig,8); o+= 8; + stbiw__wp32(o, 13); // header length + stbiw__wptag(o, "IHDR"); + stbiw__wp32(o, x); + stbiw__wp32(o, y); + *o++ = 8; + *o++ = STBIW_UCHAR(ctype[n]); + *o++ = 0; + *o++ = 0; + *o++ = 0; + stbiw__wpcrc(&o,13); + + stbiw__wp32(o, zlen); + stbiw__wptag(o, "IDAT"); + STBIW_MEMMOVE(o, zlib, zlen); + o += zlen; + STBIW_FREE(zlib); + stbiw__wpcrc(&o, zlen); + + stbiw__wp32(o,0); + stbiw__wptag(o, "IEND"); + stbiw__wpcrc(&o,0); + + STBIW_ASSERT(o == out + *out_len); + + return out; +} + +#ifndef STBI_WRITE_NO_STDIO +STBIWDEF int stbi_write_png(char const *filename, int x, int y, int comp, const void 
*data, int stride_bytes) +{ + FILE *f; + int len; + unsigned char *png = stbi_write_png_to_mem((const unsigned char *) data, stride_bytes, x, y, comp, &len); + if (png == NULL) return 0; + + f = stbiw__fopen(filename, "wb"); + if (!f) { STBIW_FREE(png); return 0; } + fwrite(png, 1, len, f); + fclose(f); + STBIW_FREE(png); + return 1; +} +#endif + +STBIWDEF int stbi_write_png_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int stride_bytes) +{ + int len; + unsigned char *png = stbi_write_png_to_mem((const unsigned char *) data, stride_bytes, x, y, comp, &len); + if (png == NULL) return 0; + func(context, png, len); + STBIW_FREE(png); + return 1; +} + + +/* *************************************************************************** + * + * JPEG writer + * + * This is based on Jon Olick's jo_jpeg.cpp: + * public domain Simple, Minimalistic JPEG writer - http://www.jonolick.com/code.html + */ + +static const unsigned char stbiw__jpg_ZigZag[] = { 0,1,5,6,14,15,27,28,2,4,7,13,16,26,29,42,3,8,12,17,25,30,41,43,9,11,18, + 24,31,40,44,53,10,19,23,32,39,45,52,54,20,22,33,38,46,51,55,60,21,34,37,47,50,56,59,61,35,36,48,49,57,58,62,63 }; + +static void stbiw__jpg_writeBits(stbi__write_context *s, int *bitBufP, int *bitCntP, const unsigned short *bs) { + int bitBuf = *bitBufP, bitCnt = *bitCntP; + bitCnt += bs[1]; + bitBuf |= bs[0] << (24 - bitCnt); + while(bitCnt >= 8) { + unsigned char c = (bitBuf >> 16) & 255; + stbiw__putc(s, c); + if(c == 255) { + stbiw__putc(s, 0); + } + bitBuf <<= 8; + bitCnt -= 8; + } + *bitBufP = bitBuf; + *bitCntP = bitCnt; +} + +static void stbiw__jpg_DCT(float *d0p, float *d1p, float *d2p, float *d3p, float *d4p, float *d5p, float *d6p, float *d7p) { + float d0 = *d0p, d1 = *d1p, d2 = *d2p, d3 = *d3p, d4 = *d4p, d5 = *d5p, d6 = *d6p, d7 = *d7p; + float z1, z2, z3, z4, z5, z11, z13; + + float tmp0 = d0 + d7; + float tmp7 = d0 - d7; + float tmp1 = d1 + d6; + float tmp6 = d1 - d6; + float tmp2 = d2 + d5; + float 
tmp5 = d2 - d5; + float tmp3 = d3 + d4; + float tmp4 = d3 - d4; + + // Even part + float tmp10 = tmp0 + tmp3; // phase 2 + float tmp13 = tmp0 - tmp3; + float tmp11 = tmp1 + tmp2; + float tmp12 = tmp1 - tmp2; + + d0 = tmp10 + tmp11; // phase 3 + d4 = tmp10 - tmp11; + + z1 = (tmp12 + tmp13) * 0.707106781f; // c4 + d2 = tmp13 + z1; // phase 5 + d6 = tmp13 - z1; + + // Odd part + tmp10 = tmp4 + tmp5; // phase 2 + tmp11 = tmp5 + tmp6; + tmp12 = tmp6 + tmp7; + + // The rotator is modified from fig 4-8 to avoid extra negations. + z5 = (tmp10 - tmp12) * 0.382683433f; // c6 + z2 = tmp10 * 0.541196100f + z5; // c2-c6 + z4 = tmp12 * 1.306562965f + z5; // c2+c6 + z3 = tmp11 * 0.707106781f; // c4 + + z11 = tmp7 + z3; // phase 5 + z13 = tmp7 - z3; + + *d5p = z13 + z2; // phase 6 + *d3p = z13 - z2; + *d1p = z11 + z4; + *d7p = z11 - z4; + + *d0p = d0; *d2p = d2; *d4p = d4; *d6p = d6; +} + +static void stbiw__jpg_calcBits(int val, unsigned short bits[2]) { + int tmp1 = val < 0 ? -val : val; + val = val < 0 ? 
val-1 : val; + bits[1] = 1; + while(tmp1 >>= 1) { + ++bits[1]; + } + bits[0] = val & ((1<0)&&(DU[end0pos]==0); --end0pos) { + } + // end0pos = first element in reverse order !=0 + if(end0pos == 0) { + stbiw__jpg_writeBits(s, bitBuf, bitCnt, EOB); + return DU[0]; + } + for(i = 1; i <= end0pos; ++i) { + int startpos = i; + int nrzeroes; + unsigned short bits[2]; + for (; DU[i]==0 && i<=end0pos; ++i) { + } + nrzeroes = i-startpos; + if ( nrzeroes >= 16 ) { + int lng = nrzeroes>>4; + int nrmarker; + for (nrmarker=1; nrmarker <= lng; ++nrmarker) + stbiw__jpg_writeBits(s, bitBuf, bitCnt, M16zeroes); + nrzeroes &= 15; + } + stbiw__jpg_calcBits(DU[i], bits); + stbiw__jpg_writeBits(s, bitBuf, bitCnt, HTAC[(nrzeroes<<4)+bits[1]]); + stbiw__jpg_writeBits(s, bitBuf, bitCnt, bits); + } + if(end0pos != 63) { + stbiw__jpg_writeBits(s, bitBuf, bitCnt, EOB); + } + return DU[0]; +} + +static int stbi_write_jpg_core(stbi__write_context *s, int width, int height, int comp, const void* data, int quality) { + // Constants that don't pollute global namespace + static const unsigned char std_dc_luminance_nrcodes[] = {0,0,1,5,1,1,1,1,1,1,0,0,0,0,0,0,0}; + static const unsigned char std_dc_luminance_values[] = {0,1,2,3,4,5,6,7,8,9,10,11}; + static const unsigned char std_ac_luminance_nrcodes[] = {0,0,2,1,3,3,2,4,3,5,5,4,4,0,0,1,0x7d}; + static const unsigned char std_ac_luminance_values[] = { + 0x01,0x02,0x03,0x00,0x04,0x11,0x05,0x12,0x21,0x31,0x41,0x06,0x13,0x51,0x61,0x07,0x22,0x71,0x14,0x32,0x81,0x91,0xa1,0x08, + 0x23,0x42,0xb1,0xc1,0x15,0x52,0xd1,0xf0,0x24,0x33,0x62,0x72,0x82,0x09,0x0a,0x16,0x17,0x18,0x19,0x1a,0x25,0x26,0x27,0x28, + 0x29,0x2a,0x34,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59, + 0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x83,0x84,0x85,0x86,0x87,0x88,0x89, + 
0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6, + 0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe1,0xe2, + 0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,0xf9,0xfa + }; + static const unsigned char std_dc_chrominance_nrcodes[] = {0,0,3,1,1,1,1,1,1,1,1,1,0,0,0,0,0}; + static const unsigned char std_dc_chrominance_values[] = {0,1,2,3,4,5,6,7,8,9,10,11}; + static const unsigned char std_ac_chrominance_nrcodes[] = {0,0,2,1,2,4,4,3,4,7,5,4,4,0,1,2,0x77}; + static const unsigned char std_ac_chrominance_values[] = { + 0x00,0x01,0x02,0x03,0x11,0x04,0x05,0x21,0x31,0x06,0x12,0x41,0x51,0x07,0x61,0x71,0x13,0x22,0x32,0x81,0x08,0x14,0x42,0x91, + 0xa1,0xb1,0xc1,0x09,0x23,0x33,0x52,0xf0,0x15,0x62,0x72,0xd1,0x0a,0x16,0x24,0x34,0xe1,0x25,0xf1,0x17,0x18,0x19,0x1a,0x26, + 0x27,0x28,0x29,0x2a,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58, + 0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x82,0x83,0x84,0x85,0x86,0x87, + 0x88,0x89,0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4, + 0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda, + 0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,0xf9,0xfa + }; + // Huffman tables + static const unsigned short YDC_HT[256][2] = { {0,2},{2,3},{3,3},{4,3},{5,3},{6,3},{14,4},{30,5},{62,6},{126,7},{254,8},{510,9}}; + static const unsigned short UVDC_HT[256][2] = { {0,2},{1,2},{2,2},{6,3},{14,4},{30,5},{62,6},{126,7},{254,8},{510,9},{1022,10},{2046,11}}; + static const unsigned short YAC_HT[256][2] = { + {10,4},{0,2},{1,2},{4,3},{11,4},{26,5},{120,7},{248,8},{1014,10},{65410,16},{65411,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + 
{12,4},{27,5},{121,7},{502,9},{2038,11},{65412,16},{65413,16},{65414,16},{65415,16},{65416,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {28,5},{249,8},{1015,10},{4084,12},{65417,16},{65418,16},{65419,16},{65420,16},{65421,16},{65422,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {58,6},{503,9},{4085,12},{65423,16},{65424,16},{65425,16},{65426,16},{65427,16},{65428,16},{65429,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {59,6},{1016,10},{65430,16},{65431,16},{65432,16},{65433,16},{65434,16},{65435,16},{65436,16},{65437,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {122,7},{2039,11},{65438,16},{65439,16},{65440,16},{65441,16},{65442,16},{65443,16},{65444,16},{65445,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {123,7},{4086,12},{65446,16},{65447,16},{65448,16},{65449,16},{65450,16},{65451,16},{65452,16},{65453,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {250,8},{4087,12},{65454,16},{65455,16},{65456,16},{65457,16},{65458,16},{65459,16},{65460,16},{65461,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {504,9},{32704,15},{65462,16},{65463,16},{65464,16},{65465,16},{65466,16},{65467,16},{65468,16},{65469,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {505,9},{65470,16},{65471,16},{65472,16},{65473,16},{65474,16},{65475,16},{65476,16},{65477,16},{65478,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {506,9},{65479,16},{65480,16},{65481,16},{65482,16},{65483,16},{65484,16},{65485,16},{65486,16},{65487,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {1017,10},{65488,16},{65489,16},{65490,16},{65491,16},{65492,16},{65493,16},{65494,16},{65495,16},{65496,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {1018,10},{65497,16},{65498,16},{65499,16},{65500,16},{65501,16},{65502,16},{65503,16},{65504,16},{65505,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {2040,11},{65506,16},{65507,16},{65508,16},{65509,16},{65510,16},{65511,16},{65512,16},{65513,16},{65514,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + 
{65515,16},{65516,16},{65517,16},{65518,16},{65519,16},{65520,16},{65521,16},{65522,16},{65523,16},{65524,16},{0,0},{0,0},{0,0},{0,0},{0,0}, + {2041,11},{65525,16},{65526,16},{65527,16},{65528,16},{65529,16},{65530,16},{65531,16},{65532,16},{65533,16},{65534,16},{0,0},{0,0},{0,0},{0,0},{0,0} + }; + static const unsigned short UVAC_HT[256][2] = { + {0,2},{1,2},{4,3},{10,4},{24,5},{25,5},{56,6},{120,7},{500,9},{1014,10},{4084,12},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {11,4},{57,6},{246,8},{501,9},{2038,11},{4085,12},{65416,16},{65417,16},{65418,16},{65419,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {26,5},{247,8},{1015,10},{4086,12},{32706,15},{65420,16},{65421,16},{65422,16},{65423,16},{65424,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {27,5},{248,8},{1016,10},{4087,12},{65425,16},{65426,16},{65427,16},{65428,16},{65429,16},{65430,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {58,6},{502,9},{65431,16},{65432,16},{65433,16},{65434,16},{65435,16},{65436,16},{65437,16},{65438,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {59,6},{1017,10},{65439,16},{65440,16},{65441,16},{65442,16},{65443,16},{65444,16},{65445,16},{65446,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {121,7},{2039,11},{65447,16},{65448,16},{65449,16},{65450,16},{65451,16},{65452,16},{65453,16},{65454,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {122,7},{2040,11},{65455,16},{65456,16},{65457,16},{65458,16},{65459,16},{65460,16},{65461,16},{65462,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {249,8},{65463,16},{65464,16},{65465,16},{65466,16},{65467,16},{65468,16},{65469,16},{65470,16},{65471,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {503,9},{65472,16},{65473,16},{65474,16},{65475,16},{65476,16},{65477,16},{65478,16},{65479,16},{65480,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {504,9},{65481,16},{65482,16},{65483,16},{65484,16},{65485,16},{65486,16},{65487,16},{65488,16},{65489,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + 
{505,9},{65490,16},{65491,16},{65492,16},{65493,16},{65494,16},{65495,16},{65496,16},{65497,16},{65498,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {506,9},{65499,16},{65500,16},{65501,16},{65502,16},{65503,16},{65504,16},{65505,16},{65506,16},{65507,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {2041,11},{65508,16},{65509,16},{65510,16},{65511,16},{65512,16},{65513,16},{65514,16},{65515,16},{65516,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {16352,14},{65517,16},{65518,16},{65519,16},{65520,16},{65521,16},{65522,16},{65523,16},{65524,16},{65525,16},{0,0},{0,0},{0,0},{0,0},{0,0}, + {1018,10},{32707,15},{65526,16},{65527,16},{65528,16},{65529,16},{65530,16},{65531,16},{65532,16},{65533,16},{65534,16},{0,0},{0,0},{0,0},{0,0},{0,0} + }; + static const int YQT[] = {16,11,10,16,24,40,51,61,12,12,14,19,26,58,60,55,14,13,16,24,40,57,69,56,14,17,22,29,51,87,80,62,18,22, + 37,56,68,109,103,77,24,35,55,64,81,104,113,92,49,64,78,87,103,121,120,101,72,92,95,98,112,100,103,99}; + static const int UVQT[] = {17,18,24,47,99,99,99,99,18,21,26,66,99,99,99,99,24,26,56,99,99,99,99,99,47,66,99,99,99,99,99,99, + 99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99}; + static const float aasf[] = { 1.0f * 2.828427125f, 1.387039845f * 2.828427125f, 1.306562965f * 2.828427125f, 1.175875602f * 2.828427125f, + 1.0f * 2.828427125f, 0.785694958f * 2.828427125f, 0.541196100f * 2.828427125f, 0.275899379f * 2.828427125f }; + + int row, col, i, k, subsample; + float fdtbl_Y[64], fdtbl_UV[64]; + unsigned char YTable[64], UVTable[64]; + + if(!data || !width || !height || comp > 4 || comp < 1) { + return 0; + } + + quality = quality ? quality : 90; + subsample = quality <= 90 ? 1 : 0; + quality = quality < 1 ? 1 : quality > 100 ? 100 : quality; + quality = quality < 50 ? 5000 / quality : 200 - quality * 2; + + for(i = 0; i < 64; ++i) { + int uvti, yti = (YQT[i]*quality+50)/100; + YTable[stbiw__jpg_ZigZag[i]] = (unsigned char) (yti < 1 ? 1 : yti > 255 ? 
255 : yti); + uvti = (UVQT[i]*quality+50)/100; + UVTable[stbiw__jpg_ZigZag[i]] = (unsigned char) (uvti < 1 ? 1 : uvti > 255 ? 255 : uvti); + } + + for(row = 0, k = 0; row < 8; ++row) { + for(col = 0; col < 8; ++col, ++k) { + fdtbl_Y[k] = 1 / (YTable [stbiw__jpg_ZigZag[k]] * aasf[row] * aasf[col]); + fdtbl_UV[k] = 1 / (UVTable[stbiw__jpg_ZigZag[k]] * aasf[row] * aasf[col]); + } + } + + // Write Headers + { + static const unsigned char head0[] = { 0xFF,0xD8,0xFF,0xE0,0,0x10,'J','F','I','F',0,1,1,0,0,1,0,1,0,0,0xFF,0xDB,0,0x84,0 }; + static const unsigned char head2[] = { 0xFF,0xDA,0,0xC,3,1,0,2,0x11,3,0x11,0,0x3F,0 }; + const unsigned char head1[] = { 0xFF,0xC0,0,0x11,8,(unsigned char)(height>>8),STBIW_UCHAR(height),(unsigned char)(width>>8),STBIW_UCHAR(width), + 3,1,(unsigned char)(subsample?0x22:0x11),0,2,0x11,1,3,0x11,1,0xFF,0xC4,0x01,0xA2,0 }; + s->func(s->context, (void*)head0, sizeof(head0)); + s->func(s->context, (void*)YTable, sizeof(YTable)); + stbiw__putc(s, 1); + s->func(s->context, UVTable, sizeof(UVTable)); + s->func(s->context, (void*)head1, sizeof(head1)); + s->func(s->context, (void*)(std_dc_luminance_nrcodes+1), sizeof(std_dc_luminance_nrcodes)-1); + s->func(s->context, (void*)std_dc_luminance_values, sizeof(std_dc_luminance_values)); + stbiw__putc(s, 0x10); // HTYACinfo + s->func(s->context, (void*)(std_ac_luminance_nrcodes+1), sizeof(std_ac_luminance_nrcodes)-1); + s->func(s->context, (void*)std_ac_luminance_values, sizeof(std_ac_luminance_values)); + stbiw__putc(s, 1); // HTUDCinfo + s->func(s->context, (void*)(std_dc_chrominance_nrcodes+1), sizeof(std_dc_chrominance_nrcodes)-1); + s->func(s->context, (void*)std_dc_chrominance_values, sizeof(std_dc_chrominance_values)); + stbiw__putc(s, 0x11); // HTUACinfo + s->func(s->context, (void*)(std_ac_chrominance_nrcodes+1), sizeof(std_ac_chrominance_nrcodes)-1); + s->func(s->context, (void*)std_ac_chrominance_values, sizeof(std_ac_chrominance_values)); + s->func(s->context, (void*)head2, sizeof(head2)); + 
} + + // Encode 8x8 macroblocks + { + static const unsigned short fillBits[] = {0x7F, 7}; + int DCY=0, DCU=0, DCV=0; + int bitBuf=0, bitCnt=0; + // comp == 2 is grey+alpha (alpha is ignored) + int ofsG = comp > 2 ? 1 : 0, ofsB = comp > 2 ? 2 : 0; + const unsigned char *dataR = (const unsigned char *)data; + const unsigned char *dataG = dataR + ofsG; + const unsigned char *dataB = dataR + ofsB; + int x, y, pos; + if(subsample) { + for(y = 0; y < height; y += 16) { + for(x = 0; x < width; x += 16) { + float Y[256], U[256], V[256]; + for(row = y, pos = 0; row < y+16; ++row) { + // row >= height => use last input row + int clamped_row = (row < height) ? row : height - 1; + int base_p = (stbi__flip_vertically_on_write ? (height-1-clamped_row) : clamped_row)*width*comp; + for(col = x; col < x+16; ++col, ++pos) { + // if col >= width => use pixel from last input column + int p = base_p + ((col < width) ? col : (width-1))*comp; + float r = dataR[p], g = dataG[p], b = dataB[p]; + Y[pos]= +0.29900f*r + 0.58700f*g + 0.11400f*b - 128; + U[pos]= -0.16874f*r - 0.33126f*g + 0.50000f*b; + V[pos]= +0.50000f*r - 0.41869f*g - 0.08131f*b; + } + } + DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y+0, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); + DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y+8, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); + DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y+128, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); + DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y+136, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); + + // subsample U,V + { + float subU[64], subV[64]; + int yy, xx; + for(yy = 0, pos = 0; yy < 8; ++yy) { + for(xx = 0; xx < 8; ++xx, ++pos) { + int j = yy*32+xx*2; + subU[pos] = (U[j+0] + U[j+1] + U[j+16] + U[j+17]) * 0.25f; + subV[pos] = (V[j+0] + V[j+1] + V[j+16] + V[j+17]) * 0.25f; + } + } + DCU = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, subU, 8, fdtbl_UV, DCU, UVDC_HT, UVAC_HT); + DCV = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, subV, 8, fdtbl_UV, DCV, UVDC_HT, UVAC_HT); + } + } + 
} + } else { + for(y = 0; y < height; y += 8) { + for(x = 0; x < width; x += 8) { + float Y[64], U[64], V[64]; + for(row = y, pos = 0; row < y+8; ++row) { + // row >= height => use last input row + int clamped_row = (row < height) ? row : height - 1; + int base_p = (stbi__flip_vertically_on_write ? (height-1-clamped_row) : clamped_row)*width*comp; + for(col = x; col < x+8; ++col, ++pos) { + // if col >= width => use pixel from last input column + int p = base_p + ((col < width) ? col : (width-1))*comp; + float r = dataR[p], g = dataG[p], b = dataB[p]; + Y[pos]= +0.29900f*r + 0.58700f*g + 0.11400f*b - 128; + U[pos]= -0.16874f*r - 0.33126f*g + 0.50000f*b; + V[pos]= +0.50000f*r - 0.41869f*g - 0.08131f*b; + } + } + + DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y, 8, fdtbl_Y, DCY, YDC_HT, YAC_HT); + DCU = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, U, 8, fdtbl_UV, DCU, UVDC_HT, UVAC_HT); + DCV = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, V, 8, fdtbl_UV, DCV, UVDC_HT, UVAC_HT); + } + } + } + + // Do the bit alignment of the EOI marker + stbiw__jpg_writeBits(s, &bitBuf, &bitCnt, fillBits); + } + + // EOI + stbiw__putc(s, 0xFF); + stbiw__putc(s, 0xD9); + + return 1; +} + +STBIWDEF int stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int quality) +{ + stbi__write_context s = { 0 }; + stbi__start_write_callbacks(&s, func, context); + return stbi_write_jpg_core(&s, x, y, comp, (void *) data, quality); +} + + +#ifndef STBI_WRITE_NO_STDIO +STBIWDEF int stbi_write_jpg(char const *filename, int x, int y, int comp, const void *data, int quality) +{ + stbi__write_context s = { 0 }; + if (stbi__start_write_file(&s,filename)) { + int r = stbi_write_jpg_core(&s, x, y, comp, data, quality); + stbi__end_write_file(&s); + return r; + } else + return 0; +} +#endif + +#endif // STB_IMAGE_WRITE_IMPLEMENTATION + +/* Revision history + 1.16 (2021-07-11) + make Deflate code emit uncompressed blocks when it would otherwise expand + support 
writing BMPs with alpha channel + 1.15 (2020-07-13) unknown + 1.14 (2020-02-02) updated JPEG writer to downsample chroma channels + 1.13 + 1.12 + 1.11 (2019-08-11) + + 1.10 (2019-02-07) + support utf8 filenames in Windows; fix warnings and platform ifdefs + 1.09 (2018-02-11) + fix typo in zlib quality API, improve STB_I_W_STATIC in C++ + 1.08 (2018-01-29) + add stbi__flip_vertically_on_write, external zlib, zlib quality, choose PNG filter + 1.07 (2017-07-24) + doc fix + 1.06 (2017-07-23) + writing JPEG (using Jon Olick's code) + 1.05 ??? + 1.04 (2017-03-03) + monochrome BMP expansion + 1.03 ??? + 1.02 (2016-04-02) + avoid allocating large structures on the stack + 1.01 (2016-01-16) + STBIW_REALLOC_SIZED: support allocators with no realloc support + avoid race-condition in crc initialization + minor compile issues + 1.00 (2015-09-14) + installable file IO function + 0.99 (2015-09-13) + warning fixes; TGA rle support + 0.98 (2015-04-08) + added STBIW_MALLOC, STBIW_ASSERT etc + 0.97 (2015-01-18) + fixed HDR asserts, rewrote HDR rle logic + 0.96 (2015-01-17) + add HDR output + fix monochrome BMP + 0.95 (2014-08-17) + add monochrome TGA output + 0.94 (2014-05-31) + rename private functions to avoid conflicts with stb_image.h + 0.93 (2014-05-27) + warning fixes + 0.92 (2010-08-01) + casts to unsigned char to fix warnings + 0.91 (2010-07-17) + first public release + 0.90 first internal release +*/ + +/* +------------------------------------------------------------------------------ +This software is available under 2 licenses -- choose whichever you prefer. 
+------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. 
We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..271a9bd --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,85 @@ +cmake_minimum_required(VERSION 3.21) +cmake_policy(SET CMP0091 NEW) + +project("spacegame" + VERSION 0.2.2 + DESCRIPTION "Blocky spacegame by crydsch@lph.zone" + LANGUAGES CXX +) + +set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -ggdb") +set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O0 -ggdb") + +# Enable compiler warnings +set(SPACEGAME_COMPILER_WARNINGS + -Wall -Wextra -pedantic + -Wcast-align -Wcast-qual -Wformat=2 -Winit-self -Wmissing-declarations + -Wmissing-include-dirs -Wredundant-decls -Wswitch-default + -Wundef -Wctor-dtor-privacy +) + +# Enable compile_commands.json generation for other tools +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) + +set(EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/bin) + +if(NOT EXISTS /spacegame_deps) + message(FATAL_ERROR "'/spacegame_deps' not found!\nYou need to build the required dependecies first.\nSee 'scripts/build_deps.sh'") +else() + # Search for our pre-built dependencies first + set(CMAKE_PREFIX_PATH /spacegame_deps) +endif() + +list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake") + +# Note: shaderc must be run on the host system +option(SPACEGAME_BUILD_SHADERS "Use shaderc to 
build spirv shaders" OFF) + + +################ dependencies ################ + +set(THREADS_PREFER_PTHREAD_FLAG ON) +find_package(Threads REQUIRED) + +find_package(glfw3 REQUIRED) + +# This finds import libraries: bgfx::bgfx, bgfx::bx and bgfx::bimg +# Linking with these will bring: include dirs, compiler options, compiler defs, link libraries +find_package(bgfx REQUIRED) +# bgfx::bgfx wrongly links with: OpenGL,GLX and others +# So we link to libbgfx.a only +find_library(BGFX NAMES libbgfx.a REQUIRED) + +if(SPACEGAME_BUILD_SHADERS) + find_program(SHADERC shaderc REQUIRED) +endif() + +################## sources ################### + +if(SPACEGAME_BUILD_SHADERS) + add_subdirectory(shaders) +endif() + +# Game executable is here: +add_subdirectory(src) + +################# debugging ################## + +# A useful trick is to append "-LAH" to the cmake command +# of libraries to see their available options. +# +# Uncomment this section to have cmake print all variables. +# This is useful to find the actual variable names of the libraries in use. +# Such as, is it ${SMTH_INCLUDE_DIR} or ${SMTH_INCLUDE_DIRS}? +# get_cmake_property(_variableNames VARIABLES) +# list (SORT _variableNames) +# foreach (_variableName ${_variableNames}) +# message(STATUS "${_variableName}=${${_variableName}}") +# endforeach() +# +# List imported targets from 'find_package(..)' +# get_property(importTargets DIRECTORY "${CMAKE_SOURCE_DIR}" PROPERTY IMPORTED_TARGETS) +# message("Import Targets: ${importTargets}") +# +# Build this target to test generator expressions +# add_custom_target(debug_gen_expr COMMAND ${CMAKE_COMMAND} -E echo "$,true_string,false_string>") \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..afb93fe --- /dev/null +++ b/README.md @@ -0,0 +1,14 @@ +# Spacegame + +My little toy voxel engine born from an unconventional use of the rendering pipeline. +An idea that just kept working until I had a small prototype. 
+Might become a real game one day when it grows up. + +## Experiements here include + +- CMake + ninja as a build system +- The entire llvm compiler suite + cross compiling linux->windows +- A little Orthodox C++ +- Developing my own camera and input system +- Vulkan via bgfx: compute shader to generate geometry on the GPU + rendering the result. +- Automated release builds with docker + CI \ No newline at end of file diff --git a/assets/textures/colors.png b/assets/textures/colors.png new file mode 100644 index 0000000..6001fc3 Binary files /dev/null and b/assets/textures/colors.png differ diff --git a/cmake/README.md b/cmake/README.md new file mode 100644 index 0000000..acfc143 --- /dev/null +++ b/cmake/README.md @@ -0,0 +1,18 @@ + +# Toolchain +To use the same toolchain as the CI in vscode +place the file 'cmake-kits.json' into your '.vscode' directory. +Then select the 'Spacegame LLVM Kit' in the cmake-tools extension. +Restarting vscode may be necessary. + +# Debugging +To enable debugging add this to your .vscode/settings.json +``` +"cmake.debugConfig": { + "MIMode": "gdb", + "miDebuggerPath": "/usr/bin/gdb" + } +``` + +# Usefull links +- [LLVM Debian/Ubuntu packages](https://apt.llvm.org/) diff --git a/cmake/clang_mingw_toolchain.cmake b/cmake/clang_mingw_toolchain.cmake new file mode 100644 index 0000000..8e6b616 --- /dev/null +++ b/cmake/clang_mingw_toolchain.cmake @@ -0,0 +1,34 @@ +# This toolchain configuration file can be used cross-compile from linux to windows +# Ref.: https://github.com/glfw/glfw/blob/master/CMake/x86_64-w64-mingw32-clang.cmake + +MESSAGE("Cross-Compiling with toolchain file: ${CMAKE_TOOLCHAIN_FILE}") + +# llvm-mingw-x86_64 is expected to be avilable under this directory +set(MINGW_DIRECTORY /llvm-mingw-x86_64) + +# Define the environment for cross-compiling with 64-bit MinGW-w64 Clang +set(CMAKE_SYSTEM_NAME Windows) +SET(CMAKE_SYSTEM_VERSION 1) +set(CMAKE_C_COMPILER ${MINGW_DIRECTORY}/bin/x86_64-w64-mingw32-clang) 
+set(CMAKE_CXX_COMPILER ${MINGW_DIRECTORY}/bin/x86_64-w64-mingw32-clang++) +set(CMAKE_RC_COMPILER ${MINGW_DIRECTORY}/bin/x86_64-w64-mingw32-windres) +set(CMAKE_RANLIB ${MINGW_DIRECTORY}/bin/x86_64-w64-mingw32-ranlib) +set(CMAKE_AR ${MINGW_DIRECTORY}/bin/x86_64-w64-mingw32-ar) +set(CMAKE_STRIP ${MINGW_DIRECTORY}/bin/x86_64-w64-mingw32-strip) + +# Configure the behaviour of the find commands +set(CMAKE_FIND_ROOT_PATH + ${MINGW_DIRECTORY}/x86_64-w64-mingw32 + ${MINGW_DIRECTORY} + /spacegame_deps) +set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) +set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) + +# Note: For windows we supply all libraries in use directly through mingw => Full static linking! +set(CMAKE_CXX_FLAGS_INIT "-stdlib=libc++") +set(CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld -static") +set(CMAKE_MODULE_LINKER_FLAGS_INIT "-fuse-ld=lld -static") +set(CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld -static") +set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE TRUE) +set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_MINSIZEREL TRUE) diff --git a/cmake/clang_toolchain.cmake b/cmake/clang_toolchain.cmake new file mode 100644 index 0000000..7658a4e --- /dev/null +++ b/cmake/clang_toolchain.cmake @@ -0,0 +1,23 @@ +# This toolchain configuration file can be used for 'normal' linux native compilation + +MESSAGE("Compiling with toolchain file: ${CMAKE_TOOLCHAIN_FILE}") + +# Define the environment for compiling with 64-bit clang +# Note: Ensure the proper links / aliases are set +# ex. /usr/bin/clang -> /usr/bin/clang-18 +set(CMAKE_C_COMPILER /usr/bin/clang ) +set(CMAKE_CXX_COMPILER /usr/bin/clang++ ) +set(CMAKE_RC_COMPILER /usr/bin/llvm-windres) +set(CMAKE_RANLIB /usr/bin/llvm-ranlib ) +set(CMAKE_AR /usr/bin/llvm-ar ) +set(CMAKE_STRIP /usr/bin/llvm-strip ) + +# Note: We can static link with libc++, but not entirely since we depend on dynamic +# libraries such as x11,glx,etc... 
+# Note: Despite its name '-static-libstdc++' the option causes libc++ to be linked statically +set(CMAKE_CXX_FLAGS_INIT "-stdlib=libc++" ) +set(CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=lld -static-libstdc++") +set(CMAKE_MODULE_LINKER_FLAGS_INIT "-fuse-ld=lld -static-libstdc++") +set(CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=lld -static-libstdc++") +set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE TRUE) +set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_MINSIZEREL TRUE) diff --git a/cmake/cmake-kits.json b/cmake/cmake-kits.json new file mode 100644 index 0000000..de38db3 --- /dev/null +++ b/cmake/cmake-kits.json @@ -0,0 +1,10 @@ +[ + { + "name": "Spacegame LLVM Kit", + "toolchainFile": "cmake/clang_toolchain.cmake", + "cmakeSettings": + { + "SPACEGAME_BUILD_SHADERS": "ON" + } + } +] diff --git a/docker/Dockerfile_Base b/docker/Dockerfile_Base new file mode 100644 index 0000000..2403140 --- /dev/null +++ b/docker/Dockerfile_Base @@ -0,0 +1,37 @@ +# Ensure `nvidia-smi` & `vulkaninfo` run correctly on the host system. +# Run with `sudo docker run -it --rm --gpus all `. +# Check `nvidia-smi` & `vulkaninfo` run correctly inside the container. 
+# may need to install: +# libnvidia-gl-525-server \ +# vulkan-tools +# for debugging maybe: gdb + +FROM ubuntu:22.04 + +# Non interactive mode +ENV DEBIAN_FRONTEND=noninteractive + +COPY docker/rebuild_from_base.stamp docker/rebuild_from_base.stamp + +# Dependencies & Tools +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + cmake git micro jq \ + pkg-config curl wget zip \ + ca-certificates xz-utils \ + software-properties-common \ + cppcheck valgrind \ + `# glfw dependecies for x11 ` \ + xorg-dev \ + `# glfw dependecies for wayland ` \ + libwayland-dev libxkbcommon-dev wayland-protocols extra-cmake-modules \ + `# bgfx dependecies ` \ + libgl1-mesa-dev x11proto-core-dev libx11-dev && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* && \ + `# custom ninja ` \ + wget -O /usr/bin/ninja https://git.lph.zone/crydsch/ninja/releases/download/latest.proc_loadavg/ninja && \ + chmod +x /usr/bin/ninja && \ + ninja --version && \ + `# disable git detachedHead warning ` \ + git config --global advice.detachedHead false diff --git a/docker/Dockerfile_Linux b/docker/Dockerfile_Linux new file mode 100644 index 0000000..91bc259 --- /dev/null +++ b/docker/Dockerfile_Linux @@ -0,0 +1,32 @@ +FROM spacegame_base + +COPY docker/rebuild_from_llvm.stamp docker/rebuild_from_llvm.stamp + +# LLVM +# RUN `# llvm-16 ` \ +# wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc && \ +# add-apt-repository deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-16 main && \ +# apt-get update && \ +# apt-get install -y --no-install-recommends \ +# clang-16 clang-tools-16 \ +# lldb-16 lld-16 \ +# clang-tidy-16 clang-format-16 \ +# libc++-16-dev libc++abi-16-dev && \ +# apt-get clean && \ +# rm -rf /var/lib/apt/lists/* +COPY scripts/setup_llvm_links.sh scripts/setup_llvm_links.sh +RUN `# llvm stable via convenience script` \ + wget https://apt.llvm.org/llvm.sh && \ + chmod +x llvm.sh && \ + ./llvm.sh all && \ + apt-get clean && \ + 
rm -rf /var/lib/apt/lists/* && \ + `# setup required links` \ + scripts/setup_llvm_links.sh + +# Build 3rdparty deps +COPY 3rdparty 3rdparty +COPY cmake/clang_toolchain.cmake cmake/clang_toolchain.cmake +COPY scripts/build_deps.sh scripts/build_deps.sh +RUN scripts/build_deps.sh "cmake/clang_toolchain.cmake" && \ + rm -rf 3rdparty/glfw 3rdparty/bx 3rdparty/bimg 3rdparty/bgfx 3rdparty/bgfx.cmake diff --git a/docker/Dockerfile_Windows b/docker/Dockerfile_Windows new file mode 100644 index 0000000..d70bd68 --- /dev/null +++ b/docker/Dockerfile_Windows @@ -0,0 +1,24 @@ +FROM spacegame_base + +COPY docker/rebuild_from_llvm.stamp docker/rebuild_from_llvm.stamp + +# LLVM +RUN `# llvm mingw ` \ + curl -L \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer 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" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + https://api.github.com/repos/mstorsjo/llvm-mingw/releases/latest | jq '.assets[] | select( .name | test("ucrt-ubuntu-20.04-x86_64") ).browser_download_url' | xargs wget -O llvm-mingw-x86_64.tar.xz && \ + mkdir llvm-mingw-x86_64 && \ + tar xvf llvm-mingw-x86_64.tar.xz --directory llvm-mingw-x86_64 --strip-components=1 && \ + rm -rf llvm-mingw-x86_64.tar.xz \ + llvm-mingw-x86_64/aarch64-w64-mingw32 \ + llvm-mingw-x86_64/armv7-w64-mingw32 \ + llvm-mingw-x86_64/i686-w64-mingw32 + +# Build 3rdparty deps +COPY 3rdparty 3rdparty +COPY cmake/clang_mingw_toolchain.cmake cmake/clang_mingw_toolchain.cmake +COPY scripts/build_deps.sh scripts/build_deps.sh +RUN scripts/build_deps.sh "cmake/clang_mingw_toolchain.cmake" && \ + rm -rf 3rdparty/glfw 3rdparty/bx 3rdparty/bimg 3rdparty/bgfx 3rdparty/bgfx.cmake diff --git a/docker/rebuild_from_base.stamp b/docker/rebuild_from_base.stamp new file mode 100644 index 0000000..e69de29 diff --git a/docker/rebuild_from_llvm.stamp b/docker/rebuild_from_llvm.stamp new file mode 100644 index 0000000..56a6051 --- /dev/null +++ 
b/docker/rebuild_from_llvm.stamp @@ -0,0 +1 @@ +1 \ No newline at end of file diff --git a/scripts/clean_deps_n_build.sh b/scripts/clean_deps_n_build.sh new file mode 100755 index 0000000..d5c0d69 --- /dev/null +++ b/scripts/clean_deps_n_build.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -e + +if [ "$EUID" -ne 0 ] + then echo "This script must be run as root" + exit 1 +fi + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" + +pushd "$SCRIPT_DIR"/.. + +rm -rf /spacegame_deps build* assets/shaders/*.spv 3rdparty/glfw 3rdparty/bx 3rdparty/bimg 3rdparty/bgfx 3rdparty/bgfx.cmake + +popd diff --git a/scripts/release_game.sh b/scripts/release_game.sh new file mode 100755 index 0000000..e5d13ad --- /dev/null +++ b/scripts/release_game.sh @@ -0,0 +1,53 @@ +#!/bin/bash + +set -e + +# Construct base version: '1.2.3-123+20230527T195630.811d543' +# Platform specific versions may add extra info: '1.2.3-123+20230527T195630.811d543.linux.x11' + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" + +pushd "$SCRIPT_DIR"/.. 
2>&1 1>/dev/null + +# Fetch all tags from remote (if something changed) +git fetch --tags + +# Get current version from latest tag +VERSION_SHORT=$(git describe --tags --abbrev=0) + +echo "Current Version: $VERSION_SHORT" + +# Advance by one revision +# check if revision present +if [[ $VERSION_SHORT != *"-"* ]]; then + VERSION_SHORT="$VERSION_SHORT-0" +fi +# seperate base version from revision +BASE_VERSION=$(echo $VERSION_SHORT | sed -E 's~^(.*[.].*[.].*)-([0-9]+)~\1~I') +REVISION=$(echo $VERSION_SHORT | sed -E 's~^(.*[.].*[.].*)-([0-9]+)~\2~I') +# +1 +REVISION=$(($REVISION+1)) || $(echo 1) +# Assemble new version +VERSION_SHORT="$BASE_VERSION-$REVISION" + +read -p "Enter New Version [$VERSION_SHORT]: " VERSION_SHORT_INPUT +VERSION_SHORT=${VERSION_SHORT_INPUT:-$VERSION_SHORT} + +# Collect extra build information +# CMAKE_VERSION=$(grep CMAKE_PROJECT_VERSION:STATIC build/CMakeCache.txt | cut -d "=" -f2) +TIME=$(date -u +"%Y%m%dT%H%M%S") +COMMIT=$(git rev-parse HEAD) + +VERSION_LONG="$VERSION_SHORT+$TIME.$COMMIT" + +# Push any outstanding commits +git commit --allow-empty -m "Release $VERSION_SHORT" -m "[CI SKIP]" +git push + +# Create annotated tag named named after short version and annotated with long version +git tag -a -m "$VERSION_LONG" "$VERSION_SHORT" + +# push the new tag +git push origin "$VERSION_SHORT" + +popd 2>&1 1>/dev/null diff --git a/scripts/setup_llvm_links.sh b/scripts/setup_llvm_links.sh new file mode 100755 index 0000000..f9ee19f --- /dev/null +++ b/scripts/setup_llvm_links.sh @@ -0,0 +1,58 @@ +#!/bin/bash + +# This cript creates default links to a set of llvm binaries +# ex. 
/usr/bin/clang -> /usr/bin/clang-18 + +set -e + +if [ "$EUID" -ne 0 ] + then echo "This script must be run as root" + exit 1 +fi + +# Detect clang version +# Command Breakdown: +# Get all installed packages +# | Detect clang version +# | | Sort available versions +# | | | Pick highest version +# | | | | Remove "clang-" to extract the version only +# v v v v v +VER=$(dpkg --list | grep -o "clang-[0-9][0-9]*" | sort -r | head -n 1 | sed "s#clang-##") +#echo Detected Tool-Version: $VER + +# Ensure all tools are installed +TOOLS=( +clang +clang++ +llvm-windres +llvm-ranlib +llvm-ar +llvm-strip +) + +for TOOL in "${TOOLS[@]}" +do + + set +e + OUTPUT=$($TOOL-$VER --version 2>&1) + EXIT_CODE=$? + set -e + + # on error (tool not runnable) report the missing tool and abort + if [ $EXIT_CODE -ne 0 ] + then + echo $TOOL-$VER must be installed! + exit 1 + fi + + #echo Found $TOOL-$VER + + # Get full path + TOOL_PATH=$(which $TOOL-$VER) + + # Create links + ln -sf $TOOL_PATH /usr/bin/$TOOL + +done + diff --git a/shaders/CMakeLists.txt b/shaders/CMakeLists.txt new file mode 100644 index 0000000..bf9ed5d --- /dev/null +++ b/shaders/CMakeLists.txt @@ -0,0 +1,106 @@ + +# Ref.: https://stackoverflow.com/questions/67040258/cmake-compile-glsl-shaders +# Ref.: https://cmake.org/cmake/help/latest/command/add_custom_command.html + +set(SHADER_SOURCE_DIR ${PROJECT_SOURCE_DIR}/shaders) +set(SHADER_BIN_DIR ${PROJECT_SOURCE_DIR}/assets/shaders) +file(GLOB VERTEX_SHADERS ${SHADER_SOURCE_DIR}/vs_*.sc) +file(GLOB FRAGMENT_SHADERS ${SHADER_SOURCE_DIR}/fs_*.sc) +file(GLOB COMPUTE_SHADERS ${SHADER_SOURCE_DIR}/cs_*.sc) + +# vertex shaders +foreach(SHADER_IN IN LISTS VERTEX_SHADERS) + cmake_path(GET SHADER_IN FILENAME SHADER_NAME) # ex.
SHADER_NAME==vs_cubes.sc + + # output file + string(REPLACE ".sc" ".spv" SHADER_OUT ${SHADER_NAME}) + set(SHADER_OUT ${SHADER_BIN_DIR}/${SHADER_OUT}) + + # varying file + string(REPLACE "vs" "varying" SHADER_VARYING ${SHADER_IN}) + + # message("SHADER_IN: " ${SHADER_IN} " - SHADER_VARYING: " ${SHADER_VARYING} " - SHADER_OUT: " ${SHADER_OUT}) + + _bgfx_shaderc_parse(CLI + FILE ${SHADER_IN} + OUTPUT ${SHADER_OUT} + VERTEX + LINUX + PROFILE spirv + VARYINGDEF ${SHADER_VARYING} + INCLUDES ${SHADER_BIN_DIR} + ) + + add_custom_command( + OUTPUT ${SHADER_OUT} + COMMAND ${SHADERC} ${CLI} + DEPENDS ${SHADER_IN} + COMMENT "Compiling shader ${SHADER_NAME}" + ) + + list(APPEND SPV_SHADERS ${SHADER_OUT}) +endforeach() + +# fragment shaders +foreach(SHADER_IN IN LISTS FRAGMENT_SHADERS) + cmake_path(GET SHADER_IN FILENAME SHADER_NAME) # ex. SHADER_NAME==vs_cubes.sc + + # output file + string(REPLACE ".sc" ".spv" SHADER_OUT ${SHADER_NAME}) + set(SHADER_OUT ${SHADER_BIN_DIR}/${SHADER_OUT}) + + # varying file + string(REPLACE "fs" "varying" SHADER_VARYING ${SHADER_IN}) + + # message("SHADER_IN: " ${SHADER_IN} " - SHADER_VARYING: " ${SHADER_VARYING} " - SHADER_OUT: " ${SHADER_OUT}) + + _bgfx_shaderc_parse(CLI + FILE ${SHADER_IN} + OUTPUT ${SHADER_OUT} + FRAGMENT + LINUX + PROFILE spirv + VARYINGDEF ${SHADER_VARYING} + INCLUDES ${SHADER_BIN_DIR} + ) + + add_custom_command( + OUTPUT ${SHADER_OUT} + COMMAND ${SHADERC} ${CLI} + DEPENDS ${SHADER_IN} + COMMENT "Compiling shader ${SHADER_NAME}" + ) + + list(APPEND SPV_SHADERS ${SHADER_OUT}) +endforeach() + +# compute shaders +foreach(SHADER_IN IN LISTS COMPUTE_SHADERS) + cmake_path(GET SHADER_IN FILENAME SHADER_NAME) # ex. 
SHADER_NAME==vs_cubes.sc + + # output file + string(REPLACE ".sc" ".spv" SHADER_OUT ${SHADER_NAME}) + set(SHADER_OUT ${SHADER_BIN_DIR}/${SHADER_OUT}) + + # message("SHADER_IN: " ${SHADER_IN} " - "SHADER_OUT: " ${SHADER_OUT}) + + _bgfx_shaderc_parse(CLI + FILE ${SHADER_IN} + OUTPUT ${SHADER_OUT} + COMPUTE + LINUX + PROFILE spirv + INCLUDES ${SHADER_BIN_DIR} + ) + + add_custom_command( + OUTPUT ${SHADER_OUT} + COMMAND ${SHADERC} ${CLI} + DEPENDS ${SHADER_IN} + COMMENT "Compiling shader ${SHADER_NAME}" + ) + + list(APPEND SPV_SHADERS ${SHADER_OUT}) +endforeach() + +add_custom_target(shaders ALL DEPENDS ${SPV_SHADERS}) diff --git a/shaders/bgfx_compute.sh b/shaders/bgfx_compute.sh new file mode 100644 index 0000000..59dda85 --- /dev/null +++ b/shaders/bgfx_compute.sh @@ -0,0 +1,327 @@ +/* + * Copyright 2011-2022 Branimir Karadzic. All rights reserved. + * License: https://github.com/bkaradzic/bgfx/blob/master/LICENSE + */ + +#ifndef BGFX_COMPUTE_H_HEADER_GUARD +#define BGFX_COMPUTE_H_HEADER_GUARD + +#include "bgfx_shader.sh" + +#ifndef __cplusplus + +#if BGFX_SHADER_LANGUAGE_HLSL > 0 && BGFX_SHADER_LANGUAGE_HLSL < 400 +# error "Compute is not supported!" 
+#endif // BGFX_SHADER_LANGUAGE_HLSL + +#if BGFX_SHADER_LANGUAGE_METAL || BGFX_SHADER_LANGUAGE_SPIRV +# define FORMAT(_format) [[spv::format_ ## _format]] +# define WRITEONLY [[spv::nonreadable]] +#else +# define FORMAT(_format) +# define WRITEONLY +#endif // BGFX_SHADER_LANGUAGE_METAL || BGFX_SHADER_LANGUAGE_SPIRV + +#if BGFX_SHADER_LANGUAGE_GLSL + +#define SHARED shared + +#define __IMAGE_XX(_name, _format, _reg, _image, _access) \ + layout(_format, binding=_reg) _access uniform highp _image _name + +#define readwrite +#define IMAGE2D_RO( _name, _format, _reg) __IMAGE_XX(_name, _format, _reg, image2D, readonly) +#define UIMAGE2D_RO(_name, _format, _reg) __IMAGE_XX(_name, _format, _reg, uimage2D, readonly) +#define IMAGE2D_WR( _name, _format, _reg) __IMAGE_XX(_name, _format, _reg, image2D, writeonly) +#define UIMAGE2D_WR(_name, _format, _reg) __IMAGE_XX(_name, _format, _reg, uimage2D, writeonly) +#define IMAGE2D_RW( _name, _format, _reg) __IMAGE_XX(_name, _format, _reg, image2D, readwrite) +#define UIMAGE2D_RW(_name, _format, _reg) __IMAGE_XX(_name, _format, _reg, uimage2D, readwrite) + +#define IMAGE2D_ARRAY_RO( _name, _format, _reg) __IMAGE_XX(_name, _format, _reg, image2DArray, readonly) +#define UIMAGE2D_ARRAY_RO(_name, _format, _reg) __IMAGE_XX(_name, _format, _reg, uimage2DArray, readonly) +#define IMAGE2D_ARRAY_WR( _name, _format, _reg) __IMAGE_XX(_name, _format, _reg, image2DArray, writeonly) +#define UIMAGE2D_ARRAY_WR(_name, _format, _reg) __IMAGE_XX(_name, _format, _reg, uimage2DArray, writeonly) +#define IMAGE2D_ARRAY_RW( _name, _format, _reg) __IMAGE_XX(_name, _format, _reg, image2DArray, readwrite) +#define UIMAGE2D_ARRAY_RW(_name, _format, _reg) __IMAGE_XX(_name, _format, _reg, uimage2DArray, readwrite) + +#define IMAGE3D_RO( _name, _format, _reg) __IMAGE_XX(_name, _format, _reg, image3D, readonly) +#define UIMAGE3D_RO(_name, _format, _reg) __IMAGE_XX(_name, _format, _reg, uimage3D, readonly) +#define IMAGE3D_WR( _name, _format, _reg) 
__IMAGE_XX(_name, _format, _reg, image3D, writeonly) +#define UIMAGE3D_WR(_name, _format, _reg) __IMAGE_XX(_name, _format, _reg, uimage3D, writeonly) +#define IMAGE3D_RW( _name, _format, _reg) __IMAGE_XX(_name, _format, _reg, image3D, readwrite) +#define UIMAGE3D_RW(_name, _format, _reg) __IMAGE_XX(_name, _format, _reg, uimage3D, readwrite) + +#define __BUFFER_XX(_name, _type, _reg, _access) \ + layout(std430, binding=_reg) _access buffer _name ## Buffer \ + { \ + _type _name[]; \ + } + +#define BUFFER_RO(_name, _type, _reg) __BUFFER_XX(_name, _type, _reg, readonly) +#define BUFFER_RW(_name, _type, _reg) __BUFFER_XX(_name, _type, _reg, readwrite) +#define BUFFER_WR(_name, _type, _reg) __BUFFER_XX(_name, _type, _reg, writeonly) + +#define NUM_THREADS(_x, _y, _z) layout (local_size_x = _x, local_size_y = _y, local_size_z = _z) in; + +#define atomicFetchAndAdd(_mem, _data, _original) _original = atomicAdd(_mem, _data) +#define atomicFetchAndAnd(_mem, _data, _original) _original = atomicAnd(_mem, _data) +#define atomicFetchAndMax(_mem, _data, _original) _original = atomicMax(_mem, _data) +#define atomicFetchAndMin(_mem, _data, _original) _original = atomicMin(_mem, _data) +#define atomicFetchAndOr(_mem, _data, _original) _original = atomicOr(_mem, _data) +#define atomicFetchAndXor(_mem, _data, _original) _original = atomicXor(_mem, _data) +#define atomicFetchAndExchange(_mem, _data, _original) _original = atomicExchange(_mem, _data) +#define atomicFetchCompareExchange(_mem, _compare, _data, _original) _original = atomicCompSwap(_mem,_compare, _data) + +#else + +#define SHARED groupshared + +#define COMP_r32ui uint +#define COMP_rg32ui uint2 +#define COMP_rgba32ui uint4 +#define COMP_r32f float +#define COMP_r16f float +#define COMP_rg16f float2 +#define COMP_rgba16f float4 +#if BGFX_SHADER_LANGUAGE_HLSL +# define COMP_rgba8 unorm float4 +# define COMP_rg8 unorm float2 +# define COMP_r8 unorm float +#else +# define COMP_rgba8 float4 +# define COMP_rg8 float2 +# define 
COMP_r8 float +#endif // BGFX_SHADER_LANGUAGE_HLSL +#define COMP_rgba32f float4 + +#define IMAGE2D_RO( _name, _format, _reg) \ + FORMAT(_format) Texture2D _name : REGISTER(t, _reg); \ + +#define UIMAGE2D_RO(_name, _format, _reg) IMAGE2D_RO(_name, _format, _reg) + +#define IMAGE2D_WR( _name, _format, _reg) \ + WRITEONLY FORMAT(_format) RWTexture2D _name : REGISTER(u, _reg); \ + +#define UIMAGE2D_WR(_name, _format, _reg) IMAGE2D_WR(_name, _format, _reg) + +#define IMAGE2D_RW( _name, _format, _reg) \ + FORMAT(_format) RWTexture2D _name : REGISTER(u, _reg); \ + +#define UIMAGE2D_RW(_name, _format, _reg) IMAGE2D_RW(_name, _format, _reg) + +#define IMAGE2D_ARRAY_RO(_name, _format, _reg) \ + FORMAT(_format) Texture2DArray _name : REGISTER(t, _reg); \ + +#define UIMAGE2D_ARRAY_RO(_name, _format, _reg) IMAGE2D_ARRAY_RO(_name, _format, _reg) + +#define IMAGE2D_ARRAY_WR( _name, _format, _reg) \ + WRITEONLY FORMAT(_format) RWTexture2DArray _name : REGISTER(u, _reg); \ + +#define UIMAGE2D_ARRAY_WR(_name, _format, _reg) IMAGE2D_ARRAY_WR(_name, _format, _reg) + +#define IMAGE2D_ARRAY_RW(_name, _format, _reg) \ + FORMAT(_format) RWTexture2DArray _name : REGISTER(u, _reg); \ + +#define UIMAGE2D_ARRAY_RW(_name, _format, _reg) IMAGE2D_ARRAY_RW(_name, _format, _reg) + +#define IMAGE3D_RO( _name, _format, _reg) \ + FORMAT(_format) Texture3D _name : REGISTER(t, _reg); + +#define UIMAGE3D_RO(_name, _format, _reg) IMAGE3D_RO(_name, _format, _reg) + +#define IMAGE3D_WR( _name, _format, _reg) \ + WRITEONLY FORMAT(_format) RWTexture3D _name : REGISTER(u, _reg); + +#define UIMAGE3D_WR(_name, _format, _reg) IMAGE3D_RW(_name, _format, _reg) + +#define IMAGE3D_RW( _name, _format, _reg) \ + FORMAT(_format) RWTexture3D _name : REGISTER(u, _reg); \ + +#define UIMAGE3D_RW(_name, _format, _reg) IMAGE3D_RW(_name, _format, _reg) + +#if BGFX_SHADER_LANGUAGE_METAL || BGFX_SHADER_LANGUAGE_SPIRV +#define BUFFER_RO(_name, _struct, _reg) StructuredBuffer<_struct> _name : REGISTER(t, _reg) +#define 
BUFFER_RW(_name, _struct, _reg) RWStructuredBuffer <_struct> _name : REGISTER(u, _reg) +#define BUFFER_WR(_name, _struct, _reg) BUFFER_RW(_name, _struct, _reg) +#else +#define BUFFER_RO(_name, _struct, _reg) Buffer<_struct> _name : REGISTER(t, _reg) +#define BUFFER_RW(_name, _struct, _reg) RWBuffer<_struct> _name : REGISTER(u, _reg) +#define BUFFER_WR(_name, _struct, _reg) BUFFER_RW(_name, _struct, _reg) +#endif + +#define NUM_THREADS(_x, _y, _z) [numthreads(_x, _y, _z)] + +#define __IMAGE_IMPL_A(_format, _storeComponents, _type, _loadComponents) \ + _type imageLoad(Texture2D<_format> _image, ivec2 _uv) \ + { \ + return _image[_uv]._loadComponents; \ + } \ + \ + ivec2 imageSize(Texture2D<_format> _image) \ + { \ + uvec2 result; \ + _image.GetDimensions(result.x, result.y); \ + return ivec2(result); \ + } \ + \ + _type imageLoad(RWTexture2D<_format> _image, ivec2 _uv) \ + { \ + return _image[_uv]._loadComponents; \ + } \ + \ + void imageStore(RWTexture2D<_format> _image, ivec2 _uv, _type _value) \ + { \ + _image[_uv] = _value._storeComponents; \ + } \ + \ + ivec2 imageSize(RWTexture2D<_format> _image) \ + { \ + uvec2 result; \ + _image.GetDimensions(result.x, result.y); \ + return ivec2(result); \ + } \ + \ + _type imageLoad(Texture2DArray<_format> _image, ivec3 _uvw) \ + { \ + return _image[_uvw]._loadComponents; \ + } \ + \ + ivec3 imageSize(Texture2DArray<_format> _image) \ + { \ + uvec3 result; \ + _image.GetDimensions(result.x, result.y, result.z); \ + return ivec3(result); \ + } \ + \ + _type imageLoad(RWTexture2DArray<_format> _image, ivec3 _uvw) \ + { \ + return _image[_uvw]._loadComponents; \ + } \ + \ + void imageStore(RWTexture2DArray<_format> _image, ivec3 _uvw, _type _value) \ + { \ + _image[_uvw] = _value._storeComponents; \ + } \ + \ + ivec3 imageSize(RWTexture2DArray<_format> _image) \ + { \ + uvec3 result; \ + _image.GetDimensions(result.x, result.y, result.z); \ + return ivec3(result); \ + } \ + \ + _type imageLoad(Texture3D<_format> _image, ivec3 
_uvw) \ + { \ + return _image[_uvw]._loadComponents; \ + } \ + \ + ivec3 imageSize(Texture3D<_format> _image) \ + { \ + uvec3 result; \ + _image.GetDimensions(result.x, result.y, result.z); \ + return ivec3(result); \ + } \ + \ + _type imageLoad(RWTexture3D<_format> _image, ivec3 _uvw) \ + { \ + return _image[_uvw]._loadComponents; \ + } \ + \ + void imageStore(RWTexture3D<_format> _image, ivec3 _uvw, _type _value) \ + { \ + _image[_uvw] = _value._storeComponents; \ + } \ + \ + ivec3 imageSize(RWTexture3D<_format> _image) \ + { \ + uvec3 result; \ + _image.GetDimensions(result.x, result.y, result.z); \ + return ivec3(result); \ + } + +#define __IMAGE_IMPL_ATOMIC(_format, _storeComponents, _type, _loadComponents) \ + \ + void imageAtomicAdd(RWTexture2D<_format> _image, ivec2 _uv, _type _value) \ + { \ + InterlockedAdd(_image[_uv], _value._storeComponents); \ + } \ + + +__IMAGE_IMPL_A(float, x, vec4, xxxx) +__IMAGE_IMPL_A(float2, xy, vec4, xyyy) +__IMAGE_IMPL_A(float4, xyzw, vec4, xyzw) + +__IMAGE_IMPL_A(uint, x, uvec4, xxxx) +__IMAGE_IMPL_A(uint2, xy, uvec4, xyyy) +__IMAGE_IMPL_A(uint4, xyzw, uvec4, xyzw) + +#if BGFX_SHADER_LANGUAGE_HLSL +__IMAGE_IMPL_A(unorm float, x, vec4, xxxx) +__IMAGE_IMPL_A(unorm float2, xy, vec4, xyyy) +__IMAGE_IMPL_A(unorm float4, xyzw, vec4, xyzw) +#endif + +__IMAGE_IMPL_ATOMIC(uint, x, uvec4, xxxx) + + +#define atomicAdd(_mem, _data) InterlockedAdd(_mem, _data) +#define atomicAnd(_mem, _data) InterlockedAnd(_mem, _data) +#define atomicMax(_mem, _data) InterlockedMax(_mem, _data) +#define atomicMin(_mem, _data) InterlockedMin(_mem, _data) +#define atomicOr(_mem, _data) InterlockedOr(_mem, _data) +#define atomicXor(_mem, _data) InterlockedXor(_mem, _data) +#define atomicFetchAndAdd(_mem, _data, _original) InterlockedAdd(_mem, _data, _original) +#define atomicFetchAndAnd(_mem, _data, _original) InterlockedAnd(_mem, _data, _original) +#define atomicFetchAndMax(_mem, _data, _original) InterlockedMax(_mem, _data, _original) +#define 
atomicFetchAndMin(_mem, _data, _original) InterlockedMin(_mem, _data, _original) +#define atomicFetchAndOr(_mem, _data, _original) InterlockedOr(_mem, _data, _original) +#define atomicFetchAndXor(_mem, _data, _original) InterlockedXor(_mem, _data, _original) +#define atomicFetchAndExchange(_mem, _data, _original) InterlockedExchange(_mem, _data, _original) +#define atomicFetchCompareExchange(_mem, _compare, _data, _original) InterlockedCompareExchange(_mem,_compare, _data, _original) + +// InterlockedCompareStore + +#define barrier() GroupMemoryBarrierWithGroupSync() +#define memoryBarrier() GroupMemoryBarrierWithGroupSync() +#define memoryBarrierAtomicCounter() GroupMemoryBarrierWithGroupSync() +#define memoryBarrierBuffer() AllMemoryBarrierWithGroupSync() +#define memoryBarrierImage() GroupMemoryBarrierWithGroupSync() +#define memoryBarrierShared() GroupMemoryBarrierWithGroupSync() +#define groupMemoryBarrier() GroupMemoryBarrierWithGroupSync() + +#endif // BGFX_SHADER_LANGUAGE_GLSL + +#define dispatchIndirect( \ + _buffer \ + , _offset \ + , _numX \ + , _numY \ + , _numZ \ + ) \ + _buffer[(_offset)*2+0] = uvec4(_numX, _numY, _numZ, 0u) + +#define drawIndirect( \ + _buffer \ + , _offset \ + , _numVertices \ + , _numInstances \ + , _startVertex \ + , _startInstance \ + ) \ + _buffer[(_offset)*2+0] = uvec4(_numVertices, _numInstances, _startVertex, _startInstance) + +#define drawIndexedIndirect( \ + _buffer \ + , _offset \ + , _numIndices \ + , _numInstances \ + , _startIndex \ + , _startVertex \ + , _startInstance \ + ) \ + _buffer[(_offset)*2+0] = uvec4(_numIndices, _numInstances, _startIndex, _startVertex); \ + _buffer[(_offset)*2+1] = uvec4(_startInstance, 0u, 0u, 0u) + +#endif // __cplusplus + +#endif // BGFX_COMPUTE_H_HEADER_GUARD diff --git a/shaders/bgfx_shader.sh b/shaders/bgfx_shader.sh new file mode 100644 index 0000000..e8b57f7 --- /dev/null +++ b/shaders/bgfx_shader.sh @@ -0,0 +1,698 @@ +/* + * Copyright 2011-2022 Branimir Karadzic. 
All rights reserved. + * License: https://github.com/bkaradzic/bgfx/blob/master/LICENSE + */ + +#ifndef BGFX_SHADER_H_HEADER_GUARD +#define BGFX_SHADER_H_HEADER_GUARD + +#if !defined(BGFX_CONFIG_MAX_BONES) +# define BGFX_CONFIG_MAX_BONES 32 +#endif // !defined(BGFX_CONFIG_MAX_BONES) + +#ifndef __cplusplus + +#if BGFX_SHADER_LANGUAGE_HLSL > 300 +# define BRANCH [branch] +# define LOOP [loop] +# define UNROLL [unroll] +#else +# define BRANCH +# define LOOP +# define UNROLL +#endif // BGFX_SHADER_LANGUAGE_HLSL > 300 + +#if (BGFX_SHADER_LANGUAGE_HLSL > 300 || BGFX_SHADER_LANGUAGE_METAL || BGFX_SHADER_LANGUAGE_SPIRV) && BGFX_SHADER_TYPE_FRAGMENT +# define EARLY_DEPTH_STENCIL [earlydepthstencil] +#else +# define EARLY_DEPTH_STENCIL +#endif // BGFX_SHADER_LANGUAGE_HLSL > 300 && BGFX_SHADER_TYPE_FRAGMENT + +#if BGFX_SHADER_LANGUAGE_GLSL +# define ARRAY_BEGIN(_type, _name, _count) _type _name[_count] = _type[]( +# define ARRAY_END() ) +#else +# define ARRAY_BEGIN(_type, _name, _count) _type _name[_count] = { +# define ARRAY_END() } +#endif // BGFX_SHADER_LANGUAGE_GLSL + +#if BGFX_SHADER_LANGUAGE_HLSL \ + || BGFX_SHADER_LANGUAGE_PSSL \ + || BGFX_SHADER_LANGUAGE_SPIRV \ + || BGFX_SHADER_LANGUAGE_METAL +# define CONST(_x) static const _x +# define dFdx(_x) ddx(_x) +# define dFdy(_y) ddy(-(_y)) +# define inversesqrt(_x) rsqrt(_x) +# define fract(_x) frac(_x) + +# define bvec2 bool2 +# define bvec3 bool3 +# define bvec4 bool4 + +// To be able to patch the uav registers on the DXBC SPDB Chunk (D3D11 renderer) the whitespaces around +// '_type[_reg]' are necessary. This only affects shaders with debug info (i.e., those that have the SPDB Chunk). 
+# if BGFX_SHADER_LANGUAGE_HLSL > 400 || BGFX_SHADER_LANGUAGE_PSSL || BGFX_SHADER_LANGUAGE_SPIRV || BGFX_SHADER_LANGUAGE_METAL +# define REGISTER(_type, _reg) register( _type[_reg] ) +# else +# define REGISTER(_type, _reg) register(_type ## _reg) +# endif // BGFX_SHADER_LANGUAGE_HLSL + +# if BGFX_SHADER_LANGUAGE_HLSL > 300 || BGFX_SHADER_LANGUAGE_PSSL || BGFX_SHADER_LANGUAGE_SPIRV || BGFX_SHADER_LANGUAGE_METAL +# if BGFX_SHADER_LANGUAGE_HLSL > 400 || BGFX_SHADER_LANGUAGE_PSSL || BGFX_SHADER_LANGUAGE_SPIRV || BGFX_SHADER_LANGUAGE_METAL +# define dFdxCoarse(_x) ddx_coarse(_x) +# define dFdxFine(_x) ddx_fine(_x) +# define dFdyCoarse(_y) ddy_coarse(-(_y)) +# define dFdyFine(_y) ddy_fine(-(_y)) +# endif // BGFX_SHADER_LANGUAGE_HLSL > 400 + +# if BGFX_SHADER_LANGUAGE_HLSL || BGFX_SHADER_LANGUAGE_SPIRV || BGFX_SHADER_LANGUAGE_METAL +float intBitsToFloat(int _x) { return asfloat(_x); } +vec2 intBitsToFloat(uint2 _x) { return asfloat(_x); } +vec3 intBitsToFloat(uint3 _x) { return asfloat(_x); } +vec4 intBitsToFloat(uint4 _x) { return asfloat(_x); } +# endif // BGFX_SHADER_LANGUAGE_HLSL || BGFX_SHADER_LANGUAGE_SPIRV || BGFX_SHADER_LANGUAGE_METAL + +float uintBitsToFloat(uint _x) { return asfloat(_x); } +vec2 uintBitsToFloat(uint2 _x) { return asfloat(_x); } +vec3 uintBitsToFloat(uint3 _x) { return asfloat(_x); } +vec4 uintBitsToFloat(uint4 _x) { return asfloat(_x); } + +uint floatBitsToUint(float _x) { return asuint(_x); } +uvec2 floatBitsToUint(vec2 _x) { return asuint(_x); } +uvec3 floatBitsToUint(vec3 _x) { return asuint(_x); } +uvec4 floatBitsToUint(vec4 _x) { return asuint(_x); } + +int floatBitsToInt(float _x) { return asint(_x); } +ivec2 floatBitsToInt(vec2 _x) { return asint(_x); } +ivec3 floatBitsToInt(vec3 _x) { return asint(_x); } +ivec4 floatBitsToInt(vec4 _x) { return asint(_x); } + +uint bitfieldReverse(uint _x) { return reversebits(_x); } +uint2 bitfieldReverse(uint2 _x) { return reversebits(_x); } +uint3 bitfieldReverse(uint3 _x) { return reversebits(_x); } 
+uint4 bitfieldReverse(uint4 _x) { return reversebits(_x); } + +# if !BGFX_SHADER_LANGUAGE_SPIRV +uint packHalf2x16(vec2 _x) +{ + return (f32tof16(_x.y)<<16) | f32tof16(_x.x); +} + +vec2 unpackHalf2x16(uint _x) +{ + return vec2(f16tof32(_x & 0xffff), f16tof32(_x >> 16) ); +} +# endif // !BGFX_SHADER_LANGUAGE_SPIRV + +struct BgfxSampler2D +{ + SamplerState m_sampler; + Texture2D m_texture; +}; + +struct BgfxISampler2D +{ + Texture2D m_texture; +}; + +struct BgfxUSampler2D +{ + Texture2D m_texture; +}; + +struct BgfxSampler2DArray +{ + SamplerState m_sampler; + Texture2DArray m_texture; +}; + +struct BgfxSampler2DShadow +{ + SamplerComparisonState m_sampler; + Texture2D m_texture; +}; + +struct BgfxSampler2DArrayShadow +{ + SamplerComparisonState m_sampler; + Texture2DArray m_texture; +}; + +struct BgfxSampler3D +{ + SamplerState m_sampler; + Texture3D m_texture; +}; + +struct BgfxISampler3D +{ + Texture3D m_texture; +}; + +struct BgfxUSampler3D +{ + Texture3D m_texture; +}; + +struct BgfxSamplerCube +{ + SamplerState m_sampler; + TextureCube m_texture; +}; + +struct BgfxSamplerCubeShadow +{ + SamplerComparisonState m_sampler; + TextureCube m_texture; +}; + +struct BgfxSampler2DMS +{ + Texture2DMS m_texture; +}; + +vec4 bgfxTexture2D(BgfxSampler2D _sampler, vec2 _coord) +{ + return _sampler.m_texture.Sample(_sampler.m_sampler, _coord); +} + +vec4 bgfxTexture2DBias(BgfxSampler2D _sampler, vec2 _coord, float _bias) +{ + return _sampler.m_texture.SampleBias(_sampler.m_sampler, _coord, _bias); +} + +vec4 bgfxTexture2DLod(BgfxSampler2D _sampler, vec2 _coord, float _level) +{ + return _sampler.m_texture.SampleLevel(_sampler.m_sampler, _coord, _level); +} + +vec4 bgfxTexture2DLodOffset(BgfxSampler2D _sampler, vec2 _coord, float _level, ivec2 _offset) +{ + return _sampler.m_texture.SampleLevel(_sampler.m_sampler, _coord, _level, _offset); +} + +vec4 bgfxTexture2DProj(BgfxSampler2D _sampler, vec3 _coord) +{ + vec2 coord = _coord.xy * rcp(_coord.z); + return 
_sampler.m_texture.Sample(_sampler.m_sampler, coord); +} + +vec4 bgfxTexture2DProj(BgfxSampler2D _sampler, vec4 _coord) +{ + vec2 coord = _coord.xy * rcp(_coord.w); + return _sampler.m_texture.Sample(_sampler.m_sampler, coord); +} + +vec4 bgfxTexture2DGrad(BgfxSampler2D _sampler, vec2 _coord, vec2 _dPdx, vec2 _dPdy) +{ + return _sampler.m_texture.SampleGrad(_sampler.m_sampler, _coord, _dPdx, _dPdy); +} + +vec4 bgfxTexture2DArray(BgfxSampler2DArray _sampler, vec3 _coord) +{ + return _sampler.m_texture.Sample(_sampler.m_sampler, _coord); +} + +vec4 bgfxTexture2DArrayLod(BgfxSampler2DArray _sampler, vec3 _coord, float _lod) +{ + return _sampler.m_texture.SampleLevel(_sampler.m_sampler, _coord, _lod); +} + +vec4 bgfxTexture2DArrayLodOffset(BgfxSampler2DArray _sampler, vec3 _coord, float _level, ivec2 _offset) +{ + return _sampler.m_texture.SampleLevel(_sampler.m_sampler, _coord, _level, _offset); +} + +float bgfxShadow2D(BgfxSampler2DShadow _sampler, vec3 _coord) +{ + return _sampler.m_texture.SampleCmpLevelZero(_sampler.m_sampler, _coord.xy, _coord.z); +} + +float bgfxShadow2DProj(BgfxSampler2DShadow _sampler, vec4 _coord) +{ + vec3 coord = _coord.xyz * rcp(_coord.w); + return _sampler.m_texture.SampleCmpLevelZero(_sampler.m_sampler, coord.xy, coord.z); +} + +vec4 bgfxShadow2DArray(BgfxSampler2DArrayShadow _sampler, vec4 _coord) +{ + return _sampler.m_texture.SampleCmpLevelZero(_sampler.m_sampler, _coord.xyz, _coord.w); +} + +vec4 bgfxTexture3D(BgfxSampler3D _sampler, vec3 _coord) +{ + return _sampler.m_texture.Sample(_sampler.m_sampler, _coord); +} + +vec4 bgfxTexture3DLod(BgfxSampler3D _sampler, vec3 _coord, float _level) +{ + return _sampler.m_texture.SampleLevel(_sampler.m_sampler, _coord, _level); +} + +ivec4 bgfxTexture3D(BgfxISampler3D _sampler, vec3 _coord) +{ + uvec3 size; + _sampler.m_texture.GetDimensions(size.x, size.y, size.z); + return _sampler.m_texture.Load(ivec4(_coord * size, 0) ); +} + +uvec4 bgfxTexture3D(BgfxUSampler3D _sampler, vec3 _coord) +{ + 
uvec3 size; + _sampler.m_texture.GetDimensions(size.x, size.y, size.z); + return _sampler.m_texture.Load(ivec4(_coord * size, 0) ); +} + +vec4 bgfxTextureCube(BgfxSamplerCube _sampler, vec3 _coord) +{ + return _sampler.m_texture.Sample(_sampler.m_sampler, _coord); +} + +vec4 bgfxTextureCubeBias(BgfxSamplerCube _sampler, vec3 _coord, float _bias) +{ + return _sampler.m_texture.SampleBias(_sampler.m_sampler, _coord, _bias); +} + +vec4 bgfxTextureCubeLod(BgfxSamplerCube _sampler, vec3 _coord, float _level) +{ + return _sampler.m_texture.SampleLevel(_sampler.m_sampler, _coord, _level); +} + +float bgfxShadowCube(BgfxSamplerCubeShadow _sampler, vec4 _coord) +{ + return _sampler.m_texture.SampleCmpLevelZero(_sampler.m_sampler, _coord.xyz, _coord.w); +} + +vec4 bgfxTexelFetch(BgfxSampler2D _sampler, ivec2 _coord, int _lod) +{ + return _sampler.m_texture.Load(ivec3(_coord, _lod) ); +} + +vec4 bgfxTexelFetchOffset(BgfxSampler2D _sampler, ivec2 _coord, int _lod, ivec2 _offset) +{ + return _sampler.m_texture.Load(ivec3(_coord, _lod), _offset ); +} + +vec2 bgfxTextureSize(BgfxSampler2D _sampler, int _lod) +{ + vec2 result; + _sampler.m_texture.GetDimensions(result.x, result.y); + return result; +} + +vec2 bgfxTextureSize(BgfxISampler2D _sampler, int _lod) +{ + vec2 result; + _sampler.m_texture.GetDimensions(result.x, result.y); + return result; +} + +vec2 bgfxTextureSize(BgfxUSampler2D _sampler, int _lod) +{ + vec2 result; + _sampler.m_texture.GetDimensions(result.x, result.y); + return result; +} + +vec4 bgfxTextureGather0(BgfxSampler2D _sampler, vec2 _coord) +{ + return _sampler.m_texture.GatherRed(_sampler.m_sampler, _coord); +} +vec4 bgfxTextureGather1(BgfxSampler2D _sampler, vec2 _coord) +{ + return _sampler.m_texture.GatherGreen(_sampler.m_sampler, _coord); +} +vec4 bgfxTextureGather2(BgfxSampler2D _sampler, vec2 _coord) +{ + return _sampler.m_texture.GatherBlue(_sampler.m_sampler, _coord); +} +vec4 bgfxTextureGather3(BgfxSampler2D _sampler, vec2 _coord) +{ + return 
_sampler.m_texture.GatherAlpha(_sampler.m_sampler, _coord); +} + +vec4 bgfxTextureGatherOffset0(BgfxSampler2D _sampler, vec2 _coord, ivec2 _offset) +{ + return _sampler.m_texture.GatherRed(_sampler.m_sampler, _coord, _offset); +} +vec4 bgfxTextureGatherOffset1(BgfxSampler2D _sampler, vec2 _coord, ivec2 _offset) +{ + return _sampler.m_texture.GatherGreen(_sampler.m_sampler, _coord, _offset); +} +vec4 bgfxTextureGatherOffset2(BgfxSampler2D _sampler, vec2 _coord, ivec2 _offset) +{ + return _sampler.m_texture.GatherBlue(_sampler.m_sampler, _coord, _offset); +} +vec4 bgfxTextureGatherOffset3(BgfxSampler2D _sampler, vec2 _coord, ivec2 _offset) +{ + return _sampler.m_texture.GatherAlpha(_sampler.m_sampler, _coord, _offset); +} + +vec4 bgfxTextureGather0(BgfxSampler2DArray _sampler, vec3 _coord) +{ + return _sampler.m_texture.GatherRed(_sampler.m_sampler, _coord); +} +vec4 bgfxTextureGather1(BgfxSampler2DArray _sampler, vec3 _coord) +{ + return _sampler.m_texture.GatherGreen(_sampler.m_sampler, _coord); +} +vec4 bgfxTextureGather2(BgfxSampler2DArray _sampler, vec3 _coord) +{ + return _sampler.m_texture.GatherBlue(_sampler.m_sampler, _coord); +} +vec4 bgfxTextureGather3(BgfxSampler2DArray _sampler, vec3 _coord) +{ + return _sampler.m_texture.GatherAlpha(_sampler.m_sampler, _coord); +} + +ivec4 bgfxTexelFetch(BgfxISampler2D _sampler, ivec2 _coord, int _lod) +{ + return _sampler.m_texture.Load(ivec3(_coord, _lod) ); +} + +uvec4 bgfxTexelFetch(BgfxUSampler2D _sampler, ivec2 _coord, int _lod) +{ + return _sampler.m_texture.Load(ivec3(_coord, _lod) ); +} + +vec4 bgfxTexelFetch(BgfxSampler2DMS _sampler, ivec2 _coord, int _sampleIdx) +{ + return _sampler.m_texture.Load(_coord, _sampleIdx); +} + +vec4 bgfxTexelFetch(BgfxSampler2DArray _sampler, ivec3 _coord, int _lod) +{ + return _sampler.m_texture.Load(ivec4(_coord, _lod) ); +} + +vec4 bgfxTexelFetch(BgfxSampler3D _sampler, ivec3 _coord, int _lod) +{ + return _sampler.m_texture.Load(ivec4(_coord, _lod) ); +} + +vec3 
bgfxTextureSize(BgfxSampler3D _sampler, int _lod) +{ + vec3 result; + _sampler.m_texture.GetDimensions(result.x, result.y, result.z); + return result; +} + +# define SAMPLER2D(_name, _reg) \ + uniform SamplerState _name ## Sampler : REGISTER(s, _reg); \ + uniform Texture2D _name ## Texture : REGISTER(t, _reg); \ + static BgfxSampler2D _name = { _name ## Sampler, _name ## Texture } +# define ISAMPLER2D(_name, _reg) \ + uniform Texture2D _name ## Texture : REGISTER(t, _reg); \ + static BgfxISampler2D _name = { _name ## Texture } +# define USAMPLER2D(_name, _reg) \ + uniform Texture2D _name ## Texture : REGISTER(t, _reg); \ + static BgfxUSampler2D _name = { _name ## Texture } +# define sampler2D BgfxSampler2D +# define texture2D(_sampler, _coord) bgfxTexture2D(_sampler, _coord) +# define texture2DBias(_sampler, _coord, _bias) bgfxTexture2DBias(_sampler, _coord, _bias) +# define texture2DLod(_sampler, _coord, _level) bgfxTexture2DLod(_sampler, _coord, _level) +# define texture2DLodOffset(_sampler, _coord, _level, _offset) bgfxTexture2DLodOffset(_sampler, _coord, _level, _offset) +# define texture2DProj(_sampler, _coord) bgfxTexture2DProj(_sampler, _coord) +# define texture2DGrad(_sampler, _coord, _dPdx, _dPdy) bgfxTexture2DGrad(_sampler, _coord, _dPdx, _dPdy) + +# define SAMPLER2DARRAY(_name, _reg) \ + uniform SamplerState _name ## Sampler : REGISTER(s, _reg); \ + uniform Texture2DArray _name ## Texture : REGISTER(t, _reg); \ + static BgfxSampler2DArray _name = { _name ## Sampler, _name ## Texture } +# define sampler2DArray BgfxSampler2DArray +# define texture2DArray(_sampler, _coord) bgfxTexture2DArray(_sampler, _coord) +# define texture2DArrayLod(_sampler, _coord, _lod) bgfxTexture2DArrayLod(_sampler, _coord, _lod) +# define texture2DArrayLodOffset(_sampler, _coord, _level, _offset) bgfxTexture2DArrayLodOffset(_sampler, _coord, _level, _offset) + +# define SAMPLER2DMS(_name, _reg) \ + uniform Texture2DMS _name ## Texture : REGISTER(t, _reg); \ + static 
BgfxSampler2DMS _name = { _name ## Texture } +# define sampler2DMS BgfxSampler2DMS + +# define SAMPLER2DSHADOW(_name, _reg) \ + uniform SamplerComparisonState _name ## SamplerComparison : REGISTER(s, _reg); \ + uniform Texture2D _name ## Texture : REGISTER(t, _reg); \ + static BgfxSampler2DShadow _name = { _name ## SamplerComparison, _name ## Texture } +# define sampler2DShadow BgfxSampler2DShadow +# define shadow2D(_sampler, _coord) bgfxShadow2D(_sampler, _coord) +# define shadow2DProj(_sampler, _coord) bgfxShadow2DProj(_sampler, _coord) + +# define SAMPLER2DARRAYSHADOW(_name, _reg) \ + uniform SamplerComparisonState _name ## SamplerComparison : REGISTER(s, _reg); \ + uniform Texture2DArray _name ## Texture : REGISTER(t, _reg); \ + static BgfxSampler2DArrayShadow _name = { _name ## SamplerComparison, _name ## Texture } +# define sampler2DArrayShadow BgfxSampler2DArrayShadow +# define shadow2DArray(_sampler, _coord) bgfxShadow2DArray(_sampler, _coord) + +# define SAMPLER3D(_name, _reg) \ + uniform SamplerState _name ## Sampler : REGISTER(s, _reg); \ + uniform Texture3D _name ## Texture : REGISTER(t, _reg); \ + static BgfxSampler3D _name = { _name ## Sampler, _name ## Texture } +# define ISAMPLER3D(_name, _reg) \ + uniform Texture3D _name ## Texture : REGISTER(t, _reg); \ + static BgfxISampler3D _name = { _name ## Texture } +# define USAMPLER3D(_name, _reg) \ + uniform Texture3D _name ## Texture : REGISTER(t, _reg); \ + static BgfxUSampler3D _name = { _name ## Texture } +# define sampler3D BgfxSampler3D +# define texture3D(_sampler, _coord) bgfxTexture3D(_sampler, _coord) +# define texture3DLod(_sampler, _coord, _level) bgfxTexture3DLod(_sampler, _coord, _level) + +# define SAMPLERCUBE(_name, _reg) \ + uniform SamplerState _name ## Sampler : REGISTER(s, _reg); \ + uniform TextureCube _name ## Texture : REGISTER(t, _reg); \ + static BgfxSamplerCube _name = { _name ## Sampler, _name ## Texture } +# define samplerCube BgfxSamplerCube +# define textureCube(_sampler, 
_coord) bgfxTextureCube(_sampler, _coord) +# define textureCubeBias(_sampler, _coord, _bias) bgfxTextureCubeBias(_sampler, _coord, _bias) +# define textureCubeLod(_sampler, _coord, _level) bgfxTextureCubeLod(_sampler, _coord, _level) + +# define SAMPLERCUBESHADOW(_name, _reg) \ + uniform SamplerComparisonState _name ## SamplerComparison : REGISTER(s, _reg); \ + uniform TextureCube _name ## Texture : REGISTER(t, _reg); \ + static BgfxSamplerCubeShadow _name = { _name ## SamplerComparison, _name ## Texture } +# define samplerCubeShadow BgfxSamplerCubeShadow +# define shadowCube(_sampler, _coord) bgfxShadowCube(_sampler, _coord) + +# define texelFetch(_sampler, _coord, _lod) bgfxTexelFetch(_sampler, _coord, _lod) +# define texelFetchOffset(_sampler, _coord, _lod, _offset) bgfxTexelFetchOffset(_sampler, _coord, _lod, _offset) +# define textureSize(_sampler, _lod) bgfxTextureSize(_sampler, _lod) +# define textureGather(_sampler, _coord, _comp) bgfxTextureGather ## _comp(_sampler, _coord) +# define textureGatherOffset(_sampler, _coord, _offset, _comp) bgfxTextureGatherOffset ## _comp(_sampler, _coord, _offset) +# else + +# define sampler2DShadow sampler2D + +vec4 bgfxTexture2DProj(sampler2D _sampler, vec3 _coord) +{ + return tex2Dproj(_sampler, vec4(_coord.xy, 0.0, _coord.z) ); +} + +vec4 bgfxTexture2DProj(sampler2D _sampler, vec4 _coord) +{ + return tex2Dproj(_sampler, _coord); +} + +float bgfxShadow2D(sampler2DShadow _sampler, vec3 _coord) +{ +#if 0 + float occluder = tex2D(_sampler, _coord.xy).x; + return step(_coord.z, occluder); +#else + return tex2Dproj(_sampler, vec4(_coord.xy, _coord.z, 1.0) ).x; +#endif // 0 +} + +float bgfxShadow2DProj(sampler2DShadow _sampler, vec4 _coord) +{ +#if 0 + vec3 coord = _coord.xyz * rcp(_coord.w); + float occluder = tex2D(_sampler, coord.xy).x; + return step(coord.z, occluder); +#else + return tex2Dproj(_sampler, _coord).x; +#endif // 0 +} + +# define SAMPLER2D(_name, _reg) uniform sampler2D _name : REGISTER(s, _reg) +# define 
SAMPLER2DMS(_name, _reg) uniform sampler2DMS _name : REGISTER(s, _reg) +# define texture2D(_sampler, _coord) tex2D(_sampler, _coord) +# define texture2DProj(_sampler, _coord) bgfxTexture2DProj(_sampler, _coord) + +# define SAMPLER2DARRAY(_name, _reg) SAMPLER2D(_name, _reg) +# define texture2DArray(_sampler, _coord) texture2D(_sampler, (_coord).xy) +# define texture2DArrayLod(_sampler, _coord, _lod) texture2DLod(_sampler, _coord, _lod) + +# define SAMPLER2DSHADOW(_name, _reg) uniform sampler2DShadow _name : REGISTER(s, _reg) +# define shadow2D(_sampler, _coord) bgfxShadow2D(_sampler, _coord) +# define shadow2DProj(_sampler, _coord) bgfxShadow2DProj(_sampler, _coord) + +# define SAMPLER3D(_name, _reg) uniform sampler3D _name : REGISTER(s, _reg) +# define texture3D(_sampler, _coord) tex3D(_sampler, _coord) + +# define SAMPLERCUBE(_name, _reg) uniform samplerCUBE _name : REGISTER(s, _reg) +# define textureCube(_sampler, _coord) texCUBE(_sampler, _coord) + +# define texture2DLod(_sampler, _coord, _level) tex2Dlod(_sampler, vec4( (_coord).xy, 0.0, _level) ) +# define texture2DGrad(_sampler, _coord, _dPdx, _dPdy) tex2Dgrad(_sampler, _coord, _dPdx, _dPdy) +# define texture3DLod(_sampler, _coord, _level) tex3Dlod(_sampler, vec4( (_coord).xyz, _level) ) +# define textureCubeLod(_sampler, _coord, _level) texCUBElod(_sampler, vec4( (_coord).xyz, _level) ) + +# endif // BGFX_SHADER_LANGUAGE_HLSL > 300 + +vec3 instMul(vec3 _vec, mat3 _mtx) { return mul(_mtx, _vec); } +vec3 instMul(mat3 _mtx, vec3 _vec) { return mul(_vec, _mtx); } +vec4 instMul(vec4 _vec, mat4 _mtx) { return mul(_mtx, _vec); } +vec4 instMul(mat4 _mtx, vec4 _vec) { return mul(_vec, _mtx); } + +bvec2 lessThan(vec2 _a, vec2 _b) { return _a < _b; } +bvec3 lessThan(vec3 _a, vec3 _b) { return _a < _b; } +bvec4 lessThan(vec4 _a, vec4 _b) { return _a < _b; } + +bvec2 lessThanEqual(vec2 _a, vec2 _b) { return _a <= _b; } +bvec3 lessThanEqual(vec3 _a, vec3 _b) { return _a <= _b; } +bvec4 lessThanEqual(vec4 _a, vec4 _b) { 
return _a <= _b; } + +bvec2 greaterThan(vec2 _a, vec2 _b) { return _a > _b; } +bvec3 greaterThan(vec3 _a, vec3 _b) { return _a > _b; } +bvec4 greaterThan(vec4 _a, vec4 _b) { return _a > _b; } + +bvec2 greaterThanEqual(vec2 _a, vec2 _b) { return _a >= _b; } +bvec3 greaterThanEqual(vec3 _a, vec3 _b) { return _a >= _b; } +bvec4 greaterThanEqual(vec4 _a, vec4 _b) { return _a >= _b; } + +bvec2 notEqual(vec2 _a, vec2 _b) { return _a != _b; } +bvec3 notEqual(vec3 _a, vec3 _b) { return _a != _b; } +bvec4 notEqual(vec4 _a, vec4 _b) { return _a != _b; } + +bvec2 equal(vec2 _a, vec2 _b) { return _a == _b; } +bvec3 equal(vec3 _a, vec3 _b) { return _a == _b; } +bvec4 equal(vec4 _a, vec4 _b) { return _a == _b; } + +float mix(float _a, float _b, float _t) { return lerp(_a, _b, _t); } +vec2 mix(vec2 _a, vec2 _b, vec2 _t) { return lerp(_a, _b, _t); } +vec3 mix(vec3 _a, vec3 _b, vec3 _t) { return lerp(_a, _b, _t); } +vec4 mix(vec4 _a, vec4 _b, vec4 _t) { return lerp(_a, _b, _t); } + +float mod(float _a, float _b) { return _a - _b * floor(_a / _b); } +vec2 mod(vec2 _a, vec2 _b) { return _a - _b * floor(_a / _b); } +vec3 mod(vec3 _a, vec3 _b) { return _a - _b * floor(_a / _b); } +vec4 mod(vec4 _a, vec4 _b) { return _a - _b * floor(_a / _b); } + +#else +# define CONST(_x) const _x +# define atan2(_x, _y) atan(_x, _y) +# define mul(_a, _b) ( (_a) * (_b) ) +# define saturate(_x) clamp(_x, 0.0, 1.0) +# define SAMPLER2D(_name, _reg) uniform sampler2D _name +# define SAMPLER2DMS(_name, _reg) uniform sampler2DMS _name +# define SAMPLER3D(_name, _reg) uniform sampler3D _name +# define SAMPLERCUBE(_name, _reg) uniform samplerCube _name +# define SAMPLER2DSHADOW(_name, _reg) uniform sampler2DShadow _name + +# define SAMPLER2DARRAY(_name, _reg) uniform sampler2DArray _name +# define SAMPLER2DMSARRAY(_name, _reg) uniform sampler2DMSArray _name +# define SAMPLERCUBEARRAY(_name, _reg) uniform samplerCubeArray _name +# define SAMPLER2DARRAYSHADOW(_name, _reg) uniform sampler2DArrayShadow _name + +# 
define ISAMPLER2D(_name, _reg) uniform isampler2D _name +# define USAMPLER2D(_name, _reg) uniform usampler2D _name +# define ISAMPLER3D(_name, _reg) uniform isampler3D _name +# define USAMPLER3D(_name, _reg) uniform usampler3D _name + +# define texture2DBias(_sampler, _coord, _bias) texture2D(_sampler, _coord, _bias) +# define textureCubeBias(_sampler, _coord, _bias) textureCube(_sampler, _coord, _bias) + +# if BGFX_SHADER_LANGUAGE_GLSL >= 130 +# define texture2D(_sampler, _coord) texture(_sampler, _coord) +# define texture2DArray(_sampler, _coord) texture(_sampler, _coord) +# define texture3D(_sampler, _coord) texture(_sampler, _coord) +# define textureCube(_sampler, _coord) texture(_sampler, _coord) +# define texture2DLod(_sampler, _coord, _lod) textureLod(_sampler, _coord, _lod) +# define texture2DLodOffset(_sampler, _coord, _lod, _offset) textureLodOffset(_sampler, _coord, _lod, _offset) +# endif // BGFX_SHADER_LANGUAGE_GLSL >= 130 + +vec3 instMul(vec3 _vec, mat3 _mtx) { return mul(_vec, _mtx); } +vec3 instMul(mat3 _mtx, vec3 _vec) { return mul(_mtx, _vec); } +vec4 instMul(vec4 _vec, mat4 _mtx) { return mul(_vec, _mtx); } +vec4 instMul(mat4 _mtx, vec4 _vec) { return mul(_mtx, _vec); } + +float rcp(float _a) { return 1.0/_a; } +vec2 rcp(vec2 _a) { return vec2(1.0)/_a; } +vec3 rcp(vec3 _a) { return vec3(1.0)/_a; } +vec4 rcp(vec4 _a) { return vec4(1.0)/_a; } +#endif // BGFX_SHADER_LANGUAGE_* + +vec2 vec2_splat(float _x) { return vec2(_x, _x); } +vec3 vec3_splat(float _x) { return vec3(_x, _x, _x); } +vec4 vec4_splat(float _x) { return vec4(_x, _x, _x, _x); } + +#if BGFX_SHADER_LANGUAGE_GLSL >= 130 || BGFX_SHADER_LANGUAGE_HLSL || BGFX_SHADER_LANGUAGE_PSSL || BGFX_SHADER_LANGUAGE_SPIRV || BGFX_SHADER_LANGUAGE_METAL +uvec2 uvec2_splat(uint _x) { return uvec2(_x, _x); } +uvec3 uvec3_splat(uint _x) { return uvec3(_x, _x, _x); } +uvec4 uvec4_splat(uint _x) { return uvec4(_x, _x, _x, _x); } +#endif // BGFX_SHADER_LANGUAGE_GLSL >= 130 || BGFX_SHADER_LANGUAGE_HLSL || 
BGFX_SHADER_LANGUAGE_PSSL || BGFX_SHADER_LANGUAGE_SPIRV || BGFX_SHADER_LANGUAGE_METAL + +mat4 mtxFromRows(vec4 _0, vec4 _1, vec4 _2, vec4 _3) +{ +#if BGFX_SHADER_LANGUAGE_GLSL + return transpose(mat4(_0, _1, _2, _3) ); +#else + return mat4(_0, _1, _2, _3); +#endif // BGFX_SHADER_LANGUAGE_GLSL +} +mat4 mtxFromCols(vec4 _0, vec4 _1, vec4 _2, vec4 _3) +{ +#if BGFX_SHADER_LANGUAGE_GLSL + return mat4(_0, _1, _2, _3); +#else + return transpose(mat4(_0, _1, _2, _3) ); +#endif // BGFX_SHADER_LANGUAGE_GLSL +} +mat3 mtxFromRows(vec3 _0, vec3 _1, vec3 _2) +{ +#if BGFX_SHADER_LANGUAGE_GLSL + return transpose(mat3(_0, _1, _2) ); +#else + return mat3(_0, _1, _2); +#endif // BGFX_SHADER_LANGUAGE_GLSL +} +mat3 mtxFromCols(vec3 _0, vec3 _1, vec3 _2) +{ +#if BGFX_SHADER_LANGUAGE_GLSL + return mat3(_0, _1, _2); +#else + return transpose(mat3(_0, _1, _2) ); +#endif // BGFX_SHADER_LANGUAGE_GLSL +} + +#if BGFX_SHADER_LANGUAGE_GLSL +#define mtxFromRows3(_0, _1, _2) transpose(mat3(_0, _1, _2) ) +#define mtxFromRows4(_0, _1, _2, _3) transpose(mat4(_0, _1, _2, _3) ) +#define mtxFromCols3(_0, _1, _2) mat3(_0, _1, _2) +#define mtxFromCols4(_0, _1, _2, _3) mat4(_0, _1, _2, _3) +#else +#define mtxFromRows3(_0, _1, _2) mat3(_0, _1, _2) +#define mtxFromRows4(_0, _1, _2, _3) mat4(_0, _1, _2, _3) +#define mtxFromCols3(_0, _1, _2) transpose(mat3(_0, _1, _2) ) +#define mtxFromCols4(_0, _1, _2, _3) transpose(mat4(_0, _1, _2, _3) ) +#endif // BGFX_SHADER_LANGUAGE_GLSL + +uniform vec4 u_viewRect; +uniform vec4 u_viewTexel; +uniform mat4 u_view; +uniform mat4 u_invView; +uniform mat4 u_proj; +uniform mat4 u_invProj; +uniform mat4 u_viewProj; +uniform mat4 u_invViewProj; +uniform mat4 u_model[BGFX_CONFIG_MAX_BONES]; +uniform mat4 u_modelView; +uniform mat4 u_modelViewProj; +uniform vec4 u_alphaRef4; +#define u_alphaRef u_alphaRef4.x + +#endif // __cplusplus + +#endif // BGFX_SHADER_H_HEADER_GUARD diff --git a/shaders/cs_cubes.sc b/shaders/cs_cubes.sc new file mode 100644 index 0000000..9b3c9fe --- 
// cs_cubes.sc — compute shader that expands the post-culling block selection
// into per-instance model matrices and one indirect draw call per visible block.

#include "orientations.sh"
#include "bgfx_compute.sh"

// INPUT
// grids: grid transforms, stored as 4 consecutive vec4 rows per matrix
BUFFER_RO(grids, vec4, 0);
// chunks (grid_id, offset_x, _y, _z)
BUFFER_RO(chunks, vec4, 1);
// blocks (chunk_id, transform [pos in chunk, rotation], idx_buf_offset, num_indices)
BUFFER_RO(blocks, vec4, 2);
// block selection (visible blocks post-culling); ids are stored as floats
BUFFER_RO(block_selection, float, 3);

// OUTPUT
// indirect draw calls
BUFFER_WR(indirectBuffer, uvec4, 4);
// matrices for each instance (4 vec4 rows per instance)
BUFFER_WR(instanceBuffer, vec4, 5);

// .w = number of selected blocks to draw this dispatch
uniform vec4 u_cubes_compute_params;

// Use 64*1*1 local threads; the selection is partitioned evenly across them.
NUM_THREADS(64, 1, 1)

void main()
{
	int tId = int(gl_GlobalInvocationID.x);
	int numDrawItems = int(u_cubes_compute_params.w);

	// Ceil-divide so every item is covered exactly once. The previous
	// "numDrawItems/64 + 1" also covered everything (idxMax clamps), but
	// handed each thread a needlessly large stride whenever numDrawItems
	// was a multiple of 64.
	int numToDrawPerThread = (numDrawItems + 63) / 64;

	int idxStart = tId * numToDrawPerThread;
	int idxMax = min(numDrawItems, (tId + 1) * numToDrawPerThread);

	for (int k = idxStart; k < idxMax; k++)
	{
		// Ids live in float-typed buffers; GLSL has no implicit
		// float->uint conversion, so construct explicitly.
		uint block_id = uint(block_selection[k]);

		// get block data
		uint b_chunk_id         = uint(blocks[block_id].x);
		uint b_transform        = uint(blocks[block_id].y);
		uint b_index_buf_offset = uint(blocks[block_id].z);
		uint b_num_indices      = uint(blocks[block_id].w);

		// get chunk data
		uint c_grid_id = uint(chunks[b_chunk_id].x);
		vec3 c_offset  = chunks[b_chunk_id].yzw;

		// get grid data (4 vec4 rows per grid matrix)
		mat4 g_mtx = mtxFromRows(
			  grids[c_grid_id*4u + 0u]
			, grids[c_grid_id*4u + 1u]
			, grids[c_grid_id*4u + 2u]
			, grids[c_grid_id*4u + 3u]
			);

		// calc block offset
		// b_transform == [off_x | off_y | off_z | orientation]
		// Unsigned literals keep the masks uint/uint (no signed mixing).
		uint b_orientation = b_transform & 0x1Fu; // 5 bit
		vec3 b_offset;
		b_offset.x = float((b_transform >> 11) & 0x7u); // 3 bit
		b_offset.y = float((b_transform >>  8) & 0x7u); // 3 bit
		b_offset.z = float((b_transform >>  5) & 0x7u); // 3 bit

		// chunk-local -> grid-local position (8 blocks per chunk axis)
		b_offset = b_offset + (c_offset * 8.0);

		// rotate block -> offset block -> apply g_mtx

		// apply orientation
		mat3 b_mtx_orientation = orientations[b_orientation];

		// apply offset
		// Note: glsl matrices are column major; mat4(mat3) places the
		// rotation in the upper-left 3x3, so only the translation column
		// needs to be written.
		mat4 b_mtx = mat4(b_mtx_orientation);
		b_mtx[3] = vec4(b_offset, 1.0);

		// apply g_mtx
		mat4 mtx_model = mul(b_mtx, g_mtx);

		instanceBuffer[k*4+0] = mtx_model[0];
		instanceBuffer[k*4+1] = mtx_model[1];
		instanceBuffer[k*4+2] = mtx_model[2];
		instanceBuffer[k*4+3] = mtx_model[3];

		// Fill indirect buffer
		drawIndexedIndirect(
			// Target location params:
			indirectBuffer,      // target buffer
			k,                   // index in buffer
			// Draw call params:
			b_num_indices,       // number of indices for this draw call
			1u,                  // number of instances for this draw call (0 disables the draw)
			b_index_buf_offset,  // offset in the index buffer
			0,                   // offset in the vertex buffer
			k                    // offset in the instance buffer
			);
	}
}
1.0); + // vec3 light_dir = light_pos - world_pos; + // float squared_distance = light_dir.x*light_dir.x + light_dir.y*light_dir.y + light_dir.z*light_dir.z; + // light_dir = normalize(light_dir); + + // vec4 diffuse = vec4(max(dot(light_dir, normal), 0) * light_col, 1.0); + // col = col * ambient + col * diffuse; + + // // Note: if gl_FrontFacing is making problems + // // => https://stackoverflow.com/questions/24375171/is-there-a-reliable-alternative-to-gl-frontfacing-in-a-fragment-shader + // //if (gl_FrontFacing) { + // // col = vec4(1.0,0.0,0.0,1.0); + // //}else{ + // // col = vec4(0.0,1.0,0.0,1.0); + // //} + + + // Calc id_coords (in id_atlas, for id sampling) + // in [0;ID_SIZE], is basically int + // id_coord = floor(id_coord); + // id_coord = id_coord + + // TODO use modf and combine floor/fract + // ! flooring may not be needed anyway + // just sample id_atlas with tex_coord + +// Texturing rework V2 +const uint IA_WIDTH = 2*8; // id's per row +const uint IA_HEIGHT = 2*8; // id's per column +const float ID_STRIDE_X = 1.0f / IA_WIDTH; +const float ID_STRIDE_Y = 1.0f / IA_HEIGHT; + + // Calc tex_coords (in tex_atlas, for texture sampling) + // in [0;1] in local texture space + tex_coord = tex_coord * vec2(IA_WIDTH, IA_HEIGHT); + tex_coord = fract(tex_coord); + + // vec4 col; + // if (tex_coord.x < 0.5) + // { + // col = vec4(1.0, 0.0, 0.0, 1.0); + // } + // else + // { + // col = vec4(0.0, 1.0, 0.0, 1.0); + // } + + vec4 col = vec4(tex_coord, 0.0, 1.0); + + gl_FragColor = col; +} diff --git a/shaders/fs_lines.sc b/shaders/fs_lines.sc new file mode 100644 index 0000000..7bc3060 --- /dev/null +++ b/shaders/fs_lines.sc @@ -0,0 +1,9 @@ +#include "bgfx_shader.sh" +#include "shaderlib.sh" + +uniform vec4 u_line_color; + +void main() +{ + gl_FragColor = u_line_color; +} diff --git a/shaders/orientations.sh b/shaders/orientations.sh new file mode 100644 index 0000000..4aa7584 --- /dev/null +++ b/shaders/orientations.sh @@ -0,0 +1,27 @@ +const mat3 
orientations[24] = +{ +{{1.000000, 0.000000, 0.000000}, {0.000000, 1.000000, 0.000000}, {0.000000, 0.000000, 1.000000}}, +{{0.000000, 1.000000, 0.000000}, {-1.000000, 0.000000, 0.000000}, {0.000000, 0.000000, 1.000000}}, +{{-1.000000, 0.000000, 0.000000}, {0.000000, -1.000000, 0.000000}, {0.000000, 0.000000, 1.000000}}, +{{0.000000, -1.000000, 0.000000}, {1.000000, 0.000000, 0.000000}, {0.000000, 0.000000, 1.000000}}, +{{0.000000, 0.000000, -1.000000}, {0.000000, 1.000000, 0.000000}, {1.000000, 0.000000, 0.000000}}, +{{0.000000, 1.000000, 0.000000}, {0.000000, 0.000000, 1.000000}, {1.000000, 0.000000, 0.000000}}, +{{0.000000, 0.000000, 1.000000}, {0.000000, -1.000000, 0.000000}, {1.000000, 0.000000, 0.000000}}, +{{0.000000, -1.000000, 0.000000}, {0.000000, 0.000000, -1.000000}, {1.000000, 0.000000, 0.000000}}, +{{-1.000000, 0.000000, 0.000000}, {0.000000, 1.000000, 0.000000}, {0.000000, 0.000000, -1.000000}}, +{{0.000000, 1.000000, 0.000000}, {1.000000, 0.000000, 0.000000}, {0.000000, 0.000000, -1.000000}}, +{{1.000000, 0.000000, 0.000000}, {0.000000, -1.000000, 0.000000}, {0.000000, 0.000000, -1.000000}}, +{{0.000000, -1.000000, 0.000000}, {-1.000000, 0.000000, 0.000000}, {0.000000, 0.000000, -1.000000}}, +{{0.000000, 0.000000, 1.000000}, {0.000000, 1.000000, 0.000000}, {-1.000000, 0.000000, 0.000000}}, +{{0.000000, 1.000000, 0.000000}, {0.000000, 0.000000, -1.000000}, {-1.000000, 0.000000, 0.000000}}, +{{0.000000, 0.000000, -1.000000}, {0.000000, -1.000000, 0.000000}, {-1.000000, 0.000000, 0.000000}}, +{{0.000000, -1.000000, 0.000000}, {0.000000, 0.000000, 1.000000}, {-1.000000, 0.000000, 0.000000}}, +{{1.000000, 0.000000, 0.000000}, {0.000000, 0.000000, 1.000000}, {0.000000, -1.000000, 0.000000}}, +{{0.000000, 0.000000, 1.000000}, {-1.000000, 0.000000, 0.000000}, {0.000000, -1.000000, 0.000000}}, +{{-1.000000, 0.000000, 0.000000}, {0.000000, 0.000000, -1.000000}, {0.000000, -1.000000, 0.000000}}, +{{0.000000, 0.000000, -1.000000}, {1.000000, 0.000000, 0.000000}, 
{0.000000, -1.000000, 0.000000}}, +{{1.000000, 0.000000, 0.000000}, {0.000000, 0.000000, -1.000000}, {0.000000, 1.000000, 0.000000}}, +{{0.000000, 0.000000, -1.000000}, {-1.000000, 0.000000, 0.000000}, {0.000000, 1.000000, 0.000000}}, +{{-1.000000, 0.000000, 0.000000}, {0.000000, 0.000000, 1.000000}, {0.000000, 1.000000, 0.000000}}, +{{0.000000, 0.000000, 1.000000}, {1.000000, 0.000000, 0.000000}, {0.000000, 1.000000, 0.000000}}, +}; diff --git a/shaders/shaderlib.sh b/shaders/shaderlib.sh new file mode 100644 index 0000000..d4953d0 --- /dev/null +++ b/shaders/shaderlib.sh @@ -0,0 +1,431 @@ +/* + * Copyright 2011-2022 Branimir Karadzic. All rights reserved. + * License: https://github.com/bkaradzic/bgfx/blob/master/LICENSE + */ + +#ifndef __SHADERLIB_SH__ +#define __SHADERLIB_SH__ + +vec4 encodeRE8(float _r) +{ + float exponent = ceil(log2(_r) ); + return vec4(_r / exp2(exponent) + , 0.0 + , 0.0 + , (exponent + 128.0) / 255.0 + ); +} + +float decodeRE8(vec4 _re8) +{ + float exponent = _re8.w * 255.0 - 128.0; + return _re8.x * exp2(exponent); +} + +vec4 encodeRGBE8(vec3 _rgb) +{ + vec4 rgbe8; + float maxComponent = max(max(_rgb.x, _rgb.y), _rgb.z); + float exponent = ceil(log2(maxComponent) ); + rgbe8.xyz = _rgb / exp2(exponent); + rgbe8.w = (exponent + 128.0) / 255.0; + return rgbe8; +} + +vec3 decodeRGBE8(vec4 _rgbe8) +{ + float exponent = _rgbe8.w * 255.0 - 128.0; + vec3 rgb = _rgbe8.xyz * exp2(exponent); + return rgb; +} + +vec3 encodeNormalUint(vec3 _normal) +{ + return _normal * 0.5 + 0.5; +} + +vec3 decodeNormalUint(vec3 _encodedNormal) +{ + return _encodedNormal * 2.0 - 1.0; +} + +vec2 encodeNormalSphereMap(vec3 _normal) +{ + return normalize(_normal.xy) * sqrt(_normal.z * 0.5 + 0.5); +} + +vec3 decodeNormalSphereMap(vec2 _encodedNormal) +{ + float zz = dot(_encodedNormal, _encodedNormal) * 2.0 - 1.0; + return vec3(normalize(_encodedNormal.xy) * sqrt(1.0 - zz*zz), zz); +} + +vec2 octahedronWrap(vec2 _val) +{ + // Reference(s): + // - Octahedron normal vector 
encoding + // https://web.archive.org/web/20191027010600/https://knarkowicz.wordpress.com/2014/04/16/octahedron-normal-vector-encoding/comment-page-1/ + return (1.0 - abs(_val.yx) ) + * mix(vec2_splat(-1.0), vec2_splat(1.0), vec2(greaterThanEqual(_val.xy, vec2_splat(0.0) ) ) ); +} + +vec2 encodeNormalOctahedron(vec3 _normal) +{ + _normal /= abs(_normal.x) + abs(_normal.y) + abs(_normal.z); + _normal.xy = _normal.z >= 0.0 ? _normal.xy : octahedronWrap(_normal.xy); + _normal.xy = _normal.xy * 0.5 + 0.5; + return _normal.xy; +} + +vec3 decodeNormalOctahedron(vec2 _encodedNormal) +{ + _encodedNormal = _encodedNormal * 2.0 - 1.0; + + vec3 normal; + normal.z = 1.0 - abs(_encodedNormal.x) - abs(_encodedNormal.y); + normal.xy = normal.z >= 0.0 ? _encodedNormal.xy : octahedronWrap(_encodedNormal.xy); + return normalize(normal); +} + +vec3 convertRGB2XYZ(vec3 _rgb) +{ + // Reference(s): + // - RGB/XYZ Matrices + // https://web.archive.org/web/20191027010220/http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html + vec3 xyz; + xyz.x = dot(vec3(0.4124564, 0.3575761, 0.1804375), _rgb); + xyz.y = dot(vec3(0.2126729, 0.7151522, 0.0721750), _rgb); + xyz.z = dot(vec3(0.0193339, 0.1191920, 0.9503041), _rgb); + return xyz; +} + +vec3 convertXYZ2RGB(vec3 _xyz) +{ + vec3 rgb; + rgb.x = dot(vec3( 3.2404542, -1.5371385, -0.4985314), _xyz); + rgb.y = dot(vec3(-0.9692660, 1.8760108, 0.0415560), _xyz); + rgb.z = dot(vec3( 0.0556434, -0.2040259, 1.0572252), _xyz); + return rgb; +} + +vec3 convertXYZ2Yxy(vec3 _xyz) +{ + // Reference(s): + // - XYZ to xyY + // https://web.archive.org/web/20191027010144/http://www.brucelindbloom.com/index.html?Eqn_XYZ_to_xyY.html + float inv = 1.0/dot(_xyz, vec3(1.0, 1.0, 1.0) ); + return vec3(_xyz.y, _xyz.x*inv, _xyz.y*inv); +} + +vec3 convertYxy2XYZ(vec3 _Yxy) +{ + // Reference(s): + // - xyY to XYZ + // https://web.archive.org/web/20191027010036/http://www.brucelindbloom.com/index.html?Eqn_xyY_to_XYZ.html + vec3 xyz; + xyz.x = _Yxy.x*_Yxy.y/_Yxy.z; 
+ xyz.y = _Yxy.x; + xyz.z = _Yxy.x*(1.0 - _Yxy.y - _Yxy.z)/_Yxy.z; + return xyz; +} + +vec3 convertRGB2Yxy(vec3 _rgb) +{ + return convertXYZ2Yxy(convertRGB2XYZ(_rgb) ); +} + +vec3 convertYxy2RGB(vec3 _Yxy) +{ + return convertXYZ2RGB(convertYxy2XYZ(_Yxy) ); +} + +vec3 convertRGB2Yuv(vec3 _rgb) +{ + vec3 yuv; + yuv.x = dot(_rgb, vec3(0.299, 0.587, 0.114) ); + yuv.y = (_rgb.x - yuv.x)*0.713 + 0.5; + yuv.z = (_rgb.z - yuv.x)*0.564 + 0.5; + return yuv; +} + +vec3 convertYuv2RGB(vec3 _yuv) +{ + vec3 rgb; + rgb.x = _yuv.x + 1.403*(_yuv.y-0.5); + rgb.y = _yuv.x - 0.344*(_yuv.y-0.5) - 0.714*(_yuv.z-0.5); + rgb.z = _yuv.x + 1.773*(_yuv.z-0.5); + return rgb; +} + +vec3 convertRGB2YIQ(vec3 _rgb) +{ + vec3 yiq; + yiq.x = dot(vec3(0.299, 0.587, 0.114 ), _rgb); + yiq.y = dot(vec3(0.595716, -0.274453, -0.321263), _rgb); + yiq.z = dot(vec3(0.211456, -0.522591, 0.311135), _rgb); + return yiq; +} + +vec3 convertYIQ2RGB(vec3 _yiq) +{ + vec3 rgb; + rgb.x = dot(vec3(1.0, 0.9563, 0.6210), _yiq); + rgb.y = dot(vec3(1.0, -0.2721, -0.6474), _yiq); + rgb.z = dot(vec3(1.0, -1.1070, 1.7046), _yiq); + return rgb; +} + +vec3 toLinear(vec3 _rgb) +{ + return pow(abs(_rgb), vec3_splat(2.2) ); +} + +vec4 toLinear(vec4 _rgba) +{ + return vec4(toLinear(_rgba.xyz), _rgba.w); +} + +vec3 toLinearAccurate(vec3 _rgb) +{ + vec3 lo = _rgb / 12.92; + vec3 hi = pow( (_rgb + 0.055) / 1.055, vec3_splat(2.4) ); + vec3 rgb = mix(hi, lo, vec3(lessThanEqual(_rgb, vec3_splat(0.04045) ) ) ); + return rgb; +} + +vec4 toLinearAccurate(vec4 _rgba) +{ + return vec4(toLinearAccurate(_rgba.xyz), _rgba.w); +} + +float toGamma(float _r) +{ + return pow(abs(_r), 1.0/2.2); +} + +vec3 toGamma(vec3 _rgb) +{ + return pow(abs(_rgb), vec3_splat(1.0/2.2) ); +} + +vec4 toGamma(vec4 _rgba) +{ + return vec4(toGamma(_rgba.xyz), _rgba.w); +} + +vec3 toGammaAccurate(vec3 _rgb) +{ + vec3 lo = _rgb * 12.92; + vec3 hi = pow(abs(_rgb), vec3_splat(1.0/2.4) ) * 1.055 - 0.055; + vec3 rgb = mix(hi, lo, vec3(lessThanEqual(_rgb, 
vec3_splat(0.0031308) ) ) ); + return rgb; +} + +vec4 toGammaAccurate(vec4 _rgba) +{ + return vec4(toGammaAccurate(_rgba.xyz), _rgba.w); +} + +vec3 toReinhard(vec3 _rgb) +{ + return toGamma(_rgb/(_rgb+vec3_splat(1.0) ) ); +} + +vec4 toReinhard(vec4 _rgba) +{ + return vec4(toReinhard(_rgba.xyz), _rgba.w); +} + +vec3 toFilmic(vec3 _rgb) +{ + _rgb = max(vec3_splat(0.0), _rgb - 0.004); + _rgb = (_rgb*(6.2*_rgb + 0.5) ) / (_rgb*(6.2*_rgb + 1.7) + 0.06); + return _rgb; +} + +vec4 toFilmic(vec4 _rgba) +{ + return vec4(toFilmic(_rgba.xyz), _rgba.w); +} + +vec3 toAcesFilmic(vec3 _rgb) +{ + // Reference(s): + // - ACES Filmic Tone Mapping Curve + // https://web.archive.org/web/20191027010704/https://knarkowicz.wordpress.com/2016/01/06/aces-filmic-tone-mapping-curve/ + float aa = 2.51f; + float bb = 0.03f; + float cc = 2.43f; + float dd = 0.59f; + float ee = 0.14f; + return saturate( (_rgb*(aa*_rgb + bb) )/(_rgb*(cc*_rgb + dd) + ee) ); +} + +vec4 toAcesFilmic(vec4 _rgba) +{ + return vec4(toAcesFilmic(_rgba.xyz), _rgba.w); +} + +vec3 luma(vec3 _rgb) +{ + float yy = dot(vec3(0.2126729, 0.7151522, 0.0721750), _rgb); + return vec3_splat(yy); +} + +vec4 luma(vec4 _rgba) +{ + return vec4(luma(_rgba.xyz), _rgba.w); +} + +vec3 conSatBri(vec3 _rgb, vec3 _csb) +{ + vec3 rgb = _rgb * _csb.z; + rgb = mix(luma(rgb), rgb, _csb.y); + rgb = mix(vec3_splat(0.5), rgb, _csb.x); + return rgb; +} + +vec4 conSatBri(vec4 _rgba, vec3 _csb) +{ + return vec4(conSatBri(_rgba.xyz, _csb), _rgba.w); +} + +vec3 posterize(vec3 _rgb, float _numColors) +{ + return floor(_rgb*_numColors) / _numColors; +} + +vec4 posterize(vec4 _rgba, float _numColors) +{ + return vec4(posterize(_rgba.xyz, _numColors), _rgba.w); +} + +vec3 sepia(vec3 _rgb) +{ + vec3 color; + color.x = dot(_rgb, vec3(0.393, 0.769, 0.189) ); + color.y = dot(_rgb, vec3(0.349, 0.686, 0.168) ); + color.z = dot(_rgb, vec3(0.272, 0.534, 0.131) ); + return color; +} + +vec4 sepia(vec4 _rgba) +{ + return vec4(sepia(_rgba.xyz), _rgba.w); +} + +vec3 
blendOverlay(vec3 _base, vec3 _blend) +{ + vec3 lt = 2.0 * _base * _blend; + vec3 gte = 1.0 - 2.0 * (1.0 - _base) * (1.0 - _blend); + return mix(lt, gte, step(vec3_splat(0.5), _base) ); +} + +vec4 blendOverlay(vec4 _base, vec4 _blend) +{ + return vec4(blendOverlay(_base.xyz, _blend.xyz), _base.w); +} + +vec3 adjustHue(vec3 _rgb, float _hue) +{ + vec3 yiq = convertRGB2YIQ(_rgb); + float angle = _hue + atan2(yiq.z, yiq.y); + float len = length(yiq.yz); + return convertYIQ2RGB(vec3(yiq.x, len*cos(angle), len*sin(angle) ) ); +} + +vec4 packFloatToRgba(float _value) +{ + const vec4 shift = vec4(256 * 256 * 256, 256 * 256, 256, 1.0); + const vec4 mask = vec4(0, 1.0 / 256.0, 1.0 / 256.0, 1.0 / 256.0); + vec4 comp = fract(_value * shift); + comp -= comp.xxyz * mask; + return comp; +} + +float unpackRgbaToFloat(vec4 _rgba) +{ + const vec4 shift = vec4(1.0 / (256.0 * 256.0 * 256.0), 1.0 / (256.0 * 256.0), 1.0 / 256.0, 1.0); + return dot(_rgba, shift); +} + +vec2 packHalfFloat(float _value) +{ + const vec2 shift = vec2(256, 1.0); + const vec2 mask = vec2(0, 1.0 / 256.0); + vec2 comp = fract(_value * shift); + comp -= comp.xx * mask; + return comp; +} + +float unpackHalfFloat(vec2 _rg) +{ + const vec2 shift = vec2(1.0 / 256.0, 1.0); + return dot(_rg, shift); +} + +float random(vec2 _uv) +{ + return fract(sin(dot(_uv.xy, vec2(12.9898, 78.233) ) ) * 43758.5453); +} + +vec3 fixCubeLookup(vec3 _v, float _lod, float _topLevelCubeSize) +{ + // Reference(s): + // - Seamless cube-map filtering + // https://web.archive.org/web/20190411181934/http://the-witness.net/news/2012/02/seamless-cube-map-filtering/ + float ax = abs(_v.x); + float ay = abs(_v.y); + float az = abs(_v.z); + float vmax = max(max(ax, ay), az); + float scale = 1.0 - exp2(_lod) / _topLevelCubeSize; + if (ax != vmax) { _v.x *= scale; } + if (ay != vmax) { _v.y *= scale; } + if (az != vmax) { _v.z *= scale; } + return _v; +} + +vec2 texture2DBc5(sampler2D _sampler, vec2 _uv) +{ +#if BGFX_SHADER_LANGUAGE_HLSL && 
BGFX_SHADER_LANGUAGE_HLSL <= 300 + return texture2D(_sampler, _uv).yx; +#else + return texture2D(_sampler, _uv).xy; +#endif +} + +mat3 cofactor(mat4 _m) +{ + // Reference: + // Cofactor of matrix. Use to transform normals. The code assumes the last column of _m is [0,0,0,1]. + // https://www.shadertoy.com/view/3s33zj + // https://github.com/graphitemaster/normals_revisited + return mat3( + _m[1][1]*_m[2][2]-_m[1][2]*_m[2][1], + _m[1][2]*_m[2][0]-_m[1][0]*_m[2][2], + _m[1][0]*_m[2][1]-_m[1][1]*_m[2][0], + _m[0][2]*_m[2][1]-_m[0][1]*_m[2][2], + _m[0][0]*_m[2][2]-_m[0][2]*_m[2][0], + _m[0][1]*_m[2][0]-_m[0][0]*_m[2][1], + _m[0][1]*_m[1][2]-_m[0][2]*_m[1][1], + _m[0][2]*_m[1][0]-_m[0][0]*_m[1][2], + _m[0][0]*_m[1][1]-_m[0][1]*_m[1][0] + ); +} + +float toClipSpaceDepth(float _depthTextureZ) +{ +#if BGFX_SHADER_LANGUAGE_GLSL + return _depthTextureZ * 2.0 - 1.0; +#else + return _depthTextureZ; +#endif // BGFX_SHADER_LANGUAGE_GLSL +} + +vec3 clipToWorld(mat4 _invViewProj, vec3 _clipPos) +{ + vec4 wpos = mul(_invViewProj, vec4(_clipPos, 1.0) ); + return wpos.xyz / wpos.w; +} + +#endif // __SHADERLIB_SH__ diff --git a/shaders/varying_cubes.sc b/shaders/varying_cubes.sc new file mode 100644 index 0000000..9bfeea9 --- /dev/null +++ b/shaders/varying_cubes.sc @@ -0,0 +1,6 @@ +vec2 tex_coord : TEXCOORD0 = vec2(0.0, 0.0); +vec3 world_pos : TEXCOORD1 = vec3(0.0, 0.0, 0.0); +vec4 i_data0 : TEXCOORD7; +vec4 i_data1 : TEXCOORD6; +vec4 i_data2 : TEXCOORD5; +vec4 i_data3 : TEXCOORD4; \ No newline at end of file diff --git a/shaders/varying_lines.sc b/shaders/varying_lines.sc new file mode 100644 index 0000000..32f30d1 --- /dev/null +++ b/shaders/varying_lines.sc @@ -0,0 +1 @@ +vec3 a_position : POSITION; \ No newline at end of file diff --git a/shaders/verts.sh b/shaders/verts.sh new file mode 100644 index 0000000..00f34b9 --- /dev/null +++ b/shaders/verts.sh @@ -0,0 +1,732 @@ +const vec3 verts[729] = +{ +{-0.500000, -0.500000, -0.500000}, +{-0.375000, -0.500000, -0.500000}, 
+{-0.250000, -0.500000, -0.500000}, +{-0.125000, -0.500000, -0.500000}, +{0.000000, -0.500000, -0.500000}, +{0.125000, -0.500000, -0.500000}, +{0.250000, -0.500000, -0.500000}, +{0.375000, -0.500000, -0.500000}, +{0.500000, -0.500000, -0.500000}, +{-0.500000, -0.375000, -0.500000}, +{-0.375000, -0.375000, -0.500000}, +{-0.250000, -0.375000, -0.500000}, +{-0.125000, -0.375000, -0.500000}, +{0.000000, -0.375000, -0.500000}, +{0.125000, -0.375000, -0.500000}, +{0.250000, -0.375000, -0.500000}, +{0.375000, -0.375000, -0.500000}, +{0.500000, -0.375000, -0.500000}, +{-0.500000, -0.250000, -0.500000}, +{-0.375000, -0.250000, -0.500000}, +{-0.250000, -0.250000, -0.500000}, +{-0.125000, -0.250000, -0.500000}, +{0.000000, -0.250000, -0.500000}, +{0.125000, -0.250000, -0.500000}, +{0.250000, -0.250000, -0.500000}, +{0.375000, -0.250000, -0.500000}, +{0.500000, -0.250000, -0.500000}, +{-0.500000, -0.125000, -0.500000}, +{-0.375000, -0.125000, -0.500000}, +{-0.250000, -0.125000, -0.500000}, +{-0.125000, -0.125000, -0.500000}, +{0.000000, -0.125000, -0.500000}, +{0.125000, -0.125000, -0.500000}, +{0.250000, -0.125000, -0.500000}, +{0.375000, -0.125000, -0.500000}, +{0.500000, -0.125000, -0.500000}, +{-0.500000, 0.000000, -0.500000}, +{-0.375000, 0.000000, -0.500000}, +{-0.250000, 0.000000, -0.500000}, +{-0.125000, 0.000000, -0.500000}, +{0.000000, 0.000000, -0.500000}, +{0.125000, 0.000000, -0.500000}, +{0.250000, 0.000000, -0.500000}, +{0.375000, 0.000000, -0.500000}, +{0.500000, 0.000000, -0.500000}, +{-0.500000, 0.125000, -0.500000}, +{-0.375000, 0.125000, -0.500000}, +{-0.250000, 0.125000, -0.500000}, +{-0.125000, 0.125000, -0.500000}, +{0.000000, 0.125000, -0.500000}, +{0.125000, 0.125000, -0.500000}, +{0.250000, 0.125000, -0.500000}, +{0.375000, 0.125000, -0.500000}, +{0.500000, 0.125000, -0.500000}, +{-0.500000, 0.250000, -0.500000}, +{-0.375000, 0.250000, -0.500000}, +{-0.250000, 0.250000, -0.500000}, +{-0.125000, 0.250000, -0.500000}, +{0.000000, 0.250000, -0.500000}, 
+{0.125000, 0.250000, -0.500000}, +{0.250000, 0.250000, -0.500000}, +{0.375000, 0.250000, -0.500000}, +{0.500000, 0.250000, -0.500000}, +{-0.500000, 0.375000, -0.500000}, +{-0.375000, 0.375000, -0.500000}, +{-0.250000, 0.375000, -0.500000}, +{-0.125000, 0.375000, -0.500000}, +{0.000000, 0.375000, -0.500000}, +{0.125000, 0.375000, -0.500000}, +{0.250000, 0.375000, -0.500000}, +{0.375000, 0.375000, -0.500000}, +{0.500000, 0.375000, -0.500000}, +{-0.500000, 0.500000, -0.500000}, +{-0.375000, 0.500000, -0.500000}, +{-0.250000, 0.500000, -0.500000}, +{-0.125000, 0.500000, -0.500000}, +{0.000000, 0.500000, -0.500000}, +{0.125000, 0.500000, -0.500000}, +{0.250000, 0.500000, -0.500000}, +{0.375000, 0.500000, -0.500000}, +{0.500000, 0.500000, -0.500000}, +{-0.500000, -0.500000, -0.375000}, +{-0.375000, -0.500000, -0.375000}, +{-0.250000, -0.500000, -0.375000}, +{-0.125000, -0.500000, -0.375000}, +{0.000000, -0.500000, -0.375000}, +{0.125000, -0.500000, -0.375000}, +{0.250000, -0.500000, -0.375000}, +{0.375000, -0.500000, -0.375000}, +{0.500000, -0.500000, -0.375000}, +{-0.500000, -0.375000, -0.375000}, +{-0.375000, -0.375000, -0.375000}, +{-0.250000, -0.375000, -0.375000}, +{-0.125000, -0.375000, -0.375000}, +{0.000000, -0.375000, -0.375000}, +{0.125000, -0.375000, -0.375000}, +{0.250000, -0.375000, -0.375000}, +{0.375000, -0.375000, -0.375000}, +{0.500000, -0.375000, -0.375000}, +{-0.500000, -0.250000, -0.375000}, +{-0.375000, -0.250000, -0.375000}, +{-0.250000, -0.250000, -0.375000}, +{-0.125000, -0.250000, -0.375000}, +{0.000000, -0.250000, -0.375000}, +{0.125000, -0.250000, -0.375000}, +{0.250000, -0.250000, -0.375000}, +{0.375000, -0.250000, -0.375000}, +{0.500000, -0.250000, -0.375000}, +{-0.500000, -0.125000, -0.375000}, +{-0.375000, -0.125000, -0.375000}, +{-0.250000, -0.125000, -0.375000}, +{-0.125000, -0.125000, -0.375000}, +{0.000000, -0.125000, -0.375000}, +{0.125000, -0.125000, -0.375000}, +{0.250000, -0.125000, -0.375000}, +{0.375000, -0.125000, -0.375000}, 
+{0.500000, -0.125000, -0.375000}, +{-0.500000, 0.000000, -0.375000}, +{-0.375000, 0.000000, -0.375000}, +{-0.250000, 0.000000, -0.375000}, +{-0.125000, 0.000000, -0.375000}, +{0.000000, 0.000000, -0.375000}, +{0.125000, 0.000000, -0.375000}, +{0.250000, 0.000000, -0.375000}, +{0.375000, 0.000000, -0.375000}, +{0.500000, 0.000000, -0.375000}, +{-0.500000, 0.125000, -0.375000}, +{-0.375000, 0.125000, -0.375000}, +{-0.250000, 0.125000, -0.375000}, +{-0.125000, 0.125000, -0.375000}, +{0.000000, 0.125000, -0.375000}, +{0.125000, 0.125000, -0.375000}, +{0.250000, 0.125000, -0.375000}, +{0.375000, 0.125000, -0.375000}, +{0.500000, 0.125000, -0.375000}, +{-0.500000, 0.250000, -0.375000}, +{-0.375000, 0.250000, -0.375000}, +{-0.250000, 0.250000, -0.375000}, +{-0.125000, 0.250000, -0.375000}, +{0.000000, 0.250000, -0.375000}, +{0.125000, 0.250000, -0.375000}, +{0.250000, 0.250000, -0.375000}, +{0.375000, 0.250000, -0.375000}, +{0.500000, 0.250000, -0.375000}, +{-0.500000, 0.375000, -0.375000}, +{-0.375000, 0.375000, -0.375000}, +{-0.250000, 0.375000, -0.375000}, +{-0.125000, 0.375000, -0.375000}, +{0.000000, 0.375000, -0.375000}, +{0.125000, 0.375000, -0.375000}, +{0.250000, 0.375000, -0.375000}, +{0.375000, 0.375000, -0.375000}, +{0.500000, 0.375000, -0.375000}, +{-0.500000, 0.500000, -0.375000}, +{-0.375000, 0.500000, -0.375000}, +{-0.250000, 0.500000, -0.375000}, +{-0.125000, 0.500000, -0.375000}, +{0.000000, 0.500000, -0.375000}, +{0.125000, 0.500000, -0.375000}, +{0.250000, 0.500000, -0.375000}, +{0.375000, 0.500000, -0.375000}, +{0.500000, 0.500000, -0.375000}, +{-0.500000, -0.500000, -0.250000}, +{-0.375000, -0.500000, -0.250000}, +{-0.250000, -0.500000, -0.250000}, +{-0.125000, -0.500000, -0.250000}, +{0.000000, -0.500000, -0.250000}, +{0.125000, -0.500000, -0.250000}, +{0.250000, -0.500000, -0.250000}, +{0.375000, -0.500000, -0.250000}, +{0.500000, -0.500000, -0.250000}, +{-0.500000, -0.375000, -0.250000}, +{-0.375000, -0.375000, -0.250000}, +{-0.250000, -0.375000, 
-0.250000}, +{-0.125000, -0.375000, -0.250000}, +{0.000000, -0.375000, -0.250000}, +{0.125000, -0.375000, -0.250000}, +{0.250000, -0.375000, -0.250000}, +{0.375000, -0.375000, -0.250000}, +{0.500000, -0.375000, -0.250000}, +{-0.500000, -0.250000, -0.250000}, +{-0.375000, -0.250000, -0.250000}, +{-0.250000, -0.250000, -0.250000}, +{-0.125000, -0.250000, -0.250000}, +{0.000000, -0.250000, -0.250000}, +{0.125000, -0.250000, -0.250000}, +{0.250000, -0.250000, -0.250000}, +{0.375000, -0.250000, -0.250000}, +{0.500000, -0.250000, -0.250000}, +{-0.500000, -0.125000, -0.250000}, +{-0.375000, -0.125000, -0.250000}, +{-0.250000, -0.125000, -0.250000}, +{-0.125000, -0.125000, -0.250000}, +{0.000000, -0.125000, -0.250000}, +{0.125000, -0.125000, -0.250000}, +{0.250000, -0.125000, -0.250000}, +{0.375000, -0.125000, -0.250000}, +{0.500000, -0.125000, -0.250000}, +{-0.500000, 0.000000, -0.250000}, +{-0.375000, 0.000000, -0.250000}, +{-0.250000, 0.000000, -0.250000}, +{-0.125000, 0.000000, -0.250000}, +{0.000000, 0.000000, -0.250000}, +{0.125000, 0.000000, -0.250000}, +{0.250000, 0.000000, -0.250000}, +{0.375000, 0.000000, -0.250000}, +{0.500000, 0.000000, -0.250000}, +{-0.500000, 0.125000, -0.250000}, +{-0.375000, 0.125000, -0.250000}, +{-0.250000, 0.125000, -0.250000}, +{-0.125000, 0.125000, -0.250000}, +{0.000000, 0.125000, -0.250000}, +{0.125000, 0.125000, -0.250000}, +{0.250000, 0.125000, -0.250000}, +{0.375000, 0.125000, -0.250000}, +{0.500000, 0.125000, -0.250000}, +{-0.500000, 0.250000, -0.250000}, +{-0.375000, 0.250000, -0.250000}, +{-0.250000, 0.250000, -0.250000}, +{-0.125000, 0.250000, -0.250000}, +{0.000000, 0.250000, -0.250000}, +{0.125000, 0.250000, -0.250000}, +{0.250000, 0.250000, -0.250000}, +{0.375000, 0.250000, -0.250000}, +{0.500000, 0.250000, -0.250000}, +{-0.500000, 0.375000, -0.250000}, +{-0.375000, 0.375000, -0.250000}, +{-0.250000, 0.375000, -0.250000}, +{-0.125000, 0.375000, -0.250000}, +{0.000000, 0.375000, -0.250000}, +{0.125000, 0.375000, -0.250000}, 
+{0.250000, 0.375000, -0.250000}, +{0.375000, 0.375000, -0.250000}, +{0.500000, 0.375000, -0.250000}, +{-0.500000, 0.500000, -0.250000}, +{-0.375000, 0.500000, -0.250000}, +{-0.250000, 0.500000, -0.250000}, +{-0.125000, 0.500000, -0.250000}, +{0.000000, 0.500000, -0.250000}, +{0.125000, 0.500000, -0.250000}, +{0.250000, 0.500000, -0.250000}, +{0.375000, 0.500000, -0.250000}, +{0.500000, 0.500000, -0.250000}, +{-0.500000, -0.500000, -0.125000}, +{-0.375000, -0.500000, -0.125000}, +{-0.250000, -0.500000, -0.125000}, +{-0.125000, -0.500000, -0.125000}, +{0.000000, -0.500000, -0.125000}, +{0.125000, -0.500000, -0.125000}, +{0.250000, -0.500000, -0.125000}, +{0.375000, -0.500000, -0.125000}, +{0.500000, -0.500000, -0.125000}, +{-0.500000, -0.375000, -0.125000}, +{-0.375000, -0.375000, -0.125000}, +{-0.250000, -0.375000, -0.125000}, +{-0.125000, -0.375000, -0.125000}, +{0.000000, -0.375000, -0.125000}, +{0.125000, -0.375000, -0.125000}, +{0.250000, -0.375000, -0.125000}, +{0.375000, -0.375000, -0.125000}, +{0.500000, -0.375000, -0.125000}, +{-0.500000, -0.250000, -0.125000}, +{-0.375000, -0.250000, -0.125000}, +{-0.250000, -0.250000, -0.125000}, +{-0.125000, -0.250000, -0.125000}, +{0.000000, -0.250000, -0.125000}, +{0.125000, -0.250000, -0.125000}, +{0.250000, -0.250000, -0.125000}, +{0.375000, -0.250000, -0.125000}, +{0.500000, -0.250000, -0.125000}, +{-0.500000, -0.125000, -0.125000}, +{-0.375000, -0.125000, -0.125000}, +{-0.250000, -0.125000, -0.125000}, +{-0.125000, -0.125000, -0.125000}, +{0.000000, -0.125000, -0.125000}, +{0.125000, -0.125000, -0.125000}, +{0.250000, -0.125000, -0.125000}, +{0.375000, -0.125000, -0.125000}, +{0.500000, -0.125000, -0.125000}, +{-0.500000, 0.000000, -0.125000}, +{-0.375000, 0.000000, -0.125000}, +{-0.250000, 0.000000, -0.125000}, +{-0.125000, 0.000000, -0.125000}, +{0.000000, 0.000000, -0.125000}, +{0.125000, 0.000000, -0.125000}, +{0.250000, 0.000000, -0.125000}, +{0.375000, 0.000000, -0.125000}, +{0.500000, 0.000000, -0.125000}, 
+{-0.500000, 0.125000, -0.125000}, +{-0.375000, 0.125000, -0.125000}, +{-0.250000, 0.125000, -0.125000}, +{-0.125000, 0.125000, -0.125000}, +{0.000000, 0.125000, -0.125000}, +{0.125000, 0.125000, -0.125000}, +{0.250000, 0.125000, -0.125000}, +{0.375000, 0.125000, -0.125000}, +{0.500000, 0.125000, -0.125000}, +{-0.500000, 0.250000, -0.125000}, +{-0.375000, 0.250000, -0.125000}, +{-0.250000, 0.250000, -0.125000}, +{-0.125000, 0.250000, -0.125000}, +{0.000000, 0.250000, -0.125000}, +{0.125000, 0.250000, -0.125000}, +{0.250000, 0.250000, -0.125000}, +{0.375000, 0.250000, -0.125000}, +{0.500000, 0.250000, -0.125000}, +{-0.500000, 0.375000, -0.125000}, +{-0.375000, 0.375000, -0.125000}, +{-0.250000, 0.375000, -0.125000}, +{-0.125000, 0.375000, -0.125000}, +{0.000000, 0.375000, -0.125000}, +{0.125000, 0.375000, -0.125000}, +{0.250000, 0.375000, -0.125000}, +{0.375000, 0.375000, -0.125000}, +{0.500000, 0.375000, -0.125000}, +{-0.500000, 0.500000, -0.125000}, +{-0.375000, 0.500000, -0.125000}, +{-0.250000, 0.500000, -0.125000}, +{-0.125000, 0.500000, -0.125000}, +{0.000000, 0.500000, -0.125000}, +{0.125000, 0.500000, -0.125000}, +{0.250000, 0.500000, -0.125000}, +{0.375000, 0.500000, -0.125000}, +{0.500000, 0.500000, -0.125000}, +{-0.500000, -0.500000, 0.000000}, +{-0.375000, -0.500000, 0.000000}, +{-0.250000, -0.500000, 0.000000}, +{-0.125000, -0.500000, 0.000000}, +{0.000000, -0.500000, 0.000000}, +{0.125000, -0.500000, 0.000000}, +{0.250000, -0.500000, 0.000000}, +{0.375000, -0.500000, 0.000000}, +{0.500000, -0.500000, 0.000000}, +{-0.500000, -0.375000, 0.000000}, +{-0.375000, -0.375000, 0.000000}, +{-0.250000, -0.375000, 0.000000}, +{-0.125000, -0.375000, 0.000000}, +{0.000000, -0.375000, 0.000000}, +{0.125000, -0.375000, 0.000000}, +{0.250000, -0.375000, 0.000000}, +{0.375000, -0.375000, 0.000000}, +{0.500000, -0.375000, 0.000000}, +{-0.500000, -0.250000, 0.000000}, +{-0.375000, -0.250000, 0.000000}, +{-0.250000, -0.250000, 0.000000}, +{-0.125000, -0.250000, 0.000000}, 
+{0.000000, -0.250000, 0.000000}, +{0.125000, -0.250000, 0.000000}, +{0.250000, -0.250000, 0.000000}, +{0.375000, -0.250000, 0.000000}, +{0.500000, -0.250000, 0.000000}, +{-0.500000, -0.125000, 0.000000}, +{-0.375000, -0.125000, 0.000000}, +{-0.250000, -0.125000, 0.000000}, +{-0.125000, -0.125000, 0.000000}, +{0.000000, -0.125000, 0.000000}, +{0.125000, -0.125000, 0.000000}, +{0.250000, -0.125000, 0.000000}, +{0.375000, -0.125000, 0.000000}, +{0.500000, -0.125000, 0.000000}, +{-0.500000, 0.000000, 0.000000}, +{-0.375000, 0.000000, 0.000000}, +{-0.250000, 0.000000, 0.000000}, +{-0.125000, 0.000000, 0.000000}, +{0.000000, 0.000000, 0.000000}, +{0.125000, 0.000000, 0.000000}, +{0.250000, 0.000000, 0.000000}, +{0.375000, 0.000000, 0.000000}, +{0.500000, 0.000000, 0.000000}, +{-0.500000, 0.125000, 0.000000}, +{-0.375000, 0.125000, 0.000000}, +{-0.250000, 0.125000, 0.000000}, +{-0.125000, 0.125000, 0.000000}, +{0.000000, 0.125000, 0.000000}, +{0.125000, 0.125000, 0.000000}, +{0.250000, 0.125000, 0.000000}, +{0.375000, 0.125000, 0.000000}, +{0.500000, 0.125000, 0.000000}, +{-0.500000, 0.250000, 0.000000}, +{-0.375000, 0.250000, 0.000000}, +{-0.250000, 0.250000, 0.000000}, +{-0.125000, 0.250000, 0.000000}, +{0.000000, 0.250000, 0.000000}, +{0.125000, 0.250000, 0.000000}, +{0.250000, 0.250000, 0.000000}, +{0.375000, 0.250000, 0.000000}, +{0.500000, 0.250000, 0.000000}, +{-0.500000, 0.375000, 0.000000}, +{-0.375000, 0.375000, 0.000000}, +{-0.250000, 0.375000, 0.000000}, +{-0.125000, 0.375000, 0.000000}, +{0.000000, 0.375000, 0.000000}, +{0.125000, 0.375000, 0.000000}, +{0.250000, 0.375000, 0.000000}, +{0.375000, 0.375000, 0.000000}, +{0.500000, 0.375000, 0.000000}, +{-0.500000, 0.500000, 0.000000}, +{-0.375000, 0.500000, 0.000000}, +{-0.250000, 0.500000, 0.000000}, +{-0.125000, 0.500000, 0.000000}, +{0.000000, 0.500000, 0.000000}, +{0.125000, 0.500000, 0.000000}, +{0.250000, 0.500000, 0.000000}, +{0.375000, 0.500000, 0.000000}, +{0.500000, 0.500000, 0.000000}, +{-0.500000, 
-0.500000, 0.125000}, +{-0.375000, -0.500000, 0.125000}, +{-0.250000, -0.500000, 0.125000}, +{-0.125000, -0.500000, 0.125000}, +{0.000000, -0.500000, 0.125000}, +{0.125000, -0.500000, 0.125000}, +{0.250000, -0.500000, 0.125000}, +{0.375000, -0.500000, 0.125000}, +{0.500000, -0.500000, 0.125000}, +{-0.500000, -0.375000, 0.125000}, +{-0.375000, -0.375000, 0.125000}, +{-0.250000, -0.375000, 0.125000}, +{-0.125000, -0.375000, 0.125000}, +{0.000000, -0.375000, 0.125000}, +{0.125000, -0.375000, 0.125000}, +{0.250000, -0.375000, 0.125000}, +{0.375000, -0.375000, 0.125000}, +{0.500000, -0.375000, 0.125000}, +{-0.500000, -0.250000, 0.125000}, +{-0.375000, -0.250000, 0.125000}, +{-0.250000, -0.250000, 0.125000}, +{-0.125000, -0.250000, 0.125000}, +{0.000000, -0.250000, 0.125000}, +{0.125000, -0.250000, 0.125000}, +{0.250000, -0.250000, 0.125000}, +{0.375000, -0.250000, 0.125000}, +{0.500000, -0.250000, 0.125000}, +{-0.500000, -0.125000, 0.125000}, +{-0.375000, -0.125000, 0.125000}, +{-0.250000, -0.125000, 0.125000}, +{-0.125000, -0.125000, 0.125000}, +{0.000000, -0.125000, 0.125000}, +{0.125000, -0.125000, 0.125000}, +{0.250000, -0.125000, 0.125000}, +{0.375000, -0.125000, 0.125000}, +{0.500000, -0.125000, 0.125000}, +{-0.500000, 0.000000, 0.125000}, +{-0.375000, 0.000000, 0.125000}, +{-0.250000, 0.000000, 0.125000}, +{-0.125000, 0.000000, 0.125000}, +{0.000000, 0.000000, 0.125000}, +{0.125000, 0.000000, 0.125000}, +{0.250000, 0.000000, 0.125000}, +{0.375000, 0.000000, 0.125000}, +{0.500000, 0.000000, 0.125000}, +{-0.500000, 0.125000, 0.125000}, +{-0.375000, 0.125000, 0.125000}, +{-0.250000, 0.125000, 0.125000}, +{-0.125000, 0.125000, 0.125000}, +{0.000000, 0.125000, 0.125000}, +{0.125000, 0.125000, 0.125000}, +{0.250000, 0.125000, 0.125000}, +{0.375000, 0.125000, 0.125000}, +{0.500000, 0.125000, 0.125000}, +{-0.500000, 0.250000, 0.125000}, +{-0.375000, 0.250000, 0.125000}, +{-0.250000, 0.250000, 0.125000}, +{-0.125000, 0.250000, 0.125000}, +{0.000000, 0.250000, 0.125000}, 
+{0.125000, 0.250000, 0.125000}, +{0.250000, 0.250000, 0.125000}, +{0.375000, 0.250000, 0.125000}, +{0.500000, 0.250000, 0.125000}, +{-0.500000, 0.375000, 0.125000}, +{-0.375000, 0.375000, 0.125000}, +{-0.250000, 0.375000, 0.125000}, +{-0.125000, 0.375000, 0.125000}, +{0.000000, 0.375000, 0.125000}, +{0.125000, 0.375000, 0.125000}, +{0.250000, 0.375000, 0.125000}, +{0.375000, 0.375000, 0.125000}, +{0.500000, 0.375000, 0.125000}, +{-0.500000, 0.500000, 0.125000}, +{-0.375000, 0.500000, 0.125000}, +{-0.250000, 0.500000, 0.125000}, +{-0.125000, 0.500000, 0.125000}, +{0.000000, 0.500000, 0.125000}, +{0.125000, 0.500000, 0.125000}, +{0.250000, 0.500000, 0.125000}, +{0.375000, 0.500000, 0.125000}, +{0.500000, 0.500000, 0.125000}, +{-0.500000, -0.500000, 0.250000}, +{-0.375000, -0.500000, 0.250000}, +{-0.250000, -0.500000, 0.250000}, +{-0.125000, -0.500000, 0.250000}, +{0.000000, -0.500000, 0.250000}, +{0.125000, -0.500000, 0.250000}, +{0.250000, -0.500000, 0.250000}, +{0.375000, -0.500000, 0.250000}, +{0.500000, -0.500000, 0.250000}, +{-0.500000, -0.375000, 0.250000}, +{-0.375000, -0.375000, 0.250000}, +{-0.250000, -0.375000, 0.250000}, +{-0.125000, -0.375000, 0.250000}, +{0.000000, -0.375000, 0.250000}, +{0.125000, -0.375000, 0.250000}, +{0.250000, -0.375000, 0.250000}, +{0.375000, -0.375000, 0.250000}, +{0.500000, -0.375000, 0.250000}, +{-0.500000, -0.250000, 0.250000}, +{-0.375000, -0.250000, 0.250000}, +{-0.250000, -0.250000, 0.250000}, +{-0.125000, -0.250000, 0.250000}, +{0.000000, -0.250000, 0.250000}, +{0.125000, -0.250000, 0.250000}, +{0.250000, -0.250000, 0.250000}, +{0.375000, -0.250000, 0.250000}, +{0.500000, -0.250000, 0.250000}, +{-0.500000, -0.125000, 0.250000}, +{-0.375000, -0.125000, 0.250000}, +{-0.250000, -0.125000, 0.250000}, +{-0.125000, -0.125000, 0.250000}, +{0.000000, -0.125000, 0.250000}, +{0.125000, -0.125000, 0.250000}, +{0.250000, -0.125000, 0.250000}, +{0.375000, -0.125000, 0.250000}, +{0.500000, -0.125000, 0.250000}, +{-0.500000, 0.000000, 
0.250000}, +{-0.375000, 0.000000, 0.250000}, +{-0.250000, 0.000000, 0.250000}, +{-0.125000, 0.000000, 0.250000}, +{0.000000, 0.000000, 0.250000}, +{0.125000, 0.000000, 0.250000}, +{0.250000, 0.000000, 0.250000}, +{0.375000, 0.000000, 0.250000}, +{0.500000, 0.000000, 0.250000}, +{-0.500000, 0.125000, 0.250000}, +{-0.375000, 0.125000, 0.250000}, +{-0.250000, 0.125000, 0.250000}, +{-0.125000, 0.125000, 0.250000}, +{0.000000, 0.125000, 0.250000}, +{0.125000, 0.125000, 0.250000}, +{0.250000, 0.125000, 0.250000}, +{0.375000, 0.125000, 0.250000}, +{0.500000, 0.125000, 0.250000}, +{-0.500000, 0.250000, 0.250000}, +{-0.375000, 0.250000, 0.250000}, +{-0.250000, 0.250000, 0.250000}, +{-0.125000, 0.250000, 0.250000}, +{0.000000, 0.250000, 0.250000}, +{0.125000, 0.250000, 0.250000}, +{0.250000, 0.250000, 0.250000}, +{0.375000, 0.250000, 0.250000}, +{0.500000, 0.250000, 0.250000}, +{-0.500000, 0.375000, 0.250000}, +{-0.375000, 0.375000, 0.250000}, +{-0.250000, 0.375000, 0.250000}, +{-0.125000, 0.375000, 0.250000}, +{0.000000, 0.375000, 0.250000}, +{0.125000, 0.375000, 0.250000}, +{0.250000, 0.375000, 0.250000}, +{0.375000, 0.375000, 0.250000}, +{0.500000, 0.375000, 0.250000}, +{-0.500000, 0.500000, 0.250000}, +{-0.375000, 0.500000, 0.250000}, +{-0.250000, 0.500000, 0.250000}, +{-0.125000, 0.500000, 0.250000}, +{0.000000, 0.500000, 0.250000}, +{0.125000, 0.500000, 0.250000}, +{0.250000, 0.500000, 0.250000}, +{0.375000, 0.500000, 0.250000}, +{0.500000, 0.500000, 0.250000}, +{-0.500000, -0.500000, 0.375000}, +{-0.375000, -0.500000, 0.375000}, +{-0.250000, -0.500000, 0.375000}, +{-0.125000, -0.500000, 0.375000}, +{0.000000, -0.500000, 0.375000}, +{0.125000, -0.500000, 0.375000}, +{0.250000, -0.500000, 0.375000}, +{0.375000, -0.500000, 0.375000}, +{0.500000, -0.500000, 0.375000}, +{-0.500000, -0.375000, 0.375000}, +{-0.375000, -0.375000, 0.375000}, +{-0.250000, -0.375000, 0.375000}, +{-0.125000, -0.375000, 0.375000}, +{0.000000, -0.375000, 0.375000}, +{0.125000, -0.375000, 0.375000}, 
+{0.250000, -0.375000, 0.375000}, +{0.375000, -0.375000, 0.375000}, +{0.500000, -0.375000, 0.375000}, +{-0.500000, -0.250000, 0.375000}, +{-0.375000, -0.250000, 0.375000}, +{-0.250000, -0.250000, 0.375000}, +{-0.125000, -0.250000, 0.375000}, +{0.000000, -0.250000, 0.375000}, +{0.125000, -0.250000, 0.375000}, +{0.250000, -0.250000, 0.375000}, +{0.375000, -0.250000, 0.375000}, +{0.500000, -0.250000, 0.375000}, +{-0.500000, -0.125000, 0.375000}, +{-0.375000, -0.125000, 0.375000}, +{-0.250000, -0.125000, 0.375000}, +{-0.125000, -0.125000, 0.375000}, +{0.000000, -0.125000, 0.375000}, +{0.125000, -0.125000, 0.375000}, +{0.250000, -0.125000, 0.375000}, +{0.375000, -0.125000, 0.375000}, +{0.500000, -0.125000, 0.375000}, +{-0.500000, 0.000000, 0.375000}, +{-0.375000, 0.000000, 0.375000}, +{-0.250000, 0.000000, 0.375000}, +{-0.125000, 0.000000, 0.375000}, +{0.000000, 0.000000, 0.375000}, +{0.125000, 0.000000, 0.375000}, +{0.250000, 0.000000, 0.375000}, +{0.375000, 0.000000, 0.375000}, +{0.500000, 0.000000, 0.375000}, +{-0.500000, 0.125000, 0.375000}, +{-0.375000, 0.125000, 0.375000}, +{-0.250000, 0.125000, 0.375000}, +{-0.125000, 0.125000, 0.375000}, +{0.000000, 0.125000, 0.375000}, +{0.125000, 0.125000, 0.375000}, +{0.250000, 0.125000, 0.375000}, +{0.375000, 0.125000, 0.375000}, +{0.500000, 0.125000, 0.375000}, +{-0.500000, 0.250000, 0.375000}, +{-0.375000, 0.250000, 0.375000}, +{-0.250000, 0.250000, 0.375000}, +{-0.125000, 0.250000, 0.375000}, +{0.000000, 0.250000, 0.375000}, +{0.125000, 0.250000, 0.375000}, +{0.250000, 0.250000, 0.375000}, +{0.375000, 0.250000, 0.375000}, +{0.500000, 0.250000, 0.375000}, +{-0.500000, 0.375000, 0.375000}, +{-0.375000, 0.375000, 0.375000}, +{-0.250000, 0.375000, 0.375000}, +{-0.125000, 0.375000, 0.375000}, +{0.000000, 0.375000, 0.375000}, +{0.125000, 0.375000, 0.375000}, +{0.250000, 0.375000, 0.375000}, +{0.375000, 0.375000, 0.375000}, +{0.500000, 0.375000, 0.375000}, +{-0.500000, 0.500000, 0.375000}, +{-0.375000, 0.500000, 0.375000}, 
+{-0.250000, 0.500000, 0.375000}, +{-0.125000, 0.500000, 0.375000}, +{0.000000, 0.500000, 0.375000}, +{0.125000, 0.500000, 0.375000}, +{0.250000, 0.500000, 0.375000}, +{0.375000, 0.500000, 0.375000}, +{0.500000, 0.500000, 0.375000}, +{-0.500000, -0.500000, 0.500000}, +{-0.375000, -0.500000, 0.500000}, +{-0.250000, -0.500000, 0.500000}, +{-0.125000, -0.500000, 0.500000}, +{0.000000, -0.500000, 0.500000}, +{0.125000, -0.500000, 0.500000}, +{0.250000, -0.500000, 0.500000}, +{0.375000, -0.500000, 0.500000}, +{0.500000, -0.500000, 0.500000}, +{-0.500000, -0.375000, 0.500000}, +{-0.375000, -0.375000, 0.500000}, +{-0.250000, -0.375000, 0.500000}, +{-0.125000, -0.375000, 0.500000}, +{0.000000, -0.375000, 0.500000}, +{0.125000, -0.375000, 0.500000}, +{0.250000, -0.375000, 0.500000}, +{0.375000, -0.375000, 0.500000}, +{0.500000, -0.375000, 0.500000}, +{-0.500000, -0.250000, 0.500000}, +{-0.375000, -0.250000, 0.500000}, +{-0.250000, -0.250000, 0.500000}, +{-0.125000, -0.250000, 0.500000}, +{0.000000, -0.250000, 0.500000}, +{0.125000, -0.250000, 0.500000}, +{0.250000, -0.250000, 0.500000}, +{0.375000, -0.250000, 0.500000}, +{0.500000, -0.250000, 0.500000}, +{-0.500000, -0.125000, 0.500000}, +{-0.375000, -0.125000, 0.500000}, +{-0.250000, -0.125000, 0.500000}, +{-0.125000, -0.125000, 0.500000}, +{0.000000, -0.125000, 0.500000}, +{0.125000, -0.125000, 0.500000}, +{0.250000, -0.125000, 0.500000}, +{0.375000, -0.125000, 0.500000}, +{0.500000, -0.125000, 0.500000}, +{-0.500000, 0.000000, 0.500000}, +{-0.375000, 0.000000, 0.500000}, +{-0.250000, 0.000000, 0.500000}, +{-0.125000, 0.000000, 0.500000}, +{0.000000, 0.000000, 0.500000}, +{0.125000, 0.000000, 0.500000}, +{0.250000, 0.000000, 0.500000}, +{0.375000, 0.000000, 0.500000}, +{0.500000, 0.000000, 0.500000}, +{-0.500000, 0.125000, 0.500000}, +{-0.375000, 0.125000, 0.500000}, +{-0.250000, 0.125000, 0.500000}, +{-0.125000, 0.125000, 0.500000}, +{0.000000, 0.125000, 0.500000}, +{0.125000, 0.125000, 0.500000}, +{0.250000, 0.125000, 
0.500000}, +{0.375000, 0.125000, 0.500000}, +{0.500000, 0.125000, 0.500000}, +{-0.500000, 0.250000, 0.500000}, +{-0.375000, 0.250000, 0.500000}, +{-0.250000, 0.250000, 0.500000}, +{-0.125000, 0.250000, 0.500000}, +{0.000000, 0.250000, 0.500000}, +{0.125000, 0.250000, 0.500000}, +{0.250000, 0.250000, 0.500000}, +{0.375000, 0.250000, 0.500000}, +{0.500000, 0.250000, 0.500000}, +{-0.500000, 0.375000, 0.500000}, +{-0.375000, 0.375000, 0.500000}, +{-0.250000, 0.375000, 0.500000}, +{-0.125000, 0.375000, 0.500000}, +{0.000000, 0.375000, 0.500000}, +{0.125000, 0.375000, 0.500000}, +{0.250000, 0.375000, 0.500000}, +{0.375000, 0.375000, 0.500000}, +{0.500000, 0.375000, 0.500000}, +{-0.500000, 0.500000, 0.500000}, +{-0.375000, 0.500000, 0.500000}, +{-0.250000, 0.500000, 0.500000}, +{-0.125000, 0.500000, 0.500000}, +{0.000000, 0.500000, 0.500000}, +{0.125000, 0.500000, 0.500000}, +{0.250000, 0.500000, 0.500000}, +{0.375000, 0.500000, 0.500000}, +{0.500000, 0.500000, 0.500000}, +}; diff --git a/shaders/vs_cubes.sc b/shaders/vs_cubes.sc new file mode 100644 index 0000000..9b31a30 --- /dev/null +++ b/shaders/vs_cubes.sc @@ -0,0 +1,97 @@ +$input i_data0, i_data1, i_data2, i_data3 +$output tex_coord, world_pos + +#include "verts.sh" +#include "bgfx_shader.sh" +#include "shaderlib.sh" + +// ATTENTION must match config.h +// VertexID Layout for texturing (not used) +// Bits: 19 | 3 | 10 +// Data: texture index | texture corner | vertex index + +const uint vertex_index_bits = 10; +const uint vertex_index_mask = (1 << vertex_index_bits) - 1; +const uint texture_corner_bits = 3; +const uint texture_corner_mask = (1 << texture_corner_bits) - 1; +const uint texture_index_bits = 19; +const uint texture_index_mask = (1 << texture_index_bits) - 1; + +const uint TA_WIDTH = 2; // patches in atlas +const uint TA_HEIGHT = 2; // patches in atlas +const uint TA_MAIN_PATCH_SIZE = 8; // textures per patch +const uint TA_TEXTURE_SIZE = 4; // texels per texture +const uint TA_TEXTURES_PER_ROW = 
TA_WIDTH * TA_MAIN_PATCH_SIZE; +const uint TA_TEXTURES_PER_COL = TA_HEIGHT * TA_MAIN_PATCH_SIZE; + +const float texel_stride_x = 1.0f / (TA_WIDTH * TA_MAIN_PATCH_SIZE * TA_TEXTURE_SIZE); +const float texel_stride_y = 1.0f / (TA_HEIGHT * TA_MAIN_PATCH_SIZE * TA_TEXTURE_SIZE); +const float texture_stride_x = 1.0f / TA_TEXTURES_PER_ROW; +const float texture_stride_y = 1.0f / TA_TEXTURES_PER_ROW; + +const vec2 corner_offsets[8] = +{ +{ 0.000000, 0.000000}, // TopLeft +{ 0.000000, 2*texel_stride_y}, // CenterLeft +{ 0.000000, 4*texel_stride_y}, // BotLeft +{ 2*texel_stride_x, 4*texel_stride_y}, // CenterBot +{ 4*texel_stride_x, 4*texel_stride_y}, // BotRight +{ 4*texel_stride_x, 2*texel_stride_y}, // CenterRight +{ 4*texel_stride_x, 0.000000}, // TopRight +{ 2*texel_stride_x, 0.000000}, // CenterTop +}; + +// Texturing rework V2 +const uint IA_WIDTH = 2*8; // id's per row +const uint IA_HEIGHT = 2*8; // id's per column +const float ID_STRIDE_X = 1.0f / IA_WIDTH; +const float ID_STRIDE_Y = 1.0f / IA_HEIGHT; + +const vec2 ID_CORNER_OFFSETS[8] = +{ +{ 0.0, 0.0}, // TopLeft +{ 0.0, 0.5*ID_STRIDE_Y}, // CenterLeft +{ 0.0, ID_STRIDE_Y}, // BotLeft +{ 0.5*ID_STRIDE_X, ID_STRIDE_Y}, // CenterBot +{ ID_STRIDE_X, ID_STRIDE_Y}, // BotRight +{ ID_STRIDE_X, 0.5*ID_STRIDE_Y}, // CenterRight +{ ID_STRIDE_X, 0.0}, // TopRight +{ 0.5*ID_STRIDE_X, 0.0}, // CenterTop +}; + +void main() +{ + uint index = uint(gl_VertexID); // gl_VertexIndex ?? 
+ + // Calculate vertex coordinates + uint vert_index = index & vertex_index_mask; + mat4 model_mtx = mtxFromCols(i_data0, i_data1, i_data2, i_data3); + vec4 worldPos = mul(model_mtx, vec4(verts[vert_index], 1.0)); + gl_Position = mul(u_viewProj, worldPos); + + world_pos = (worldPos.xyz); // pass world pos to fs + + // // Calculate texture coordinates + // uint texture_corner = (index >> vertex_index_bits) & texture_corner_mask; + // uint texture_index = (index >> (texture_corner_bits + vertex_index_bits)) & texture_index_mask; + + // uint texture_x = texture_index % TA_TEXTURES_PER_ROW; + // uint texture_y = texture_index / TA_TEXTURES_PER_ROW; + + // tex_coord = vec2(texture_x * texture_stride_x, texture_y * texture_stride_y); + // tex_coord = tex_coord + corner_offsets[texture_corner]; + + // Texturing rework V2 + // split index bits + uint texture_corner = (index >> vertex_index_bits) & texture_corner_mask; + uint id_index = (index >> (texture_corner_bits + vertex_index_bits)) & texture_index_mask; + + // Calc id_coords (in id_atlas, for sampling [tex_id,light_id]) + // in [0;ID_ATLAS_SIZE] + uint id_x = id_index % IA_WIDTH; + uint id_y = id_index / IA_HEIGHT; + + // TODO rename tex_coord->id_coord + tex_coord = vec2(id_x * ID_STRIDE_X, id_y * ID_STRIDE_Y); + tex_coord = tex_coord + ID_CORNER_OFFSETS[texture_corner]; +} diff --git a/shaders/vs_lines.sc b/shaders/vs_lines.sc new file mode 100644 index 0000000..68a9e0d --- /dev/null +++ b/shaders/vs_lines.sc @@ -0,0 +1,9 @@ +$input a_position + +#include "bgfx_shader.sh" +#include "shaderlib.sh" + +void main() +{ + gl_Position = mul(u_modelViewProj, vec4(a_position, 1.0) ); +} diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt new file mode 100644 index 0000000..a3ca4ec --- /dev/null +++ b/src/CMakeLists.txt @@ -0,0 +1,74 @@ + +add_executable(spacegame + "factory.cpp" + "gameloop.cpp" + "graphics.cpp" + "lib/camera.cpp" + "lib/input.cpp" + "lib/stb_image.cpp" + "main.cpp" + "net/debug_server.cpp" + 
"net/network.cpp" + "renderer.cpp" + "space_input.cpp" + "space_math.cpp" + "util.cpp" + "world.cpp" + +) + +if(SPACEGAME_BUILD_SHADERS) + add_dependencies(spacegame shaders) +endif() + +set_target_properties(spacegame PROPERTIES + OUTPUT_NAME spacegame + CXX_STANDARD 20 + CXX_STANDARD_REQUIRED YES + CXX_EXTENSIONS NO + ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin/x86_64/$ + LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin/x86_64/$ + RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin/x86_64/$ +) + +target_compile_definitions(spacegame PRIVATE + $<$:_DEBUG> +) + +# -m64 - Build for a 64-bit machine +# -fno-exceptions - Disable exceptions (throw exception; => abort()) +# -fno-rtti - Disable runtime type information (no reflection) +target_compile_options(spacegame PRIVATE + -m64 + -fno-exceptions + -fno-rtti +# ${SPACEGAME_COMPILER_WARNINGS} +) + +target_include_directories(spacegame PRIVATE + ${PROJECT_SOURCE_DIR}/src + ${PROJECT_SOURCE_DIR}/src/data + ${PROJECT_SOURCE_DIR}/src/lib + ${PROJECT_SOURCE_DIR}/src/net + ${PROJECT_SOURCE_DIR}/3rdparty/stb +) + +target_link_libraries(spacegame PRIVATE + glfw + bgfx::bx + bgfx::bimg + ${BGFX} + Threads::Threads +) + + +################## release ################### + +# Strip binary for release builds +if(CMAKE_BUILD_TYPE STREQUAL "Release" OR CMAKE_BUILD_TYPE STREQUAL "MinSizeRel") + add_custom_command(TARGET spacegame POST_BUILD + COMMAND echo "Stripping game executable" + COMMAND ${CMAKE_STRIP} --strip-all $ + VERBATIM + ) +endif() diff --git a/src/config.h b/src/config.h new file mode 100644 index 0000000..61a7298 --- /dev/null +++ b/src/config.h @@ -0,0 +1,72 @@ +#pragma once + +#include + +/// Constants are used in many places +/// For ease of use we collect them all here + +// TODO find reasonable numbers for these (take real use case measurements) +// how often do buffers need to grow? Are the initial values to small? too big? 
namespace config
{
    // Minimum window dimensions enforced by the platform layer.
    constexpr uint32_t MINIMAL_WINDOW_WIDTH = 800;
    constexpr uint32_t MINIMAL_WINDOW_HEIGHT = 450;

    enum LogLevel {
        LOG_TRACE,
        LOG_DEBUG,
        LOG_INFO,
        LOG_WARNING,
        LOG_ERROR,
    };
#ifdef NDEBUG
    // in release mode
    // FIX: 'inline' instead of 'static' — with 'static' every translation unit
    // received its own private copy of this runtime-mutable variable, so a
    // change made in one TU was invisible everywhere else. A C++17 inline
    // variable has exactly one instance program-wide.
    inline LogLevel LOG_LEVEL = LOG_INFO;
#else
    // in debug mode
    inline LogLevel LOG_LEVEL = LOG_DEBUG;
#endif

    // TODO find reasonable numbers for these (take real use case measurements)
    // how often do buffers need to grow? Are the initial values too small? too big?
    constexpr uint32_t INITIAL_NUM_GRIDS = 64;
    constexpr uint32_t INITIAL_NUM_CHUNKS = 1024;
    constexpr uint32_t INITIAL_NUM_BLOCKS = 1024 * 8;            // all blocks in view distance at a time
    constexpr uint32_t INITIAL_NUM_COMPONENTS = 16 * 24;         // components are generated in all 24 orientations
    constexpr uint32_t INITIAL_NUM_COMPONENT_TEXTURES = INITIAL_NUM_COMPONENTS * 6;
    constexpr uint32_t INITIAL_NUM_BLOCK_MODELS = 256;
    constexpr uint32_t AVERAGE_BLOCK_MODEL_SIZE = 64;            // average number of indices in a block model
    constexpr uint32_t INITIAL_BLOCK_MODEL_BUFFER_SIZE = INITIAL_NUM_BLOCK_MODELS * AVERAGE_BLOCK_MODEL_SIZE; // number of uint32_t indices
    constexpr uint32_t INITIAL_BLOCK_SELECTION = 1024;           // per frame visible and rendered blocks
    constexpr uint32_t INITIAL_LINE_BUFFER_SIZE = INITIAL_BLOCK_MODEL_BUFFER_SIZE * 2; // *2 since a line consists of 2 vertices

    /* Texture atlas */
    // Vulkan spec 1.3 defines a minimum of 4096 for device cap maxImageDimension2D
    // TODO actually query device caps

    // ATTENTION must match shader
    // (constexpr instead of plain const: usable in static_assert / array sizes,
    //  and guaranteed compile-time constants)
    constexpr uint32_t TA_WIDTH = 2;              // patches in atlas
    constexpr uint32_t TA_HEIGHT = 2;             // patches in atlas
    constexpr uint32_t TA_MAIN_PATCH_SIZE = 8;    // textures per patch
    constexpr uint32_t TA_TEXTURE_SIZE = 4;       // texel width per texture (aka how many texels per component)
    constexpr uint32_t TA_INITIAL_NUMBER_PATCHES = 512; // TODO find reasonable default

    constexpr uint32_t TA_TEXTURES_PER_ROW = TA_WIDTH * TA_MAIN_PATCH_SIZE;    // PERF should be power of 2 // per texture row
    constexpr uint32_t TA_TEXELS_PER_ROW = TA_TEXTURES_PER_ROW * TA_TEXTURE_SIZE; // per texel row
    constexpr uint32_t TA_TEXELS_PER_TEXTURE = TA_TEXTURE_SIZE * TA_TEXTURE_SIZE;
    constexpr uint32_t TA_TOTAL_TEXTURES = TA_WIDTH * TA_HEIGHT * TA_MAIN_PATCH_SIZE * TA_MAIN_PATCH_SIZE;
    constexpr uint32_t TA_TOTAL_TEXELS = TA_TOTAL_TEXTURES * TA_TEXELS_PER_TEXTURE;

    // at least 8, since a full block must fit (and blocks have 8x8 components)
    static_assert(TA_MAIN_PATCH_SIZE >= 8);
    // shader can only address 2^19 textures
    static_assert(TA_WIDTH * TA_HEIGHT * TA_MAIN_PATCH_SIZE * TA_MAIN_PATCH_SIZE < (1 << 19));

    // Runtime-toggleable debug switches.
    // FIX: 'inline' for the same one-instance-program-wide reason as LOG_LEVEL.
    inline uint64_t DEBUG_EXIT_AFTER_FRAME = 0; // set to 0 to disable
    inline bool DEBUG_SAVE_TEXTURE_ATLAS_TO_PNG = false;
    inline bool DEBUG_TRACE_LOG_FOREVER_PATCHES = true;
    inline bool DEBUG_TRACE_LOG_FIRST_FIT_BUFFER = false;
    inline bool DEBUG_TRACE_LOG_LINEAR_BUFFER = false;
    inline bool DEBUG_TRACE_LOG_SLOT_BUFFER = false;
    inline bool DEBUG_TRACE_LOG_SLOT_LIST = false;
}

// ---------------------------------------------------------------------------
// src/data/chunk_storage.h — disabled draft, preserved as-is (commented out).
// The active implementation lives in chunk_storage_old.h.
// ---------------------------------------------------------------------------
/*
#pragma once

#include "data/offset_vector.h"
#include <cstdint>

class ChunkStorage
{
private:
    OffsetVector<OffsetVector<OffsetVector<uint32_t>>> data;

public:
    ChunkStorage() = default;
    ~ChunkStorage() = default;

    void insert(const uint32_t _x, const uint32_t _y, const uint32_t _z, const uint32_t _element)
    {
        logErr("ChunkStorage.insert is not implemented\n");
    }

    uint32_t at(const uint32_t _x, const uint32_t _y, const uint32_t _z)
    {
        logErr("ChunkStorage.at is not implemented\n");
        return 0;
    }

    void remove(const uint32_t _x, const uint32_t _y, const uint32_t _z)
    {
        logErr("ChunkStorage.remove is not implemented\n");
    }

    void clean()
    {
        logErr("ChunkStorage.clean is not implemented\n");
        // TODO shrink all offset vectors
    }
};
*/
b/src/data/chunk_storage_old.h @@ -0,0 +1,59 @@ +#pragma once + +#include "data/offset_vector.h" +#include + +class ChunkStorage +{ +private: + OffsetVector>> data; + +public: + ChunkStorage() = default; + ~ChunkStorage() = default; + + void insert(const uint32_t _x, const uint32_t _y, const uint32_t _z, const uint32_t _element) + { + data.reserve_at(_x).reserve_at(_y).reserve_at(_z) = _element; + } + + uint32_t at(const uint32_t _x, const uint32_t _y, const uint32_t _z) + { + return data.get_at(_x).get_at(_y).get_at(_z); + } + + void remove(const uint32_t _x, const uint32_t _y, const uint32_t _z) + { + auto& x = data; + auto& y = x.get_at(_x); + auto& z = y.get_at(_y); + + z.remove_at(_z); + if (z.size() == 0) + { + y.remove_at(_y); + if (y.size() == 0) + { + x.remove_at(_x); + } + } + + } + + void clean() + { + logErr("ChunkStorage.clean is not implemented\n"); + // TODO shrink all offset vectors + } +}; + + +// has to be some kind of 3D array +// only stores uint32_t + +// chunks need to be accessible by coordinates (x,y,z) +// should leave slots temporarily available +// only grow, do not shrink on remove +// only shrink on occasional clean() +// has to support negative offsets + diff --git a/src/data/first_fit_buffer.h b/src/data/first_fit_buffer.h new file mode 100644 index 0000000..8f9aee0 --- /dev/null +++ b/src/data/first_fit_buffer.h @@ -0,0 +1,202 @@ +#pragma once + +#include +#include +#include "util.h" +#include "config.h" + +/// Provides first fit memory allocation +/// On demand defragmentation (if no more memory available) +/// Auto growing (if no more memory, even after defragmentation) +/// Auto updating of block references + +template +class FirstFitBuffer +{ +private: + const uint32_t OFFSET_FREE_MARKER = UINT32_C(1 << 31); + const uint32_t DEFRAGMENTATION_THRESHOLD = UINT32_MAX; // UINT32_C(1); // TODO find reasonable number, percentage? + std::vector buffer; + std::vector offsets; // holds all offsets, incl. 
the next free one + uint32_t num_free_elements; // removed sections that can be reclaimed by defragmentation + + uint32_t modified_from; // inclusive + uint32_t modified_to; // !! exclusive + + void defrag() + { + if (config::DEBUG_TRACE_LOG_FIRST_FIT_BUFFER) + logTrace("first_fit_buffer running defragmentation\n"); + std::vector ref_mapping(offsets.size(), UINT32_MAX); + + uint32_t src_offset = 0; + uint32_t dst_offset = 0; + uint32_t num_elements = 0; + + uint32_t offset = 0; + uint32_t next_offset = 0; + uint32_t section_size = 0; + + uint32_t last_ref = 0; + + // walk offsets + // on normal offset, increase num elements to copy + // on free, actually copy data + + for (uint32_t ref = 0; ref < offsets.size() - 1; ref++) + { + offset = offsets[ref] & ~OFFSET_FREE_MARKER; + next_offset = offsets[(size_t)ref + 1] & ~OFFSET_FREE_MARKER; + + if (offsets[ref] & OFFSET_FREE_MARKER) + { // we hit a free section => copy anything up to this point + std::memmove( + (void*)(buffer.data() + dst_offset), + (void*)(buffer.data() + src_offset), + sizeof(T) * num_elements); + + src_offset = next_offset; + dst_offset += num_elements; + num_elements = 0; + } + else + { // is an in-use section => just add its size for next copy + section_size = next_offset - offset; // num elements at this offset + num_elements += section_size; + + ref_mapping[ref /*orig ref*/] = last_ref /*new ref*/; + + // update offsets + offsets[last_ref] = offset - (src_offset - dst_offset); + last_ref++; + } + + } + + // copy last section (if any) + std::memmove( + (void*)(buffer.data() + dst_offset), + (void*)(buffer.data() + src_offset), + sizeof(T) * num_elements); + + // shrink offsets + offsets[last_ref] = offsets[(size_t)last_ref - 1] + section_size; + last_ref++; + offsets.resize(last_ref); + + num_free_elements = 0; + + modified_from = 0; + modified_to = offsets.back(); + + // At this point things have changed: + // buffer: data is now at difference offsets (though size remains the same) + // offsets: 
the ref into the offsets have changed (=> mapping) + + // TODO update + // walk all world_blocks + // update their referenced blocks offsets + // update their gfx_ref with the mapping + logErr("first_fit_buffer defragmentation: mapping update not implemented\n"); + } + +public: + FirstFitBuffer(const uint32_t _initial_size) : + buffer(_initial_size), offsets(0), num_free_elements(0), + modified_from(UINT32_MAX), modified_to(0) + { + // add first free offset + offsets.push_back(0); + } + + ~FirstFitBuffer() = default; + + // Returns reference NOT offset into buffer + uint32_t add(const T* _data, const uint32_t _num_elements) + { + uint32_t offset = offsets.back(); + if (offset + _num_elements > (uint32_t)buffer.size()) + { // not enough space + + // defragmentation worth it? + uint32_t total_free_elements = num_free_elements + ((uint32_t)buffer.size() - offsets.back()); + if (total_free_elements >= _num_elements && + total_free_elements >= DEFRAGMENTATION_THRESHOLD) + { + defrag(); + // we know that there is enough space now + return add(_data, _num_elements); // tail recursive call + } + else + { // defrag not sufficient or not worth it => grow the buffer + if (config::DEBUG_TRACE_LOG_FIRST_FIT_BUFFER) + logTrace("first_fit_buffer resizing from %d to %d\n", buffer.size(), buffer.size() * 2); + // notify caller about change in size + modified_from = (uint32_t)buffer.size(); + buffer.resize(2 * buffer.size()); + modified_to = (uint32_t)buffer.size(); + // we know that there is enough space now + return add(_data, _num_elements); // tail recursive call + } + } + + // copy data + std::memcpy(buffer.data() + offset, _data, sizeof(T) * _num_elements); + + modified_from = std::min(modified_from, offset); + + // add next offset + offsets.push_back(offset + _num_elements); + + modified_to = std::max(modified_to, offsets.back()); + + return (uint32_t)offsets.size() - 2; + } + + // Attention: new data MUST be of equal _num_elements as original + // Returns given _ref + 
uint32_t update(const uint32_t _ref, const T* _data) + { + uint32_t offset, num_elements; + get_offset(_ref, offset, num_elements); + + // copy data + std::memcpy(buffer.data() + offset, _data, sizeof(T) * num_elements); + + modified_from = std::min(modified_from, offset); + modified_to = std::max(modified_to, offset + num_elements); + + return _ref; + } + + void remove(const uint32_t _ref) + { + num_free_elements += ((offsets[(size_t)_ref + 1] & ~OFFSET_FREE_MARKER) - offsets[_ref]); + // just mark offset as free + offsets[_ref] |= OFFSET_FREE_MARKER; + } + + void get_offset(const uint32_t _ref, uint32_t& _out_offset, uint32_t& _out_num_elements) + { + _out_offset = offsets[_ref]; + _out_num_elements = offsets[(size_t)_ref + 1] - _out_offset; + } + + bool wasModified(uint32_t& _out_offset, uint32_t& _out_num_elements) + { + _out_offset = modified_from; + _out_num_elements = modified_to - modified_from; + return modified_to > modified_from; + } + + void clearModified() + { + modified_from = UINT32_MAX; + modified_to = 0; + } + + T* data() + { + return buffer.data(); + } +}; \ No newline at end of file diff --git a/src/data/linear_buffer.h b/src/data/linear_buffer.h new file mode 100644 index 0000000..c98685b --- /dev/null +++ b/src/data/linear_buffer.h @@ -0,0 +1,93 @@ +#pragma once + +#include +#include "config.h" + +template +class LinearBuffer // aka transient buffer +{ +private: + std::vector buffer; + uint16_t num_elements; // bgfx only allows up to 16-bit number of elements + uint16_t modified_size; + bool modified; + bool resized; + +public: + LinearBuffer(const uint32_t _initial_size) : + buffer(_initial_size), num_elements(0), modified_size(0), modified(false), resized(false) + { } + + ~LinearBuffer() = default; + + // TODO support multi-threading + // Reserves a number of elements, that MUST be filled (use update(..)) + // Returns an offset to be passed to update(..) 
+ uint32_t reserve(uint32_t _num_slots) + { + modified = true; + uint32_t offset = num_elements; + + // sanity check + if ((uint32_t)num_elements + _num_slots > UINT16_MAX) + { + logErr("linear_buffer elements is greater than uint16. This will now break things.\n"); + } + + num_elements += _num_slots; + + if (num_elements > buffer.size()) + { // need to grow + if (config::DEBUG_TRACE_LOG_LINEAR_BUFFER) + logTrace("linear_buffer resizing from %d to %d\n", buffer.size(), 2 * buffer.size()); + buffer.resize(2 * buffer.size()); + // notify caller + modified_size = (uint32_t)buffer.size(); + resized = true; + } + + return offset; + } + + void update(uint32_t _offset, const T& _element) + { + modified = true; + buffer[_offset] = _element; + } + + // Adds an element at the end of the buffer + void add(const T& _element) + { + update(reserve(1), _element); + } + + uint16_t get_num_elements() + { + return num_elements; + } + + bool wasModified(uint32_t& _out_offset, uint32_t& _out_num_elements, bool& _out_resized) + { + _out_offset = 0; + _out_num_elements = std::max(num_elements, modified_size); + _out_resized = resized; + return modified; + } + + T* data() + { + return buffer.data(); + } + + // Clears entire buffer + void clear() + { + modified = false; + resized = false; + modified_size = 0; + num_elements = 0; + } +}; + + + diff --git a/src/data/offset_vector.h b/src/data/offset_vector.h new file mode 100644 index 0000000..2efe5de --- /dev/null +++ b/src/data/offset_vector.h @@ -0,0 +1,158 @@ +/*#pragma once + +#include +#include +#include "util.h" + +// A dynamic container, allowing negative indices and wrap around +// Note: indices alway centered around 0 (ie. 
min_index=5 max_index=8 still allocates [0;8]) +// Attention: The internal storage capacity will be at least [min_index; 0] + [0; max_index] + +template +class OffsetVector +{ +private: + std::vector data; + int32_t min_index; // smallest in-use index + int32_t max_index; + Ty invalid; + + // Calculates a wrap around index into data + int32_t wrap_index(int32_t _index) + { + //return (_index + data.size()) % data.size(); + return (_index + (int32_t)data.size()) & ((int32_t)data.size() - 1); // equal since size is always power of 2 + } + + void grow(uint32_t _required) + { + uint32_t size = (uint32_t)data.size(); + size = (size == 0) ? 1 : size; + + uint32_t old_size = size; + uint32_t old_wrap_min_index = wrap_index(min_index); // needs to be done before resize + + while (size < _required) + size *= 2; + + data.resize(size); + + // move data + // Note: elements from [0;max_index] always stay in the same location + // only elements from [min_index;data.size()-1] may need to be moved + + if (min_index < 0) + { + // we swap new "empty" elements from the back with the old ones that need moving + // back to front in order not to copy overlapping memory regions + // we use std::swap because std::move is not guaranteed to leave the old element in a + // valid state (in the case its a vector or so) + + uint32_t dst = (uint32_t)data.size(); + for (uint32_t src = old_size - 1; src >= old_wrap_min_index; --src) + { + --dst; + std::swap(data[src], data[dst]); + } + } + } + +public: + // When deleting an element, its slot is set to _invalid. + // This can be a default constructed object. 
+ OffsetVector(Ty _invalid = Ty()) : invalid(_invalid), min_index(0), max_index(0), data(0, _invalid) {}; + ~OffsetVector() = default; + + // reserves the give index + // Returns reference to the slot + Ty& reserve_at(const int32_t _index) + { + if (data.size() == 0) + { + grow(1); + } + + if (_index > max_index) + { // need to expand in positive direction + int32_t free = (int32_t)data.size() - (max_index - min_index + 1); + + if (_index > max_index + free) + { // need to grow + grow(_index - min_index + 1); + } + max_index = _index; // advance max_index + } + + if (_index < min_index) + { // need to expand in negative direction + int32_t free = (int32_t)data.size() - (max_index - min_index + 1); + + if (_index < min_index - free) + { // need to grow + grow(max_index - _index + 1); + } + min_index = _index; // advance min_index + } + + return data[wrap_index(_index)]; + } + + // Convenience function. Should probably use reserve_at + // May overwrite an existing element + void insert_at(const int32_t _index, const Ty _element) + { + reserve_at(_index) = _element; + } + + // May return a "invalid" aka not inserted element + Ty& get_at(const int32_t _index) + { + assert(_index >= min_index); + assert(_index <= max_index); + return data[wrap_index(_index)]; + } + + // Overwrites the element at _index with the _invalid + void remove_at(const int32_t _index) + { + assert(_index >= min_index); + assert(_index <= max_index); + // Note: we do not erase from vector, we just set min/max_index, and reset the element to _invalid + data[wrap_index(_index)] = invalid; + + if (_index == min_index) + { // we have to increase min_index + while (min_index < 0 && data[wrap_index(min_index)] == invalid) + { + ++min_index; + } + } + + if (_index == max_index) + { // we have to decrease max_index + while (max_index > 0 && data[wrap_index(max_index)] == invalid) + { + --max_index; + } + } + } + + uint32_t size() + { + //return (min_index == max_index && data[0] == invalid) ? 
0 : max_index - min_index + 1; + + if (min_index == max_index) + { + assert(min_index == 0); + return (data[0] != invalid) ? 1 : 0; + } + return max_index - min_index + 1; + } + + + void shrink_to_fit() + { // TODO + logErr("offset_vector.shrink_to_fit not implemented\n"); + } +}; +*/ diff --git a/src/data/queue.h b/src/data/queue.h new file mode 100644 index 0000000..b5d3eea --- /dev/null +++ b/src/data/queue.h @@ -0,0 +1,83 @@ +#pragma once + +#include +#include "util.h" + +// simple single-threaded queue + +template +class Queue +{ +private: + std::vector buffer; + size_t next_push; // points to next free slot to be pushed + size_t next_pop; // points to next data slot to be popped + size_t size_mask; + +public: + Queue(size_t _initial_size = 2) : + buffer(0), next_push(0), next_pop(0), size_mask(1) + { + // Enforce size is power of 2 + if (_initial_size < 2) { _initial_size = 2; } + size_t size = 2; + while (_initial_size > size) + { + size <<= 1; + size_mask = (size_mask << 1) | 0b1; + } + buffer.resize(size); + } + + ~Queue() = default; + + inline size_t wrap(size_t idx) { + return idx & size_mask; + //return idx % buffer.size(); + } + + void push(const T _t) + { + buffer[next_push] = _t; + + // check if theres is space available + if (wrap(next_push + 1) == next_pop) + { // no more space => need to grow + buffer.resize(buffer.size() << 1); + size_mask = (size_mask << 1) | 0b1; + // move potential wrapped data at the end + if (next_pop > next_push) + { + size_t src = (buffer.size() >> 1) - 1; // old size -1 + size_t dst = buffer.size() - 1; + while (src >= next_pop) + { + std::swap(buffer[dst], buffer[src]); + --dst; + --src; + } + next_pop = dst + 1; + } + + // advance next_push + next_push = wrap(next_push + 1); + + return; + } + + // advance next_push + next_push = wrap(next_push + 1); + } + + // returns true if at least one element is available + bool can_pop() { + return next_pop != next_push; + } + + T pop() + { + T t = buffer[next_pop]; + next_pop = 
wrap(next_pop + 1); + return t; + } +}; diff --git a/src/data/slot_buffer.h b/src/data/slot_buffer.h new file mode 100644 index 0000000..d388bd2 --- /dev/null +++ b/src/data/slot_buffer.h @@ -0,0 +1,120 @@ +#pragma once + +#include +#include +#include "util.h" +#include "config.h" + +// TODO specialization if T==int (or atleast sizeof(T) >= sizeof(int)) +// store freelist in-place in the buffer itself +// free points to 0 (aka invalid) => use net_free/offset to find next free slot +// on remove(5), point 5 to where ever free points, point free to 5 +// on add, just take the first value from free (or next_free if free==0) + + +// Auto growing buffer +// returns slot reference that stays valid until element is removed +// empty slots are reused + +template +class SlotBuffer +{ +private: + std::vector buffer; + uint32_t offset; // points to next free slot + std::vector free; + uint32_t modified_from; // inclusive + uint32_t modified_to; // inclusive + +public: + SlotBuffer(const uint32_t _initial_size) : + buffer(_initial_size), offset(0), free(0), + modified_from(UINT32_MAX), modified_to(0) + { + if (_initial_size == 0 || _initial_size == UINT32_MAX) { + std::abort(); + } + } + + ~SlotBuffer() = default; + + // Reserves a slot. Data MUST be initialized! + // Returns slot in _out_ref. + // Returns reference to element. 
+ T& add(uint32_t& _out_slot) + { + uint32_t slot = 0; + if (!free.empty()) + { // reuse slot + slot = free.back(); + free.pop_back(); + } + else if (offset < buffer.size()) + { // use new slot + slot = offset; + offset++; + } + else + { // no free slot => need to grow + if (config::DEBUG_TRACE_LOG_SLOT_BUFFER) + logTrace("slot_buffer resizing from %d to %d\n", buffer.size(), 2 * buffer.size()); + // notify caller about change in size + modified_from = (uint32_t)buffer.size(); + + buffer.resize(2 * buffer.size()); + + modified_to = (uint32_t)buffer.size() - 1; + + // now there is enough space + slot = offset; + offset++; + } + + modified_from = std::min(modified_from, slot); + modified_to = std::max(modified_to, slot); + + _out_slot = slot; + return buffer[slot]; + } + + // Provides direct access to the element in _slot + // Does NOT perform bound checks. + // Element will considered modified. + inline T& at(const uint32_t _slot) + { + modified_from = std::min(modified_from, _slot); + modified_to = std::max(modified_to, _slot); + + return buffer[_slot]; + } + + // Provides direct read-only access to the element in _slot + // Does NOT perform bound checks. 
+ inline const T& at_const(const uint32_t _slot) + { + return buffer[_slot]; + } + + void remove(const uint32_t _slot) + { + free.push_back(_slot); + } + + bool wasModified(uint32_t& _out_offset, uint32_t& _out_num_elements) + { + _out_offset = modified_from; + _out_num_elements = modified_to - modified_from + 1; + return modified_to >= modified_from; + } + + void clearModified() + { + modified_from = UINT32_MAX; + modified_to = 0; + } + + T* data() + { + return buffer.data(); + } +}; diff --git a/src/data/slot_list.h b/src/data/slot_list.h new file mode 100644 index 0000000..5c79d76 --- /dev/null +++ b/src/data/slot_list.h @@ -0,0 +1,196 @@ +#pragma once + +#include +#include +#include "util.h" +#include + +// Auto growing buffer +// returns slot reference that remains valid until element is removed +// removed slots are reused +// supports element iteration (in random order) + +template +class SlotList +{ +private: + typedef struct header { + uint32_t prev; + uint32_t next; + } Header; + + // contains links for every used slot to the next used slot + // or from a free slot to the next free slot + std::vector
header_buffer; + // contains the used data + std::vector data_buffer; + // points to the next available/free slot + uint32_t next_free; + uint32_t modified_from; // inclusive + uint32_t modified_to; // inclusive + + const uint32_t first_free_slot = 0; + const uint32_t first_used_slot = 1; + + // removes element at _slot from its current list (free or used) + void list_remove(const uint32_t _slot) { + assert(_slot != first_free_slot); + assert(_slot != first_used_slot); + assert(_slot != UINT32_MAX); + header_buffer[header_buffer[_slot].prev].next = header_buffer[_slot].next; + header_buffer[header_buffer[_slot].next].prev = header_buffer[_slot].prev; + } + // adds element at _slot to free list + void list_add_free(const uint32_t _slot) { + assert(_slot != first_free_slot); + assert(_slot != first_used_slot); + assert(_slot != UINT32_MAX); + header_buffer[_slot].next = header_buffer[first_free_slot].next; + header_buffer[_slot].prev = first_free_slot; + header_buffer[header_buffer[_slot].next].prev = _slot; + header_buffer[first_free_slot].next = _slot; + } + // adds element at _slot to used list + void list_add_used(const uint32_t _slot) { + assert(_slot != first_free_slot); + assert(_slot != first_used_slot); + assert(_slot != UINT32_MAX); + header_buffer[_slot].next = header_buffer[first_used_slot].next; + header_buffer[_slot].prev = first_used_slot; + header_buffer[header_buffer[_slot].next].prev = _slot; + header_buffer[first_used_slot].next = _slot; + } + +public: + SlotList(const uint32_t _initial_size) : + header_buffer(_initial_size), data_buffer(_initial_size), + next_free(2), modified_from(UINT32_MAX), modified_to(0) + { + if (_initial_size < 4 || _initial_size == UINT32_MAX) { + std::abort(); + } + + header_buffer[first_free_slot].prev = first_free_slot; + header_buffer[first_free_slot].next = first_free_slot; + header_buffer[first_used_slot].prev = first_used_slot; + header_buffer[first_used_slot].next = first_used_slot; + } + + ~SlotList() = default; + 
+ // Reserves a slot. Data is NOT automatically initialized! + // Returns slot in _out_ref. + // Returns reference to element. + // Element will considered modified. + T& add(uint32_t& _out_slot) + { + uint32_t slot = 0; + if (header_buffer[first_free_slot].next != first_free_slot) + { // reuse slot + slot = header_buffer[first_free_slot].next; + // remove from free list + list_remove(slot); + } + else if (next_free < data_buffer.size()) + { // use new slot + slot = next_free; + next_free++; + } + else + { // no free slot => need to grow + if (config::DEBUG_TRACE_LOG_SLOT_LIST) + logTrace("slot_list resizing from %d to %d\n", data_buffer.size(), 2 * data_buffer.size()); + // notify caller about change in size + modified_from = (uint32_t)data_buffer.size(); + + header_buffer.resize(2 * data_buffer.size()); + data_buffer.resize(2 * data_buffer.size()); + + modified_to = (uint32_t)data_buffer.size() - 1; + + // now there are free slots + slot = next_free; + next_free++; + } + + modified_from = std::min(modified_from, slot); + modified_to = std::max(modified_to, slot); + + // add to used list + list_add_used(slot); + + _out_slot = slot; + return data_buffer[slot]; + } + + // Provides direct access to the element in _slot + // Does NOT perform bound checks. + // Element will considered modified. + inline T& at(const uint32_t _slot) + { + modified_from = std::min(modified_from, _slot); + modified_to = std::max(modified_to, _slot); + + return data_buffer[_slot]; + } + + // Provides direct read-only access to the element in _slot + // Does NOT perform bound checks. 
// ---------------------------------------------------------------------------
// src/data/slot_queue.h
// ---------------------------------------------------------------------------
#pragma once

#include <vector>
#include <mutex>
#include <utility>

// Auto growing, slot-based queue over a power-of-two ring buffer.
// Attention: intended for exactly 1 producer and 1 consumer thread!
// NOTE(review): even with that restriction the design has sharp edges — a
// resize inside end_push() reallocates the buffer and invalidates any pointer
// the consumer obtained from begin_pop(); the indices are also read outside
// the mutex in begin_push()/begin_pop(). A fully safe version needs atomics
// or a fixed capacity — TODO confirm the producer/consumer usage pattern.
template <typename T>
class SlotQueue
{
private:
    std::vector<T> buffer;
    size_t next_push; // points to next free slot to be pushed
    size_t next_pop;  // points to next data slot to be popped
    size_t size_mask; // buffer.size() - 1, for cheap wrap-around
    std::mutex mutex;

public:
    SlotQueue(size_t _initial_size = 2) :
        buffer(0), next_push(0), next_pop(0), size_mask(1)
    {
        // Enforce size is power of 2 (minimum 2)
        if (_initial_size < 2) { _initial_size = 2; }
        size_t size = 2;
        while (_initial_size > size)
        {
            size <<= 1;
            size_mask = (size_mask << 1) | 0b1;
        }
        buffer.resize(size);
    }

    ~SlotQueue() = default;

    // Masking is equivalent to '% size' because the size is a power of two.
    inline size_t wrap(size_t idx) {
        return idx & size_mask;
    }

    // Returns a pointer to the next element at the end of the queue, to be produced
    // Returned pointer is never NULL
    T* begin_push()
    {
        return &buffer[next_push];
    }

    // Publishes last (with begin_push()) produced element
    void end_push()
    {
        // FIX: take the lock BEFORE inspecting next_pop — previously the
        // fullness check (and thus the grow decision) read next_pop without
        // synchronization and the lock was only acquired afterwards.
        std::lock_guard<std::mutex> guard(mutex);

        // check if there is space available
        if (wrap(next_push + 1) == next_pop)
        { // no more space => need to grow
            buffer.resize(buffer.size() << 1);
            size_mask = (size_mask << 1) | 0b1;
            // move potentially wrapped data to the end (back to front so
            // overlapping regions are never copied over)
            if (next_pop > next_push)
            {
                size_t src = (buffer.size() >> 1) - 1; // old size - 1
                size_t dst = buffer.size() - 1;
                while (src >= next_pop)
                {
                    std::swap(buffer[dst], buffer[src]);
                    --dst;
                    --src;
                }
                next_pop = dst + 1;
            }
        }

        // advance next_push
        next_push = wrap(next_push + 1);
    }

    // Returns pointer to the element at the top of the queue, to be consumed
    // Returns nullptr if there is no element in the queue
    // Can be used to just "peek" at the top element
    T* begin_pop()
    {
        return (next_pop != next_push) ? &buffer[next_pop] : nullptr;
    }

    // Releases last (with begin_pop()) consumed element
    // MUST only be called after a valid (non-null) 'begin_pop()'
    void end_pop()
    {
        std::lock_guard<std::mutex> guard(mutex);
        next_pop = wrap(next_pop + 1);
    }
};
&buffer[next_pop] : NULL; + } + + // Releases last (with begin_pop()) consumed element + // MUST only be called after a valid (non NULL) 'begin_pop()' + void end_pop() + { + mutex.lock(); + next_pop = wrap(next_pop + 1); + mutex.unlock(); + } +}; diff --git a/src/data/texel_buffer.h b/src/data/texel_buffer.h new file mode 100644 index 0000000..02bd0db --- /dev/null +++ b/src/data/texel_buffer.h @@ -0,0 +1,63 @@ +#pragma once + +#include +#include +#include "util.h" +#include "../space_math.h" +#include + +class TexelBuffer +{ +private: + std::vector buffer; + uint32_t modified_from; // inclusive + uint32_t modified_to; // inclusive + +public: + TexelBuffer(const uint32_t _size) : + buffer(_size), modified_from(UINT32_MAX), modified_to(0) + { + if (_size == 0) { + std::abort(); + } + } + + ~TexelBuffer() = default; + + // copies 4x4 texels from _data into the buffer at _texel_id + void update_texels(const uint32_t _texel_id, const Texel *_data) { + bx::memCopy(buffer.data() + _texel_id, + _data, + 4 * sizeof(Texel)); + bx::memCopy(buffer.data() + _texel_id + config::TA_TEXELS_PER_ROW, + _data + 4, + 4 * sizeof(Texel)); + bx::memCopy(buffer.data() + _texel_id + config::TA_TEXELS_PER_ROW * 2, + _data + 8, + 4 * sizeof(Texel)); + bx::memCopy(buffer.data() + _texel_id + config::TA_TEXELS_PER_ROW * 3, + _data + 12, + 4 * sizeof(Texel)); + + modified_from = std::min(modified_from, _texel_id); + modified_to = std::max(modified_to, _texel_id + config::TA_TEXELS_PER_ROW * 4); + } + + bool wasModified(uint32_t& _out_offset, uint32_t& _out_num_elements) + { + _out_offset = modified_from; + _out_num_elements = modified_to - modified_from + 1; + return modified_to >= modified_from; + } + + void clearModified() + { + modified_from = UINT32_MAX; + modified_to = 0; + } + + Texel* data() + { + return buffer.data(); + } +}; diff --git a/src/data/texture_atlas.h b/src/data/texture_atlas.h new file mode 100644 index 0000000..f0b247d --- /dev/null +++ b/src/data/texture_atlas.h @@ -0,0 
+1,463 @@ +#pragma once + +#include +#include +#include "util.h" +#include "assert.h" +#include "slot_list.h" +#include "../space_math.h" +#include "texel_buffer.h" +#include "../config.h" +#include "stb_image.h" +#include "stb_image_write.h" +#include "util.h" +#include "../3rdparty/emilib/hash_map.hpp" + +// Manages texturing data in a combined atlas +// all textures are stored in patches +// a patch is a region in the atlas +// a patch consists of 8x8 or smaller textures +// a texture consists of 4x4 texels +// each texture is addressed by a texture id + +// TODO add normal mapping + +// TODO OPT: Do not update entire rows of the atlas, but more localized blocks +// Ex.: Track modified main textures (sorted list), then when modified() is called, +// combine them into rectangular block. + +// TODO consider +// Sidenote: if we de-couple the actual tex_atlas and the id_atlas +// we can generate patches in id_atlas only, without loading onto gpu right away +// then later on-demand copy texture into tex_atlas (and thereby onto gpu) + +// Note to future self: TextureAtlas 2.0 +// If we ever want to upgrade this implementation we should try to do so by +// considering triangular textures instead of quad textures +// Large trigs waste precious atlas space +// Would need to consider double-trigs +// => Maybe keep current api and detect empty patches, then +// just increase patch ref count +// and add sub-patches to pool of free patches (ensure freeing works!) +// This would imply proper patch-parenting +// Add parent patch_id to each patch and replace main patch logic with this +// +// Note: This also breaks our hashing, except we no actually consider the trig shape somehow +// Maybe combine this sub-texture deduplication? 
+ +// Helper classes for the texture deduplication +struct TextureHash { + const uint16_t width = 0; + const uint16_t height = 0; + const uint32_t stride = 0; // way too big, but alignment + const uint32_t* texture_ids = nullptr; + + TextureHash() = default; + // stride describes how to index into texture_ids + // aka should be '8' for temporary 8x8 arrays and config::TA_TEXTURES_PER_ROW + TextureHash(const uint16_t _width, const uint16_t _height, const uint32_t _stride, const uint32_t* _texture_ids) : width(_width), height(_height), stride(_stride), texture_ids(_texture_ids) {}; + + bool operator==(const TextureHash &_o) const { + if (width != _o.width || height != _o.height) + return false; + + bool equal = true; + for (int y = 0; y < height; y++) + for (int x = 0; x < width; x++) + { + equal &= (texture_ids[x + y * stride] == _o.texture_ids[x + y * stride]); + } + return equal; + }; +}; +const int TextureHashSize = sizeof(TextureHash); + +template <> +struct std::hash +{ + size_t operator()(const TextureHash &_v) const noexcept + { + std::size_t h=0; + hash_combine(h, _v.width, _v.height); + for (int y = 0; y < _v.height; y++) + for (int x = 0; x < _v.width; x++) + hash_combine(h, _v.texture_ids[x + y * _v.stride]); + return h; + } +}; + + +class TextureAtlas +{ +private: + typedef struct patch { + uint32_t refCount : 8 = 0; // steal some bits from texture_id for alignment reasons and since we don't need that many + uint32_t texture_id : 24 = 0; // offset to textures in atlas // TODO maybe store x&y offsets instead + uint16_t width = 0; // number of textures + uint16_t height = 0; // number of textures + } Patch; + static_assert(sizeof(Patch) == 8); + static_assert(config::TA_TOTAL_TEXTURES < (1 << 24)); + + // main patch == full 8x8 patch + typedef struct main_patch { + uint32_t used_count; // can be merged when ==0 + std::vector free_patches; // refs to patches + } MainPatch; + + SlotList patches = SlotList(config::TA_INITIAL_NUMBER_PATCHES); + + // "second" 
atlas with texture_ids (corresponding to the loaded texels) for deduplication + uint32_t textures[config::TA_TOTAL_TEXTURES] = {}; + emilib::HashMap texture_lookup; + + // free patches sorted by size + // bins[0] = 1x1 + // bins[1] = 1x2 + // bins[2] = 1x3 + // ... + // bins[TA_MAIN_PATCH_SIZE] = 2x1 + std::vector bins[config::TA_MAIN_PATCH_SIZE * config::TA_MAIN_PATCH_SIZE]; // holds refs to patches + + // hold refs to sub patches for merging purposes + MainPatch main_patches[config::TA_WIDTH * config::TA_HEIGHT]; + + TexelBuffer texels = TexelBuffer(config::TA_TOTAL_TEXELS); + + uint32_t main_patch_id_2_texture_id(uint32_t _id) { + const uint32_t x = _id % config::TA_WIDTH; + const uint32_t y = _id / config::TA_WIDTH; + return (y * config::TA_MAIN_PATCH_SIZE * config::TA_TEXTURES_PER_ROW) + (x * config::TA_MAIN_PATCH_SIZE); + } + + uint32_t texture_id_2_main_patch_id(uint32_t _id) { + uint32_t x = _id % config::TA_TEXTURES_PER_ROW; // texture offsets + uint32_t y = _id / config::TA_TEXTURES_PER_ROW; + x /= config::TA_MAIN_PATCH_SIZE; // main patch offsets + y /= config::TA_MAIN_PATCH_SIZE; + return y * config::TA_WIDTH + x; + } + + uint32_t texture_id_2_texel_id(uint32_t _id) { + uint32_t x = _id % config::TA_TEXTURES_PER_ROW; // texture offsets + uint32_t y = _id / config::TA_TEXTURES_PER_ROW; + return (y * config::TA_TEXTURES_PER_ROW * config::TA_TEXELS_PER_TEXTURE) + + (x * config::TA_TEXTURE_SIZE); + } + + // Saves the current texture atlas to a rgba8 texture (.png) + // Starts enumerating at 0 and incrementing on subsequent calls + // Resets suffix on startup + // Overwrites existing files + uint32_t counter = 0; + void save_texture_atlas_to_png() + { + std::string filename = "atlas"; + std::filesystem::create_directories(filename); + filename += "/"; + filename += std::to_string(counter); + filename += ".png"; + + const int width = config::TA_TEXELS_PER_ROW; + const int height = config::TA_HEIGHT * config::TA_MAIN_PATCH_SIZE * config::TA_TEXTURE_SIZE; + 
stbi_write_png(filename.c_str(), width, height, 4 /* channels (RGBA) */, texels.data(), config::TA_TEXELS_PER_ROW * sizeof(Texel)); + + counter++; + } + +public: + TextureAtlas() = default; + ~TextureAtlas() = default; + + void init() { + texture_lookup.reserve(config::TA_INITIAL_NUMBER_PATCHES); + + // create main patches + for (int i = (config::TA_WIDTH * config::TA_HEIGHT) - 1; i >= 0; i--) { + uint32_t patch_id; + Patch &patch = patches.add(patch_id); + + patch.texture_id = main_patch_id_2_texture_id(i); + patch.width = config::TA_MAIN_PATCH_SIZE; + patch.height = config::TA_MAIN_PATCH_SIZE; + + // add to bins + bins[(config::TA_MAIN_PATCH_SIZE * config::TA_MAIN_PATCH_SIZE) - 1].push_back(patch_id); + + // add to main patches + main_patches[i].used_count = 0; + main_patches[i].free_patches.push_back(patch_id); + } + } + +private: // TODO move implementations into atlas.cpp and fix this + + // finds fitting patch in atlas + // Supports deduplication + // returns patch_id + uint32_t get_new_patch(const uint8_t _width, const uint8_t _height) { + assert(_width > 0); + assert(_height > 0); + assert(_width <= 8); + assert(_height <= 8); + + // search bins + uint32_t bin = (_width-1) * 8 + (_height-1); + while (bin < 64 && bins[bin].size() == 0) { + bin++; // try next bigger one + } + if (bin == 64) { + logErr("TextureAtlas overflow\n"); + std::abort(); // Atlas overflow + } + + // remove from bins + uint32_t patch_id = bins[bin].back(); + bins[bin].pop_back(); + + Patch &patch = patches.at(patch_id); + + // remove from main patch + MainPatch &main_patch = main_patches[texture_id_2_main_patch_id(patch.texture_id)]; + main_patch.used_count++; + auto it = std::find(main_patch.free_patches.begin(), main_patch.free_patches.end(), patch_id); + assert(it != main_patch.free_patches.end()); + main_patch.free_patches.erase(it); + + // cut it up + // [X|ooo] + // [-----] + // [ooooo] + // [ooooo] + // [ooooo] + // & add any extra back into bins + + // extra to the right + if 
(_width < patch.width) { + uint32_t right_patch_id; + Patch& right_patch = patches.add(right_patch_id); + + right_patch.width = patch.width - _width; + assert(right_patch.width > 0 && right_patch.width < config::TA_MAIN_PATCH_SIZE); + right_patch.height = _height; + right_patch.texture_id = patch.texture_id + _width; + + // add to bins + const uint32_t bin = ((right_patch.width-1) * config::TA_MAIN_PATCH_SIZE) + (right_patch.height-1); + bins[bin].push_back(right_patch_id); + + // add to main patch + main_patch.free_patches.push_back(right_patch_id); + } + + // extra below + if (_height < patch.height) { + uint32_t bottom_patch_id; + Patch& bottom_patch = patches.add(bottom_patch_id); + + bottom_patch.width = patch.width; + bottom_patch.height = patch.height - _height; + assert(bottom_patch.height > 0 && bottom_patch.height < config::TA_MAIN_PATCH_SIZE); + bottom_patch.texture_id = patch.texture_id + (_height * config::TA_TEXTURES_PER_ROW); + + // add to bins + const uint32_t bin = ((bottom_patch.width-1) * config::TA_MAIN_PATCH_SIZE) + (bottom_patch.height-1); + bins[bin].push_back(bottom_patch_id); + + // add to main patch + main_patch.free_patches.push_back(bottom_patch_id); + } + + patch.refCount = 1; + patch.width = _width; + patch.height = _height; + + return patch_id; + } + + // copies a texture into a patch + // _data MUST point to valid memory (depending on configured texture size) + // Note: always copies a full texture (aka TA_TEXTURE_SIZE*TA_TEXTURE_SIZE individual texels) + // Returns texture_id for gfx::vertex buffer generation + uint32_t update_texture(const Patch &patch, const uint8_t _offset_x, const uint8_t _offset_y, const uint32_t* _component_texture_ids) { + const Texel* data = gfx::get_component_texture(_component_texture_ids[_offset_x + _offset_y * 8]); + const uint32_t texture_id = patch.texture_id + _offset_y * config::TA_TEXTURES_PER_ROW + _offset_x; + + assert(_offset_x < patch.width); + assert(_offset_y < patch.height); + + // load 
texture_ids into "second" atlas for deduplication + textures[texture_id] = _component_texture_ids[_offset_x + _offset_y * 8]; + + // load texels into atlas + const uint32_t texel_id = texture_id_2_texel_id(texture_id); + texels.update_texels(texel_id, data); + +#ifndef NDEBUG + if (config::DEBUG_SAVE_TEXTURE_ATLAS_TO_PNG) + save_texture_atlas_to_png(); +#endif + + return texture_id; + } + + // returns true on success, false otherwise + // _component_texture_ids expected to be inside an array of 8*8 + // it is guaranteed not to access other _component_texture_ids as confined per _width and _height though + bool deduplicate_patch(const TextureHash& _key, uint32_t& _out_patch_id) + { + auto it = texture_lookup.find(_key); + if (it != texture_lookup.end()) + { + _out_patch_id = it->second; + + // increase ref count, but do not overflow + // once we hit the limit, we consider + // the patch promoted to an forever patch + Patch& patch = patches.at(_out_patch_id); + if (patch.refCount < 255) + patch.refCount++; + + if (config::DEBUG_TRACE_LOG_FOREVER_PATCHES && patch.refCount == 255) + logTrace("[TextureAtlas] Patch %d hit a refcount of 255 and was promoted to a forever patch.\n", _out_patch_id); + + return true; + } + + return false; + } + +public: // TODO move implementations into atlas.cpp and fix this + + // Finds fitting patch in atlas + // Supports deduplication + // returns patch_id + // _component_texture_ids expected to be inside an array of 8*8 + // it is guaranteed not to access other _component_texture_ids as confined per _width and _height though + uint32_t add_patch(const uint8_t _width, const uint8_t _height, const uint32_t* _component_texture_ids) { + assert(_width > 0); + assert(_height > 0); + assert(_width <= 8); + assert(_height <= 8); + + uint32_t patch_id = 0; + + // try to de-duplicate the given texture + if (deduplicate_patch(TextureHash(_width, _height, 8, _component_texture_ids), patch_id)) + return patch_id; + + // else get new patch and copy 
data + patch_id = get_new_patch(_width, _height); + const Patch &patch = patches.at_const(patch_id); + + // 8*8 texels + for (int x = 0; x < _width; x++) + for (int y = 0; y < _height; y++) + update_texture(patch, x, y, _component_texture_ids); + + // add this patch the texture_lookup + texture_lookup.insert_unique(std::move(TextureHash(_width, _height, config::TA_TEXTURES_PER_ROW, &textures[patch.texture_id])), std::move(patch_id)); + + return patch_id; + } + + // Returns 8 texture ids (for vertex buffer generation) of given patch + void get_patch_texture_ids(const uint32_t _patch_id, uint32_t* _out_texture_ids) + { + Patch &patch = patches.at(_patch_id); + + _out_texture_ids[TexCorner::TopLeft] = patch.texture_id; + + // TexCorner for sampling is = (trigSize%2==0) ? TL : CL; + _out_texture_ids[TexCorner::CenterLeft] = patch.texture_id + ((patch.height / (uint32_t)2) * config::TA_TEXTURES_PER_ROW); + + _out_texture_ids[TexCorner::BotLeft] = patch.texture_id + ((patch.height - 1) * config::TA_TEXTURES_PER_ROW); + + // TexCorner for sampling is = (trigSize%2==0) ? BL : CB; + _out_texture_ids[TexCorner::CenterBot] = _out_texture_ids[TexCorner::BotLeft] + (patch.width / (uint32_t)2); + + _out_texture_ids[TexCorner::BotRight] = _out_texture_ids[TexCorner::BotLeft] + (patch.width - 1); + + // TexCorner for sampling is = (trigSize%2==0) ? TR : CR; + _out_texture_ids[TexCorner::CenterRight] = _out_texture_ids[TexCorner::CenterLeft] + (patch.width - 1); + + _out_texture_ids[TexCorner::TopRight] = patch.texture_id + (patch.width - 1); + + // TexCorner for sampling is = (trigSize%2==0) ? 
TL : CT; + _out_texture_ids[TexCorner::CenterTop] = patch.texture_id + (patch.width / (uint32_t)2); + } + + // frees this patch's space in the atlas + void remove_patch(const uint32_t _patch_id) { + Patch &patch = patches.at(_patch_id); + + // decrement patch ref count + // but do not demote forever patches + if (patch.refCount < 255) + patch.refCount--; + if (patch.refCount > 0) + return; // do no (yet) remove it + + // remove it from the texture_lookup + bool ok = texture_lookup.erase(TextureHash(patch.width, patch.height, config::TA_TEXTURES_PER_ROW, &textures[patch.texture_id])); + assert(ok); + + // decrement main patch used counter + const uint32_t main_patch_id = texture_id_2_main_patch_id(patch.texture_id); + MainPatch &main_patch = main_patches[main_patch_id]; + assert(main_patch.used_count > 0); + main_patch.used_count--; + + // merge main patch? + if (main_patch.used_count == 0) { + logDebug("[TextureAtlas], merging main patch #%d\n", main_patch_id); + // remove sibling patches + for (uint32_t id : main_patch.free_patches) { + const Patch &patch = patches.at_const(id); + const uint32_t bin = ((patch.width-1) * config::TA_MAIN_PATCH_SIZE) + (patch.height-1); + auto it = std::find(bins[bin].begin(), bins[bin].end(), id); + assert(it != bins[bin].end()); + bins[bin].erase(it); + + patches.remove(id); + } + main_patch.free_patches.clear(); + + // add main patch (reuse this patch) + patch.texture_id = main_patch_id; + patch.width = config::TA_MAIN_PATCH_SIZE; + patch.height = config::TA_MAIN_PATCH_SIZE; + main_patch.free_patches.push_back(_patch_id); + + // add to bins + bins[(config::TA_MAIN_PATCH_SIZE * config::TA_MAIN_PATCH_SIZE) - 1].push_back(_patch_id); + + return; + } + + // otherwise, make patch available again + // add to bins + const uint32_t bin = ((patch.width-1) * config::TA_MAIN_PATCH_SIZE) + (patch.height-1); + bins[bin].push_back(_patch_id); + + // add to main patch + main_patch.free_patches.push_back(_patch_id); + } + + + bool 
wasModified(uint32_t& _out_offset, uint32_t& _out_num_elements) + { + return texels.wasModified(_out_offset, _out_num_elements); + } + + void clearModified() + { + texels.clearModified(); + } + + Texel* data() + { + return texels.data(); + } + +}; diff --git a/src/factory.cpp b/src/factory.cpp new file mode 100644 index 0000000..34f4067 --- /dev/null +++ b/src/factory.cpp @@ -0,0 +1,1484 @@ +#include "factory.h" +#include "util.h" +#include "data/queue.h" +#include "../3rdparty/emilib/hash_map.hpp" +#include "../3rdparty/emilib/hash_set.hpp" +#include +#include +#include + +namespace factory +{ + // ATTENTION must match shader and config.h + // VertexID Layout for texturing + // Bits: 19 | 3 | 10 + // Data: texture index | texture corner | vertex index + + const uint32_t vertex_index_bits = 10; + const uint32_t vertex_index_mask = (1 << vertex_index_bits) - 1; + const uint32_t texture_corner_bits = 3; + const uint32_t texture_corner_mask = (1 << texture_corner_bits) - 1; + const uint32_t texture_index_bits = 19; + const uint32_t texture_index_mask = (1 << texture_index_bits) - 1; + + using TC = TexCorner; + static_assert(TexCorner::Count > texture_corner_mask); + + // Returns TexCorner rotated 90deg to the left + constexpr TexCorner rotL(TexCorner _corner) + { + return (TexCorner)(((_corner+TexCorner::Count)-2)%TexCorner::Count); + } + constexpr TexCorner rotR(TexCorner _corner) + { + return (TexCorner)(((_corner+TexCorner::Count)+2)%TexCorner::Count); + } + + GPULineVertex aabb_outline_data[24]; + GPULineVertex *generate_AABB_outline(const AABB &_aabb) + { + const Vec3 size = bx::sub(_aabb.max, _aabb.min); + + // min -> x + aabb_outline_data[0].position = _aabb.min; + aabb_outline_data[1].position = _aabb.min; + aabb_outline_data[1].position.x += size.x; + + // min -> y + aabb_outline_data[2].position = _aabb.min; + aabb_outline_data[3].position = _aabb.min; + aabb_outline_data[3].position.y += size.y; + + // min -> z + aabb_outline_data[4].position = _aabb.min; + 
aabb_outline_data[5].position = _aabb.min; + aabb_outline_data[5].position.z += size.z; + + // x -> xy + aabb_outline_data[6].position = aabb_outline_data[1].position; + aabb_outline_data[7].position = aabb_outline_data[1].position; + aabb_outline_data[7].position.y += size.y; + + // x -> xz + aabb_outline_data[8].position = aabb_outline_data[1].position; + aabb_outline_data[9].position = aabb_outline_data[1].position; + aabb_outline_data[9].position.z += size.z; + + // y -> yx + aabb_outline_data[10].position = aabb_outline_data[3].position; + aabb_outline_data[11].position = aabb_outline_data[7].position; + + // y -> yz + aabb_outline_data[12].position = aabb_outline_data[3].position; + aabb_outline_data[13].position = aabb_outline_data[3].position; + aabb_outline_data[13].position.z += size.z; + + // z -> zx + aabb_outline_data[14].position = aabb_outline_data[5].position; + aabb_outline_data[15].position = aabb_outline_data[9].position; + + // z -> zy + aabb_outline_data[16].position = aabb_outline_data[5].position; + aabb_outline_data[17].position = aabb_outline_data[13].position; + + // xy -> max + aabb_outline_data[18].position = aabb_outline_data[7].position; + aabb_outline_data[19].position = _aabb.max; + + // xz -> max + aabb_outline_data[20].position = aabb_outline_data[9].position; + aabb_outline_data[21].position = _aabb.max; + + // yz -> max + aabb_outline_data[22].position = aabb_outline_data[13].position; + aabb_outline_data[23].position = _aabb.max; + + return aabb_outline_data; + } + + // generate vertex index + uint32_t I(uint32_t _x, uint32_t _y, uint32_t _z) + { + assert(_x <= 8); + assert(_y <= 8); + assert(_z <= 8); + return _x + 9 * _y + 9 * 9 * _z; + } + uint32_t VI(uint32_t _texture_index, uint32_t _corner /*type: TexCorner*/, uint32_t vert_index) + { + assert(_texture_index <= texture_index_mask); + assert(_corner <= texture_corner_mask); + return (_texture_index << (texture_corner_bits + vertex_index_bits)) | + (_corner << 
vertex_index_bits) | + vert_index; + }; + uint32_t VI(uint32_t _texture_index, uint32_t _corner /*type: TexCorner*/, uint32_t _x, uint32_t _y, uint32_t _z) + { + uint32_t vert_index = I(_x, _y, _z); + return VI(_texture_index, _corner, vert_index); + }; + + + uint32_t debug_full_block_1_data[36] = + { + // Note: cam(A,B) == camera looking into direction A, with up being B + + VI(0, TC::BotLeft, 0, 0, 0), VI(0, TC::BotRight, 8, 0, 0), VI(0, TC::TopLeft, 0, 8, 0), // cam(Z+,Y+) => bot-left + VI(0, TC::TopLeft, 0, 8, 0), VI(0, TC::BotRight, 8, 0, 0), VI(0, TC::TopRight, 8, 8, 0), // cam(Z+,Y+) => top-right + + VI(0, TC::BotLeft, 0, 0, 8), VI(0, TC::BotRight, 0, 0, 0), VI(0, TC::TopLeft, 0, 8, 8), // cam(X+,Y+) => bot-left + VI(0, TC::TopLeft, 0, 8, 8), VI(0, TC::BotRight, 0, 0, 0), VI(0, TC::TopRight, 0, 8, 0), // cam(X+,Y+) => top-right + + VI(0, TC::BotLeft, 8, 0, 8), VI(0, TC::BotRight, 0, 0, 8), VI(0, TC::TopLeft, 8, 8, 8), // cam(Z-,Y+) => bot-left + VI(0, TC::TopLeft, 8, 8, 8), VI(0, TC::BotRight, 0, 0, 8), VI(0, TC::TopRight, 0, 8, 8), // cam(Z-,Y+) => top-right + + VI(0, TC::BotLeft, 8, 0, 0), VI(0, TC::BotRight, 8, 0, 8), VI(0, TC::TopLeft, 8, 8, 0), // cam(X-,Y+) => bot-left + VI(0, TC::TopLeft, 8, 8, 0), VI(0, TC::BotRight, 8, 0, 8), VI(0, TC::TopRight, 8, 8, 8), // cam(X-,Y+) => top-right + + VI(0, TC::BotLeft, 0, 8, 0), VI(0, TC::BotRight, 8, 8, 0), VI(0, TC::TopLeft, 0, 8, 8), // cam(Y-,Z+) => bot-left + VI(0, TC::TopLeft, 0, 8, 8), VI(0, TC::BotRight, 8, 8, 0), VI(0, TC::TopRight, 8, 8, 8), // cam(Y-,Z+) => top-right + + VI(0, TC::BotLeft, 0, 0, 8), VI(0, TC::BotRight, 8, 0, 8), VI(0, TC::TopLeft, 0, 0, 0), // cam(Y+,Z-) => bot-left + VI(0, TC::TopLeft, 0, 0, 0), VI(0, TC::BotRight, 8, 0, 8), VI(0, TC::TopRight, 8, 0, 0), // cam(Y+,Z-) => top-right + }; + + // Note: trig apex's are not the second vertex! 
+ // Works, but no longer up to spec + uint32_t *generate_debug_full_block_1_model() + { + return debug_full_block_1_data; + } + + uint32_t debug_full_block_2_data[36] = + { + // Note: cam(A,B) == camera looking into direction A, with up being B + + VI(0, rotL(TC::BotLeft), 0, 8, 0), VI(0, TC::BotLeft, 0, 0, 0), VI(0, rotR(TC::BotLeft), 8, 0, 0), // cam(Z+,Y+) => bot-left + VI(0, rotL(TC::TopRight), 8, 0, 0), VI(0, TC::TopRight, 8, 8, 0), VI(0, rotR(TC::TopRight), 0, 8, 0), // cam(Z+,Y+) => top-right + + VI(0, rotL(TC::BotLeft), 0, 8, 8), VI(0, TC::BotLeft, 0, 0, 8), VI(0, rotR(TC::BotLeft), 0, 0, 0), // cam(X+,Y+) => bot-left + VI(0, rotL(TC::TopRight), 0, 0, 0), VI(0, TC::TopRight, 0, 8, 0), VI(0, rotR(TC::TopRight), 0, 8, 8), // cam(X+,Y+) => top-right + + VI(0, rotL(TC::BotLeft), 8, 8, 8), VI(0, TC::BotLeft, 8, 0, 8), VI(0, rotR(TC::BotLeft), 0, 0, 8), // cam(Z-,Y+) => bot-left + VI(0, rotL(TC::TopRight), 0, 0, 8), VI(0, TC::TopRight, 0, 8, 8), VI(0, rotR(TC::TopRight), 8, 8, 8), // cam(Z-,Y+) => top-right + + VI(0, rotL(TC::BotLeft), 8, 8, 0), VI(0, TC::BotLeft, 8, 0, 0), VI(0, rotR(TC::BotLeft), 8, 0, 8), // cam(X-,Y+) => bot-left + VI(0, rotL(TC::TopRight), 8, 0, 8), VI(0, TC::TopRight, 8, 8, 8), VI(0, rotR(TC::TopRight), 8, 8, 0), // cam(X-,Y+) => top-right + + VI(0, rotL(TC::BotLeft), 0, 8, 8), VI(0, TC::BotLeft, 0, 8, 0), VI(0, rotR(TC::BotLeft), 8, 8, 0), // cam(Y-,Z+) => bot-left + VI(0, rotL(TC::TopRight), 8, 8, 0), VI(0, TC::TopRight, 8, 8, 8), VI(0, rotR(TC::TopRight), 0, 8, 8), // cam(Y-,Z+) => top-right + + VI(0, rotL(TC::BotLeft), 8, 0, 8), VI(0, TC::BotLeft, 8, 0, 0), VI(0, rotR(TC::BotLeft), 0, 0, 0), // cam(Y+,Z+) => bot-left + VI(0, rotL(TC::TopRight), 0, 0, 0), VI(0, TC::TopRight, 0, 0, 8), VI(0, rotR(TC::TopRight), 8, 0, 8), // cam(Y+,Z+) => top-right + }; + + // Uses our new vert[1] must be apex spec + uint32_t *generate_debug_full_block_2_model() + { + return debug_full_block_2_data; + } + + const TexCorner dfb3apexBL = 
TexCorner::CenterBot; + const TexCorner dfb3apexTR = TexCorner::CenterTop; + uint32_t debug_full_block_3_data[36] = + { + // Note: cam(A,B) == camera looking into direction A, with up being B + + VI(0, rotL(dfb3apexBL), 0, 8, 0), VI(0, dfb3apexBL, 0, 0, 0), VI(0, rotR(dfb3apexBL), 8, 0, 0), // cam(Z+,Y+) => bot-left + VI(0, rotL(dfb3apexTR), 8, 0, 0), VI(0, dfb3apexTR, 8, 8, 0), VI(0, rotR(dfb3apexTR), 0, 8, 0), // cam(Z+,Y+) => top-right + + VI(0, rotL(dfb3apexBL), 0, 8, 8), VI(0, dfb3apexBL, 0, 0, 8), VI(0, rotR(dfb3apexBL), 0, 0, 0), // cam(X+,Y+) => bot-left + VI(0, rotL(dfb3apexTR), 0, 0, 0), VI(0, dfb3apexTR, 0, 8, 0), VI(0, rotR(dfb3apexTR), 0, 8, 8), // cam(X+,Y+) => top-right + + VI(0, rotL(dfb3apexBL), 8, 8, 8), VI(0, dfb3apexBL, 8, 0, 8), VI(0, rotR(dfb3apexBL), 0, 0, 8), // cam(Z-,Y+) => bot-left + VI(0, rotL(dfb3apexTR), 0, 0, 8), VI(0, dfb3apexTR, 0, 8, 8), VI(0, rotR(dfb3apexTR), 8, 8, 8), // cam(Z-,Y+) => top-right + + VI(0, rotL(dfb3apexBL), 8, 8, 0), VI(0, dfb3apexBL, 8, 0, 0), VI(0, rotR(dfb3apexBL), 8, 0, 8), // cam(X-,Y+) => bot-left + VI(0, rotL(dfb3apexTR), 8, 0, 8), VI(0, dfb3apexTR, 8, 8, 8), VI(0, rotR(dfb3apexTR), 8, 8, 0), // cam(X-,Y+) => top-right + + VI(0, rotL(dfb3apexBL), 0, 8, 8), VI(0, dfb3apexBL, 0, 8, 0), VI(0, rotR(dfb3apexBL), 8, 8, 0), // cam(Y-,Z+) => bot-left + VI(0, rotL(dfb3apexTR), 8, 8, 0), VI(0, dfb3apexTR, 8, 8, 8), VI(0, rotR(dfb3apexTR), 0, 8, 8), // cam(Y-,Z+) => top-right + + VI(0, rotL(dfb3apexBL), 8, 0, 8), VI(0, dfb3apexBL, 8, 0, 0), VI(0, rotR(dfb3apexBL), 0, 0, 0), // cam(Y+,Z+) => bot-left + VI(0, rotL(dfb3apexTR), 0, 0, 0), VI(0, dfb3apexTR, 0, 0, 8), VI(0, rotR(dfb3apexTR), 8, 0, 8), // cam(Y+,Z+) => top-right + }; + + uint32_t *generate_debug_full_block_3_model() + { + return debug_full_block_3_data; + } + + uint32_t pyramid_block_data[18]; + uint32_t *generate_debug_pyramid_block_model() + { + using TC = TexCorner; + uint32_t top = I(4, 8, 4); + + uint32_t full_block_trilist[] = + { + // Note: cam(A,B) 
== camera looking into direction A with up being B + + VI(0, TC::BotLeft, 0, 0, 0), VI(0, TC::BotRight, 8, 0, 0), VI(0, TC::CenterTop, top), // cam(Z+,Y+) + + VI(0, TC::BotLeft, 0, 0, 8), VI(0, TC::BotRight, 0, 0, 0), VI(0, TC::CenterTop, top), // cam(X+,Y+) + + VI(0, TC::BotLeft, 8, 0, 8), VI(0, TC::BotRight, 0, 0, 8), VI(0, TC::CenterTop, top), // cam(Z-,Y+) + + VI(0, TC::BotLeft, 8, 0, 0), VI(0, TC::BotRight, 8, 0, 8), VI(0, TC::CenterTop, top), // cam(X-,Y+) + + VI(0, TC::BotLeft, 0, 0, 8), VI(0, TC::BotRight, 8, 0, 8), VI(0, TC::TopLeft, 0, 0, 0), // cam(Y+,Z-) => bot-left + VI(0, TC::TopLeft, 0, 0, 0), VI(0, TC::BotRight, 8, 0, 8), VI(0, TC::TopRight, 8, 0, 0), // cam(Y+,Z-) => top-right + }; + + std::memcpy(pyramid_block_data, full_block_trilist, sizeof(full_block_trilist)); + + return pyramid_block_data; + } + + void trivial_texture_stitching() + { + // just allocate a 1x1 texture for each face + // and copy it + } + + void texture_stitching(/*&std::vector*/) + { + // just allocate a 1x1 texture for each triangular face and copy it + // greedily combine quads into big rectangles + // maybe copy texture into temp buffer (of size 8x8 before we know how big one patch it gets) + // + // return index buffer + } + + typedef struct flooding_result + { + FaceType face; // the neighboring face that is visible + bool flood_into; // wether to light floods into this neighboring component + } FloodingResult; + + // Handcrafted table with flooding results + constexpr FloodingResult flooding_table[FaceType::Count * FaceType::Count] = { + // From FaceType::None to ... + {FaceType::None, true}, // ... FaceType::None + {FaceType::Quad, false}, // ... FaceType::Quad + {FaceType::TrigBL, true}, // ... FaceType::TrigBL + {FaceType::TrigBR, true}, // ... FaceType::TrigBR + {FaceType::TrigTR, true}, // ... FaceType::TrigTR + {FaceType::TrigTL, true}, // ... FaceType::TrigTL + + // From FaceType::Quad to ... 
+ {FaceType::None, false}, + {FaceType::None, false}, + {FaceType::None, false}, + {FaceType::None, false}, + {FaceType::None, false}, + {FaceType::None, false}, + + // From FaceType::TrigBL to ... + {FaceType::None, true}, + {FaceType::TrigTL, false}, + {FaceType::TrigBL, true}, + {FaceType::None, true}, + {FaceType::TrigTR, true}, + {FaceType::TrigTL, false}, + + // From FaceType::TrigBR to ... + {FaceType::None, true}, + {FaceType::TrigTR, false}, + {FaceType::None, true}, + {FaceType::TrigBR, true}, + {FaceType::TrigTR, false}, + {FaceType::TrigTL, true}, + + // From FaceType::TrigTR to ... + {FaceType::None, true}, + {FaceType::TrigBR, false}, + {FaceType::TrigBL, true}, + {FaceType::TrigBR, false}, + {FaceType::TrigTR, true}, + {FaceType::None, true}, + + // From FaceType::TrigTL to ... + {FaceType::None, true}, + {FaceType::TrigBL, false}, + {FaceType::TrigBL, false}, + {FaceType::TrigBR, true}, + {FaceType::None, true}, + {FaceType::TrigTL, true}, + }; + + constexpr FloodingResult flood(FaceType _from, FaceType _to) + { + return flooding_table[_from * FaceType::Count + _to]; + } + + auto CompIndex = [](const uint8_t _x, const uint8_t _y, const uint8_t _z) + { return _x + _y * 8 + _z * 8 * 8; }; + + typedef struct aggregate_face + { + // ... + // Y [16|17|...] + // ^ [8|9|10|..] + // | [0|1|2|...] 
+ // -> X + uint32_t ids[8 * 8] = {}; // ref into block_faces + + // 0 is top left + uint32_t& at(uint_fast8_t _x, uint_fast8_t _y) + { + assert(_x < 8); + assert(_y < 8); + return ids[_x + _y * 8]; + } + + aggregate_face() : ids{} {} + } AggregateFace; + +/* // Returns true on success, false otherwise + bool flood_light(const BlockModel &_model) + { + using namespace direction_ns; + + // This runs the super awesome light flooding algorithm + // It determines all VISIBLE faces in a block + // It also fills the edge map for the next steps + + // INVARIANTS: + // - A block may consist of multiple pieces + // - Each piece MUST have at least one quad on the outside + + // Sanity check - Is there at least one component in the block? + bool model_is_empty = true; + for (int i = 0; i < 512; i++) + if (_model.component_ids[i] > 0) + { + model_is_empty = false; + break; + } + if (model_is_empty) + return false; + + + // working set + std::vector frontier; // lit_components aka light hits the inside + auto visited = emilib::HashSet(); // visited_components + + // output + std::vector visible_faces; + auto edges = emilib::HashMap>(); + + // check outside faces + // There should be at least one (viewing into PosZ direction) + Direction flood_dir = Direction::PosZ; + for (uint8_t x = 0; x < 8; x++) + { + for (uint8_t y = 0; y < 8; y++) + { + const uint8_t z = 0; + + // const Component &component = _model.components[CompIndex(x, y, z)]; + // // TODO use world::get_component_outer_face(component.model_ref, flood_dir) with applied orientation + // const ComponentModel &c_model = world::get_component_model(component.model_ref); + // const ComponentFace &face = c_model.get_outer_face(flood_dir); + + // const FloodingResult& result = flood(FaceType::None, face.type); + + // if (result.face != FaceType::None) { + // // get vertex coords + + // // transform into block face + // } + + // todo: get face verts from comp, based on actually visible face + // comp_verts = 
get_comp_outer_face_verts(type, direction) + + // translate into block coords + // block_verts = translate(comp_verts, x, y, z) + + // get tex coords corresponding to comp_verts + + // create blockface + + // add to visible_faces + // insert into edges + + // if (result.flood_into) { + // frontier.push_back({x, y, z + 1}); + // } + } + } + + // Generate all faces on the outside + // TODO + // iterate outside coordinates + corresponding direction + // "flood" onto components + // generate results + + // Flood into block + while (frontier.size() > 0) + { + U8Vec3 src_comp_coord = frontier.back(); + frontier.pop_back(); + + if (visited.contains(src_comp_coord)) + continue; + + visited.insert_unique(src_comp_coord); + + // get comp + uint32_t src_comp_id = _model.component_ids[CompIndex(src_comp_coord.x, src_comp_coord.y, src_comp_coord.z)]; + const Component &src_comp = world::get_component(src_comp_id); + + // Add inner faces (if any) to result + // TODO + + // flood into all surrounding directions + for (int i = 0; i < Direction::Count; i++) + { + // The direction to flood into (away from src comp, into the neighboring target comp) + Direction dir = (Direction)i; + + // Get face of src comp + const ComponentFace& src_face = src_comp.faces[negate(dir)]; + FaceType src_face_type = src_face.faceType(); + + // Get target comp + U8Vec3 target_comp_coord = src_comp_coord; + switch (dir) + { + case PosX: + target_comp_coord.x++; + break; + case NegX: + target_comp_coord.x--; + break; + case PosY: + target_comp_coord.y++; + break; + case NegY: + target_comp_coord.y--; + break; + case PosZ: + target_comp_coord.z++; + break; + case NegZ: + target_comp_coord.z--; + break; + + default: + break; + } + + // Is this a valid coordinate? 
+ if (target_comp_coord.x < 0 || target_comp_coord.x >= 8 || + target_comp_coord.y < 0 || target_comp_coord.y >= 8 || + target_comp_coord.z < 0 || target_comp_coord.z >= 8) + continue; // There is nothing there (outside of block) + + uint32_t target_comp_id = _model.component_ids[CompIndex(target_comp_coord.x, target_comp_coord.y, target_comp_coord.z)]; + const Component &target_comp = world::get_component(target_comp_id); + const ComponentFace& target_face = target_comp.faces[dir]; + +#ifndef NDEBUG + // Sanity check + Direction expected = Direction::Count; + FaceType target_face_type = target_face.faceType(&expected); + assert(expected == dir); +#else + FaceType target_face_type = target_face.faceType(); +#endif + + FloodingResult floodResult = flood(src_face_type, target_face_type); + + if (floodResult.flood_into) + { + // TODO ensure correct visiting and generating results + + // Enqueue this target_comp into the frontier + frontier.push_back(target_comp_coord); + } + + if (floodResult.face != FaceType::None) + { + assert(floodResult.face != FaceType::Count); + + // TODO generate results + } + } + + } + + // Done + return true; + }*/ + + // Returns true on success, false otherwise + bool flood_light2(const BlockModel &_model, std::vector& block_faces, emilib::HashMap>& edge_map) + { + using namespace direction_ns; + + /* This runs the super awesome light flooding algorithm */ + // It determines all VISIBLE faces in a block + // It also fills the edge map for the next steps + + const int32_t neighbor_offsets[Direction::Count] = + { + 1, // Direction::PosX + -1, // Direction::NegX + 8, // Direction::PosY + -8, // Direction::NegY + 8 * 8, // Direction::PosZ + -8 * 8, // Direction::NegZ + }; + + const int32_t dir_mod[Direction::Count] = + { + 8, // Direction::PosX + 8, // Direction::NegX + 8 * 8, // Direction::PosY + 8 * 8, // Direction::NegY + 8 * 8 * 8, // Direction::PosZ + 8 * 8 * 8 // Direction::NegZ + }; + + // returns the component id of the neighboring 
component from _c into _dir + // iff it is a valid id, otherwise returns 0 (referencing the empty component) + auto getNeighborCompID = [&_model, &neighbor_offsets, &dir_mod](int32_t _c /*current comp index in block*/, Direction _dir) -> uint32_t + { + // cannot go into PosX + // if c%8+1>=8 + // cannot go into NegX + // if c%8==0 + // c%8-1<0 + // cannot go into PosY + // if c%8*8+8>=64 + // cannot go into NegY + // if c%8*8<8 + // c%8*8-8<0 + // cannot go into PosZ + // if c+8*8>=8*8*8 + // c%8*8*8+8*8>=8*8*8 + // cannot go into NegZ + // if c<8*8 + // c%8*8*8-8*8<0 + + int32_t tmp = (_c % dir_mod[_dir]) + neighbor_offsets[_dir]; + if (((_dir % 2) == 0 && tmp >= dir_mod[_dir]) || + ((_dir % 2) != 0 && tmp < 0)) + return 0; // empty component + else + return _model.component_ids[_c + neighbor_offsets[_dir]]; + }; + + bool blockIsEmpty = true; + + // iterate all components and generate faces from all directions onto itself + for (int c = 0; c < 8*8*8; c++) + { + uint32_t comp_id = _model.component_ids[c]; + if (comp_id == 0) + continue; // quick exit + blockIsEmpty = false; + + const Component& comp = world::get_component(comp_id); + + // iterate all directions + for (int d = 0; d < Direction::Count; d++) + { + Direction dir = (Direction)d; + uint32_t n_comp_id = getNeighborCompID(c, dir); + const Component& n_comp = world::get_component(n_comp_id); + + const ComponentFace& face = comp.faces[negate(dir)]; + const ComponentFace& n_face = n_comp.faces[dir]; + +#ifndef NDEBUG + Direction expected_dir; // Note, cant really check neighbor face, as it might be invalid (which is fine for flooding) + const FloodingResult floodResult = flood(n_face.faceType(), face.faceType(&expected_dir)); + assert(expected_dir == negate(dir)); +#else + const FloodingResult floodResult = flood(n_face.faceType(), face.faceType()); +#endif + + // TODO floodResult.into is irrelevant for this algorithm! 
+ + if (floodResult.face != FaceType::None) + { + // comp coordinates in block + int8_t x = c % 8; + int8_t y = (c % (8*8)) / 8; + int8_t z = c / (8*8); + + ComponentVertices comp_verts = get_component_face_vertices(floodResult.face, negate(dir)); + + // transform into block verts + BlockVertices verts; + verts[0] = BlockFace::comp_vertex_2_block_vertex(comp_verts[0], x, y, z); + verts[1] = BlockFace::comp_vertex_2_block_vertex(comp_verts[1], x, y, z), + verts[2] = BlockFace::comp_vertex_2_block_vertex(comp_verts[2], x, y, z); + verts[3] = BlockFace::comp_vertex_2_block_vertex(comp_verts[3], x, y, z); + + // => Generate edges + edge_map[{verts[1], verts[0]}].push_back(block_faces.size()); + edge_map[{verts[2], verts[1]}].push_back(block_faces.size()); + + if (floodResult.face == FaceType::Quad /* aka bface.isQuad()*/) + { // is quad + edge_map[{verts[3], verts[2]}].push_back(block_faces.size()); + edge_map[{verts[0], verts[3]}].push_back(block_faces.size()); + } + else + { // is trig + edge_map[{verts[0], verts[2]}].push_back(block_faces.size()); + } + + // => Generate block face + const BlockFace& bface = block_faces.emplace_back( + verts, x, y, z, + face.tex_apex, + floodResult.face, // This determines if v3 is valid + face.normal, + face.texture_id); + + assert(bface.isQuad() == face.isQuad()); + assert(bface.isQuad() == (floodResult.face == FaceType::Quad)); + } + + } + + // iterate internal faces + for (int i = 6; i <= 7; i++) + { + const ComponentFace& face = comp.faces[i]; + if (!face.isValid()) + continue; // quick exit + + // comp coordinates in block + int8_t x = c % 8; + int8_t y = (c % (8 * 8)) / 8; + int8_t z = c / (8 * 8); + + BlockVertices verts; + verts[0] = BlockFace::comp_vertex_2_block_vertex(face.vertices[0], x, y, z); + verts[1] = BlockFace::comp_vertex_2_block_vertex(face.vertices[1], x, y, z), + verts[2] = BlockFace::comp_vertex_2_block_vertex(face.vertices[2], x, y, z); + verts[3] = BlockFace::comp_vertex_2_block_vertex(face.vertices[3], x, 
y, z); + + // => Generate edges + edge_map[{verts[1], verts[0]}].push_back(block_faces.size()); + edge_map[{verts[2], verts[1]}].push_back(block_faces.size()); + + if (face.isQuad()) + { // is quad + edge_map[{verts[3], verts[2]}].push_back(block_faces.size()); + edge_map[{verts[0], verts[3]}].push_back(block_faces.size()); + } + else + { // is trig + edge_map[{verts[0], verts[2]}].push_back(block_faces.size()); + } + + // => Generate block face + const BlockFace &bface = block_faces.emplace_back( + verts, x, y, z, + face.tex_apex, + face.faceType(), // This determines if v3 is valid + face.normal, + face.texture_id); + + assert(bface.isQuad() == face.isQuad()); + assert(bface.isQuad() == (bface.faceType == FaceType::Quad)); + } + + } + if (blockIsEmpty) + return false; // there are no components - the block is empty + + return true; + } + + // Currently does not support piece separation (should be extensible though) + void walk_surface(std::vector& block_faces, const emilib::HashMap>& _edge_map, std::vector _out_aggregate_faces) + { + /* this runs the super awesome surface walk algorithm */ + // to determine all face CONNECTIONS + + // working set + uint32_t next_block_face_index = 0; // for scanning block_faces for the next non visited face + std::vector frontier; + + for (;; next_block_face_index++) + { + // 1. find non visited face + auto find_next_block_face = [&next_block_face_index, &block_faces]() + { + for (; next_block_face_index < block_faces.size(); next_block_face_index++) + if (!block_faces[next_block_face_index].visited) + return true; + return false; + }; + + if (!find_next_block_face()) + return; // Were done + + BlockFace& b_face = block_faces[next_block_face_index]; + b_face.visited = true; + + // ! ignore faces with 3d-normals => just set visited true + // ! ignore faces using isosceles sampling => cannot stitch these easily + uint8_t dims = b_face.normal.dims(); + if (dims == 3 || b_face.tex_apex % 2 != 0) + continue; + + // 2. 
This face is new => create new aggregate_face + auto project_face_onto_aggregate = [](BlockFace& b_face, uint8_t b_face_dims, uint32_t b_face_index, AggregateFace& af){ + int8_t afx = 8, afy = 8; + // puzzle face into AF + // via comp coords x&y + // with mirror(W): 7-W + // Note: We have to ensure that "moving right" on the face surface + // stays "moving right" on the aggregate face, after the projection + if (b_face_dims >= 1 && b_face.normal.z < 0) + { // if normal has -Z => use x,y + afx = b_face.comp_coords[0]; + afy = b_face.comp_coords[1]; + } + else if (b_face_dims >= 1 && b_face.normal.z > 0) + { // if normal has +Z => use mirror(x),y + afx = 7 - b_face.comp_coords[0]; + afy = b_face.comp_coords[1]; + } + else if (b_face_dims >= 1 && b_face.normal.x < 0) + { // if normal == -X => use mirror(z),y + afx = 7 - b_face.comp_coords[2]; + afy = b_face.comp_coords[1]; + } + else if (b_face_dims >= 1 && b_face.normal.x > 0) + { // if normal == +X => use z,y + afx = b_face.comp_coords[2]; + afy = b_face.comp_coords[1]; + } + else if (b_face_dims == 1 && b_face.normal.y < 0) + { // if normal == -Y => use x,mirror(z) + afx = b_face.comp_coords[0]; + afy = 7 - b_face.comp_coords[2]; + } + else if (b_face_dims == 1 && b_face.normal.y > 0) + { // if normal == +Y => use x,z + afx = b_face.comp_coords[0]; + afy = b_face.comp_coords[2]; + } + else + { + assert(false && "Should not happen <.<"); + } + + af.ids[afx + afy * 8] = b_face_index; + + }; + AggregateFace& af = _out_aggregate_faces.emplace_back(); + project_face_onto_aggregate(b_face, dims, next_block_face_index, af); + + // 3. walk (same normal) neighbors and extend AG + // ask edgemap, if same normal & not visited, push into frontier + // repeat until frontier.emtpy() then go to 1. 
+ auto check_edge = [&block_faces, &_edge_map, &b_face, &frontier](uint16_t v0, uint16_t v1) + { + auto it = _edge_map.find(FaceEdge(v0, v1)); + assert(it != _edge_map.cend()); + if (it != _edge_map.cend()) // todo this should always return true.. + for (uint32_t n_face_index : it->second) + { + BlockFace& n_face = block_faces[n_face_index]; + // Check criteria for aggregate faces + if (!n_face.visited && // must not be visited yet - duh + n_face.normal == b_face.normal // must be in the same plane + ) + { + frontier.push_back(n_face_index); + return; + } + } + }; + + // Edge#0 + check_edge(b_face.vertices[0], b_face.vertices[1]); + // Edge#1 + check_edge(b_face.vertices[1], b_face.vertices[2]); + + if (b_face.isQuad()) + { + // Edge#2 + check_edge(b_face.vertices[2], b_face.vertices[3]); + // Edge#3 + check_edge(b_face.vertices[3], b_face.vertices[0]); + } + else + { + // Edge#2 + check_edge(b_face.vertices[2], b_face.vertices[0]); + } + + // Now walk that surface! + while (!frontier.empty()) + { + uint32_t n_face_index = frontier.back(); + frontier.pop_back(); + BlockFace& n_face = block_faces[n_face_index]; + n_face.visited = true; + + // dims is still the same, as we have the same normal! 
+ // TODO OPT this parses the dims+normaldirection again + // even though the normal is the same => just project onto af + project_face_onto_aggregate(n_face, dims, n_face_index, af); + + // Note: we do not know from which edge we came, so just check them all + // Edge#0 + check_edge(n_face.vertices[0], n_face.vertices[1]); + // Edge#1 + check_edge(n_face.vertices[1], n_face.vertices[2]); + + if (n_face.isQuad()) + { + // Edge#2 + check_edge(n_face.vertices[2], n_face.vertices[3]); + // Edge#3 + check_edge(n_face.vertices[3], n_face.vertices[0]); + } + else + { + // Edge#2 + check_edge(n_face.vertices[2], n_face.vertices[0]); + } + } + // We're done walking => get next one + } + + assert(false && "Should never happen y.y"); + } + + // - every pattern has 8*8 "face-states", each represented by 4 bits + // 8 "face-states" are packed into one uint32_t + // => 8*uint32_t + // - every pattern has an equal sized mask + // => 2*[8*uint32_t] + // - every trig-FaceType has 8 patterns, depending on size + // => 8*[2*8*uint32_t] + // - Note: quads are not matched via patterns + // - There are 4 trig-FaceType + // => 4*[8*2*8*uint32_t] + // Access by + // 1. get offset for facetype + // 2. get offset for size + // 3. get offset for mask/pattern + constexpr uint32_t patterns[4*8*2*8] = + { + // todo lol + }; + + inline uint32_t shiftPattern(const uint32_t* _pattern, uint8_t _x, uint8_t _y) + { + return _pattern[_y] >> (_x * 4); + }; + + // _size==0 means a single face + // _size==1 means a 2x2 face + // _size==2 means a 3x3 face + // etc.. 
+ bool test_pattern_match(const uint32_t* statePattern, FaceType _type, uint32_t _x, uint32_t _y, uint32_t _size) + { + // shifting a pattern "outside" the 8x8 area is not allowed + // => a 8-wide pattern (aka _size==7) must have _x==_y==0 + // => a 7-wide pattern (aka _size==6) must have _x<=1 && _y<=1 + // => a 1-wide pattern (aka _size==0) can have _x<=7 && _y<=7 + if (_x > 7 - _size || _y > 7 - _size) + return false; + + const uint32_t faceTypeOffset = (_type - 2) * (8*2*8); + static_assert(FaceType::TrigBL == 2); // if this changes, adjust the offset calculation (aka remove the '-2') + static_assert(FaceType::TrigBR == 3); + static_assert(FaceType::TrigTR == 4); + static_assert(FaceType::TrigTL == 5); + const uint32_t sizeOffset = _size * (2*8); + const uint32_t maskOffset = 8; // aka next pattern + + const uint32_t* mask = patterns + faceTypeOffset + sizeOffset; + const uint32_t* pattern = mask + maskOffset; + + // trig-Patterns are always quadratic in size + // we must check _size+1 rows + + // TODO BENCHMARK variants + + // Variant I (mask first + quick exit) + for (int r = 0; r <= _size; r++) + { + uint32_t row = statePattern[r] & shiftPattern(mask, _x, _y); + row ^= shiftPattern(pattern, _x, _y); + // if the rows are equal the XOR results in a 0 + if (row != 0) + return false; + } + return true; + + // // Variant II (collect) + // bool res = true; + // for (int r = 0; r <= _size; r++) + // { + // uint32_t row = statePattern[r] & shiftPattern(mask, _x, _y); + // row ^= shiftPattern(pattern, _x, _y); + // // if the rows are equal the XOR results in a 0 + // res &= (row == 0); + // } + // return res; + + // // Variant III (xor first + quick exit) + // for (int r = 0; r <= _size; r++) + // { + // uint32_t row = statePattern[r] ^ shiftPattern(pattern, _x, _y); + // // if the rows are equal the XOR results in a 0 + // // if we're lucky the surrounding is empty and we have a matching row already + // if (row == 0) + // continue; + + // row &= shiftPattern(mask, 
_x, _y); + // if (row != 0) + // return false; + // } + + assert(false); + return true; // never reached + } + + // bit-pack a pattern row + // constexpr uint32_t PR() + // { + + // } + + // TODO rename texture_stitching + void optimize_faces(std::vector& block_faces, const std::vector& _aggregate_faces) + { + // 4. optimize_faces(AG) + // tries to find bigger faces, by combining some + // promotes one face + // sets others invalid + // must generate texture/patch and set texture_ids to promoted face + + // TODO must track generated patches on corresponding block_faces + + // TODO check in ALL PHASES, that the iteration order is y,x for better cache performance + + // TODO OPT we only care about trigs in the statePattern, we do not use the free bit in there + // => we only need 2 bits per triangle <.< + // that would allow us to pack 32 trigs into one uint64, IF we can get the shifting right + // build TrigPattern struct with comparision methods '.matches(other, atX, atY)' + + for (const AggregateFace& af : _aggregate_faces) + { + // build state pattern + uint32_t statePattern[8] = {}; + uint64_t occupationState = 0; // at beginning all are free, we mark all FaceType::None as used + for (uint8_t y = 0; y < 8; y++) + for (uint8_t x = 0; x < 8; x ++) + { + FaceType faceType = block_faces[af.ids[x + y * 7]].faceType; + static_assert(FaceType::None == 0); // if this changes, need to adjust the bitpacking! + // y gives us the row + // x shifts the facetype to the correct position + // free bit is left ==0 + statePattern[y] |= (((uint32_t)faceType) >> (x * 4)); + + occupationState |= (faceType == FaceType::None) >> (x + y * 8); + } + + struct TrigProxy + { + uint8_t x; // [0;7] + uint8_t y; // [0;7] + FaceType faceType; // TODO these are only trigs so 4-bits... + // TODO bit packing? 
[ty|yyy|xxx] + // just add bit-width to types ;) + // try padding also + }; + const int TrigProxySize = sizeof(TrigProxy); + + // TODO put somewhere global + struct Bins { + // Note: There is a maximum amount of how many times we can detect a trig of some size + // Of size 7 there can only be a single trig + // Of size 6 there can be a max of 2 trig detections + // Of size 5 there can be a max of 3 trig detections + // Of size 4 there can be a max of 4 trig detections + // Of size 3 there can be a max of 7 trig detections + // Of size 2 there can be a max of 12 trig detections + // Of size 1 there can be a max of 25 trig detections + // Of size 0 there can be a max of 64 trig detections + const uint8_t startOffsets[8] = {0, 64, 89, 101, 108, 112, 115, 117}; // which size starts where + uint8_t nextFreeIndex[8] = {}; // one per size, relative to start offset + TrigProxy bins[118] = {}; + + // Careful to access only valid indices - There are no bound checks! + TrigProxy& at(uint8_t _size, uint8_t _index) + { + assert((startOffsets[_size] + _index) < startOffsets[_size+1]); + return bins[startOffsets[_size] + _index]; + } + + // Careful to push only valid amounts per _size - There are no bound checks! 
+ void emplace_back(uint8_t _size, uint8_t _x, uint8_t _y, FaceType _faceType) + { + assert((_size == 7 && nextFreeIndex[_size] == 0) || (nextFreeIndex[_size] < (startOffsets[_size+1]-startOffsets[_size+1]))); + TrigProxy& trig = at(_size, nextFreeIndex[_size]++); + trig.x = _x; + trig.y = _y; + trig.faceType = _faceType; + } + + // Returns how many entries there are for _size + uint8_t count(uint8_t _size) + { + return nextFreeIndex[_size]; + } + + void clear() + { + bx::memSet(nextFreeIndex, 0x0, 8 * sizeof(uint8_t)); + bx::memSet(bins, 0x0, 92 * sizeof(TrigProxy)); + } + }; + const int BinsSize = sizeof(Bins); + + Bins bins; + + // Find ALL triangles (represented by biggest of the family) + // each is uniquely identified by x,y,type,size + // TODO OPT try to restrict inner loops by max possible size (based on x,y) + // TODO OPT try to replace switch with generic offset array i.e. TrigBL: x=x-size y=y + for (uint8_t y = 0; y < 8; y++) + for (uint8_t x = 0; x < 8; x ++) + { + FaceType faceType = (FaceType)((statePattern[y] >> ((7 - x) * 4)) & 0b1111); + uint8_t biggestX = 0; + uint8_t biggestSize = 0; + + switch (faceType) + { + case FaceType::TrigBL : + { + biggestX = x; + biggestSize = 0; + + for (uint8_t size = 1; size < 8; size++) + { + if (!test_pattern_match(statePattern, FaceType::TrigBL, x-size, y, size)) + break; + biggestX = x-size; + biggestSize = size; + } + + bins.emplace_back(biggestSize, biggestX, y, FaceType::TrigBL); + } + break; + case FaceType::TrigBR : + { + biggestSize = 0; + + for (uint8_t size = 1; size < 8; size++) + { + if (!test_pattern_match(statePattern, FaceType::TrigBR, x, y, size)) + break; + biggestSize = size; + } + + bins.emplace_back(biggestSize, x, y, FaceType::TrigBR); + } + break; + case FaceType::TrigTL : + { + biggestSize = 0; + + for (uint8_t size = 1; size < 8; size++) + { + if (!test_pattern_match(statePattern, FaceType::TrigTL, x, y, size)) + break; + biggestSize = size; + } + + bins.emplace_back(biggestSize, x, y, 
FaceType::TrigTL); + } + break; + case FaceType::TrigTR : + { + biggestX = x; + biggestSize = 0; + + for (uint8_t size = 1; size < 8; size++) + { + if (!test_pattern_match(statePattern, FaceType::TrigTR, x-size, y, size)) + break; + biggestX = x-size; + biggestSize = size; + } + + bins.emplace_back(biggestSize, biggestX, y, FaceType::TrigTR); + } + break; + + case FaceType::None : + case FaceType::Quad : + default: + // Don't care about None or Quads + break; + } + } + + + // 4 trig-faceTypes + // 8 sizes each then shift it + uint64_t occupationMasks[4*8] = + { + // TODO + }; + + auto getOccMask = [&occupationMasks](FaceType _faceType, uint8_t _trigSize) -> uint64_t + { + return occupationMasks[((_faceType - 2) * 8) + _trigSize]; + }; + + // Found one! Now merge it! + auto stitchTriangle = [&block_faces, &occupationState, &getOccMask](const TrigProxy& _trig, uint8_t _trigSize) + { + // TODO stitch trigs together + // create temp texture array & load into atlas + // add patch to block + // mark other block_trigs as invalid + + switch (_trig.faceType) + { + case FaceType::TrigBL : + { + + } + break; + case FaceType::TrigBR : + { + + } + break; + case FaceType::TrigTL : + { + + } + break; + case FaceType::TrigTR : + { + + } + break; + + case FaceType::None : + case FaceType::Quad : + default: + assert(false); // should not happen y.y + break; + } + + // mark as used in occupationState + occupationState |= getOccMask(_trig.faceType, _trigSize); + }; + + // Priority: + // We prefer Double-Trigs over Single-Trigs of SAME SIZE + // Also, Double-Trigs only exist with size <=3 + // => ST7>ST6>ST5>ST4>DT3>ST3>DT2>ST2>DT1>ST1>DT0>ST0 + for (uint8_t size = 7; size >= 4; size--) + { + // the first trig we pick is guaranteed to be free + // Also, if we find one of such size, the smaller one here cannot be free anymore + if (bins.count(size) > 0) + { + const TrigProxy& trig = bins.at(size, 0); // We just choose the index 0 + // pretty much optimal as its on the edge of the af and 
leave space for other big trigs + // also there just might not be one at index 1... just pick index 0 :) + stitchTriangle(trig, size); + break; // the smaller one cannot be free anymore! + } + } + + // Find Double-Trigs + // They have the greatest potential as they combine 2 trigs and collapse to only one + // Iterate bins and search for ALL double-trigs (biggest->smallest) + // We prefer Double-Trigs over Single-Trigs of SAME SIZE + for (uint8_t size = 3; size >= 0; size--) + { + const uint8_t count = bins.count(size); + + // Test for Double-trigs + for (uint8_t i = 0; i < count; i++) + { + const TrigProxy& trig = bins.at(size, i); + + switch (trig.faceType) + { + case FaceType::TrigBL : + { + // still free? + uint64_t occMask = occupationMasks[(FaceType::TrigBL-2) + size]; + uint64_t stencil = occupationState & occMask; + if (stencil == 0) + { // is free + // check if double trig + // then pick it + + } + } + break; + case FaceType::TrigBR : + { + + } + break; + case FaceType::TrigTL : + { + + } + break; + case FaceType::TrigTR : + { + + } + break; + + case FaceType::None : + case FaceType::Quad : + default: + assert(false); // There should only be trigs in those bins <.< + break; + } + + } + + // Test for Single-trigs + for (uint8_t i = 0; i < count; i++) + { + const TrigProxy& trig = bins.at(size, i); + switch (trig.faceType) + { + case FaceType::TrigBL : + { + // still free? 
+ uint64_t occMask = occupationMasks[(FaceType::TrigBL-2) + size]; + uint64_t stencil = occupationState & occMask; + if (stencil == 0) + { // is free + // pick it + } + else if (stencil == occMask) + { // entire trig area is already used + // do nothing with this one => just continue + } + else + { // area is partially used + // add next smaller child + + } + + } + break; + case FaceType::TrigBR : + { + + } + break; + case FaceType::TrigTL : + { + + } + break; + case FaceType::TrigTR : + { + + } + break; + + case FaceType::None : + case FaceType::Quad : + default: + assert(false); // There should only be trigs in those bins <.< + break; + } + + } + } + + // Merge quads + // There are no more trigs left! Lets merge quads + // use occupation state. Any still free bits, are quads! + + +////////////////////////////////////////////////////////////////////// + // Pass 1: Scan for double-trigs + // They have the greatest potential as they combine 2 trigs and collapse to only one + + // 1.1 scan for double-trigs-apex + // every trig-FaceType represents a double_trig type + // (pairing chosen by slightly better memory access pattern - maybe :/) + + // > mark pattern as used by + // |= mask & 0b1000100010001000 + + + + for (uint8_t x = 0; x < 8; x ++) + for (uint8_t y = 0; y < 8; y++) + { + // TODO query statePattern for the face at this location ? + FaceType faceType = block_faces[af.ids[x + y * 7]].faceType; + switch (faceType) + { + case FaceType::None: // fallthrough + case FaceType::Quad: + { // Ignored + } + break; + case FaceType::TrigBL: + { // BL -> BL+BR + + for (uint32_t size = 0; size < 8-bx::max(x,y); size++) + { + + } + // this trig still free? + test_pattern_match(statePattern, FaceType::TrigBL, x, y, 0); + + // how about its partner? 
+ if (x-1 >= 0) + { + test_pattern_match(statePattern, FaceType::TrigBR, x-1, y, 0); + + } + } + break; + case FaceType::TrigBR: + { // BR -> BR+TR + } + break; + case FaceType::TrigTL: + { // TR -> TR+TL + } + break; + case FaceType::TrigTR: + { // TL -> TL+BL + } + break; + default: + assert(false); + break; + } + } + + + + // Pass 2: Scan for (single-)trigs + // They use up some quads and collapse to one trig, where the individual trigs would not be able to grow bigger anyway + + // Pass 3: Scan for quads + // There are no more trigs left! Only quads + // Try to merge as big as possible + + + + + } + } + + void generate_vertices(BlockModel& _model, std::vector& block_faces) + { + // 5. finally! generate vertex buffer + std::vector buffer; + // TODO OPT we could count all valid faces (and their types) + // then we know we will generate exactly 3* as many vertices + // Then we can reserve and write directly into the gfx buffer + // NO! Just reuse the buffer and its fine :D + + for (BlockFace& b_face : block_faces) + { + if (!b_face.valid) + continue; + + // Texture already available? + if (b_face.texture_ids[0] == 0) + { // no, so this face is a single face => create patch + uint32_t patch_id = gfx::add_patch(1, 1, &b_face.comp_texture_id); + _model.patch_refs.push_back(patch_id); + gfx::get_patch_texture_ids(patch_id, b_face.texture_ids); + } + + for (uint_fast8_t i = 0; i < TC::Count; i++) + assert(b_face.texture_ids[i] != 0); + + // TexCorner aka tex_apex determines the texture rotation on the face + TC apex = b_face.tex_apex; // vertices[1] corresponds to this tex-corner/tex-side + if (apex % 2 != 0) + { + apex = (TC)((apex+TC::Count - 1) % TC::Count); + logWarn("45degree sampling is not (yet?) supported! 
=> reverting the face to normal (90degree) sampling."); + } + + // b_face.vertices[1] is the triangle apex + if (b_face.isQuad()) + { + buffer.push_back(VI(b_face.texture_ids[rotL(apex)], rotL(apex), b_face.vertices[0])); + buffer.push_back(VI(b_face.texture_ids[apex], apex, b_face.vertices[1])); + buffer.push_back(VI(b_face.texture_ids[rotR(apex)], rotR(apex), b_face.vertices[2])); + + apex = rotL(rotL(apex)); // rotate 180 degrees + buffer.push_back(VI(b_face.texture_ids[rotL(apex)], rotL(apex), b_face.vertices[2])); + buffer.push_back(VI(b_face.texture_ids[apex], apex, b_face.vertices[3])); + buffer.push_back(VI(b_face.texture_ids[rotR(apex)], rotR(apex), b_face.vertices[0])); + + // buffer.push_back(VI(b_face.texture_ids[0], rotL(TC::BotLeft), b_face.vertices[0])); + // buffer.push_back(VI(b_face.texture_ids[1], TC::BotLeft, b_face.vertices[1])); + // buffer.push_back(VI(b_face.texture_ids[2], rotR(TC::BotLeft), b_face.vertices[2])); + + // buffer.push_back(VI(b_face.texture_ids[2], rotL(TC::TopRight), b_face.vertices[2])); + // buffer.push_back(VI(b_face.texture_ids[3], TC::TopRight, b_face.vertices[3])); + // buffer.push_back(VI(b_face.texture_ids[0], rotR(TC::TopRight), b_face.vertices[0])); + } + else // is trig + { + buffer.push_back(VI(b_face.texture_ids[rotL(apex)], rotL(apex), b_face.vertices[0])); + buffer.push_back(VI(b_face.texture_ids[apex], apex, b_face.vertices[1])); + buffer.push_back(VI(b_face.texture_ids[rotR(apex)], rotR(apex), b_face.vertices[2])); + } + + + } + + _model.gfx_ref = gfx::add_block_model(buffer.data(), buffer.size()); + + // TODO at end: is resulting block valid? in regard to gameplay rules.. 
+ } + + bool weld_block(BlockModel &_model) + { + // intermediate working set // TODO reuse memory by placing it somewhere global + std::vector block_faces; + auto edge_map = emilib::HashMap>(); + std::vector aggregate_faces; + + if (!factory::flood_light2(_model, block_faces, edge_map)) + return false; + + walk_surface(block_faces, edge_map, aggregate_faces); + + optimize_faces(block_faces, aggregate_faces); + + generate_vertices(_model, block_faces); + + return true; + } + +} diff --git a/src/factory.h b/src/factory.h new file mode 100644 index 0000000..da67f90 --- /dev/null +++ b/src/factory.h @@ -0,0 +1,27 @@ +#pragma once + +#include "space_math.h" +#include "graphics.h" +#include "world.h" + +namespace factory +{ + // Run the amazing block welding algorithm + // Returns true on success, false otherwise + bool weld_block(BlockModel& _model); + + // Returns pointer to generated data + // Data array is always 24 elements long + // Attention: Data ist only valid until next call + GPULineVertex *generate_AABB_outline(const AABB &_aabb); + + // Returns pointer to generated data + // Attention: Data ist only valid until next call + uint32_t *generate_debug_full_block_1_model(); // 36 elements + uint32_t *generate_debug_full_block_2_model(); // 36 elements + uint32_t *generate_debug_full_block_3_model(); // 36 elements + uint32_t *generate_debug_pyramid_block_model(); // 18 elements + + void generate_block_outline(); + +} diff --git a/src/gameloop.cpp b/src/gameloop.cpp new file mode 100644 index 0000000..7f0e2ed --- /dev/null +++ b/src/gameloop.cpp @@ -0,0 +1,448 @@ +#include "gameloop.h" +#include +#include + +#include "graphics.h" +#include "world.h" +#include "renderer.h" +#include "util.h" +#include "factory.h" +#include "lib/camera.h" +#include "space_input.h" +#include "config.h" + +std::atomic_bool game_shutdown_requested = false; +// std::atomic_flag frame_ready_flag = ATOMIC_FLAG_INIT; // frame was rendered - ready for new data +// std::atomic_flag 
input_ready_flag = ATOMIC_FLAG_INIT; // input was updated - new states can be read from the input system +// std::atomic_flag bgfx_ready_flag = ATOMIC_FLAG_INIT; // bgfx init is done +std::atomic_bool frame_ready_flag; // frame was rendered - ready for new data +std::atomic_bool input_ready_flag; // input was updated - new states can be read from the input system +std::atomic_bool bgfx_ready_flag; // bgfx init is done +SlotQueue event_queue(4); +uint64_t frameCounter = 0; +uint32_t resetFlags = BGFX_RESET_VSYNC; +Camera camera; +extern uint32_t window_width; // defined in main.cpp +extern uint32_t window_height; // we only read the value here + +void init(bgfx::PlatformData *_platformData) +{ + // Initialize bgfx using the native window handle + bgfx::Init init; + init.type = bgfx::RendererType::Vulkan; + init.platformData = *_platformData; + init.resolution.width = window_width; + init.resolution.height = window_height; + init.resolution.reset = resetFlags; + if (!bgfx::init(init)) + DIE("Could not initialize bgfx\n"); + + // Signal main thread that initialization is done + // bgfx_ready_flag.test_and_set(); + bgfx_ready_flag = true; + + bgfx::setViewClear(0, BGFX_CLEAR_COLOR | BGFX_CLEAR_DEPTH, 0x443355FF); + bgfx::setViewRect(0, 0, 0, bgfx::BackbufferRatio::Equal); + + // Render initial frame#0 + flag_wait(&input_ready_flag); + bgfx::touch(0); + bgfx::frame(); + + // Perform startup checks and abort if necessary + startup_checks(); + + // Initialize systems + gfx::init(); + world::init(); + + // Camera + camera = camera_init(); + camera.target_position = {0, 1, -10}; + camera.mode = CAMERA_MODE_FIRST_PERSON; + camera.minPitch = -bx::kPiHalf; + camera.maxPitch = bx::kPiHalf; +} + +void debug_populate_world() +{ + // Add some debug things to the world + + // add models + // BlockModel (Single Debug Component) + uint32_t m0, m1, m2, m3; + + m0 = world::add_block_model(); + uint32_t comps[512] = {}; + comps[0] = 24; // debug component cube + 
world::set_block_model_components(m0, comps); + + m1 = world::add_block_model(); + bx::memSet(comps, 0x0, 512*sizeof(int32_t)); + comps[1] = 24; // debug component cube + comps[2] = 24; // debug component cube + world::set_block_model_components(m1, comps); + + // TODO add different models + m2 = m3 = m0; + + // add grids + uint32_t g0, g1; + g0 = world::add_grid(Vec3(0.0f), Quat::unit()); + // Vec3 g1_pos = Vec3(-5.0f, -5.0f, -5.0f); + Vec3 g1_pos = Vec3(0.0f, -0.5f, -7.0f); + // g1 = world::add_grid(g1_pos, bx::fromEuler(Vec3(bx::kPiQuarter))); + g1 = world::add_grid(g1_pos, Quat::unit()); + + // add chunks + uint32_t c0, c1, c2, c3; + c0 = world::add_chunk(g0, 0, 0, 0); + c1 = world::add_chunk(g0, 1, 0, 0); + c2 = world::add_chunk(g1, 0, 0, 0); + c3 = world::add_chunk(g0, 1, 1, 0); + + // add a few blocks + uint32_t b0, b1, b2, b3, b4; + b0 = world::add_block(c0, block_transform_pack(7, 2, 1, 0), m0, m0); + b1 = world::add_block(c1, block_transform_pack(0, 2, 1, 0), m1, m1); + b2 = world::add_block(c2, block_transform_pack(0, 0, 0, 0), m1, m1); + b3 = world::add_block(c2, block_transform_pack(1, 0, 0, 0), m2, m2); + + b4 = world::add_block(c0, block_transform_pack(0, 0, 0, 0), m2, m2); + b4 = world::add_block(c0, block_transform_pack(0, 2, 0, 0), m1, m1); + b4 = world::add_block(c0, block_transform_pack(0, 4, 0, 0), m1, m1); + b4 = world::add_block(c0, block_transform_pack(0, 6, 0, 0), m1, m1); + b4 = world::add_block(c0, block_transform_pack(0, 7, 0, 0), m2, m2); + b4 = world::add_block(c0, block_transform_pack(0, 0, 7, 0), m2, m2); + b4 = world::add_block(c0, block_transform_pack(7, 0, 0, 0), m2, m3); + + // setup view into world + camera_look_at(&camera, bx::sub(Vec3(0.0f), camera_eye(&camera)), Vec3(0.0f, 1.0f, 0.0f)); +} + +bool debug_animate = true; +void debug_animate_world() +{ + if (debug_animate) + { + const float increment = bx::toRad(0.2f); + + for (uint32_t grid_id : world::get_grid_list()) + { + if (grid_id == 2) + { + const Grid &grid = 
world::get_grid(grid_id); + Vec3 pos = grid.position; + pos.x = pos.x * bx::cos(increment) - pos.z * bx::sin(increment); + pos.z = pos.x * bx::sin(increment) + pos.z * bx::cos(increment); + world::update_grid_position(grid_id, pos); + return; + } + } + + debug_animate = false; + } +} + +void handle_event_queue() +{ + // Handle main thread events + for (Event *ev = event_queue.begin_pop(); ev != NULL; event_queue.end_pop(), ev = event_queue.begin_pop()) + { + switch (ev->type) + { + case Resize: + bgfx::reset(window_width, window_height, resetFlags); + bgfx::setViewRect(0, 0, 0, bgfx::BackbufferRatio::Equal); + break; + + default: + logWarn("main: Encountered unknown event type => ignoring\n"); + break; + } + } +} + +int32_t runGameloopThread(bx::Thread *_thread_self, void *_platformData) +{ + using input::Axis; + using input::Button; + + init((bgfx::PlatformData *)_platformData); + + debug_populate_world(); + + uint16_t num_block_selection = 0; + + while (!game_shutdown_requested) + { + + flag_wait(&input_ready_flag); // wait for input updates to be written to input system + + // input::detect_input(); + + camera_move(&camera, Vec3( + input::axis_value_is(Axis::MoveForward), + input::axis_value_is(Axis::MoveUp), + input::axis_value_is(Axis::MoveRight))); + + camera_rotate(&camera, Vec3( + input::axis_value_is(Axis::CameraPitch), + input::axis_value_is(Axis::CameraYaw), + 0.0f)); + + if (input::button_pressed(Button::LookAtOrigin)) + { + const Vec3 dir = cm_normalizeVec3(cm_negate(camera_eye(&camera))); + camera_look_at(&camera, dir, CAMERA_WORLD_UP); + } + + game_shutdown_requested = input::button_released(Button::GameShouldExit); + + // At this point we want to make sure that the frame actually got rendered, before we update world data. + // So we wait for the rendering thread to signal this. + // TODO use bgfx::update(, releaseFunction) to determine when updating of individual buffer is allowed + // instead of this global flag. 
Maybe even allow per buffer granularity. + flag_wait(&frame_ready_flag); // wait for last frame to be rendered before preparing new one (aka writing to buffers) + + // logTrace(mfc, "API-THREAD: frame #%d\n", frameCounter); + + // Handle inputs etc. + handle_event_queue(); + + bgfx::dbgTextClear(); + + // This dummy draw call is here to make sure that view 0 is cleared if no other draw calls are submitted to view 0. + bgfx::touch(0); + + // Enable stats or debug text. + bgfx::setDebug(input::button_is_down(Button::ShowStats) ? BGFX_DEBUG_STATS : BGFX_DEBUG_TEXT); + + float view[16]; + camera_view_matrix(&camera, view); + + float proj[16]; + bx::mtxProj(proj, 60.0f, float(window_width) / float(window_height), 0.1f, 100.0f, bgfx::getCaps()->homogeneousDepth); + bgfx::setViewTransform(0, view, proj); + + debug_animate_world(); // Move things around + + // Update Game State + world::update(); + num_block_selection = gfx::get_num_block_selection(); // Note: gfx::update() clears this to 0 + gfx::update(); // upload new data onto GPU + + // generate block instance matrices + renderer::dispatch_cubes_cs(camera_eye(&camera), num_block_selection); + // draw the prepared indirect buffer + renderer::draw_cubes(num_block_selection); + + // Block picking + Vec4 red(1.0, 0.0, 0.0, 1.0); + Vec4 green(0.0, 1.0, 0.0, 1.0); + + Ray ray = { + .position = camera_eye(&camera), + .direction = camera_forward(&camera)}; + // InvRay inv_ray = InverseRay(ray); + + float grid_mtx[16]; + float chunk_mtx[16]; + float block_mtx[16]; + float inverse_mtx[16]; + + // iterate all grids + // TODO optimize with manual ray position offset instead of inv_matrices + // maybe even sorted testing? (a grid further away than the closest block is irrelevant?) + // maybe algorithmic solution? walk ray through 3D space and query specific chunks/blocks? 
+ const std::vector &grids = world::get_grid_list(); + float grid_distance = FLT_MAX, chunk_distance = FLT_MAX, block_distance = FLT_MAX; + float closest_grid_distance = FLT_MAX, closest_chunk_distance = FLT_MAX, closest_block_distance = FLT_MAX; + uint32_t closest_grid_id = 0, closest_chunk_id = 0, closest_block_id = 0; + for (uint32_t grid_id : grids) + { + const Grid &grid = world::get_grid(grid_id); + world::get_grid_transform_mtx(grid_id, grid_mtx); + bx::mtxInverse(inverse_mtx, grid_mtx); + if (CheckCollisionRayOrientedBox(ray, grid.aabb, inverse_mtx, grid_distance)) + { // hit grid aabb => check its chunks + + if (closest_chunk_id == 0 && grid_distance < closest_grid_distance) + { + closest_grid_distance = grid_distance; + closest_grid_id = grid_id; + } + + for (uint32_t chunk_id : grid.chunk_id_list) + { + world::get_chunk_transform_mtx(chunk_id, chunk_mtx); + bx::mtxInverse(inverse_mtx, chunk_mtx); + if (CheckCollisionRayOrientedBox(ray, CHUNK_AABB, inverse_mtx, chunk_distance)) + { // hit chunk aabb => check its blocks + + if (closest_block_id == 0 && chunk_distance < closest_chunk_distance) + { + closest_grid_distance = grid_distance; + closest_grid_id = grid_id; + closest_chunk_distance = chunk_distance; + closest_chunk_id = chunk_id; + } + + const Chunk &chunk = world::get_chunk(chunk_id); + for (uint32_t block_id : chunk.block_ids) + { + world::get_block_transform_mtx(block_id, block_mtx); + bx::mtxInverse(inverse_mtx, block_mtx); + if (CheckCollisionRayOrientedBox(ray, BLOCK_AABB, inverse_mtx, block_distance) && block_distance < closest_block_distance) + { // hit block aabb + closest_grid_distance = grid_distance; + closest_grid_id = grid_id; + closest_chunk_distance = chunk_distance; + closest_chunk_id = chunk_id; + closest_block_id = block_id; + closest_block_distance = block_distance; + + // TODO perform geometry collision check aka ray-triangle-checks + } + } + } + } + } + } + + // Draw outlines + info text + Vec3 eye = camera_eye(&camera); + 
Vec3 forward = camera_forward(&camera); + Vec3 point; + if (closest_grid_id > 0) + { + const Vec4 grid_color = Vec4(114.0f / 255.0f, 159.0f / 255.0f, 207.0f / 255.0f, 1.0f); + world::get_grid_transform_mtx(closest_grid_id, grid_mtx); + renderer::draw_lines(gfx::get_aabb_outline(world::get_grid(closest_grid_id).aabb), grid_color, grid_mtx); + + point = bx::add(eye, bx::mul(forward, closest_grid_distance)); + bgfx::dbgTextPrintf(0, 0, 0x09, "Grid @ %5.2f, %5.2f, %5.2f dist=%5.2f", point.x, point.y, point.z, closest_grid_distance); + } + + if (closest_chunk_id > 0) + { + const Vec4 chunk_color = Vec4(52.0f / 255.0f, 226.0f / 255.0f, 226.0f / 255.0f, 1.0f); + world::get_chunk_transform_mtx(closest_chunk_id, chunk_mtx); + renderer::draw_lines(gfx::get_chunk_aabb_outline(), chunk_color, chunk_mtx); + + point = bx::add(eye, bx::mul(forward, closest_chunk_distance)); + bgfx::dbgTextPrintf(0, 1, 0x0B, "Chunk @ %5.2f, %5.2f, %5.2f dist=%5.2f", point.x, point.y, point.z, closest_chunk_distance); + } + + if (closest_block_id > 0) + { + const Vec4 block_color = Vec4(196.0f / 255.0f, 160.0f / 255.0f, 0.0f, 1.0f); + world::get_block_transform_mtx(closest_block_id, block_mtx); + renderer::draw_lines(gfx::get_block_aabb_outline(), block_color, block_mtx); + + point = bx::add(eye, bx::mul(forward, closest_block_distance)); + bgfx::dbgTextPrintf(0, 2, 0x06, "Block @ %5.2f, %5.2f, %5.2f dist=%5.2f", point.x, point.y, point.z, closest_block_distance); + } + + // Rotate Block + if (closest_block_id > 0 && input::button_pressed(Button::RotateBlockInc)) + { + world::rotate_block(closest_block_id, true); + } + else if (closest_block_id > 0 && input::button_pressed(Button::RotateBlockDec)) + { + world::rotate_block(closest_block_id, false); + } + + // Remove Block + else if (closest_block_id > 0 && input::button_pressed(Button::RemoveBlock)) + { // there is a block in sight + world::remove_block(closest_block_id); // TODO maybe only stop from being selected for rendering and wait for server 
ack/refuse + // maybe remove from the world, but keep around? PS do not decrement model counter -> gpu unload? world puts it on sideline(just slotbuffer thing with linear search) + // Note: We do not remove chunks or grids. Not even when empty. The server will tell us when to. + } + + // Place Block + else if (closest_block_id > 0) + { + // Calc new block direction + Vec3 hitpoint = bx::add(eye, bx::mul(forward, closest_block_distance)); + Vec3 block_center = world::get_block_world_position(closest_block_id); + + Vec3 dir = bx::sub(hitpoint, block_center); + dir = bx::mul(dir, 2.001f); // extend 0.5 coordinates to <1.0 + int8_t x = (int8_t)dir.x; // These are now either -1, 0 or 1 + int8_t y = (int8_t)dir.y; // We only allow one direction through + int8_t z = (int8_t)dir.z; + assert(x == -1 || x == 0 || x == 1); + assert(y == -1 || y == 0 || y == 1); + assert(z == -1 || z == 0 || z == 1); + if (x != 0) + { + y = z = 0; + } + if (y != 0) + { + x = z = 0; + } + if (z != 0) + { + x = y = 0; + } + + // Get selected block and chunk coordinates in chunk + const Block &block = world::get_block(closest_block_id); + const Chunk &chunk = world::get_chunk(block.parent_ref); + + uint16_t block_offset_x, block_offset_y, block_offset_z, block_orientation; + block_transform_unpack(block.transform, block_offset_x, block_offset_y, block_offset_z, block_orientation); + + uint16_t chunk_offset_x = chunk.parent_offset_x << 3; // <<3 == *8 + uint16_t chunk_offset_y = chunk.parent_offset_y << 3; + uint16_t chunk_offset_z = chunk.parent_offset_z << 3; + + // Get new block offset + block_offset_x += x; + block_offset_y += y; + block_offset_z += z; + + if (block_offset_x >= 0 && block_offset_x < 8 && + block_offset_y >= 0 && block_offset_y < 8 && + block_offset_z >= 0 && block_offset_z < 8) + { // new block is still in same chunk + + if (input::button_pressed(Button::PlaceBlock)) + { // place it + uint16_t new_block_transform = block_transform_pack(block_offset_x, block_offset_y, 
block_offset_z, 0); + if (chunk.block_ids[block_transform_to_chunk_index(new_block_transform)] == 0) + { // no block yet + world::add_block(closest_chunk_id, block_transform_pack(block_offset_x, block_offset_y, block_offset_z, 0), 1, 1); // TODO do not hardcode block model + } + } + else + { // render outline + const Vec4 block_color = Vec4(0.0f / 255.0f, 0.0f / 255.0f, 0.0f / 255.0f, 1.0f); + world::get_block_transform_mtx(block_offset_x, block_offset_y, block_offset_z, 0, block.parent_ref, block_mtx); + renderer::draw_lines(gfx::get_block_aabb_outline(), block_color, block_mtx); + } + } // TODO new block in neighboring chunk + } + + // Crosshair + renderer::draw_crosshair(&camera); + + // Advance to next frame. Main thread will be kicked to process submitted rendering primitives. + bgfx::frame(); + + frameCounter++; + + if (config::DEBUG_EXIT_AFTER_FRAME > 0 && frameCounter > config::DEBUG_EXIT_AFTER_FRAME) + game_shutdown_requested = true; + } + + // Release resources + gfx::destroy(); + + bgfx::shutdown(); + return 0; +} diff --git a/src/gameloop.h b/src/gameloop.h new file mode 100644 index 0000000..1bd04b5 --- /dev/null +++ b/src/gameloop.h @@ -0,0 +1,73 @@ +#pragma once + +#include +#include +#include +#include +#include "data/slot_queue.h" + +typedef enum event_type +{ + Exit, // Application should shutdown + Key, // Keyboard input + MouseButton, // Mouse buttons + MouseCursor, // Mouse movement input + MouseScroll, // Mouse scrolling + Resize, // Window resized + Joystick, // Joystick connected/disconnected +} EventType; + +typedef struct event +{ + EventType type; + + // struct exit_event {}; + // struct resize_event {}; + struct key_event + { + int keycode; + int action; + }; + struct mouse_button_event + { + int button; + int action; + }; + struct cursor_event + { + float xpos; + float ypos; + }; + struct scroll_event + { + float xoffset; + float yoffset; + }; + struct joystick_event + { + int id; + int event; + }; + + union // purposeful anonymous 
union, as struct are named + { + struct key_event key; + struct mouse_button_event mouse_button; + struct cursor_event cursor; + struct scroll_event scroll; + struct joystick_event joystick; + }; +} Event; +constexpr size_t event_size = sizeof(Event); + +extern std::atomic_bool game_shutdown_requested; +// extern std::atomic_flag frame_ready_flag; // frame was rendered - ready for new data +// extern std::atomic_flag input_ready_flag; // input for next frame is ready +// extern std::atomic_flag bgfx_ready_flag; // bgfx init is done +extern std::atomic_bool frame_ready_flag; // frame was rendered - ready for new data +extern std::atomic_bool input_ready_flag; // input for next frame is ready +extern std::atomic_bool bgfx_ready_flag; // bgfx init is done +extern SlotQueue event_queue; +extern uint64_t frameCounter; + +int32_t runGameloopThread(bx::Thread *_thread_self, void *_platformData /*userData*/); diff --git a/src/graphics.cpp b/src/graphics.cpp new file mode 100644 index 0000000..91d40a8 --- /dev/null +++ b/src/graphics.cpp @@ -0,0 +1,453 @@ +#include "graphics.h" +#include +#include +#include "data/slot_buffer.h" +#include "data/first_fit_buffer.h" +#include "data/linear_buffer.h" +#include "config.h" +#include +#include "factory.h" +#include "world.h" +#include "data/texture_atlas.h" + +// Static variables +bgfx::VertexLayout GPUGrid::layout; +bgfx::VertexLayout GPUChunk::layout; +bgfx::VertexLayout GPUBlock::layout; +bgfx::VertexLayout GPUBlockSelection::layout; +bgfx::VertexLayout GPURenderInstance::layout; +bgfx::VertexLayout GPUDummyVertex::layout; +bgfx::VertexLayout GPULineVertex::layout; + +namespace gfx +{ + + /* Graphics data */ + // grids + SlotBuffer cpu_grids(config::INITIAL_NUM_GRIDS); + bgfx::DynamicVertexBufferHandle gpu_grids = BGFX_INVALID_HANDLE; + + // chunks + SlotBuffer cpu_chunks(config::INITIAL_NUM_CHUNKS); + bgfx::DynamicVertexBufferHandle gpu_chunks = BGFX_INVALID_HANDLE; + + // blocks (pre culling) + SlotBuffer 
cpu_blocks(config::INITIAL_NUM_BLOCKS); + bgfx::DynamicVertexBufferHandle gpu_blocks = BGFX_INVALID_HANDLE; + + // block selection (post culling) + LinearBuffer cpu_block_selection(config::INITIAL_BLOCK_SELECTION); // TODO should be uint32_t => use bgfx packing/conversion + bgfx::DynamicVertexBufferHandle gpu_block_selection = BGFX_INVALID_HANDLE; + + // block models (index buffer with block render data) + FirstFitBuffer cpu_block_models(config::INITIAL_BLOCK_MODEL_BUFFER_SIZE); + bgfx::DynamicIndexBufferHandle gpu_block_models = BGFX_INVALID_HANDLE; + + // dummy vertex buffer + bgfx::VertexBufferHandle gpu_dummy_vertex_buffer = BGFX_INVALID_HANDLE; + + // the indirect buffer holds the indirect draw calls generated in the compute shader + bgfx::IndirectBufferHandle gpu_indirect_buffer = BGFX_INVALID_HANDLE; + + // per instance data (matrices) generated by compute shader + bgfx::DynamicVertexBufferHandle gpu_instance_buffer = BGFX_INVALID_HANDLE; + + // line buffer + FirstFitBuffer cpu_lines(config::INITIAL_LINE_BUFFER_SIZE); + bgfx::DynamicVertexBufferHandle gpu_line_buffer = BGFX_INVALID_HANDLE; + + // uniforms + bgfx::UniformHandle texture_atlas_sampler = BGFX_INVALID_HANDLE; + bgfx::UniformHandle u_cubes_compute_params = BGFX_INVALID_HANDLE; + bgfx::UniformHandle u_line_color = BGFX_INVALID_HANDLE; + + // shader + bgfx::ProgramHandle cubes_shader = BGFX_INVALID_HANDLE; + bgfx::ProgramHandle cubes_compute_shader = BGFX_INVALID_HANDLE; + bgfx::ProgramHandle lines_shader = BGFX_INVALID_HANDLE; + + // textures + SlotBuffer cpu_component_textures(config::INITIAL_NUM_COMPONENT_TEXTURES); + TextureAtlas cpu_texture_atlas = TextureAtlas(); + bgfx::TextureHandle gpu_texture_atlas = BGFX_INVALID_HANDLE; + + // lines + uint32_t aabb_outline = 0; // non static + uint32_t chunk_aabb_outline = 0; + uint32_t block_aabb_outline = 0; + + /* Internal functions */ + + static bgfx::ShaderHandle loadShader(const char *filePath) + { + FILE *file = fopen(filePath, "rb"); + if (!file) 
+ DIE("loadShader: fopen(%s) failed\n", filePath); + + fseek(file, 0, SEEK_END); + long fileSize = ftell(file); + fseek(file, 0, SEEK_SET); + + const bgfx::Memory *mem = bgfx::alloc(fileSize + 1); + if (!mem) + DIE("loadShader: bgfx::alloc() failed\n"); + + fread(mem->data, 1, fileSize, file); + mem->data[mem->size - 1] = '\0'; + fclose(file); + + return bgfx::createShader(mem); + } + + /* Public functions */ + + void init() + { + GPUGrid::init(); + GPUChunk::init(); + GPUBlock::init(); + GPUBlockSelection::init(); + GPURenderInstance::init(); + GPUDummyVertex::init(); + GPULineVertex::init(); + + // gpu buffer + gpu_grids = bgfx::createDynamicVertexBuffer(config::INITIAL_NUM_GRIDS, GPUGrid::layout, BGFX_BUFFER_COMPUTE_READ | BGFX_BUFFER_ALLOW_RESIZE); + + gpu_chunks = bgfx::createDynamicVertexBuffer(config::INITIAL_NUM_CHUNKS, GPUChunk::layout, BGFX_BUFFER_COMPUTE_READ | BGFX_BUFFER_ALLOW_RESIZE); + + gpu_blocks = bgfx::createDynamicVertexBuffer(config::INITIAL_NUM_BLOCKS, GPUBlock::layout, BGFX_BUFFER_COMPUTE_READ | BGFX_BUFFER_ALLOW_RESIZE); + + gpu_block_selection = bgfx::createDynamicVertexBuffer(config::INITIAL_BLOCK_SELECTION, GPUBlockSelection::layout, BGFX_BUFFER_COMPUTE_READ | BGFX_BUFFER_ALLOW_RESIZE); + + gpu_block_models = bgfx::createDynamicIndexBuffer(config::INITIAL_BLOCK_MODEL_BUFFER_SIZE, BGFX_BUFFER_INDEX32 | BGFX_BUFFER_ALLOW_RESIZE); + + GPUDummyVertex dummy_vertex = {0}; + gpu_dummy_vertex_buffer = bgfx::createVertexBuffer(bgfx::copy(&dummy_vertex, sizeof(float)), GPUDummyVertex::layout, BGFX_BUFFER_NONE); + + gpu_indirect_buffer = bgfx::createIndirectBuffer(config::INITIAL_BLOCK_SELECTION); + + gpu_instance_buffer = bgfx::createDynamicVertexBuffer(config::INITIAL_BLOCK_SELECTION, GPURenderInstance::layout, BGFX_BUFFER_COMPUTE_WRITE); + + gpu_line_buffer = bgfx::createDynamicVertexBuffer(config::INITIAL_LINE_BUFFER_SIZE, GPULineVertex::layout, BGFX_BUFFER_ALLOW_RESIZE); + + // uniforms + texture_atlas_sampler = 
bgfx::createUniform("texture_atlas_sampler", bgfx::UniformType::Sampler);
        u_cubes_compute_params = bgfx::createUniform("u_cubes_compute_params", bgfx::UniformType::Vec4);
        u_line_color = bgfx::createUniform("u_line_color", bgfx::UniformType::Vec4);

        // shader
        // Shader binaries are produced by the build (SPACEGAME_BUILD_SHADERS);
        // the `true` flag hands shader ownership to the program so the shaders
        // are destroyed together with it.
        cubes_shader = bgfx::createProgram(
            loadShader("./assets/shaders/vs_cubes.spv"),
            loadShader("./assets/shaders/fs_cubes.spv"), true);

        cubes_compute_shader = bgfx::createProgram(
            loadShader("./assets/shaders/cs_cubes.spv"), true);

        lines_shader = bgfx::createProgram(
            loadShader("./assets/shaders/vs_lines.spv"),
            loadShader("./assets/shaders/fs_lines.spv"), true);

        // textures
        // Atlas dimensions: patches * patch size * per-texture texel size.
        cpu_texture_atlas.init();
        gpu_texture_atlas = bgfx::createTexture2D(
            config::TA_WIDTH * config::TA_MAIN_PATCH_SIZE * config::TA_TEXTURE_SIZE,
            config::TA_HEIGHT * config::TA_MAIN_PATCH_SIZE * config::TA_TEXTURE_SIZE,
            false, 1, bgfx::TextureFormat::RGBA8, BGFX_TEXTURE_NONE | BGFX_SAMPLER_POINT, NULL);

        // cpu buffer index 0 is invalid for all purposes => we push an empty element
        // clearModified() right after keeps these placeholder writes from
        // being uploaded by the first gfx::update().
        uint32_t ref;
        cpu_grids.add(ref);
        cpu_grids.clearModified();
        assert(ref == 0);
        cpu_chunks.add(ref);
        cpu_chunks.clearModified();
        assert(ref == 0);
        cpu_blocks.add(ref);
        cpu_blocks.clearModified();
        assert(ref == 0);
        ref = cpu_block_models.add(NULL, 0);
        cpu_block_models.clearModified();
        assert(ref == 0);
        // Push debug texture into invalid index 0
        // 4x4 checker pattern used wherever an invalid texture id shows up.
        const Texel O = {204, 131, 67, 255}; // Orange
        const Texel W = {255, 255, 255, 255}; // White
        const Texel B = {0, 0, 0, 255}; // Black (NOTE(review): unused below)
        Texel texture[16] = {
            W, O, O, W,
            O, W, W, O,
            W, O, O, W,
            O, W, W, O,
        };
        uint32_t debug_comp_tex_id = add_component_textures(texture);
        assert(debug_comp_tex_id == 0);
        // add a first debug texture to the atlas
        // This also reserves the invalid texture_index 0
        cpu_texture_atlas.add_patch(1, 1, &debug_comp_tex_id);

        // load default AABB outlines
        aabb_outline = add_line(factory::generate_AABB_outline(BLOCK_AABB), 24);
// use block aabb initially + chunk_aabb_outline = add_line(factory::generate_AABB_outline(CHUNK_AABB), 24); + block_aabb_outline = add_line(factory::generate_AABB_outline(BLOCK_AABB), 24); + } + + // Call every frame! + // Updates outstanding changes from CPU -> GPU + void update() + { + uint32_t offset = 0, num_elements = 0; + bool resized = false; + + if (cpu_grids.wasModified(offset, num_elements)) + { + bgfx::update(gpu_grids, offset, bgfx::makeRef(cpu_grids.data() + offset, sizeof(GPUGrid) * num_elements)); + cpu_grids.clearModified(); + } + + if (cpu_chunks.wasModified(offset, num_elements)) + { + bgfx::update(gpu_chunks, offset, bgfx::makeRef(cpu_chunks.data() + offset, sizeof(GPUChunk) * num_elements)); + cpu_chunks.clearModified(); + } + + if (cpu_blocks.wasModified(offset, num_elements)) + { + bgfx::update(gpu_blocks, offset, bgfx::makeRef(cpu_blocks.data() + offset, sizeof(GPUBlock) * num_elements)); + cpu_blocks.clearModified(); + } + + if (cpu_block_selection.wasModified(offset, num_elements, resized)) + { + bgfx::update(gpu_block_selection, offset, bgfx::makeRef(cpu_block_selection.data() + offset, sizeof(float) * num_elements)); + cpu_block_selection.clear(); + + if (resized) + { // This means our indirect and instance buffers need to be resized too + bgfx::destroy(gpu_indirect_buffer); + gpu_indirect_buffer = bgfx::createIndirectBuffer(num_elements); + + bgfx::destroy(gpu_instance_buffer); + gpu_instance_buffer = bgfx::createDynamicVertexBuffer(num_elements, GPURenderInstance::layout, BGFX_BUFFER_COMPUTE_WRITE); + } + } + + if (cpu_block_models.wasModified(offset, num_elements)) + { + bgfx::update(gpu_block_models, offset, bgfx::makeRef(cpu_block_models.data() + offset, sizeof(uint32_t) * num_elements)); + cpu_block_models.clearModified(); + } + + if (cpu_lines.wasModified(offset, num_elements)) + { + bgfx::update(gpu_line_buffer, offset, bgfx::makeRef(cpu_lines.data() + offset, sizeof(GPULineVertex) * num_elements)); + cpu_lines.clearModified(); + 
} + + if (cpu_texture_atlas.wasModified(offset, num_elements)) + { + // offset == texel_id + // we need to update entire rows, since the texel data is linear + const uint16_t offset_y_begin = offset / config::TA_TEXELS_PER_ROW; + const uint16_t offset_y_end = ((offset + num_elements) / config::TA_TEXELS_PER_ROW) + 1; + const uint16_t num_rows = offset_y_end + offset_y_begin; + bgfx::updateTexture2D(gpu_texture_atlas, 0, 0, 0, offset_y_begin, config::TA_TEXELS_PER_ROW, num_rows, + bgfx::makeRef(cpu_texture_atlas.data() + (offset_y_begin * config::TA_TEXELS_PER_ROW), num_rows * config::TA_TEXELS_PER_ROW * sizeof(Texel))); + cpu_texture_atlas.clearModified(); + } + } + + void destroy() + { + bgfx::destroy(gpu_grids); + gpu_grids = BGFX_INVALID_HANDLE; + bgfx::destroy(gpu_chunks); + gpu_chunks = BGFX_INVALID_HANDLE; + bgfx::destroy(gpu_blocks); + gpu_blocks = BGFX_INVALID_HANDLE; + bgfx::destroy(gpu_block_selection); + gpu_block_selection = BGFX_INVALID_HANDLE; + bgfx::destroy(gpu_block_models); + gpu_block_models = BGFX_INVALID_HANDLE; + bgfx::destroy(gpu_dummy_vertex_buffer); + gpu_dummy_vertex_buffer = BGFX_INVALID_HANDLE; + bgfx::destroy(gpu_indirect_buffer); + gpu_indirect_buffer = BGFX_INVALID_HANDLE; + bgfx::destroy(gpu_instance_buffer); + gpu_instance_buffer = BGFX_INVALID_HANDLE; + bgfx::destroy(gpu_line_buffer); + gpu_line_buffer = BGFX_INVALID_HANDLE; + + bgfx::destroy(texture_atlas_sampler); + texture_atlas_sampler = BGFX_INVALID_HANDLE; + bgfx::destroy(u_cubes_compute_params); + u_cubes_compute_params = BGFX_INVALID_HANDLE; + bgfx::destroy(u_line_color); + u_line_color = BGFX_INVALID_HANDLE; + + bgfx::destroy(cubes_shader); + cubes_shader = BGFX_INVALID_HANDLE; + bgfx::destroy(cubes_compute_shader); + cubes_compute_shader = BGFX_INVALID_HANDLE; + bgfx::destroy(lines_shader); + lines_shader = BGFX_INVALID_HANDLE; + + bgfx::destroy(gpu_texture_atlas); + gpu_texture_atlas = BGFX_INVALID_HANDLE; + } + + GPUGrid &add_grid(uint32_t &_out_ref) + { + return 
cpu_grids.add(_out_ref);
    }

    // Returns a mutable reference to the grid entry at _ref for in-place
    // modification. _ref must come from a prior add_grid call.
    GPUGrid &update_grid(const uint32_t _ref)
    {
        return cpu_grids.at(_ref);
    }

    // Frees the grid slot at _ref.
    void remove_grid(const uint32_t _ref)
    {
        cpu_grids.remove(_ref);
    }

    // Allocates a chunk slot; reference is returned via _out_ref.
    GPUChunk &add_chunk(uint32_t &_out_ref)
    {
        return cpu_chunks.add(_out_ref);
    }

    void remove_chunk(const uint32_t _ref)
    {
        cpu_chunks.remove(_ref);
    }

    // Allocates a block slot; reference is returned via _out_ref.
    GPUBlock &add_block(uint32_t &_out_ref)
    {
        return cpu_blocks.add(_out_ref);
    }

    // Mutable access to an existing block entry.
    GPUBlock &update_block(const uint32_t _ref)
    {
        return cpu_blocks.at(_ref);
    }

    void remove_block(const uint32_t _ref)
    {
        cpu_blocks.remove(_ref);
    }

    // Appends a block reference to the current selection list.
    // Stored as float because the GPU-side selection stream is float-typed
    // (see the TODO on cpu_block_selection about proper packing).
    void add_block_selection(const uint32_t _ref)
    {
        cpu_block_selection.add((float)_ref);
    }

    // Number of blocks currently selected for rendering this frame.
    uint16_t get_num_block_selection()
    {
        return cpu_block_selection.get_num_elements();
    }

    // Stores _num_elements indices of a block model; returns its reference.
    uint32_t add_block_model(const uint32_t *_data, const uint32_t _num_elements)
    {
        return cpu_block_models.add(_data, _num_elements);
    }

    void remove_block_model(const uint32_t _ref)
    {
        cpu_block_models.remove(_ref);
    }

    // Resolves a model reference to its offset/length in the index buffer.
    void get_block_model_offset(const uint32_t _ref, uint32_t &_out_offset, uint32_t &_out_num_elements)
    {
        cpu_block_models.get_offset(_ref, _out_offset, _out_num_elements);
    }

    // A line always consists of 2 vertices.
    uint32_t add_line(const GPULineVertex *_data, const uint32_t _num_elements)
    {
        assert(_num_elements % 2 == 0);
        return cpu_lines.add(_data, _num_elements);
    }

    // Overwrites the vertices of an existing line allocation; _data must hold
    // exactly as many elements as the original add_line call.
    uint32_t update_line(const uint32_t _ref, const GPULineVertex *_data)
    {
        return cpu_lines.update(_ref, _data);
    }

    void remove_line(const uint32_t _ref)
    {
        cpu_lines.remove(_ref);
    }

    // Resolves a line reference to its offset/length in the line buffer.
    void get_line_offset(const uint32_t _ref, uint32_t &_out_offset, uint32_t &_out_num_elements)
    {
        cpu_lines.get_offset(_ref, _out_offset, _out_num_elements);
    }

/////////////////////////////////////////////////////////////////

    // Copies one component texture (TA_TEXELS_PER_TEXTURE texels) into the
    // CPU-side store and returns its id.
    // NOTE(review): the header declares this parameter as `const Color*`
    // while the definition uses `const Texel*` — confirm the two agree.
    uint32_t add_component_textures(const Texel *_textures)
    {
        uint32_t ref;
        CompTexture& tex = cpu_component_textures.add(ref);
        bx::memCopy(tex.data, _textures, config::TA_TEXELS_PER_TEXTURE * sizeof(Texel));
        return ref;
    }

    void remove_component_textures(uint32_t _ref)
    {
        cpu_component_textures.remove(_ref);
    }

    // Read-only access to a stored component texture's texel data.
    const Texel *get_component_texture(const uint32_t _ref)
    {
        return cpu_component_textures.at_const(_ref).data;
    }

/////////////////////////////////////////////////////////////////

    // Thin forwards to the CPU-side texture atlas (see TextureAtlas).
    uint32_t add_patch(const uint8_t _width, const uint8_t _height, const uint32_t *_component_texture_ids)
    {
        return cpu_texture_atlas.add_patch(_width, _height, _component_texture_ids);
    }

    void remove_patch(const uint8_t _patch_id)
    {
        cpu_texture_atlas.remove_patch(_patch_id);
    }

    void get_patch_texture_ids(const uint8_t _patch_id, uint32_t *_out_texture_ids)
    {
        cpu_texture_atlas.get_patch_texture_ids(_patch_id, _out_texture_ids);
    }

/////////////////////////////////////////////////////////////////

    // Raw handle accessors for the renderer.
    bgfx::DynamicVertexBufferHandle get_grid_buffer() { return gpu_grids; }
    bgfx::DynamicVertexBufferHandle get_chunk_buffer() { return gpu_chunks; }
    bgfx::DynamicVertexBufferHandle get_block_buffer() { return gpu_blocks; }
    bgfx::DynamicVertexBufferHandle get_block_selection_buffer() { return gpu_block_selection; }
bgfx::DynamicIndexBufferHandle get_block_model_buffer() { return gpu_block_models; } + bgfx::VertexBufferHandle get_dummy_vertex_buffer() { return gpu_dummy_vertex_buffer; } + bgfx::IndirectBufferHandle get_indirect_buffer() { return gpu_indirect_buffer; } + bgfx::DynamicVertexBufferHandle get_instance_buffer() { return gpu_instance_buffer; } + bgfx::DynamicVertexBufferHandle get_line_buffer() { return gpu_line_buffer; } + + bgfx::UniformHandle get_uniform_texture_atlas_sampler() { return texture_atlas_sampler; } + bgfx::UniformHandle get_uniform_cubes_compute_params() { return u_cubes_compute_params; } + bgfx::UniformHandle get_uniform_line_color() { return u_line_color; } + + bgfx::ProgramHandle get_cubes_shader() { return cubes_shader; } + bgfx::ProgramHandle get_cubes_compute_shader() { return cubes_compute_shader; } + bgfx::ProgramHandle get_lines_shader() { return lines_shader; } + + bgfx::TextureHandle get_texture_atlas() { return gpu_texture_atlas; } + + uint32_t get_aabb_outline(const AABB &_aabb) + { + // Note: update is possible since, an aabb outline always has 24 elements + return update_line(aabb_outline, factory::generate_AABB_outline(_aabb)); + } + uint32_t get_last_aabb_outline() { return aabb_outline; } + uint32_t get_chunk_aabb_outline() { return chunk_aabb_outline; } + uint32_t get_block_aabb_outline() { return block_aabb_outline; } + +} diff --git a/src/graphics.h b/src/graphics.h new file mode 100644 index 0000000..ab7d50e --- /dev/null +++ b/src/graphics.h @@ -0,0 +1,239 @@ +#pragma once + +#include +#include "space_math.h" + +/* * * * * * * * * * * * * * * * * * * * * * * */ +/* The gfx namespace manages all GPU resources */ +/* * * * * * * * * * * * * * * * * * * * * * * */ + +// ------------ \\ +// DATA STRUCTS \\ +// ------------ \\ + +// Per grid data for rendering +typedef struct gpu_grid +{ + float m_mtx[16]; + + static void init() + { + layout + .begin() + .add(bgfx::Attrib::TexCoord0, 4, bgfx::AttribType::Float) + 
.add(bgfx::Attrib::TexCoord1, 4, bgfx::AttribType::Float) + .add(bgfx::Attrib::TexCoord2, 4, bgfx::AttribType::Float) + .add(bgfx::Attrib::TexCoord3, 4, bgfx::AttribType::Float) + .end(); + }; + + static bgfx::VertexLayout layout; +} GPUGrid; + +// Per chunk data for rendering +typedef struct gpu_chunk +{ + float grid_id; + float grid_offset_x; + float grid_offset_y; + float grid_offset_z; + + static void init() + { + layout + .begin() + .add(bgfx::Attrib::TexCoord0, 4, bgfx::AttribType::Float) + .end(); + }; + + static bgfx::VertexLayout layout; +} GPUChunk; + +// Per block data for rendering (all blocks ready for rendering aka in render distance) +// This points to a block_blueprint in the index buffer +// It represents a single block instance (pre culling) +typedef struct gpu_block +{ + float chunk_id; + float transform; // position in chunk and rotation + float index_buf_offset; + float num_indices; + + static void init() + { + layout + .begin() + .add(bgfx::Attrib::TexCoord0, 4, bgfx::AttribType::Float) + .end(); + }; + + static bgfx::VertexLayout layout; +} GPUBlock; + +// Helper class +struct CompTexture +{ + Texel data[4*4]; +}; + +// Post culling selection +// This is referencing a GPUBlock +typedef struct gpu_block_selection +{ + float index; + + static void init() + { + layout + .begin() + .add(bgfx::Attrib::TexCoord0, 1, bgfx::AttribType::Float) + .end(); + }; + + static bgfx::VertexLayout layout; +} GPUBlockSelection; + +// Per instance data, generated by computer shader +// Required for indirect drawing +// This is just the blocks world matrix (position+orientation in the world) +typedef struct gpu_render_instance +{ + float m_mtx[16]; + + static void init() + { + layout + .begin() + .add(bgfx::Attrib::TexCoord0, 4, bgfx::AttribType::Float) + .add(bgfx::Attrib::TexCoord1, 4, bgfx::AttribType::Float) + .add(bgfx::Attrib::TexCoord2, 4, bgfx::AttribType::Float) + .add(bgfx::Attrib::TexCoord3, 4, bgfx::AttribType::Float) + .end(); + }; + + static 
bgfx::VertexLayout layout; +} GPURenderInstance; + +// Dummy vertex +typedef struct gpu_dummy_vertex +{ + float data; + + static void init() + { + layout + .begin() + .add(bgfx::Attrib::TexCoord0, 1, bgfx::AttribType::Float) + .end(); + }; + + static bgfx::VertexLayout layout; +} GPUDummyVertex; + +// A line between two points +typedef struct gpu_line_vertex +{ + Vec3 position; + + static void init() + { + layout + .begin() + .add(bgfx::Attrib::TexCoord0, 3, bgfx::AttribType::Float) + .end(); + }; + + static bgfx::VertexLayout layout; +} GPULineVertex; + + + +namespace gfx +{ + + // --------- \\ + // FUNCTIONS \\ + // --------- \\ + + void init(); + void update(); + void destroy(); + + // Adds grid data for the gpu + // Returns reference for updating/removing + GPUGrid &add_grid(uint32_t &_out_ref); + GPUGrid &update_grid(const uint32_t _ref); + void remove_grid(const uint32_t _ref); + + // Adds chunk data for the gpu + // Returns reference for updating/removing + GPUChunk &add_chunk(uint32_t &_out_ref); + void remove_chunk(const uint32_t _ref); + + // Adds block data for the gpu + // Returns reference for updating/removing + GPUBlock &add_block(uint32_t &_out_ref); + GPUBlock &update_block(const uint32_t _ref); + void remove_block(const uint32_t _ref); + + void add_block_selection(const uint32_t _ref); + uint16_t get_num_block_selection(); + + // Adds block model data (index buffer) for the gpu + // Returns reference for updating/removing + uint32_t add_block_model(const uint32_t *_data, const uint32_t _num_elements); + void remove_block_model(const uint32_t _ref); + void get_block_model_offset(const uint32_t _ref, uint32_t &_out_offset, uint32_t &_out_num_elements); + + // Adds line data (line segments) for the gpu + // Returns reference for updating/removing + uint32_t add_line(const GPULineVertex *_data, const uint32_t _num_elements); + // Attention: new _data MUST have the exact same _num_elements as the original + uint32_t update_line(const uint32_t _ref, 
const GPULineVertex *_data); + void remove_line(const uint32_t _ref); + void get_line_offset(const uint32_t _ref, uint32_t &_out_offset, uint32_t &_out_num_elements); + + // Adds 8*8 texels to the internal buffer + // Returns id to the textures + uint32_t add_component_textures(const Color* textures); + void remove_component_textures(uint32_t _ref); + const Texel* get_component_texture(uint32_t _ref); + + // Adds a patch to the atlas with size _width*_height + // Deduplicates the _component_texture_ids if possible, otherwise copies the data + uint32_t add_patch(const uint8_t _width, const uint8_t _height, const uint32_t* _component_texture_ids); + // Copies _data into the given patch + // Returns texture id for vertex buffer generation + // uint32_t update_patch_texture(const uint32_t _patch_id, const uint8_t _offset_x, const uint8_t _offset_y, const uint32_t _component_texture_id); + void remove_patch(const uint8_t _patch_id); + // Returns 4 texture ids (for vertex buffer generation) of given patch + void get_patch_texture_ids(const uint8_t _patch_id, uint32_t* _out_texture_ids); + + + bgfx::DynamicVertexBufferHandle get_grid_buffer(); + bgfx::DynamicVertexBufferHandle get_chunk_buffer(); + bgfx::DynamicVertexBufferHandle get_block_buffer(); + bgfx::DynamicVertexBufferHandle get_block_selection_buffer(); + bgfx::DynamicIndexBufferHandle get_block_model_buffer(); + bgfx::VertexBufferHandle get_dummy_vertex_buffer(); + bgfx::IndirectBufferHandle get_indirect_buffer(); + bgfx::DynamicVertexBufferHandle get_instance_buffer(); + bgfx::DynamicVertexBufferHandle get_line_buffer(); + + bgfx::UniformHandle get_uniform_texture_atlas_sampler(); + bgfx::UniformHandle get_uniform_cubes_compute_params(); + bgfx::UniformHandle get_uniform_line_color(); + + bgfx::ProgramHandle get_cubes_shader(); + bgfx::ProgramHandle get_cubes_compute_shader(); + bgfx::ProgramHandle get_lines_shader(); + + bgfx::TextureHandle get_texture_atlas(); + + uint32_t get_aabb_outline(const AABB &_aabb); 
+ // Returns last aabb outline generated by "get_aabb_outline(const AABB& _aabb)" + uint32_t get_last_aabb_outline(); + uint32_t get_chunk_aabb_outline(); + uint32_t get_block_aabb_outline(); + +} // namespace gfx diff --git a/src/lib/camera.cpp b/src/lib/camera.cpp new file mode 100644 index 0000000..29a333e --- /dev/null +++ b/src/lib/camera.cpp @@ -0,0 +1,4 @@ +#define CAMERA_IMPLEMENTATION +#include "camera.h" + +/// This file only exists as a translation unit for the library implementation diff --git a/src/lib/camera.h b/src/lib/camera.h new file mode 100644 index 0000000..4b8d281 --- /dev/null +++ b/src/lib/camera.h @@ -0,0 +1,446 @@ +/* + * INFO: + * + * C/C++ (single) header quaternion based 3D camera system + * for games and other 3D graphics applications. + * + * + * FEATURES: + * + * - Quaternion based + * This naturally avoids gimbal lock and enables smooth interpolation (ex. for cinematic camera movement) + * - Precise manipulation + * A call of 'camera_rotate(&camera, {45 * DEG_TO_RAD, 0, 0});' will rotate exactly 45 degrees. + * - Engine agnostic + * No matter your input system, the camera is updated in angles + * - Different camera modes to control the cameras behaviour + * - Allows custom configuration + * - Can be changed at runtime + * - See CAMERA_MODE_* defines for more + * - Pre-defined modes: CAMERA_MODE_FREE, CAMERA_MODE_FIRST_PERSON, CAMERA_MODE_THIRD_PERSON, CAMERA_MODE_ORBITAL + * - Supports seamless camera mode transitions (ex. first person -> third person) + * - Full access to all camera state data - nothing is hidden + * You can access and manipulate the entire camera state at any time! + * - Supports angle clamping + * Restrict the angles your camera is allowed to work in + * + * + * USAGE: + * + * Simply add 'camera.h' and one 'camera_math.h' to your project and '#include "camera.h"' it wherever. 
+ * + * ONE (and only ONE) source file must hold the implementation + * by using '#define CAMERA_IMPLEMENTATION' before including it with '#include "camera.h"'. + * + * Camera code is math heavy. For all required math functions we provide a separate 'camera_math.h' file. + * To get started you can use the 'camera_math_default.h'. + * Simply add it to your project and rename it to 'camera_math.h'. + * + * But your engine probably brings its own math library. + * In order to reduce the duplication of math-code you can create your own + * 'camera_math.h' implementing the required functions with the math library of your choice. + * This also allows you to pass and receive arguments without the need to convert them. + * See 'camera_math_bx.h' as an example. + * + * + * ANGLE CLAMPING: + * + * If angle clamping is activated, the corresponding limits must be set in the camera struct. + * All limits are expected in radians and min* must be smaller than max*. + * They are expected in the range [-pi; pi], with 0 representing no rotation. + * The camera rotations are restricting in WORLD space. + * This means if pitch AND yaw are clamped, this essentially creates a "window" the camera is not allowed to rotate out of. + * + * Example for clamping pitch: + * 1. Activate pitch clamping: 'camera.mode |= CAMERA_MODE_CLAMP_PITCH;' + * 2. Set limits: 'camera.maxPitch = -pi / 2.0f; // aka restrict to 90 deg upwards' + * 'camera.minPitch = pi / 2.0f; // aka restrict to 90 deg downwards' + * + * + * GENERAL NOTES: + * + * ALL camera struct members can be safely manipulated at any time. + * + * To change to a right-handed coordinate system simply change CAMERA_WORLD_FORWARD to (0.0f, 0.0f, -1.0f) + * + * CameraQuaternions are well suited to represent 3D orientation, but they do not accumulate + * many rotations in differing axes well and thus require occasional re-normalization. 
+ * To accommodate for this and for general performance reasons changes are accumulated and the + * orientation quaternion is only updated when the view matrix is requested (once per frame). + * + * Query functions only return the correct value AFTER pending changes have been applied. (i.e. calling camera_view_matrix(..)) + * Example: + * 1. camera_move(..) // Changes NOT yet applied + * 2. camera_eye_position(..) // Returned state does NOT yet include previous move + * 3. camera_view_matrix(..) // Changes are now applied + * 4. camera_eye_position(..) // Returned state DOES now include previous move + * + * + * LICENSE: + * + * MIT License + * + * Copyright (c) 2022 Crydsch Cube + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + + +#ifndef CAMERA_HEADER_GUARD +#define CAMERA_HEADER_GUARD + +#include +#include "camera_math_bx.h" + +/* World and mode defines */ + +#define CAMERA_WORLD_FORWARD CameraVec3(0.0f, 0.0f, 1.0f) +#define CAMERA_WORLD_UP CameraVec3(0.0f, 1.0f, 0.0f) +#define CAMERA_WORLD_RIGHT CameraVec3(1.0f, 0.0f, 0.0f) + +// Camera mode configuration flags +// Can be combined with bitwise OR +#define CAMERA_MODE_DISABLE_ROLL UINT32_C(0x00000001) // Disables the roll axis +#define CAMERA_MODE_MOVE_IN_WORLDPLANE UINT32_C(0x00000002) // Projects movement onto world plane +#define CAMERA_MODE_CLAMP_PITCH_ANGLE UINT32_C(0x00000004) // Limits the pitch angle. Typically used in first/third person to prevent overrotation (i.e. somersaults). +#define CAMERA_MODE_CLAMP_YAW_ANGLE UINT32_C(0x00000008) // Limits the yaw angle. +#define CAMERA_MODE_CLAMP_ROLL_ANGLE UINT32_C(0x00000010) // Limits the roll angle. + +// Free float camera mode (no restrictions applied) +#define CAMERA_MODE_FREE (0) + +// First person camera mode +// Note: Set camera.minPitch = -pi/2 and camera.maxPitch = pi/2 +#define CAMERA_MODE_FIRST_PERSON (0 \ + | CAMERA_MODE_DISABLE_ROLL \ + | CAMERA_MODE_MOVE_IN_WORLDPLANE \ + | CAMERA_MODE_CLAMP_PITCH_ANGLE \ + ) + +// Third person camera mode +// Note: Set camera.minPitch = -pi/2 and camera.maxPitch = pi/2 +// Note: Use a target_distance > 0 +#define CAMERA_MODE_THIRD_PERSON CAMERA_MODE_FIRST_PERSON + +// Orbital camera mode (orbit around some target) +// Useful for inspecting models. +// Note: Set camera.minPitch = -pi/2 and camera.maxPitch = pi/2 +#define CAMERA_MODE_ORBITAL (0 \ + | CAMERA_MODE_DISABLE_ROLL \ + | CAMERA_MODE_CLAMP_PITCH_ANGLE \ + ) + + +/* Camera struct */ + +typedef struct camera { + CameraVec3 target_position; // The target point, the camera is looking at. Aka camera eye position if camera.target_distance == 0. + float target_distance; // Camera distance from eye to target. Note: negative values create zoom-like behaviour. 
+ CameraQuat orientation; // Camera rotation in 3D. + uint32_t mode; // Controls camera behaviour. See CAMERA_MODE_* defines. + + // Temporary accumulator. Cleared on camera_view_matrix(..). + CameraVec3 movement_accumulator; + CameraVec3 rotation_accumulator; + // Angle clamping limits. See "Angle Clamping" for further information. + float minPitch; + float maxPitch; + float minYaw; + float maxYaw; + float minRoll; + float maxRoll; +} Camera; + + +/* Function declarations */ + +// Initialize/Reset the camera struct. +extern Camera camera_init(); + +// Returns the cameras current forward direction (normalized) +extern CameraVec3 camera_forward(const Camera* _cam); + +// Returns the cameras current up direction (normalized) +extern CameraVec3 camera_up(const Camera* _cam); + +// Returns the cameras current right direction (normalized) +extern CameraVec3 camera_right(const Camera* _cam); + +// Returns the cameras current eye position +extern CameraVec3 camera_eye(const Camera* _cam); + +// Move the camera in its relative orientation +// _offset == (forward, up, right) +extern void camera_move(Camera* _cam, const CameraVec3 _offset); + +// Rotate the camera view +// _angles = (pitch, yaw, roll) +// pitch == "Look Up/Down" yaw == "Look Left/Right" roll == "Turn head Left/Right" +// Note: angles are expected in radians +extern void camera_rotate(Camera* _cam, const CameraVec3 _angles); + +// Rotate the camera to look into the direction _forward +// This only changes the camera.orientation, it will still face its camera.target_position! 
+// Note: _forward and _up are expected to be normalized +extern void camera_look_at(Camera* _cam, CameraVec3 _forward, CameraVec3 _up); + +// Update the camera and generate a view matrix +// Note: _out_matrix is expected to be a float[16] +extern void camera_view_matrix(Camera* _cam, float* _out_matrix); + + +#endif // !CAMERA_HEADER_GUARD + + +#ifdef CAMERA_IMPLEMENTATION + +extern Camera camera_init() +{ + static Camera cam = { + .target_position = cm_init_vec3(0.0f, 0.0f, 0.0f), + .target_distance = 0.0f, + .orientation = cm_init_quat(0.0f, 0.0f, 0.0, 0.0f), + .mode = CAMERA_MODE_FREE, + + .movement_accumulator = cm_init_vec3(0.0f, 0.0f, 0.0f), + .rotation_accumulator = cm_init_vec3(0.0f, 0.0f, 0.0f), + + .minPitch = 0.0f, + .maxPitch = 0.0f, + .minYaw = 0.0f, + .maxYaw = 0.0f, + .minRoll = 0.0f, + .maxRoll = 0.0f, + }; + + return cam; +}; + +extern CameraVec3 camera_forward(const Camera* _cam) +{ + return cm_mul(CAMERA_WORLD_FORWARD, cm_invert(_cam->orientation)); +}; + +extern CameraVec3 camera_up(const Camera* _cam) +{ + return cm_mul(CAMERA_WORLD_UP, cm_invert(_cam->orientation)); +}; + +extern CameraVec3 camera_right(const Camera* _cam) +{ + return cm_mul(CAMERA_WORLD_RIGHT, cm_invert(_cam->orientation)); +}; + +extern CameraVec3 camera_eye(const Camera* _cam) +{ + return cm_add(_cam->target_position, cm_scale(camera_forward(_cam), -_cam->target_distance)); +}; + +extern void camera_move(Camera* _cam, const CameraVec3 _offset) +{ + _cam->movement_accumulator = cm_add(_cam->movement_accumulator, _offset); +} + +extern void camera_rotate(Camera* _cam, const CameraVec3 _angles) +{ + _cam->rotation_accumulator = cm_add(_cam->rotation_accumulator, _angles); +} + +extern void camera_look_at(Camera* _cam, CameraVec3 _forward, CameraVec3 _up) +{ + // Based on typical vector to matrix to quaternion approach + + // Get orthogonal basis vectors + const CameraVec3 right = cm_normalizeVec3(cm_cross(_up, _forward)); + _up = cm_cross(_forward, right); + + // Convert to 
Quaternion + // Ref.: https://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/ + + const float m0 = right.x; + const float m1 = right.y; + const float m2 = right.z; + + const float m4 = _up.x; + const float m5 = _up.y; + const float m6 = _up.z; + + const float m8 = _forward.x; + const float m9 = _forward.y; + const float m10 = _forward.z; + + float trace = m0 + m5 + m10; + if (trace > 0) { + float s = 0.5f / cm_sqrt(trace + 1.0f); + _cam->orientation.w = 0.25f / s; + _cam->orientation.x = (m6 - m9) * s; + _cam->orientation.y = (m8 - m2) * s; + _cam->orientation.z = (m1 - m4) * s; + } + else { + if (m0 > m5 && m0 > m10) { + float s = 2.0f * cm_sqrt(1.0f + m0 - m5 - m10); + _cam->orientation.w = (m6 - m9) / s; + _cam->orientation.x = 0.25f * s; + _cam->orientation.y = (m4 + m1) / s; + _cam->orientation.z = (m8 + m2) / s; + } + else if (m5 > m10) { + float s = 2.0f * cm_sqrt(1.0f + m5 - m0 - m10); + _cam->orientation.w = (m8 - m2) / s; + _cam->orientation.x = (m4 + m1) / s; + _cam->orientation.y = 0.25f * s; + _cam->orientation.z = (m9 + m6) / s; + } + else { + float s = 2.0f * cm_sqrt(1.0f + m10 - m0 - m5); + _cam->orientation.w = (m1 - m4) / s; + _cam->orientation.x = (m8 + m2) / s; + _cam->orientation.y = (m9 + m6) / s; + _cam->orientation.z = 0.25f * s; + } + } +} + +extern void camera_view_matrix(Camera* _cam, float* _out_matrix) +{ + /* Clamp angles */ + + if (_cam->mode & (CAMERA_MODE_CLAMP_PITCH_ANGLE | CAMERA_MODE_CLAMP_YAW_ANGLE | CAMERA_MODE_CLAMP_ROLL_ANGLE)) + { + const CameraVec3 angles = cm_toEuler(_cam->orientation); + + if (_cam->mode & CAMERA_MODE_CLAMP_PITCH_ANGLE) + { + _cam->rotation_accumulator.x = cm_max(_cam->minPitch - angles.x, _cam->rotation_accumulator.x); + _cam->rotation_accumulator.x = cm_min(_cam->maxPitch - angles.x, _cam->rotation_accumulator.x); + } + + if (_cam->mode & CAMERA_MODE_CLAMP_YAW_ANGLE) + { + _cam->rotation_accumulator.y = cm_max(_cam->minYaw - angles.y, _cam->rotation_accumulator.y); + 
_cam->rotation_accumulator.y = cm_min(_cam->maxYaw - angles.y, _cam->rotation_accumulator.y); + } + + if (_cam->mode & CAMERA_MODE_CLAMP_ROLL_ANGLE) + { + _cam->rotation_accumulator.z = cm_max(_cam->minRoll - angles.z, _cam->rotation_accumulator.z); + _cam->rotation_accumulator.z = cm_min(_cam->maxRoll - angles.z, _cam->rotation_accumulator.z); + } + } + + + /* Update orientation */ + + const CameraQuat pitch = cm_fromAxisAngle(CAMERA_WORLD_RIGHT, _cam->rotation_accumulator.x); + const CameraQuat yaw = cm_fromAxisAngle(CAMERA_WORLD_UP, _cam->rotation_accumulator.y); + + if (_cam->mode & CAMERA_MODE_DISABLE_ROLL) + { + // Note: The multiplication order is important, not to induce roll from pitch+yaw + _cam->orientation = cm_mulQuat(_cam->orientation, pitch); + _cam->orientation = cm_mulQuat(yaw, _cam->orientation); + } + else + { + const CameraQuat roll = cm_fromAxisAngle(CAMERA_WORLD_FORWARD, _cam->rotation_accumulator.z); + + _cam->orientation = cm_mulQuat(_cam->orientation, pitch); + _cam->orientation = cm_mulQuat(_cam->orientation, yaw); + _cam->orientation = cm_mulQuat(_cam->orientation, roll); + } + + _cam->orientation = cm_normalizeQuat(_cam->orientation); // Re-Normalize orientation quaternion + + // Reset accumulator + _cam->rotation_accumulator.x = 0.0f; + _cam->rotation_accumulator.y = 0.0f; + _cam->rotation_accumulator.z = 0.0f; + + + /* Update target_position */ + + CameraVec3 forward = camera_forward(_cam); + CameraVec3 up = camera_up(_cam); + CameraVec3 right = camera_right(_cam); + + if (_cam->mode & CAMERA_MODE_MOVE_IN_WORLDPLANE) + { + const float epsilon = 0.0001f; // Avoid floating point errors + + if (forward.y > 1.0f - epsilon) // Note: forward is normalized, so checking .y is sufficient + { // Special case: Looking straight up + forward = cm_negate(up); + } + else if (forward.y < -1.0f + epsilon) + { // Special case: Looking straight down + forward = up; + } + else if (right.y > 1.0f - epsilon) + { + right = up; + } + else if (right.y < -1.0f 
+ epsilon) + { + right = cm_negate(up); + } + + // Project the forward and right into the world plane + forward.y = 0; + forward = cm_normalizeVec3(forward); + + right.y = 0; + right = cm_normalizeVec3(right); + + up = CAMERA_WORLD_UP; + } + + // Scale by desired distance + forward = cm_scale(forward, _cam->movement_accumulator.x); + up = cm_scale(up, _cam->movement_accumulator.y); + right = cm_scale(right, _cam->movement_accumulator.z); + + // Apply changes to target_position + _cam->target_position = cm_add(_cam->target_position, forward); + _cam->target_position = cm_add(_cam->target_position, up); + _cam->target_position = cm_add(_cam->target_position, right); + + // Reset accumulator + _cam->movement_accumulator.x = 0.0f; + _cam->movement_accumulator.y = 0.0f; + _cam->movement_accumulator.z = 0.0f; + + + /* Generate view matrix */ + + // Get rotation matrix + cm_matrixFromQuat(_out_matrix, _cam->orientation); + + // Add translation + CameraVec3 translation = cm_negate(camera_eye(_cam)); + + translation = cm_mul(translation, _cam->orientation); + + _out_matrix[12] = translation.x; + _out_matrix[13] = translation.y; + _out_matrix[14] = translation.z; + +} + +#endif // CAMERA_IMPLEMENTATION diff --git a/src/lib/camera_math_bx.h b/src/lib/camera_math_bx.h new file mode 100644 index 0000000..d92dc2b --- /dev/null +++ b/src/lib/camera_math_bx.h @@ -0,0 +1,107 @@ +/* + * INFO: + * + * This file provides an interface to math functions for camera.h + * + * Specifically interfacing with bx/math.h + * Ref.: https://github.com/bkaradzic/bgfx + * Ref.: https://github.com/bkaradzic/bx + * + * + * LICENSE: + * + * MIT License + * + * Copyright (c) 2022 Crydsch Cube + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, 
and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include + +#define CameraVec3 Vec3 +#define CameraQuat Quat + +static inline CameraVec3 cm_init_vec3(float _x, float _y, float _z) { + return bx::Vec3(_x, _y, _z); +} + +static inline CameraQuat cm_init_quat(float _x, float _y, float _z, float _w) { + return bx::Quaternion(_x, _y, _z, _w); +} + +static inline CameraVec3 cm_mul(CameraVec3 _a, CameraQuat _b) { + return bx::mul(_a, _b); +} + +static inline CameraQuat cm_invert(CameraQuat _a) { + return bx::invert(_a); +} + +static inline CameraVec3 cm_add(CameraVec3 _a, CameraVec3 _b) { + return bx::add(_a, _b); +} + +static inline CameraVec3 cm_scale(CameraVec3 _a, float _b) { + return bx::mul(_a, _b); +} + +static inline CameraVec3 cm_cross(CameraVec3 _a, CameraVec3 _b) { + return bx::cross(_a, _b); +} + +static inline float cm_min(float _a, float _b) { + return bx::min(_a, _b); +} + +static inline float cm_max(float _a, float _b) { + return bx::max(_a, _b); +} + +static inline float cm_sqrt(float _a) { + return bx::sqrt(_a); +} + +static inline CameraVec3 cm_toEuler(CameraQuat _a) { + return bx::toEuler(_a); +} + +static inline CameraQuat cm_fromAxisAngle(CameraVec3 _a, float _b) { + return bx::fromAxisAngle(_a, _b); +} + +static inline 
CameraQuat cm_mulQuat(CameraQuat _a, CameraQuat _b) { + return bx::mul(_a, _b); +} + +static inline CameraQuat cm_normalizeQuat(CameraQuat _a) { + return bx::normalize(_a); +} + +static inline CameraVec3 cm_normalizeVec3(CameraVec3 _a) { + return bx::normalize(_a); +} + +static inline CameraVec3 cm_negate(CameraVec3 _a) { + return bx::neg(_a); +} + +static inline void cm_matrixFromQuat(float* _a, CameraQuat _b) { + return bx::mtxFromQuaternion(_a, _b); +} diff --git a/src/lib/input.cpp b/src/lib/input.cpp new file mode 100644 index 0000000..de5e3aa --- /dev/null +++ b/src/lib/input.cpp @@ -0,0 +1,641 @@ +#include +#include +#include +#include +#include +#include +#include + +#include "input.h" +#include "util.h" +#include "space_math.h" + +/* +* +* buttons and axes keep state (need to be reset manually) +* axes are considered normalized by default +* +* Simple input handling system, with a fire-and-forget design. +* You configure it once, and it will handle the rest. +* It provides virtual inputs, so you can just poll for the state of your 'throw grenade button' in your gameloop! +* Any number of real buttons (keyboard, mouse, controller) can map to this virtual button. +* +* Features: +* - buttons and axes +* - Supports input sensitivity (via an input multiplier) +* - mappings between +* - buttons -> buttons +* - buttons -> axes (ex. keyboard input -> camera rotation) +* - axes -> buttons (ex. controller input -> fixed speed movement) +* - axes -> axes +* - multiple mappings to the same virtual input +* - 'w' or 'controller_axis_1' -> 'move_forward_axis' +* - combination mappings (only active if both real inputs are active) +* - 'shift' and 'w' -> 'running_button' +*/ + +namespace input { + + // TODO remove_mapping(..) 
and set active=false + + using Device = struct device { + uint16_t real_buttons_offset; + uint16_t real_axes_offset; + std::string name; + bool connected; + + device(const uint16_t _real_buttons_offset, const uint16_t _real_axes_offset, std::string _name) { + real_buttons_offset = _real_buttons_offset; + real_axes_offset = _real_axes_offset; + name = std::move(_name); + connected = true; + } + }; + + using VirtualButton = struct virtual_button { + std::vector real_inputs; + std::vector multiplier; + }; + + using VirtualAxis = struct virtual_axis { + std::vector real_inputs; + std::vector multiplier; + float min; + float max; + + virtual_axis(const float _min, const float _max) { + min = _min; + max = _max; + } + }; + + const uint16_t BUTTON_BIT = 0x1 << 15; // virtual input is button (instead of axis) + const uint16_t COMBINE_BIT = 0x1 << 14; // combine next input with logical 'and' (instead of 'or') + const uint16_t ID_MASK = COMBINE_BIT - 0x1; // bit mask to extract id + + const float DEADZONE = 0.1f; // per axis deadzone + + std::vector devices = { { 0, 0, "" }}; // Includes first dummy device + std::vector active_devices; // is online and has at least one mapping + std::vector real_buttons_is = { }; + std::vector real_buttons_was = { }; + std::vector real_axes_is = { }; + std::vector real_axes_was = { }; + std::vector virtual_buttons; + std::vector virtual_axes; + + inline uint16_t SET_BUTTON_BIT(uint16_t _id) + { + return _id | BUTTON_BIT; + } + + inline bool GET_BUTTON_BIT(uint16_t _id) + { + return _id & BUTTON_BIT; + } + + inline uint16_t SET_COMBINE_BIT(uint16_t _id) + { + return _id | COMBINE_BIT; + } + + inline bool GET_COMBINE_BIT(uint16_t _id) + { + return _id & COMBINE_BIT; + } + + inline uint16_t GET_ID(uint16_t _id) + { + return _id & ID_MASK; + } + + inline float apply_deadzone(float _value) { + return std::abs(_value) < DEADZONE ? 
0.0f : _value; + } + + void update() + { + // Copy state + real_buttons_was = real_buttons_is; + real_axes_was = real_axes_is; + } + + uint16_t get_device_num_buttons(const DID _device) + { + assert(_device < devices.size() - 1); + + Device& device = devices[_device]; + Device& device_next = devices[_device + 1]; + + return device_next.real_buttons_offset - device.real_buttons_offset; + } + + uint16_t get_device_num_axes(const DID _device) + { + assert(_device < devices.size() - 1); + + Device& device = devices[_device]; + Device& device_next = devices[_device + 1]; + + return device_next.real_axes_offset - device.real_axes_offset; + } + + // [internal] + bool device_has_mappings(const DID _device) { + assert(_device < devices.size() - 1); + + // get range of absolute button & axis indices for this device + Device& device = devices[_device]; + uint16_t btn_range_start = device.real_buttons_offset; + uint16_t btn_range_end = device.real_buttons_offset + get_device_num_buttons(_device) - 1; + uint16_t axis_range_start = device.real_axes_offset; + uint16_t axis_range_end = device.real_axes_offset + get_device_num_axes(_device) - 1; + + // iterate all virtual buttons & axes + // check if any of their mappings refer to the set of device indices + for (uint16_t vbid = 0; vbid < virtual_buttons.size(); ++vbid) { + VirtualButton& btn = virtual_buttons[vbid]; + + for (uint16_t rid : btn.real_inputs) { + bool is_button = GET_BUTTON_BIT(rid); + uint16_t index = GET_ID(rid); + + if ((is_button && between(index, btn_range_start, btn_range_end)) || + (!is_button && between(index, axis_range_start, axis_range_end))) + { // Found valid mapping! 
+ return true; + } + } + } + + for (uint16_t vid = 0; vid < virtual_axes.size(); ++vid) { + VirtualAxis& axis = virtual_axes[vid]; + + for (uint16_t rid : axis.real_inputs) { + bool is_button = GET_BUTTON_BIT(rid); + uint16_t index = GET_ID(rid); + + if ((is_button && between(index, btn_range_start, btn_range_end)) || + (!is_button && between(index, axis_range_start, axis_range_end))) + { // Found valid mapping! + return true; + } + } + } + + return false; + } + + bool is_device_active(const DID _device) { + return std::find(active_devices.begin(), active_devices.end(), _device) != active_devices.end(); + } + + // TODO add GUID to device + DID add_device(const uint16_t _num_buttons, const uint16_t _num_axes, std::string _name) + { + // Try to re-identify a disconnected device + for (uint16_t id = 0; id < devices.size(); ++id) { + Device& device = devices[id]; + if (!device.connected && + get_device_num_buttons(id) == _num_buttons && + get_device_num_axes(id) == _num_axes && + device.name == _name) + { + // Found it! 
+ logInfo("Re-identified input device ID: %d Buttons: %d Axes: %d Name: %s\n", id, _num_buttons, _num_axes, _name.c_str()); + device.connected = true; + + // Check if this device has mappings + assert(std::find(active_devices.begin(), active_devices.end(), id) == active_devices.end()); + if (device_has_mappings(id)) { + active_devices.emplace_back(id); + } + + return id; + } + } + + // Add new device + DID id = (uint16_t)devices.size() - 1; + Device& device = devices[id]; + + // We now have the current dummy device + // Its offsets mark the current position on the bitset/vector + + // Add new buttons + assert(real_buttons_is.size() == device.real_buttons_offset); + real_buttons_is.resize(device.real_buttons_offset + _num_buttons); + real_buttons_was.resize(device.real_buttons_offset + _num_buttons); + + // Add new axes + assert(real_axes_is.size() == device.real_axes_offset); + real_axes_is.resize(device.real_axes_offset + _num_axes); + real_axes_was.resize(device.real_axes_offset + _num_axes); + + logInfo("New input device ID: %d Buttons: %d Axes: %d Name: %s\n", id, _num_buttons, _num_axes, _name.c_str()); + + // Set device info + // no need to set offsets, they are already correct! 
+ device.name = std::move(_name); + + // Push new dummy device (marking new end) + devices.emplace_back(real_buttons_is.size(), real_axes_is.size(), ""); + + return id; + } + + void remove_device(const DID _device) { + reset_device(_device); + Device& device = devices[_device]; + device.connected = false; + + // find and remove from active devices + auto it = std::find(active_devices.begin(), active_devices.end(), _device); + if (it != active_devices.end()) { + active_devices.erase(it); + } + } + + void reset_device_buttons(const DID _device) + { + assert(_device < devices.size() - 1); + + Device& device = devices[_device]; + Device& device_next = devices[_device + 1]; + + // Reset all buttons + for (uint16_t i = device.real_buttons_offset; i < device_next.real_buttons_offset; ++i) { + real_buttons_is[i] = 0; + } + } + + void reset_device_axes(const DID _device) + { + assert(_device < devices.size() - 1); + + Device& device = devices[_device]; + Device& device_next = devices[_device + 1]; + + // Reset all axes + for (uint16_t i = device.real_axes_offset; i < device_next.real_axes_offset; ++i) { + real_axes_is[i] = 0.0f; + } + } + + void reset_device(const DID _device) + { + reset_device_buttons(_device); + reset_device_axes(_device); + } + + void update_buttons(const DID _device, const uint16_t _start_index, const uint8_t* _new_states, const uint16_t _num_states) + { + assert(_device < devices.size() - 1); + assert(_new_states != nullptr); + assert(_start_index + _num_states <= get_device_num_buttons(_device)); + + Device& device = devices[_device]; + std::memcpy(real_buttons_is.data() + device.real_buttons_offset + _start_index, _new_states, _num_states * sizeof(uint8_t)); + } + + void update_button(const DID _device, const uint16_t _index, const uint8_t _new_state) + { + update_buttons(_device, _index, &_new_state, 1); + } + + template + void update_axes(const DID _device, const uint16_t _start_index, const float* _new_states, const uint16_t _num_states) + { + 
assert(_device < devices.size() - 1); + assert(_new_states != nullptr); + assert(_start_index + _num_states <= get_device_num_axes(_device)); + + Device& device = devices[_device]; + float* dst = real_axes_is.data() + device.real_axes_offset + _start_index; + + if constexpr (accumulate) { + for (uint16_t i = 0; i < _num_states; ++i) { + dst[i] += _new_states[i]; + } + } + else { + std::memcpy(dst, _new_states, _num_states * sizeof(float)); + } + } + + void update_axes(const DID _device, const uint16_t _start_index, const float* _new_states, const uint16_t _num_states) + { + update_axes(_device, _start_index, _new_states, _num_states); + } + + void update_axes_acc(const DID _device, const uint16_t _start_index, const float* _new_states, const uint16_t _num_states) + { + update_axes(_device, _start_index, _new_states, _num_states); + } + + VBID add_virtual_button() + { + VBID vbid = (uint16_t)virtual_buttons.size(); + virtual_buttons.emplace_back(); + return vbid; + } + + VAID add_virtual_axis(const float _min_value, const float _max_value) + { + VAID id = (uint16_t)virtual_axes.size(); + virtual_axes.emplace_back(_min_value, _max_value); + return id; + } + + void clear_mapping(const VBID _button) + { + assert(_button < virtual_buttons.size()); + + VirtualButton& v_button = virtual_buttons[_button]; + v_button.real_inputs.clear(); + v_button.multiplier.clear(); + } + + void clear_mapping(const VAID _axis) + { + assert(_axis < virtual_buttons.size()); + + // virtual axis + VirtualAxis& v_axis = virtual_axes[_axis]; + v_axis.real_inputs.clear(); + v_axis.multiplier.clear(); + } + + void map_button(const DID _device, const uint16_t _index, const VBID _button, const bool _combine_with_next) + { + assert(_device < devices.size() - 1); + assert(_index < get_device_num_buttons(_device)); + assert(_button < virtual_buttons.size()); + + // get internal index of this button + Device& device = devices[_device]; + uint16_t internal_id = device.real_buttons_offset + _index; + + 
internal_id = SET_BUTTON_BIT(internal_id); + if (_combine_with_next) { + internal_id = SET_COMBINE_BIT(internal_id); + } + + // virtual button + VirtualButton& v_button = virtual_buttons[_button]; + v_button.real_inputs.emplace_back(internal_id); + v_button.multiplier.emplace_back(0.0f); // Button to Button mappings do not use multiplier + + if (device.connected) { + // we added a mapping => mark device as active (if not already) + auto it = std::find(active_devices.begin(), active_devices.end(), _device); + if (it == active_devices.end()) { + active_devices.emplace_back(_device); + } + } + } + + void map_button(const DID _device, const uint16_t _index, const VAID _axis, const float _multiplier, const bool _combine_with_next) + { + assert(_device < devices.size() - 1); + assert(_index < get_device_num_buttons(_device)); + assert(_axis < virtual_axes.size()); + + // get internal index of this button + Device& device = devices[_device]; + uint16_t internal_id = device.real_buttons_offset + _index; + + internal_id = SET_BUTTON_BIT(internal_id); + if (_combine_with_next) { + internal_id = SET_COMBINE_BIT(internal_id); + } + + // virtual axis + VirtualAxis& v_axis = virtual_axes[_axis]; + v_axis.real_inputs.emplace_back(internal_id); + v_axis.multiplier.emplace_back(_multiplier); + + if (device.connected) { + // we added a mapping => mark device as active (if not already) + auto it = std::find(active_devices.begin(), active_devices.end(), _device); + if (it == active_devices.end()) { + active_devices.emplace_back(_device); + } + } + } + + void map_axis(const DID _device, const uint16_t _index, const VBID _button, const float _multiplier, const bool _combine_with_next) + { + assert(_device < devices.size() - 1); + assert(_index < get_device_num_axes(_device)); + assert(_button < virtual_buttons.size()); + + // get internal index of this button + Device& device = devices[_device]; + uint16_t internal_id = device.real_axes_offset + _index; + + if (_combine_with_next) { + 
internal_id = SET_COMBINE_BIT(internal_id); + } + + // virtual button + VirtualButton& v_button = virtual_buttons[_button]; + v_button.real_inputs.emplace_back(internal_id); + v_button.multiplier.emplace_back(_multiplier); + + if (device.connected) { + // we added a mapping => mark device as active (if not already) + auto it = std::find(active_devices.begin(), active_devices.end(), _device); + if (it == active_devices.end()) { + active_devices.emplace_back(_device); + } + } + } + + void map_axis(const DID _device, const uint16_t _index, const VAID _axis, const float _multiplier, const bool _combine_with_next) + { + assert(_device < devices.size() - 1); + assert(_index < get_device_num_axes(_device)); + assert(_axis < virtual_axes.size()); + + // get internal index of this button + Device& device = devices[_device]; + uint16_t internal_id = device.real_axes_offset + _index; + + if (_combine_with_next) { + internal_id = SET_COMBINE_BIT(internal_id); + } + + // virtual axis + VirtualAxis& v_axis = virtual_axes[_axis]; + v_axis.real_inputs.emplace_back(internal_id); + v_axis.multiplier.emplace_back(_multiplier); + + if (device.connected) { + // we added a mapping => mark device as active (if not already) + auto it = std::find(active_devices.begin(), active_devices.end(), _device); + if (it == active_devices.end()) { + active_devices.emplace_back(_device); + } + } + } + + // [internal] + template + bool button_down(const VBID _button) { + VirtualButton& v_button = virtual_buttons[_button]; + + bool down = false; + bool combination_group_down = true; + for (int i = 0; i < v_button.real_inputs.size(); ++i) { + uint16_t r_input = v_button.real_inputs[i]; + + bool is_button = GET_BUTTON_BIT(r_input); + bool is_combination = GET_COMBINE_BIT(r_input); // we have to AND it with the next one + uint16_t index = GET_ID(r_input); + + bool tmp; + if (is_button) { + if constexpr (in_current_frame) { + tmp = real_buttons_is[index] > 0; // v_button.multiplier[i] ignored here + } else 
{ + tmp = real_buttons_was[index] > 0; // v_button.multiplier[i] ignored here + } + } + else { + if constexpr (in_current_frame) { + tmp = (apply_deadzone(real_axes_is[index]) * v_button.multiplier[i]) > 0.5f; + } + else { + tmp = (apply_deadzone(real_axes_was[index]) * v_button.multiplier[i]) > 0.5f; + } + } + + combination_group_down &= tmp; + + if (!is_combination) { + // last combination group ended + down |= combination_group_down; + // reset combination group state + combination_group_down = true; + } + + } + + return down; + } + + bool button_is_down(const VBID _button) { + return button_down(_button); + } + + bool button_was_down(const VBID _button) { + return button_down(_button); + } + + bool button_pressed(const VBID _button) + { + return button_is_down(_button) && !button_was_down(_button); + } + + bool button_released(const VBID _button) + { + return button_was_down(_button) && !button_is_down(_button); + } + + // [internal] + template + float axis_value(const VAID _axis) + { + assert(_axis < virtual_axes.size()); + + VirtualAxis& v_axis = virtual_axes[_axis]; + + float value = 0.0f; + float combination_group_value = FLT_MAX; + for (int i = 0; i < v_axis.real_inputs.size(); ++i) { + uint16_t r_input = v_axis.real_inputs[i]; + + bool is_button = GET_BUTTON_BIT(r_input); + bool is_combination = GET_COMBINE_BIT(r_input); // we have to AND it with the next one + uint16_t index = GET_ID(r_input); + + float tmp; + if (is_button) { + if constexpr (in_current_frame) { + tmp = real_buttons_is[index] * v_axis.multiplier[i]; + } + else { + tmp = real_buttons_was[index] * v_axis.multiplier[i]; + } + } + else { + if constexpr (in_current_frame) { + tmp = apply_deadzone(real_axes_is[index]) * v_axis.multiplier[i]; + } + else { + tmp = apply_deadzone(real_axes_was[index]) * v_axis.multiplier[i]; + } + } + + combination_group_value = std::min(combination_group_value, tmp); + + if (!is_combination) { + // last combination group ended + value += combination_group_value; 
+ // reset combination group state + combination_group_value = FLT_MAX; + } + + } + + return std::clamp(value, v_axis.min, v_axis.max); + } + + float axis_value_is(const VAID _axis) + { + return axis_value(_axis); + } + + float axis_value_was(const VAID _axis) + { + return axis_value(_axis); + } + + const std::vector& get_active_devices() { + return active_devices; + } + + void detect_input_callback(std::function _callback) { + + } + + void detect_input() { + // return did, is_button, index, value, + + for (uint16_t did = 0; did < devices.size() - 1; ++did) { + Device& device = devices[did]; + + uint16_t num_buttons = get_device_num_buttons(did); + for (uint16_t btn_index = 0; btn_index < num_buttons; ++btn_index) { + uint8_t btn_is = real_buttons_is[device.real_buttons_offset + btn_index]; + uint8_t btn_was = real_buttons_was[device.real_buttons_offset + btn_index]; + if (btn_is != btn_was) + { // change detected + logDebug("Device input detected: ID: %d Index: %d Is_Button: %d Value: %f\n", did, btn_index, 1, btn_is ? 
1.0f : 0.0f); + } + } + + uint16_t num_axes = get_device_num_axes(did); + for (uint16_t axis_index = 0; axis_index < num_axes; ++axis_index) { + float axis_is = apply_deadzone(real_axes_is[device.real_axes_offset + axis_index]); + float axis_was = apply_deadzone(real_axes_was[device.real_axes_offset + axis_index]); + if (axis_is != axis_was) + { // change detected + logDebug("Device input detected: ID: %d Index: %d Is_Button: %d Value: %f\n", did, axis_index, 0, axis_is); + } + } + + } + } +} diff --git a/src/lib/input.h b/src/lib/input.h new file mode 100644 index 0000000..c6fa742 --- /dev/null +++ b/src/lib/input.h @@ -0,0 +1,172 @@ +#pragma once + +#include +#include +#include +#include + +namespace input { + + // An ID used to identify devices + // Basically just a uint16_t - You can convert freely to and from it + // This type exists to aid correct usage + // Note: Returned ids are guaranteed to start from 0 + using DID = struct device_id { + uint16_t id; + + // default constructor + constexpr device_id() : id(0) {} + + // implicit constructor from uint16_t + constexpr device_id(const uint16_t _id) : id(_id) {} + // implicit conversion to uint16_t + constexpr operator uint16_t() const { return id; } + }; + + // An ID used to identify virtual buttons + // Basically just a uint16_t - You can convert freely to and from it + // This type exists to aid correct usage + // Note: Returned ids are guaranteed to start from 0 + using VBID = struct virtual_button_id { + uint16_t id; + + // implicit constructor from uint16_t + constexpr virtual_button_id(const uint16_t _id) : id(_id) {} + // implicit conversion to uint16_t + constexpr operator uint16_t() const { return id; } + }; + + // An ID used to identify virtual axes + // Basically just a uint16_t - You can convert freely to and from it + // This type exists to aid correct usage + // Note: Returned ids are guaranteed to start from 0 + using VAID = struct virtual_axis_id { + uint16_t id; + + // implicit constructor 
from uint16_t + constexpr virtual_axis_id(const uint16_t _id) : id(_id) {} + // implicit conversion to uint16_t + constexpr operator uint16_t() const { return id; } + }; + + + // Advance the internal state to the next frame. + // Should be called every frame BEFORE new input is updated + void update(); + + // Tries to re-identify an offline device with the same number + // of buttons, axes and matching name. Otherwise adds a new device + // Returns the ID of the device + // Note: Buttons and axes are indexed from 0 to num_ - 1 + DID add_device(const uint16_t _num_buttons, const uint16_t _num_axes, std::string _name); + + // Marks it as offline + // The internal state is reset, but not actually removed + // A later call to 'add_device(..)' with the same parameters, + // may re-identify this device (and return its original DID) + void remove_device(const DID _device); + + // Resets all buttons (to the state 'up') of the given device + void reset_device_buttons(const DID _device); + + // Resets all axes (to 0.0f) of the given device + void reset_device_axes(const DID _device); + + // Resets all buttons and all axes of the given device + void reset_device(const DID _device); + + // Returns whether the device is active (connected and has at least one mapping) + bool is_device_active(const DID _device); + + // Returns list of all active devices (devices that are connected and have at least one mapping) + // Useful to check which devices need to be updated + const std::vector& get_active_devices(); + + // Update multiple buttons of this device at once + // _start_index is the index of the first button to update + // _new_states must hold at least _num_states + void update_buttons(const DID _device, const uint16_t _start_index, const uint8_t* _new_states, const uint16_t _num_states); + + // Update a single buttons state + void update_button(const DID _device, const uint16_t _index, const uint8_t _new_state); + + // Update multiple axes of this device at once (overwriting 
the state) + // _start_index is the index of the first axis to update + // _new_states must hold at least _num_states + void update_axes(const DID _device, const uint16_t _start_index, const float* _new_states, const uint16_t _num_states); + + // Update multiple axes of this device at once (accumulating to the state) + // _start_index is the index of the first axis to update + // _new_states must hold at least _num_states + void update_axes_acc(const DID _device, const uint16_t _start_index, const float* _new_states, const uint16_t _num_states); + + // Update a real input buttons state (overwriting the state) + inline void update_axis(const DID _device, const uint16_t _index, const float _new_state) + { + update_axes(_device, _index, &_new_state, 1); + } + + // Update a real input buttons state (accumulating to the state) + inline void update_axis_acc(const DID _device, const uint16_t _index, const float _new_state) + { + update_axes_acc(_device, _index, &_new_state, 1); + } + + // Add a virtual button + VBID add_virtual_button(); + + // Add a virtual axis + // Its range will be confined to [_min_value; _max_value] + // Use defaults for unrestricted values or + // set [-1;1] for a normalized axis or + // typically you want set these to gameplay restriction ex.: max_walk_speed, max_camera_speed. 
+ VAID add_virtual_axis(const float _min_value = -FLT_MAX, const float _max_value = FLT_MAX); + + // Remove all mappings from this virtual button + void clear_mapping(const VBID _button); + + // Remove all mappings from this virtual axis + void clear_mapping(const VAID _axis); + + // Map a real button to a virtual button + // if _combine_with_next is true, this mapping will be combined with the next mapping applied to this virtual button + // Note: The last mapping in a combination group must have _combine_with_next == false + void map_button(const DID _device, const uint16_t _index, const VBID _button, const bool _combine_with_next = false); + + // Map a real button to a virtual axis + // if _combine_with_next is true, this mapping will be combined with the next mapping applied to this virtual axis + // Note: The last mapping in a combination group must have _combine_with_next == false + void map_button(const DID _device, const uint16_t _index, const VAID _axis, const float _multiplier = 1.0f, const bool _combine_with_next = false); + + // Map a real axis to a virtual button + // if _combine_with_next is true, this mapping will be combined with the next mapping applied to this virtual button + // Note: The last mapping in a combination group must have _combine_with_next == false + void map_axis(const DID _device, const uint16_t _index, const VBID _button, const float _multiplier = 1.0f, const bool _combine_with_next = false); + + // Map a real axis to a virtual axis + // if _combine_with_next is true, this mapping will be combined with the next mapping applied to this virtual axis + // Note: The last mapping in a combination group must have _combine_with_next == false + void map_axis(const DID _device, const uint16_t _index, const VAID _axis, const float _multiplier = 1.0f, const bool _combine_with_next = false); + + // Poll whether the virtual button is down in the current frame + bool button_is_down(const VBID _button); + + // Poll whether the virtual button was 
down in the previous frame + bool button_was_down(const VBID _button); + + // Poll whether the virtual button was pressed from the previous frame to the current frame + // This is a rising edge detection aka is_down && !was_down + bool button_pressed(const VBID _button); + + // Poll whether the virtual button was released from the previous frame to the current frame + // This is a falling edge detection aka was_down && !is_down + bool button_released(const VBID _button); + + // Poll axis value in the current frame + float axis_value_is(const VAID _axis); + + // Poll axis value in the previous frame + float axis_value_was(const VAID _axis); + + void detect_input(); +} diff --git a/src/lib/stb_image.cpp b/src/lib/stb_image.cpp new file mode 100644 index 0000000..9e87803 --- /dev/null +++ b/src/lib/stb_image.cpp @@ -0,0 +1,8 @@ +#define STB_IMAGE_IMPLEMENTATION +#define STB_IMAGE_WRITE_IMPLEMENTATION +#pragma warning( disable : 6262 26451) +#include "stb_image.h" +#include "stb_image_write.h" +#pragma warning( default : 6262 26451) + +/// This file only exists as a translation unit for the library implementation diff --git a/src/main.cpp b/src/main.cpp new file mode 100644 index 0000000..58ace78 --- /dev/null +++ b/src/main.cpp @@ -0,0 +1,392 @@ +#include + +#define GLFW_INCLUDE_NONE +#include + +#if BX_PLATFORM_LINUX || BX_PLATFORM_BSD +#define GLFW_EXPOSE_NATIVE_WAYLAND +#define GLFW_EXPOSE_NATIVE_X11 +#elif BX_PLATFORM_WINDOWS +#define GLFW_EXPOSE_NATIVE_WIN32 +#elif BX_PLATFORM_OSX +#define GLFW_EXPOSE_NATIVE_COCOA +#endif +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "test.h" +#include "util.h" +#include "config.h" +#include "gameloop.h" +#include "space_input.h" + +uint32_t window_width = config::MINIMAL_WINDOW_WIDTH; +uint32_t window_height = config::MINIMAL_WINDOW_HEIGHT; +bool cursor_entered_window = false; +GLFWgamepadstate gamepad_state; + +static void glfw_error_callback(int error, const 
char *description) +{ + logErr("GLFW error %d: %s\n", error, description); +} + +static void glfw_window_size_callback(GLFWwindow *_window, int _window_width, int _window_height) +{ + window_width = _window_width; + window_height = _window_height; + // Send window resize event to the game thread (to update the bgfx framebuffer size) + Event *ev = event_queue.begin_push(); + ev->type = Resize; + event_queue.end_push(); +} + +static void glfw_window_focus_callback(GLFWwindow *_window, int _focused) +{ + if (_focused) + { + glfwSetCursorPos(_window, 0, 0); // Avoid camera jump + } +} + +static void glfw_key_callback(GLFWwindow *_window, int _key, int _scancode, int _action, int _mods) +{ + if (_action == GLFW_REPEAT) + return; // We ignore key repeats + input::update_button_keycode(_scancode, _action == GLFW_PRESS); +} + +static void glfw_mouse_button_callback(GLFWwindow *_window, int _button, int _action, int _mods) +{ + input::update_button(input::Device::Mouse, _button, _action == GLFW_PRESS); +} + +static void glfw_cursor_callback(GLFWwindow *_window, double _xpos, double _ypos) +{ + // Avoid camera jump on (re-)entering the window + // ex. 
alt-tabbing + if (cursor_entered_window) + { + cursor_entered_window = false; + _xpos = 0.0; // Ignore first cursor movement + _ypos = 0.0; + } + + const float pos[2] = {(float)_xpos, (float)_ypos}; + // input::update_axis_acc(Device::Mouse, input::MOUSE_CURSOR_X_AXIS_INDEX, pos[0]); + // input::update_axis_acc(Device::Mouse, input::MOUSE_CURSOR_Y_AXIS_INDEX, pos[1]); + // Same as above + input::update_axes_acc(input::Device::Mouse, input::MouseAxis::CursorX, &pos[0], 2); + + // reset internal mouse position to (0,0) + // this gives us relative coordinates every frame + glfwSetCursorPos(_window, 0, 0); +} + +static void glfw_cursor_enter_callback(GLFWwindow *_window, int _entered) +{ + cursor_entered_window = _entered; +} + +static void glfw_scroll_callback(GLFWwindow *_window, double _xoffset, double _yoffset) +{ + const float offset[2] = {(float)_xoffset, (float)_yoffset}; + input::update_axes_acc(input::Device::Mouse, input::MouseAxis::ScrollX, &offset[0], 2); +} + +static void glfw_joystick_callback(int _jid, int _event) +{ + if (_event == GLFW_CONNECTED) + { + // get number of axes & buttons + int button_count = 0, axes_count = 0; + const uint8_t *buttons = glfwGetJoystickButtons(_jid, &button_count); + const float *axes = glfwGetJoystickAxes(_jid, &axes_count); + + // get name + const char *name = glfwGetJoystickName(_jid); + + if (axes == nullptr || buttons == nullptr || name == nullptr) + { + // Something is wrong with this joystick + // Maybe it was disconnected => do nothing + logWarn("Joystick connected, but cannot get data. (disconnected again?)\n"); + return; + } + + // tries to re-identify it or adds it as new + input::DID id = input::add_joystick(_jid, button_count, axes_count, std::string(name) + " (RAW)"); + + // Note: We do not update the state here, polling happens elsewhere + + // is xbox mapping available? 
+ if (glfwJoystickIsGamepad(_jid)) + { + // get input state + int state_valid = glfwGetGamepadState(_jid, &gamepad_state); + + if (state_valid == GLFW_FALSE) + { + // Something is wrong with this joystick + // Maybe it was disconnected => do nothing + logWarn("Joystick (JID: %d) has xbox mappings, but cannot get data. (disconnected again?)\n", _jid); + return; + } + + // try to re-identify it or adds it as new + id = input::add_gamepad(_jid, std::string(name) + " (XBOX)"); + + // Note: We do not update the state here, polling happens elsewhere + + // If no mappings yet => set default mappings + if (!input::is_joystick_active(_jid) && + !input::is_gamepad_active(_jid)) + { + input::set_gamepad_default_mappings(_jid); + } + } + } + else if (_event == GLFW_DISCONNECTED) + { + input::remove_joystick(_jid); + } + else + { + logWarn("Received unknown joystick event\n"); + } +} + +void handle_events(GLFWwindow *_window) +{ + using input::Axis; + using input::Button; + using input::Device; + + game_shutdown_requested = glfwWindowShouldClose(_window); + + // Advance input system to next frame + input::update(); + + // Reset mouse axes + input::reset_device_axes(Device::Mouse); + + glfwPollEvents(); + + // Poll joystick input + for (uint16_t jid : input::get_active_joysticks()) + { + int button_count = 0, axes_count = 0; + const uint8_t *buttons = glfwGetJoystickButtons(jid, &button_count); + const float *axes = glfwGetJoystickAxes(jid, &axes_count); + + if (axes == nullptr || buttons == nullptr) + { + // Something is wrong with this joystick + // Maybe it was disconnected => do nothing + logWarn("Joystick (JID: %d) error: Cannot get data (RAW). 
(disconnected?)\n", jid); + input::remove_joystick(jid); + continue; + } + + input::update_joystick(jid, buttons, button_count, axes, axes_count); + } + + for (uint16_t jid : input::get_active_gamepads()) + { + // get input state + int state_valid = glfwGetGamepadState(jid, &gamepad_state); + + if (state_valid == GLFW_FALSE) + { + // Something is wrong with this joystick + // Maybe it was disconnected => do nothing + logWarn("Joystick (JID: %d) error: Cannot get data (XBOX). (disconnected?)\n", jid); + input::remove_joystick(jid); + continue; + } + + input::update_gamepad(jid, gamepad_state); + } +} + +int main(int argc, char **argv) +{ + logInfo("Starting spacegame :)\n"); + + // generate_vertices("C:/Users/Crydsch/Desktop/spacegame v5/shaders/verts.sh"); + // generate_glsl_orientation_matrices("/home/crydsch/spacegame/shaders/orientations.sh"); + // return 0; + + // Run some debugging checks +#if not defined(NDEBUG) + if (!test()) + exit(1); +#endif + + // Fix for differences in debugging environment vs deploy environment + find_correct_working_directory(); + + logDebug("GLFW_PLATFORM_WAYLAND supported = %d\n", glfwPlatformSupported(GLFW_PLATFORM_WAYLAND)); + logDebug("GLFW_PLATFORM_X11 supported = %d\n", glfwPlatformSupported(GLFW_PLATFORM_X11)); + logDebug("GLFW_PLATFORM_WIN32 supported = %d\n", glfwPlatformSupported(GLFW_PLATFORM_WIN32)); + + // Create a GLFW window +#if BX_PLATFORM_LINUX || BX_PLATFORM_BSD + bool enable_wayland = should_use_wayland(); + if (enable_wayland) + glfwInitHint(GLFW_PLATFORM, GLFW_PLATFORM_WAYLAND); + else + glfwInitHint(GLFW_PLATFORM, GLFW_PLATFORM_X11); + +#elif BX_PLATFORM_WINDOWS + glfwInitHint(GLFW_PLATFORM, GLFW_PLATFORM_WIN32); +#elif BX_PLATFORM_OSX + glfwInitHint(GLFW_PLATFORM, GLFW_PLATFORM_COCOA); +#endif + + glfwSetErrorCallback(glfw_error_callback); + if (!glfwInit()) + DIE("Could not initialize GLFW\n"); + + int platform = glfwGetPlatform(); + if (platform == GLFW_PLATFORM_WAYLAND) + logInfo("GLFW running on 
wayland\n"); + else if (platform == GLFW_PLATFORM_X11) + logInfo("GLFW running on x11\n"); + else if (platform == GLFW_PLATFORM_WIN32) + logInfo("GLFW running on win32\n"); + else if (platform == GLFW_PLATFORM_COCOA) + logInfo("GLFW running on cocoa\n"); + else + logInfo("GLFW running on unknown platform\n"); + + glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API); + + GLFWwindow *window = glfwCreateWindow(window_width, window_height, "spacegame", nullptr, nullptr); + if (!window) + DIE("Could not create GLFW window\n"); + + // Initialize input system + // Before we set glfw callbacks + input::init(); + + glfwSetWindowSizeLimits(window, config::MINIMAL_WINDOW_WIDTH, config::MINIMAL_WINDOW_HEIGHT, GLFW_DONT_CARE, GLFW_DONT_CARE); + glfwSetWindowSizeCallback(window, glfw_window_size_callback); + glfwSetWindowFocusCallback(window, glfw_window_focus_callback); + glfwSetCursorPos(window, 0, 0); // once before cursor callbacks + glfwSetCursorEnterCallback(window, glfw_cursor_enter_callback); + glfwSetKeyCallback(window, glfw_key_callback); + glfwSetMouseButtonCallback(window, glfw_mouse_button_callback); + glfwSetCursorPosCallback(window, glfw_cursor_callback); + glfwSetCursorPos(window, 0, 0); // once after cursor callbacks + glfwSetScrollCallback(window, glfw_scroll_callback); + glfwSetJoystickCallback(glfw_joystick_callback); + + // Detect (already connected) joysticks + for (int i = 0; i < GLFW_JOYSTICK_LAST + 1; ++i) + { + if (glfwJoystickPresent(i)) + { + glfw_joystick_callback(i, GLFW_CONNECTED); + } + } + + // Setup cursor +#if NDEBUG + // Catch mouse (Only when not debugging; it will not release the mouse on a breakpoint..) 
+ glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED); +#endif + if (glfwRawMouseMotionSupported()) + { + glfwSetInputMode(window, GLFW_RAW_MOUSE_MOTION, GLFW_TRUE); + } + + // Call bgfx::renderFrame before bgfx::init to signal to bgfx not to create a render thread + // We do the threading ourselves + bgfx::renderFrame(); + + bgfx::PlatformData platformData; +#if BX_PLATFORM_LINUX || BX_PLATFORM_BSD + if (enable_wayland) + { + platformData.type = bgfx::NativeWindowHandleType::Wayland; + platformData.nwh = (void *)(uintptr_t)glfwGetWaylandWindow(window); + platformData.ndt = glfwGetWaylandDisplay(); + } + else + { + platformData.type = bgfx::NativeWindowHandleType::Default; + platformData.nwh = (void *)(uintptr_t)glfwGetX11Window(window); + platformData.ndt = glfwGetX11Display(); + } +#elif BX_PLATFORM_OSX + platformData.nwh = glfwGetCocoaWindow(window); +#elif BX_PLATFORM_WINDOWS + platformData.nwh = glfwGetWin32Window(window); +#endif + + // Create a thread to call the bgfx API from (except bgfx::renderFrame) + bx::Thread gameloopThread; + gameloopThread.init(runGameloopThread, &platformData); + + // Wait until bgfx is initialized + // this includes an initial dummy frame + // while (!bgfx_ready_flag.test()) { + while (!bgfx_ready_flag) + { + // Must be called repeatedly until bgfx is fully initialized + bgfx::renderFrame(100); + }; + + int64_t time_to_seconds = bx::getHPFrequency(); + int64_t time_to_millis = time_to_seconds / 1000; + int64_t time_frame_start = 0; + // should be 16.666 for 60fps + // by slightly under estimating it, we might run slightly above 60fps, which is fine + int64_t frame_time_target_millis = 15; // todo is float neceed here? 
+ + // Main loop + while (!game_shutdown_requested) + { + // Handle glfw events + handle_events(window); + flag_signal(&input_ready_flag); // input is ready to be used + + time_frame_start = bx::getHPCounter(); + + // Block until call to bgfx::frame, then + // Process submitted rendering primitives + bgfx::renderFrame(); + flag_signal(&frame_ready_flag); // signal last frame was rendered + + int64_t now = bx::getHPCounter(); + int64_t frame_time_diff = (now - time_frame_start); + int64_t sleep = frame_time_target_millis - (frame_time_diff / time_to_millis); + + std::this_thread::sleep_for(std::chrono::milliseconds(sleep)); + + // now = bx::getHPCounter(); + // frame_time_diff = (now - time_frame_start); + // logDebug("FPS: %lf\n", 1000.0 / ((double)frame_time_diff / (double)time_to_millis)); + } + + // Wait for bgfx to shut down + while (bgfx::RenderFrame::NoContext != bgfx::renderFrame()) + { + } + gameloopThread.shutdown(); // join game thread + glfwDestroyWindow(window); + glfwTerminate(); + + logInfo("Stopped spacegame gracefully. 
Till next time :)\n"); + + return gameloopThread.getExitCode(); +} diff --git a/src/net/debug_server.cpp b/src/net/debug_server.cpp new file mode 100644 index 0000000..a032c5d --- /dev/null +++ b/src/net/debug_server.cpp @@ -0,0 +1,20 @@ +#include "debug_server.h" + +namespace debug_server { + + void join(/*player id*/) + { + // TODO check player not already connected + + + // TODO register player joined + } + + + void simulate_server() + { + // TODO check registered event + // send updates to client + + } +} \ No newline at end of file diff --git a/src/net/debug_server.h b/src/net/debug_server.h new file mode 100644 index 0000000..fc44a5b --- /dev/null +++ b/src/net/debug_server.h @@ -0,0 +1,17 @@ +#pragma once + + +namespace debug_server { + + // Requests to join a server + // Server will answer with surrounding grids + void join(/*player id*/); + + + + + + + // will "answer" requests + void simulate_server(); +} diff --git a/src/net/network.cpp b/src/net/network.cpp new file mode 100644 index 0000000..9c3d928 --- /dev/null +++ b/src/net/network.cpp @@ -0,0 +1,382 @@ +//#include "network.h" +//#include "config.h" +//#include "util.h" +//#include +//#include "world.h" +//#include +//#include "debug_server.h" +// +//// This system manages all network communication including: +//// Sending/Receiving client requests +//// Sending/Receiving server updates +//// Server <-> Client id translation +// +//// Request buffers +//std::unordered_map chunk_requests; // chunk_PRid -> where on grid +//std::unordered_map> block_model_requests; +// +// +//// ID mappings remote <-> local +//// Note: All ids here are pure! 
+//std::vector RtoL_grid_ids(config::INITIAL_NUM_GRIDS, 0x0); +//std::vector LtoR_grid_ids(config::INITIAL_NUM_GRIDS, 0x0); +//std::vector RtoL_chunk_ids(config::INITIAL_NUM_CHUNKS, 0x0); +//std::vector LtoR_chunk_ids(config::INITIAL_NUM_CHUNKS, 0x0); +//std::vector RtoL_block_ids(config::INITIAL_NUM_BLOCKS, 0x0); +//std::vector LtoR_block_ids(config::INITIAL_NUM_BLOCKS, 0x0); +//std::vector RtoL_block_model_ids(config::INITIAL_NUM_BLOCK_MODELS, 0x0); +//std::vector LtoR_block_model_ids(config::INITIAL_NUM_BLOCK_MODELS, 0x0); +// +//namespace net { +// +// // id translation functions +// // Pass one id and get reference to the translated one +// uint32_t& translate_grid_id_RtoL(const uint32_t _PRid) +// { +// while (_PRid >= RtoL_grid_ids.size()) +// { // need to grow array +// RtoL_grid_ids.resize(2 * RtoL_grid_ids.size(), 0); +// } +// +// return RtoL_grid_ids[_PRid]; +// } +// uint32_t& translate_grid_id_LtoR(const uint32_t _PLid) +// { +// while (_PLid >= LtoR_grid_ids.size()) +// { // need to grow array +// LtoR_grid_ids.resize(2 * LtoR_grid_ids.size(), 0); +// } +// +// return LtoR_grid_ids[_PLid]; +// } +// +// uint32_t& translate_chunk_id_RtoL(const uint32_t _PRid) +// { +// while (_PRid >= RtoL_chunk_ids.size()) +// { // need to grow array +// RtoL_chunk_ids.resize(2 * RtoL_chunk_ids.size(), 0); +// } +// +// return RtoL_chunk_ids[_PRid]; +// } +// uint32_t& translate_chunk_id_LtoR(const uint32_t _PLid) +// { +// while (_PLid >= LtoR_chunk_ids.size()) +// { // need to grow array +// LtoR_chunk_ids.resize(2 * LtoR_chunk_ids.size(), 0); +// } +// +// return LtoR_chunk_ids[_PLid]; +// } +// +// uint32_t& translate_block_id_RtoL(const uint32_t _PRid) +// { +// while (_PRid >= RtoL_block_ids.size()) +// { // need to grow array +// RtoL_block_ids.resize(2 * RtoL_block_ids.size(), 0); +// } +// +// return RtoL_block_ids[_PRid]; +// } +// uint32_t& translate_block_id_LtoR(const uint32_t _PLid) +// { +// while (_PLid >= LtoR_block_ids.size()) +// { // need to grow 
array +// LtoR_block_ids.resize(2 * LtoR_block_ids.size(), 0); +// } +// +// return LtoR_block_ids[_PLid]; +// } +// +// uint32_t& translate_block_model_id_RtoL(const uint32_t _PRid) +// { +// while (_PRid >= RtoL_block_model_ids.size()) +// { // need to grow array +// RtoL_block_model_ids.resize(2 * RtoL_block_model_ids.size(), 0); +// } +// +// return RtoL_block_model_ids[_PRid]; +// } +// uint32_t& translate_block_model_id_LtoR(const uint32_t _PLid) +// { +// while (_PLid >= LtoR_block_model_ids.size()) +// { // need to grow array +// LtoR_block_model_ids.resize(2 * LtoR_block_model_ids.size(), 0); +// } +// +// return LtoR_block_model_ids[_PLid]; +// } +// +// void join() +// { +// debug_server::join(); +// +// // TODO send_CtoS_join +// } +// +// void recv_StoC_grid_update(StoC_grid_update _update) +// { +// uint32_t& grid_PLid = translate_grid_id_RtoL(_update.id); +// +// if (grid_PLid == 0) +// { // id does not exist => new grid +// if (_update.optional != 0b111) +// { // new grid MUST have all optional parameters +// logWarn("recv_StoC_grid_update received update for unknown grid. => ignoring it\n"); +// return; +// } +// +// grid_PLid = world::add_grid(); +// translate_grid_id_LtoR(grid_PLid) = _update.id; +// +// world::update_grid_transform(grid_PLid, _update.position, _update.orientation); +// +// for (uint32_t x = 0; x < 8; x++) +// for (uint32_t y = 0; y < 8; y++) +// for (uint32_t z = 0; z < 8; z++) +// world::update_grid_chunk(grid_PLid, x, y, z, _update.chunks_ids[x][y][z]); +// +// return; +// } +// // else id is valid grid +// +// // determine type of update +// +// if (_update.optional == 0b000) +// { // just id => grid got removed +// +// // TODO remove chunk id mappings +// // world::iterate_grid_chunks +// // RtoL_chunk_ids =0 ... 
+// +// world::remove_grid(grid_PLid); +// +// // remove id mapping +// translate_chunk_id_LtoR(grid_PLid) = 0; +// grid_PLid = 0; +// return; +// } +// +// if (_update.optional & 0b110) +// { // combined transform update +// world::update_grid_transform(grid_PLid, _update.position, _update.orientation); +// } +// else if (_update.optional & 0b100) +// { // position only +// world::update_grid_position(grid_PLid, _update.position); +// } +// else if (_update.optional & 0b010) +// { // orientation only +// world::update_grid_orientation(grid_PLid, _update.orientation); +// } +// +// if (_update.optional & 0b001) +// { // new chunk ids +// for (uint32_t x = 0; x < 8; x++) +// for (uint32_t y = 0; y < 8; y++) +// for (uint32_t z = 0; z < 8; z++) +// { +// uint32_t chunk_id = _update.chunks_ids[x][y][z]; // TODO rem when updating only valid ids +// if (chunk_id == 0) continue; // We only add new chunks, removal is done via chunk updates +// +// world::update_grid_chunk(grid_PLid, x, y, z, to_remote_id(chunk_id)); +// } +// } +// } +// +// void subscribe_chunk(const uint32_t _PRid, ChunkRequest _req) +// { +// chunk_requests.insert(std::pair(_PRid, _req)); +// send_CtoS_subscribe_chunk(_PRid); +// } +// +// void unsubscribe_requested_chunk(const uint32_t _PRid) +// { +// +// } +// +// void unsubscribe_local_chunk(const uint32_t _PRid, ChunkRequest _req) +// { +// auto req_it = chunk_requests.find(_PRid); +// if (req_it == chunk_requests.end()) +// { // request is still pending => remove it +// chunk_requests.erase(req_it); +// } +// else +// { +// chunk_requests.insert(req_it, std::pair(_PRid, _req)); +// } +// send_CtoS_unsubscribe_chunk(_PRid); +// } +// +// // instruct server to send chunk information (including blocks) AND keep us up-to-date +// void send_CtoS_subscribe_chunk(const uint32_t _PRid) +// { +// // TODO send request +// } +// +// void send_CtoS_unsubscribe_chunk(const uint32_t _PRid) +// { // instruct server to stop sending chunk updates +// // TODO send 
request +// } +// +// void recv_StoC_chunk_update(StoC_chunk_update _update) +// { +// // #1 DO WE KNOW THIS CHUNK? +// // translate id +// uint32_t& chunk_PLid = translate_chunk_id_RtoL(_update.id); +// +// // #1 NO +// if (chunk_PLid == 0) +// { // might be new chunk +// // #2 DID WE REQUEST IT? +// auto req_it = chunk_requests.find(_update.id); +// // #2 NO +// if (req_it == chunk_requests.end()) +// { // this chunk was not requested! (or unsubscribed before we received this update) +// logTrace("recv_StoC_chunk_update received a non requested chunk => ignoring it\n"); +// send_CtoS_unsubscribe_chunk(_update.id); // prevent further updates +// return; +// } +// // #2 YES +// ChunkRequest req = req_it->second; +// chunk_requests.erase(req_it); +// +// // fail-fast - does update indicate removed chunk? +// if (!_update.block_ids_exist) +// { // chunk does not/no longer exists on server +// return; +// } +// +// // add new chunk AND set id mapping +// chunk_PLid = world::add_chunk(); +// translate_chunk_id_LtoR(chunk_PLid) = _update.id; +// +// // update parent +// world::update_grid_chunk(req.grid_id, req.offset_x, req.offset_y, req.offset_z, chunk_PLid); +// +// // Note: +// } +// // #1 YES +// // else chunk_PLid is known chunk +// +// +// +// +// +// // determine type of update +// if (!_update.block_ids_exist) +// { // just id => chunk got removed +// // update parent grid (set chunk id to original remote id) +// world::update_grid_chunk(req.grid_id, req.offset_x, req.offset_y, req.offset_z, to_remote_id(_update.id)); +// +// world::remove_chunk(chunk_PLid); +// +// // remove id mapping +// translate_chunk_id_LtoR(chunk_PLid) = 0; +// chunk_PLid = 0; +// } +// else +// { // got new block ids +// for (uint32_t x = 0; x < 8; x++) +// for (uint32_t y = 0; y < 8; y++) +// for (uint32_t z = 0; z < 8; z++) +// { +// uint32_t block_PRid = _update.block_ids[x][y][z]; +// uint32_t& block_PLid = translate_block_id_RtoL(block_PRid); +// +// if (block_PLid == 0) continue; 
// TODO rem when handling only valid updates +// // Note: block removal is done through block updates, not chunk updates +// +// block_PLid = world::add_block(); +// world::update_chunk_block(chunk_PLid, x, y, z, block_PLid); +// +// // init block data +// auto& block_update = _update.block_data[x][y][z]; +// +// world::update_block_rotation(block_PLid, block_update.rotation); +// world::update_block_base_model(block_PLid, to_remote_id(block_update.base_model_id)); +// world::update_block_curr_model(block_PLid, to_remote_id(block_update.curr_model_id)); +// } +// +// } +// +// } +// +// void recv_StoC_block_update(StoC_block_update _update) +// { // aka existing block got removed or changed (model, health, ...) +// +// } +// +// void request_block_model(const uint32_t _PRid, const uint32_t _requesting_block_PLid, const bool _request_base_model /* or curr_model?*/) +// { +// BlockModelRequest req = { .block_id = _requesting_block_PLid, .base_model = _request_base_model }; +// +// auto req_it = block_model_requests.find(_PRid); +// if (req_it == block_model_requests.end()) +// { // new request +// block_model_requests.insert(std::pair(_PRid, std::vector(1, req))); +// send_CtoS_get_block_model(_PRid); +// } +// else +// { // additional request => just enqueue requesting block +// std::vector& reqs = req_it->second; +// reqs.push_back(req); +// } +// } +// +// void send_CtoS_get_block_model(const uint32_t _PRid) +// { +// +// } +// +// void recv_StoC_block_model_update(StoC_block_model_update _update) +// { +// // translate id +// uint32_t& block_model_PLid = translate_block_model_id_RtoL(_update.id); +// +// if (block_model_PLid == 0) +// { // new block model +// auto req_it = block_model_requests.find(_update.id); +// if (req_it == block_model_requests.end()) +// { // this model was not requested! 
+// logWarn("recv_StoC_block_model_update received a non requested block model => ignoring it\n"); +// return; +// } +// // else was requested +// std::vector reqs = req_it->second; +// block_model_requests.erase(req_it); +// +// if (!_update.data_exists) +// { // model does not/no longer exist on server +// return; +// } +// +// // add model and id mapping +// block_model_PLid = world::add_block_model(); +// translate_block_model_id_LtoR(block_model_PLid) = _update.id; +// +// // init model data +// world::update_block_model_data(block_model_PLid, _update.data, _update.num_data_elements); +// +// // update requesting blocks +// for (BlockModelRequest req : reqs) +// { +// if (req.base_model) +// { +// world::update_block_base_model(req.block_id, to_local_id(block_model_PLid)); +// } +// else +// { +// world::update_block_curr_model(req.block_id, to_local_id(block_model_PLid)); +// } +// +// } +// +// return; +// } +// // else block_model_PLid was already known +// logWarn("recv_StoC_block_model_update received update for already known block model => ignoring it\n"); +// +// } +//} diff --git a/src/net/network.h b/src/net/network.h new file mode 100644 index 0000000..71602e3 --- /dev/null +++ b/src/net/network.h @@ -0,0 +1,83 @@ +//#pragma once +// +//#include "space_math.h" +// +//typedef struct chunk_request { +// uint32_t grid_id; +// uint32_t offset_x; +// uint32_t offset_y; +// uint32_t offset_z; +//} ChunkRequest; +// +//typedef struct block_model_request { +// uint32_t block_id; +// bool base_model; // else was curr_model +//} BlockModelRequest; +// +//struct StoC_grid_update { +// // id +// uint32_t id; +// // 0b000 == grid removed +// // 0b1xx == includes position +// // 0bx1x == includes orientation +// // 0bxx1 == includes chunk ids +// uint8_t optional; // bitmap +// // opt: position +// Vec3 position; +// // opt: orientation +// Quat orientation; +// // opt: chunk_ids +// uint32_t chunks_ids[8][8][8]; // TODO better chunk_storage +//}; +// +//struct 
CtoS_subscribe_chunk { +// // id +// uint32_t id; +//}; +// +//struct CtoS_unsubscribe_chunk { +// // id +// uint32_t id; +//}; +// +//struct StoC_chunk_update { +// // chunk id +// uint32_t id; +// // block_ids +// bool block_ids_exist; // TODO +// uint32_t block_ids[8][8][8]; +//}; +// +//struct StoC_chunk_block_data { +// uint8_t rotation; +// uint32_t base_model_id; +// uint32_t curr_model_id; +//}; +// +//struct StoC_block_update { +// // block id +// uint32_t id; +// bool curr_model_id_exists; +// uint32_t curr_model_id; // may be ==base_model_id OR damaged variant +//}; +// +//struct StoC_block_model_update { +// // id +// uint32_t id; +// bool data_exists; +// uint32_t num_data_elements; +// uint32_t* data; +//}; +// +// +//namespace net { +// +// void join(); +// +// void subscribe_chunk(const uint32_t _PRid, ChunkRequest _req); +// +// void unsubscribe_chunk(const uint32_t _PRid, ChunkRequest _req); +// +// +// +//} diff --git a/src/renderer.cpp b/src/renderer.cpp new file mode 100644 index 0000000..2f99366 --- /dev/null +++ b/src/renderer.cpp @@ -0,0 +1,97 @@ +#include "renderer.h" +#include "util.h" + +namespace renderer +{ + + void dispatch_cubes_cs(const Vec3 _camera_eye, const uint16_t _num_block_selection) + { + // uniform data + float ud[4] = {_camera_eye.x, _camera_eye.y, _camera_eye.z, float(_num_block_selection)}; + bgfx::setUniform(gfx::get_uniform_cubes_compute_params(), ud); + + bgfx::setBuffer(0, gfx::get_grid_buffer(), bgfx::Access::Read); + bgfx::setBuffer(1, gfx::get_chunk_buffer(), bgfx::Access::Read); + bgfx::setBuffer(2, gfx::get_block_buffer(), bgfx::Access::Read); + bgfx::setBuffer(3, gfx::get_block_selection_buffer(), bgfx::Access::Read); + bgfx::setBuffer(4, gfx::get_indirect_buffer(), bgfx::Access::Write); + bgfx::setBuffer(5, gfx::get_instance_buffer(), bgfx::Access::Write); + + // Dispatch the call. 
We are using 64 local threads on the GPU to process the object list + // So lets dispatch ceil(numToDraw/64) workgroups of 64 local threads + // TODO investigate this number of threads... + bgfx::dispatch(0, gfx::get_cubes_compute_shader(), 64 /*uint32_t(num_objects / 64 + 1)*/, 1, 1); + } + + void draw_cubes(const uint16_t _num_block_selection) + { + bgfx::setVertexBuffer(0, gfx::get_dummy_vertex_buffer()); // Some GPUs/drivers require a vertex buffer, even if we do not use it. + bgfx::setIndexBuffer(gfx::get_block_model_buffer()); + bgfx::setTexture(0, gfx::get_uniform_texture_atlas_sampler(), gfx::get_texture_atlas(), BGFX_SAMPLER_POINT); + bgfx::setInstanceDataBuffer(gfx::get_instance_buffer(), 0, _num_block_selection); + + bgfx::setState(0 | BGFX_STATE_WRITE_RGB + //| BGFX_STATE_WRITE_A + //| BGFX_STATE_BLEND_ALPHA + | BGFX_STATE_WRITE_Z | BGFX_STATE_DEPTH_TEST_LESS | BGFX_STATE_CULL_CW | BGFX_STATE_MSAA); + + bgfx::submit(0, gfx::get_cubes_shader(), gfx::get_indirect_buffer(), 0, _num_block_selection); + } + + void draw_lines(const uint32_t _ref, const Vec4 _color, const float *_transform_mtx) + { + /// Note: This uses bgfx line primitives. + /// These might not be supported on every platform. + /// Alternative 1: Generate lines as tiny quads ourselves. + /// Allows for line thickness. Maximum control. + /// Requires "line normal" estimation. + /// Cannot do fancy things like caps. + /// Alternative 2: Use par_shapes.h to generate 3D cylindrical shapes. + /// Allows for line thickness. Maximum control. + /// Requires library and generating structures. + /// May be very expensive. + /// Alternative 3: Use par_streamlines.h to generate 2D lines. + /// Allows for a variety of line renderings (caps). + /// Requires library and transforming 3D data into 2D. + /// Requires solving depth issues as lines are 2D. 
+ + uint32_t line_offset = 0, line_num_elems = 0; + gfx::get_line_offset(_ref, line_offset, line_num_elems); + + bgfx::setVertexBuffer(0, gfx::get_line_buffer(), line_offset, line_num_elems); + + bgfx::setUniform(gfx::get_uniform_line_color(), &_color); + bgfx::setTransform(_transform_mtx); + + bgfx::setState(0 | BGFX_STATE_WRITE_RGB | BGFX_STATE_DEPTH_TEST_LEQUAL | BGFX_STATE_MSAA | BGFX_STATE_PT_LINES | BGFX_STATE_LINEAA); + + bgfx::submit(0, gfx::get_lines_shader()); + } + + uint32_t crosshair_ref = 0; // gfx_line + float crosshair_mtx[16]; + void draw_crosshair(const Camera *_cam) + { + vec3 eye = camera_eye(_cam); + vec3 forward = camera_forward(_cam); + assert(bx::isEqual(bx::length(forward), 1.0f, 0.001f)); + Vec3 up = camera_up(_cam); + assert(bx::isEqual(bx::length(up), 1.0f, 0.001f)); + Vec3 right = camera_right(_cam); + assert(bx::isEqual(bx::length(right), 1.0f, 0.001f)); + eye = bx::add(eye, bx::mul(forward, 0.2f)); // in front of camera + GPULineVertex crosshair[4]; + crosshair[0].position = bx::add(eye, bx::mul(up, -0.004f)); // down + crosshair[1].position = bx::add(eye, bx::mul(up, 0.004f)); // up + crosshair[2].position = bx::add(eye, bx::mul(right, -0.004f)); // left + crosshair[3].position = bx::add(eye, bx::mul(right, 0.004f)); // right + if (crosshair_ref == 0) + { // create + crosshair_ref = gfx::add_line(crosshair, 4); + bx::mtxIdentity(crosshair_mtx); + } + gfx::update_line(crosshair_ref, crosshair); + renderer::draw_lines(crosshair_ref, {1.0f, 1.0f, 1.0f, 1.0f}, crosshair_mtx); + } + +} diff --git a/src/renderer.h b/src/renderer.h new file mode 100644 index 0000000..d96c671 --- /dev/null +++ b/src/renderer.h @@ -0,0 +1,34 @@ +#pragma once + +#include "graphics.h" +#include "space_math.h" +#include "lib/camera.h" + +/* * * * * * * * * * * * * * * * * * * * * * * */ +/* The renderer namespace performs draw calls */ +/* * * * * * * * * * * * * * * * * * * * * * * */ + +namespace renderer +{ + + void dispatch_cubes_cs(const Vec3 _camera_eye, 
const uint16_t _num_block_selection); + + void draw_cubes(const uint16_t _num_block_selection); + + // Draws lines referenced by _ref from the line buffer + void draw_lines(const uint32_t _ref, const Vec4 _color, const float *_transform_mtx); + + void draw_crosshair(const Camera *_cam); + + // void draw_outline(uint32_t _block_id, Vec3 _color) // block_id? + //{ + // get block model -> outline offset/size in line buffer + // set color uniform + // set mvp uniform (grid+block transform) + // set vertex buffer + // set render state + // + // submit line shader call + //} + +} diff --git a/src/space_input.cpp b/src/space_input.cpp new file mode 100644 index 0000000..fd0935f --- /dev/null +++ b/src/space_input.cpp @@ -0,0 +1,512 @@ + +#include +#include +#include +#include +#include + +#include "space_input.h" +#include "util.h" + +// Note: We extend the input namespace with spacegame specific stuff +namespace input +{ + + using Joystick = struct joystick + { + DID raw; + DID xbox; + }; + + const uint16_t max_keyboard_keys = 128; + const float cursor_input_sensitivity = 0.002f; // TODO frame time based // maybe extend input system with global multiplier updated every frame? 
+ const float max_camera_move_speed = 0.05f; // TODO frame time based + const float button_camera_move_speed = max_camera_move_speed; // TODO frame time based + const float button_camera_turn_speed = 0.03f; // TODO frame time based + + std::unordered_map keycode_map; // maps platform specific keycodes to key indices for the input system + uint16_t next_key_index = 0; + Joystick joystick_map[GLFW_JOYSTICK_LAST + 1] = {}; // maps glfw joystick ids to indices for the input system + std::vector active_joysticks; // holds jids + std::vector active_gamepads; // holds jids + std::vector active_gamepads_tmp; // holds temporary jids for detection minigame + bool detect_minigame = false; + + // convenience functions + inline void add_device(const Device _device, const uint16_t _num_buttons, const uint16_t _num_axes, std::string _name) + { + DID did = add_device(_num_buttons, _num_axes, _name); + assert(did == _device); + } + inline void add_button(const Button _button) + { + VBID vbid = add_virtual_button(); + assert(vbid == _button); + } + inline void add_axis(const Axis _axis, const float _min_value = -FLT_MAX, const float _max_value = FLT_MAX) + { + VAID vaid = add_virtual_axis(_min_value, _max_value); + assert(vaid == _axis); + } + inline void map_button(const Device _device, const uint16_t _index, const Button _button, const bool _combine_with_next = false) + { + map_button((DID)_device, _index, (VBID)_button, _combine_with_next); + }; + inline void map_button(const Device _device, const uint16_t _index, const Axis _axis, const float _multiplier = 1.0f, const bool _combine_with_next = false) + { + map_button((DID)_device, _index, (VAID)_axis, _multiplier, _combine_with_next); + }; + inline void map_axis(const Device _device, const uint16_t _index, const Button _button, const float _multiplier = 1.0f, const bool _combine_with_next = false) + { + map_axis((DID)_device, _index, (VBID)_button, _multiplier, _combine_with_next); + }; + inline void map_axis(const Device 
_device, const uint16_t _index, const Axis _axis, const float _multiplier = 1.0f, const bool _combine_with_next = false) + { + map_axis((DID)_device, _index, (VAID)_axis, _multiplier, _combine_with_next); + }; + // Map a glfw named key + inline void map_key(const int _keyname /*GLFW_KEY_E*/, const Button _button, const bool _combine_with_next = false) + { + int keycode = glfwGetKeyScancode(_keyname); + if (keycode < 0) + { + assert(false); // Does this key not exist on the current platform? + return; + } + + uint16_t index = 0; + auto it = keycode_map.find(keycode); + if (it == keycode_map.end()) + { + // key is not mapped yet + + if (next_key_index >= max_keyboard_keys) + { + assert(false); // cannot map more keys! Need to increase max_keyboard_keys + return; + } + + index = next_key_index++; + keycode_map[keycode] = index; + } + else + { + // key is already mapped + index = it->second; + } + + map_button(Device::Keyboard, index, _button, _combine_with_next); + } + inline void map_key(const int _keyname /*GLFW_KEY_E*/, const Axis _axis, const float _multiplier = 1.0f, const bool _combine_with_next = false) + { + int keycode = glfwGetKeyScancode(_keyname); + if (keycode < 0) + { + assert(false); // Does this key not exist on the current platform? + return; + } + + uint16_t index = 0; + auto it = keycode_map.find(keycode); + if (it == keycode_map.end()) + { + // key is not mapped yet + + if (next_key_index >= max_keyboard_keys) + { + assert(false); // cannot map more keys! Need to increase max_keyboard_keys + return; + } + + index = next_key_index++; + keycode_map[keycode] = index; + } + else + { + // key is already mapped + index = it->second; + } + + map_button(Device::Keyboard, index, _axis, _multiplier, _combine_with_next); + } + // ex. 
for gamepad: _index == GLFW_GAMEPAD_BUTTON_A + inline void set_joystick_active(const int _jid) + { + assert(joystick_map[_jid].raw != 0); + auto it = std::find(active_joysticks.begin(), active_joysticks.end(), _jid); + if (it == active_joysticks.end()) + { + active_joysticks.emplace_back(_jid); + } + } + inline void set_gamepad_active(const int _jid) + { + assert(joystick_map[_jid].xbox != 0); + auto it = std::find(active_gamepads.begin(), active_gamepads.end(), _jid); + if (it == active_gamepads.end()) + { + active_gamepads.emplace_back(_jid); + } + } + inline void map_joystick_button(const int _jid, const uint16_t _index, const Button _button, const bool _combine_with_next = false) + { + DID did = joystick_map[_jid].raw; + assert(did != 0); + + map_button(did, _index, (VBID)_button, _combine_with_next); + + if (is_device_active(did)) + { + set_joystick_active(_jid); + } + } + inline void map_joystick_button(const int _jid, const uint16_t _index, const Axis _axis, const float _multiplier = 1.0f, const bool _combine_with_next = false) + { + DID did = joystick_map[_jid].raw; + assert(did != 0); + + map_button(did, _index, (VAID)_axis, _multiplier, _combine_with_next); + + if (is_device_active(did)) + { + set_joystick_active(_jid); + } + } + inline void map_joystick_axis(const int _jid, const uint16_t _index, const Button _button, const float _multiplier = 1.0f, const bool _combine_with_next = false) + { + DID did = joystick_map[_jid].raw; + assert(did != 0); + + map_axis(did, _index, (VBID)_button, _multiplier, _combine_with_next); + + if (is_device_active(did)) + { + set_joystick_active(_jid); + } + } + inline void map_joystick_axis(const int _jid, const uint16_t _index, const Axis _axis, const float _multiplier = 1.0f, const bool _combine_with_next = false) + { + DID did = joystick_map[_jid].raw; + assert(did != 0); + + map_axis(did, _index, (VAID)_axis, _multiplier, _combine_with_next); + + if (is_device_active(did)) + { + set_joystick_active(_jid); + } + } + 
inline void map_gamepad_button(const int _jid, const uint16_t _index, const Button _button, const bool _combine_with_next = false) + { + DID did = joystick_map[_jid].xbox; + assert(did != 0); + + map_button(did, _index, (VBID)_button, _combine_with_next); + + if (is_device_active(did)) + { + set_gamepad_active(_jid); + } + } + inline void map_gamepad_button(const int _jid, const uint16_t _index, const Axis _axis, const float _multiplier = 1.0f, const bool _combine_with_next = false) + { + DID did = joystick_map[_jid].xbox; + assert(did != 0); + + map_button(did, _index, (VAID)_axis, _multiplier, _combine_with_next); + + if (is_device_active(did)) + { + set_gamepad_active(_jid); + } + } + inline void map_gamepad_axis(const int _jid, const uint16_t _index, const Button _button, const float _multiplier = 1.0f, const bool _combine_with_next = false) + { + DID did = joystick_map[_jid].xbox; + assert(did != 0); + + map_axis(did, _index, (VBID)_button, _multiplier, _combine_with_next); + + if (is_device_active(did)) + { + set_gamepad_active(_jid); + } + } + inline void map_gamepad_axis(const int _jid, const uint16_t _index, const Axis _axis, const float _multiplier = 1.0f, const bool _combine_with_next = false) + { + DID did = joystick_map[_jid].xbox; + assert(did != 0); + + map_axis(did, _index, (VAID)_axis, _multiplier, _combine_with_next); + + if (is_device_active(did)) + { + set_gamepad_active(_jid); + } + } + + void init() + { + // Add devices + add_device(Device::Keyboard, max_keyboard_keys, 0, "Keyboard"); + add_device(Device::Mouse, GLFW_MOUSE_BUTTON_LAST + 1, MouseAxis::Count, "Mouse"); + + // Add virtual buttons + for (int i = 0; i < Button::Count; ++i) + { + add_button((Button)i); + } + + // Add virtual Axes + add_axis(Axis::MoveForward, -max_camera_move_speed, max_camera_move_speed); + add_axis(Axis::MoveRight, -max_camera_move_speed, max_camera_move_speed); + add_axis(Axis::MoveUp, -max_camera_move_speed, max_camera_move_speed); + add_axis(Axis::CameraPitch); 
// Camera rotation is unrestricted + add_axis(Axis::CameraYaw); // Camera rotation is unrestricted + add_axis(Axis::CameraRoll); // Camera rotation is unrestricted + + // Add default mapping + map_key(GLFW_KEY_F1, Button::ShowStats); + map_key(GLFW_KEY_F2, Button::LookAtOrigin); + map_key(GLFW_KEY_ESCAPE, Button::GameShouldExit); + + map_key(GLFW_KEY_W, Axis::MoveForward, max_camera_move_speed); + map_key(GLFW_KEY_S, Axis::MoveForward, -max_camera_move_speed); + map_key(GLFW_KEY_D, Axis::MoveRight, max_camera_move_speed); + map_key(GLFW_KEY_A, Axis::MoveRight, -max_camera_move_speed); + map_key(GLFW_KEY_SPACE, Axis::MoveUp, max_camera_move_speed); + map_key(GLFW_KEY_LEFT_CONTROL, Axis::MoveUp, -max_camera_move_speed); + + map_axis(Device::Mouse, MouseAxis::CursorX, Axis::CameraYaw, cursor_input_sensitivity); + map_axis(Device::Mouse, MouseAxis::CursorY, Axis::CameraPitch, cursor_input_sensitivity); + map_key(GLFW_KEY_LEFT, Axis::CameraYaw, -button_camera_turn_speed); + map_key(GLFW_KEY_RIGHT, Axis::CameraYaw, button_camera_turn_speed); + map_key(GLFW_KEY_UP, Axis::CameraPitch, -button_camera_turn_speed); + map_key(GLFW_KEY_DOWN, Axis::CameraPitch, button_camera_turn_speed); + + map_button(Device::Mouse, GLFW_MOUSE_BUTTON_RIGHT, Button::PlaceBlock); + map_button(Device::Mouse, GLFW_MOUSE_BUTTON_LEFT, Button::RemoveBlock); + + map_key(GLFW_KEY_E, Button::RotateBlockInc); + map_key(GLFW_KEY_Q, Button::RotateBlockDec); + map_axis(Device::Mouse, MouseAxis::ScrollY, Button::RotateBlockInc); + map_axis(Device::Mouse, MouseAxis::ScrollY, Button::RotateBlockDec, -1.0f); + } + + void set_gamepad_default_mappings(const int _jid) + { + assert(joystick_map[_jid].xbox != 0); // is gamepad + + map_gamepad_axis(_jid, GLFW_GAMEPAD_AXIS_LEFT_X, Axis::MoveRight, 0.08f); + map_gamepad_axis(_jid, GLFW_GAMEPAD_AXIS_LEFT_Y, Axis::MoveForward, 0.08f * -1.0f); + map_gamepad_axis(_jid, GLFW_GAMEPAD_AXIS_RIGHT_TRIGGER, Axis::MoveUp, 0.08f); + map_gamepad_axis(_jid, 
GLFW_GAMEPAD_AXIS_LEFT_TRIGGER, Axis::MoveUp, 0.08f * -1.0f); + map_gamepad_axis(_jid, GLFW_GAMEPAD_AXIS_RIGHT_X, Axis::CameraYaw, 0.03f); + map_gamepad_axis(_jid, GLFW_GAMEPAD_AXIS_RIGHT_Y, Axis::CameraPitch, 0.03f); + map_gamepad_button(_jid, GLFW_GAMEPAD_BUTTON_LEFT_BUMPER, Button::PlaceBlock); + map_gamepad_button(_jid, GLFW_GAMEPAD_BUTTON_RIGHT_BUMPER, Button::RemoveBlock); + map_gamepad_button(_jid, GLFW_GAMEPAD_BUTTON_START, Button::GameShouldExit); + } + + void update_button_keycode(const int _keycode, const uint8_t _new_state) + { + auto it = keycode_map.find(_keycode); + if (it == keycode_map.end()) + { + return; // ignore unmapped key + } + + update_button(Device::Keyboard, it->second, _new_state); + } + + DID add_joystick(const int _jid, const uint16_t _num_buttons, const uint16_t _num_axes, std::string _name) + { + logInfo("Joystick connected JID: %d\n", _jid); + // add device (may re-identify it and returns the correct id) + DID id = input::add_device(_num_buttons, _num_axes, _name); + assert(id != 0); + // update id mapping + joystick_map[_jid].raw = id; + + if (is_device_active(id)) + { + active_joysticks.emplace_back(_jid); + } + + return id; + } + + DID add_gamepad(const int _jid, std::string _name) + { + logInfo("Joystick (JID: %d) has xbox mappings\n", _jid); + // add device (may re-identify it and returns the correct id) + DID id = input::add_device(GLFW_GAMEPAD_BUTTON_LAST + 1, GLFW_GAMEPAD_AXIS_LAST + 1, _name); + assert(id != 0); + // update id mapping + joystick_map[_jid].xbox = id; + + if (is_device_active(id)) + { + active_gamepads.emplace_back(_jid); + } + + return id; + } + + void remove_joystick(const int _jid) + { + DID id = joystick_map[_jid].raw; + if (id > 0) + { + logInfo("Joystick (JID: %d) disconnected\n", _jid); + joystick_map[_jid].raw = 0; // reset mapping + remove_device(id); + + // find and remove from active joysticks + auto it = std::find(active_joysticks.begin(), active_joysticks.end(), _jid); + if (it != 
active_joysticks.end()) + { + active_joysticks.erase(it); + } + }; + + id = joystick_map[_jid].xbox; + if (id > 0) + { + joystick_map[_jid].xbox = 0; // reset mapping + remove_device(id); + + // find and remove from active gamepads + auto it = std::find(active_gamepads.begin(), active_gamepads.end(), _jid); + if (it != active_gamepads.end()) + { + active_gamepads.erase(it); + } + }; + } + + void update_joystick(const int _jid, const uint8_t *_buttons, const uint16_t _button_count, const float *_axes, const uint16_t _axes_count) + { + DID did = joystick_map[_jid].raw; + assert(did != 0); + input::update_buttons(did, 0, _buttons, _button_count); + input::update_axes(did, 0, _axes, _axes_count); + } + + void update_gamepad(const int _jid, const GLFWgamepadstate &_state) + { + DID did = joystick_map[_jid].xbox; + assert(did != 0); + input::update_buttons(did, 0, _state.buttons, GLFW_GAMEPAD_BUTTON_LAST + 1); + input::update_axes(did, 0, _state.axes, GLFW_GAMEPAD_AXIS_LAST + 1); + } + + bool is_joystick_active(const int _jid) + { + assert(joystick_map[_jid].raw != 0); // is joystick + return std::find(active_joysticks.begin(), active_joysticks.end(), _jid) != active_joysticks.end(); + } + + bool is_gamepad_active(const int _jid) + { + assert(joystick_map[_jid].xbox != 0); // is joystick + return std::find(active_gamepads.begin(), active_gamepads.end(), _jid) != active_gamepads.end(); + } + + const std::vector &get_active_joysticks() + { + // Sanity check +#ifndef NDEBUG + auto ad = get_active_devices(); + // All entries in active_joysticks are also active devices + for (auto jid : active_joysticks) + { + assert(joystick_map[jid].raw != 0); + assert(std::find(ad.begin(), ad.end(), joystick_map[jid].raw) != ad.end()); + } + // All joysticks in active_devices are also active joysticks + for (auto d : ad) + { + for (int jid = 0; jid < GLFW_JOYSTICK_LAST + 1; ++jid) + { + if (d != 0 && d == joystick_map[jid].raw) + { // d is joystick + assert(std::find(ad.begin(), ad.end(), 
jid) != ad.end()); + } + } + } +#endif // NDEBUG + + if (detect_minigame) + { // return all (connected) joysticks + active_gamepads_tmp.clear(); + for (int jid = 0; jid < GLFW_JOYSTICK_LAST + 1; ++jid) + { + if (joystick_map[jid].raw != 0) + { + active_gamepads_tmp.emplace_back(jid); + } + } + return active_gamepads_tmp; + } + + return active_joysticks; + } + + const std::vector &get_active_gamepads() + { + // Sanity check +#ifndef NDEBUG + auto ad = get_active_devices(); + // All entries in active_joysticks are also active devices + for (auto jid : active_gamepads) + { + assert(joystick_map[jid].xbox != 0); + assert(std::find(ad.begin(), ad.end(), joystick_map[jid].xbox) != ad.end()); + } + // All joysticks in active_devices are also active joysticks + for (auto d : ad) + { + for (int jid = 0; jid < GLFW_JOYSTICK_LAST + 1; ++jid) + { + if (d != 0 && d == joystick_map[jid].xbox) + { // d is joystick + assert(std::find(ad.begin(), ad.end(), jid) != ad.end()); + } + } + } +#endif // NDEBUG + + if (detect_minigame) + { // return all (connected) gamepads + active_gamepads_tmp.clear(); + for (int jid = 0; jid < GLFW_JOYSTICK_LAST + 1; ++jid) + { + if (joystick_map[jid].xbox != 0) + { + active_gamepads_tmp.emplace_back(jid); + } + } + return active_gamepads_tmp; + } + + return active_gamepads; + } + + void detect_minigame_start() + { + detect_minigame = true; + } + + void detect_minigame_frame() + { + // must set active joysticks/gamepad to all connected + // or input will not be polled + // reset after detection is done + } + + void detect_minigame_stop() + { + detect_minigame = false; + } +} diff --git a/src/space_input.h b/src/space_input.h new file mode 100644 index 0000000..c361e4f --- /dev/null +++ b/src/space_input.h @@ -0,0 +1,109 @@ +#pragma once + +#include + +#include "lib/input.h" + +// Note: We extend the input namespace with spacegame specific stuff +namespace input +{ + + namespace device_ns + { + enum device : uint16_t + { + Keyboard, + Mouse + }; + } + 
using Device = device_ns::device; + + namespace button_ns + { + enum button : uint16_t + { + // Debug & Internal actions + ShowStats, + LookAtOrigin, + GameShouldExit, // On window closed or menu exit + + // Gameplay actions + PlaceBlock, + RemoveBlock, + RotateBlockDec, + RotateBlockInc, + + Count + }; + } + using Button = button_ns::button; + + namespace axis_ns + { + enum axis : uint16_t + { + MoveForward, + MoveRight, + MoveUp, + CameraPitch, + CameraYaw, + CameraRoll, + + Count + }; + } + using Axis = axis_ns::axis; + + namespace mouse_axis_ns + { + enum mouse_axis : uint16_t + { + CursorX, + CursorY, + ScrollX, + ScrollY, + + Count + }; + } + using MouseAxis = mouse_axis_ns::mouse_axis; + + // Initialize input system + // Set default mapping + void init(); + + // Update the button state of a keyboard keycode + void update_button_keycode(const int _keycode, const uint8_t _new_state); + + DID add_joystick(const int jid, const uint16_t _num_buttons, const uint16_t _num_axes, std::string _name); + + // Adds a joystick as a gamepad + // Note: Use the same _jid to associate it with its raw counterpart + // Note: Automatically removed the corresponding joystick is removed + DID add_gamepad(const int jid, std::string _name); + + void set_gamepad_default_mappings(const int _jid); + + // Mark joystick as disconnected + void remove_joystick(const int _jid); + + // Update joystick state (RAW) + void update_joystick(const int _jid, const uint8_t *_buttons, const uint16_t _button_count, const float *_axes, const uint16_t _axes_count); + + // Update gamepad state (XBOX) + void update_gamepad(const int _jid, const GLFWgamepadstate &_state); + + // Returns whether the given joystick is active (connected and at least one mapping) + bool is_joystick_active(const int _jid); + + // Returns whether the given gamepad is active (connected and at least one mapping) + bool is_gamepad_active(const int _jid); + + // Returns list of active joysticks (connected and at least one mapping) + 
// Useful to check which devices need to be updated + const std::vector &get_active_joysticks(); + + // Returns list of active gamepads (connected and at least one mapping) + // Useful to check which devices need to be updated + const std::vector &get_active_gamepads(); +} diff --git a/src/space_math.cpp b/src/space_math.cpp new file mode 100644 index 0000000..cf8128f --- /dev/null +++ b/src/space_math.cpp @@ -0,0 +1,448 @@ +#include "space_math.h" + +constexpr Vec3 dir_to_normal_table[Direction::Count] = { + // PosX + Vec3(1.0f, 0.0f, 0.0f), + // NegX + Vec3(-1.0f, 0.0f, 0.0f), + // PosY + Vec3(0.0f, 1.0f, 0.0f), + // NegY + Vec3(0.0f, -1.0f, 0.0f), + // PosZ + Vec3(0.0f, 0.0f, 1.0f), + // NegZ + Vec3(0.0f, 0.0f, -1.0f), +}; + +Vec3 dir_to_normal(Direction _dir) +{ + return dir_to_normal_table[_dir]; +} + +void transform_mtx(const Vec3 &_position, const Quat &_orientation, float *_out_mtx) +{ + bx::mtxFromQuaternion(_out_mtx, _orientation); + bx::store(&_out_mtx[12], _position); +} + +inline constexpr InvRay InverseRay(Ray &_ray) +{ + return { + _ray.position, + {1.0f / _ray.direction.x, + 1.0f / _ray.direction.y, + 1.0f / _ray.direction.z}}; +} + +bool CheckCollisionRayBox(const InvRay &_ray, const AABB &_box, float &_out_distance) +{ + static float t[8]; + + t[0] = (_box.min.x - _ray.position.x) * _ray.inverse_direction.x; + t[1] = (_box.max.x - _ray.position.x) * _ray.inverse_direction.x; + t[2] = (_box.min.y - _ray.position.y) * _ray.inverse_direction.y; + t[3] = (_box.max.y - _ray.position.y) * _ray.inverse_direction.y; + t[4] = (_box.min.z - _ray.position.z) * _ray.inverse_direction.z; + t[5] = (_box.max.z - _ray.position.z) * _ray.inverse_direction.z; + t[6] = bx::max(bx::max(bx::min(t[0], t[1]), bx::min(t[2], t[3])), bx::min(t[4], t[5])); + t[7] = bx::min(bx::min(bx::max(t[0], t[1]), bx::max(t[2], t[3])), bx::max(t[4], t[5])); + + _out_distance = t[6]; + return !(t[7] < 0 || t[6] > t[7]); +} + +bool CheckCollisionRayOrientedBox(const Ray &_ray, const AABB 
&_box, const float *_inverse_model_matrix, float &_distance) +{ + // We transform the ray "back" to the models origin where the axis aligned bounding box is valid + bx::Vec3 ray_origin = _ray.position; + bx::Vec3 ray_lookat = bx::add(_ray.position, _ray.direction); + + Ray new_ray = { + .position = bx::mul(ray_origin, _inverse_model_matrix), + .direction = bx::mul(ray_lookat, _inverse_model_matrix), + }; + new_ray.direction = bx::normalize(bx::sub(new_ray.direction, new_ray.position)); + + // now do normal AABB + return CheckCollisionRayBox(InverseRay(new_ray), _box, _distance); +} + +// TODO check if it reports same results as matrix version +bool CheckCollisionRayOrientedBox(Ray _ray, const AABB &_box, const bx::Vec3 &_model_position, const bx::Quaternion &_model_orientation, float &_distance) +{ + // We transform the ray "back" to the models origin where the axis aligned bounding box is valid + _ray.position = bx::sub(_ray.position, _model_position); + _ray.position = bx::mul(_ray.position, bx::invert(_model_orientation)); + _ray.direction = bx::mul(_ray.direction, bx::invert(_model_orientation)); + + // now do normal AABB check + return CheckCollisionRayBox(InverseRay(_ray), _box, _distance); +} + +// Check ray <-> AABB collision +// Optimized version with box.min==(0,0,0) +bool CheckCollisionRayBoxOrigin(const InvRay &_ray, const bx::Vec3 &_box_max, float &_distance) +{ + static float t[8]; + + t[0] = (0.0f - _ray.position.x) * _ray.inverse_direction.x; + t[1] = (_box_max.x - _ray.position.x) * _ray.inverse_direction.x; + t[2] = (0.0f - _ray.position.y) * _ray.inverse_direction.y; + t[3] = (_box_max.y - _ray.position.y) * _ray.inverse_direction.y; + t[4] = (0.0f - _ray.position.z) * _ray.inverse_direction.z; + t[5] = (_box_max.z - _ray.position.z) * _ray.inverse_direction.z; + t[6] = bx::max(bx::max(bx::min(t[0], t[1]), bx::min(t[2], t[3])), bx::min(t[4], t[5])); + t[7] = bx::min(bx::min(bx::max(t[0], t[1]), bx::max(t[2], t[3])), bx::max(t[4], t[5])); + + 
_distance = t[6]; + return !(t[7] < 0 || t[6] > t[7]); +} + +void combineAABB(AABB &_a, const AABB &_b) +{ + _a.min.x = bx::min(_a.min.x, _b.min.x); + _a.min.y = bx::min(_a.min.y, _b.min.y); + _a.min.z = bx::min(_a.min.z, _b.min.z); + _a.max.x = bx::max(_a.max.x, _b.max.x); + _a.max.y = bx::max(_a.max.y, _b.max.y); + _a.max.z = bx::max(_a.max.z, _b.max.z); +} + +float angle_between(const Vec3 _v0, const Vec3 _v1, const Vec3 _axis) +{ + // Ref.: https://math.stackexchange.com/questions/878785/how-to-find-an-angle-in-range0-360-between-2-vectors + const float dot = bx::dot(_v0, _v1); + const float det = bx::dot(_axis, bx::cross(_v0, _v1)); + return bx::atan2(det, dot); +} + +float angle_between_faces(const BlockFace _f0, const BlockFace _f1, const Vec3 _common_edge) +{ + // Note: The angle from face0 to face1 is the same as from neg(normal0) to normal1 + // We use the common edge as the rotational axis + // const Vec3 n0 = toVec3(negate(_f0.normal)); + // const Vec3 n1 = toVec3(_f1.normal); + + // return angle_between(n0, n1, bx::normalize(_common_edge)); + assert(false); + return 0.0f; +} + +uint64_t reverse_bits(uint64_t b) +{ + b = (b & 0xAAAAAAAAAAAAAAAA) >> 1 | (b & 0x5555555555555555) << 1; + b = (b & 0xCCCCCCCCCCCCCCCC) >> 2 | (b & 0x3333333333333333) << 2; + b = (b & 0xF0F0F0F0F0F0F0F0) >> 4 | (b & 0x0F0F0F0F0F0F0F0F) << 4; + b = (b & 0xFF00FF00FF00FF00) >> 8 | (b & 0xFF00FF00FF00FF00) << 8; + b = (b & 0xFFFF0000FFFF0000) >> 16 | (b & 0x0000FFFF0000FFFF) << 16; + b = b >> 32 | b << 32; + return b; +} + +uint64_t reverse_bits_in_bytes(uint64_t b) +{ + b = (b & 0xAAAAAAAAAAAAAAAA) >> 1 | (b & 0x5555555555555555) << 1; + b = (b & 0xCCCCCCCCCCCCCCCC) >> 2 | (b & 0x3333333333333333) << 2; + b = (b & 0xF0F0F0F0F0F0F0F0) >> 4 | (b & 0x0F0F0F0F0F0F0F0F) << 4; + return b; +} + +// TODO return result in 'uint8_t[4] _out_res' +ComponentVertices get_component_face_vertices(FaceType _face, Direction _view_dir) +{ + ComponentVertices res = {8, 8, 8, 8}; // init all as 
invalid + + // TODO OPT try + // use Quad verts then rotate values (bytes in a uint32?) + // by (_face-2) positions to the left + // and set vert[3] = 8 + switch (_face) + { + case FaceType::None : + return res; + + case FaceType::Quad : + res = {2,0,1,3}; + break; + case FaceType::TrigBL : + res = {2,0,1,8}; + break; + case FaceType::TrigBR : + res = {0,1,3,8}; + break; + case FaceType::TrigTR : + res = {1,3,2,8}; + break; + case FaceType::TrigTL : + res = {3,2,0,8}; + break; + + default: + break; + } + + // construct normalized orientation based on _view_dir + Orientation ori; + + switch (_view_dir) + { + case Direction::PosX: + ori = Orientation(_view_dir, Direction::PosY); + break; + case Direction::NegX: + ori = Orientation(_view_dir, Direction::PosY); + break; + case Direction::PosY: + ori = Orientation(_view_dir, Direction::PosZ); + break; + case Direction::NegY: + ori = Orientation(_view_dir, Direction::PosZ); + break; + case Direction::PosZ: + ori = Orientation(_view_dir, Direction::PosY); + break; + case Direction::NegZ: + ori = Orientation(_view_dir, Direction::PosY); + break; + default: + assert(false); + return res; + } + + // construct rot matrix from ori + I8RotMat3x3 mat(ori); + + // rotate via matrix + res[0] = mat.rotateComponentVertex(res[0]); + res[1] = mat.rotateComponentVertex(res[1]); + res[2] = mat.rotateComponentVertex(res[2]); + if (_face == FaceType::Quad) + res[3] = mat.rotateComponentVertex(res[3]); + + return res; +} + +const struct component_face component_face::NONE = +{ + .normal = FaceNormal(0, 1, 0), // irrelevant, but valid + .tex_apex = TexCorner::Count, // irrelevant + .vertices = {0, 0, 0, 0}, // irrelevant, and marking it as invalid + .texture_id = 0, // irrelevant +}; + +////////////////////////////////////////////////////////////////////////////////////////////////////// +/* Integer Math */ + +void mulMtxVec3(int8_t *_result, const int8_t *_mat, const int8_t *_vec) +{ + _result[0] = _vec[0] * _mat[0] + _vec[1] * _mat[3] + 
_vec[2] * _mat[6]; + _result[1] = _vec[0] * _mat[1] + _vec[1] * _mat[4] + _vec[2] * _mat[7]; + _result[2] = _vec[0] * _mat[2] + _vec[1] * _mat[5] + _vec[2] * _mat[8]; +} + +void mulMtxMtx(int8_t *_result, const int8_t *_a, const int8_t *_b) +{ + mulMtxVec3(&_result[ 0], &_a[ 0], _b); + mulMtxVec3(&_result[ 3], &_a[ 3], _b); + mulMtxVec3(&_result[ 6], &_a[ 6], _b); +} + +i8rotmat3x3::i8rotmat3x3(const Orientation _ori) + { + Direction forward, up; + _ori.to_dirs(forward, up); + Direction right = rotate(forward, up); + + I8Vec3 x(right); + I8Vec3 y(up); + I8Vec3 z(forward); + + m[0] = x.x; + m[1] = x.y; + m[2] = x.z; + m[3] = y.x; + m[4] = y.y; + m[5] = y.z; + m[6] = z.x; + m[7] = z.y; + m[8] = z.z; + } + + +I8Vec3 i8rotmat3x3::mul(const I8Vec3 &_vec) +{ + I8Vec3 res; + mulMtxVec3(&res.x, m, &_vec.x); + return res; +} + +i8rotmat3x3 i8rotmat3x3::mul(const i8rotmat3x3 &_m) +{ + i8rotmat3x3 res; + mulMtxMtx(res.m, m, _m.m); + return res; +} + +i8rotmat3x3 i8rotmat3x3::transpose() +{ + // [0] [3] [6] [0] [1] [2] + // [1] [4] [7] => [3] [4] [5] + // [2] [5] [8] [6] [7] [8] + i8rotmat3x3 res; + res.m[0] = m[0]; + res.m[1] = m[3]; + res.m[2] = m[6]; + res.m[3] = m[1]; + res.m[4] = m[4]; + res.m[5] = m[7]; + res.m[6] = m[2]; + res.m[7] = m[5]; + res.m[8] = m[8]; + return res; +} + +// _vert MUST be in [0;7] +uint8_t i8rotmat3x3::rotateComponentVertex(uint8_t _vert) +{ + assert(_vert >= 0 && _vert <= 7); + + I8Vec3 vec(_vert); + + vec = mul(vec); + + assert(vec.x >= -1 && vec.x <= 1 && + vec.y >= -1 && vec.y <= 1 && + vec.z >= -1 && vec.z <= 1); + + return vec.toCompVert(); +} + +FaceNormal i8rotmat3x3::rotateFaceNormal(const FaceNormal _normal) +{ + return FaceNormal(mul(_normal)); +} + +Direction i8rotmat3x3::rotateDirection(Direction _dir) +{ + return mul(I8Vec3(_dir)).toDirection(); +} + +FaceType component_face::faceType(Direction* _out_view_dir) const +{ + using namespace direction_ns; + // Faces are interpreted in normalized orientation + // up== PosY when forward== 
{PosX,NegX,PosZ,NegZ} + // and up== PosZ when forward== {PosY,NegY} + + // vertices => type + direction + + if (_out_view_dir) + *_out_view_dir = Direction::Count; + + if (!isValid()) + return FaceType::None; + + // ! The triangle apex determines the type ! + + // determine direction + + // vertices are 3 bits representing z,y,x respectively + // & will leave only the common 1 bit => aka the direction + // if x=1,y=1 or z=1, if the common bit is 0 however + // we need to negate first + // => copy the 3 bits over and to everything in one go + uint8_t bits = vertices[0] & vertices[1] & vertices[2]; + uint8_t bits2 = (~vertices[0] & ~vertices[1] & ~vertices[2]) & 0b111; + bits |= (bits2 << 3); + + // Now the bits are: + // [0|0|PosZ|PosY|PosX|NegZ|NegY|NegX] + Direction dir; + Direction up; + switch (bits) + { + case 1: + dir = NegX; + up = PosY; + break; + case 2: + dir = NegY; + up = PosZ; + break; + case 4: + dir = NegZ; + up = PosY; + break; + case 8: + dir = PosX; + up = PosY; + break; + case 16: + dir = PosY; + up = PosZ; + break; + case 32: + dir = PosZ; + up = PosY; + break; + + default: + break; + } + + if (_out_view_dir) + *_out_view_dir = dir; + + if (isQuad()) + return FaceType::Quad; + + // Alternatively we could maybe? 
+ // Shuffle bits to be + // [0|0|NegZ|PosZ|NegY|PosY|NegX|PosX] + // and just + // Direction dir = (Direction)bx::countTrailingZeros(bits); + + // rotate apex in reverse + I8RotMat3x3 mat(Orientation(dir, up)); + mat = mat.transpose(); + + uint8_t vert = mat.rotateComponentVertex(vertices[1]); + + // match with "front" vertices + switch (vert) + { + case 0: + return FaceType::TrigBL; + case 1: + return FaceType::TrigBR; + case 2: + return FaceType::TrigTL; + case 3: + return FaceType::TrigTR; + default: + break; + } + + return FaceType::None; +} + +Direction i8vec3::toDirection() +{ + assert(x >= -1 && x <= 1 && + y >= -1 && y <= 1 && + z >= -1 && z <= 1 && + (x + y + z != 0)); + return (Direction)((x == -1) + + (y == 1) * 2 + (y == -1) * 3 + + (z == 1) * 4 + (z == -1) * 5); +} + +uint8_t face_normal::dims() +{ + uint8_t res = 0; + res += x != 0; + res += y != 0; + res += z != 0; + return res; +} diff --git a/src/space_math.h b/src/space_math.h new file mode 100644 index 0000000..9064191 --- /dev/null +++ b/src/space_math.h @@ -0,0 +1,821 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +// Dummy defines for broken intellisense +// #define size_t (unsigned long int) + +// Note: bx structs behave a bit strange (ex. deleted default constructor) +// For ease of use we provide our own equivalents with implicit casting +// to and from the corresponding bx types. + +// TODO move all lookup tables into .cpp without constexpr +// => constexpr copies it into each translation unit! 
+ +typedef struct vec3 +{ + float x, y, z; + + vec3() = default; + ~vec3() = default; + + constexpr vec3(const float _v) : x(_v), y(_v), z(_v) {}; + + constexpr vec3(const float _x, const float _y, const float _z) + : x(_x), y(_y), z(_z) {} + + // User-defined implicit constructor from bx::Vec3 + constexpr vec3(const bx::Vec3 &_v) : x(_v.x), y(_v.y), z(_v.z) {} + + // User-defined implicit conversion to bx::Vec3 + constexpr operator bx::Vec3() const { return bx::Vec3(x, y, z); } +} Vec3; + +typedef struct orientation Orientation; +typedef struct face_normal FaceNormal; + +typedef struct vec4 +{ + float x, y, z, w; + + vec4() = default; + ~vec4() = default; + + constexpr vec4(const float _v) : x(_v), y(_v), z(_v), w(_v) {}; + + constexpr vec4(const float _x, const float _y, const float _z, const float _w) + : x(_x), y(_y), z(_z), w(_w) {} +} Vec4; + +typedef struct quat +{ + float x, y, z, w; + + quat() = default; + ~quat() = default; + + constexpr quat(const float _x, const float _y, const float _z, const float _w) + : x(_x), y(_y), z(_z), w(_w) {} + + // User-defined implicit constructor from bx::Quaternion + constexpr quat(const bx::Quaternion &_v) : x(_v.x), y(_v.y), z(_v.z), w(_v.w) {} + + // User-defined implicit conversion to bx::Quaternion + constexpr operator bx::Quaternion() const { return bx::Quaternion(x, y, z, w); } + + static constexpr quat unit() + { + return quat(0.0f, 0.0f, 0.0f, 1.0f); + } +} Quat; + +typedef struct color +{ + uint8_t r, g, b, a; + + color() : r(0), g(0), b(0), a(1) {} + color(uint8_t _r, uint8_t _g, uint8_t _b, uint8_t _a) : r(_r), g(_g), b(_b), a(_a) {} +} Color; + +typedef Color Texel; + +typedef struct ray +{ + Vec3 position; + Vec3 direction; +} Ray; + +typedef struct inv_ray +{ + Vec3 position; + Vec3 inverse_direction; +} InvRay; + +typedef struct aabb +{ + Vec3 min; + Vec3 max; + + aabb() = default; + ~aabb() = default; + + constexpr aabb(const Vec3 _min, const Vec3 _max) : min(_min), max(_max) {}; +} AABB; + +namespace 
direction_ns +{ + enum direction : uint8_t + { + // The order MUST stay like this + // Other systems rely on this exact order + // e.g. FaceNormals + PosX, + NegX, + PosY, + NegY, + PosZ, + NegZ, + + Count + }; +} +using Direction = direction_ns::direction; + +constexpr Direction negate(const Direction _dir) +{ + // +1 if even or -1 if odd + return (Direction)((uint8_t)_dir ^ 0x1); +} + +// TODO OPT bitpack or padding +typedef struct i8vec3 +{ + int8_t x, y, z; + + constexpr i8vec3() : x(0), y(0), z(0) {} + + constexpr i8vec3(const int8_t _x, const int8_t _y, const int8_t _z) : x(_x), y(_y), z(_z) + { + } + + constexpr i8vec3(Direction _dir) + { + using namespace direction_ns; + x = (_dir > 1) ? 0 : (_dir == PosX) ? 1 : -1; + z = (_dir < 4) ? 0 : (_dir == PosZ) ? 1 : -1; + // x==z only if x==z==0 + y = (x != z) ? 0 : (_dir == PosY) ? 1 : -1; + } + + constexpr i8vec3(uint8_t _comp_vert) { + x = ((_comp_vert & 0b001) == 0) ? -1 : 1; + y = ((_comp_vert & 0b010) == 0) ? -1 : 1; + z = ((_comp_vert & 0b100) == 0) ? 
-1 : 1; + } + + i8vec3 negate() + { + return i8vec3(-x, -y, -z); + } + + // Must be a valid conversion (no safety checks) + Direction toDirection(); + + uint8_t toCompVert() { + return (x == 1) + (y == 1) * 2 + (z == 1) * 4; + } + + auto operator<=>(const i8vec3&) const = default; +} I8Vec3; +static_assert(sizeof(I8Vec3) == 3); + +// A integer based rotation matrix +// Values should only be in [-1;1] and only encode valid rotations +typedef struct i8rotmat3x3 +{ + // [0] [3] [6] + // [1] [4] [7] + // [2] [5] [8] + int8_t m[9] = {}; + + i8rotmat3x3() {} + + i8rotmat3x3(const Orientation _ori); + + // consider the static version of this method: mulMtxVec3 + I8Vec3 mul(const I8Vec3& _vec); + // consider the static version of this method: mulMtxMtx + i8rotmat3x3 mul(const i8rotmat3x3& _vec); + + // Note: For a rotation matrix the inverse IS the transpose + i8rotmat3x3 transpose(); + + uint8_t rotateComponentVertex(uint8_t _vert); + + FaceNormal rotateFaceNormal(const FaceNormal _normal); + + Direction rotateDirection(Direction _dir); + + // we want real, but integer based matrix multiplication + // specifically for rotations + // convert to and from orientations + // ori -> directions(up, forward) -> to vectors + // match base vectors with directions -> ori from dirs + // must be able to rotate + // - component vertices + // - face normals (and therefore directions) + +} I8RotMat3x3; + +// TODO OBB ? +// but with vec3 & quat - right? <.< + +// reverses all bits i.e. bit#0 is swapped with bit#63, bit#1 with bit#62, ... +uint64_t reverse_bits(uint64_t b); + +// reverse all bits in each byte i.e. bit#0 is swapped with bit#7, bit#1 with bit6,... 
+uint64_t reverse_bits_in_bytes(uint64_t b); + +constexpr Direction rotate_dir[7 * 7] = + { + // Manually defined direction rotation + // PosX*6+PosX + Direction::PosX, + // PosX*6+NegX + Direction::PosX, + // PosX*6+PosY + Direction::NegZ, + // PosX*6+NegY + Direction::PosZ, + // PosX*6+PosZ + Direction::PosY, + // PosX*6+NegZ + Direction::NegY, + // PosX*6+Count + Direction::Count, + + // NegX*6+PosX + Direction::NegX, + // NegX*6+NegX + Direction::NegX, + // NegX*6+PosY + Direction::PosZ, + // NegX*6+NegY + Direction::NegZ, + // NegX*6+PosZ + Direction::NegY, + // NegX*6+NegZ + Direction::PosY, + // NegX*6+Count + Direction::Count, + + // PosY*6+PosX + Direction::PosZ, + // PosY*6+NegX + Direction::NegZ, + // PosY*6+PosY + Direction::PosY, + // PosY*6+NegY + Direction::PosY, + // PosY*6+PosZ + Direction::NegX, + // PosY*6+NegZ + Direction::PosX, + // PosY*6+Count + Direction::Count, + + // NegY*6+PosX + Direction::NegZ, + // NegY*6+NegX + Direction::PosZ, + // NegY*6+PosY + Direction::NegY, + // NegY*6+NegY + Direction::NegY, + // NegY*6+PosZ + Direction::PosX, + // NegY*6+NegZ + Direction::NegX, + // NegY*6+Count + Direction::Count, + + // PosZ*6+PosX + Direction::NegY, + // PosZ*6+NegX + Direction::PosY, + // PosZ*6+PosY + Direction::PosX, + // PosZ*6+NegY + Direction::NegX, + // PosZ*6+PosZ + Direction::PosZ, + // PosZ*6+NegZ + Direction::PosZ, + // PosZ*6+Count + Direction::Count, + + // NegZ*6+PosX + Direction::PosY, + // NegZ*6+NegX + Direction::NegY, + // NegZ*6+PosY + Direction::NegX, + // NegZ*6+NegY + Direction::PosX, + // NegZ*6+PosZ + Direction::NegZ, + // NegZ*6+NegZ + Direction::NegZ, + // NegZ*6+Count + Direction::Count, + + // Count*6+PosX + Direction::Count, + // Count*6+NegX + Direction::Count, + // Count*6+PosY + Direction::Count, + // Count*6+NegY + Direction::Count, + // Count*6+PosZ + Direction::Count, + // Count*6+NegZ + Direction::Count, + // Count*6+Count + Direction::Count, +}; + +// Rotates _dir by 90deg around _axis according to the 
left-claw-rule +// Supports Direction::Count as input and always returns Count then +constexpr Direction rotate(Direction _dir, Direction _axis) +{ + return rotate_dir[_dir * 7 + _axis]; +} + +Vec3 dir_to_normal(Direction _dir); + +constexpr uint8_t pack_dirs(const Direction _forward, const Direction _up) +{ + // each dir is 3 bit wide + return (_forward << 3) | _up; +} + +constexpr void unpack_dirs(const uint8_t _packed, Direction &_out_forward, Direction &_out_up) +{ + // each dir is 3 bit wide + _out_forward = (Direction)(_packed >> 3); + _out_up = (Direction)(_packed & 0b111); +} + +constexpr uint8_t orientation_2_packed_dirs[24] = + { + // Manually defined orientation + // The default orientation is forward==PosZ with up==PosY + // We then rotate up according to the left-claw-rule (around forward) + // We then rotate forward according to the left-claw-rule (around up) + pack_dirs(Direction::PosZ, Direction::PosY), // 0 + pack_dirs(Direction::PosZ, Direction::NegX), // 1 + pack_dirs(Direction::PosZ, Direction::NegY), // 2 + pack_dirs(Direction::PosZ, Direction::PosX), // 3 + + pack_dirs(Direction::PosX, Direction::PosY), // 4 + pack_dirs(Direction::PosX, Direction::PosZ), // 5 + pack_dirs(Direction::PosX, Direction::NegY), // 6 + pack_dirs(Direction::PosX, Direction::NegZ), // 7 + + pack_dirs(Direction::NegZ, Direction::PosY), // 8 + pack_dirs(Direction::NegZ, Direction::PosX), // 9 + pack_dirs(Direction::NegZ, Direction::NegY), // 10 + pack_dirs(Direction::NegZ, Direction::NegX), // 11 + + pack_dirs(Direction::NegX, Direction::PosY), // 12 + pack_dirs(Direction::NegX, Direction::NegZ), // 13 + pack_dirs(Direction::NegX, Direction::NegY), // 14 + pack_dirs(Direction::NegX, Direction::PosZ), // 15 + + // For forward==NegY we default to up==PosZ + pack_dirs(Direction::NegY, Direction::PosZ), // 16 + pack_dirs(Direction::NegY, Direction::NegX), // 17 + pack_dirs(Direction::NegY, Direction::NegZ), // 18 + pack_dirs(Direction::NegY, Direction::PosX), // 19 + + // 
For forward==PosY we default to up==NegZ + pack_dirs(Direction::PosY, Direction::NegZ), // 20 + pack_dirs(Direction::PosY, Direction::NegX), // 21 + pack_dirs(Direction::PosY, Direction::PosZ), // 22 + pack_dirs(Direction::PosY, Direction::PosX), // 23 +}; + +// Note packed dirs MUST be valid as orientation +// aka dirs must be perpendicular +constexpr uint8_t packed_dirs_2_orientation[44] = + { // Reverse of ori_to_packed_dirs + UINT8_MAX, + UINT8_MAX, + 4, + 6, + 5, + 7, + UINT8_MAX, + UINT8_MAX, + UINT8_MAX, + UINT8_MAX, + 12, // 10th element + 14, + 15, + 13, + UINT8_MAX, + UINT8_MAX, + 23, + 21, + UINT8_MAX, + UINT8_MAX, + 22, // 20th element + 20, + UINT8_MAX, + UINT8_MAX, + 19, + 17, + UINT8_MAX, + UINT8_MAX, + 16, + 18, + UINT8_MAX, // 30th element + UINT8_MAX, + 3, + 1, + 0, + 2, + UINT8_MAX, + UINT8_MAX, + UINT8_MAX, + UINT8_MAX, + 9, // 40th element + 11, + 8, + 10}; + +typedef struct orientation +{ + uint8_t data; + + orientation() : data(packed_dirs_2_orientation[pack_dirs(Direction::PosZ, Direction::PosY)]) {}; + ~orientation() = default; + + constexpr orientation(uint8_t _data) : data(_data) {}; + + constexpr orientation(const Direction _forward, const Direction _up) + : data(packed_dirs_2_orientation[pack_dirs(_forward, _up)]) {}; + + constexpr void to_dirs(Direction &_out_forward, Direction &_out_up) const + { + unpack_dirs(orientation_2_packed_dirs[data], _out_forward, _out_up); + } + + constexpr void to_matrix(float *_out_mtx) + { + Direction forward; + Direction up; + to_dirs(forward, up); + Vec3 forward_vec = dir_to_normal(forward); + Vec3 up_vec = dir_to_normal(up); + Vec3 right_vec = bx::cross(up_vec, forward_vec); + // default orientation: forward = PosZ & up = PosY + // should result in identity matrix + // => mtx = (right, up, forward) + // Note: GLSL matrices are column major + _out_mtx[0] = right_vec.x == -0.0f ? 0.0f : right_vec.x; + _out_mtx[1] = right_vec.y == -0.0f ? 0.0f : right_vec.y; + _out_mtx[2] = right_vec.z == -0.0f ? 
0.0f : right_vec.z; + _out_mtx[3] = up_vec.x == -0.0f ? 0.0f : up_vec.x; + _out_mtx[4] = up_vec.y == -0.0f ? 0.0f : up_vec.y; + _out_mtx[5] = up_vec.z == -0.0f ? 0.0f : up_vec.z; + _out_mtx[6] = forward_vec.x == -0.0f ? 0.0f : forward_vec.x; + _out_mtx[7] = forward_vec.y == -0.0f ? 0.0f : forward_vec.y; + _out_mtx[8] = forward_vec.z == -0.0f ? 0.0f : forward_vec.z; + } +} Orientation; + +// Ensure default orientation equals integer 0 +constexpr auto check_default_orientation = []() constexpr -> bool +{ + Orientation ori(Direction::PosZ, Direction::PosY); + return ori.data == 0; +}; +static_assert(check_default_orientation()); + +// Rotates _ori by 90deg around _axis according to the left-claw-rule +constexpr Orientation rotate(const Orientation _ori, const Direction _axis) +{ + Direction forward; + Direction up; + _ori.to_dirs(forward, up); + + forward = rotate(forward, _axis); + up = rotate(up, _axis); + + return Orientation(forward, up); +} + +////////////////////////////////////////////////////////////////////////////////////////////////////// + +typedef struct face_normal : public I8Vec3 +{ + // default constructor, creates invalid face normal + face_normal() : I8Vec3() {}; + + face_normal(I8Vec3 _v) : I8Vec3(_v.x, _v.y, _v.z) + { + assert((_v.x >= -1 && _v.x <= 1) && + (_v.y >= -1 && _v.y <= 1) && + (_v.z >= -1 && _v.z <= 1) && + !(_v.x == 0 && _v.y == 0 && _v.z == 0)); + } + + // Parameter MUST be in [-1;1] + // At least one MUST NOT be 0 + face_normal(int8_t _x, int8_t _y, int8_t _z) : + I8Vec3(_x, _y, _z) + { + assert((_x >= -1 && _x <= 1) && + (_y >= -1 && _y <= 1) && + (_z >= -1 && _z <= 1) && + !(_x == 0 && _y == 0 && _z == 0)); + } + + // Convenience constructor + face_normal(Direction _dir) : face_normal(I8Vec3(_dir)) {} + + // returns the dimensionality (e.g. 
dims({0,1,0})==1 ; dims({-1,1,1})==3) + uint8_t dims(); + +} FaceNormal; + + +////////////////////////////////////////////////////////////////////////////////////////////////////// + +// Work around X.h shenanigan macros +#ifdef None +#define OldNone None +#define PlsRedefineNone 1 +#undef None +#endif + +namespace component_face_type_ns +{ + enum component_face_type : uint8_t + { + // TODO this would be much nicer if the trigs corresponded with + // their comp vertices (apex) + // 0 == BL, 1 == BR, 2 == TL, 3 == TR + None, + Quad, + TrigBL, + TrigBR, + TrigTR, + TrigTL, + + Count + }; +} +using FaceType = component_face_type_ns::component_face_type; + +// Work around X.h shenanigan macros +#ifdef PlsRedefineNone +#define None OldNone +#endif + +static_assert(FaceType::None == 0); // necessary for light flooding algorithm +static_assert(FaceType::Quad == 1); // see "flooding_table" +static_assert(FaceType::TrigBL == 2); +static_assert(FaceType::TrigBR == 3); +static_assert(FaceType::TrigTR == 4); +static_assert(FaceType::TrigTL == 5); +static_assert(FaceType::Count == 6); + +////////////////////////////////////////////////////////////////////////////////////////////////////// + +typedef struct face_edge +{ + uint16_t v0; + uint16_t v1; + + face_edge() = default; + face_edge(uint16_t _v0, uint16_t _v1) : v0(_v0), v1(_v1) {}; + + bool operator==(const face_edge &) const = default; +} FaceEdge; + +template <> +struct std::hash +{ + size_t operator()(const FaceEdge &_v) const noexcept + { + uint32_t val = (((uint32_t)_v.v0) << 16) | (((uint32_t)_v.v1)); + return std::hash{}(val); + } +}; + +typedef struct u8_vec3 +{ + uint8_t x; + uint8_t y; + uint8_t z; + + u8_vec3() = default; + u8_vec3(const uint8_t _x, const uint8_t _y, const uint8_t _z) : x(_x), y(_y), z(_z) {}; + + bool operator==(const u8_vec3 &) const = default; +} U8Vec3; + +template <> +struct std::hash +{ + size_t operator()(const U8Vec3 &_v) const noexcept + { + uint32_t val = (((uint32_t)_v.x) << 16) | 
(((uint32_t)_v.y) << 8) | (((uint32_t)_v.z)); + return std::hash{}(val); + } +}; + +////////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace texture_corner_ns +{ + enum corner : uint32_t + { + TopLeft, + CenterLeft, + BotLeft, + CenterBot, + BotRight, + CenterRight, + TopRight, + CenterTop, + + Count + }; +} +using TexCorner = texture_corner_ns::corner; + +// Convenience struct (Basically an array in a trench-coat) +struct ComponentVertices { + uint8_t data[4]; + + uint8_t& operator[](int _index) + { + return data[_index]; + } + const uint8_t& operator[] (int _index) const + { + return data[_index]; + } + + bool operator==(const ComponentVertices& _o) const + { + return data[0]==_o[0] && data[1]==_o[1] && data[2]==_o[2] && data[3]==_o[3]; + } +}; + +// Convenience struct (Basically an array in a trench-coat) +struct BlockVertices { + uint16_t data[4]; + + uint16_t& operator[](int _index) + { + return data[_index]; + } + const uint16_t& operator[] (int _index) const + { + return data[_index]; + } + + bool operator==(const BlockVertices& _o) const + { + return data[0]==_o[0] && data[1]==_o[1] && data[2]==_o[2] && data[3]==_o[3]; + } +}; + +// TODO move to world.h? +typedef struct component_face +{ + FaceNormal normal; + TexCorner tex_apex; // defines how the triangle/quad samples its texture. 
In [0;7] + // vertices[1] corresponds with this tex-corners + ComponentVertices vertices; // counter-clockwise winding // vertices[1] MUST be triangle apex // TODO OPT if we find a way to rotate FaceType's we can get rid of these + uint32_t texture_id; // gfx::add_component_textures + + bool isValid() const { return vertices[0] != vertices[1]; } + bool isQuad() const { return vertices[3] < 8; } + + // Returns the face type as seen in normalized direction, based on the vertices + // Inner faces not supported for now + // Also returns the view direction if non-nullptr + FaceType faceType(Direction* _out_view_dir = nullptr) const; + + // Convenience initializer for non-existant/empty faces + static const struct component_face NONE; +} ComponentFace; +constexpr size_t component_face_size = sizeof(ComponentFace); + + +// A face in block coordinates +// used for block welding +typedef struct block_face +{ + const uint16_t INVALID_VERT = 1024; // anything >728 will do, maybe this value can be optimized + + uint8_t comp_coords[3]; // coordinates of origination component in block + TexCorner tex_apex; + + // block space vertex ids [0;728] + // vert[3] may be any value, but is only valid of isQuad()==true + BlockVertices vertices; // counter-clockwise winding // vertices[1] MUST be triangle apex + + FaceType faceType; + FaceNormal normal; + uint32_t comp_texture_id; + + bool visited; // for surface walk + bool valid; // for optimization (only valid faces get into the vertex buffer) + uint32_t texture_ids[TexCorner::Count]; // one per TexCorner // for optimization + + // Constructor from ComponentFace + explicit block_face(const BlockVertices& block_verts, uint8_t _off_x, uint8_t _off_y, uint8_t _off_z, + TexCorner _tex_apex, FaceType _faceType, FaceNormal _normal, uint32_t _comp_texture_id) : + comp_coords{_off_x, _off_y, _off_z}, + tex_apex(_tex_apex), vertices(block_verts), + faceType(_faceType), normal(_normal), comp_texture_id(_comp_texture_id), + visited(false), 
valid(true), texture_ids{0,0,0,0} + {}; + + // Default constructor, invalid face + explicit block_face() : + comp_coords{8, 8, 8}, + tex_apex(TexCorner::Count), + vertices{INVALID_VERT, INVALID_VERT, INVALID_VERT, INVALID_VERT}, + faceType(FaceType::None), + normal(), visited(false), valid(false), texture_ids{0,0,0,0} {}; + + constexpr bool isQuad() const { return faceType == FaceType::Quad; }; + + static constexpr uint16_t comp_vertex_2_block_vertex(const uint8_t _v, const uint8_t _off_x, const uint8_t _off_y, const uint8_t _off_z) + { + // place comp in block at origin + // 0 0b000 -> 0 + // 1 0b001 -> 1 +1 + // 2 0b010 -> 9 +9 + // 3 0b011 -> 10 +9+1 + // 4 0b100 -> 81 +81 + // 5 0b101 -> 82 +81 +1 + // 6 0b110 -> 90 +81+9 + // 7 0b111 -> 91 +81+9+1 + + // then shift inside block by offset + return (((_v & 0b001) > 0) + _off_x) + + (((_v & 0b010) > 0) + _off_y) * 9 + + (((_v & 0b100) > 0) + _off_z) * 9 * 9; + } + +} BlockFace; +const int BlockFaceSize = sizeof(BlockFace); + +////////////////////////////////////////////////////////////////////////////////////////////////////// + +// Returns the (up to 4) vertices in [0;8] of the component +// Note: vertex == 8 <=> invalid <=> no vertex +// => vert[4] != 8 <=> FaceType::Quad +// => vert[1] is the apex +ComponentVertices get_component_face_vertices(FaceType _face, Direction _view_dir); + +////////////////////////////////////////////////////////////////////////////////////////////////////// + +// Returns the angle between _v0 and _v1 around _axis in radians [-pi;pi] +// All input expected to be unit vectors +// Note: Rotation direction is determined by right claw rule +// Returns values in [-pi;pi] +float angle_between(const Vec3 _v0, const Vec3 _v1, const Vec3 _axis); + +// Note: rotation follows right claw rule along _common_edge +float angle_between_faces(const BlockFace _f0, const BlockFace _f1, const Vec3 _common_edge); + +void transform_mtx(const Vec3 &_position, const Quat &_orientation, float *_out_mtx); + 
+// Inverse ray (used as pre-calculation for collision checks) +inline constexpr InvRay InverseRay(Ray &_ray); + +// Check ray <-> AABB collision +bool CheckCollisionRayBox(const InvRay &_ray, const AABB &_box, float &_out_distance); + +// Check ray <-> OBB collision +bool CheckCollisionRayOrientedBox(const Ray &_ray, const AABB &_box, const float *_inverse_model_matrix, float &_distance); + +void combineAABB(AABB &_a, const AABB &_b); + +// Return wether _value lies between _start and _end (both inclusive) +template +inline bool between(T _value, T _start, T _end) +{ + return _value >= _start && _value <= _end; +} + + +////////////////////////////////////////////////////////////////////////////////////////////////////// +/* Integer Math */ + + + + + +// _result and _vec should be I8Vec3 and _mat should be I8RotMat3x3 +void mulMtxVec3(int8_t* _result, const int8_t* _mat, const int8_t* _vec); + +// All parameters should be I8RotMat3x3 +void mulMtxMtx(int8_t* _result, const int8_t* _a, const int8_t* _b); diff --git a/src/test.h b/src/test.h new file mode 100644 index 0000000..b28ad70 --- /dev/null +++ b/src/test.h @@ -0,0 +1,1217 @@ + + +#include +#include "data/slot_buffer.h" +#include "data/slot_list.h" +#include "data/first_fit_buffer.h" +#include +#include "graphics.h" +#include "data/slot_queue.h" +#include +#include +#include +#include "world.h" +// #include "data/offset_vector.h" +// #include "data/chunk_storage.h" +#define ASSERT(X) \ + do \ + { \ + bool ASSERT_RESULT = (X); \ + if (!ASSERT_RESULT) \ + { \ + fprintf(stderr, "ASSERT failed: '%s' in %s@%d\n", #X, __FILE__, __LINE__); \ + return false; \ + } \ + } while (0) + +bx::Quaternion QuaternionFromAxisAngle(bx::Vec3 axis, float angle) +{ + bx::Quaternion result = {0.0f, 0.0f, 0.0f, 1.0f}; + + float axisLength = bx::sqrt(axis.x * axis.x + axis.y * axis.y + axis.z * axis.z); + + if (axisLength != 0.0f) + { + angle *= 0.5f; + + float length = 0.0f; + float ilength = 0.0f; + + // Vector3Normalize(axis) + 
bx::Vec3 v = axis; + length = bx::sqrt(v.x * v.x + v.y * v.y + v.z * v.z); + if (length == 0.0f) + length = 1.0f; + ilength = 1.0f / length; + axis.x *= ilength; + axis.y *= ilength; + axis.z *= ilength; + + float sinres = bx::sin(angle); + float cosres = bx::cos(angle); + + result.x = axis.x * sinres; + result.y = axis.y * sinres; + result.z = axis.z * sinres; + result.w = cosres; + + // QuaternionNormalize(q); + bx::Quaternion q = result; + length = bx::sqrt(q.x * q.x + q.y * q.y + q.z * q.z + q.w * q.w); + if (length == 0.0f) + length = 1.0f; + ilength = 1.0f / length; + result.x = q.x * ilength; + result.y = q.y * ilength; + result.z = q.z * ilength; + result.w = q.w * ilength; + } + + return result; +} + +void testAxisAngle() +{ + + bx::Vec3 axises[] = { + {0, 0, 1}, + {0, 1, 0}, + {1, 0, 0}, + {1, 1, 1}, + {1, 0, 1}, + {0, 0, -1}, + {0, -1, 0}, + {-1, 0, 0}, + {-1, -1, -1}, + {-1, 0, -1}, + }; + + for (int o = 0; o < sizeof(axises) / sizeof(bx::Vec3); o++) + { + bx::Vec3 axis = bx::normalize(axises[o]); + + float angle = 0; + + for (int i = 0; i < 8; i++) + { + angle = i * bx::kPiQuarter; + + bx::Quaternion bxq = bx::fromAxisAngle(axis, angle); + + bx::Quaternion rq = QuaternionFromAxisAngle(axis, angle); + + if (!bx::isEqual(bxq, rq, 0.000001f)) + { + logDebug("aha!\n"); + } + } + } +} + +bx::Vec3 Vector3RotateByQuaternion(bx::Vec3 v, bx::Quaternion q) +{ + bx::Vec3 result = {0, 0, 0}; + + result.x = v.x * (q.x * q.x + q.w * q.w - q.y * q.y - q.z * q.z) + v.y * (2 * q.x * q.y - 2 * q.w * q.z) + v.z * (2 * q.x * q.z + 2 * q.w * q.y); + result.y = v.x * (2 * q.w * q.z + 2 * q.x * q.y) + v.y * (q.w * q.w - q.x * q.x + q.y * q.y - q.z * q.z) + v.z * (-2 * q.w * q.x + 2 * q.y * q.z); + result.z = v.x * (-2 * q.w * q.y + 2 * q.x * q.z) + v.y * (2 * q.w * q.x + 2 * q.y * q.z) + v.z * (q.w * q.w - q.x * q.x - q.y * q.y + q.z * q.z); + + return result; +} + +bx::Vec3 QuaternionToEuler(bx::Quaternion q) +{ + bx::Vec3 result = {0, 0, 0}; + + // Roll (x-axis rotation) + 
float x0 = 2.0f * (q.w * q.x + q.y * q.z); + float x1 = 1.0f - 2.0f * (q.x * q.x + q.y * q.y); + result.x = bx::atan2(x0, x1); + + // Pitch (y-axis rotation) + float y0 = 2.0f * (q.w * q.y - q.z * q.x); + y0 = y0 > 1.0f ? 1.0f : y0; + y0 = y0 < -1.0f ? -1.0f : y0; + result.y = bx::asin(y0); + + // Yaw (z-axis rotation) + float z0 = 2.0f * (q.w * q.z + q.x * q.y); + float z1 = 1.0f - 2.0f * (q.y * q.y + q.z * q.z); + result.z = bx::atan2(z0, z1); + + return result; +} + +bool testSlotBuffer() +{ + SlotBuffer buf(4); + + uint32_t off, num; + ASSERT(buf.wasModified(off, num) == false); + + // add 1 + uint32_t id; + buf.add(id) = 0; + ASSERT(id == 0); + + ASSERT(buf.wasModified(off, num)); + ASSERT(off == 0); + ASSERT(num == 1); + + buf.clearModified(); + ASSERT(buf.wasModified(off, num) == false); + + // add 2 + buf.add(id) = 1; + ASSERT(id == 1); + buf.add(id) = 2; + ASSERT(id == 2); + + ASSERT(buf.wasModified(off, num)); + ASSERT(off == 1); + ASSERT(num == 2); + + buf.clearModified(); + ASSERT(buf.wasModified(off, num) == false); + + // update + buf.add(id) = 3; + ASSERT(id == 3); + ASSERT(buf.wasModified(off, num)); + ASSERT(off == 3); + ASSERT(num == 1); + + buf.at(1) = 1337; + ASSERT(buf.wasModified(off, num)); + ASSERT(off == 1); + ASSERT(num == 3); + + buf.clearModified(); + + // data + uint32_t *data = buf.data(); + ASSERT(data); + ASSERT(data[0] == 0); + ASSERT(data[1] == 1337); + ASSERT(data[2] == 2); + ASSERT(data[3] == 3); + + // grow + buf.add(id) = 4; + ASSERT(id == 4); // buffer has to grow at this point + + ASSERT(buf.wasModified(off, num)); + ASSERT(off == 4); + ASSERT(num == 4); + + buf.clearModified(); + + data = buf.data(); // data should be still be the same + ASSERT(data); + ASSERT(data[0] == 0); + ASSERT(data[1] == 1337); + ASSERT(data[2] == 2); + ASSERT(data[3] == 3); + ASSERT(data[4] == 4); + + // remove + buf.remove(1); + buf.remove(3); + + buf.add(id) = 42; + ASSERT(id == 3); + + ASSERT(buf.wasModified(off, num)); + ASSERT(off == 3); + 
ASSERT(num == 1); + + buf.add(id) = 8; + ASSERT(id == 1); + + ASSERT(buf.wasModified(off, num)); + ASSERT(off == 1); + ASSERT(num == 3); + + data = buf.data(); + ASSERT(data[0] == 0); + ASSERT(data[1] == 8); + ASSERT(data[2] == 2); + ASSERT(data[3] == 42); + ASSERT(data[4] == 4); + + buf.clearModified(); + + logDebug("testSlotBuffer() - ok\n"); + return true; +} + +bool testSlotList() +{ + SlotList buf(6); + + uint32_t off, num; + ASSERT(buf.empty()); + ASSERT(buf.wasModified(off, num) == false); + + // add 1 + uint32_t id; + buf.add(id) = 0; + ASSERT(id == 2); + ASSERT(buf.empty() == false); + + ASSERT(buf.wasModified(off, num)); + ASSERT(off == 2); + ASSERT(num == 1); + + buf.clearModified(); + ASSERT(buf.wasModified(off, num) == false); + + ASSERT(buf.empty() == false); + ASSERT(buf.first() == 2); + ASSERT(buf.has_next(2) == false); + + // add 2 + buf.add(id) = 1; + ASSERT(id == 3); + buf.add(id) = 2; + ASSERT(id == 4); + + ASSERT(buf.empty() == false); + ASSERT(buf.wasModified(off, num)); + ASSERT(off == 3); + ASSERT(num == 2); + + buf.clearModified(); + ASSERT(buf.wasModified(off, num) == false); + + ASSERT(buf.empty() == false); + ASSERT(buf.first() == 4); + ASSERT(buf.has_next(4)); + ASSERT(buf.next(4) == 3); + ASSERT(buf.has_next(3)); + ASSERT(buf.next(3) == 2); + ASSERT(buf.has_next(2) == false); + + // update + buf.add(id) = 3; + ASSERT(id == 5); + ASSERT(buf.empty() == false); + ASSERT(buf.wasModified(off, num)); + ASSERT(off == 5); + ASSERT(num == 1); + + buf.at(3) = 1337; + ASSERT(buf.empty() == false); + ASSERT(buf.wasModified(off, num)); + ASSERT(off == 3); + ASSERT(num == 3); + + buf.clearModified(); + + // data + uint32_t *data = buf.data(); + ASSERT(data); + ASSERT(data[2] == 0); + ASSERT(data[3] == 1337); + ASSERT(data[4] == 2); + ASSERT(data[5] == 3); + + // iterate + ASSERT(buf.empty() == false); + ASSERT(buf.first() == 5); + ASSERT(buf.has_next(5)); + ASSERT(buf.next(5) == 4); + ASSERT(buf.has_next(4)); + ASSERT(buf.next(4) == 3); + 
ASSERT(buf.has_next(3)); + ASSERT(buf.next(3) == 2); + ASSERT(buf.has_next(2) == false); + + // grow + buf.add(id) = 4; + ASSERT(id == 6); // buffer has to grow at this point + + ASSERT(buf.empty() == false); + ASSERT(buf.wasModified(off, num)); + ASSERT(off == 6); + ASSERT(num == 6); + + buf.clearModified(); + + data = buf.data(); // data should be still be the same + ASSERT(data); + ASSERT(data[2] == 0); + ASSERT(data[3] == 1337); + ASSERT(data[4] == 2); + ASSERT(data[5] == 3); + ASSERT(data[6] == 4); + + // iterate + ASSERT(buf.empty() == false); + ASSERT(buf.first() == 6); + ASSERT(buf.has_next(6)); + ASSERT(buf.next(6) == 5); + ASSERT(buf.has_next(5)); + ASSERT(buf.next(5) == 4); + ASSERT(buf.has_next(4)); + ASSERT(buf.next(4) == 3); + ASSERT(buf.has_next(3)); + ASSERT(buf.next(3) == 2); + ASSERT(buf.has_next(2) == false); + + // remove + buf.remove(3); + + ASSERT(buf.empty() == false); + ASSERT(buf.first() == 6); + ASSERT(buf.has_next(6)); + ASSERT(buf.next(6) == 5); + ASSERT(buf.has_next(5)); + ASSERT(buf.next(5) == 4); + ASSERT(buf.has_next(4)); + ASSERT(buf.next(4) == 2); + ASSERT(buf.has_next(2) == false); + + buf.remove(5); + + ASSERT(buf.empty() == false); + ASSERT(buf.first() == 6); + ASSERT(buf.has_next(6)); + ASSERT(buf.next(6) == 4); + ASSERT(buf.has_next(4)); + ASSERT(buf.next(4) == 2); + ASSERT(buf.has_next(2) == false); + + buf.add(id) = 42; + ASSERT(id == 5); + + ASSERT(buf.empty() == false); + ASSERT(buf.wasModified(off, num)); + ASSERT(off == 5); + ASSERT(num == 1); + + ASSERT(buf.empty() == false); + ASSERT(buf.first() == 5); + ASSERT(buf.has_next(5)); + ASSERT(buf.next(5) == 6); + ASSERT(buf.has_next(6)); + ASSERT(buf.next(6) == 4); + ASSERT(buf.has_next(4)); + ASSERT(buf.next(4) == 2); + ASSERT(buf.has_next(2) == false); + + buf.add(id) = 8; + ASSERT(id == 3); + + ASSERT(buf.empty() == false); + ASSERT(buf.wasModified(off, num)); + ASSERT(off == 3); + ASSERT(num == 3); + + data = buf.data(); + ASSERT(data[2] == 0); + ASSERT(data[3] == 8); + 
ASSERT(data[4] == 2); + ASSERT(data[5] == 42); + ASSERT(data[6] == 4); + + buf.clearModified(); + + ASSERT(buf.empty() == false); + ASSERT(buf.first() == 3); + ASSERT(buf.has_next(3)); + ASSERT(buf.next(3) == 5); + ASSERT(buf.has_next(5)); + ASSERT(buf.next(5) == 6); + ASSERT(buf.has_next(6)); + ASSERT(buf.next(6) == 4); + ASSERT(buf.has_next(4)); + ASSERT(buf.next(4) == 2); + ASSERT(buf.has_next(2) == false); + + logDebug("testSlotBuffer() - ok\n"); + return true; +} + +bool testFirstFitBuffer() +{ + FirstFitBuffer buf(8); + + uint32_t off, num; + ASSERT(buf.wasModified(off, num) == false); + + // add 1 + uint32_t a[] = {0, 1}; + ASSERT(buf.add(a, sizeof(a) / sizeof(uint32_t)) == 0); + + ASSERT(buf.wasModified(off, num)); + ASSERT(off == 0); + ASSERT(num == 2); + + buf.clearModified(); + ASSERT(!buf.wasModified(off, num)); + + // add 2 + uint32_t b[] = {2, 3, 4}; + ASSERT(buf.add(b, sizeof(b) / sizeof(uint32_t)) == 1); + + uint32_t c[] = {5, 6}; + ASSERT(buf.add(c, sizeof(c) / sizeof(uint32_t)) == 2); + + ASSERT(buf.wasModified(off, num)); + ASSERT(off == 2); + ASSERT(num == 5); + + buf.clearModified(); + + // data + uint32_t *data = buf.data(); + ASSERT(data); + ASSERT(data[0] == 0); + ASSERT(data[1] == 1); + ASSERT(data[2] == 2); + ASSERT(data[3] == 3); + ASSERT(data[4] == 4); + ASSERT(data[5] == 5); + ASSERT(data[6] == 6); + + // remove b + buf.remove(1); + + // buf == [0, 1, x, x, x, 5, 6, x] + + ASSERT(!buf.wasModified(off, num)); // remove does not mark buf as modified! 
+ + buf.clearModified(); + + // defrag NOTE defrag is currently not enabled + // uint32_t d[] = { 7, 8, 9, 10 }; + // ASSERT(buf.add(d, sizeof(d) / sizeof(uint32_t)) == 2); + + //// buf == [0, 1, 5, 6, 7, 8, 9, 10] + // data = buf.data(); + // ASSERT(data); + // ASSERT(data[0] == 0); + // ASSERT(data[1] == 1); + // ASSERT(data[2] == 5); + // ASSERT(data[3] == 6); + // ASSERT(data[4] == 7); + // ASSERT(data[5] == 8); + // ASSERT(data[6] == 9); + // ASSERT(data[7] == 10); + + //// TODO test mapping?! + + //// grow + // uint32_t e[] = { 11 }; + // ASSERT(buf.add(e, sizeof(e) / sizeof(uint32_t)) == 3); + + // data = buf.data(); + // ASSERT(data[7] == 10); + // ASSERT(data[8] == 11); + + // ASSERT(buf.wasModified(off, num)); + // ASSERT(off == 8); // after resize + // ASSERT(num == 8); + + // buf.clearModified(); + + logDebug("testGPUBlockModelBuffer() - ok\n"); + return true; +} + +bool testPODs() +{ + // logDebug("Vec3 is POD = %d\n", bx::isPod()); + ASSERT(bx::isPod()); + Vec3 v; // default constructor + v = Vec3(0); + v = bx::add(v, v); // implicit conversion + + // logDebug("Quat is POD = %d\n", bx::isPod()); + ASSERT(bx::isPod()); + Quat q; // default constructor + q = Quat::unit(); + q = bx::mul(q, q); // implicit conversion + + return true; +} + +struct X +{ + int i1; + int i2; +}; +struct Y +{ + char c; + X x; + int i[2]; + float f; + +protected: + static double d; + +private: + void g() {} +}; + +bool testBxIsAggregate() +{ + // Note: compiler macro '__is_aggregate(Type)' seems broken on msvc + // This does some checks if it works now after fix [ return !!__is_aggregate(Ty); ] + // + // Ref.: https://stackoverflow.com/questions/4178175/what-are-aggregates-and-pods-and-how-why-are-they-special + + // class NotAggregate1 + //{ + // virtual void f() {} //remember? 
no virtual functions + // }; + // ASSERT(!bx::isAggregate()); + + // class NotAggregate2 + //{ + // int x; //x is private by default and non-static + // }; + // ASSERT(!bx::isAggregate()); + + // class NotAggregate3 + //{ + // public: + // NotAggregate3(int) {} //oops, user-defined constructor + // }; + // ASSERT(!bx::isAggregate()); + + // class Aggregate1 + //{ + // public: + // NotAggregate1 member1; //ok, public member + // Aggregate1& operator=(Aggregate1 const& rhs) {/* */ } //ok, copy-assignment + // private: + // void f() {} // ok, just a private function + // }; + // ASSERT(bx::isAggregate()); + + // ASSERT(bx::isAggregate()); + // ASSERT(bx::isAggregate()); + + // Y y = { 'a', {10, 20}, {20, 30} }; + return true; +} + +bool testSlotQueue() +{ + SlotQueue q(2); + int *elem = NULL; + + ASSERT((elem = q.begin_pop()) == NULL); + + *q.begin_push() = 1; + q.end_push(); + + // [1,x] + + ASSERT((elem = q.begin_pop()) != NULL); + ASSERT(*elem == 1); + q.end_pop(); + + // [x,x] + + ASSERT((elem = q.begin_pop()) == NULL); + + *q.begin_push() = 2; + q.end_push(); + + // [x,2] + + ASSERT((elem = q.begin_pop()) != NULL); // peek at value + ASSERT(*elem == 2); + + // [x,2] + + *q.begin_push() = 3; + q.end_push(); // buffer should grow + + // [3,x,x,2] + + ASSERT((elem = q.begin_pop()) != NULL); // peek at value again + ASSERT(*elem == 2); + q.end_pop(); + + // [3,x,x,x] + + ASSERT((elem = q.begin_pop()) != NULL); // peek at value again + ASSERT(*elem == 3); + q.end_pop(); + + // [x,x,x,x] + + logDebug("testSlotQueue() - ok\n"); + return true; +} + +/*bool testOffsetVector() +{ + // insert remove on 0 + OffsetVector ov; + ov.insert_at(0, 'a'); + ASSERT(ov.get_at(0) == 'a'); + ov.remove_at(0); + + // only positive indices + ov = OffsetVector(); + ov.insert_at(5, 'b'); + ASSERT(ov.get_at(5) == 'b'); + ov.remove_at(5); + + ov.insert_at(0, 'c'); + ov.insert_at(1, 'd'); + ov.insert_at(5, 'e'); + ov.insert_at(7, 'f'); + ASSERT(ov.get_at(7) == 'f'); + ASSERT(ov.get_at(1) == 
'd'); + ASSERT(ov.get_at(5) == 'e'); + ASSERT(ov.get_at(0) == 'c'); + ov.remove_at(5); + ASSERT(ov.get_at(7) == 'f'); + ASSERT(ov.get_at(1) == 'd'); + ASSERT(ov.get_at(0) == 'c'); + ov.remove_at(0); + ASSERT(ov.get_at(7) == 'f'); + ASSERT(ov.get_at(1) == 'd'); + ov.remove_at(7); + ASSERT(ov.get_at(1) == 'd'); + ov.remove_at(1); + + // only negative indices + ov = OffsetVector(); + ov.insert_at(-5, 'b'); + ASSERT(ov.get_at(-5) == 'b'); + ov.remove_at(-5); + + ov.insert_at(0, 'c'); + ov.insert_at(-1, 'd'); + ov.insert_at(-5, 'e'); + ov.insert_at(-7, 'f'); + ASSERT(ov.get_at(-7) == 'f'); + ASSERT(ov.get_at(-1) == 'd'); + ASSERT(ov.get_at(-5) == 'e'); + ASSERT(ov.get_at(0) == 'c'); + ov.remove_at(-5); + ASSERT(ov.get_at(-7) == 'f'); + ASSERT(ov.get_at(-1) == 'd'); + ASSERT(ov.get_at(0) == 'c'); + ov.remove_at(0); + ASSERT(ov.get_at(-7) == 'f'); + ASSERT(ov.get_at(-1) == 'd'); + ov.remove_at(-7); + ASSERT(ov.get_at(-1) == 'd'); + ov.remove_at(-1); + + // positive/negative indices mixed + ov = OffsetVector(); + ov.insert_at(0, 'g'); + ov.insert_at(-1, 'h'); + ov.insert_at(1, 'i'); + ASSERT(ov.get_at(-1) == 'h'); + ASSERT(ov.get_at(0) == 'g'); + ASSERT(ov.get_at(1) == 'i'); + + ov.remove_at(0); + ASSERT(ov.get_at(-1) == 'h'); + ASSERT(ov.get_at(1) == 'i'); + + ov.insert_at(-2, 'j'); + ov.insert_at(2, 'k'); + ov.insert_at(-3, 'l'); + ov.insert_at(3, 'm'); + ASSERT(ov.get_at(-2) == 'j'); + ASSERT(ov.get_at(2) == 'k'); + ASSERT(ov.get_at(-3) == 'l'); + ASSERT(ov.get_at(3) == 'm'); + + ov.remove_at(-2); + ov.remove_at(2); + ASSERT(ov.get_at(-3) == 'l'); + ASSERT(ov.get_at(3) == 'm'); + + ov.insert_at(0, 'n'); + ASSERT(ov.get_at(0) == 'n'); + + ASSERT(ov.get_at(-1) == 'h'); + ASSERT(ov.get_at(0) == 'n'); + ASSERT(ov.get_at(1) == 'i'); + ov.remove_at(1); + ASSERT(ov.get_at(-1) == 'h'); + ov.remove_at(-1); + + ov.remove_at(3); + ov.remove_at(-3); + + ASSERT(ov.get_at(0) == 'n'); + ov.remove_at(0); + + logDebug("testOffsetVector() - ok\n"); + return true; +}*/ + +/*bool 
testChunkStorage() +{ + ChunkStorage cs; + + cs.insert(0, 0, 0, 1337); + ASSERT(cs.at(0,0,0) == 1337); + cs.insert(0, 0, 0, 1337); + + + cs.insert(0, 0, 0, 1337); + cs.insert(420, 69, -69, 420); + cs.insert(42, -8, 6, 30035); + cs.insert(0, -1, -2, 42); + cs.insert(-13, -1, -100, 5); + + ASSERT(cs.at(0, 0, 0) == 1337); + ASSERT(cs.at(420, 69, -69) == 420); + ASSERT(cs.at(42, -8, 6) == 30035); + ASSERT(cs.at(0, -1, -2) == 42); + ASSERT(cs.at(-13, -1, -100) == 5); + + cs.remove(0, 0, 0); + ASSERT(cs.at(420, 69, -69) == 420); + ASSERT(cs.at(42, -8, 6) == 30035); + ASSERT(cs.at(0, -1, -2) == 42); + ASSERT(cs.at(-13, -1, -100) == 5); + + cs.remove(420, 69, -69); + cs.remove(0, -1, -2); + ASSERT(cs.at(42, -8, 6) == 30035); + ASSERT(cs.at(-13, -1, -100) == 5); + + cs.remove(42, -8, 6); + cs.remove(-13, -1, -100); + + logDebug("testOffsetVector() - ok\n"); + return true; +}*/ + +bool testChunkOffsetPacking() +{ + int16_t xl[] = {INT16_MIN, -1, 0, 1, INT16_MAX, ((int16_t)(UINT16_MAX))}; + + for (int16_t x : xl) + { + // uint32_t ux = (uint32_t)x; // this causes sign extension + uint32_t ux = (uint32_t)((uint16_t)x); + ASSERT(ux <= UINT16_MAX); + } + + for (int i = 0; i < 5; ++i) + { + for (int o = 0; o < 5; ++o) + { + if (i != o) + { + int16_t a = xl[i]; + int16_t b = xl[o]; + ASSERT(((uint32_t)((uint16_t)a)) != ((uint32_t)((uint16_t)b))); + } + } + } + + logDebug("testOffsetVector() - ok\n"); + return true; +} + +bool testMathAngleBetween() +{ + Vec3 a = bx::normalize(Vec3(1, -1, 1)); + Vec3 b = bx::normalize(Vec3(-1, 1, 1)); + Vec3 n = bx::normalize(Vec3(-1, -1, 0)); + + float result = angle_between(a, b, n); + + ASSERT(between(result, bx::toRad(109.46f), bx::toRad(109.48f))); // should be 109.47 degrees + + a = bx::normalize(Vec3(0.41f, -0.41f, -0.82f)); + b = bx::normalize(Vec3(0.41f, -0.41f, 0.82f)); + n = bx::normalize(Vec3(-1, -1, 0)); + + result = angle_between(a, b, n); + + ASSERT(between(result, bx::toRad(109.46f), bx::toRad(109.48f))); // should be 109.47 degrees 
+ + a = bx::normalize(Vec3(1, 0, -1)); + b = bx::normalize(Vec3(0, -1, 1)); + n = bx::normalize(Vec3(-1, -1, -1)); + + result = angle_between(a, b, n); + + ASSERT(between(result, bx::toRad(119.99f), bx::toRad(120.01f))); // should be 120 degrees + + logDebug("testMathAngleBetween() - ok\n"); + return true; +} + +// bool testFaceNormal() +// { +// for (uint8_t i = 0; i < FaceNormal::Count; i++) +// { +// FaceNormal n = (FaceNormal)i; +// FaceNormal nn = negate(n); +// +// // unit length +// Vec3 vn = toVec3(n); +// ASSERT(bx::isEqual(bx::length(vn), 1.0f, 0.001f)); +// +// // unit length +// Vec3 vnn = toVec3(nn); +// ASSERT(bx::isEqual(bx::length(vnn), 1.0f, 0.001f)); +// +// // negation +// ASSERT(bx::isEqual(bx::neg(vn), vnn, 0.001f)); +// } +// +// logDebug("testFaceNormal() - ok\n"); +// return true; +// } + +bool testOrientation() +{ + for (uint8_t i = 0; i < 24; i++) + { + Orientation ori = Orientation(i); + + // Convert tp packed dirs and back + uint8_t packed_dirs = orientation_2_packed_dirs[ori.data]; + Orientation ori2 = Orientation(packed_dirs_2_orientation[packed_dirs]); + ASSERT(ori.data == ori2.data); + + // Convert to directions and back + Direction forward, up; + ori.to_dirs(forward, up); + Orientation ori3 = Orientation(forward, up); + ASSERT(ori.data == ori3.data); + } + + logDebug("testOrientation() - ok\n"); + return true; +} + +bool test90DegRotationMatrices() +{ + + float angles[4] = {0.0f, bx::kPiHalf, bx::kPi, bx::kPi + bx::kPiHalf}; + + ASSERT(bx::cos(0.0f) == 1.0f); + + for (int a = 0; a < 4; a++) + { + for (int b = 0; b < 4; b++) + { + for (int y = 0; y < 4; y++) + { + // cosa*cosb + float m_00 = bx::cos(angles[a]) * bx::cos(angles[b]); + ASSERT(m_00 >= -1.0f && m_00 <= 1.0f); + + // sina*cosb + float m_01 = bx::sin(angles[a]) * bx::cos(angles[b]); + ASSERT(m_01 >= -1.0f && m_01 <= 1.0f); + + // -sinb + float m_02 = -1.0f * bx::sin(angles[b]); + ASSERT(m_02 >= -1.0f && m_02 <= 1.0f); + + // cosa*sinb*siny-sina*cosy + float m_10 = 
(bx::cos(angles[a]) * bx::sin(angles[b]) * bx::sin(angles[y])) - + (bx::sin(angles[a]) * bx::cos(angles[y])); + // logDebug("%5.2f %5.2f %5.2f = %5.2f\n", angles[a], angles[b], angles[y], m_10); + ASSERT(m_10 >= -1.0f && m_10 <= 1.0f); + + // sina*sinb*siny+cosa*cosy + float m_11 = (bx::sin(angles[a]) * bx::sin(angles[b]) * bx::sin(angles[y])) + + (bx::cos(angles[a]) * bx::cos(angles[y])); + ASSERT(m_11 >= -1.0f && m_11 <= 1.0f); + + // cosb*siny + float m_12 = bx::cos(angles[b]) * bx::sin(angles[y]); + ASSERT(m_12 >= -1.0f && m_12 <= 1.0f); + + // cosa*sinb*cosy+sina*siny + float m_20 = (bx::cos(angles[a]) * bx::sin(angles[b]) * bx::cos(angles[y])) + + (bx::sin(angles[a]) * bx::sin(angles[y])); + ASSERT(m_20 >= -1.0f && m_20 <= 1.0f); + + // sina*sinb*cosy-cosa*siny + float m_21 = (bx::sin(angles[a]) * bx::sin(angles[b]) * bx::cos(angles[y])) - + (bx::cos(angles[a]) * bx::sin(angles[y])); + ASSERT(m_21 >= -1.0f && m_21 <= 1.0f); + + // cosb*cosy + float m_22 = bx::cos(angles[b]) * bx::cos(angles[y]); + ASSERT(m_22 >= -1.0f && m_22 <= 1.0f); + + // cast to columns and check only 1 element set + int8_t im_00 = (int8_t)bx::round(m_00); + ASSERT(im_00 == -1 || im_00 == 0 || im_00 == 1); + int8_t im_01 = (int8_t)bx::round(m_01); + ASSERT(im_01 == -1 || im_01 == 0 || im_01 == 1); + int8_t im_02 = (int8_t)bx::round(m_02); + ASSERT(im_02 == -1 || im_02 == 0 || im_02 == 1); + + bool set_00 = (im_00 != 0) && (im_01 == 0) && (im_02 == 0); + bool set_01 = (im_00 == 0) && (im_01 != 0) && (im_02 == 0); + bool set_02 = (im_00 == 0) && (im_01 == 0) && (im_02 != 0); + + ASSERT(set_00 ^ set_01 ^ set_02); + + int8_t im_10 = (int8_t)bx::round(m_10); + ASSERT(im_10 == -1 || im_10 == 0 || im_10 == 1); + int8_t im_11 = (int8_t)bx::round(m_11); + ASSERT(im_11 == -1 || im_11 == 0 || im_11 == 1); + int8_t im_12 = (int8_t)bx::round(m_12); + ASSERT(im_12 == -1 || im_12 == 0 || im_12 == 1); + + bool set_10 = (im_10 != 0) && (im_11 == 0) && (im_12 == 0); + bool set_11 = (im_10 == 0) && (im_11 
!= 0) && (im_12 == 0); + bool set_12 = (im_10 == 0) && (im_11 == 0) && (im_12 != 0); + + ASSERT(set_10 ^ set_11 ^ set_12); + + int8_t im_20 = (int8_t)bx::round(m_20); + ASSERT(im_20 == -1 || im_20 == 0 || im_20 == 1); + int8_t im_21 = (int8_t)bx::round(m_21); + ASSERT(im_21 == -1 || im_21 == 0 || im_21 == 1); + int8_t im_22 = (int8_t)bx::round(m_22); + ASSERT(im_22 == -1 || im_22 == 0 || im_22 == 1); + + bool set_20 = (im_20 != 0) && (im_21 == 0) && (im_22 == 0); + bool set_21 = (im_20 == 0) && (im_21 != 0) && (im_22 == 0); + bool set_22 = (im_20 == 0) && (im_21 == 0) && (im_22 != 0); + + ASSERT(set_20 ^ set_21 ^ set_22); + } + } + } + + logDebug("test90DegRotationMatrices() - ok\n"); + return true; +} + +// 2024-11-28 +// This looks really clever, but I no longer understand how to use it -.- +// // A bit-packet vector consisting of 4 signed elements with a width of 3 bit each. +// // Each element is in [-7;+7] +// typedef struct i4vec4 +// { +// uint16_t v; // 4 nibbles formatted as [sxxx] each +// // s is the sign bit and xxx is a 3 bit unsigned value +// +// i4vec4(int _x, int _y, int _z) : i4vec4((int16_t)_x, (int16_t)_y, (int16_t)_z) +// { +// } +// +// i4vec4(int16_t _x, int16_t _y, int16_t _z) +// { +// assert(_x >= -7 && _x <= 7); +// assert(_y >= -7 && _y <= 7); +// assert(_z >= -7 && _z <= 7); +// v = (std::abs(_z) << 8) | (std::abs(_y) << 4) | std::abs(_x); +// v |= (std::signbit(_z) > 0) << 11; +// v |= (std::signbit(_y) > 0) << 7; +// v |= (std::signbit(_x) > 0) << 3; +// } +// +// i4vec4(int _x, int _y, int _z, int _w) : +// i4vec4((int16_t)_x, (int16_t)_y, (int16_t)_z, (int16_t)_w) +// { +// } +// +// i4vec4(int16_t _x, int16_t _y, int16_t _z, int16_t _w) +// { +// assert(_x >= -7 && _x <= 7); +// assert(_y >= -7 && _y <= 7); +// assert(_z >= -7 && _z <= 7); +// assert(_w >= -7 && _w <= 7); +// v = (std::abs(_w) << 12) | (std::abs(_z) << 8) | (std::abs(_y) << 4) | std::abs(_x); +// v |= (std::signbit(_w) > 0) << 15; +// v |= (std::signbit(_z) > 0) << 
11; +// v |= (std::signbit(_y) > 0) << 7; +// v |= (std::signbit(_x) > 0) << 3; +// } +// +// i4vec4(uint16_t _x, uint16_t _y, uint16_t _z) +// { +// assert(_x <= 7); +// assert(_y <= 7); +// assert(_z <= 7); +// v = (_z << 8) | (_y << 4) | (_x); +// } +// +// i4vec4(uint16_t _x, uint16_t _y, uint16_t _z, uint16_t _w) +// { +// assert(_x <= 7); +// assert(_y <= 7); +// assert(_z <= 7); +// assert(_w <= 7); +// v = (_w << 12) | (_z << 8) | (_y << 4) | (_x); +// } +// +// // Returns the unsigned value of element at offset _o +// uint16_t val(uint16_t _o) const +// { +// return (v >> (4 * _o)) & 0b0111; +// } +// // Returns the signbit of element at offset _o +// bool neg(uint16_t _o) const +// { +// return ((v >> (4 * _o)) & 0b1000) > 0; +// } +// // Returns the first signed value +// int16_t x() +// { +// int16_t value = val(0); +// return value * (-1 * neg(0)); +// } +// // Returns the second signed value +// int16_t y() +// { +// int16_t value = val(1); +// return value * (-1 * neg(1)); +// } +// // Returns the third signed value +// int16_t z() +// { +// int16_t value = val(2); +// return value * (-1 * neg(2)); +// } +// // Returns the fourth signed value +// int16_t w() +// { +// int16_t value = val(3); +// return value * (-1 * neg(3)); +// } +// } I4Vec4; + +// // Can be used to combine 90 deg rotations +// // Can be used to apply 90 deg rotations +// I4Vec4 mul(const I4Vec4 _a, const I4Vec4 _b) +// { +// // combine permutations +// uint16_t x = _b.val(_a.val(0)); +// uint16_t y = _b.val(_a.val(1)); +// uint16_t z = _b.val(_a.val(2)); +// // apply negations +// I4Vec4 r = I4Vec4(x, y, z); +// r.v |= (_a.v & 0b100010001000) ^ (_b.v & 0b100010001000); +// return r; +// } + +bool testCompFaceType2Vertices2FaceType() +{ + ComponentVertices res; + + // test FaceType::None generates invalid indices + for (int i = 0; i < Direction::Count; i++) + { + Direction d = (Direction)i; + res = get_component_face_vertices(FaceType::None, d); + + ComponentFace face; + 
face.vertices[0] = res[0]; + face.vertices[1] = res[1]; + face.vertices[2] = res[2]; + face.vertices[3] = res[3]; + ASSERT(!face.isValid()); + } + + // test quad + res = get_component_face_vertices(FaceType::Quad, Direction::PosZ); + ASSERT((res == ComponentVertices{2,0,1,3})); + res = get_component_face_vertices(FaceType::Quad, Direction::PosX); + ASSERT((res == ComponentVertices{6,4,0,2})); + res = get_component_face_vertices(FaceType::Quad, Direction::NegX); + ASSERT((res == ComponentVertices{3,1,5,7})); + res = get_component_face_vertices(FaceType::Quad, Direction::NegZ); + ASSERT((res == ComponentVertices{7,5,4,6})); + res = get_component_face_vertices(FaceType::Quad, Direction::PosY); + ASSERT((res == ComponentVertices{5,1,0,4})); + res = get_component_face_vertices(FaceType::Quad, Direction::NegY); + ASSERT((res == ComponentVertices{6,2,3,7})); + + // test some trigs + res = get_component_face_vertices(FaceType::TrigBR, Direction::PosZ); + ASSERT((res == ComponentVertices{0,1,3,8})); + res = get_component_face_vertices(FaceType::TrigBR, Direction::PosX); + ASSERT((res == ComponentVertices{4,0,2,8})); + res = get_component_face_vertices(FaceType::TrigBR, Direction::PosY); + ASSERT((res == ComponentVertices{1,0,4,8})); + res = get_component_face_vertices(FaceType::TrigBR, Direction::NegY); + ASSERT((res == ComponentVertices{2,3,7,8})); + + res = get_component_face_vertices(FaceType::TrigTR, Direction::PosZ); + ASSERT((res == ComponentVertices{1,3,2,8})); + res = get_component_face_vertices(FaceType::TrigTR, Direction::PosX); + ASSERT((res == ComponentVertices{0,2,6,8})); + res = get_component_face_vertices(FaceType::TrigTR, Direction::PosY); + ASSERT((res == ComponentVertices{0,4,5,8})); + res = get_component_face_vertices(FaceType::TrigTR, Direction::NegY); + ASSERT((res == ComponentVertices{3,7,6,8})); + + res = get_component_face_vertices(FaceType::TrigBL, Direction::NegY); + ASSERT((res == ComponentVertices{6,2,3,8})); + + res = 
get_component_face_vertices(FaceType::TrigTL, Direction::NegY); + ASSERT((res == ComponentVertices{7,6,2,8})); + + // test reverse + for (int f = 0; f < FaceType::Count; f++) + { + FaceType face = (FaceType)f; + for (int i = 0; i < Direction::Count; i++) + { + Direction d = (Direction)i; + + res = get_component_face_vertices(face, d); + + ComponentFace cface; + cface.vertices[0] = res[0]; + cface.vertices[1] = res[1]; + cface.vertices[2] = res[2]; + cface.vertices[3] = res[3]; + + ASSERT(cface.faceType() == face); + } + } + + logDebug("testCompFaceType2Vertices2FaceType() - ok\n"); + return true; +} + + +bool test() +{ + logDebug("Testing stuff..\n"); + bool res = true; + + // res &= testFirstFitBuffer(); + // res &= testSlotBuffer(); + // res &= testSlotQueue(); + // res &= testOffsetVector(); + // res &= testChunkStorage(); + // res &= testChunkOffsetPacking(); + // res &= testMathAngleBetween(); + // res &= testSlotList(); + // res &= testFaceNormal(); + res &= testOrientation(); + res &= test90DegRotationMatrices(); + // res &= testI4Vec4(); + // res &= testIntegerMath(); + res &= testCompFaceType2Vertices2FaceType(); + + // TODO test int <=> float precision (aka how many int bits can a float represent) + // TODO test int <=> float on GPU + + logDebug("Testing done\n"); + return res; +} diff --git a/src/util.cpp b/src/util.cpp new file mode 100644 index 0000000..388b684 --- /dev/null +++ b/src/util.cpp @@ -0,0 +1,271 @@ +#include "util.h" +#include +#include +#include +#include +#include +#include +#include "config.h" +#include "space_math.h" +#include +#include +#include "world.h" +#include + +// TODO logging output should go to a file as well! +void logImpl(FILE *stream, const char *pre, const char *fmt, va_list args) +{ + fprintf(stream, "%s", pre); + vfprintf(stream, fmt, args); +} + +void logTrace(const char *fmt, ...) 
+{ + if (config::LOG_LEVEL <= config::LOG_TRACE) + { + va_list valist; + va_start(valist, fmt); + logImpl(stderr, "[TRACE] ", fmt, valist); + va_end(valist); + } +} + +void logDebug(const char *fmt, ...) +{ + if (config::LOG_LEVEL <= config::LOG_DEBUG) + { + va_list valist; + va_start(valist, fmt); + logImpl(stderr, "[DEBUG] ", fmt, valist); + va_end(valist); + } +} + +void logInfo(const char *fmt, ...) +{ + if (config::LOG_LEVEL <= config::LOG_INFO) + { + va_list valist; + va_start(valist, fmt); + logImpl(stdout, "[INFO] ", fmt, valist); + va_end(valist); + } +} + +void logWarn(const char *fmt, ...) +{ + if (config::LOG_LEVEL <= config::LOG_WARNING) + { + va_list valist; + va_start(valist, fmt); + logImpl(stderr, "[WARN] ", fmt, valist); + va_end(valist); + } +} + +void logErr(const char *fmt, ...) +{ + if (config::LOG_LEVEL <= config::LOG_ERROR) + { + va_list valist; + va_start(valist, fmt); + logImpl(stderr, "[ERROR] ", fmt, valist); + va_end(valist); + } +} + +[[noreturn]] void DIE(const char *fmt, ...) +{ + fflush(stdout); // better safe than sorry + + va_list valist; + va_start(valist, fmt); + logImpl(stderr, "[FATAL] ", fmt, valist); + va_end(valist); + + fflush(stderr); // better safe than sorry + + std::abort(); // abort to show error +} + +void generate_vertices(const char *filePath) +{ + FILE *file = fopen(filePath, "w+"); // open for writing & truncate if exists + if (!file) + DIE("generateVertices: fopen(%s) failed\n", filePath); + + fprintf(file, "const vec3 verts[729] = \n{\n"); + + // generate vert coordinates + // order: x -> y -> z (chosen arbitrarily..) 
+ + const float dist = 1.0f / 8.0f; // normalized distance between vertices + const float off = 0.5f; // offset to center unit cube around (0,0,0) + for (int z = 0; z < 9; z++) + { + float nz = z * dist - off; + for (int y = 0; y < 9; y++) + { + float ny = y * dist - off; + for (int x = 0; x < 9; x++) + { + float nx = x * dist - off; + fprintf(file, "{%f, %f, %f},\n", nx, ny, nz); + } + } + } + + fprintf(file, "};\n"); + + fclose(file); +} + +// Generates all 24 block/component orientations as glsl matrices +// Outputs a glsl lookup table (orientation_id -> matrix) +void generate_glsl_orientation_matrices(const char *_filePath) +{ + FILE *file = fopen(_filePath, "w+"); // open for writing & truncate if exists + if (!file) + DIE("generateVertices: fopen(%s) failed\n", _filePath); + + fprintf(file, "const mat3 orientations[24] = \n{\n"); + + float mtx[9]; + for (uint8_t i = 0; i < 24; i++) + { + Orientation ori(i); + ori.to_matrix(mtx); + + fprintf(file, "{{%f, %f, %f}, {%f, %f, %f}, {%f, %f, %f}},\n", + mtx[0], mtx[1], mtx[2], + mtx[3], mtx[4], mtx[5], + mtx[6], mtx[7], mtx[8]); + } + + fprintf(file, "};\n"); + + fclose(file); +} + +void find_correct_working_directory() +{ + // When debugging we want to change our current working directory + // to the repo root to simulate deployment environment + + auto path = std::filesystem::current_path(); + + // Note: We check for the correct path, by searching for the shaders directory + if (std::filesystem::exists(path / "assets")) + { // We're already set + return; + } + + // Search in upwards + while (path != path.root_path()) + { + path = path.parent_path(); + + if (std::filesystem::exists(path / "assets")) + { + logInfo("Correcting cwd to %s\n", path.string().c_str()); + std::filesystem::current_path(path); + return; + } + } + + DIE("Could not find correct working directory. 
I can't find my assets :(\n"); +} + +bool should_use_wayland() +{ + // Check user override + if (getenv("SPACEGAME_USE_WAYLAND")) + { + return true; + } + if (getenv("SPACEGAME_USE_X11")) + { + return false; + } + + // Try to detect if wayland is supported + char *xdg_session_type = getenv("XDG_SESSION_TYPE"); + if (xdg_session_type == NULL) + { // Fallback + return getenv("WAYLAND_DISPLAY") != NULL; + } + return std::strcmp(xdg_session_type, "wayland") == 0; +} + +// todo add to config? +void startup_checks() +{ + uint64_t caps = bgfx::getCaps()->supported; + + if (!(BGFX_CAPS_COMPUTE & caps)) // vulkan core 1.0 + DIE("compute shaders not supported!\n"); + if (!(BGFX_CAPS_DRAW_INDIRECT & caps)) // vulkan core 1.0 + DIE("draw indirect not supported!\n"); + if (!(BGFX_CAPS_INSTANCING & caps)) + DIE("instancing not supported!\n"); + if (!(BGFX_CAPS_INDEX32 & caps)) // vulkan core 1.0 (but max value may be limited) + DIE("index32 not supported!\n"); + // if (!(BGFX_CAPS_TEXTURE_2D_ARRAY & caps)) // vulkan core 1.0 + // DIE("texture2DArrays not supported!\n"); + + bgfx::Caps::Limits limits = bgfx::getCaps()->limits; + + // vulkan property maxDrawIndirectCount + // typically 2^32 -1 + // TODO add check + + // vulkan property fullDrawIndexUint32 must be supported + // or maxDrawIndexedIndexValue must be 2^32-1 + // otherwise 32-bit indices only support values up to 2^24-1 + + // config::MAX_TEXTURE_LAYERS = limits.maxTextureLayers; + // if (config::MAX_TEXTURE_LAYERS < config::INITIAL_TEXTURE_LAYERS) + // DIE("limit maxTextureLayers is too small (require <=%d; was %d)", + // config::INITIAL_TEXTURE_LAYERS, config::MAX_TEXTURE_LAYERS); +} + +// void flag_wait(std::atomic_flag* _flag) { +// std::atomic_flag_wait(_flag, false); // expect flag==false, wait until ==true +// std::atomic_flag_clear(_flag); // reset to ==false +// } +// +// void flag_signal(std::atomic_flag* _flag) { +// while (std::atomic_flag_test_and_set(_flag)) {}; // set flag==true (spin if not yet ==false) 
+// std::atomic_flag_notify_one(_flag); // wake up potentially waiting thread +// } + +// void flag_wait(std::atomic_flag* _flag) { +// std::atomic_flag_wait(_flag, false); // expect flag==false, wait until ==true +// std::atomic_flag_clear(_flag); // reset to ==false +// std::atomic_flag_notify_one(_flag); // wake up potentially wating thread +// } + +// void flag_signal(std::atomic_flag* _flag) { +// while (std::atomic_flag_test_and_set(_flag)) { // set flag==true (if still true wait until cleared) +// std::atomic_flag_wait(_flag, true); // expect flag==true, wait until ==false +// }; +// std::atomic_flag_notify_one(_flag); // wake up potentially waiting thread +// } + +void flag_wait(std::atomic_bool *_flag) +{ + std::atomic_wait(_flag, false); // expect flag==false, wait until ==true + _flag->store(false); // reset to ==false + std::atomic_notify_one(_flag); // wake up potentially wating thread +} + +void flag_signal(std::atomic_bool *_flag) +{ + bool expect = false; + while (!_flag->compare_exchange_weak(expect, true)) + { // set flag==true (if still true wait until cleared) + expect = false; // reset expected value + std::atomic_wait(_flag, true); // expect flag==true, wait until ==false + }; + std::atomic_notify_one(_flag); // wake up potentially waiting thread +} diff --git a/src/util.h b/src/util.h new file mode 100644 index 0000000..d91f31a --- /dev/null +++ b/src/util.h @@ -0,0 +1,74 @@ +#pragma once + +#include +#include +#include +#include + +void logTrace(const char *fmt, ...); +void logDebug(const char *fmt, ...); +void logInfo(const char *fmt, ...); +void logWarn(const char *fmt, ...); +void logErr(const char *fmt, ...); + +[[noreturn]] void DIE(const char *fmt, ...); + +// generate vertex buffer as shader constant +// generate_vertices("C:/Users/Crydsch/Desktop/spacegame v5/shaders/verts.sh"); +void generate_vertices(const char *filePath); + +void generate_glsl_orientation_matrices(const char *_filePath); + +void find_correct_working_directory(); + 
+// Returns true if wayland should be used instead of x11 +// Only makes sense on *nix systems +bool should_use_wayland(); + +// performs startup checks ensuring hardware capabilities and integrity +void startup_checks(); + +// void flag_wait(std::atomic_flag* _flag); TODO maybe re-activate once glibc supports it -.- +// void flag_signal(std::atomic_flag* _flag); + +void flag_wait(std::atomic_bool *_flag); +void flag_signal(std::atomic_bool *_flag); + +template +inline void freeContainer(T &p_container) +{ + T empty{}; + std::swap(p_container, empty); +}; + +// Ref.: https://stackoverflow.com/questions/2590677/how-do-i-combine-hash-values-in-c0x +// ref.: https://www.boost.org/doc/libs/1_84_0/libs/container_hash/doc/html/hash.html#combine +// TODO update to this https://www.boost.org/doc/libs/1_86_0/libs/container_hash/doc/html/hash.html#notes_hash_combine +// aka https://github.com/boostorg/container_hash/blob/89e5b98f6bc05841a21069d76cc5adcbee62b9cc/include/boost/container_hash/detail/hash_mix.hpp +// and https://github.com/boostorg/container_hash/blob/89e5b98f6bc05841a21069d76cc5adcbee62b9cc/include/boost/container_hash/hash.hpp#L469 +template +inline void hash_combine(std::size_t& seed, const T& v) +{ + std::hash hasher; + seed ^= hasher(v) + 0x9e3779b9 + (seed<<6) + (seed>>2); +} +template +inline void hash_combine(size_t &seed, const T &v, Rest... 
rest) +{ + std::hash hasher; + seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2); + hash_combine(seed, rest...); +} +// use like: +// std::size_t h=0; +// hash_combine(h, obj1, obj2, obj3); + +#define BENCH_START \ + int64_t bench_time_to_seconds = bx::getHPFrequency(); \ + int64_t bench_time_to_millis = bench_time_to_seconds / 1000; \ + int64_t bench_time_start = bx::getHPCounter(); + +#define BENCH_STOP \ + int64_t bench_time_stop = bx::getHPCounter(); \ + int64_t bench_time_diff = (bench_time_stop - bench_time_start); \ + logInfo("BENCHMARK: %f ms\n", ((float)bench_time_diff / (float)bench_time_to_millis)); diff --git a/src/world.cpp b/src/world.cpp new file mode 100644 index 0000000..510a5ad --- /dev/null +++ b/src/world.cpp @@ -0,0 +1,1070 @@ +#include "world.h" +#include "config.h" +#include "data/slot_buffer.h" +#include "factory.h" +#include +#include +#include +#include "util.h" + +/* World data */ +std::vector grid_list; // kept sorted +SlotBuffer grids(config::INITIAL_NUM_GRIDS); +SlotBuffer chunks(config::INITIAL_NUM_CHUNKS); +SlotBuffer blocks(config::INITIAL_NUM_BLOCKS); +SlotBuffer block_models(config::INITIAL_NUM_BLOCKS); +SlotBuffer components(config::INITIAL_NUM_COMPONENTS); +std::vector block_model_list; // kept sorted // TODO use SlotList instead? +std::unordered_map> block_model_requests; // holds blocks waiting for their model to be gpu_loaded + +// const Color B = Color(0, 0, 0, 255); +const Color W = Color(255, 255, 255, 255); +const Color R = Color(255, 0, 0, 255); +const Color B = Color(0, 0, 255, 255); + +namespace world { + + uint32_t add_component_empty() { + ComponentFace faces[8]; + + faces[Direction::PosZ] = ComponentFace::NONE; // outer.. 
+ faces[Direction::NegX] = ComponentFace::NONE; + faces[Direction::NegZ] = ComponentFace::NONE; + faces[Direction::PosX] = ComponentFace::NONE; + faces[Direction::NegY] = ComponentFace::NONE; + faces[Direction::PosY] = ComponentFace::NONE; // ..faces + faces[6] = ComponentFace::NONE; // inner.. + faces[7] = ComponentFace::NONE; // ..faces + + return world::add_component(faces); + } + uint32_t add_component_debug_cube() { + ComponentFace faces[8]; + Color textures[6* 8*8] = { + // Direction::PosX // 0 + R, W, W, R, + W, W, R, W, + W, R, R, W, + R, W, W, R, + // Direction::NegX // 1 + B, W, W, B, + W, W, B, W, + W, B, B, W, + B, W, W, B, + // Direction::PosY // 2 + R, W, W, R, + W, R, R, W, + W, R, R, W, + W, R, R, W, + // Direction::NegY // 3 + B, W, W, B, + W, B, B, W, + W, B, B, W, + W, B, B, W, + // Direction::PosZ // 4 + R, R, R, R, + W, W, R, R, + W, R, W, W, + R, R, R, R, + // Direction::NegZ // 5 + B, B, B, B, + W, W, B, B, + W, B, W, W, + B, B, B, B, + }; + + faces[Direction::PosX] = // up==PosY + { + .normal = FaceNormal(negate(Direction::PosX)), + .tex_apex = TexCorner::BotLeft, + .vertices = get_component_face_vertices(FaceType::Quad, Direction::PosX), + .texture_id = gfx::add_component_textures(&textures[0 * config::TA_TEXELS_PER_TEXTURE]), + }; + faces[Direction::NegX] = // up==PosY + { + .normal = FaceNormal(negate(Direction::NegX)), + .tex_apex = TexCorner::BotLeft, + .vertices = get_component_face_vertices(FaceType::Quad, Direction::NegX), + .texture_id = gfx::add_component_textures(&textures[1 * config::TA_TEXELS_PER_TEXTURE]), + }; + faces[Direction::PosY] = // up==PosZ + { + .normal = FaceNormal(negate(Direction::PosY)), + .tex_apex = TexCorner::BotLeft, + .vertices = get_component_face_vertices(FaceType::Quad, Direction::PosY), + .texture_id = gfx::add_component_textures(&textures[2 * config::TA_TEXELS_PER_TEXTURE]), + }; + faces[Direction::NegY] = // up==PosZ + { + .normal = FaceNormal(negate(Direction::NegY)), + .tex_apex = TexCorner::BotLeft, 
+ .vertices = get_component_face_vertices(FaceType::Quad, Direction::NegY), + .texture_id = gfx::add_component_textures(&textures[3 * config::TA_TEXELS_PER_TEXTURE]), + }; + faces[Direction::PosZ] = // up==PosY + { + .normal = FaceNormal(negate(Direction::PosZ)), + .tex_apex = TexCorner::BotLeft, + .vertices = get_component_face_vertices(FaceType::Quad, Direction::PosZ), + .texture_id = gfx::add_component_textures(&textures[4 * config::TA_TEXELS_PER_TEXTURE]), + }; + faces[Direction::NegZ] = // up==PosY + { + .normal = FaceNormal(negate(Direction::NegZ)), + .tex_apex = TexCorner::BotLeft, + .vertices = get_component_face_vertices(FaceType::Quad, Direction::NegZ), + .texture_id = gfx::add_component_textures(&textures[5 * config::TA_TEXELS_PER_TEXTURE]), + }; + faces[6] = ComponentFace::NONE; // inner.. + faces[7] = ComponentFace::NONE; // ..faces + + return world::add_component(faces); + } + + void reserve_invalid_buffer_indices() { + // Some indices (ie. 0) is invalid for all purposes + // so we push an empty element into all buffers + // Note: We do not use world::add_* functions, because these + // objects are not actually valid + uint32_t ref = 0; + + // Grid + Grid& grid = grids.add(ref); + assert(ref == 0); + grid.position = Vec3(FLT_MAX); + grid.orientation = Quat::unit(); + grid.aabb = { Vec3(0.0f), Vec3(0.0f) }; + grid.gfx_ref = 0; + + // Chunk + Chunk& chunk = chunks.add(ref); + assert(ref == 0); + chunk.parent_ref = 0; + chunk.parent_offset_x = 0; + chunk.parent_offset_y = 0; + chunk.parent_offset_z = 0; + std::fill_n(chunk.block_ids, 512, 0); + chunk.gfx_ref = 0; + + // Block + Block& block = blocks.add(ref); + assert(ref == 0); + block.parent_ref = 0; + block.transform = 0; + block.base_model_ref = 0; + block.curr_model_ref = 0; + block.gfx_ref = 0; + + // BlockModel + BlockModel& model = block_models.add(ref); + assert(ref == 0); + model.ram_ref_count = 1; // do not ram-unload + model.gpu_ref_count = 0; + bx::memSet(model.component_ids, 0x0, 512 * 
sizeof(uint32_t)); + model.gfx_ref = 0; + model.outline_ref = 0; + model.flags = 0; + + // Component + ref = add_component_empty(); + assert(ref == 0); + } + + void init() + { + reserve_invalid_buffer_indices(); + + /* Initialize Base Game Assets */ + // ComponentModels + uint32_t ref = add_component_debug_cube(); + assert(ref == 24); + // TODO + // add_component_model_debug_slope() + // add_component_model_debug_corner() + // add_component_model_debug_inverse_corner() + // add_component_model_debug_double_corner() + // add_component_model_debug_pyramid() + } + + + // ---------- + // Private function declarations + uint64_t pack_chunk_offsets(const int16_t _x, const int16_t _y, const int16_t _z); + void gpu_load_grid(Grid& _grid); + void gpu_unload_grid(Grid& _grid); + void gpu_load_chunk(Chunk& _chunk); + void gpu_unload_chunk(Chunk& _chunk); + void try_gpu_load_block(const uint32_t _id); + void try_gpu_unload_block(const uint32_t _id); + void gpu_load_block(Block& _block, BlockModel& _model); + void gpu_unload_block(Block& _block, BlockModel& _model); + void gpu_load_block_model(BlockModel& _model); + void gpu_unload_block_model(BlockModel& _model); + void unload_block_models(); + AABB get_chunk_AABB(const Chunk& _chunk); + + // ---------- + + uint32_t add_grid(const Vec3 _position, const Quat _orientation) + { + uint32_t ref = 0; + Grid& grid = grids.add(ref); + grid.position = _position; + grid.orientation = _orientation; + grid.aabb = { Vec3(0.0f), Vec3(0.0f) }; + grid.aabb_dirty = false; + grid.gfx_ref = 0; + + // add this new grid to the grid_list + list_add_sorted(grid_list, ref); + + return ref; + } + + void update_grid_position(const uint32_t _id, const Vec3 _new_position) + { + Grid& grid = grids.at(_id); + grid.position = _new_position; + + // update gpu data + if (grid.gfx_ref > 0) + { + GPUGrid& gpu_grid = gfx::update_grid(grid.gfx_ref); + transform_mtx(_new_position, grid.orientation, gpu_grid.m_mtx); + } + } + + void update_grid_orientation(const 
uint32_t _id, const Quat _new_orientation) + { + Grid& grid = grids.at(_id); + grid.orientation = _new_orientation; + + // update gpu data + if (grid.gfx_ref > 0) + { + GPUGrid& gpu_grid = gfx::update_grid(grid.gfx_ref); + transform_mtx(grid.position, _new_orientation, gpu_grid.m_mtx); + } + } + + void update_grid_transform(const uint32_t _id, const Vec3 _new_position, const Quat _new_orientation) + { + Grid& grid = grids.at(_id); + grid.position = _new_position; + grid.orientation = _new_orientation; + + // update gpu data + if (grid.gfx_ref > 0) + { + GPUGrid& gpu_grid = gfx::update_grid(grid.gfx_ref); + transform_mtx(_new_position, _new_orientation, gpu_grid.m_mtx); + } + } + + void remove_grid(const uint32_t _id) + { + // remove from grid_list + list_remove_sorted(grid_list, _id); + + Grid& grid = grids.at(_id); + + // remove all chunks + for (uint32_t chunk_id : grid.chunk_id_list) + remove_chunk(chunk_id); + + // free memory + freeContainer(grid.chunk_ids); + freeContainer(grid.chunk_id_list); + + if (grid.gfx_ref > 0) + { + gpu_unload_grid(grid); + } + + grids.remove(_id); + } + + const Grid& get_grid(const uint32_t _id) + { + return grids.at_const(_id); + } + + const std::vector& get_grid_list() + { + return grid_list; + } + + // [private] + // Loads grid ONLY + inline void gpu_load_grid(Grid& _grid) + { + assert(_grid.gfx_ref == 0); + GPUGrid& gpu_grid = gfx::add_grid(_grid.gfx_ref); + transform_mtx(_grid.position, _grid.orientation, gpu_grid.m_mtx); + } + + // [private] + // Unloads grid ONLY + inline void gpu_unload_grid(Grid& _grid) + { + assert(_grid.gfx_ref > 0); + gfx::remove_grid(_grid.gfx_ref); + _grid.gfx_ref = 0; + } + + // ---------- + + uint32_t add_chunk(const uint32_t _parent_grid_id, const int16_t _parent_offset_x, const int16_t _parent_offset_y, const int16_t _parent_offset_z) + { + uint32_t ref = 0; + Chunk& chunk = chunks.add(ref); + + // initialize + chunk.parent_ref = _parent_grid_id; + chunk.parent_offset_x = _parent_offset_x; + 
chunk.parent_offset_y = _parent_offset_y; + chunk.parent_offset_z = _parent_offset_z; + std::fill_n(chunk.block_ids, 512, 0); + chunk.gfx_ref = 0; + + // update parent + Grid& grid = grids.at(chunk.parent_ref); + assert(grid.chunk_ids.insert(std::pair(pack_chunk_offsets(_parent_offset_x, _parent_offset_y, _parent_offset_z), ref)).second); + list_add_sorted(grid.chunk_id_list, ref); + // grow AABB + combineAABB(grid.aabb, get_chunk_AABB(chunk)); + + return ref; + } + + void remove_chunk(const uint32_t _id) + { + Chunk& chunk = chunks.at(_id); + + // update parent + Grid& grid = grids.at(chunk.parent_ref); + assert(grid.chunk_ids.erase(_id) == 1); + list_remove_sorted(grid.chunk_id_list, _id); + grid.aabb_dirty = true; + + // unload all blocks + for (uint32_t block_id : chunk.block_ids) + { + if (block_id > 0) + remove_block(block_id); + } + + if (chunk.gfx_ref > 0) + { + gpu_unload_chunk(chunk); + } + } + + const Chunk& get_chunk(const uint32_t _id) + { + return chunks.at_const(_id); + } + + // [private] + inline void gpu_load_chunk(Chunk& _chunk) + { // load chunk itself and (try) all blocks onto gpu + + assert(grids.at(_chunk.parent_ref).gfx_ref > 0); + assert(_chunk.gfx_ref == 0); + GPUChunk& gpu_chunk = gfx::add_chunk(_chunk.gfx_ref); + gpu_chunk.grid_id = (float)grids.at_const(_chunk.parent_ref).gfx_ref; // must be grid id ON GPU + gpu_chunk.grid_offset_x = (float)_chunk.parent_offset_x; + gpu_chunk.grid_offset_y = (float)_chunk.parent_offset_y; + gpu_chunk.grid_offset_z = (float)_chunk.parent_offset_z; + + for (uint32_t block_id : _chunk.block_ids) + { + if (block_id > 0) + try_gpu_load_block(block_id); + } + } + + // [private] + inline void gpu_unload_chunk(Chunk& _chunk) + { + // unload chunk itself and all blocks + + assert(_chunk.gfx_ref > 0); + gfx::remove_chunk(_chunk.gfx_ref); + _chunk.gfx_ref = 0; + + for (uint32_t block_id : _chunk.block_ids) + { + if (block_id > 0) + try_gpu_unload_block(block_id); + } + } + + // ---------- + + uint32_t 
add_block(const uint32_t _parent_chunk_id, const uint16_t _transform, const uint32_t _base_model_id, const uint32_t _curr_model_id) + { + uint32_t ref; + Block& block = blocks.add(ref); + + // initialize + block.parent_ref = _parent_chunk_id; + block.transform = _transform; + block.base_model_ref = _base_model_id; + block.curr_model_ref = _curr_model_id; + block.gfx_ref = 0; + + // add model reference + block_models.at(_base_model_id).ram_ref_count++; + block_models.at(_curr_model_id).ram_ref_count++; + + // update parent + Chunk& chunk = chunks.at(_parent_chunk_id); + const uint16_t index = block_transform_to_chunk_index(block.transform); + assert(chunk.block_ids[index] == 0); + chunk.block_ids[index] = ref; + + // gpu load + if (chunk.gfx_ref > 0) + { + try_gpu_load_block(ref); + } + + return ref; + } + + void update_block_curr_model(const uint32_t _id, const uint32_t _curr_model_id) + { + // TODO + //Block& block = blocks.at(_id); + //block_models.at(block.curr_model_ref).ref_count--; + //block.curr_model_ref = _curr_model_id; + //BlockModel& new_model = block_models.at(block.curr_model_ref); + //new_model.ref_count++; + + //if (block.gfx_ref > 0) + //{ + // GPUBlock& gpu_block = gfx::update_block(block.gfx_ref); + // + //} + } + + void rotate_block(const uint32_t _id, const bool _inc) + { + Block& block = blocks.at(_id); + uint16_t _off_x, _off_y, _off_z, _ortt; + block_transform_unpack(block.transform, _off_x, _off_y, _off_z, _ortt); + + _ortt += 24; + if (_inc) + { + _ortt++; + } + else + { + _ortt--; + } + _ortt %= 24; + + block.transform = block_transform_pack(_off_x, _off_y, _off_z, _ortt); + + // update gpu data + if (block.gfx_ref > 0) + { + GPUBlock& gpu_block = gfx::update_block(block.gfx_ref); + gpu_block.transform = block.transform; + } + } + + void rotate_block(const uint32_t _id, const Direction _axis) + { + // current orientation is rotated by _dir (left-claw-rule) + + Block& block = blocks.at(_id); + uint16_t _off_x, _off_y, _off_z, _ortt; + 
block_transform_unpack(block.transform, _off_x, _off_y, _off_z, _ortt); + + _ortt = rotate(Orientation(_ortt), _axis).data; + + block.transform = block_transform_pack(_off_x, _off_y, _off_z, _ortt); + + // update gpu data + if (block.gfx_ref > 0) + { + GPUBlock& gpu_block = gfx::update_block(block.gfx_ref); + gpu_block.transform = block.transform; + } + } + + void remove_block(const uint32_t _id) + { + Block& block = blocks.at(_id); + + // update parent + const uint16_t index = block_transform_to_chunk_index(block.transform); + Chunk& chunk = chunks.at(block.parent_ref); + assert(chunk.block_ids[index] > 0); + chunk.block_ids[index] = 0; + + // gpu unload + try_gpu_unload_block(_id); + + // remove model reference + block_models.at(block.base_model_ref).ram_ref_count--; + block_models.at(block.curr_model_ref).ram_ref_count--; + + blocks.remove(_id); + } + + const Block& get_block(const uint32_t _id) + { + return blocks.at_const(_id); + } + + // [private] + // loads block if possible, otherwise enqueues request + // Attention: You should probably call gpu_load_chunk(..) 
instead + void try_gpu_load_block(const uint32_t _id) + { + Block& block = blocks.at(_id); + assert(block.gfx_ref == 0); + + // check model + BlockModel& model = block_models.at(block.curr_model_ref); + if (model.gfx_ref > 0) + { // all good => just load block + gpu_load_block(block, model); + } + else if (/*model.gfx_ref == 0 && */ model.has_components()) + { // we can load the model first + gpu_load_block_model(model); + gpu_load_block(block, model); + } + else + { // we have to wait until model is local + auto it = block_model_requests.find(block.curr_model_ref); + if (it == block_model_requests.end()) + { // first request + block_model_requests.insert( + std::pair(block.curr_model_ref, std::vector(1, _id))); + } + else + { // add request + it->second.push_back(_id); + } + } + } + + // [private] + // unloads block if possible, otherwise removes pending request (if any) + // Attention: You should probably call gpu_unload_chunk(..) instead + void try_gpu_unload_block(const uint32_t _id) + { + Block& block = blocks.at(_id); + + if (block.gfx_ref > 0) + { // ok, just unload + gpu_unload_block(block, block_models.at(block.curr_model_ref)); + } + else + { // check if theres a pending request + auto it = block_model_requests.find(block.curr_model_ref); + if (it != block_model_requests.end()) + { + auto v_it = std::find(it->second.begin(), it->second.end(), _id); + if (v_it != it->second.end()) + { // entry found! + it->second.erase(v_it); + } + } + } + } + + // [private] + // Attention: You should probably call try_gpu_load_block(..) 
instead + inline void gpu_load_block(Block& _block, BlockModel& _model) + { + assert(_block.gfx_ref == 0); + assert(_model.gfx_ref > 0); + assert(chunks.at_const(_block.parent_ref).gfx_ref > 0); + + _model.gpu_ref_count++; + + uint32_t offset, num_elements; + gfx::get_block_model_offset(_model.gfx_ref, offset, num_elements); + + GPUBlock& gpu_block = gfx::add_block(_block.gfx_ref); + gpu_block.chunk_id = (float)chunks.at_const(_block.parent_ref).gfx_ref; // must be chunk id ON GPU + gpu_block.transform = (float)_block.transform; + gpu_block.index_buf_offset = (float)offset; + gpu_block.num_indices = (float)num_elements; + } + + // [private] + // Attention: You should probably call try_gpu_unload_block(..) instead + inline void gpu_unload_block(Block& _block, BlockModel& _model) + { + assert(_block.gfx_ref > 0); + + _model.gpu_ref_count--; + + gfx::remove_block(_block.gfx_ref); + _block.gfx_ref = 0; + } + + // ---------- + + uint32_t add_block_model() + { + uint32_t ref = 0; + BlockModel& model = block_models.add(ref); + + // initialize/reset + bx::memSet(model.component_ids, 0x0, 512 * sizeof(uint32_t)); + model.ram_ref_count = 0; + model.gfx_ref = 0; + model.outline_ref = 0; + model.flags = 0; + + list_add_sorted(block_model_list, ref); + + // request data + // TODO net::request_block_model + + return ref; + } + + void set_block_model_components(const uint32_t _id, const uint32_t _component_ids[512]) + { + BlockModel& model = block_models.at(_id); + + assert(!model.has_components()); + + // Note: This copy is fine as we will copy/load from a file or netstream in the future + bx::memCopy(model.component_ids, _component_ids, 512 * sizeof(uint32_t)); + model.flags |= BlockModel::FLAG_HAS_COMPONENTS; + + // gpu load + if (model.gpu_ref_count > 0) + { + gpu_load_block_model(model); + + // notify waiting blocks + auto it = block_model_requests.find(_id); + if (it != block_model_requests.end()) + { + for (uint32_t block_id : it->second) + { + 
gpu_load_block(blocks.at(block_id), model); + } + + block_model_requests.erase(it); + } + } + } + + const BlockModel& get_block_model(const uint32_t _id) + { + return block_models.at_const(_id); + } + + uint32_t add_component(const ComponentFace _faces[8]) + { + uint32_t base_id, id; + Component& base_comp = components.add(base_id); + assert(base_id % 24 == 0); + + // Note: This copy is fine as we will copy/load from a file or netstream in the future + bx::memCopy(base_comp.faces, _faces, 8 * sizeof(ComponentFace)); + + // Generate all 23 other orientations + for (int i = 1; i < 24; i++) + { + Orientation ori(i); // We can just increment the orientation + I8RotMat3x3 mat(ori); + + Component& comp = components.add(id); + + // The direction we look at the component changes + for (int d = 0; d < Direction::Count; d++) + { + Direction dir = (Direction)d; + + // Rotate face direction (find from which direction we will see this face) + ComponentFace& oldFace = base_comp.faces[dir]; + ComponentFace& newFace = comp.faces[mat.rotateDirection(dir)]; + + if (!oldFace.isValid()) + { + newFace = oldFace; // copy invalid face + continue; + } + + // Rotate vertices + newFace.vertices[0] = mat.rotateComponentVertex(oldFace.vertices[0]); + newFace.vertices[1] = mat.rotateComponentVertex(oldFace.vertices[1]); + newFace.vertices[2] = mat.rotateComponentVertex(oldFace.vertices[2]); + if (oldFace.isQuad()) + newFace.vertices[3] = mat.rotateComponentVertex(oldFace.vertices[3]); + else + newFace.vertices[3] = 8; // invalid index + + // Rotate face normal + newFace.normal = mat.rotateFaceNormal(oldFace.normal); + + // Copy texture + newFace.texture_id = oldFace.texture_id; + newFace.tex_apex = oldFace.tex_apex; + } + + // Sanity check +#ifndef NDEBUG + for (int d = 0; d < Direction::Count; d++) + { + Direction dir = (Direction)d; + ComponentFace& newFace = comp.faces[dir]; + if (newFace.isValid()){ + Direction expected = Direction::Count; + newFace.faceType(&expected); + assert(dir == 
expected); + } + } +#endif + + // Copy inner faces + for (int i = 6; i <= 7; i++) + { + ComponentFace& oldFace = base_comp.faces[i]; + ComponentFace& newFace = comp.faces[i]; + + if (!oldFace.isValid()) + { + newFace = oldFace; // copy invalid face + continue; + } + + // Rotate vertices + newFace.vertices[0] = mat.rotateComponentVertex(oldFace.vertices[0]); + newFace.vertices[1] = mat.rotateComponentVertex(oldFace.vertices[1]); + newFace.vertices[2] = mat.rotateComponentVertex(oldFace.vertices[2]); + if (oldFace.isQuad()) + newFace.vertices[3] = mat.rotateComponentVertex(oldFace.vertices[3]); + else + newFace.vertices[3] = 8; // invalid index + + // Rotate face normal + newFace.normal = mat.rotateFaceNormal(oldFace.normal); + + // Copy texture + newFace.texture_id = oldFace.texture_id; + newFace.tex_apex = oldFace.tex_apex; + } + } + + return base_id; + } + + const Component &get_component(const uint32_t _id) + { + return components.at_const(_id); + } + + // [private] + // Attention: Automatically loaded when a block requires it + inline void gpu_load_block_model(BlockModel& _model) + { + assert(_model.gfx_ref == 0); + assert(_model.has_components()); + + // generate this models visuals (geometry & texture) + BENCH_START; + if (!factory::weld_block(_model)) + logWarn("BlockWelding failed!\n"); + BENCH_STOP; + } + + // [private] + // Attention: Automatically unloaded when no block requires it anymore + inline void gpu_unload_block_model(BlockModel& _model) + { + assert(_model.gfx_ref > 0); + gfx::remove_block_model(_model.gfx_ref); + _model.gfx_ref = 0; + } + + // [private] + // Checks all block models for ram_unload or gpu_unload + void unload_block_models() + { + for (uint32_t id : block_model_list) + { + BlockModel& model = block_models.at(id); + if (model.has_components()) + { // quick exit, if model is still pending, there is nothing we can do here + continue; + } + + // gpu unload + if (model.gfx_ref > 0 && model.gpu_ref_count == 0) + { + 
gpu_unload_block_model(model); + } + + // ram unload + if (model.ram_ref_count == 0) + { + assert(model.gfx_ref == 0); + assert(model.gpu_ref_count == 0); + block_models.remove(id); + list_remove_sorted(block_model_list, id); + } + + } + + } + + // ---------- + + // Adds all blocks in a chunk to be rendered + void render_chunk(Chunk& _chunk) + { + // TODO do not rebuild buffer every frame + // do not clear entire instance buffer every frame + // determine changes and only update (somehow...) + // maybe only remove and re-add chunks that changed + // TODO render chunks ordered from the camera + // TODO render blocks ordered from camera + + assert(_chunk.gfx_ref > 0); + assert(grids.at(_chunk.parent_ref).gfx_ref > 0); + + // iterate all blocks + for (uint32_t block_id : _chunk.block_ids) + { + if (block_id > 0) + { + Block& block = blocks.at(block_id); + if (block.gfx_ref > 0) + { + assert(block_models.at(block.curr_model_ref).gfx_ref > 0); + gfx::add_block_selection(block.gfx_ref); + } + } + } + } + + AABB get_chunk_AABB(const Chunk& _chunk) // TODO find better way to grow grid AABB and remove this function + { + AABB aabb = CHUNK_AABB; + Vec3 chunk_offset = Vec3((float)_chunk.parent_offset_x, (float)_chunk.parent_offset_y, (float)_chunk.parent_offset_z); + chunk_offset = bx::mul(chunk_offset, 8.0f); + aabb.min = bx::add(aabb.min, chunk_offset); + aabb.max = bx::add(aabb.max, chunk_offset); + return aabb; + } + + AABB recalc_grid_AABB(const Grid& _grid) + { + int32_t min_offset_x = INT32_MAX; + int32_t min_offset_y = INT32_MAX; + int32_t min_offset_z = INT32_MAX; + int32_t max_offset_x = INT32_MIN; + int32_t max_offset_y = INT32_MIN; + int32_t max_offset_z = INT32_MIN; + + for (uint32_t chunk_id : _grid.chunk_id_list) + { + Chunk& chunk = chunks.at(chunk_id); + min_offset_x = bx::min(min_offset_x, chunk.parent_offset_x); + min_offset_y = bx::min(min_offset_y, chunk.parent_offset_y); + min_offset_z = bx::min(min_offset_z, chunk.parent_offset_z); + max_offset_x = 
bx::max(max_offset_x, chunk.parent_offset_x); + max_offset_y = bx::max(max_offset_y, chunk.parent_offset_y); + max_offset_z = bx::max(max_offset_z, chunk.parent_offset_z); + } + + Vec3 min = Vec3((float)min_offset_x, (float)min_offset_y, (float)min_offset_z); + min = bx::mul(min, 8.0f); + Vec3 max = Vec3((float)max_offset_x, (float)max_offset_y, (float)max_offset_z); + max = bx::mul(max, 8.0f); + return AABB(min, max); + } + + bool AABB_in_range() { return true; } // TODO + + void get_grid_transform_mtx(const uint32_t _ref, float* _out_mtx) + { + const Grid& grid = grids.at_const(_ref); + transform_mtx(grid.position, grid.orientation, _out_mtx); + } + + void get_chunk_transform_mtx(const uint32_t _ref, float* _out_mtx) + { + const Chunk& chunk = chunks.at_const(_ref); + const Grid& grid = grids.at_const(chunk.parent_ref); + + const Vec3 chunk_offset = Vec3((float)(chunk.parent_offset_x << 3), (float)(chunk.parent_offset_y << 3), (float)(chunk.parent_offset_z << 3)); // <<3 == *8 + + transform_mtx(bx::add(grid.position, chunk_offset), grid.orientation, _out_mtx); + } + + // TODO block rotation + void get_block_transform_mtx(uint16_t _block_offset_x, uint16_t _block_offset_y, uint16_t _block_offset_z, uint16_t _block_orientation, const uint32_t _chunk_id, float* _out_mtx) + { + const Chunk& chunk = chunks.at_const(_chunk_id); + const Grid& grid = grids.at_const(chunk.parent_ref); + + const Vec3 block_offset = Vec3((float)_block_offset_x, (float)_block_offset_y, (float)_block_offset_z); + + const Vec3 chunk_offset = Vec3((float)(chunk.parent_offset_x << 3), (float)(chunk.parent_offset_y << 3), (float)(chunk.parent_offset_z << 3)); // <<3 == *8 + + // TODO get block_orientation => quat? matrix? 
+ const Vec3 translation = bx::add(bx::add(grid.position, chunk_offset), block_offset); + + transform_mtx(translation, grid.orientation, _out_mtx); + // apply block orientation + float tmp[16]; + bx::mtxIdentity((float*)&tmp); + bx::mtxMul(_out_mtx, _out_mtx, (float*)&tmp); // TODO + } + + void get_block_transform_mtx(const uint32_t _ref, float* _out_mtx) + { + const Block& block = blocks.at_const(_ref); + + uint16_t block_offset_x, block_offset_y, block_offset_z, block_orientation; + block_transform_unpack(block.transform, block_offset_x, block_offset_y, block_offset_z, block_orientation); + + get_block_transform_mtx(block_offset_x, block_offset_y, block_offset_z, block_orientation, block.parent_ref, _out_mtx); + } + + Vec3 get_block_world_position(const uint32_t _id) + { + const Block& block = blocks.at_const(_id); + const Chunk& chunk = chunks.at_const(block.parent_ref); + const Grid& grid = grids.at_const(chunk.parent_ref); + + uint16_t block_offset_x, block_offset_y, block_offset_z, block_orientation; + block_transform_unpack(block.transform, block_offset_x, block_offset_y, block_offset_z, block_orientation); + const Vec3 block_offset = Vec3((float)block_offset_x, (float)block_offset_y, (float)block_offset_z); + + const Vec3 chunk_offset = Vec3((float)(chunk.parent_offset_x << 3), (float)(chunk.parent_offset_y << 3), (float)(chunk.parent_offset_z << 3)); // <<3 == *8 + + return bx::add(bx::add(grid.position, chunk_offset), block_offset); + } + + uint64_t pack_chunk_offsets(const int16_t _x, const int16_t _y, const int16_t _z) + { + const uint64_t x = (uint64_t)((uint16_t)_x); + const uint64_t y = (uint64_t)((uint16_t)_y); + const uint64_t z = (uint64_t)((uint16_t)_z); + return (x << 32) | (y << 16) | z; + } + + + // Issues chunk requests + // Performs GPU load/unload + // Determines visible blocks + // Call AFTER net::update() + // Call BEFORE gfx::update() + void update() + { + // Perform gpu un/load + // iterate all grids + for (uint32_t grid_id : grid_list) 
+ { + Grid& grid = grids.at(grid_id); + + // grid gpu un/load + if (grid.gfx_ref == 0 && AABB_in_range()) + { // load it + gpu_load_grid(grid); + } + else if (grid.gfx_ref > 0 && !AABB_in_range()) + { // unload it + gpu_unload_grid(grid); + } + + // iterate all chunks + for (uint32_t chunk_id : grid.chunk_id_list) + { + Chunk& chunk = chunks.at(chunk_id); + + // chunk gpu un/load + if (chunk.gfx_ref == 0 && AABB_in_range()) + { // load it + gpu_load_chunk(chunk); + } + else if (chunk.gfx_ref > 0 && !AABB_in_range()) + { // unload it + gpu_unload_chunk(chunk); + } + + // render chunk // TODO use view frustum (in extra go?) + if (chunk.gfx_ref > 0 && AABB_in_range()) + { + render_chunk(chunk); + } + } + } + } + + void destroy() + { + // TODO release resources? + + } + + void clean() // TODO + { + unload_block_models(); + + // recalc dirty grid AABBs + for (uint32_t grid_id : grid_list) + { + Grid& grid = grids.at(grid_id); + if (grid.aabb_dirty) + { + grid.aabb = recalc_grid_AABB(grid); + } + } + + // TODO should be callable in incremental steps (for timekeeping) + // TODO call gfx::clean() to defrag gpu memory & update refs + } + +} + +Orientation component::getOrientation(const uint32_t _id) +{ // Current orientation IS the offset in this components block of 24 + // Thats how we generate them (world::add_component) + return Orientation(_id % 24); +} + +uint32_t component::rotate(const uint32_t _id, const bool _inc) +{ // Current orientation IS the offset in this components block of 24 + // Thats how we generate them (world::add_component) + uint32_t base = _id / 24; + uint32_t off = _id % 24; + + off += 24; + if (_inc) + off++; + else + off--; + off %= 24; + + return base * 24 + off; +} + +uint32_t component::rotate(const uint32_t _id, const Direction _axis) +{ // Current orientation IS the offset in this components block of 24 + // Thats how we generate them (world::add_component) + uint32_t base = _id / 24; + uint32_t off = _id % 24; + + Orientation ori(off); + 
ori = ::rotate(ori, _axis); + + return base * 24 + ori.data; +} diff --git a/src/world.h b/src/world.h new file mode 100644 index 0000000..d6df086 --- /dev/null +++ b/src/world.h @@ -0,0 +1,196 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include "space_math.h" +// #include "graphics.h" + +/* * * * * * * * * * * * * * * * * * * * * * */ +/* The world namespace manages all CPU data */ +/* * * * * * * * * * * * * * * * * * * * * * */ + +/// +/// All things are referenced per their id. +/// Some ids have special meanings: +/// 0: is invalid for all purposes. Allows efficient initialization. + +// Helper functions + +// Returns the packed transform of a block (offset in chunk + orientation) +// [off_x | off_y | off_z | orientation] +// TODO should use Orientation struct! +inline uint16_t block_transform_pack(const uint16_t off_x, const uint16_t off_y, const uint16_t off_z, const uint16_t orientation) +{ + uint16_t res = 0; + res |= off_x; + res <<= 3; + res |= off_y; + res <<= 3; + res |= off_z; + res <<= 5; + res |= orientation; + return res; +}; +// TODO should use Orientation struct! 
+inline void block_transform_unpack(const uint16_t _transform, uint16_t &_out_off_x, uint16_t &_out_off_y, uint16_t &_out_off_z, uint16_t &_out_orientation) +{ + _out_orientation = _transform & 0x1F; // 5 bit + _out_off_z = (_transform >> 5) & 0x7; // 3 bit + _out_off_y = (_transform >> 8) & 0x7; // 3 bit + _out_off_x = (_transform >> 11) & 0x7; // 3 bit +} +inline uint16_t block_transform_to_chunk_index(const uint16_t _transform) +{ + return (_transform >> 5); // just remove orientation +} +inline void list_add_sorted(std::vector &_list, const uint32_t _val) +{ + auto it = std::upper_bound(_list.begin(), _list.end(), _val); + assert(it == _list.end() || *it != _val); // value should not yet exist in the list + _list.insert(it, _val); +} +inline void list_remove_sorted(std::vector &_list, const uint32_t _val) +{ + auto it = std::lower_bound(_list.begin(), _list.end(), _val); + assert(it != _list.end()); // value must exist in the list + assert(*it == _val); // value must exist in the list + _list.erase(it); +} + +typedef struct grid +{ + Vec3 position; + Quat orientation; + std::unordered_map chunk_ids; // Note: if hashmap too slow try other (ex. emilib/hashmap) + std::vector chunk_id_list; // kept sorted + AABB aabb; + bool aabb_dirty; // TODO uint_8 state ? + uint32_t gfx_ref; // => GPUGrid +} Grid; + +typedef struct chunk +{ + uint32_t parent_ref; // => Grid + int16_t parent_offset_x; + int16_t parent_offset_y; + int16_t parent_offset_z; + uint32_t block_ids[512]; // 512 = 8*8*8 + // TODO block_list with only valid block_ids ? 
+ uint32_t gfx_ref; // => GPUChunk +} Chunk; + +constexpr AABB CHUNK_AABB = {bx::sub(Vec3(0.0f), Vec3(0.5f)), bx::sub(Vec3(8.0f), Vec3(0.5f))}; // A chunk consists of 8*8*8 blocks + +typedef struct block +{ + /* engine internals */ + uint32_t parent_ref; // => Chunk + uint16_t transform; // offset + orientation in parent chunk + + uint32_t base_model_ref; // => BlockModel // blueprint OR freshly built + uint32_t curr_model_ref; // => BlockModel // may be ==base_model_id OR damaged variant + + uint32_t gfx_ref; // => GPUBlock + + /* gameplay attributes */ + // uint32_t health; // TODO +} Block; + +constexpr AABB BLOCK_AABB = {{-0.5f, -0.5f, -0.5f}, {0.5f, 0.5f, 0.5f}}; // A block has a side length of 1 + +// instance of a component in a block model +// Note: Components are read-only +// We generate all 24 orientations once during loading. They are fixed from then on. +typedef struct component +{ + ComponentFace faces[8]; + + static Orientation getOrientation(const uint32_t _id); + static uint32_t rotate(const uint32_t _id, const bool _inc); + static uint32_t rotate(const uint32_t _id, const Direction _axis); +} Component; + +// Block Model +// What we render for a block +// Depending on its state we might render it differently +// Blueprint => generate & render scaffolding +// During construction => generate & render +// Fully intact => render base model +// Damaged => generate & render "sub"-model +// Note: self glow is encoded in the texture data +// Note: emittance is handled by lighting +typedef struct block_model +{ + // ref_counts aka how many blocks refer to this model + uint32_t ram_ref_count; // if ==0 model can be ram unloaded (on cleanup run) + uint32_t gpu_ref_count; // if ==0 model can be gpu unloaded + + uint32_t component_ids[512]; // 512 = 8*8*8 + + uint32_t gfx_ref; // => GPUBlockModel + std::vector patch_refs; // => GPUTextureAtlasPatch + // TODO free patches on gpu_unload? 
+ uint32_t outline_ref; // => GPULine + + uint32_t flags; + static const uint32_t FLAG_HAS_COMPONENTS = 0x1; + + // TODO rename to + // has_cpu_data & has_gpu_data + // is_ram_loaded & is_gpu_loaded + bool has_components() { return (flags & FLAG_HAS_COMPONENTS) > 0; } +} BlockModel; + +namespace world +{ + + void init(); + // Call every frame! Call BEFORE gfx::update() + void update(); + void destroy(); + + uint32_t add_grid(const Vec3 _position, const Quat _orientation); + void update_grid_position(const uint32_t _id, const Vec3 _new_position); + void update_grid_orientation(const uint32_t _id, const Quat _new_orientation); + void update_grid_transform(const uint32_t _id, const Vec3 _new_position, const Quat _new_orientation); + void remove_grid(const uint32_t _id); + const Grid &get_grid(const uint32_t _id); + const std::vector &get_grid_list(); + + uint32_t add_chunk(const uint32_t _parent_grid_id, const int16_t _parent_offset_x, const int16_t _parent_offset_y, const int16_t _parent_offset_z); + void remove_chunk(const uint32_t _id); + const Chunk &get_chunk(const uint32_t _id); + + uint32_t add_block(const uint32_t _parent_chunk_id, const uint16_t _transform, const uint32_t _base_model_id, const uint32_t _curr_model_id); + void update_block_curr_model(const uint32_t _id, const uint32_t _curr_model_id); + void rotate_block(const uint32_t _id, const bool _inc); + void rotate_block(const uint32_t _id, const Direction _dir); + void remove_block(const uint32_t _id); + const Block &get_block(const uint32_t _id); + + uint32_t add_block_model(); + // Note: models are immutable (cannot be changed after setting it once) + void set_block_model_components(const uint32_t _id, const uint32_t _component_ids[512]); + // Note: models are automatically unloaded when no longer referenced by any blocks + const BlockModel &get_block_model(const uint32_t _id); + + // The faces are given in Direction order and the last two are internal faces + // e.g. 
faces[Direction::PosX] + // Faces are interpreted in normalized orientation + // up== PosY when forward== {PosX,NegX,PosZ,NegZ} + // and up== PosZ when forward== {PosY,NegY} + uint32_t add_component(const ComponentFace _faces[8]); + const Component &get_component(const uint32_t _id); + + + void get_grid_transform_mtx(const uint32_t _id, float *_out_mtx); + void get_chunk_transform_mtx(const uint32_t _id, float *_out_mtx); + void get_block_transform_mtx(uint16_t _block_offset_x, uint16_t _block_offset_y, uint16_t _block_offset_z, uint16_t _block_orientation, const uint32_t _chunk_id, float *_out_mtx); + void get_block_transform_mtx(const uint32_t _id, float *_out_mtx); + Vec3 get_block_world_position(const uint32_t _id); + +}