From 2010e99f1bf3b4eef495c56906e3e3d9717407f3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=D0=91=D1=80=D0=B0=D0=BD=D0=B8=D0=BC=D0=B8=D1=80=20=D0=9A?=
 =?UTF-8?q?=D0=B0=D1=80=D0=B0=D1=9F=D0=B8=D1=9B?=
Date: Sat, 14 Sep 2019 07:26:41 -0700
Subject: [PATCH] Updated meshoptimizer.

---
 3rdparty/meshoptimizer/src/meshoptimizer.h | 284 ++++++++++++---------
 1 file changed, 167 insertions(+), 117 deletions(-)

diff --git a/3rdparty/meshoptimizer/src/meshoptimizer.h b/3rdparty/meshoptimizer/src/meshoptimizer.h
index 0fa213501..711f9c782 100644
--- a/3rdparty/meshoptimizer/src/meshoptimizer.h
+++ b/3rdparty/meshoptimizer/src/meshoptimizer.h
@@ -413,7 +413,168 @@ inline float meshopt_quantizeFloat(float v, int N);
  * When the supplied type is the same size as that of unsigned int, the wrappers are zero-cost; when it's not,
  * the wrappers end up allocating memory and copying index data to convert from one type to another.
  */
+#if defined(__cplusplus) && !defined(MESHOPTIMIZER_NO_WRAPPERS)
+template <typename T>
+inline size_t meshopt_generateVertexRemap(unsigned int* destination, const T* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size);
+template <typename T>
+inline size_t meshopt_generateVertexRemapMulti(unsigned int* destination, const T* indices, size_t index_count, size_t vertex_count, const meshopt_Stream* streams, size_t stream_count);
+template <typename T>
+inline void meshopt_remapIndexBuffer(T* destination, const T* indices, size_t index_count, const unsigned int* remap);
+template <typename T>
+inline void meshopt_generateShadowIndexBuffer(T* destination, const T* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size, size_t vertex_stride);
+template <typename T>
+inline void meshopt_generateShadowIndexBufferMulti(T* destination, const T* indices, size_t index_count, size_t vertex_count, const meshopt_Stream* streams, size_t stream_count);
+template <typename T>
+inline void meshopt_optimizeVertexCache(T* destination, const T* indices, size_t index_count, size_t vertex_count);
+template <typename T>
+inline void meshopt_optimizeVertexCacheFifo(T* destination, const T* indices, size_t index_count, size_t vertex_count, unsigned int cache_size);
+template <typename T>
+inline void meshopt_optimizeOverdraw(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, float threshold);
+template <typename T>
+inline size_t meshopt_optimizeVertexFetchRemap(unsigned int* destination, const T* indices, size_t index_count, size_t vertex_count);
+template <typename T>
+inline size_t meshopt_optimizeVertexFetch(void* destination, T* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size);
+template <typename T>
+inline size_t meshopt_encodeIndexBuffer(unsigned char* buffer, size_t buffer_size, const T* indices, size_t index_count);
+template <typename T>
+inline int meshopt_decodeIndexBuffer(T* destination, size_t index_count, const unsigned char* buffer, size_t buffer_size);
+template <typename T>
+inline size_t meshopt_simplify(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_index_count, float target_error);
+template <typename T>
+inline size_t meshopt_simplifySloppy(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_index_count);
+template <typename T>
+inline size_t meshopt_stripify(T* destination, const T* indices, size_t index_count, size_t vertex_count, T restart_index);
+template <typename T>
+inline size_t meshopt_unstripify(T* destination, const T* indices, size_t index_count, T restart_index);
+template <typename T>
+inline meshopt_VertexCacheStatistics meshopt_analyzeVertexCache(const T* indices, size_t index_count, size_t vertex_count, unsigned int cache_size, unsigned int warp_size, unsigned int buffer_size);
+template <typename T>
+inline meshopt_OverdrawStatistics meshopt_analyzeOverdraw(const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
+template <typename T>
+inline meshopt_VertexFetchStatistics meshopt_analyzeVertexFetch(const T* indices, size_t index_count, size_t vertex_count, size_t vertex_size);
+template <typename T>
+inline size_t meshopt_buildMeshlets(meshopt_Meshlet* destination, const T* indices, size_t index_count, size_t vertex_count, size_t max_vertices, size_t max_triangles);
+template <typename T>
+inline meshopt_Bounds meshopt_computeClusterBounds(const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
+template <typename T>
+inline void meshopt_spatialSortTriangles(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
+#endif
+
+/* Inline implementation */
 #ifdef __cplusplus
+inline int meshopt_quantizeUnorm(float v, int N)
+{
+	const float scale = float((1 << N) - 1);
+
+	v = (v >= 0) ? v : 0;
+	v = (v <= 1) ? v : 1;
+
+	return int(v * scale + 0.5f);
+}
+
+inline int meshopt_quantizeSnorm(float v, int N)
+{
+	const float scale = float((1 << (N - 1)) - 1);
+
+	float round = (v >= 0 ? 0.5f : -0.5f);
+
+	v = (v >= -1) ? v : -1;
+	v = (v <= +1) ? v : +1;
+
+	return int(v * scale + round);
+}
+
+inline unsigned short meshopt_quantizeHalf(float v)
+{
+	union { float f; unsigned int ui; } u = {v};
+	unsigned int ui = u.ui;
+
+	int s = (ui >> 16) & 0x8000;
+	int em = ui & 0x7fffffff;
+
+	/* bias exponent and round to nearest; 112 is relative exponent bias (127-15) */
+	int h = (em - (112 << 23) + (1 << 12)) >> 13;
+
+	/* underflow: flush to zero; 113 encodes exponent -14 */
+	h = (em < (113 << 23)) ? 0 : h;
+
+	/* overflow: infinity; 143 encodes exponent 16 */
+	h = (em >= (143 << 23)) ? 0x7c00 : h;
+
+	/* NaN; note that we convert all types of NaN to qNaN */
+	h = (em > (255 << 23)) ? 0x7e00 : h;
+
+	return (unsigned short)(s | h);
+}
+
+inline float meshopt_quantizeFloat(float v, int N)
+{
+	union { float f; unsigned int ui; } u = {v};
+	unsigned int ui = u.ui;
+
+	const int mask = (1 << (23 - N)) - 1;
+	const int round = (1 << (23 - N)) >> 1;
+
+	int e = ui & 0x7f800000;
+	unsigned int rui = (ui + round) & ~mask;
+
+	/* round all numbers except inf/nan; this is important to make sure nan doesn't overflow into -0 */
+	ui = e == 0x7f800000 ? ui : rui;
+
+	/* flush denormals to zero */
+	ui = e == 0 ? 0 : ui;
+
+	u.ui = ui;
+	return u.f;
+}
+#endif
+
+/* Internal implementation helpers */
+#ifdef __cplusplus
+class meshopt_Allocator
+{
+public:
+	template <typename T>
+	struct StorageT
+	{
+		static void* (*allocate)(size_t);
+		static void (*deallocate)(void*);
+	};
+
+	typedef StorageT<void> Storage;
+
+	meshopt_Allocator()
+	    : blocks()
+	    , count(0)
+	{
+	}
+
+	~meshopt_Allocator()
+	{
+		for (size_t i = count; i > 0; --i)
+			Storage::deallocate(blocks[i - 1]);
+	}
+
+	template <typename T> T* allocate(size_t size)
+	{
+		assert(count < sizeof(blocks) / sizeof(blocks[0]));
+		T* result = static_cast<T*>(Storage::allocate(size > size_t(-1) / sizeof(T) ? size_t(-1) : size * sizeof(T)));
+		blocks[count++] = result;
+		return result;
+	}
+
+private:
+	void* blocks[16];
+	size_t count;
+};
+
+// This makes sure that allocate/deallocate are lazily generated in translation units that need them and are deduplicated by the linker
+template <typename T> void* (*meshopt_Allocator::StorageT<T>::allocate)(size_t) = operator new;
+template <typename T> void (*meshopt_Allocator::StorageT<T>::deallocate)(void*) = operator delete;
+#endif
+
+/* Inline implementation for C++ templated wrappers */
+#if defined(__cplusplus) && !defined(MESHOPTIMIZER_NO_WRAPPERS)
 template <typename T, bool ZeroCopy = sizeof(T) == sizeof(unsigned int)>
 struct meshopt_IndexAdapter;
 
@@ -429,7 +590,9 @@ struct meshopt_IndexAdapter<T, false>
 	    , data(0)
 	    , count(count_)
 	{
-		data = new unsigned int[count];
+		size_t size = count > size_t(-1) / sizeof(unsigned int) ? size_t(-1) : count * sizeof(unsigned int);
+
+		data = static_cast<unsigned int*>(meshopt_Allocator::Storage::allocate(size));
 
 		if (input)
 		{
@@ -446,7 +609,7 @@ struct meshopt_IndexAdapter<T, false>
 				result[i] = T(data[i]);
 		}
 
-		delete[] data;
+		meshopt_Allocator::Storage::deallocate(data);
 	}
 };
 
@@ -487,7 +650,7 @@ inline void meshopt_remapIndexBuffer(T* destination, const T* indices, size_t in
 }
 
 template <typename T>
-void meshopt_generateShadowIndexBuffer(T* destination, const T* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size, size_t vertex_stride)
+inline void meshopt_generateShadowIndexBuffer(T* destination, const T* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size, size_t vertex_stride)
 {
 	meshopt_IndexAdapter<T> in(0, indices, index_count);
 	meshopt_IndexAdapter<T> out(destination, 0, index_count);
@@ -496,7 +659,7 @@ void meshopt_generateShadowIndexBuffer(T* destination, const T* indices, size_t
 }
 
 template <typename T>
-void meshopt_generateShadowIndexBufferMulti(T* destination, const T* indices, size_t index_count, size_t vertex_count, const meshopt_Stream* streams, size_t stream_count)
+inline void meshopt_generateShadowIndexBufferMulti(T* destination, const T* indices, size_t index_count, size_t vertex_count, const meshopt_Stream* streams, size_t stream_count)
 {
 	meshopt_IndexAdapter<T> in(0, indices, index_count);
 	meshopt_IndexAdapter<T> out(destination, 0, index_count);
@@ -650,119 +813,6 @@ inline void meshopt_spatialSortTriangles(T* destination, const T* indices, size_
 }
 #endif
 
-/* Inline implementation */
-#ifdef __cplusplus
-inline int meshopt_quantizeUnorm(float v, int N)
-{
-	const float scale = float((1 << N) - 1);
-
-	v = (v >= 0) ? v : 0;
-	v = (v <= 1) ? v : 1;
-
-	return int(v * scale + 0.5f);
-}
-
-inline int meshopt_quantizeSnorm(float v, int N)
-{
-	const float scale = float((1 << (N - 1)) - 1);
-
-	float round = (v >= 0 ? 0.5f : -0.5f);
-
-	v = (v >= -1) ? v : -1;
-	v = (v <= +1) ? v : +1;
-
-	return int(v * scale + round);
-}
-
-inline unsigned short meshopt_quantizeHalf(float v)
-{
-	union { float f; unsigned int ui; } u = {v};
-	unsigned int ui = u.ui;
-
-	int s = (ui >> 16) & 0x8000;
-	int em = ui & 0x7fffffff;
-
-	/* bias exponent and round to nearest; 112 is relative exponent bias (127-15) */
-	int h = (em - (112 << 23) + (1 << 12)) >> 13;
-
-	/* underflow: flush to zero; 113 encodes exponent -14 */
-	h = (em < (113 << 23)) ? 0 : h;
-
-	/* overflow: infinity; 143 encodes exponent 16 */
-	h = (em >= (143 << 23)) ? 0x7c00 : h;
-
-	/* NaN; note that we convert all types of NaN to qNaN */
-	h = (em > (255 << 23)) ? 0x7e00 : h;
-
-	return (unsigned short)(s | h);
-}
-
-inline float meshopt_quantizeFloat(float v, int N)
-{
-	union { float f; unsigned int ui; } u = {v};
-	unsigned int ui = u.ui;
-
-	const int mask = (1 << (23 - N)) - 1;
-	const int round = (1 << (23 - N)) >> 1;
-
-	int e = ui & 0x7f800000;
-	unsigned int rui = (ui + round) & ~mask;
-
-	/* round all numbers except inf/nan; this is important to make sure nan doesn't overflow into -0 */
-	ui = e == 0x7f800000 ? ui : rui;
-
-	/* flush denormals to zero */
-	ui = e == 0 ? 0 : ui;
-
-	u.ui = ui;
-	return u.f;
-}
-#endif
-
-/* Internal implementation helpers */
-#ifdef __cplusplus
-class meshopt_Allocator
-{
-public:
-	template <typename T>
-	struct StorageT
-	{
-		static void* (*allocate)(size_t);
-		static void (*deallocate)(void*);
-	};
-
-	typedef StorageT<void> Storage;
-
-	meshopt_Allocator()
-	    : blocks()
-	    , count(0)
-	{
-	}
-
-	~meshopt_Allocator()
-	{
-		for (size_t i = count; i > 0; --i)
-			Storage::deallocate(blocks[i - 1]);
-	}
-
-	template <typename T> T* allocate(size_t size)
-	{
-		assert(count < sizeof(blocks) / sizeof(blocks[0]));
-		T* result = static_cast<T*>(Storage::allocate(size > size_t(-1) / sizeof(T) ? size_t(-1) : size * sizeof(T)));
-		blocks[count++] = result;
-		return result;
-	}
-
-private:
-	void* blocks[16];
-	size_t count;
-};
-
-// This makes sure that allocate/deallocate are lazily generated in translation units that need them and are deduplicated by the linker
-template <typename T> void* (*meshopt_Allocator::StorageT<T>::allocate)(size_t) = operator new;
-template <typename T> void (*meshopt_Allocator::StorageT<T>::deallocate)(void*) = operator delete;
-#endif
-
 /**
  * Copyright (c) 2016-2019 Arseny Kapoulkine
  *
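
Note (illustrative, not part of the patch): the templated wrappers declared in the updated header let the meshoptimizer entry points be called directly with 16-bit index buffers. Per the header comment above, meshopt_IndexAdapter widens narrow indices to unsigned int through meshopt_Allocator::Storage and is zero-cost when the index type is already the size of unsigned int. A minimal usage sketch follows; the helper name reorderForVertexCache, the buffers, and the include path are assumptions, only meshopt_optimizeVertexCache comes from the header.

// Sketch only: exercises the 16-bit wrapper path described above.
#include <cstddef>
#include <cstdint>
#include <vector>
#include "meshoptimizer.h" // assumed include path

std::vector<uint16_t> reorderForVertexCache(const std::vector<uint16_t>& indices, size_t vertex_count)
{
	std::vector<uint16_t> result(indices.size());
	// Resolves to the template <typename T> wrapper; with uint16_t the adapter
	// copies the indices into a temporary unsigned int buffer, runs the C
	// function, and writes the reordered indices back as uint16_t.
	meshopt_optimizeVertexCache(result.data(), indices.data(), indices.size(), vertex_count);
	return result;
}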
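
A second sketch (also not part of the patch) exercises the quantization helpers whose inline implementations this update moves above the wrappers. The expected values follow directly from the code in the header: 1.0f has exponent 127, and rebiasing by 112 (127-15) gives the half-float pattern 0x3C00; a 10-bit unorm of 0.5f is 0.5 * 1023 + 0.5, which rounds to 512.

#include <cassert>
#include "meshoptimizer.h" // assumed include path

int main()
{
	// Half-float conversion: 1.0f maps to sign 0, exponent 15, mantissa 0.
	assert(meshopt_quantizeHalf(1.0f) == 0x3C00);

	// 10-bit unorm quantization of 0.5f.
	assert(meshopt_quantizeUnorm(0.5f, 10) == 512);

	// Keep only the top 10 mantissa bits of v, rounding to the nearest representable value.
	float v = meshopt_quantizeFloat(0.1234f, 10);
	(void)v;
	return 0;
}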