Updated meshoptimizer.
3rdparty/meshoptimizer/src/meshoptimizer.h (vendored): 9 changed lines
@@ -1,5 +1,5 @@
 /**
- * meshoptimizer - version 0.14
+ * meshoptimizer - version 0.15
  *
  * Copyright (C) 2016-2020, by Arseny Kapoulkine (arseny.kapoulkine@gmail.com)
  * Report bugs and download new versions at https://github.com/zeux/meshoptimizer
@@ -12,7 +12,7 @@
 #include <stddef.h>
 
 /* Version macro; major * 1000 + minor * 10 + patch */
-#define MESHOPTIMIZER_VERSION 140
+#define MESHOPTIMIZER_VERSION 150 /* 0.15 */
 
 /* If no API is defined, assume default */
 #ifndef MESHOPTIMIZER_API
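The version macro encodes major * 1000 + minor * 10 + patch, so dependent code can enforce a minimum version at compile time. A minimal sketch:

    #include "meshoptimizer.h"

    /* Require 0.15+ (encoded as 150) before relying on the guarantees documented below. */
    #if MESHOPTIMIZER_VERSION < 150
    #error "meshoptimizer 0.15 or later is required"
    #endif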
@@ -42,6 +42,7 @@ struct meshopt_Stream
  * Generates a vertex remap table from the vertex buffer and an optional index buffer and returns number of unique vertices
  * As a result, all vertices that are binary equivalent map to the same (new) location, with no gaps in the resulting sequence.
  * Resulting remap table maps old vertices to new vertices and can be used in meshopt_remapVertexBuffer/meshopt_remapIndexBuffer.
+ * Note that binary equivalence considers all vertex_size bytes, including padding which should be zero-initialized.
  *
  * destination must contain enough space for the resulting remap table (vertex_count elements)
  * indices can be NULL if the input is unindexed
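As a usage sketch of the remap flow documented above (the interleaved Vertex layout is a hypothetical assumption; the meshopt_* signatures are as declared in this header):

    #include <vector>
    #include "meshoptimizer.h"

    struct Vertex { float px, py, pz, nx, ny, nz, tu, tv; }; // hypothetical layout

    void reindex(const std::vector<Vertex>& vertices, const std::vector<unsigned int>& indices,
                 std::vector<Vertex>& out_vertices, std::vector<unsigned int>& out_indices)
    {
        // Build the remap table; the return value is the number of unique vertices.
        std::vector<unsigned int> remap(vertices.size());
        size_t unique = meshopt_generateVertexRemap(remap.data(), indices.data(), indices.size(),
            vertices.data(), vertices.size(), sizeof(Vertex));

        // Apply the same remap to the vertex and index buffers.
        out_vertices.resize(unique);
        meshopt_remapVertexBuffer(out_vertices.data(), vertices.data(), vertices.size(), sizeof(Vertex), remap.data());

        out_indices.resize(indices.size());
        meshopt_remapIndexBuffer(out_indices.data(), indices.data(), indices.size(), remap.data());
    }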
@@ -53,6 +54,7 @@ MESHOPTIMIZER_API size_t meshopt_generateVertexRemap(unsigned int* destination,
  * As a result, all vertices that are binary equivalent map to the same (new) location, with no gaps in the resulting sequence.
  * Resulting remap table maps old vertices to new vertices and can be used in meshopt_remapVertexBuffer/meshopt_remapIndexBuffer.
  * To remap vertex buffers, you will need to call meshopt_remapVertexBuffer for each vertex stream.
+ * Note that binary equivalence considers all size bytes in each stream, including padding which should be zero-initialized.
  *
  * destination must contain enough space for the resulting remap table (vertex_count elements)
  * indices can be NULL if the input is unindexed
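The multi-stream variant describes vertex data with meshopt_Stream descriptors (data, size, stride), one per attribute stream. A sketch assuming separate position and normal arrays:

    #include "meshoptimizer.h"

    // positions/normals are hypothetical deinterleaved streams, 3 floats per vertex each.
    size_t remapMulti(unsigned int* remap, const unsigned int* indices, size_t index_count,
                      const float* positions, const float* normals, size_t vertex_count)
    {
        meshopt_Stream streams[] = {
            {positions, sizeof(float) * 3, sizeof(float) * 3},
            {normals, sizeof(float) * 3, sizeof(float) * 3},
        };

        // Two vertices map to the same slot only if they match in every stream.
        return meshopt_generateVertexRemapMulti(remap, indices, index_count, vertex_count,
            streams, sizeof(streams) / sizeof(streams[0]));
    }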
@@ -79,6 +81,7 @@ MESHOPTIMIZER_API void meshopt_remapIndexBuffer(unsigned int* destination, const
  * Generate index buffer that can be used for more efficient rendering when only a subset of the vertex attributes is necessary
  * All vertices that are binary equivalent (wrt first vertex_size bytes) map to the first vertex in the original vertex buffer.
  * This makes it possible to use the index buffer for Z pre-pass or shadowmap rendering, while using the original index buffer for regular rendering.
+ * Note that binary equivalence considers all vertex_size bytes, including padding which should be zero-initialized.
  *
  * destination must contain enough space for the resulting index buffer (index_count elements)
  */
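Continuing the hypothetical Vertex sketch above, a shadow index buffer that compares only the leading 12 bytes (the position) might be generated like this:

    // Vertices that share a position collapse to one representative index,
    // which depth-only passes can exploit for better vertex reuse.
    std::vector<unsigned int> shadow_indices(indices.size());
    meshopt_generateShadowIndexBuffer(shadow_indices.data(), indices.data(), indices.size(),
        vertices.data(), vertices.size(), sizeof(float) * 3 /* vertex_size */, sizeof(Vertex) /* stride */);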
@@ -88,6 +91,7 @@ MESHOPTIMIZER_API void meshopt_generateShadowIndexBuffer(unsigned int* destinati
  * Generate index buffer that can be used for more efficient rendering when only a subset of the vertex attributes is necessary
  * All vertices that are binary equivalent (wrt specified streams) map to the first vertex in the original vertex buffer.
  * This makes it possible to use the index buffer for Z pre-pass or shadowmap rendering, while using the original index buffer for regular rendering.
+ * Note that binary equivalence considers all size bytes in each stream, including padding which should be zero-initialized.
  *
  * destination must contain enough space for the resulting index buffer (index_count elements)
  */
@@ -209,6 +213,7 @@ MESHOPTIMIZER_EXPERIMENTAL int meshopt_decodeIndexSequence(void* destination, si
  * Encodes vertex data into an array of bytes that is generally smaller and compresses better compared to original.
  * Returns encoded data size on success, 0 on error; the only error condition is if buffer doesn't have enough space
  * This function works for a single vertex stream; for multiple vertex streams, call meshopt_encodeVertexBuffer for each stream.
+ * Note that all vertex_size bytes of each vertex are encoded verbatim, including padding which should be zero-initialized.
  *
  * buffer must contain enough space for the encoded vertex buffer (use meshopt_encodeVertexBufferBound to compute worst case size)
  */
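A round-trip sketch for the vertex codec, reusing the hypothetical Vertex buffer from earlier (meshopt_encodeVertexBufferBound gives the worst-case output size):

    // Encode into a worst-case sized buffer, then shrink to the actual size.
    std::vector<unsigned char> encoded(meshopt_encodeVertexBufferBound(vertices.size(), sizeof(Vertex)));
    size_t encoded_size = meshopt_encodeVertexBuffer(encoded.data(), encoded.size(),
        vertices.data(), vertices.size(), sizeof(Vertex));
    encoded.resize(encoded_size); // 0 would mean the buffer was too small

    // Decode; returns 0 on success, a negative error code on malformed input.
    std::vector<Vertex> decoded(vertices.size());
    int rc = meshopt_decodeVertexBuffer(decoded.data(), decoded.size(), sizeof(Vertex),
        encoded.data(), encoded.size());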
3rdparty/meshoptimizer/src/simplifier.cpp (vendored): 3 changed lines
@@ -291,8 +291,7 @@ static void classifyVertices(unsigned char* result, unsigned int* loop, unsigned
 		if (openiv != ~0u && openiv != i && openov != ~0u && openov != i &&
 		    openiw != ~0u && openiw != w && openow != ~0u && openow != w)
 		{
-			if (remap[openiv] == remap[openow] && remap[openov] == remap[openiw] &&
-			    remap[openiw] == remap[openov] && remap[openow] == remap[openiv])
+			if (remap[openiv] == remap[openow] && remap[openov] == remap[openiw])
 			{
 				result[i] = Kind_Seam;
 			}
3rdparty/meshoptimizer/src/vertexcodec.cpp (vendored): 14 changed lines
@@ -447,7 +447,7 @@ static const unsigned char* decodeVertexBlock(const unsigned char* data, const u
 static unsigned char kDecodeBytesGroupShuffle[256][8];
 static unsigned char kDecodeBytesGroupCount[256];
 
-#ifdef EMSCRIPTEN
+#ifdef __wasm__
 __attribute__((cold)) // this saves 500 bytes in the output binary - we don't need to vectorize this loop!
 #endif
 static bool
@@ -736,11 +736,9 @@ static const unsigned char* decodeBytesGroupSimd(const unsigned char* data, unsi
 SIMD_TARGET
 static v128_t decodeShuffleMask(unsigned char mask0, unsigned char mask1)
 {
-	// TODO: 8b buffer overrun - should we use splat or extend buffers?
 	v128_t sm0 = wasm_v128_load(&kDecodeBytesGroupShuffle[mask0]);
 	v128_t sm1 = wasm_v128_load(&kDecodeBytesGroupShuffle[mask1]);
 
-	// TODO: we should use v8x16_load_splat
 	v128_t sm1off = wasm_v128_load(&kDecodeBytesGroupCount[mask0]);
 	sm1off = wasm_v8x16_shuffle(sm1off, sm1off, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
 
@@ -754,7 +752,6 @@ static void wasmMoveMask(v128_t mask, unsigned char& mask0, unsigned char& mask1
 {
 	v128_t mask_0 = wasm_v32x4_shuffle(mask, mask, 0, 2, 1, 3);
 
-	// TODO: when Chrome supports v128.const we can try doing vectorized and?
 	uint64_t mask_1a = wasm_i64x2_extract_lane(mask_0, 0) & 0x0804020108040201ull;
 	uint64_t mask_1b = wasm_i64x2_extract_lane(mask_0, 1) & 0x8040201080402010ull;
 
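For intuition, wasmMoveMask emulates an SSE-style movemask: each lane of a comparison result is 0x00 or 0xFF, the per-byte AND keeps one distinct bit per lane, and a horizontal byte sum then packs each 8-byte half into an 8-bit mask. A scalar sketch of the same trick (the constant differs from the interleaved layout used above):

    #include <stdint.h>

    // Collapse 8 bytes of 0x00/0xFF lane results into one 8-bit mask.
    // Each byte holds a unique bit, so the byte sums can never carry.
    static uint8_t movemask8(uint64_t lanes)
    {
        uint64_t bits = lanes & 0x8040201008040201ull; // bit i lives in byte i
        bits += bits >> 32;
        bits += bits >> 16;
        bits += bits >> 8;
        return (uint8_t)bits;
    }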
@@ -786,7 +783,6 @@ static const unsigned char* decodeBytesGroupSimd(const unsigned char* data, unsi
 
 	case 1:
 	{
-		// TODO: test 4b load splat
 		v128_t sel2 = wasm_v128_load(data);
 		v128_t rest = wasm_v128_load(data + 4);
 
@@ -801,7 +797,6 @@ static const unsigned char* decodeBytesGroupSimd(const unsigned char* data, unsi
 
 		v128_t shuf = decodeShuffleMask(mask0, mask1);
 
-		// TODO: test or/andnot
 		v128_t result = wasm_v128_bitselect(wasm_v8x16_swizzle(rest, shuf), sel, mask);
 
 		wasm_v128_store(buffer, result);
@@ -811,7 +806,6 @@ static const unsigned char* decodeBytesGroupSimd(const unsigned char* data, unsi
 
 	case 2:
 	{
-		// TODO: test 8b load splat
 		v128_t sel4 = wasm_v128_load(data);
 		v128_t rest = wasm_v128_load(data + 8);
 
@@ -825,7 +819,6 @@ static const unsigned char* decodeBytesGroupSimd(const unsigned char* data, unsi
 
 		v128_t shuf = decodeShuffleMask(mask0, mask1);
 
-		// TODO: test or/andnot
 		v128_t result = wasm_v128_bitselect(wasm_v8x16_swizzle(rest, shuf), sel, mask);
 
 		wasm_v128_store(buffer, result);
@@ -917,8 +910,7 @@ SIMD_TARGET
 static v128_t unzigzag8(v128_t v)
 {
 	v128_t xl = wasm_i8x16_neg(wasm_v128_and(v, wasm_i8x16_splat(1)));
-	// TODO: use wasm_u8x16_shr when v8 fixes codegen for constant shifts
-	v128_t xr = wasm_v128_and(wasm_u16x8_shr(v, 1), wasm_i8x16_splat(127));
+	v128_t xr = wasm_u8x16_shr(v, 1);
 
 	return wasm_v128_xor(xl, xr);
 }
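For reference, zigzag coding stores signed deltas as small unsigned values (0, -1, 1, -2, ... become 0, 1, 2, 3, ...), which keeps magnitudes small for the byte encoder. The block above is the 16-lane form of the usual scalar identity:

    #include <stdint.h>

    // Scalar counterparts: encode folds the sign into the low bit,
    // decode xors the shifted value with an all-ones-or-zero sign mask.
    static uint8_t zigzag8(int8_t v) { return (uint8_t)((v << 1) ^ (v >> 7)); }
    static int8_t unzigzag8_scalar(uint8_t v) { return (int8_t)(-(v & 1) ^ (v >> 1)); }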
@@ -1010,7 +1002,7 @@ static const unsigned char* decodeVertexBlockSimd(const unsigned char* data, con
 
 #ifdef SIMD_WASM
 #define TEMP v128_t
-#define PREP() v128_t pi = wasm_v128_load(last_vertex + k) // TODO: use wasm_v32x4_load_splat to avoid buffer overrun
+#define PREP() v128_t pi = wasm_v128_load(last_vertex + k)
 #define LOAD(i) v128_t r##i = wasm_v128_load(buffer + j + i * vertex_count_aligned)
 #define GRP4(i) t0 = wasmx_splat_v32x4(r##i, 0), t1 = wasmx_splat_v32x4(r##i, 1), t2 = wasmx_splat_v32x4(r##i, 2), t3 = wasmx_splat_v32x4(r##i, 3)
 #define FIXD(i) t##i = pi = wasm_i8x16_add(pi, t##i)
3rdparty/meshoptimizer/src/vertexfilter.cpp (vendored): 2 changed lines
@@ -623,7 +623,7 @@ static void decodeFilterOctSimd(signed char* data, size_t count)
 static void decodeFilterOctSimd(short* data, size_t count)
 {
 	const v128_t sign = wasm_f32x4_splat(-0.f);
-	volatile v128_t zmask = wasm_i32x4_splat(0x7fff); // TODO: volatile works around LLVM shuffle "optimizations"
+	const v128_t zmask = wasm_i32x4_splat(0x7fff);
 
 	for (size_t i = 0; i < count; i += 4)
 	{
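For background, the oct filter reconstructs unit normals from an octahedral encoding. A scalar sketch of the core mapping, leaving out this file's quantization and SIMD details:

    #include <math.h>

    // Decode octahedral (x, y) in [-1, 1] back to a unit vector; the lower
    // hemisphere is stored folded across the diagonal, hence the z < 0 fixup.
    static void octDecode(float x, float y, float out[3])
    {
        float z = 1.f - fabsf(x) - fabsf(y);

        if (z < 0.f)
        {
            float fx = (1.f - fabsf(y)) * (x >= 0.f ? 1.f : -1.f);
            float fy = (1.f - fabsf(x)) * (y >= 0.f ? 1.f : -1.f);
            x = fx;
            y = fy;
        }

        float len = sqrtf(x * x + y * y + z * z);
        out[0] = x / len;
        out[1] = y / len;
        out[2] = z / len;
    }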