Updated meshoptimizer.

parent 8fc9516dcd
commit 69eb4a54f3

3rdparty/meshoptimizer/src/indexgenerator.cpp (vendored, 97 lines changed)
@@ -6,6 +6,7 @@
 
 // This work is based on:
 // John McDonald, Mark Kilgard. Crack-Free Point-Normal Triangles using Adjacent Edge Normals. 2010
+// John Hable. Variable Rate Shading with Visibility Buffer Rendering. 2024
 namespace meshopt
 {
 
@@ -576,3 +577,99 @@ void meshopt_generateTessellationIndexBuffer
 		memcpy(destination + i * 4, patch, sizeof(patch));
 	}
 }
+
+size_t meshopt_generateProvokingIndexBuffer(unsigned int* destination, unsigned int* reorder, const unsigned int* indices, size_t index_count, size_t vertex_count)
+{
+	assert(index_count % 3 == 0);
+
+	meshopt_Allocator allocator;
+
+	unsigned int* remap = allocator.allocate<unsigned int>(vertex_count);
+	memset(remap, -1, vertex_count * sizeof(unsigned int));
+
+	// compute vertex valence; this is used to prioritize least used corner
+	// note: we use 8-bit counters for performance; for outlier vertices the valence is incorrect but that just affects the heuristic
+	unsigned char* valence = allocator.allocate<unsigned char>(vertex_count);
+	memset(valence, 0, vertex_count);
+
+	for (size_t i = 0; i < index_count; ++i)
+	{
+		unsigned int index = indices[i];
+		assert(index < vertex_count);
+
+		valence[index]++;
+	}
+
+	unsigned int reorder_offset = 0;
+
+	// assign provoking vertices; leave the rest for the next pass
+	for (size_t i = 0; i < index_count; i += 3)
+	{
+		unsigned int a = indices[i + 0], b = indices[i + 1], c = indices[i + 2];
+		assert(a < vertex_count && b < vertex_count && c < vertex_count);
+
+		// try to rotate triangle such that provoking vertex hasn't been seen before
+		// if multiple vertices are new, prioritize the one with least valence
+		// this reduces the risk that a future triangle will have all three vertices seen
+		unsigned int va = remap[a] == ~0u ? valence[a] : ~0u;
+		unsigned int vb = remap[b] == ~0u ? valence[b] : ~0u;
+		unsigned int vc = remap[c] == ~0u ? valence[c] : ~0u;
+
+		if (vb != ~0u && vb <= va && vb <= vc)
+		{
+			// abc -> bca
+			unsigned int t = a;
+			a = b, b = c, c = t;
+		}
+		else if (vc != ~0u && vc <= va && vc <= vb)
+		{
+			// abc -> cab
+			unsigned int t = c;
+			c = b, b = a, a = t;
+		}
+
+		unsigned int newidx = reorder_offset;
+
+		// now remap[a] = ~0u or all three vertices are old
+		// recording remap[a] makes it possible to remap future references to the same index, conserving space
+		if (remap[a] == ~0u)
+			remap[a] = newidx;
+
+		// we need to clone the provoking vertex to get a unique index
+		// if all three are used the choice is arbitrary since no future triangle will be able to reuse any of these
+		reorder[reorder_offset++] = a;
+
+		// note: first vertex is final, the other two will be fixed up in next pass
+		destination[i + 0] = newidx;
+		destination[i + 1] = b;
+		destination[i + 2] = c;
+
+		// update vertex valences for corner heuristic
+		valence[a]--;
+		valence[b]--;
+		valence[c]--;
+	}
+
+	// remap or clone non-provoking vertices (iterating to skip provoking vertices)
+	int step = 1;
+
+	for (size_t i = 1; i < index_count; i += step, step ^= 3)
+	{
+		unsigned int index = destination[i];
+
+		if (remap[index] == ~0u)
+		{
+			// we haven't seen the vertex before as a provoking vertex
+			// to maintain the reference to the original vertex we need to clone it
+			unsigned int newidx = reorder_offset;
+
+			remap[index] = newidx;
+			reorder[reorder_offset++] = index;
+		}
+
+		destination[i] = remap[index];
+	}
+
+	assert(reorder_offset <= vertex_count + index_count / 3);
+	return reorder_offset;
+}
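For review context, a minimal host-side sketch of how the new function could be driven follows. This is an assumption-laden illustration, not library code: the Vertex struct, the vector-based storage, and the buildProvokingGeometry helper are invented for the example; only the meshopt_generateProvokingIndexBuffer signature and the worst-case reorder bound come from this commit.

// Usage sketch (hypothetical application code, not part of meshoptimizer).
#include <vector>
#include "meshoptimizer.h"

struct Vertex { float px, py, pz, nx, ny, nz, tu, tv; }; // illustrative layout

void buildProvokingGeometry(const std::vector<unsigned int>& indices, const std::vector<Vertex>& vertices,
    std::vector<unsigned int>& out_indices, std::vector<Vertex>& out_vertices)
{
	// worst-case reorder table size is vertex_count + index_count/3 (see the header documentation below)
	std::vector<unsigned int> reorder(vertices.size() + indices.size() / 3);
	out_indices.resize(indices.size());

	size_t unique = meshopt_generateProvokingIndexBuffer(
	    out_indices.data(), reorder.data(), indices.data(), indices.size(), vertices.size());

	// reorder[i] is the original vertex id for new vertex i; one option is to bake a matching
	// vertex buffer on the CPU (the header instead suggests indexing the reorder table in the vertex shader)
	out_vertices.resize(unique);
	for (size_t i = 0; i < unique; ++i)
		out_vertices[i] = vertices[reorder[i]];
}

After this step the first index of triangle t equals t, so a fragment shader can recover the primitive id from a flat (non-interpolated) provoking-vertex attribute, which is the use case described in the header documentation below.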
3rdparty/meshoptimizer/src/meshoptimizer.h (vendored, 59 lines changed)

@@ -29,7 +29,9 @@
 #endif
 
 /* Experimental APIs have unstable interface and might have implementation that's not fully tested or optimized */
+#ifndef MESHOPTIMIZER_EXPERIMENTAL
 #define MESHOPTIMIZER_EXPERIMENTAL MESHOPTIMIZER_API
+#endif
 
 /* C interface */
 #ifdef __cplusplus
@@ -137,6 +139,19 @@ MESHOPTIMIZER_API void meshopt_generateAdjacencyIndexBuffer
  */
 MESHOPTIMIZER_API void meshopt_generateTessellationIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
 
+/**
+ * Experimental: Generate index buffer that can be used for visibility buffer rendering and returns the size of the reorder table
+ * Each triangle's provoking vertex index is equal to primitive id; this allows passing it to the fragment shader using nointerpolate attribute.
+ * This is important for performance on hardware where primitive id can't be accessed efficiently in fragment shader.
+ * The reorder table stores the original vertex id for each vertex in the new index buffer, and should be used in the vertex shader to load vertex data.
+ * The provoking vertex is assumed to be the first vertex in the triangle; if this is not the case (OpenGL), rotate each triangle (abc -> bca) before rendering.
+ * For maximum efficiency the input index buffer should be optimized for vertex cache first.
+ *
+ * destination must contain enough space for the resulting index buffer (index_count elements)
+ * reorder must contain enough space for the worst case reorder table (vertex_count + index_count/3 elements)
+ */
+MESHOPTIMIZER_EXPERIMENTAL size_t meshopt_generateProvokingIndexBuffer(unsigned int* destination, unsigned int* reorder, const unsigned int* indices, size_t index_count, size_t vertex_count);
+
 /**
  * Vertex transform cache optimizer
  * Reorders indices to reduce the number of GPU vertex shader invocations
@@ -340,7 +355,7 @@ enum
  * Mesh simplifier
  * Reduces the number of triangles in the mesh, attempting to preserve mesh appearance as much as possible
  * The algorithm tries to preserve mesh topology and can stop short of the target goal based on topology constraints or target error.
- * If not all attributes from the input mesh are required, it's recommended to reindex the mesh using meshopt_generateShadowIndexBuffer prior to simplification.
+ * If not all attributes from the input mesh are required, it's recommended to reindex the mesh without them prior to simplification.
  * Returns the number of indices after simplification, with destination containing new index data
  * The resulting index buffer references vertices from the original vertex buffer.
  * If the original vertex data isn't required, creating a compact vertex buffer using meshopt_optimizeVertexFetch is recommended.
@@ -360,9 +375,8 @@ MESHOPTIMIZER_API size_t meshopt_simplify
  *
  * vertex_attributes should have attribute_count floats for each vertex
  * attribute_weights should have attribute_count floats in total; the weights determine relative priority of attributes between each other and wrt position. The recommended weight range is [1e-3..1e-1], assuming attribute data is in [0..1] range.
- * attribute_count must be <= 16
+ * attribute_count must be <= 32
  * vertex_lock can be NULL; when it's not NULL, it should have a value for each vertex; 1 denotes vertices that can't be moved
- * TODO target_error/result_error currently use combined distance+attribute error; this may change in the future
  */
 MESHOPTIMIZER_EXPERIMENTAL size_t meshopt_simplifyWithAttributes(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, const float* vertex_attributes, size_t vertex_attributes_stride, const float* attribute_weights, size_t attribute_count, const unsigned char* vertex_lock, size_t target_index_count, float target_error, unsigned int options, float* result_error);
 
@@ -469,6 +483,13 @@ struct meshopt_VertexFetchStatistics
  */
 MESHOPTIMIZER_API struct meshopt_VertexFetchStatistics meshopt_analyzeVertexFetch(const unsigned int* indices, size_t index_count, size_t vertex_count, size_t vertex_size);
 
+/**
+ * Meshlet is a small mesh cluster (subset) that consists of:
+ * - triangles, an 8-bit micro triangle (index) buffer, that for each triangle specifies three local vertices to use;
+ * - vertices, a 32-bit vertex indirection buffer, that for each local vertex specifies which mesh vertex to fetch vertex attributes from.
+ *
+ * For efficiency, meshlet triangles and vertices are packed into two large arrays; this structure contains offsets and counts to access the data.
+ */
 struct meshopt_Meshlet
 {
 	/* offsets within meshlet_vertices and meshlet_triangles arrays with meshlet data */
@@ -484,6 +505,7 @@ struct meshopt_Meshlet
  * Meshlet builder
  * Splits the mesh into a set of meshlets where each meshlet has a micro index buffer indexing into meshlet vertices that refer to the original vertex buffer
  * The resulting data can be used to render meshes using NVidia programmable mesh shading pipeline, or in other cluster-based renderers.
+ * When targeting mesh shading hardware, for maximum efficiency meshlets should be further optimized using meshopt_optimizeMeshlet.
 * When using buildMeshlets, vertex positions need to be provided to minimize the size of the resulting clusters.
 * When using buildMeshletsScan, for maximum efficiency the index buffer being converted has to be optimized for vertex cache first.
  *
@@ -544,7 +566,8 @@ struct meshopt_Bounds
  * Real-Time Rendering 4th Edition, section 19.3).
  *
  * vertex_positions should have float3 position in the first 12 bytes of each vertex
- * index_count/3 should be less than or equal to 512 (the function assumes clusters of limited size)
+ * vertex_count should specify the number of vertices in the entire mesh, not cluster or meshlet
+ * index_count/3 and triangle_count must not exceed implementation limits (<= 512)
  */
 MESHOPTIMIZER_API struct meshopt_Bounds meshopt_computeClusterBounds(const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
 MESHOPTIMIZER_API struct meshopt_Bounds meshopt_computeMeshletBounds(const unsigned int* meshlet_vertices, const unsigned char* meshlet_triangles, size_t triangle_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
@@ -642,6 +665,8 @@ inline void meshopt_generateAdjacencyIndexBuffer
 template <typename T>
 inline void meshopt_generateTessellationIndexBuffer(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride);
 template <typename T>
+inline size_t meshopt_generateProvokingIndexBuffer(T* destination, unsigned int* reorder, const T* indices, size_t index_count, size_t vertex_count);
+template <typename T>
 inline void meshopt_optimizeVertexCache(T* destination, const T* indices, size_t index_count, size_t vertex_count);
 template <typename T>
 inline void meshopt_optimizeVertexCacheStrip(T* destination, const T* indices, size_t index_count, size_t vertex_count);
@@ -727,8 +752,8 @@ public:
 	typedef StorageT<void> Storage;
 
 	meshopt_Allocator()
 	    : blocks()
 	    , count(0)
 	{
 	}
 
@@ -738,7 +763,8 @@ public:
 		Storage::deallocate(blocks[i - 1]);
 	}
 
-	template <typename T> T* allocate(size_t size)
+	template <typename T>
+	T* allocate(size_t size)
 	{
 		assert(count < sizeof(blocks) / sizeof(blocks[0]));
 		T* result = static_cast<T*>(Storage::allocate(size > size_t(-1) / sizeof(T) ? size_t(-1) : size * sizeof(T)));
@@ -759,8 +785,10 @@ private:
 };
 
 // This makes sure that allocate/deallocate are lazily generated in translation units that need them and are deduplicated by the linker
-template <typename T> void* (MESHOPTIMIZER_ALLOC_CALLCONV *meshopt_Allocator::StorageT<T>::allocate)(size_t) = operator new;
-template <typename T> void (MESHOPTIMIZER_ALLOC_CALLCONV *meshopt_Allocator::StorageT<T>::deallocate)(void*) = operator delete;
+template <typename T>
+void* (MESHOPTIMIZER_ALLOC_CALLCONV *meshopt_Allocator::StorageT<T>::allocate)(size_t) = operator new;
+template <typename T>
+void (MESHOPTIMIZER_ALLOC_CALLCONV *meshopt_Allocator::StorageT<T>::deallocate)(void*) = operator delete;
 #endif
 
 /* Inline implementation for C++ templated wrappers */
@@ -875,6 +903,19 @@ inline void meshopt_generateTessellationIndexBuffer
 	meshopt_generateTessellationIndexBuffer(out.data, in.data, index_count, vertex_positions, vertex_count, vertex_positions_stride);
 }
 
+template <typename T>
+inline size_t meshopt_generateProvokingIndexBuffer(T* destination, unsigned int* reorder, const T* indices, size_t index_count, size_t vertex_count)
+{
+	meshopt_IndexAdapter<T> in(NULL, indices, index_count);
+	meshopt_IndexAdapter<T> out(destination, NULL, index_count);
+
+	size_t bound = vertex_count + (index_count / 3);
+	assert(size_t(T(bound - 1)) == bound - 1); // bound - 1 must fit in T
+	(void)bound;
+
+	return meshopt_generateProvokingIndexBuffer(out.data, reorder, in.data, index_count, vertex_count);
+}
+
 template <typename T>
 inline void meshopt_optimizeVertexCache(T* destination, const T* indices, size_t index_count, size_t vertex_count)
 {
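Since this commit raises the attribute limit to 32 and (in simplifier.cpp below) skips zero-weight attributes, a hedged call sketch for meshopt_simplifyWithAttributes may help when reviewing. The vertex layout, weights, and target values are illustrative assumptions; only the function signature and parameter semantics come from the header above.

// Usage sketch (hypothetical application code): simplify while preserving normals as attributes.
// Assumes interleaved float vertex data with position at offset 0 and the normal at a caller-provided offset.
#include <vector>
#include "meshoptimizer.h"

size_t simplifyWithNormals(std::vector<unsigned int>& indices, const std::vector<float>& vertex_data,
    size_t vertex_count, size_t vertex_stride_floats, size_t normal_offset_floats)
{
	// one weight per attribute channel, in the recommended [1e-3..1e-1] range for [0..1] data
	const float attribute_weights[3] = {1e-2f, 1e-2f, 1e-2f};

	std::vector<unsigned int> result(indices.size());
	float result_error = 0.f;

	size_t target_index_count = (indices.size() / 6) * 3; // aim for roughly half the triangles
	size_t count = meshopt_simplifyWithAttributes(result.data(), indices.data(), indices.size(),
	    vertex_data.data(), vertex_count, vertex_stride_floats * sizeof(float),
	    vertex_data.data() + normal_offset_floats, vertex_stride_floats * sizeof(float),
	    attribute_weights, 3, /* vertex_lock= */ NULL,
	    target_index_count, /* target_error= */ 1e-2f, /* options= */ 0, &result_error);

	result.resize(count);
	indices.swap(result);
	return count;
}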
3rdparty/meshoptimizer/src/simplifier.cpp (vendored, 209 lines changed)

@@ -503,7 +503,7 @@ static float rescalePositions
 	return extent;
 }
 
-static void rescaleAttributes(float* result, const float* vertex_attributes_data, size_t vertex_count, size_t vertex_attributes_stride, const float* attribute_weights, size_t attribute_count, const unsigned int* sparse_remap)
+static void rescaleAttributes(float* result, const float* vertex_attributes_data, size_t vertex_count, size_t vertex_attributes_stride, const float* attribute_weights, size_t attribute_count, const unsigned int* attribute_remap, const unsigned int* sparse_remap)
 {
 	size_t vertex_attributes_stride_float = vertex_attributes_stride / sizeof(float);
 
@@ -513,14 +513,15 @@ static void rescaleAttributes
 
 		for (size_t k = 0; k < attribute_count; ++k)
 		{
-			float a = vertex_attributes_data[ri * vertex_attributes_stride_float + k];
+			unsigned int rk = attribute_remap[k];
+			float a = vertex_attributes_data[ri * vertex_attributes_stride_float + rk];
 
-			result[i * attribute_count + k] = a * attribute_weights[k];
+			result[i * attribute_count + k] = a * attribute_weights[rk];
 		}
 	}
 }
 
-static const size_t kMaxAttributes = 16;
+static const size_t kMaxAttributes = 32;
 
 struct Quadric
 {
@@ -597,7 +598,7 @@ static void quadricAdd
 	}
 }
 
-static float quadricError(const Quadric& Q, const Vector3& v)
+static float quadricEval(const Quadric& Q, const Vector3& v)
 {
 	float rx = Q.b0;
 	float ry = Q.b1;
@@ -620,6 +621,12 @@ static float quadricError(const Quadric& Q, const Vector3& v)
 	r += ry * v.y;
 	r += rz * v.z;
 
+	return r;
+}
+
+static float quadricError(const Quadric& Q, const Vector3& v)
+{
+	float r = quadricEval(Q, v);
 	float s = Q.w == 0.f ? 0.f : 1.f / Q.w;
 
 	return fabsf(r) * s;
@@ -627,26 +634,7 @@ static float quadricError(const Quadric& Q, const Vector3& v)
 
 static float quadricError(const Quadric& Q, const QuadricGrad* G, size_t attribute_count, const Vector3& v, const float* va)
 {
-	float rx = Q.b0;
-	float ry = Q.b1;
-	float rz = Q.b2;
-
-	rx += Q.a10 * v.y;
-	ry += Q.a21 * v.z;
-	rz += Q.a20 * v.x;
-
-	rx *= 2;
-	ry *= 2;
-	rz *= 2;
-
-	rx += Q.a00 * v.x;
-	ry += Q.a11 * v.y;
-	rz += Q.a22 * v.z;
-
-	float r = Q.c;
-	r += rx * v.x;
-	r += ry * v.y;
-	r += rz * v.z;
+	float r = quadricEval(Q, v);
 
 	// see quadricFromAttributes for general derivation; here we need to add the parts of (eval(pos) - attr)^2 that depend on attr
 	for (size_t k = 0; k < attribute_count; ++k)
@@ -654,14 +642,11 @@ static float quadricError
 		float a = va[k];
 		float g = v.x * G[k].gx + v.y * G[k].gy + v.z * G[k].gz + G[k].gw;
 
-		r += a * a * Q.w;
-		r -= 2 * a * g;
+		r += a * (a * Q.w - 2 * g);
 	}
 
-	// TODO: weight normalization is breaking attribute error somehow
-	float s = 1; // Q.w == 0.f ? 0.f : 1.f / Q.w;
-
-	return fabsf(r) * s;
+	// note: unlike position error, we do not normalize by Q.w to retain edge scaling as described in quadricFromAttributes
+	return fabsf(r);
 }
 
 static void quadricFromPlane(Quadric& Q, float a, float b, float c, float d, float w)
@@ -702,20 +687,24 @@ static void quadricFromTriangle
 static void quadricFromTriangleEdge(Quadric& Q, const Vector3& p0, const Vector3& p1, const Vector3& p2, float weight)
 {
 	Vector3 p10 = {p1.x - p0.x, p1.y - p0.y, p1.z - p0.z};
-	float length = normalize(p10);
 
-	// p20p = length of projection of p2-p0 onto normalize(p1 - p0)
+	// edge length; keep squared length around for projection correction
+	float lengthsq = p10.x * p10.x + p10.y * p10.y + p10.z * p10.z;
+	float length = sqrtf(lengthsq);
+
+	// p20p = length of projection of p2-p0 onto p1-p0; note that p10 is unnormalized so we need to correct it later
 	Vector3 p20 = {p2.x - p0.x, p2.y - p0.y, p2.z - p0.z};
 	float p20p = p20.x * p10.x + p20.y * p10.y + p20.z * p10.z;
 
-	// normal = altitude of triangle from point p2 onto edge p1-p0
-	Vector3 normal = {p20.x - p10.x * p20p, p20.y - p10.y * p20p, p20.z - p10.z * p20p};
-	normalize(normal);
+	// perp = perpendicular vector from p2 to line segment p1-p0
+	// note: since p10 is unnormalized we need to correct the projection; we scale p20 instead to take advantage of normalize below
+	Vector3 perp = {p20.x * lengthsq - p10.x * p20p, p20.y * lengthsq - p10.y * p20p, p20.z * lengthsq - p10.z * p20p};
+	normalize(perp);
 
-	float distance = normal.x * p0.x + normal.y * p0.y + normal.z * p0.z;
+	float distance = perp.x * p0.x + perp.y * p0.y + perp.z * p0.z;
 
 	// note: the weight is scaled linearly with edge length; this has to match the triangle weight
-	quadricFromPlane(Q, normal.x, normal.y, normal.z, -distance, length * weight);
+	quadricFromPlane(Q, perp.x, perp.y, perp.z, -distance, length * weight);
 }
 
 static void quadricFromAttributes(Quadric& Q, QuadricGrad* G, const Vector3& p0, const Vector3& p1, const Vector3& p2, const float* va0, const float* va1, const float* va2, size_t attribute_count)
@@ -728,16 +717,21 @@ static void quadricFromAttributes
 	Vector3 p10 = {p1.x - p0.x, p1.y - p0.y, p1.z - p0.z};
 	Vector3 p20 = {p2.x - p0.x, p2.y - p0.y, p2.z - p0.z};
 
-	// weight is scaled linearly with edge length
+	// normal = cross(p1 - p0, p2 - p0)
 	Vector3 normal = {p10.y * p20.z - p10.z * p20.y, p10.z * p20.x - p10.x * p20.z, p10.x * p20.y - p10.y * p20.x};
-	float area = sqrtf(normal.x * normal.x + normal.y * normal.y + normal.z * normal.z);
-	float w = sqrtf(area); // TODO this needs more experimentation
+	float area = sqrtf(normal.x * normal.x + normal.y * normal.y + normal.z * normal.z) * 0.5f;
+
+	// quadric is weighted with the square of edge length (= area)
+	// this equalizes the units with the positional error (which, after normalization, is a square of distance)
+	// as a result, a change in weighted attribute of 1 along distance d is approximately equivalent to a change in position of d
+	float w = area;
 
 	// we compute gradients using barycentric coordinates; barycentric coordinates can be computed as follows:
 	// v = (d11 * d20 - d01 * d21) / denom
 	// w = (d00 * d21 - d01 * d20) / denom
 	// u = 1 - v - w
 	// here v0, v1 are triangle edge vectors, v2 is a vector from point to triangle corner, and dij = dot(vi, vj)
+	// note: v2 and d20/d21 can not be evaluated here as v2 is effectively an unknown variable; we need these only as variables for derivation of gradients
 	const Vector3& v0 = p10;
 	const Vector3& v1 = p20;
 	float d00 = v0.x * v0.x + v0.y * v0.y + v0.z * v0.z;
@@ -747,7 +741,7 @@
 	float denomr = denom == 0 ? 0.f : 1.f / denom;
 
 	// precompute gradient factors
-	// these are derived by directly computing derivative of eval(pos) = a0 * u + a1 * v + a2 * w and factoring out common factors that are shared between attributes
+	// these are derived by directly computing derivative of eval(pos) = a0 * u + a1 * v + a2 * w and factoring out expressions that are shared between attributes
 	float gx1 = (d11 * v0.x - d01 * v1.x) * denomr;
 	float gx2 = (d00 * v1.x - d01 * v0.x) * denomr;
 	float gy1 = (d11 * v0.y - d01 * v1.y) * denomr;
@@ -772,6 +766,7 @@
 
 	// quadric encodes (eval(pos)-attr)^2; this means that the resulting expansion needs to compute, for example, pos.x * pos.y * K
 	// since quadrics already encode factors for pos.x * pos.y, we can accumulate almost everything in basic quadric fields
+	// note: for simplicity we scale all factors by weight here instead of outside the loop
 	Q.a00 += w * (gx * gx);
 	Q.a11 += w * (gy * gy);
 	Q.a22 += w * (gz * gz);
@@ -859,7 +854,7 @@ static void fillEdgeQuadrics
 	}
 }
 
-static void fillAttributeQuadrics(Quadric* attribute_quadrics, QuadricGrad* attribute_gradients, const unsigned int* indices, size_t index_count, const Vector3* vertex_positions, const float* vertex_attributes, size_t attribute_count, const unsigned int* remap)
+static void fillAttributeQuadrics(Quadric* attribute_quadrics, QuadricGrad* attribute_gradients, const unsigned int* indices, size_t index_count, const Vector3* vertex_positions, const float* vertex_attributes, size_t attribute_count)
 {
 	for (size_t i = 0; i < index_count; i += 3)
 	{
@@ -871,14 +866,13 @@ static void fillAttributeQuadrics
 		QuadricGrad G[kMaxAttributes];
 		quadricFromAttributes(QA, G, vertex_positions[i0], vertex_positions[i1], vertex_positions[i2], &vertex_attributes[i0 * attribute_count], &vertex_attributes[i1 * attribute_count], &vertex_attributes[i2 * attribute_count], attribute_count);
 
-		// TODO: This blends together attribute weights across attribute discontinuities, which is probably not a great idea
-		quadricAdd(attribute_quadrics[remap[i0]], QA);
-		quadricAdd(attribute_quadrics[remap[i1]], QA);
-		quadricAdd(attribute_quadrics[remap[i2]], QA);
+		quadricAdd(attribute_quadrics[i0], QA);
+		quadricAdd(attribute_quadrics[i1], QA);
+		quadricAdd(attribute_quadrics[i2], QA);
 
-		quadricAdd(&attribute_gradients[remap[i0] * attribute_count], G, attribute_count);
-		quadricAdd(&attribute_gradients[remap[i1] * attribute_count], G, attribute_count);
-		quadricAdd(&attribute_gradients[remap[i2] * attribute_count], G, attribute_count);
+		quadricAdd(&attribute_gradients[i0 * attribute_count], G, attribute_count);
+		quadricAdd(&attribute_gradients[i1 * attribute_count], G, attribute_count);
+		quadricAdd(&attribute_gradients[i2 * attribute_count], G, attribute_count);
 	}
 }
 
@@ -923,7 +917,13 @@ static bool hasTriangleFlips
 
 		// early-out when at least one triangle flips due to a collapse
 		if (hasTriangleFlip(vertex_positions[a], vertex_positions[b], v0, v1))
+		{
+#if TRACE >= 2
+			printf("edge block %d -> %d: flip welded %d %d %d\n", i0, i1, a, i0, b);
+#endif
+
 			return true;
+		}
 	}
 
 	return false;
@@ -1026,16 +1026,31 @@ static void rankEdgeCollapses
 		float ei = quadricError(vertex_quadrics[remap[i0]], vertex_positions[i1]);
 		float ej = quadricError(vertex_quadrics[remap[j0]], vertex_positions[j1]);
 
+#if TRACE >= 2
+		float di = ei, dj = ej;
+#endif
+
 		if (attribute_count)
 		{
-			ei += quadricError(attribute_quadrics[remap[i0]], &attribute_gradients[remap[i0] * attribute_count], attribute_count, vertex_positions[i1], &vertex_attributes[i1 * attribute_count]);
-			ej += quadricError(attribute_quadrics[remap[j0]], &attribute_gradients[remap[j0] * attribute_count], attribute_count, vertex_positions[j1], &vertex_attributes[j1 * attribute_count]);
+			// note: ideally we would evaluate max/avg of attribute errors for seam edges, but it's not clear if it's worth the extra cost
+			ei += quadricError(attribute_quadrics[i0], &attribute_gradients[i0 * attribute_count], attribute_count, vertex_positions[i1], &vertex_attributes[i1 * attribute_count]);
+			ej += quadricError(attribute_quadrics[j0], &attribute_gradients[j0 * attribute_count], attribute_count, vertex_positions[j1], &vertex_attributes[j1 * attribute_count]);
 		}
 
 		// pick edge direction with minimal error
 		c.v0 = ei <= ej ? i0 : j0;
 		c.v1 = ei <= ej ? i1 : j1;
 		c.error = ei <= ej ? ei : ej;
+
+#if TRACE >= 2
+		if (i0 == j0) // c.bidi has been overwritten
+			printf("edge eval %d -> %d: error %f (pos %f, attr %f)\n", c.v0, c.v1,
+			    sqrtf(c.error), sqrtf(ei <= ej ? di : dj), sqrtf(ei <= ej ? ei - di : ej - dj));
+		else
+			printf("edge eval %d -> %d: error %f (pos %f, attr %f); reverse %f (pos %f, attr %f)\n", c.v0, c.v1,
+			    sqrtf(ei <= ej ? ei : ej), sqrtf(ei <= ej ? di : dj), sqrtf(ei <= ej ? ei - di : ej - dj),
+			    sqrtf(ei <= ej ? ej : ei), sqrtf(ei <= ej ? dj : di), sqrtf(ei <= ej ? ej - dj : ei - di));
+#endif
 	}
 }
 
@@ -1117,6 +1132,8 @@ static size_t performEdgeCollapses
 		unsigned int r0 = remap[i0];
 		unsigned int r1 = remap[i1];
 
+		unsigned char kind = vertex_kind[i0];
+
 		// we don't collapse vertices that had source or target vertex involved in a collapse
 		// it's important to not move the vertices twice since it complicates the tracking/remapping logic
 		// it's important to not move other vertices towards a moved vertex to preserve error since we don't re-rank collapses mid-pass
@@ -1135,6 +1152,10 @@ static size_t performEdgeCollapses
 			continue;
 		}
 
+#if TRACE >= 2
+		printf("edge commit %d -> %d: kind %d->%d, error %f\n", i0, i1, vertex_kind[i0], vertex_kind[i1], sqrtf(c.error));
+#endif
+
 		assert(collapse_remap[r0] == r0);
 		assert(collapse_remap[r1] == r1);
 
@@ -1142,26 +1163,35 @@ static size_t performEdgeCollapses
 
 		if (attribute_count)
 		{
-			quadricAdd(attribute_quadrics[r1], attribute_quadrics[r0]);
-			quadricAdd(&attribute_gradients[r1 * attribute_count], &attribute_gradients[r0 * attribute_count], attribute_count);
+			quadricAdd(attribute_quadrics[i1], attribute_quadrics[i0]);
+			quadricAdd(&attribute_gradients[i1 * attribute_count], &attribute_gradients[i0 * attribute_count], attribute_count);
+
+			// note: this is intentionally missing handling for Kind_Complex; we assume that complex vertices have similar attribute values so just using the primary vertex is fine
+			if (kind == Kind_Seam)
+			{
+				// seam collapses involve two edges so we need to update attribute quadrics for both target vertices; position quadrics are shared
+				unsigned int s0 = wedge[i0], s1 = wedge[i1];
+
+				quadricAdd(attribute_quadrics[s1], attribute_quadrics[s0]);
+				quadricAdd(&attribute_gradients[s1 * attribute_count], &attribute_gradients[s0 * attribute_count], attribute_count);
+			}
 		}
 
-		if (vertex_kind[i0] == Kind_Complex)
+		if (kind == Kind_Complex)
 		{
+			// remap all vertices in the complex to the target vertex
 			unsigned int v = i0;
 
 			do
 			{
-				collapse_remap[v] = r1;
+				collapse_remap[v] = i1;
 				v = wedge[v];
 			} while (v != i0);
 		}
-		else if (vertex_kind[i0] == Kind_Seam)
+		else if (kind == Kind_Seam)
 		{
 			// remap v0 to v1 and seam pair of v0 to seam pair of v1
-			unsigned int s0 = wedge[i0];
-			unsigned int s1 = wedge[i1];
+			unsigned int s0 = wedge[i0], s1 = wedge[i1];
 
 			assert(s0 != i0 && s1 != i1);
 			assert(wedge[s0] == i0 && wedge[s1] == i1);
 
@@ -1179,7 +1209,7 @@ static size_t performEdgeCollapses
 		collapse_locked[r1] = 1;
 
 		// border edges collapse 1 triangle, other edges collapse 2 or more
-		triangle_collapses += (vertex_kind[i0] == Kind_Border) ? 1 : 2;
+		triangle_collapses += (kind == Kind_Border) ? 1 : 2;
 		edge_collapses++;
 
 		result_error = result_error < c.error ? c.error : result_error;
@@ -1546,12 +1576,11 @@ static float interpolate
 
 } // namespace meshopt
 
-#ifndef NDEBUG
-// Note: this is only exposed for debug visualization purposes; do *not* use
-MESHOPTIMIZER_API unsigned char* meshopt_simplifyDebugKind = NULL;
-MESHOPTIMIZER_API unsigned int* meshopt_simplifyDebugLoop = NULL;
-MESHOPTIMIZER_API unsigned int* meshopt_simplifyDebugLoopBack = NULL;
-#endif
+// Note: this is only exposed for debug visualization purposes; do *not* use these in debug builds
+enum
+{
+	meshopt_SimplifyInternalDebug = 1 << 30
+};
 
 size_t meshopt_simplifyEdge(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions_data, size_t vertex_count, size_t vertex_positions_stride, const float* vertex_attributes_data, size_t vertex_attributes_stride, const float* attribute_weights, size_t attribute_count, const unsigned char* vertex_lock, size_t target_index_count, float target_error, unsigned int options, float* out_result_error)
 {
@@ -1561,10 +1590,13 @@ size_t meshopt_simplifyEdge
 	assert(vertex_positions_stride >= 12 && vertex_positions_stride <= 256);
 	assert(vertex_positions_stride % sizeof(float) == 0);
 	assert(target_index_count <= index_count);
-	assert((options & ~(meshopt_SimplifyLockBorder | meshopt_SimplifySparse | meshopt_SimplifyErrorAbsolute)) == 0);
+	assert(target_error >= 0);
+	assert((options & ~(meshopt_SimplifyLockBorder | meshopt_SimplifySparse | meshopt_SimplifyErrorAbsolute | meshopt_SimplifyInternalDebug)) == 0);
 	assert(vertex_attributes_stride >= attribute_count * sizeof(float) && vertex_attributes_stride <= 256);
 	assert(vertex_attributes_stride % sizeof(float) == 0);
 	assert(attribute_count <= kMaxAttributes);
+	for (size_t i = 0; i < attribute_count; ++i)
+		assert(attribute_weights[i] >= 0);
 
 	meshopt_Allocator allocator;
 
@@ -1616,8 +1648,17 @@ size_t meshopt_simplifyEdge
 
 	if (attribute_count)
 	{
+		unsigned int attribute_remap[kMaxAttributes];
+
+		// remap attributes to only include ones with weight > 0 to minimize memory/compute overhead for quadrics
+		size_t attributes_used = 0;
+		for (size_t i = 0; i < attribute_count; ++i)
+			if (attribute_weights[i] > 0)
+				attribute_remap[attributes_used++] = unsigned(i);
+
+		attribute_count = attributes_used;
 		vertex_attributes = allocator.allocate<float>(vertex_count * attribute_count);
-		rescaleAttributes(vertex_attributes, vertex_attributes_data, vertex_count, vertex_attributes_stride, attribute_weights, attribute_count, sparse_remap);
+		rescaleAttributes(vertex_attributes, vertex_attributes_data, vertex_count, vertex_attributes_stride, attribute_weights, attribute_count, attribute_remap, sparse_remap);
 	}
 
 	Quadric* vertex_quadrics = allocator.allocate<Quadric>(vertex_count);
@@ -1639,7 +1680,7 @@ size_t meshopt_simplifyEdge
 	fillEdgeQuadrics(vertex_quadrics, result, index_count, vertex_positions, remap, vertex_kind, loop, loopback);
 
 	if (attribute_count)
-		fillAttributeQuadrics(attribute_quadrics, attribute_gradients, result, index_count, vertex_positions, vertex_attributes, attribute_count, remap);
+		fillAttributeQuadrics(attribute_quadrics, attribute_gradients, result, index_count, vertex_positions, vertex_attributes, attribute_count);
 
 #if TRACE
 	size_t pass_count = 0;
@@ -1671,6 +1712,10 @@ size_t meshopt_simplifyEdge
 		if (edge_collapse_count == 0)
 			break;
 
+#if TRACE
+		printf("pass %d:%c", int(pass_count++), TRACE >= 2 ? '\n' : ' ');
+#endif
+
 		rankEdgeCollapses(edge_collapses, edge_collapse_count, vertex_positions, vertex_attributes, vertex_quadrics, attribute_quadrics, attribute_gradients, attribute_count, remap);
 
 		sortEdgeCollapses(collapse_order, edge_collapses, edge_collapse_count);
@@ -1682,10 +1727,6 @@ size_t meshopt_simplifyEdge
 
 		memset(collapse_locked, 0, vertex_count);
 
-#if TRACE
-		printf("pass %d: ", int(pass_count++));
-#endif
-
 		size_t collapses = performEdgeCollapses(collapse_remap, collapse_locked, vertex_quadrics, attribute_quadrics, attribute_gradients, attribute_count, edge_collapses, edge_collapse_count, collapse_order, remap, wedge, vertex_kind, vertex_positions, adjacency, triangle_collapse_goal, error_limit, result_error);
 
 		// no edges can be collapsed any more due to hitting the error limit or triangle collapse limit
@@ -1705,16 +1746,20 @@ size_t meshopt_simplifyEdge
 	printf("result: %d triangles, error: %e; total %d passes\n", int(result_count / 3), sqrtf(result_error), int(pass_count));
 #endif
 
-#ifndef NDEBUG
-	if (meshopt_simplifyDebugKind)
-		memcpy(meshopt_simplifyDebugKind, vertex_kind, vertex_count);
-
-	if (meshopt_simplifyDebugLoop)
-		memcpy(meshopt_simplifyDebugLoop, loop, vertex_count * sizeof(unsigned int));
-
-	if (meshopt_simplifyDebugLoopBack)
-		memcpy(meshopt_simplifyDebugLoopBack, loopback, vertex_count * sizeof(unsigned int));
-#endif
+	// if debug visualization data is requested, fill it instead of index data; for simplicity, this doesn't work with sparsity
+	if ((options & meshopt_SimplifyInternalDebug) && !sparse_remap)
+	{
+		assert(Kind_Count <= 8 && vertex_count < (1 << 28)); // 3 bit kind, 1 bit loop
+
+		for (size_t i = 0; i < result_count; i += 3)
+		{
+			unsigned int a = result[i + 0], b = result[i + 1], c = result[i + 2];
+
+			result[i + 0] |= (vertex_kind[a] << 28) | (unsigned(loop[a] == b || loopback[b] == a) << 31);
+			result[i + 1] |= (vertex_kind[b] << 28) | (unsigned(loop[b] == c || loopback[c] == b) << 31);
+			result[i + 2] |= (vertex_kind[c] << 28) | (unsigned(loop[c] == a || loopback[a] == c) << 31);
		}
+	}
 
 	// convert resulting indices back into the dense space of the larger mesh
 	if (sparse_remap)
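The internal debug path above replaces the old meshopt_simplifyDebug* globals by packing per-corner metadata into the high bits of the returned indices when meshopt_SimplifyInternalDebug is passed in options. Purely as an illustration of that packing (the flag is internal to simplifier.cpp and not a supported interface), decoding could look like this:

// Illustration only: inverse of the packing in the internal debug path above
// (bits 0..27 = vertex index, bits 28..30 = vertex kind, bit 31 = loop edge flag).
inline unsigned int simplifyDebugIndex(unsigned int packed) { return packed & ((1u << 28) - 1); }
inline unsigned int simplifyDebugKind(unsigned int packed) { return (packed >> 28) & 7u; }
inline bool simplifyDebugLoop(unsigned int packed) { return (packed >> 31) != 0; }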
3rdparty/meshoptimizer/src/stripifier.cpp (vendored, 11 lines changed)

@@ -10,14 +10,14 @@
 namespace meshopt
 {
 
-static unsigned int findStripFirst(const unsigned int buffer[][3], unsigned int buffer_size, const unsigned int* valence)
+static unsigned int findStripFirst(const unsigned int buffer[][3], unsigned int buffer_size, const unsigned char* valence)
 {
 	unsigned int index = 0;
 	unsigned int iv = ~0u;
 
 	for (size_t i = 0; i < buffer_size; ++i)
 	{
-		unsigned int va = valence[buffer[i][0]], vb = valence[buffer[i][1]], vc = valence[buffer[i][2]];
+		unsigned char va = valence[buffer[i][0]], vb = valence[buffer[i][1]], vc = valence[buffer[i][2]];
 		unsigned int v = (va < vb && va < vc) ? va : (vb < vc ? vb : vc);
 
 		if (v < iv)
@@ -71,8 +71,9 @@ size_t meshopt_stripify
 	size_t strip_size = 0;
 
 	// compute vertex valence; this is used to prioritize starting triangle for strips
-	unsigned int* valence = allocator.allocate<unsigned int>(vertex_count);
-	memset(valence, 0, vertex_count * sizeof(unsigned int));
+	// note: we use 8-bit counters for performance; for outlier vertices the valence is incorrect but that just affects the heuristic
+	unsigned char* valence = allocator.allocate<unsigned char>(vertex_count);
+	memset(valence, 0, vertex_count);
 
 	for (size_t i = 0; i < index_count; ++i)
 	{
@@ -151,7 +152,7 @@ size_t meshopt_stripify
 		{
 			// if we didn't find anything, we need to find the next new triangle
 			// we use a heuristic to maximize the strip length
-			unsigned int i = findStripFirst(buffer, buffer_size, &valence[0]);
+			unsigned int i = findStripFirst(buffer, buffer_size, valence);
 			unsigned int a = buffer[i][0], b = buffer[i][1], c = buffer[i][2];
 
 			// ordered removal from the buffer
3rdparty/meshoptimizer/src/vertexcodec.cpp (vendored, 2 lines changed)

@@ -383,6 +383,7 @@ static const unsigned char* decodeVertexBlock
 	unsigned char transposed[kVertexBlockSizeBytes];
 
 	size_t vertex_count_aligned = (vertex_count + kByteGroupSize - 1) & ~(kByteGroupSize - 1);
+	assert(vertex_count <= vertex_count_aligned);
 
 	for (size_t k = 0; k < vertex_size; ++k)
 	{
@@ -1246,3 +1247,4 @@ int meshopt_decodeVertexBuffer
 #undef SIMD_WASM
 #undef SIMD_FALLBACK
 #undef SIMD_TARGET
+#undef SIMD_LATENCYOPT
3rdparty/meshoptimizer/src/vertexfilter.cpp (vendored, 6 lines changed)

@@ -1010,6 +1010,11 @@ void meshopt_encodeFilterExp
 			component_exp[j] = (min_exp < e) ? e : min_exp;
 		}
 	}
+	else
+	{
+		// the code below assumes component_exp is initialized outside of the loop
+		assert(mode == meshopt_EncodeExpSharedComponent);
+	}
 
 	for (size_t j = 0; j < stride_float; ++j)
 	{
@@ -1020,7 +1025,6 @@
 
 		// compute renormalized rounded mantissa for each component
 		int mmask = (1 << 24) - 1;
-
 		int m = int(v[j] * optexp2(-exp) + (v[j] >= 0 ? 0.5f : -0.5f));
 
 		d[j] = (m & mmask) | (unsigned(exp) << 24);