Fixes #1671 and Fixes #1139. Flush the nanovg draw commands whenever the next draw would overflow the uint16_t index type. (#3207)

Martijn Courteaux 2023-11-25 18:23:05 +01:00 committed by GitHub
parent 2d17b36a61
commit 6dea6a22b6

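In short, the commit guards every render callback with an overflow check before it queues more geometry. The sketch below only restates the hunks that follow (glnvg__flushIfNeeded, nvgRenderFlush, and gl->nverts as the running vertex count are all taken from the diff), it is not additional API:

static void glnvg__flushIfNeeded(struct GLNVGcontext* gl, int nverts)
{
	// The renderer emits 16-bit indices, so if queuing another `nverts`
	// vertices would push the running count past UINT16_MAX, submit
	// everything queued so far and start a fresh batch.
	if (gl->nverts + nverts > UINT16_MAX)
	{
		nvgRenderFlush(gl);
	}
}

Each entry point (nvgRenderFill, nvgRenderStroke, nvgRenderTriangles) now computes its worst-case vertex count up front and calls glnvg__flushIfNeeded() before glnvg__allocCall(), so a single draw call is never split across the 16-bit index limit.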

@@ -556,8 +556,12 @@ namespace
static void fan(uint32_t _start, uint32_t _count)
{
uint32_t numTris = _count-2;
BX_ASSERT(_count >= 3, "less than one triangle");
BX_ASSERT(_start + ((numTris - 1) * 3) + 2 <= UINT16_MAX, "index overflow");
bgfx::TransientIndexBuffer tib;
bgfx::allocTransientIndexBuffer(&tib, numTris*3);
BX_ASSERT(tib.size == numTris*3*(tib.isIndex16 ? 2 : 4), "did not get enough room for indices");
uint16_t* data = (uint16_t*)tib.data;
for (uint32_t ii = 0; ii < numTris; ++ii)
{
@@ -823,6 +827,7 @@ _cleanup:
return count;
}
static int glnvg__mini(int a, int b) { return a < b ? a : b; }
static int glnvg__maxi(int a, int b) { return a > b ? a : b; }
static struct GLNVGcall* glnvg__allocCall(struct GLNVGcontext* gl)
@@ -856,11 +861,15 @@ _cleanup:
static int glnvg__allocVerts(GLNVGcontext* gl, int n)
{
// Before calling this function, make sure that glnvg__flushIfNeeded()
// was called, before allocating the GLNVGcall.
int ret = 0;
BX_ASSERT(gl->nverts + n <= UINT16_MAX, "index overflow is imminent, please flush.");
if (gl->nverts+n > gl->cverts)
{
NVGvertex* verts;
int cverts = glnvg__maxi(gl->nverts + n, 4096) + gl->cverts/2; // 1.5x Overallocate
cverts = glnvg__mini(cverts, UINT16_MAX);
verts = (NVGvertex*)bx::realloc(gl->allocator, gl->verts, sizeof(NVGvertex) * cverts);
if (verts == NULL) return -1;
gl->verts = verts;
@@ -892,6 +901,12 @@ _cleanup:
vtx->v = v;
}
static void glnvg__flushIfNeeded(struct GLNVGcontext *gl, int nverts) {
if (gl->nverts + nverts > UINT16_MAX) {
nvgRenderFlush(gl);
}
}
static void nvgRenderFill(
void* _userPtr
, NVGpaint* paint
@@ -904,11 +919,13 @@ _cleanup:
)
{
struct GLNVGcontext* gl = (struct GLNVGcontext*)_userPtr;
int maxverts = glnvg__maxVertCount(paths, npaths) + 6;
glnvg__flushIfNeeded(gl, maxverts);
struct GLNVGcall* call = glnvg__allocCall(gl);
struct NVGvertex* quad;
struct GLNVGfragUniforms* frag;
int i, maxverts, offset;
int i, offset;
call->type = GLNVG_FILL;
call->pathOffset = glnvg__allocPaths(gl, npaths);
@@ -922,7 +939,6 @@ _cleanup:
}
// Allocate vertices for all the paths.
maxverts = glnvg__maxVertCount(paths, npaths) + 6;
offset = glnvg__allocVerts(gl, maxverts);
for (i = 0; i < npaths; i++)
@@ -990,9 +1006,11 @@ _cleanup:
)
{
struct GLNVGcontext* gl = (struct GLNVGcontext*)_userPtr;
int maxverts = glnvg__maxVertCount(paths, npaths);
glnvg__flushIfNeeded(gl, maxverts);
struct GLNVGcall* call = glnvg__allocCall(gl);
int i, maxverts, offset;
int i, offset;
call->type = GLNVG_STROKE;
call->pathOffset = glnvg__allocPaths(gl, npaths);
@@ -1001,7 +1019,6 @@ _cleanup:
call->blendFunc = glnvg__blendCompositeOperation(compositeOperation);
// Allocate vertices for all the paths.
maxverts = glnvg__maxVertCount(paths, npaths);
offset = glnvg__allocVerts(gl, maxverts);
for (i = 0; i < npaths; i++)
@@ -1009,6 +1026,7 @@ _cleanup:
struct GLNVGpath* copy = &gl->paths[call->pathOffset + i];
const struct NVGpath* path = &paths[i];
bx::memSet(copy, 0, sizeof(struct GLNVGpath) );
BX_ASSERT(path->nfill == 0, "strokes should not have any fill");
if (path->nstroke)
{
copy->strokeOffset = offset;
@@ -1027,6 +1045,8 @@ _cleanup:
const struct NVGvertex* verts, int nverts)
{
struct GLNVGcontext* gl = (struct GLNVGcontext*)_userPtr;
glnvg__flushIfNeeded(gl, nverts);
struct GLNVGcall* call = glnvg__allocCall(gl);
struct GLNVGfragUniforms* frag;