Tables have 'lastfree' information only when needed

Only tables with some minimum number of entries in their hash part
have a 'lastfree' field, kept in a header before the node vector.
Roberto Ierusalimschy 2022-11-01 15:42:08 -03:00
parent ee645472eb
commit 8047b2d03e
7 changed files with 84 additions and 24 deletions
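
The idea, sketched below in standalone C (TValue, Node, newnodes and freenodes are simplified stand-ins invented for this illustration; only the Limbox and 'lastfree' names mirror the commit): allocate the hash part as one block, place an int-sized, alignment-padded header in front of the node vector, point the table at the first node, and reach the header by stepping one Limbox back from that pointer.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { double d; int tag; } TValue;          /* stand-in only */
typedef struct { TValue i_val; TValue i_key; } Node;   /* stand-in only */

/* header kept before the node vector; the padding member makes the
   header large enough that the nodes placed after it stay TValue-aligned */
typedef union {
  int lastfree;
  char padding[offsetof(struct { int i; TValue v; }, v)];
} Limbox;

#define getlastfree(nodes)  (&(((Limbox *)(nodes) - 1)->lastfree))

/* allocate 'n' nodes preceded by a Limbox; return the node vector
   (error handling omitted for brevity) */
static Node *newnodes (unsigned n) {
  char *block = malloc(sizeof(Limbox) + n * sizeof(Node));
  Node *nodes = (Node *)(block + sizeof(Limbox));
  *getlastfree(nodes) = (int)n;  /* all positions are free */
  return nodes;
}

/* free such a vector: step back to the start of the allocation */
static void freenodes (Node *nodes) {
  free((char *)nodes - sizeof(Limbox));
}

int main (void) {
  Node *nodes = newnodes(8);
  printf("free positions: %d\n", *getlastfree(nodes));  /* prints 8 */
  freenodes(nodes);
  return 0;
}

Small hash parts skip the header entirely; in the real code the header's presence is deducible from 'lsizenode', so the allocation path (setnodevector) and the free path (freehash) below must make the same decision.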

lmem.h

@@ -63,6 +63,8 @@
 #define luaM_newobject(L,tag,s)	luaM_malloc_(L, (s), tag)
+#define luaM_newblock(L, size)	luaM_newvector(L, size, char)
 #define luaM_growvector(L,v,nelems,size,t,limit,e) \
	((v)=cast(t *, luaM_growaux_(L,v,nelems,&(size),sizeof(t), \
                   luaM_limitN(limit,t),e)))

lobject.h

@@ -744,7 +744,6 @@ typedef struct Table {
   unsigned int alimit;  /* "limit" of 'array' array */
   TValue *array;  /* array part */
   Node *node;
-  Node *lastfree;  /* any free position is before this position */
   struct Table *metatable;
   GCObject *gclist;
 } Table;

ltable.c

@@ -39,6 +39,27 @@
 #include "lvm.h"

+
+/*
+** Only tables with hash parts larger than LIMFORLAST have a 'lastfree'
+** field that optimizes finding a free slot. Smaller tables do a
+** complete search when looking for a free slot.
+*/
+#define LLIMFORLAST	2  /* log2 of LIMFORLAST */
+#define LIMFORLAST	twoto(LLIMFORLAST)
+
+/*
+** Union to store an int field ensuring that what follows it in
+** memory is properly aligned to store a TValue.
+*/
+typedef union {
+  int lastfree;
+  char padding[offsetof(struct { int i; TValue v; }, v)];
+} Limbox;
+
+#define haslastfree(t)	((t)->lsizenode > LLIMFORLAST)
+
+#define getlastfree(t)	(&((cast(Limbox *, (t)->node) - 1)->lastfree))
+
+
 /*
 ** MAXABITS is the largest integer such that MAXASIZE fits in an
 ** unsigned int.
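
An aside on the 'Limbox' padding (TValue below is a stand-in; the concrete numbers are platform-dependent): offsetof over a struct holding an int followed by a TValue already accounts for whatever padding the compiler needs between the two, so a union at least that large keeps whatever is placed immediately after it aligned for a TValue. A quick check of that assumption:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

typedef struct { double d; int tag; } TValue;  /* stand-in only */

typedef union {
  int lastfree;
  char padding[offsetof(struct { int i; TValue v; }, v)];
} Limbox;

int main (void) {
  size_t off = offsetof(struct { int i; TValue v; }, v);
  printf("offset of a TValue placed after an int: %zu\n", off);
  printf("sizeof(Limbox):                         %zu\n", sizeof(Limbox));
  /* on common ABIs the union's size is a multiple of TValue's alignment,
     so a node vector starting sizeof(Limbox) bytes into a malloc'ed
     block is correctly aligned for its TValues */
  assert(sizeof(Limbox) >= off);
  assert(sizeof(Limbox) % _Alignof(TValue) == 0);
  return 0;
}
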
@@ -367,8 +388,15 @@ int luaH_next (lua_State *L, Table *t, StkId key) {
 static void freehash (lua_State *L, Table *t) {
-  if (!isdummy(t))
-    luaM_freearray(L, t->node, cast_sizet(sizenode(t)));
+  if (!isdummy(t)) {
+    size_t bsize = sizenode(t) * sizeof(Node);  /* 'node' size in bytes */
+    char *arr = cast_charp(t->node);
+    if (haslastfree(t)) {
+      bsize += sizeof(Limbox);
+      arr -= sizeof(Limbox);
+    }
+    luaM_freearray(L, arr, bsize);
+  }
 }
@@ -479,7 +507,7 @@ static void setnodevector (lua_State *L, Table *t, unsigned int size) {
   if (size == 0) {  /* no elements to hash part? */
     t->node = cast(Node *, dummynode);  /* use common 'dummynode' */
     t->lsizenode = 0;
-    t->lastfree = NULL;  /* signal that it is using dummy node */
+    setdummy(t);  /* signal that it is using dummy node */
   }
   else {
     int i;
@@ -487,15 +515,22 @@ static void setnodevector (lua_State *L, Table *t, unsigned int size) {
     if (lsize > MAXHBITS || (1u << lsize) > MAXHSIZE)
       luaG_runerror(L, "table overflow");
     size = twoto(lsize);
-    t->node = luaM_newvector(L, size, Node);
+    if (lsize <= LLIMFORLAST)  /* no 'lastfree' field? */
+      t->node = luaM_newvector(L, size, Node);
+    else {
+      size_t bsize = size * sizeof(Node) + sizeof(Limbox);
+      char *node = luaM_newblock(L, bsize);
+      t->node = cast(Node *, node + sizeof(Limbox));
+      *getlastfree(t) = size;  /* all positions are free */
+    }
+    t->lsizenode = cast_byte(lsize);
+    setnodummy(t);
     for (i = 0; i < cast_int(size); i++) {
       Node *n = gnode(t, i);
       gnext(n) = 0;
       setnilkey(n);
       setempty(gval(n));
     }
-    t->lsizenode = cast_byte(lsize);
-    t->lastfree = gnode(t, size);  /* all positions are free */
   }
 }
@@ -520,18 +555,21 @@ static void reinsert (lua_State *L, Table *ot, Table *t) {
 /*
-** Exchange the hash part of 't1' and 't2'.
+** Exchange the hash part of 't1' and 't2'. (In 'flags', only the
+** dummy bit must be exchanged: The 'isrealasize' is not related
+** to the hash part, and the metamethod bits do not change during
+** a resize, so the "real" table can keep their values.)
 */
 static void exchangehashpart (Table *t1, Table *t2) {
   lu_byte lsizenode = t1->lsizenode;
   Node *node = t1->node;
-  Node *lastfree = t1->lastfree;
+  int bitdummy1 = t1->flags & BITDUMMY;
   t1->lsizenode = t2->lsizenode;
   t1->node = t2->node;
-  t1->lastfree = t2->lastfree;
+  t1->flags = (t1->flags & NOTBITDUMMY) | (t2->flags & BITDUMMY);
   t2->lsizenode = lsizenode;
   t2->node = node;
-  t2->lastfree = lastfree;
+  t2->flags = (t2->flags & NOTBITDUMMY) | bitdummy1;
 }
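
What exchangehashpart does with 'flags' is swap a single bit between two bytes while leaving every other bit in place. A minimal sketch of that pattern, operating on hypothetical flag values rather than real tables:

#include <assert.h>

#define BITDUMMY     (1 << 6)
#define NOTBITDUMMY  ((unsigned char)~BITDUMMY)

/* swap only BITDUMMY between *f1 and *f2; all other bits keep their values */
static void swapdummybit (unsigned char *f1, unsigned char *f2) {
  int bitdummy1 = *f1 & BITDUMMY;
  *f1 = (*f1 & NOTBITDUMMY) | (*f2 & BITDUMMY);
  *f2 = (*f2 & NOTBITDUMMY) | bitdummy1;
}

int main (void) {
  unsigned char f1 = 0x85;             /* bit 7 plus some low bits set */
  unsigned char f2 = 0x03 | BITDUMMY;  /* dummy bit plus other low bits set */
  swapdummybit(&f1, &f2);
  assert((f1 & BITDUMMY) != 0 && (f2 & BITDUMMY) == 0);          /* bit 6 swapped */
  assert((f1 & ~BITDUMMY) == 0x85 && (f2 & ~BITDUMMY) == 0x03);  /* rest untouched */
  return 0;
}
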
@@ -555,6 +593,7 @@ void luaH_resize (lua_State *L, Table *t, unsigned int newasize,
   unsigned int oldasize = setlimittosize(t);
   TValue *newarray;
   /* create new hash part with appropriate size into 'newt' */
+  newt.flags = 0;
   setnodevector(L, &newt, nhsize);
   if (newasize < oldasize) {  /* will array shrink? */
     t->alimit = newasize;  /* pretend array has new size... */
@@ -641,11 +680,22 @@ void luaH_free (lua_State *L, Table *t) {
 static Node *getfreepos (Table *t) {
-  if (!isdummy(t)) {
-    while (t->lastfree > t->node) {
-      t->lastfree--;
-      if (keyisnil(t->lastfree))
-        return t->lastfree;
+  if (haslastfree(t)) {  /* does it have 'lastfree' information? */
+    /* look for a spot before 'lastfree', updating 'lastfree' */
+    while (*getlastfree(t) > 0) {
+      Node *free = gnode(t, --(*getlastfree(t)));
+      if (keyisnil(free))
+        return free;
+    }
+  }
+  else {  /* no 'lastfree' information */
+    if (!isdummy(t)) {
+      int i = sizenode(t);
+      while (i--) {  /* do a linear search */
+        Node *free = gnode(t, i);
+        if (keyisnil(free))
+          return free;
+      }
     }
   }
   return NULL;  /* could not find a free place */
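
The two branches of the new getfreepos are two different search strategies; the sketch below reproduces them over a toy 'occupied' array (invented for this illustration, not the Lua data structures). With a stored 'lastfree' index the search resumes where the previous call stopped; without it, every call rescans the whole vector.

#include <assert.h>

/* strategy for larger tables: consume positions below *lastfree */
static int findfree_lastfree (const int *occupied, int *lastfree) {
  while (*lastfree > 0) {
    int i = --(*lastfree);
    if (!occupied[i])
      return i;
  }
  return -1;  /* no free position */
}

/* strategy for small tables: full linear search on every call */
static int findfree_scan (const int *occupied, int size) {
  int i = size;
  while (i--) {
    if (!occupied[i])
      return i;
  }
  return -1;  /* no free position */
}

int main (void) {
  int occupied[8] = {1, 1, 0, 1, 0, 1, 1, 1};
  int lastfree = 8;  /* all positions initially unvisited */
  assert(findfree_lastfree(occupied, &lastfree) == 4);
  assert(findfree_lastfree(occupied, &lastfree) == 2);  /* resumes below 4 */
  assert(findfree_scan(occupied, 8) == 4);
  assert(findfree_scan(occupied, 8) == 4);  /* always rescans from the top */
  return 0;
}

(As in Lua, the returned slot is filled by the caller, so neither helper marks it here.)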

ltable.h

@@ -23,8 +23,18 @@
 #define invalidateTMcache(t)	((t)->flags &= ~maskflags)

-/* true when 't' is using 'dummynode' as its hash part */
-#define isdummy(t)		((t)->lastfree == NULL)
+/*
+** Bit BITDUMMY set in 'flags' means the table is using the dummy node
+** for its hash part.
+*/
+#define BITDUMMY		(1 << 6)
+#define NOTBITDUMMY		cast_byte(~BITDUMMY)
+#define isdummy(t)		((t)->flags & BITDUMMY)
+
+#define setnodummy(t)		((t)->flags &= NOTBITDUMMY)
+#define setdummy(t)		((t)->flags |= BITDUMMY)

 /* allocated size for hash nodes */

ltests.c

@@ -999,9 +999,8 @@ static int table_query (lua_State *L) {
   if (i == -1) {
     lua_pushinteger(L, asize);
     lua_pushinteger(L, allocsizenode(t));
-    lua_pushinteger(L, isdummy(t) ? 0 : t->lastfree - t->node);
     lua_pushinteger(L, t->alimit);
-    return 4;
+    return 3;
   }
   else if ((unsigned int)i < asize) {
     lua_pushinteger(L, i);

ltm.h

@@ -48,8 +48,8 @@ typedef enum {
 /*
 ** Mask with 1 in all fast-access methods. A 1 in any of these bits
 ** in the flag of a (meta)table means the metatable does not have the
-** corresponding metamethod field. (Bit 7 of the flag is used for
-** 'isrealasize'.)
+** corresponding metamethod field. (Bit 6 of the flag indicates that
+** the table is using the dummy node; bit 7 is used for 'isrealasize'.)
 */
 #define maskflags	(~(~0u << (TM_EQ + 1)))
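
Putting the 'flags' byte together (a hypothetical layout check: the TM_EQ value below mirrors ltm.h's enum order but is an assumption of this sketch, as is the BITRAS name for the 'isrealasize' bit): the metamethod-cache bits sit in the low bits covered by maskflags, the dummy bit is bit 6, 'isrealasize' is bit 7, and clearing the cache the way invalidateTMcache does leaves the top two bits alone.

#include <assert.h>

/* stand-ins for this sketch; in Lua these come from ltm.h and ltable.h */
#define TM_EQ        5                        /* last fast-access metamethod */
#define maskflags    (~(~0u << (TM_EQ + 1)))  /* bits 0..5: metamethod cache */
#define BITDUMMY     (1 << 6)                 /* bit 6: hash part is dummy */
#define BITRAS       (1 << 7)                 /* bit 7: 'isrealasize' */

int main (void) {
  unsigned char flags = (unsigned char)(maskflags | BITDUMMY | BITRAS);
  /* the three groups of bits do not overlap */
  assert((maskflags & (BITDUMMY | BITRAS)) == 0);
  /* invalidating the metamethod cache keeps bits 6 and 7 intact */
  flags &= (unsigned char)~maskflags;
  assert(flags == (BITDUMMY | BITRAS));
  return 0;
}
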

testes/nextvar.lua

@@ -210,9 +210,9 @@ assert(T.querytab(a) == 64)   -- array part has 64 elements
 a[32] = true; a[48] = true    -- binary search will find these ones
 a[51] = true  -- binary search will miss this one
 assert(#a == 48)   -- this will set the limit
-assert(select(4, T.querytab(a)) == 48)  -- this is the limit now
+assert(select(3, T.querytab(a)) == 48)  -- this is the limit now
 a[50] = true    -- this will set a new limit
-assert(select(4, T.querytab(a)) == 50)  -- this is the limit now
+assert(select(3, T.querytab(a)) == 50)  -- this is the limit now
 -- but the size is larger (and still inside the array part)
 assert(#a == 51)