diff --git a/lmem.h b/lmem.h index 8c75a44b..c5dada9c 100644 --- a/lmem.h +++ b/lmem.h @@ -63,6 +63,8 @@ #define luaM_newobject(L,tag,s) luaM_malloc_(L, (s), tag) +#define luaM_newblock(L, size) luaM_newvector(L, size, char) + #define luaM_growvector(L,v,nelems,size,t,limit,e) \ ((v)=cast(t *, luaM_growaux_(L,v,nelems,&(size),sizeof(t), \ luaM_limitN(limit,t),e))) diff --git a/lobject.h b/lobject.h index e7f58cbd..74a6fd1e 100644 --- a/lobject.h +++ b/lobject.h @@ -744,7 +744,6 @@ typedef struct Table { unsigned int alimit; /* "limit" of 'array' array */ TValue *array; /* array part */ Node *node; - Node *lastfree; /* any free position is before this position */ struct Table *metatable; GCObject *gclist; } Table; diff --git a/ltable.c b/ltable.c index cc7993e0..485563f3 100644 --- a/ltable.c +++ b/ltable.c @@ -39,6 +39,27 @@ #include "lvm.h" +/* +** Only tables with hash parts larger than LIMFORLAST have a 'lastfree' +** field that optimizes finding a free slot. Smaller tables do a +** complete search when looking for a free slot. +*/ +#define LLIMFORLAST 2 /* log2 of LIMFORLAST */ +#define LIMFORLAST twoto(LLIMFORLAST) + +/* +** Union to store an int field ensuring that what follows it in +** memory is properly aligned to store a TValue. +*/ +typedef union { + int lastfree; + char padding[offsetof(struct { int i; TValue v; }, v)]; +} Limbox; + +#define haslastfree(t) ((t)->lsizenode > LLIMFORLAST) +#define getlastfree(t) (&((cast(Limbox *, (t)->node) - 1)->lastfree)) + + /* ** MAXABITS is the largest integer such that MAXASIZE fits in an ** unsigned int. 
@@ -367,8 +388,15 @@ int luaH_next (lua_State *L, Table *t, StkId key) { static void freehash (lua_State *L, Table *t) { - if (!isdummy(t)) - luaM_freearray(L, t->node, cast_sizet(sizenode(t))); + if (!isdummy(t)) { + size_t bsize = sizenode(t) * sizeof(Node); /* 'node' size in bytes */ + char *arr = cast_charp(t->node); + if (haslastfree(t)) { + bsize += sizeof(Limbox); + arr -= sizeof(Limbox); + } + luaM_freearray(L, arr, bsize); + } } @@ -479,7 +507,7 @@ static void setnodevector (lua_State *L, Table *t, unsigned int size) { if (size == 0) { /* no elements to hash part? */ t->node = cast(Node *, dummynode); /* use common 'dummynode' */ t->lsizenode = 0; - t->lastfree = NULL; /* signal that it is using dummy node */ + setdummy(t); /* signal that it is using dummy node */ } else { int i; @@ -487,15 +515,22 @@ static void setnodevector (lua_State *L, Table *t, unsigned int size) { if (lsize > MAXHBITS || (1u << lsize) > MAXHSIZE) luaG_runerror(L, "table overflow"); size = twoto(lsize); - t->node = luaM_newvector(L, size, Node); + if (lsize <= LLIMFORLAST) /* no 'lastfree' field? */ + t->node = luaM_newvector(L, size, Node); + else { + size_t bsize = size * sizeof(Node) + sizeof(Limbox); + char *node = luaM_newblock(L, bsize); + t->node = cast(Node *, node + sizeof(Limbox)); + *getlastfree(t) = size; /* all positions are free */ + } + t->lsizenode = cast_byte(lsize); + setnodummy(t); for (i = 0; i < cast_int(size); i++) { Node *n = gnode(t, i); gnext(n) = 0; setnilkey(n); setempty(gval(n)); } - t->lsizenode = cast_byte(lsize); - t->lastfree = gnode(t, size); /* all positions are free */ } } @@ -520,18 +555,21 @@ static void reinsert (lua_State *L, Table *ot, Table *t) { /* -** Exchange the hash part of 't1' and 't2'. +** Exchange the hash part of 't1' and 't2'. 
(In 'flags', only the +** dummy bit must be exchanged: The 'isrealasize' is not related +** to the hash part, and the metamethod bits do not change during +** a resize, so the "real" table can keep their values.) */ static void exchangehashpart (Table *t1, Table *t2) { lu_byte lsizenode = t1->lsizenode; Node *node = t1->node; - Node *lastfree = t1->lastfree; + int bitdummy1 = t1->flags & BITDUMMY; t1->lsizenode = t2->lsizenode; t1->node = t2->node; - t1->lastfree = t2->lastfree; + t1->flags = (t1->flags & NOTBITDUMMY) | (t2->flags & BITDUMMY); t2->lsizenode = lsizenode; t2->node = node; - t2->lastfree = lastfree; + t2->flags = (t2->flags & NOTBITDUMMY) | bitdummy1; } @@ -555,6 +593,7 @@ void luaH_resize (lua_State *L, Table *t, unsigned int newasize, unsigned int oldasize = setlimittosize(t); TValue *newarray; /* create new hash part with appropriate size into 'newt' */ + newt.flags = 0; setnodevector(L, &newt, nhsize); if (newasize < oldasize) { /* will array shrink? */ t->alimit = newasize; /* pretend array has new size... */ @@ -641,11 +680,22 @@ void luaH_free (lua_State *L, Table *t) { static Node *getfreepos (Table *t) { - if (!isdummy(t)) { - while (t->lastfree > t->node) { - t->lastfree--; - if (keyisnil(t->lastfree)) - return t->lastfree; + if (haslastfree(t)) { /* does it have 'lastfree' information? 
*/ + /* look for a spot before 'lastfree', updating 'lastfree' */ + while (*getlastfree(t) > 0) { + Node *free = gnode(t, --(*getlastfree(t))); + if (keyisnil(free)) + return free; + } + } + else { /* no 'lastfree' information */ + if (!isdummy(t)) { + int i = sizenode(t); + while (i--) { /* do a linear search */ + Node *free = gnode(t, i); + if (keyisnil(free)) + return free; + } } } return NULL; /* could not find a free place */ diff --git a/ltable.h b/ltable.h index 75dd9e26..dce8c2f7 100644 --- a/ltable.h +++ b/ltable.h @@ -23,8 +23,18 @@ #define invalidateTMcache(t) ((t)->flags &= ~maskflags) -/* true when 't' is using 'dummynode' as its hash part */ -#define isdummy(t) ((t)->lastfree == NULL) +/* +** Bit BITDUMMY set in 'flags' means the table is using the dummy node +** for its hash part. +*/ + +#define BITDUMMY (1 << 6) +#define NOTBITDUMMY cast_byte(~BITDUMMY) +#define isdummy(t) ((t)->flags & BITDUMMY) + +#define setnodummy(t) ((t)->flags &= NOTBITDUMMY) +#define setdummy(t) ((t)->flags |= BITDUMMY) + /* allocated size for hash nodes */ diff --git a/ltests.c b/ltests.c index 4a0a6af1..1caed04c 100644 --- a/ltests.c +++ b/ltests.c @@ -999,9 +999,8 @@ static int table_query (lua_State *L) { if (i == -1) { lua_pushinteger(L, asize); lua_pushinteger(L, allocsizenode(t)); - lua_pushinteger(L, isdummy(t) ? 0 : t->lastfree - t->node); lua_pushinteger(L, t->alimit); - return 4; + return 3; } else if ((unsigned int)i < asize) { lua_pushinteger(L, i); diff --git a/ltm.h b/ltm.h index 73b833c6..f3872655 100644 --- a/ltm.h +++ b/ltm.h @@ -48,8 +48,8 @@ typedef enum { /* ** Mask with 1 in all fast-access methods. A 1 in any of these bits ** in the flag of a (meta)table means the metatable does not have the -** corresponding metamethod field. (Bit 7 of the flag is used for -** 'isrealasize'.) +** corresponding metamethod field. (Bit 6 of the flag indicates that +** the table is using the dummy node; bit 7 is used for 'isrealasize'.) 
*/ #define maskflags (~(~0u << (TM_EQ + 1))) diff --git a/testes/nextvar.lua b/testes/nextvar.lua index 0874e5bb..80b3d05c 100644 --- a/testes/nextvar.lua +++ b/testes/nextvar.lua @@ -210,9 +210,9 @@ assert(T.querytab(a) == 64) -- array part has 64 elements a[32] = true; a[48] = true; -- binary search will find these ones a[51] = true -- binary search will miss this one assert(#a == 48) -- this will set the limit -assert(select(4, T.querytab(a)) == 48) -- this is the limit now +assert(select(3, T.querytab(a)) == 48) -- this is the limit now a[50] = true -- this will set a new limit -assert(select(4, T.querytab(a)) == 50) -- this is the limit now +assert(select(3, T.querytab(a)) == 50) -- this is the limit now -- but the size is larger (and still inside the array part) assert(#a == 51)