mirror of https://github.com/postgres/postgres
Remove the limit on the number of entries allowed in catcaches, and
remove the infrastructure needed to enforce the limit, ie, the global LRU list of cache entries. On small-to-middling databases this wins because maintaining the LRU list is a waste of time. On large databases this wins because it's better to keep more cache entries (we assume such users can afford to use some more per-backend memory than was contemplated in the Berkeley-era catcache design). This provides a noticeable improvement in the speed of psql \d on a 10000-table database, though it doesn't make it instantaneous. While at it, use per-catcache settings for the number of hash buckets per catcache, rather than the former one-size-fits-all value. It's a bit silly to be using the same number of hash buckets for, eg, pg_am and pg_attribute. The specific values I used might need some tuning, but they seem to be in the right ballpark based on CATCACHE_STATS results from the standard regression tests.
This commit is contained in:
parent e1e133f264
commit 8b9bc234ad
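The patch requires every per-cache bucket count to be a power of two; the new InitCatCache (in the diff below) asserts nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets. A minimal, self-contained C sketch of why that property is handy -- the function and names here are illustrative only, not the actual catcache macros:

/*
 * Illustrative sketch (not PostgreSQL source): a positive n is a power of
 * two exactly when (n & -n) == n, and with a power-of-two bucket count the
 * bucket for a hash value can be picked with a cheap bit mask instead of an
 * integer modulo.
 */
#include <assert.h>
#include <stdint.h>

static unsigned int
pick_bucket(uint32_t hash_value, int nbuckets)
{
	assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
	return hash_value & (uint32_t) (nbuckets - 1);	/* same result as hash_value % nbuckets */
}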
src/backend/utils/cache/catcache.c

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.128 2006/03/05 15:58:45 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.129 2006/06/15 02:08:09 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -37,20 +37,6 @@
 
 /* #define CACHEDEBUG */	/* turns DEBUG elogs on */
 
-/*
- * Constants related to size of the catcache.
- *
- * NCCBUCKETS must be a power of two and must be less than 64K (because
- * SharedInvalCatcacheMsg crams hash indexes into a uint16 field). In
- * practice it should be a lot less, anyway, to avoid chewing up too much
- * space on hash bucket headers.
- *
- * MAXCCTUPLES could be as small as a few hundred, if per-backend memory
- * consumption is at a premium.
- */
-#define NCCBUCKETS 256		/* Hash buckets per CatCache */
-#define MAXCCTUPLES 5000	/* Maximum # of tuples in all caches */
-
 /*
  * Given a hash value and the size of the hash table, find the bucket
  * in which the hash value belongs. Since the hash table must contain
@@ -89,7 +75,7 @@ static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache,
 						HeapTuple tuple);
 
 #ifdef CATCACHE_STATS
-static void CatCachePrintStats(void);
+static void CatCachePrintStats(int code, Datum arg);
 #endif
 static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
 static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
@@ -97,7 +83,6 @@ static void CatalogCacheInitializeCache(CatCache *cache);
 static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
 						uint32 hashValue, Index hashIndex,
 						bool negative);
-static void CatalogCacheCleanup(CatCTup *savect);
 static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys);
 
 
@@ -281,7 +266,7 @@ CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
 #ifdef CATCACHE_STATS
 
 static void
-CatCachePrintStats(void)
+CatCachePrintStats(int code, Datum arg)
 {
 	CatCache   *cache;
 	long		cc_searches = 0;
@@ -289,18 +274,14 @@ CatCachePrintStats(void)
 	long		cc_neg_hits = 0;
 	long		cc_newloads = 0;
 	long		cc_invals = 0;
-	long		cc_discards = 0;
 	long		cc_lsearches = 0;
 	long		cc_lhits = 0;
 
-	elog(DEBUG2, "catcache stats dump: %d/%d tuples in catcaches",
-		 CacheHdr->ch_ntup, CacheHdr->ch_maxtup);
-
 	for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
 	{
 		if (cache->cc_ntup == 0 && cache->cc_searches == 0)
 			continue;			/* don't print unused caches */
-		elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards, %ld lsrch, %ld lhits",
+		elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
 			 cache->cc_relname,
 			 cache->cc_indexoid,
 			 cache->cc_ntup,
@@ -312,7 +293,6 @@ CatCachePrintStats(void)
 			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
 			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
 			 cache->cc_invals,
-			 cache->cc_discards,
 			 cache->cc_lsearches,
 			 cache->cc_lhits);
 		cc_searches += cache->cc_searches;
@@ -320,11 +300,10 @@ CatCachePrintStats(void)
 		cc_neg_hits += cache->cc_neg_hits;
 		cc_newloads += cache->cc_newloads;
 		cc_invals += cache->cc_invals;
-		cc_discards += cache->cc_discards;
 		cc_lsearches += cache->cc_lsearches;
 		cc_lhits += cache->cc_lhits;
 	}
-	elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards, %ld lsrch, %ld lhits",
+	elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
 		 CacheHdr->ch_ntup,
 		 cc_searches,
 		 cc_hits,
@@ -334,7 +313,6 @@ CatCachePrintStats(void)
 		 cc_searches - cc_hits - cc_neg_hits - cc_newloads,
 		 cc_searches - cc_hits - cc_neg_hits,
 		 cc_invals,
-		 cc_discards,
 		 cc_lsearches,
 		 cc_lhits);
 }
@@ -367,8 +345,7 @@ CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
 		return;					/* nothing left to do */
 	}
 
-	/* delink from linked lists */
-	DLRemove(&ct->lrulist_elem);
+	/* delink from linked list */
 	DLRemove(&ct->cache_elem);
 
 	/* free associated tuple data */
@@ -568,11 +545,13 @@ AtEOXact_CatCache(bool isCommit)
 	if (assert_enabled)
 	{
 		CatCache   *ccp;
-		Dlelem	   *elt;
 
-		/* Check CatCLists */
 		for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
 		{
+			Dlelem	   *elt;
+			int			i;
+
+			/* Check CatCLists */
 			for (elt = DLGetHead(&ccp->cc_lists); elt; elt = DLGetSucc(elt))
 			{
 				CatCList   *cl = (CatCList *) DLE_VAL(elt);
@@ -581,16 +560,21 @@ AtEOXact_CatCache(bool isCommit)
 				Assert(cl->refcount == 0);
 				Assert(!cl->dead);
 			}
-		}
 
-		/* Check individual tuples */
-		for (elt = DLGetHead(&CacheHdr->ch_lrulist); elt; elt = DLGetSucc(elt))
-		{
-			CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
+			/* Check individual tuples */
+			for (i = 0; i < ccp->cc_nbuckets; i++)
+			{
+				for (elt = DLGetHead(&ccp->cc_bucket[i]);
+					 elt;
+					 elt = DLGetSucc(elt))
+				{
+					CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
 
-			Assert(ct->ct_magic == CT_MAGIC);
-			Assert(ct->refcount == 0);
-			Assert(!ct->dead);
+					Assert(ct->ct_magic == CT_MAGIC);
+					Assert(ct->refcount == 0);
+					Assert(!ct->dead);
+				}
+			}
 		}
 	}
 #endif
@@ -796,12 +780,27 @@ InitCatCache(int id,
 			 Oid indexoid,
 			 int reloidattr,
 			 int nkeys,
-			 const int *key)
+			 const int *key,
+			 int nbuckets)
 {
 	CatCache   *cp;
 	MemoryContext oldcxt;
 	int			i;
 
+	/*
+	 * nbuckets is the number of hash buckets to use in this catcache.
+	 * Currently we just use a hard-wired estimate of an appropriate size
+	 * for each cache; maybe later make them dynamically resizable?
+	 *
+	 * nbuckets must be a power of two. We check this via Assert rather than
+	 * a full runtime check because the values will be coming from constant
+	 * tables.
+	 *
+	 * If you're confused by the power-of-two check, see comments in
+	 * bitmapset.c for an explanation.
+	 */
+	Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
+
 	/*
 	 * first switch to the cache context so our allocations do not vanish at
 	 * the end of a transaction
@@ -812,17 +811,15 @@ InitCatCache(int id,
 	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
 
 	/*
-	 * if first time through, initialize the cache group header, including
-	 * global LRU list header
+	 * if first time through, initialize the cache group header
 	 */
 	if (CacheHdr == NULL)
 	{
 		CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
 		CacheHdr->ch_caches = NULL;
 		CacheHdr->ch_ntup = 0;
-		CacheHdr->ch_maxtup = MAXCCTUPLES;
-		DLInitList(&CacheHdr->ch_lrulist);
 #ifdef CATCACHE_STATS
+		/* set up to dump stats at backend exit */
 		on_proc_exit(CatCachePrintStats, 0);
 #endif
 	}
@@ -832,7 +829,7 @@ InitCatCache(int id,
 	 *
 	 * Note: we assume zeroing initializes the Dllist headers correctly
 	 */
-	cp = (CatCache *) palloc0(sizeof(CatCache) + NCCBUCKETS * sizeof(Dllist));
+	cp = (CatCache *) palloc0(sizeof(CatCache) + nbuckets * sizeof(Dllist));
 
 	/*
 	 * initialize the cache's relation information for the relation
@@ -847,7 +844,7 @@ InitCatCache(int id,
 	cp->cc_tupdesc = (TupleDesc) NULL;
 	cp->cc_reloidattr = reloidattr;
 	cp->cc_ntup = 0;
-	cp->cc_nbuckets = NCCBUCKETS;
+	cp->cc_nbuckets = nbuckets;
 	cp->cc_nkeys = nkeys;
 	for (i = 0; i < nkeys; ++i)
 		cp->cc_key[i] = key[i];
@@ -1162,13 +1159,11 @@ SearchCatCache(CatCache *cache,
 			continue;
 
 		/*
-		 * we found a match in the cache: move it to the front of the global
-		 * LRU list. We also move it to the front of the list for its
-		 * hashbucket, in order to speed subsequent searches. (The most
-		 * frequently accessed elements in any hashbucket will tend to be near
-		 * the front of the hashbucket's list.)
+		 * We found a match in the cache. Move it to the front of the list
+		 * for its hashbucket, in order to speed subsequent searches. (The
+		 * most frequently accessed elements in any hashbucket will tend to be
+		 * near the front of the hashbucket's list.)
 		 */
-		DLMoveToFront(&ct->lrulist_elem);
 		DLMoveToFront(&ct->cache_elem);
 
 		/*
@@ -1414,14 +1409,12 @@ SearchCatCacheList(CatCache *cache,
 			continue;
 
 		/*
-		 * We found a matching list: mark it as touched since the last
-		 * CatalogCacheCleanup() sweep. Also move the list to the front of
-		 * the cache's list-of-lists, to speed subsequent searches. (We do not
+		 * We found a matching list. Move the list to the front of the
+		 * cache's list-of-lists, to speed subsequent searches. (We do not
 		 * move the members to the fronts of their hashbucket lists, however,
 		 * since there's no point in that unless they are searched for
 		 * individually.)
 		 */
-		cl->touched = true;
 		DLMoveToFront(&cl->cache_elem);
 
 		/* Bump the list's refcount and return it */
@@ -1504,10 +1497,7 @@ SearchCatCacheList(CatCache *cache,
 			if (ct->c_list)
 				continue;
 
-			/* Found a match, so move it to front */
-			DLMoveToFront(&ct->lrulist_elem);
-
-			break;
+			break;				/* A-OK */
 		}
 
 		if (elt == NULL)
@@ -1577,7 +1567,6 @@ SearchCatCacheList(CatCache *cache,
 	cl->refcount = 0;			/* for the moment */
 	cl->dead = false;
 	cl->ordered = ordered;
-	cl->touched = false;		/* we already moved members to front */
 	cl->nkeys = nkeys;
 	cl->hash_value = lHashValue;
 	cl->n_members = nmembers;
@@ -1654,11 +1643,10 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
 
 	/*
 	 * Finish initializing the CatCTup header, and add it to the cache's
-	 * linked lists and counts.
+	 * linked list and counts.
 	 */
 	ct->ct_magic = CT_MAGIC;
 	ct->my_cache = cache;
-	DLInitElem(&ct->lrulist_elem, (void *) ct);
 	DLInitElem(&ct->cache_elem, (void *) ct);
 	ct->c_list = NULL;
 	ct->refcount = 0;			/* for the moment */
@@ -1666,97 +1654,14 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
 	ct->negative = negative;
 	ct->hash_value = hashValue;
 
-	DLAddHead(&CacheHdr->ch_lrulist, &ct->lrulist_elem);
 	DLAddHead(&cache->cc_bucket[hashIndex], &ct->cache_elem);
 
 	cache->cc_ntup++;
 	CacheHdr->ch_ntup++;
 
-	/*
-	 * If we've exceeded the desired size of the caches, try to throw away the
-	 * least recently used entry(s). NB: be careful not to throw away the
-	 * newly-built entry...
-	 */
-	if (CacheHdr->ch_ntup > CacheHdr->ch_maxtup)
-		CatalogCacheCleanup(ct);
-
 	return ct;
 }
 
-/*
- * CatalogCacheCleanup
- *		Try to reduce the size of the catcaches when they get too big
- *
- * savect can be NULL, or a specific CatCTup not to remove even if it
- * has zero refcount.
- */
-static void
-CatalogCacheCleanup(CatCTup *savect)
-{
-	int			tup_target;
-	CatCache   *ccp;
-	Dlelem	   *elt,
-			   *prevelt;
-
-	/*
-	 * Each time we have to do this, try to cut the cache size down to about
-	 * 90% of the maximum.
-	 */
-	tup_target = (CacheHdr->ch_maxtup * 9) / 10;
-
-	/*
-	 * Our strategy for managing CatCLists is that, each time we have to throw
-	 * away some cache entries, we first move-to-front all the members of
-	 * CatCLists that have been touched since the last cleanup sweep. Then we
-	 * do strict LRU elimination by individual tuples, zapping a list if any
-	 * of its members gets zapped. Before PostgreSQL 8.1, we moved members to
-	 * front each time their owning list was touched, which was arguably more
-	 * fair in balancing list members against standalone tuples --- but the
-	 * overhead for large lists was horrendous. This scheme is more heavily
-	 * biased towards preserving lists, but that is not necessarily bad
-	 * either.
-	 */
-	for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
-	{
-		for (elt = DLGetHead(&ccp->cc_lists); elt; elt = DLGetSucc(elt))
-		{
-			CatCList   *cl = (CatCList *) DLE_VAL(elt);
-
-			Assert(cl->cl_magic == CL_MAGIC);
-			if (cl->touched && !cl->dead)
-			{
-				int			i;
-
-				for (i = 0; i < cl->n_members; i++)
-					DLMoveToFront(&cl->members[i]->lrulist_elem);
-			}
-			cl->touched = false;
-		}
-	}
-
-	/* Now get rid of unreferenced tuples in reverse global LRU order */
-	for (elt = DLGetTail(&CacheHdr->ch_lrulist); elt; elt = prevelt)
-	{
-		CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
-
-		prevelt = DLGetPred(elt);
-
-		if (ct->refcount == 0 &&
-			(ct->c_list == NULL || ct->c_list->refcount == 0) &&
-			ct != savect)
-		{
-#ifdef CATCACHE_STATS
-			ct->my_cache->cc_discards++;
-#endif
-			CatCacheRemoveCTup(ct->my_cache, ct);
-
-			/* Quit when we've removed enough tuples */
-			if (CacheHdr->ch_ntup <= tup_target)
-				break;
-		}
-	}
-}
-
 /*
  * build_dummy_tuple
  *		Generate a palloc'd HeapTuple that contains the specified key
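With the global LRU list gone, each cache entry above lives on exactly one doubly linked list -- the list for its hash bucket -- and a cache hit simply moves the entry to the front of that one list. A rough, self-contained sketch of that move-to-front step in plain C (a sentinel-based list for illustration, not PostgreSQL's Dllist API):

/*
 * Illustrative sketch only: a bucket is a circular doubly linked list with
 * a sentinel node; head->next is the most recently used entry.
 */
typedef struct node
{
	struct node *prev;
	struct node *next;
} node;

static void
bucket_init(node *head)
{
	head->prev = head;
	head->next = head;
}

static void
move_to_front(node *head, node *n)
{
	/* unlink n from wherever it currently sits in the bucket's list */
	n->prev->next = n->next;
	n->next->prev = n->prev;
	/* relink it immediately after the sentinel, i.e. at the front */
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}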
src/backend/utils/cache/syscache.c

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/cache/syscache.c,v 1.103 2006/05/03 22:45:26 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/cache/syscache.c,v 1.104 2006/06/15 02:08:09 tgl Exp $
  *
  * NOTES
  *	  These routines allow the parser/planner/executor to perform
@@ -55,14 +55,18 @@
 	the list sorted alphabetically and adjust the cache numbers
 	accordingly.
 
-	Add your entry to the cacheinfo[] array below. All cache lists are
-	alphabetical, so add it in the proper place. Specify the relation
-	OID, index OID, number of keys, and key attribute numbers. If the
-	relation contains tuples that are associated with a particular relation
-	(for example, its attributes, rules, triggers, etc) then specify the
-	attribute number that contains the OID of the associated relation.
-	This is used by CatalogCacheFlushRelation() to remove the correct
-	tuples during a table drop or relcache invalidation event.
+	Add your entry to the cacheinfo[] array below. All cache lists are
+	alphabetical, so add it in the proper place. Specify the relation OID,
+	index OID, number of keys, key attribute numbers, and number of hash
+	buckets. If the relation contains tuples that are associated with a
+	particular relation (for example, its attributes, rules, triggers, etc)
+	then specify the attribute number that contains the OID of the associated
+	relation. This is used by CatalogCacheFlushRelation() to remove the
+	correct tuples during a table drop or relcache invalidation event.
 
+	The number of hash buckets must be a power of 2. It's reasonable to
+	set this to the number of entries that might be in the particular cache
+	in a medium-size database.
+
 	There must be a unique index underlying each syscache (ie, an index
 	whose key is the same as that of the cache). If there is not one
@@ -90,6 +94,7 @@ struct cachedesc
 	int			reloidattr;		/* attr number of rel OID reference, or 0 */
 	int			nkeys;			/* # of keys needed for cache lookup */
 	int			key[4];			/* attribute numbers of key attrs */
+	int			nbuckets;		/* number of hash buckets for this cache */
 };
 
 static const struct cachedesc cacheinfo[] = {
@@ -102,7 +107,9 @@ static const struct cachedesc cacheinfo[] = {
 			0,
 			0,
 			0
-		}},
+		},
+		32
+	},
 	{AccessMethodRelationId,	/* AMNAME */
 		AmNameIndexId,
 		0,
@@ -112,7 +119,9 @@ static const struct cachedesc cacheinfo[] = {
 			0,
 			0,
 			0
-		}},
+		},
+		4
+	},
 	{AccessMethodRelationId,	/* AMOID */
 		AmOidIndexId,
 		0,
@@ -122,7 +131,9 @@ static const struct cachedesc cacheinfo[] = {
 			0,
 			0,
 			0
-		}},
+		},
+		4
+	},
 	{AccessMethodOperatorRelationId,	/* AMOPOPID */
 		AccessMethodOperatorIndexId,
 		0,
@@ -132,7 +143,9 @@ static const struct cachedesc cacheinfo[] = {
 			Anum_pg_amop_amopclaid,
 			0,
 			0
-		}},
+		},
+		64
+	},
 	{AccessMethodOperatorRelationId,	/* AMOPSTRATEGY */
 		AccessMethodStrategyIndexId,
 		0,
@@ -142,7 +155,9 @@ static const struct cachedesc cacheinfo[] = {
 			Anum_pg_amop_amopsubtype,
 			Anum_pg_amop_amopstrategy,
 			0
-		}},
+		},
+		64
+	},
 	{AccessMethodProcedureRelationId,	/* AMPROCNUM */
 		AccessMethodProcedureIndexId,
 		0,
@@ -152,7 +167,9 @@ static const struct cachedesc cacheinfo[] = {
 			Anum_pg_amproc_amprocsubtype,
 			Anum_pg_amproc_amprocnum,
 			0
-		}},
+		},
+		64
+	},
 	{AttributeRelationId,		/* ATTNAME */
 		AttributeRelidNameIndexId,
 		Anum_pg_attribute_attrelid,
@@ -162,7 +179,9 @@ static const struct cachedesc cacheinfo[] = {
 			Anum_pg_attribute_attname,
 			0,
 			0
-		}},
+		},
+		2048
+	},
 	{AttributeRelationId,		/* ATTNUM */
 		AttributeRelidNumIndexId,
 		Anum_pg_attribute_attrelid,
@@ -172,7 +191,9 @@ static const struct cachedesc cacheinfo[] = {
 			Anum_pg_attribute_attnum,
 			0,
 			0
-		}},
+		},
+		2048
+	},
 	{AuthMemRelationId,			/* AUTHMEMMEMROLE */
 		AuthMemMemRoleIndexId,
 		0,
@@ -182,7 +203,9 @@ static const struct cachedesc cacheinfo[] = {
 			Anum_pg_auth_members_roleid,
 			0,
 			0
-		}},
+		},
+		128
+	},
 	{AuthMemRelationId,			/* AUTHMEMROLEMEM */
 		AuthMemRoleMemIndexId,
 		0,
@@ -192,7 +215,9 @@ static const struct cachedesc cacheinfo[] = {
 			Anum_pg_auth_members_member,
 			0,
 			0
-		}},
+		},
+		128
+	},
 	{AuthIdRelationId,			/* AUTHNAME */
 		AuthIdRolnameIndexId,
 		0,
@@ -202,7 +227,9 @@ static const struct cachedesc cacheinfo[] = {
 			0,
 			0,
 			0
-		}},
+		},
+		128
+	},
 	{AuthIdRelationId,			/* AUTHOID */
 		AuthIdOidIndexId,
 		0,
@@ -212,7 +239,9 @@ static const struct cachedesc cacheinfo[] = {
 			0,
 			0,
 			0
-		}},
+		},
+		128
+	},
 	{
 		CastRelationId,			/* CASTSOURCETARGET */
 		CastSourceTargetIndexId,
@@ -223,7 +252,9 @@ static const struct cachedesc cacheinfo[] = {
 			Anum_pg_cast_casttarget,
 			0,
 			0
-		}},
+		},
+		256
+	},
 	{OperatorClassRelationId,	/* CLAAMNAMENSP */
 		OpclassAmNameNspIndexId,
 		0,
@@ -233,7 +264,9 @@ static const struct cachedesc cacheinfo[] = {
 			Anum_pg_opclass_opcname,
 			Anum_pg_opclass_opcnamespace,
 			0
-		}},
+		},
+		64
+	},
 	{OperatorClassRelationId,	/* CLAOID */
 		OpclassOidIndexId,
 		0,
@@ -243,7 +276,9 @@ static const struct cachedesc cacheinfo[] = {
 			0,
 			0,
 			0
-		}},
+		},
+		64
+	},
 	{ConversionRelationId,		/* CONDEFAULT */
 		ConversionDefaultIndexId,
 		0,
@@ -253,7 +288,9 @@ static const struct cachedesc cacheinfo[] = {
 			Anum_pg_conversion_conforencoding,
 			Anum_pg_conversion_contoencoding,
 			ObjectIdAttributeNumber,
-		}},
+		},
+		128
+	},
 	{ConversionRelationId,		/* CONNAMENSP */
 		ConversionNameNspIndexId,
 		0,
@@ -263,7 +300,9 @@ static const struct cachedesc cacheinfo[] = {
 			Anum_pg_conversion_connamespace,
 			0,
 			0
-		}},
+		},
+		128
+	},
 	{ConversionRelationId,		/* CONOID */
 		ConversionOidIndexId,
 		0,
@@ -273,7 +312,9 @@ static const struct cachedesc cacheinfo[] = {
 			0,
 			0,
 			0
-		}},
+		},
+		128
+	},
 	{DatabaseRelationId,		/* DATABASEOID */
 		DatabaseOidIndexId,
 		0,
@@ -283,7 +324,9 @@ static const struct cachedesc cacheinfo[] = {
 			0,
 			0,
 			0
-		}},
+		},
+		4
+	},
 	{IndexRelationId,			/* INDEXRELID */
 		IndexRelidIndexId,
 		Anum_pg_index_indrelid,
@@ -293,7 +336,9 @@ static const struct cachedesc cacheinfo[] = {
 			0,
 			0,
 			0
-		}},
+		},
+		1024
+	},
 	{InheritsRelationId,		/* INHRELID */
 		InheritsRelidSeqnoIndexId,
 		Anum_pg_inherits_inhrelid,
@@ -303,7 +348,9 @@ static const struct cachedesc cacheinfo[] = {
 			Anum_pg_inherits_inhseqno,
 			0,
 			0
-		}},
+		},
+		256
+	},
 	{LanguageRelationId,		/* LANGNAME */
 		LanguageNameIndexId,
 		0,
@@ -313,7 +360,9 @@ static const struct cachedesc cacheinfo[] = {
 			0,
 			0,
 			0
-		}},
+		},
+		4
+	},
 	{LanguageRelationId,		/* LANGOID */
 		LanguageOidIndexId,
 		0,
@@ -323,7 +372,9 @@ static const struct cachedesc cacheinfo[] = {
 			0,
 			0,
 			0
-		}},
+		},
+		4
+	},
 	{NamespaceRelationId,		/* NAMESPACENAME */
 		NamespaceNameIndexId,
 		0,
@@ -333,7 +384,9 @@ static const struct cachedesc cacheinfo[] = {
 			0,
 			0,
 			0
-		}},
+		},
+		256
+	},
 	{NamespaceRelationId,		/* NAMESPACEOID */
 		NamespaceOidIndexId,
 		0,
@@ -343,7 +396,9 @@ static const struct cachedesc cacheinfo[] = {
 			0,
 			0,
 			0
-		}},
+		},
+		256
+	},
 	{OperatorRelationId,		/* OPERNAMENSP */
 		OperatorNameNspIndexId,
 		0,
@@ -353,7 +408,9 @@ static const struct cachedesc cacheinfo[] = {
 			Anum_pg_operator_oprleft,
 			Anum_pg_operator_oprright,
 			Anum_pg_operator_oprnamespace
-		}},
+		},
+		1024
+	},
 	{OperatorRelationId,		/* OPEROID */
 		OperatorOidIndexId,
 		0,
@@ -363,7 +420,9 @@ static const struct cachedesc cacheinfo[] = {
 			0,
 			0,
 			0
-		}},
+		},
+		1024
+	},
 	{ProcedureRelationId,		/* PROCNAMEARGSNSP */
 		ProcedureNameArgsNspIndexId,
 		0,
@@ -373,7 +432,9 @@ static const struct cachedesc cacheinfo[] = {
 			Anum_pg_proc_proargtypes,
 			Anum_pg_proc_pronamespace,
 			0
-		}},
+		},
+		2048
+	},
 	{ProcedureRelationId,		/* PROCOID */
 		ProcedureOidIndexId,
 		0,
@@ -383,7 +444,9 @@ static const struct cachedesc cacheinfo[] = {
 			0,
 			0,
 			0
-		}},
+		},
+		2048
+	},
 	{RelationRelationId,		/* RELNAMENSP */
 		ClassNameNspIndexId,
 		ObjectIdAttributeNumber,
@@ -393,7 +456,9 @@ static const struct cachedesc cacheinfo[] = {
 			Anum_pg_class_relnamespace,
 			0,
 			0
-		}},
+		},
+		1024
+	},
 	{RelationRelationId,		/* RELOID */
 		ClassOidIndexId,
 		ObjectIdAttributeNumber,
@@ -403,7 +468,9 @@ static const struct cachedesc cacheinfo[] = {
 			0,
 			0,
 			0
-		}},
+		},
+		1024
+	},
 	{RewriteRelationId,			/* RULERELNAME */
 		RewriteRelRulenameIndexId,
 		Anum_pg_rewrite_ev_class,
@@ -413,7 +480,9 @@ static const struct cachedesc cacheinfo[] = {
 			Anum_pg_rewrite_rulename,
 			0,
 			0
-		}},
+		},
+		1024
+	},
 	{StatisticRelationId,		/* STATRELATT */
 		StatisticRelidAttnumIndexId,
 		Anum_pg_statistic_starelid,
@@ -423,7 +492,9 @@ static const struct cachedesc cacheinfo[] = {
 			Anum_pg_statistic_staattnum,
 			0,
 			0
-		}},
+		},
+		1024
+	},
 	{TypeRelationId,			/* TYPENAMENSP */
 		TypeNameNspIndexId,
 		Anum_pg_type_typrelid,
@@ -433,7 +504,9 @@ static const struct cachedesc cacheinfo[] = {
 			Anum_pg_type_typnamespace,
 			0,
 			0
-		}},
+		},
+		1024
+	},
 	{TypeRelationId,			/* TYPEOID */
 		TypeOidIndexId,
 		Anum_pg_type_typrelid,
@@ -443,11 +516,12 @@ static const struct cachedesc cacheinfo[] = {
 			0,
 			0,
 			0
-		}}
+		},
+		1024
+	}
 };
 
-static CatCache *SysCache[
-						  lengthof(cacheinfo)];
+static CatCache *SysCache[lengthof(cacheinfo)];
 static int	SysCacheSize = lengthof(cacheinfo);
 static bool CacheInitialized = false;
 
@@ -476,7 +550,8 @@ InitCatalogCache(void)
 										 cacheinfo[cacheId].indoid,
 										 cacheinfo[cacheId].reloidattr,
 										 cacheinfo[cacheId].nkeys,
-										 cacheinfo[cacheId].key);
+										 cacheinfo[cacheId].key,
+										 cacheinfo[cacheId].nbuckets);
 		if (!PointerIsValid(SysCache[cacheId]))
 			elog(ERROR, "could not initialize cache %u (%d)",
 				 cacheinfo[cacheId].reloid, cacheId);
src/include/utils/catcache.h

@@ -13,7 +13,7 @@
  * Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/utils/catcache.h,v 1.58 2006/03/05 15:59:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/catcache.h,v 1.59 2006/06/15 02:08:09 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -60,7 +60,6 @@ typedef struct catcache
 	 * searches, each of which will result in loading a negative entry
 	 */
 	long		cc_invals;		/* # of entries invalidated from cache */
-	long		cc_discards;	/* # of entries discarded due to overflow */
 	long		cc_lsearches;	/* total # list-searches */
 	long		cc_lhits;		/* # of matches against existing lists */
 #endif
@@ -75,11 +74,10 @@ typedef struct catctup
 	CatCache   *my_cache;		/* link to owning catcache */
 
 	/*
-	 * Each tuple in a cache is a member of two Dllists: one lists all the
-	 * elements in all the caches in LRU order, and the other lists just the
-	 * elements in one hashbucket of one cache, also in LRU order.
+	 * Each tuple in a cache is a member of a Dllist that stores the elements
+	 * of its hash bucket. We keep each Dllist in LRU order to speed repeated
+	 * lookups.
 	 */
-	Dlelem		lrulist_elem;	/* list member of global LRU list */
 	Dlelem		cache_elem;		/* list member of per-bucket list */
 
 	/*
@@ -125,9 +123,8 @@ typedef struct catclist
 	 * table rows satisfying the partial key. (Note: none of these will be
 	 * negative cache entries.)
 	 *
-	 * A CatCList is only a member of a per-cache list; we do not do separate
-	 * LRU management for CatCLists. See CatalogCacheCleanup() for the
-	 * details of the management algorithm.
+	 * A CatCList is only a member of a per-cache list; we do not currently
+	 * divide them into hash buckets.
 	 *
	 * A list marked "dead" must not be returned by subsequent searches.
 	 * However, it won't be physically deleted from the cache until its
@@ -143,7 +140,6 @@ typedef struct catclist
 	int			refcount;		/* number of active references */
 	bool		dead;			/* dead but not yet removed? */
 	bool		ordered;		/* members listed in index order? */
-	bool		touched;		/* used since last CatalogCacheCleanup? */
 	short		nkeys;			/* number of lookup keys specified */
 	uint32		hash_value;		/* hash value for lookup keys */
 	HeapTupleData tuple;		/* header for tuple holding keys */
@@ -156,8 +152,6 @@ typedef struct catcacheheader
 {
 	CatCache   *ch_caches;		/* head of list of CatCache structs */
 	int			ch_ntup;		/* # of tuples in all caches */
-	int			ch_maxtup;		/* max # of tuples allowed (LRU) */
-	Dllist		ch_lrulist;		/* overall LRU list, most recent first */
 } CatCacheHeader;
 
 
@@ -169,7 +163,8 @@ extern void AtEOXact_CatCache(bool isCommit);
 
 extern CatCache *InitCatCache(int id, Oid reloid, Oid indexoid,
 			 int reloidattr,
-			 int nkeys, const int *key);
+			 int nkeys, const int *key,
+			 int nbuckets);
 extern void InitCatCachePhase2(CatCache *cache);
 
 extern HeapTuple SearchCatCache(CatCache *cache,