Move I/O before the index_update_stats() buffer lock region.

Commit a07e03fd8f enlarged the work done
here under the pg_class heap buffer lock.  Two preexisting actions are
best done before holding that lock.  Both RelationGetNumberOfBlocks()
and visibilitymap_count() do I/O, and the latter might exclusive-lock a
visibility map buffer.  Moving these reduces contention and risk of
undetected LWLock deadlock.  Back-patch to v12, like that commit.

Discussion: https://postgr.es/m/20241031200139.b4@rfd.leadboat.com
Noah Misch 2024-11-02 09:04:55 -07:00
parent 2fa255ce9b
commit b412f402d1
1 changed file with 40 additions and 22 deletions
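
Before the diff, a minimal sketch of the pattern the commit applies (illustrative stand-in code, not PostgreSQL internals): do any work that may block on I/O before entering the exclusive-lock region, so the lock is held only for the brief in-place update. The pthread mutex and the read_block_count()/read_visible_count() helpers below are hypothetical assumptions standing in for the pg_class buffer lock, RelationGetNumberOfBlocks(), and visibilitymap_count().

    /* Sketch only: hoist I/O out of the locked region. Build with -pthread. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>

    static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;

    struct rel_stats
    {
        uint32_t    relpages;
        uint32_t    relallvisible;
    };

    static struct rel_stats shared_stats;  /* protected by stats_lock */

    /* Hypothetical stand-ins for the calls that do I/O in index_update_stats(). */
    static uint32_t
    read_block_count(void)
    {
        return 128;             /* pretend we measured the relation size */
    }

    static uint32_t
    read_visible_count(void)
    {
        return 100;             /* pretend we read the visibility map */
    }

    static void
    update_stats(bool update)
    {
        uint32_t    relpages = 0;
        uint32_t    relallvisible = 0;

        /* Do the potentially slow reads first, with no lock held. */
        if (update)
        {
            relpages = read_block_count();
            relallvisible = read_visible_count();
        }

        /* Hold the lock only for the brief overwrite-in-place update. */
        pthread_mutex_lock(&stats_lock);
        if (update)
        {
            shared_stats.relpages = relpages;
            shared_stats.relallvisible = relallvisible;
        }
        pthread_mutex_unlock(&stats_lock);
    }

    int
    main(void)
    {
        update_stats(true);
        return 0;
    }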

@@ -2806,6 +2806,9 @@ index_update_stats(Relation rel,
                    bool hasindex,
                    double reltuples)
 {
+    bool        update_stats;
+    BlockNumber relpages;
+    BlockNumber relallvisible;
     Oid         relid = RelationGetRelid(rel);
     Relation    pg_class;
     ScanKeyData key[1];
@@ -2814,6 +2817,42 @@ index_update_stats(Relation rel,
     Form_pg_class rd_rel;
     bool        dirty;
 
+    /*
+     * As a special hack, if we are dealing with an empty table and the
+     * existing reltuples is -1, we leave that alone.  This ensures that
+     * creating an index as part of CREATE TABLE doesn't cause the table to
+     * prematurely look like it's been vacuumed.  The rd_rel we modify may
+     * differ from rel->rd_rel due to e.g. commit of concurrent GRANT, but the
+     * commands that change reltuples take locks conflicting with ours.  (Even
+     * if a command changed reltuples under a weaker lock, this affects only
+     * statistics for an empty table.)
+     */
+    if (reltuples == 0 && rel->rd_rel->reltuples < 0)
+        reltuples = -1;
+
+    /*
+     * Don't update statistics during binary upgrade, because the indexes are
+     * created before the data is moved into place.
+     */
+    update_stats = reltuples >= 0 && !IsBinaryUpgrade;
+
+    /*
+     * Finish I/O and visibility map buffer locks before
+     * systable_inplace_update_begin() locks the pg_class buffer.  The rd_rel
+     * we modify may differ from rel->rd_rel due to e.g. commit of concurrent
+     * GRANT, but no command changes a relkind from non-index to index.  (Even
+     * if one did, relallvisible doesn't break functionality.)
+     */
+    if (update_stats)
+    {
+        relpages = RelationGetNumberOfBlocks(rel);
+
+        if (rel->rd_rel->relkind != RELKIND_INDEX)
+            visibilitymap_count(rel, &relallvisible, NULL);
+        else                    /* don't bother for indexes */
+            relallvisible = 0;
+    }
+
     /*
      * We always update the pg_class row using a non-transactional,
      * overwrite-in-place update.  There are several reasons for this:
@@ -2858,15 +2897,6 @@ index_update_stats(Relation rel,
     /* Should this be a more comprehensive test? */
     Assert(rd_rel->relkind != RELKIND_PARTITIONED_INDEX);
 
-    /*
-     * As a special hack, if we are dealing with an empty table and the
-     * existing reltuples is -1, we leave that alone.  This ensures that
-     * creating an index as part of CREATE TABLE doesn't cause the table to
-     * prematurely look like it's been vacuumed.
-     */
-    if (reltuples == 0 && rd_rel->reltuples < 0)
-        reltuples = -1;
-
     /* Apply required updates, if any, to copied tuple */
 
     dirty = false;
@@ -2876,20 +2906,8 @@ index_update_stats(Relation rel,
         dirty = true;
     }
 
-    /*
-     * Avoid updating statistics during binary upgrade, because the indexes
-     * are created before the data is moved into place.
-     */
-    if (reltuples >= 0 && !IsBinaryUpgrade)
+    if (update_stats)
     {
-        BlockNumber relpages = RelationGetNumberOfBlocks(rel);
-        BlockNumber relallvisible;
-
-        if (rd_rel->relkind != RELKIND_INDEX)
-            visibilitymap_count(rel, &relallvisible, NULL);
-        else                    /* don't bother for indexes */
-            relallvisible = 0;
-
         if (rd_rel->relpages != (int32) relpages)
         {
             rd_rel->relpages = (int32) relpages;