vacuumlazy.c: Standardize rel_pages terminology.

VACUUM's rel_pages field indicates the size of the target heap rel just
after the table_relation_vacuum() operation began.  There are specific
expectations around how rel_pages can be related to other nearby state.
In particular, the range of rel_pages must contain every tuple in the
relation whose tuple headers might contain an XID < OldestXmin.
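
To make that invariant concrete, here is a minimal stand-alone C sketch. It is an illustration, not code from the patch: the struct is reduced to the two fields discussed above, and check_block_in_range() is a hypothetical helper.

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t BlockNumber;   /* stand-in for PostgreSQL's BlockNumber */
    typedef uint32_t TransactionId; /* stand-in for PostgreSQL's TransactionId */

    /* Reduced stand-in for vacuumlazy.c's LVRelState, keeping only the
     * fields the commit message discusses. */
    typedef struct LVRelState
    {
        BlockNumber   rel_pages;   /* rel size when table_relation_vacuum() began */
        TransactionId OldestXmin;  /* visibility cutoff for this VACUUM */
    } LVRelState;

    /*
     * Hypothetical helper expressing the invariant: any tuple whose header
     * might contain an XID < OldestXmin must live in a block < rel_pages,
     * since pages appended after VACUUM began can only hold newer XIDs.
     */
    static void
    check_block_in_range(const LVRelState *vacrel, BlockNumber tupblk)
    {
        assert(tupblk < vacrel->rel_pages);
    }

    int
    main(void)
    {
        LVRelState vacrel = {.rel_pages = 128, .OldestXmin = 1000};

        check_block_in_range(&vacrel, 64);  /* in range: fine */
        return 0;
    }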

Consistently refer to the field as rel_pages to make this clearer and
more discoverable.

This is follow-up work to commit 73f6ec3d from earlier today.

Author: Peter Geoghegan <pg@bowt.ie>
Reviewed-By: Andres Freund <andres@anarazel.de>
Discussion: https://postgr.es/m/20220311031351.sbge5m2bpvy2ttxg@alap3.anarazel.de

commit e370f100f0
parent 73f6ec3d3c
Peter Geoghegan  2022-03-12 13:20:45 -08:00

--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -825,7 +825,7 @@ static void
 lazy_scan_heap(LVRelState *vacrel, int nworkers)
 {
     VacDeadItems *dead_items;
-    BlockNumber nblocks = vacrel->rel_pages,
+    BlockNumber rel_pages = vacrel->rel_pages,
                 blkno,
                 next_unskippable_block,
                 next_failsafe_block,
@@ -858,7 +858,7 @@ lazy_scan_heap(LVRelState *vacrel, int nworkers)
     /* Report that we're scanning the heap, advertising total # of blocks */
     initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
-    initprog_val[1] = nblocks;
+    initprog_val[1] = rel_pages;
     initprog_val[2] = dead_items->max_items;
     pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
@@ -882,9 +882,9 @@ lazy_scan_heap(LVRelState *vacrel, int nworkers)
      * Before entering the main loop, establish the invariant that
      * next_unskippable_block is the next block number >= blkno that we can't
      * skip based on the visibility map, either all-visible for a regular scan
-     * or all-frozen for an aggressive scan.  We set it to nblocks if there's
-     * no such block.  We also set up the skipping_blocks flag correctly at
-     * this stage.
+     * or all-frozen for an aggressive scan.  We set it to rel_pages when
+     * there's no such block.  We also set up the skipping_blocks flag
+     * correctly at this stage.
      *
      * Note: The value returned by visibilitymap_get_status could be slightly
      * out-of-date, since we make this test before reading the corresponding
@@ -902,7 +902,7 @@ lazy_scan_heap(LVRelState *vacrel, int nworkers)
     next_unskippable_block = 0;
     if (vacrel->skipwithvm)
     {
-        while (next_unskippable_block < nblocks)
+        while (next_unskippable_block < rel_pages)
         {
             uint8       vmstatus;
@@ -929,7 +929,7 @@ lazy_scan_heap(LVRelState *vacrel, int nworkers)
     else
         skipping_blocks = false;

-    for (blkno = 0; blkno < nblocks; blkno++)
+    for (blkno = 0; blkno < rel_pages; blkno++)
     {
         Buffer      buf;
         Page        page;
@@ -947,7 +947,7 @@ lazy_scan_heap(LVRelState *vacrel, int nworkers)
             next_unskippable_block++;
             if (vacrel->skipwithvm)
             {
-                while (next_unskippable_block < nblocks)
+                while (next_unskippable_block < rel_pages)
                 {
                     uint8       vmskipflags;
@@ -992,7 +992,7 @@ lazy_scan_heap(LVRelState *vacrel, int nworkers)
         /*
         * The current page can be skipped if we've seen a long enough run
         * of skippable blocks to justify skipping it -- provided it's not
-        * the last page in the relation (according to rel_pages/nblocks).
+        * the last page in the relation (according to rel_pages).
         *
         * We always scan the table's last page to determine whether it
         * has tuples or not, even if it would otherwise be skipped.  This
@@ -1000,7 +1000,7 @@ lazy_scan_heap(LVRelState *vacrel, int nworkers)
         * on the table to attempt a truncation that just fails
         * immediately because there are tuples on the last page.
         */
-        if (skipping_blocks && blkno < nblocks - 1)
+        if (skipping_blocks && blkno < rel_pages - 1)
         {
             /*
             * Tricky, tricky.  If this is in aggressive vacuum, the page
@@ -1367,7 +1367,7 @@ lazy_scan_heap(LVRelState *vacrel, int nworkers)
     vacrel->blkno = InvalidBlockNumber;

     /* now we can compute the new value for pg_class.reltuples */
-    vacrel->new_live_tuples = vac_estimate_reltuples(vacrel->rel, nblocks,
+    vacrel->new_live_tuples = vac_estimate_reltuples(vacrel->rel, rel_pages,
                                                      vacrel->scanned_pages,
                                                      vacrel->live_tuples);
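
For orientation, the control flow these renamed variables drive can be compressed into a self-contained sketch. This is an illustration rather than the real vacuumlazy.c logic: block_all_visible() is a hypothetical stand-in for the visibilitymap_get_status() checks, and the SKIP_PAGES_THRESHOLD policy is collapsed into a single skipping_blocks flag.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t BlockNumber;   /* stand-in for PostgreSQL's BlockNumber */

    /* Hypothetical stand-in for the visibility map lookup. */
    static bool
    block_all_visible(BlockNumber blkno)
    {
        return blkno % 2 == 0;      /* arbitrary demo data */
    }

    /* Sketch of lazy_scan_heap()'s block-skipping structure, using the
     * rel_pages naming that this commit standardizes on. */
    static void
    scan_heap_sketch(BlockNumber rel_pages, bool skipping_blocks)
    {
        BlockNumber next_unskippable_block = 0;

        /* Invariant: next_unskippable_block is the next block >= blkno
         * that we can't skip, or rel_pages when there's no such block. */
        while (next_unskippable_block < rel_pages &&
               block_all_visible(next_unskippable_block))
            next_unskippable_block++;

        for (BlockNumber blkno = 0; blkno < rel_pages; blkno++)
        {
            if (blkno == next_unskippable_block)
            {
                /* Re-establish the invariant for the blocks ahead. */
                next_unskippable_block++;
                while (next_unskippable_block < rel_pages &&
                       block_all_visible(next_unskippable_block))
                    next_unskippable_block++;
            }
            else if (skipping_blocks && blkno < rel_pages - 1)
                continue;           /* skippable, and not the last page */

            printf("scanning block %u of %u\n",
                   (unsigned) blkno, (unsigned) rel_pages);
        }
    }

    int
    main(void)
    {
        scan_heap_sketch(8, true);
        return 0;
    }

As in the hunk at line 1000 above, the sketch never skips the last page (rel_pages - 1), so a later truncation attempt can't be defeated by unseen tuples there.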