Remove hastup from LVPagePruneState.

Instead, just have lazy_scan_prune() and lazy_scan_noprune() update
LVRelState->nonempty_pages directly. This makes the two functions
more similar and also makes lazy_scan_noprune() need one fewer
output parameter.

Melanie Plageman, reviewed by Andres Freund, Michael Paquier, and me

Discussion: http://postgr.es/m/CAAKRu_btji_wQdg=ok-5E4v_bGVxKYnnFFe7RA6Frc1EcOwtSg@mail.gmail.com
This commit is contained in:
Robert Haas 2024-01-11 13:22:04 -05:00
parent 5faffa0434
commit e2d5b3b9b6

View File

@ -217,7 +217,6 @@ typedef struct LVRelState
*/ */
typedef struct LVPagePruneState typedef struct LVPagePruneState
{ {
bool hastup; /* Page prevents rel truncation? */
bool has_lpdead_items; /* includes existing LP_DEAD items */ bool has_lpdead_items; /* includes existing LP_DEAD items */
/* /*
@ -253,7 +252,7 @@ static void lazy_scan_prune(LVRelState *vacrel, Buffer buf,
LVPagePruneState *prunestate); LVPagePruneState *prunestate);
static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf, static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf,
BlockNumber blkno, Page page, BlockNumber blkno, Page page,
bool *hastup, bool *recordfreespace); bool *recordfreespace);
static void lazy_vacuum(LVRelState *vacrel); static void lazy_vacuum(LVRelState *vacrel);
static bool lazy_vacuum_all_indexes(LVRelState *vacrel); static bool lazy_vacuum_all_indexes(LVRelState *vacrel);
static void lazy_vacuum_heap_rel(LVRelState *vacrel); static void lazy_vacuum_heap_rel(LVRelState *vacrel);
@ -959,8 +958,7 @@ lazy_scan_heap(LVRelState *vacrel)
page = BufferGetPage(buf); page = BufferGetPage(buf);
if (!ConditionalLockBufferForCleanup(buf)) if (!ConditionalLockBufferForCleanup(buf))
{ {
bool hastup, bool recordfreespace;
recordfreespace;
LockBuffer(buf, BUFFER_LOCK_SHARE); LockBuffer(buf, BUFFER_LOCK_SHARE);
@ -972,20 +970,21 @@ lazy_scan_heap(LVRelState *vacrel)
continue; continue;
} }
/* Collect LP_DEAD items in dead_items array, count tuples */ /*
if (lazy_scan_noprune(vacrel, buf, blkno, page, &hastup, * Collect LP_DEAD items in dead_items array, count tuples,
* determine if rel truncation is safe
*/
if (lazy_scan_noprune(vacrel, buf, blkno, page,
&recordfreespace)) &recordfreespace))
{ {
Size freespace = 0; Size freespace = 0;
/* /*
* Processed page successfully (without cleanup lock) -- just * Processed page successfully (without cleanup lock) -- just
* need to perform rel truncation and FSM steps, much like the * need to update the FSM, much like the lazy_scan_prune case.
* lazy_scan_prune case. Don't bother trying to match its * Don't bother trying to match its visibility map setting
* visibility map setting steps, though. * steps, though.
*/ */
if (hastup)
vacrel->nonempty_pages = blkno + 1;
if (recordfreespace) if (recordfreespace)
freespace = PageGetHeapFreeSpace(page); freespace = PageGetHeapFreeSpace(page);
UnlockReleaseBuffer(buf); UnlockReleaseBuffer(buf);
@ -1017,16 +1016,13 @@ lazy_scan_heap(LVRelState *vacrel)
* dead_items array. This includes LP_DEAD line pointers that we * dead_items array. This includes LP_DEAD line pointers that we
* pruned ourselves, as well as existing LP_DEAD line pointers that * pruned ourselves, as well as existing LP_DEAD line pointers that
* were pruned some time earlier. Also considers freezing XIDs in the * were pruned some time earlier. Also considers freezing XIDs in the
* tuple headers of remaining items with storage. * tuple headers of remaining items with storage. It also determines
* if truncating this block is safe.
*/ */
lazy_scan_prune(vacrel, buf, blkno, page, &prunestate); lazy_scan_prune(vacrel, buf, blkno, page, &prunestate);
Assert(!prunestate.all_visible || !prunestate.has_lpdead_items); Assert(!prunestate.all_visible || !prunestate.has_lpdead_items);
/* Remember the location of the last page with nonremovable tuples */
if (prunestate.hastup)
vacrel->nonempty_pages = blkno + 1;
if (vacrel->nindexes == 0) if (vacrel->nindexes == 0)
{ {
/* /*
@ -1555,6 +1551,7 @@ lazy_scan_prune(LVRelState *vacrel,
live_tuples, live_tuples,
recently_dead_tuples; recently_dead_tuples;
HeapPageFreeze pagefrz; HeapPageFreeze pagefrz;
bool hastup = false;
int64 fpi_before = pgWalUsage.wal_fpi; int64 fpi_before = pgWalUsage.wal_fpi;
OffsetNumber deadoffsets[MaxHeapTuplesPerPage]; OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
HeapTupleFreeze frozen[MaxHeapTuplesPerPage]; HeapTupleFreeze frozen[MaxHeapTuplesPerPage];
@ -1593,7 +1590,6 @@ lazy_scan_prune(LVRelState *vacrel,
* Now scan the page to collect LP_DEAD items and check for tuples * Now scan the page to collect LP_DEAD items and check for tuples
* requiring freezing among remaining tuples with storage * requiring freezing among remaining tuples with storage
*/ */
prunestate->hastup = false;
prunestate->has_lpdead_items = false; prunestate->has_lpdead_items = false;
prunestate->all_visible = true; prunestate->all_visible = true;
prunestate->all_frozen = true; prunestate->all_frozen = true;
@ -1620,7 +1616,7 @@ lazy_scan_prune(LVRelState *vacrel,
if (ItemIdIsRedirected(itemid)) if (ItemIdIsRedirected(itemid))
{ {
/* page makes rel truncation unsafe */ /* page makes rel truncation unsafe */
prunestate->hastup = true; hastup = true;
continue; continue;
} }
@ -1750,7 +1746,7 @@ lazy_scan_prune(LVRelState *vacrel,
break; break;
} }
prunestate->hastup = true; /* page makes rel truncation unsafe */ hastup = true; /* page makes rel truncation unsafe */
/* Tuple with storage -- consider need to freeze */ /* Tuple with storage -- consider need to freeze */
if (heap_prepare_freeze_tuple(htup, &vacrel->cutoffs, &pagefrz, if (heap_prepare_freeze_tuple(htup, &vacrel->cutoffs, &pagefrz,
@ -1918,6 +1914,10 @@ lazy_scan_prune(LVRelState *vacrel,
vacrel->lpdead_items += lpdead_items; vacrel->lpdead_items += lpdead_items;
vacrel->live_tuples += live_tuples; vacrel->live_tuples += live_tuples;
vacrel->recently_dead_tuples += recently_dead_tuples; vacrel->recently_dead_tuples += recently_dead_tuples;
/* Can't truncate this page */
if (hastup)
vacrel->nonempty_pages = blkno + 1;
} }
/* /*
@ -1935,7 +1935,6 @@ lazy_scan_prune(LVRelState *vacrel,
* one or more tuples on the page. We always return true for non-aggressive * one or more tuples on the page. We always return true for non-aggressive
* callers. * callers.
* *
* See lazy_scan_prune for an explanation of hastup return flag.
* recordfreespace flag instructs caller on whether or not it should do * recordfreespace flag instructs caller on whether or not it should do
* generic FSM processing for page. * generic FSM processing for page.
*/ */
@ -1944,7 +1943,6 @@ lazy_scan_noprune(LVRelState *vacrel,
Buffer buf, Buffer buf,
BlockNumber blkno, BlockNumber blkno,
Page page, Page page,
bool *hastup,
bool *recordfreespace) bool *recordfreespace)
{ {
OffsetNumber offnum, OffsetNumber offnum,
@ -1953,6 +1951,7 @@ lazy_scan_noprune(LVRelState *vacrel,
live_tuples, live_tuples,
recently_dead_tuples, recently_dead_tuples,
missed_dead_tuples; missed_dead_tuples;
bool hastup;
HeapTupleHeader tupleheader; HeapTupleHeader tupleheader;
TransactionId NoFreezePageRelfrozenXid = vacrel->NewRelfrozenXid; TransactionId NoFreezePageRelfrozenXid = vacrel->NewRelfrozenXid;
MultiXactId NoFreezePageRelminMxid = vacrel->NewRelminMxid; MultiXactId NoFreezePageRelminMxid = vacrel->NewRelminMxid;
@ -1960,7 +1959,7 @@ lazy_scan_noprune(LVRelState *vacrel,
Assert(BufferGetBlockNumber(buf) == blkno); Assert(BufferGetBlockNumber(buf) == blkno);
*hastup = false; /* for now */ hastup = false; /* for now */
*recordfreespace = false; /* for now */ *recordfreespace = false; /* for now */
lpdead_items = 0; lpdead_items = 0;
@ -1984,7 +1983,7 @@ lazy_scan_noprune(LVRelState *vacrel,
if (ItemIdIsRedirected(itemid)) if (ItemIdIsRedirected(itemid))
{ {
*hastup = true; hastup = true;
continue; continue;
} }
@ -1998,7 +1997,7 @@ lazy_scan_noprune(LVRelState *vacrel,
continue; continue;
} }
*hastup = true; /* page prevents rel truncation */ hastup = true; /* page prevents rel truncation */
tupleheader = (HeapTupleHeader) PageGetItem(page, itemid); tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
if (heap_tuple_should_freeze(tupleheader, &vacrel->cutoffs, if (heap_tuple_should_freeze(tupleheader, &vacrel->cutoffs,
&NoFreezePageRelfrozenXid, &NoFreezePageRelfrozenXid,
@ -2100,7 +2099,7 @@ lazy_scan_noprune(LVRelState *vacrel,
* but it beats having to maintain specialized heap vacuuming code * but it beats having to maintain specialized heap vacuuming code
* forever, for vanishingly little benefit.) * forever, for vanishingly little benefit.)
*/ */
*hastup = true; hastup = true;
missed_dead_tuples += lpdead_items; missed_dead_tuples += lpdead_items;
} }
@ -2156,6 +2155,10 @@ lazy_scan_noprune(LVRelState *vacrel,
if (missed_dead_tuples > 0) if (missed_dead_tuples > 0)
vacrel->missed_dead_pages++; vacrel->missed_dead_pages++;
/* Can't truncate this page */
if (hastup)
vacrel->nonempty_pages = blkno + 1;
/* Caller won't need to call lazy_scan_prune with same page */ /* Caller won't need to call lazy_scan_prune with same page */
return true; return true;
} }