*/
typedef struct LVPagePruneState
{
- bool hastup; /* Page prevents rel truncation? */
bool has_lpdead_items; /* includes existing LP_DEAD items */
/*
LVPagePruneState *prunestate);
static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf,
BlockNumber blkno, Page page,
- bool *hastup, bool *recordfreespace);
+ bool *recordfreespace);
static void lazy_vacuum(LVRelState *vacrel);
static bool lazy_vacuum_all_indexes(LVRelState *vacrel);
static void lazy_vacuum_heap_rel(LVRelState *vacrel);
page = BufferGetPage(buf);
if (!ConditionalLockBufferForCleanup(buf))
{
- bool hastup,
- recordfreespace;
+ bool recordfreespace;
LockBuffer(buf, BUFFER_LOCK_SHARE);
continue;
}
- /* Collect LP_DEAD items in dead_items array, count tuples */
- if (lazy_scan_noprune(vacrel, buf, blkno, page, &hastup,
+ /*
+ * Collect LP_DEAD items in dead_items array, count tuples, and
+ * determine whether rel truncation is safe
+ */
+ if (lazy_scan_noprune(vacrel, buf, blkno, page,
&recordfreespace))
{
Size freespace = 0;
/*
* Processed page successfully (without cleanup lock) -- just
- * need to perform rel truncation and FSM steps, much like the
- * lazy_scan_prune case. Don't bother trying to match its
- * visibility map setting steps, though.
+ * need to update the FSM, much like the lazy_scan_prune case.
+ * Don't bother trying to match its visibility map setting
+ * steps, though.
*/
- if (hastup)
- vacrel->nonempty_pages = blkno + 1;
if (recordfreespace)
freespace = PageGetHeapFreeSpace(page);
UnlockReleaseBuffer(buf);
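/*
 * Note that the free space was computed above, while the buffer lock
 * was still held; the FSM update itself is safe to perform after the
 * page has been unlocked.
 */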
* dead_items array. This includes LP_DEAD line pointers that we
* pruned ourselves, as well as existing LP_DEAD line pointers that
* were pruned some time earlier. Also considers freezing XIDs in the
- * tuple headers of remaining items with storage.
+ * tuple headers of remaining items with storage. It also determines
+ * whether truncating this block is safe.
*/
lazy_scan_prune(vacrel, buf, blkno, page, &prunestate);
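/* An all-visible page should have no LP_DEAD items left behind */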
Assert(!prunestate.all_visible || !prunestate.has_lpdead_items);
- /* Remember the location of the last page with nonremovable tuples */
- if (prunestate.hastup)
- vacrel->nonempty_pages = blkno + 1;
-
if (vacrel->nindexes == 0)
{
/*
live_tuples,
recently_dead_tuples;
HeapPageFreeze pagefrz;
+ bool hastup = false; /* Page prevents rel truncation? */
int64 fpi_before = pgWalUsage.wal_fpi;
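/* Workspace for LP_DEAD item offsets and for tuples to be frozen */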
OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
HeapTupleFreeze frozen[MaxHeapTuplesPerPage];
* Now scan the page to collect LP_DEAD items and check for tuples
* requiring freezing among remaining tuples with storage
*/
- prunestate->hastup = false;
prunestate->has_lpdead_items = false;
prunestate->all_visible = true;
prunestate->all_frozen = true;
if (ItemIdIsRedirected(itemid))
{
/* page makes rel truncation unsafe */
- prunestate->hastup = true;
+ hastup = true;
continue;
}
break;
}
- prunestate->hastup = true; /* page makes rel truncation unsafe */
+ hastup = true; /* page makes rel truncation unsafe */
/* Tuple with storage -- consider need to freeze */
if (heap_prepare_freeze_tuple(htup, &vacrel->cutoffs, &pagefrz,
vacrel->lpdead_items += lpdead_items;
vacrel->live_tuples += live_tuples;
vacrel->recently_dead_tuples += recently_dead_tuples;
+
+ /* Remember the location of the last page with nonremovable tuples */
+ if (hastup)
+ vacrel->nonempty_pages = blkno + 1;
}
/*
* one or more tuples on the page. We always return true for non-aggressive
* callers.
*
- * See lazy_scan_prune for an explanation of hastup return flag.
* recordfreespace flag instructs caller on whether it should do generic
* FSM processing for page.
*/
Buffer buf,
BlockNumber blkno,
Page page,
- bool *hastup,
bool *recordfreespace)
{
OffsetNumber offnum,
live_tuples,
recently_dead_tuples,
missed_dead_tuples;
+ bool hastup; /* Page prevents rel truncation? */
HeapTupleHeader tupleheader;
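/*
 * Trackers for the new relfrozenxid/relminmxid values to use if the
 * page is accepted as-is, without any tuples being frozen
 */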
TransactionId NoFreezePageRelfrozenXid = vacrel->NewRelfrozenXid;
MultiXactId NoFreezePageRelminMxid = vacrel->NewRelminMxid;
Assert(BufferGetBlockNumber(buf) == blkno);
- *hastup = false; /* for now */
+ hastup = false; /* for now */
*recordfreespace = false; /* for now */
lpdead_items = 0;
if (ItemIdIsRedirected(itemid))
{
- *hastup = true;
+ hastup = true;
continue;
}
continue;
}
- *hastup = true; /* page prevents rel truncation */
+ hastup = true; /* page prevents rel truncation */
tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
if (heap_tuple_should_freeze(tupleheader, &vacrel->cutoffs,
&NoFreezePageRelfrozenXid,
* but it beats having to maintain specialized heap vacuuming code
* forever, for vanishingly little benefit.)
*/
- *hastup = true;
+ hastup = true;
missed_dead_tuples += lpdead_items;
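/* These LP_DEAD items remain in place until some later VACUUM */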
}
if (missed_dead_tuples > 0)
vacrel->missed_dead_pages++;
+ /* Remember the location of the last page with nonremovable tuples */
+ if (hastup)
+ vacrel->nonempty_pages = blkno + 1;
+
/* Caller won't need to call lazy_scan_prune with same page */
return true;
}