TransactionId *visibility_cutoff_xid, bool *all_frozen);
static int compute_parallel_vacuum_workers(LVRelState *vacrel,
int nrequested,
- bool *can_parallel_vacuum);
+ bool *will_parallel_vacuum);
static void update_index_statistics(LVRelState *vacrel);
static LVParallelState *begin_parallel_vacuum(LVRelState *vacrel,
BlockNumber nblocks,
vacrel->lps->lvshared->first_time = false;
/*
- * We can only provide an approximate value of num_heap_tuples in vacuum
- * cases.
+ * We can only provide an approximate value of num_heap_tuples, at least
+ * for now. Matches serial VACUUM case.
*/
vacrel->lps->lvshared->reltuples = vacrel->old_live_tuples;
vacrel->lps->lvshared->estimated_count = true;
if (idx >= vacrel->nindexes)
break;
- /* Get the index statistics of this index from DSM */
+ /* Get the index statistics space from DSM, if any */
shared_istat = parallel_stats_for_idx(lvshared, idx);
/* Skip indexes not participating in parallelism */
}
/*
- * Vacuum or cleanup indexes that can be processed by only the leader process
- * because these indexes don't support parallel operation at that phase.
+ * Perform serial processing of indexes in the leader process.
+ *
+ * Handles index vacuuming (or index cleanup) for indexes that are not
+ * parallel safe. It's possible that this will vary for a given index, based
+ * on details like whether we're performing for_cleanup processing right now.
+ *
+ * Also performs processing of smaller indexes that fell under the size cutoff
+ * enforced by compute_parallel_vacuum_workers(). These indexes never get a
+ * slot for statistics in DSM.
*/
static void
do_serial_processing_for_unsafe_indexes(LVRelState *vacrel, LVShared *lvshared)
IndexBulkDeleteResult *istat;
shared_istat = parallel_stats_for_idx(lvshared, idx);
-
- /* Skip already-complete indexes */
- if (shared_istat != NULL)
- continue;
-
indrel = vacrel->indrels[idx];
/*
- * We're only here for the unsafe indexes
+ * We're only here for the indexes that parallel workers won't
+ * process. Note that the shared_istat test ensures that we process
+ * indexes that fell under the initial size cutoff.
*/
- if (parallel_processing_is_safe(indrel, lvshared))
+ if (shared_istat != NULL &&
+ parallel_processing_is_safe(indrel, lvshared))
continue;
/* Do vacuum or cleanup of the index */
 * nrequested is the number of parallel workers that the user requested. If
 * nrequested is 0, we compute the parallel degree based on the number of
 * indexes that support parallel vacuum. This function also
- * sets can_parallel_vacuum to remember indexes that participate in parallel
+ * sets will_parallel_vacuum to remember indexes that participate in parallel
* vacuum.
*/
static int
compute_parallel_vacuum_workers(LVRelState *vacrel, int nrequested,
- bool *can_parallel_vacuum)
+ bool *will_parallel_vacuum)
{
int nindexes_parallel = 0;
int nindexes_parallel_bulkdel = 0;
RelationGetNumberOfBlocks(indrel) < min_parallel_index_scan_size)
continue;
- can_parallel_vacuum[idx] = true;
+ will_parallel_vacuum[idx] = true;
if ((vacoptions & VACUUM_OPTION_PARALLEL_BULKDEL) != 0)
nindexes_parallel_bulkdel++;
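The excerpt stops before the tail of compute_parallel_vacuum_workers(), where these per-phase counts become the worker count used by begin_parallel_vacuum() below. A minimal standalone sketch of that step, assuming a nindexes_parallel_cleanup counter alongside nindexes_parallel_bulkdel and a cap at max_parallel_maintenance_workers (the exact variable names and capping order here are illustrative, not taken from the patch):

#include <stdio.h>

#define Min(a, b) ((a) < (b) ? (a) : (b))
#define Max(a, b) ((a) > (b) ? (a) : (b))

/*
 * Sketch only.  The leader always takes one of the parallel-capable indexes
 * itself, which is why the regression test below needs two indexes that reach
 * min_parallel_index_scan_size before any worker is launched at all.
 */
static int
sketch_compute_workers(int nindexes_parallel_bulkdel,
                       int nindexes_parallel_cleanup,
                       int nrequested,
                       int max_parallel_maintenance_workers)
{
    int         nindexes_parallel = Max(nindexes_parallel_bulkdel,
                                        nindexes_parallel_cleanup);
    int         parallel_workers;

    nindexes_parallel--;        /* the leader process takes one index */
    if (nindexes_parallel <= 0)
        return 0;               /* parallel VACUUM is not used */

    parallel_workers = (nrequested > 0) ?
        Min(nrequested, nindexes_parallel) : nindexes_parallel;

    return Min(parallel_workers, max_parallel_maintenance_workers);
}

int
main(void)
{
    /* Two big indexes, PARALLEL 4, max_parallel_maintenance_workers = 4 */
    printf("%d\n", sketch_compute_workers(2, 2, 4, 4));    /* prints 1 */
    return 0;
}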
LVDeadTuples *dead_tuples;
BufferUsage *buffer_usage;
WalUsage *wal_usage;
- bool *can_parallel_vacuum;
+ bool *will_parallel_vacuum;
long maxtuples;
Size est_shared;
Size est_deadtuples;
/*
* Compute the number of parallel vacuum workers to launch
*/
- can_parallel_vacuum = (bool *) palloc0(sizeof(bool) * nindexes);
+ will_parallel_vacuum = (bool *) palloc0(sizeof(bool) * nindexes);
parallel_workers = compute_parallel_vacuum_workers(vacrel,
nrequested,
- can_parallel_vacuum);
+ will_parallel_vacuum);
/* Can't perform vacuum in parallel */
if (parallel_workers <= 0)
{
- pfree(can_parallel_vacuum);
+ pfree(will_parallel_vacuum);
return lps;
}
Assert(vacoptions <= VACUUM_OPTION_MAX_VALID_VALUE);
/* Skip indexes that don't participate in parallel vacuum */
- if (!can_parallel_vacuum[idx])
+ if (!will_parallel_vacuum[idx])
continue;
if (indrel->rd_indam->amusemaintenanceworkmem)
memset(shared->bitmap, 0x00, BITMAPLEN(nindexes));
for (int idx = 0; idx < nindexes; idx++)
{
- if (!can_parallel_vacuum[idx])
+ if (!will_parallel_vacuum[idx])
continue;
/* Set NOT NULL as this index does support parallelism */
PARALLEL_VACUUM_KEY_QUERY_TEXT, sharedquery);
}
- pfree(can_parallel_vacuum);
+ pfree(will_parallel_vacuum);
return lps;
}
shared_istat = parallel_stats_for_idx(lps->lvshared, idx);
/*
- * Skip unused slot. The statistics of this index are already stored
- * in local memory.
+ * Skip index -- it must have been processed by the leader, from
+ * inside do_serial_processing_for_unsafe_indexes()
*/
if (shared_istat == NULL)
continue;
/*
* Return shared memory statistics for index at offset 'getidx', if any
+ *
+ * Returning NULL indicates that compute_parallel_vacuum_workers() determined
+ * that the index is a totally unsuitable target for all parallel processing
+ * up front. For example, the index could be smaller than the
+ * min_parallel_index_scan_size cutoff.
*/
static LVSharedIndStats *
parallel_stats_for_idx(LVShared *lvshared, int getidx)
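The body of parallel_stats_for_idx() is cut off here. A rough, self-contained sketch of a bitmap-driven lookup with the behavior the comment describes, using made-up minimal types (SketchShared, SketchIndStats) rather than the real LVShared/LVSharedIndStats layout:

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch only: a stand-in for the DSM statistics area.  The point illustrated
 * is that an index which never got a bit set in the bitmap (because
 * will_parallel_vacuum[] was false for it) has no slot at all, so the lookup
 * returns NULL and the leader must process that index itself in
 * do_serial_processing_for_unsafe_indexes().
 */
typedef struct SketchIndStats
{
    int         placeholder;    /* stands in for the per-index statistics */
} SketchIndStats;

typedef struct SketchShared
{
    uint8_t     bitmap[8];      /* bit set => index has a stats slot */
    SketchIndStats stats[64];   /* slots, packed in index order */
} SketchShared;

static SketchIndStats *
sketch_stats_for_idx(SketchShared *shared, int getidx)
{
    int         slot = 0;

    /* No DSM slot was ever allocated for this index */
    if ((shared->bitmap[getidx >> 3] & (1 << (getidx & 0x07))) == 0)
        return NULL;

    /* Count earlier indexes that do have slots to find our offset */
    for (int idx = 0; idx < getidx; idx++)
    {
        if (shared->bitmap[idx >> 3] & (1 << (idx & 0x07)))
            slot++;
    }
    return &shared->stats[slot];
}

int
main(void)
{
    SketchShared shared = {0};

    shared.bitmap[0] = 0x03;    /* indexes 0 and 1 participate; index 2 does not */
    printf("%d\n", sketch_stats_for_idx(&shared, 2) == NULL);  /* prints 1 */
    return 0;
}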
--- /dev/null
+SET max_parallel_maintenance_workers TO 4;
+SET min_parallel_index_scan_size TO '128kB';
+-- Bug #17245: Make sure that we don't totally fail to VACUUM individual indexes that
+-- happen to be below min_parallel_index_scan_size during parallel VACUUM:
+CREATE TABLE parallel_vacuum_table (a int) WITH (autovacuum_enabled = off);
+INSERT INTO parallel_vacuum_table SELECT i from generate_series(1, 10000) i;
+-- Parallel VACUUM will never be used unless there are at least two indexes
+-- that exceed min_parallel_index_scan_size. Create two such indexes, and
+-- a third index that is smaller than min_parallel_index_scan_size.
+CREATE INDEX regular_sized_index ON parallel_vacuum_table(a);
+CREATE INDEX typically_sized_index ON parallel_vacuum_table(a);
+-- Note: vacuum_in_leader_small_index can apply deduplication, making it ~3x
+-- smaller than the other indexes
+CREATE INDEX vacuum_in_leader_small_index ON parallel_vacuum_table((1));
+-- Verify (as best we can) that the cost model for parallel VACUUM
+-- will make our VACUUM run in parallel, while always leaving it up to the
+-- parallel leader to handle the vacuum_in_leader_small_index index:
+SELECT EXISTS (
+SELECT 1
+FROM pg_class
+WHERE oid = 'vacuum_in_leader_small_index'::regclass AND
+ pg_relation_size(oid) <
+ pg_size_bytes(current_setting('min_parallel_index_scan_size'))
+) as leader_will_handle_small_index;
+ leader_will_handle_small_index
+--------------------------------
+ t
+(1 row)
+
+SELECT count(*) as trigger_parallel_vacuum_nindexes
+FROM pg_class
+WHERE oid in ('regular_sized_index'::regclass, 'typically_sized_index'::regclass) AND
+ pg_relation_size(oid) >=
+ pg_size_bytes(current_setting('min_parallel_index_scan_size'));
+ trigger_parallel_vacuum_nindexes
+----------------------------------
+ 2
+(1 row)
+
+-- Parallel VACUUM with B-Tree page deletions, ambulkdelete calls:
+DELETE FROM parallel_vacuum_table;
+VACUUM (PARALLEL 4, INDEX_CLEANUP ON) parallel_vacuum_table;
+-- Since vacuum_in_leader_small_index uses deduplication, we expect an
+-- assertion failure with bug #17245 (in the absence of the bugfix):
+INSERT INTO parallel_vacuum_table SELECT i FROM generate_series(1, 10000) i;
+RESET max_parallel_maintenance_workers;
+RESET min_parallel_index_scan_size;
+-- Deliberately don't drop table, to get further coverage from tools like
+-- pg_amcheck in some testing scenarios
--- /dev/null
+SET max_parallel_maintenance_workers TO 4;
+SET min_parallel_index_scan_size TO '128kB';
+
+-- Bug #17245: Make sure that we don't totally fail to VACUUM individual indexes that
+-- happen to be below min_parallel_index_scan_size during parallel VACUUM:
+CREATE TABLE parallel_vacuum_table (a int) WITH (autovacuum_enabled = off);
+INSERT INTO parallel_vacuum_table SELECT i from generate_series(1, 10000) i;
+
+-- Parallel VACUUM will never be used unless there are at least two indexes
+-- that exceed min_parallel_index_scan_size. Create two such indexes, and
+-- a third index that is smaller than min_parallel_index_scan_size.
+CREATE INDEX regular_sized_index ON parallel_vacuum_table(a);
+CREATE INDEX typically_sized_index ON parallel_vacuum_table(a);
+-- Note: vacuum_in_leader_small_index can apply deduplication, making it ~3x
+-- smaller than the other indexes
+CREATE INDEX vacuum_in_leader_small_index ON parallel_vacuum_table((1));
+
+-- Verify (as best we can) that the cost model for parallel VACUUM
+-- will make our VACUUM run in parallel, while always leaving it up to the
+-- parallel leader to handle the vacuum_in_leader_small_index index:
+SELECT EXISTS (
+SELECT 1
+FROM pg_class
+WHERE oid = 'vacuum_in_leader_small_index'::regclass AND
+ pg_relation_size(oid) <
+ pg_size_bytes(current_setting('min_parallel_index_scan_size'))
+) as leader_will_handle_small_index;
+SELECT count(*) as trigger_parallel_vacuum_nindexes
+FROM pg_class
+WHERE oid in ('regular_sized_index'::regclass, 'typically_sized_index'::regclass) AND
+ pg_relation_size(oid) >=
+ pg_size_bytes(current_setting('min_parallel_index_scan_size'));
+
+-- Parallel VACUUM with B-Tree page deletions, ambulkdelete calls:
+DELETE FROM parallel_vacuum_table;
+VACUUM (PARALLEL 4, INDEX_CLEANUP ON) parallel_vacuum_table;
+
+-- Since vacuum_in_leader_small_index uses deduplication, we expect an
+-- assertion failure with bug #17245 (in the absence of the bugfix):
+INSERT INTO parallel_vacuum_table SELECT i FROM generate_series(1, 10000) i;
+
+RESET max_parallel_maintenance_workers;
+RESET min_parallel_index_scan_size;
+
+-- Deliberately don't drop table, to get further coverage from tools like
+-- pg_amcheck in some testing scenarios