Diffstat (limited to 'src')
-rw-r--r--  src/backend/access/heap/vacuumlazy.c  | 1442
-rw-r--r--  src/backend/access/index/indexam.c    |    8
-rw-r--r--  src/backend/commands/vacuum.c         |   76
-rw-r--r--  src/include/access/genam.h            |    4
-rw-r--r--  src/include/access/heapam.h           |    2
-rw-r--r--  src/include/access/tableam.h          |    2
6 files changed, 814 insertions(+), 720 deletions(-)
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index efe8761702d..cc98ede5394 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -146,7 +146,7 @@
* Macro to check if we are in a parallel vacuum. If true, we are in the
* parallel mode and the DSM segment is initialized.
*/
-#define ParallelVacuumIsActive(lps) PointerIsValid(lps)
+#define ParallelVacuumIsActive(vacrel) ((vacrel)->lps != NULL)
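A minimal usage sketch (hypothetical call site, using helpers declared later in this patch) of how the reworked macro is meant to be consulted once an LVRelState has been populated:

    /* Sketch only: dispatch on whether lazy_space_alloc() set up vacrel->lps */
    if (ParallelVacuumIsActive(vacrel))
        do_parallel_lazy_vacuum_all_indexes(vacrel);    /* leader plus workers */
    else
    {
        for (int idx = 0; idx < vacrel->nindexes; idx++)
            vacrel->indstats[idx] =
                lazy_vacuum_one_index(vacrel->indrels[idx],
                                      vacrel->indstats[idx],
                                      vacrel->old_live_tuples, vacrel);
    }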
/* Phases of vacuum during which we report error context. */
typedef enum
@@ -264,7 +264,7 @@ typedef struct LVShared
typedef struct LVSharedIndStats
{
bool updated; /* are the stats updated? */
- IndexBulkDeleteResult stats;
+ IndexBulkDeleteResult istat;
} LVSharedIndStats;
/* Struct for maintaining a parallel vacuum state. */
@@ -290,41 +290,68 @@ typedef struct LVParallelState
int nindexes_parallel_condcleanup;
} LVParallelState;
-typedef struct LVRelStats
+typedef struct LVRelState
{
- char *relnamespace;
- char *relname;
+ /* Target heap relation and its indexes */
+ Relation rel;
+ Relation *indrels;
+ int nindexes;
/* useindex = true means two-pass strategy; false means one-pass */
bool useindex;
- /* Overall statistics about rel */
+
+ /* Buffer access strategy and parallel state */
+ BufferAccessStrategy bstrategy;
+ LVParallelState *lps;
+
+ /* Statistics from pg_class when we start out */
BlockNumber old_rel_pages; /* previous value of pg_class.relpages */
+ double old_live_tuples; /* previous value of pg_class.reltuples */
+ /* rel's initial relfrozenxid and relminmxid */
+ TransactionId relfrozenxid;
+ MultiXactId relminmxid;
+ TransactionId latestRemovedXid;
+
+ /* VACUUM operation's cutoff for pruning */
+ TransactionId OldestXmin;
+ /* VACUUM operation's cutoff for freezing XIDs and MultiXactIds */
+ TransactionId FreezeLimit;
+ MultiXactId MultiXactCutoff;
+
+ /* Error reporting state */
+ char *relnamespace;
+ char *relname;
+ char *indname;
+ BlockNumber blkno; /* used only for heap operations */
+ OffsetNumber offnum; /* used only for heap operations */
+ VacErrPhase phase;
+
+ /*
+ * State managed by lazy_scan_heap() follows
+ */
+ LVDeadTuples *dead_tuples; /* items to vacuum from indexes */
BlockNumber rel_pages; /* total number of pages */
BlockNumber scanned_pages; /* number of pages we examined */
- BlockNumber pinskipped_pages; /* # of pages we skipped due to a pin */
+ BlockNumber pinskipped_pages; /* # of pages skipped due to a pin */
BlockNumber frozenskipped_pages; /* # of frozen pages we skipped */
BlockNumber tupcount_pages; /* pages whose tuples we counted */
- double old_live_tuples; /* previous value of pg_class.reltuples */
- double new_rel_tuples; /* new estimated total # of tuples */
- double new_live_tuples; /* new estimated total # of live tuples */
- double new_dead_tuples; /* new estimated total # of dead tuples */
- BlockNumber pages_removed;
- double tuples_deleted;
+ BlockNumber pages_removed; /* pages removed by truncation */
BlockNumber nonempty_pages; /* actually, last nonempty page + 1 */
- LVDeadTuples *dead_tuples;
- int num_index_scans;
- TransactionId latestRemovedXid;
bool lock_waiter_detected;
- /* Statistics about indexes */
+ /* Statistics output by us, for table */
+ double new_rel_tuples; /* new estimated total # of tuples */
+ double new_live_tuples; /* new estimated total # of live tuples */
+ /* Statistics output by index AMs */
IndexBulkDeleteResult **indstats;
- int nindexes;
- /* Used for error callback */
- char *indname;
- BlockNumber blkno; /* used only for heap operations */
- OffsetNumber offnum; /* used only for heap operations */
- VacErrPhase phase;
-} LVRelStats;
+ /* Instrumentation counters */
+ int num_index_scans;
+ int64 tuples_deleted; /* # deleted from table */
+ int64 new_dead_tuples; /* new estimated total # of dead items in
+ * table */
+ int64 num_tuples; /* total number of nonremovable tuples */
+ int64 live_tuples; /* live tuples (reltuples estimate) */
+} LVRelState;
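As a rough sketch of the intended call pattern (condensed from heap_vacuum_rel() further down in this patch), one LVRelState is now built up front and every helper receives just that pointer:

    /* Sketch only -- field assignments copied from heap_vacuum_rel() below */
    LVRelState *vacrel = (LVRelState *) palloc0(sizeof(LVRelState));

    vacrel->rel = rel;
    vac_open_indexes(vacrel->rel, RowExclusiveLock, &vacrel->nindexes,
                     &vacrel->indrels);
    vacrel->bstrategy = bstrategy;
    vacrel->OldestXmin = OldestXmin;        /* cutoffs live in the state now */
    vacrel->FreezeLimit = FreezeLimit;
    vacrel->MultiXactCutoff = MultiXactCutoff;

    lazy_scan_heap(vacrel, params, aggressive);     /* no separate Irel/nindexes */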
/* Struct for saving and restoring vacuum error information. */
typedef struct LVSavedErrInfo
@@ -334,77 +361,72 @@ typedef struct LVSavedErrInfo
VacErrPhase phase;
} LVSavedErrInfo;
-/* A few variables that don't seem worth passing around as parameters */
+/* elevel controls whole VACUUM's verbosity */
static int elevel = -1;
-static TransactionId OldestXmin;
-static TransactionId FreezeLimit;
-static MultiXactId MultiXactCutoff;
-
-static BufferAccessStrategy vac_strategy;
-
/* non-export function prototypes */
-static void lazy_scan_heap(Relation onerel, VacuumParams *params,
- LVRelStats *vacrelstats, Relation *Irel, int nindexes,
+static void lazy_scan_heap(LVRelState *vacrel, VacuumParams *params,
bool aggressive);
-static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
+static void lazy_vacuum_all_indexes(LVRelState *vacrel);
+static void lazy_vacuum_heap_rel(LVRelState *vacrel);
+static int lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno,
+ Buffer buffer, int tupindex, Buffer *vmbuffer);
static bool lazy_check_needs_freeze(Buffer buf, bool *hastup,
- LVRelStats *vacrelstats);
-static void lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
- LVRelStats *vacrelstats, LVParallelState *lps,
- int nindexes);
-static void lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
- LVDeadTuples *dead_tuples, double reltuples, LVRelStats *vacrelstats);
-static void lazy_cleanup_index(Relation indrel,
- IndexBulkDeleteResult **stats,
- double reltuples, bool estimated_count, LVRelStats *vacrelstats);
-static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
- int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
-static bool should_attempt_truncation(VacuumParams *params,
- LVRelStats *vacrelstats);
-static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
-static BlockNumber count_nondeletable_pages(Relation onerel,
- LVRelStats *vacrelstats);
-static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks);
+ LVRelState *vacrel);
+static void do_parallel_lazy_vacuum_all_indexes(LVRelState *vacrel);
+static void do_parallel_lazy_cleanup_all_indexes(LVRelState *vacrel);
+static void do_parallel_vacuum_or_cleanup(LVRelState *vacrel, int nworkers);
+static void do_parallel_processing(LVRelState *vacrel,
+ LVShared *lvshared);
+static void do_serial_processing_for_unsafe_indexes(LVRelState *vacrel,
+ LVShared *lvshared);
+static IndexBulkDeleteResult *parallel_process_one_index(Relation indrel,
+ IndexBulkDeleteResult *istat,
+ LVShared *lvshared,
+ LVSharedIndStats *shared_indstats,
+ LVRelState *vacrel);
+static void lazy_cleanup_all_indexes(LVRelState *vacrel);
+static IndexBulkDeleteResult *lazy_vacuum_one_index(Relation indrel,
+ IndexBulkDeleteResult *istat,
+ double reltuples,
+ LVRelState *vacrel);
+static IndexBulkDeleteResult *lazy_cleanup_one_index(Relation indrel,
+ IndexBulkDeleteResult *istat,
+ double reltuples,
+ bool estimated_count,
+ LVRelState *vacrel);
+static bool should_attempt_truncation(LVRelState *vacrel,
+ VacuumParams *params);
+static void lazy_truncate_heap(LVRelState *vacrel);
+static BlockNumber count_nondeletable_pages(LVRelState *vacrel);
+static long compute_max_dead_tuples(BlockNumber relblocks, bool hasindex);
+static void lazy_space_alloc(LVRelState *vacrel, int nworkers,
+ BlockNumber relblocks);
+static void lazy_space_free(LVRelState *vacrel);
static void lazy_record_dead_tuple(LVDeadTuples *dead_tuples,
ItemPointer itemptr);
static bool lazy_tid_reaped(ItemPointer itemptr, void *state);
static int vac_cmp_itemptr(const void *left, const void *right);
-static bool heap_page_is_all_visible(Relation rel, Buffer buf,
- LVRelStats *vacrelstats,
+static bool heap_page_is_all_visible(LVRelState *vacrel, Buffer buf,
TransactionId *visibility_cutoff_xid, bool *all_frozen);
-static void lazy_parallel_vacuum_indexes(Relation *Irel, LVRelStats *vacrelstats,
- LVParallelState *lps, int nindexes);
-static void parallel_vacuum_index(Relation *Irel, LVShared *lvshared,
- LVDeadTuples *dead_tuples, int nindexes,
- LVRelStats *vacrelstats);
-static void vacuum_indexes_leader(Relation *Irel, LVRelStats *vacrelstats,
- LVParallelState *lps, int nindexes);
-static void vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
- LVShared *lvshared, LVSharedIndStats *shared_indstats,
- LVDeadTuples *dead_tuples, LVRelStats *vacrelstats);
-static void lazy_cleanup_all_indexes(Relation *Irel, LVRelStats *vacrelstats,
- LVParallelState *lps, int nindexes);
-static long compute_max_dead_tuples(BlockNumber relblocks, bool hasindex);
-static int compute_parallel_vacuum_workers(Relation *Irel, int nindexes, int nrequested,
+static int compute_parallel_vacuum_workers(LVRelState *vacrel,
+ int nrequested,
bool *can_parallel_vacuum);
-static void prepare_index_statistics(LVShared *lvshared, bool *can_parallel_vacuum,
- int nindexes);
-static void update_index_statistics(Relation *Irel, IndexBulkDeleteResult **stats,
- int nindexes);
-static LVParallelState *begin_parallel_vacuum(Oid relid, Relation *Irel,
- LVRelStats *vacrelstats, BlockNumber nblocks,
- int nindexes, int nrequested);
-static void end_parallel_vacuum(IndexBulkDeleteResult **stats,
- LVParallelState *lps, int nindexes);
-static LVSharedIndStats *get_indstats(LVShared *lvshared, int n);
-static bool skip_parallel_vacuum_index(Relation indrel, LVShared *lvshared);
+static void update_index_statistics(LVRelState *vacrel);
+static LVParallelState *begin_parallel_vacuum(LVRelState *vacrel,
+ BlockNumber nblocks,
+ int nrequested);
+static void end_parallel_vacuum(LVRelState *vacrel);
+static LVSharedIndStats *parallel_stats_for_idx(LVShared *lvshared, int getidx);
+static bool parallel_processing_is_safe(Relation indrel, LVShared *lvshared);
static void vacuum_error_callback(void *arg);
-static void update_vacuum_error_info(LVRelStats *errinfo, LVSavedErrInfo *saved_err_info,
+static void update_vacuum_error_info(LVRelState *vacrel,
+ LVSavedErrInfo *saved_vacrel,
int phase, BlockNumber blkno,
OffsetNumber offnum);
-static void restore_vacuum_error_info(LVRelStats *errinfo, const LVSavedErrInfo *saved_err_info);
+static void restore_vacuum_error_info(LVRelState *vacrel,
+ const LVSavedErrInfo *saved_vacrel);
/*
@@ -417,12 +439,10 @@ static void restore_vacuum_error_info(LVRelStats *errinfo, const LVSavedErrInfo
* and locked the relation.
*/
void
-heap_vacuum_rel(Relation onerel, VacuumParams *params,
+heap_vacuum_rel(Relation rel, VacuumParams *params,
BufferAccessStrategy bstrategy)
{
- LVRelStats *vacrelstats;
- Relation *Irel;
- int nindexes;
+ LVRelState *vacrel;
PGRUsage ru0;
TimestampTz starttime = 0;
WalUsage walusage_start = pgWalUsage;
@@ -444,15 +464,14 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
ErrorContextCallback errcallback;
PgStat_Counter startreadtime = 0;
PgStat_Counter startwritetime = 0;
+ TransactionId OldestXmin;
+ TransactionId FreezeLimit;
+ MultiXactId MultiXactCutoff;
Assert(params != NULL);
Assert(params->index_cleanup != VACOPT_TERNARY_DEFAULT);
Assert(params->truncate != VACOPT_TERNARY_DEFAULT);
- /* not every AM requires these to be valid, but heap does */
- Assert(TransactionIdIsNormal(onerel->rd_rel->relfrozenxid));
- Assert(MultiXactIdIsValid(onerel->rd_rel->relminmxid));
-
/* measure elapsed time iff autovacuum logging requires it */
if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
{
@@ -471,11 +490,9 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
elevel = DEBUG2;
pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM,
- RelationGetRelid(onerel));
-
- vac_strategy = bstrategy;
+ RelationGetRelid(rel));
- vacuum_set_xid_limits(onerel,
+ vacuum_set_xid_limits(rel,
params->freeze_min_age,
params->freeze_table_age,
params->multixact_freeze_min_age,
@@ -489,42 +506,46 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
* table's minimum MultiXactId is older than or equal to the requested
* mxid full-table scan limit; or if DISABLE_PAGE_SKIPPING was specified.
*/
- aggressive = TransactionIdPrecedesOrEquals(onerel->rd_rel->relfrozenxid,
+ aggressive = TransactionIdPrecedesOrEquals(rel->rd_rel->relfrozenxid,
xidFullScanLimit);
- aggressive |= MultiXactIdPrecedesOrEquals(onerel->rd_rel->relminmxid,
+ aggressive |= MultiXactIdPrecedesOrEquals(rel->rd_rel->relminmxid,
mxactFullScanLimit);
if (params->options & VACOPT_DISABLE_PAGE_SKIPPING)
aggressive = true;
- vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));
-
- vacrelstats->relnamespace = get_namespace_name(RelationGetNamespace(onerel));
- vacrelstats->relname = pstrdup(RelationGetRelationName(onerel));
- vacrelstats->indname = NULL;
- vacrelstats->phase = VACUUM_ERRCB_PHASE_UNKNOWN;
- vacrelstats->old_rel_pages = onerel->rd_rel->relpages;
- vacrelstats->old_live_tuples = onerel->rd_rel->reltuples;
- vacrelstats->num_index_scans = 0;
- vacrelstats->pages_removed = 0;
- vacrelstats->lock_waiter_detected = false;
-
- /* Open all indexes of the relation */
- vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &Irel);
- vacrelstats->useindex = (nindexes > 0 &&
- params->index_cleanup == VACOPT_TERNARY_ENABLED);
-
- vacrelstats->indstats = (IndexBulkDeleteResult **)
- palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
- vacrelstats->nindexes = nindexes;
+ vacrel = (LVRelState *) palloc0(sizeof(LVRelState));
+
+ /* Set up high level stuff about rel */
+ vacrel->rel = rel;
+ vac_open_indexes(vacrel->rel, RowExclusiveLock, &vacrel->nindexes,
+ &vacrel->indrels);
+ vacrel->useindex = (vacrel->nindexes > 0 &&
+ params->index_cleanup == VACOPT_TERNARY_ENABLED);
+ vacrel->bstrategy = bstrategy;
+ vacrel->old_rel_pages = rel->rd_rel->relpages;
+ vacrel->old_live_tuples = rel->rd_rel->reltuples;
+ vacrel->relfrozenxid = rel->rd_rel->relfrozenxid;
+ vacrel->relminmxid = rel->rd_rel->relminmxid;
+ vacrel->latestRemovedXid = InvalidTransactionId;
+
+ /* Set cutoffs for entire VACUUM */
+ vacrel->OldestXmin = OldestXmin;
+ vacrel->FreezeLimit = FreezeLimit;
+ vacrel->MultiXactCutoff = MultiXactCutoff;
+
+ vacrel->relnamespace = get_namespace_name(RelationGetNamespace(rel));
+ vacrel->relname = pstrdup(RelationGetRelationName(rel));
+ vacrel->indname = NULL;
+ vacrel->phase = VACUUM_ERRCB_PHASE_UNKNOWN;
/* Save index names iff autovacuum logging requires it */
- if (IsAutoVacuumWorkerProcess() &&
- params->log_min_duration >= 0 &&
- vacrelstats->nindexes > 0)
+ if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0 &&
+ vacrel->nindexes > 0)
{
- indnames = palloc(sizeof(char *) * vacrelstats->nindexes);
- for (int i = 0; i < vacrelstats->nindexes; i++)
- indnames[i] = pstrdup(RelationGetRelationName(Irel[i]));
+ indnames = palloc(sizeof(char *) * vacrel->nindexes);
+ for (int i = 0; i < vacrel->nindexes; i++)
+ indnames[i] =
+ pstrdup(RelationGetRelationName(vacrel->indrels[i]));
}
/*
@@ -539,15 +560,15 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
* information is restored at the end of those phases.
*/
errcallback.callback = vacuum_error_callback;
- errcallback.arg = vacrelstats;
+ errcallback.arg = vacrel;
errcallback.previous = error_context_stack;
error_context_stack = &errcallback;
/* Do the vacuuming */
- lazy_scan_heap(onerel, params, vacrelstats, Irel, nindexes, aggressive);
+ lazy_scan_heap(vacrel, params, aggressive);
/* Done with indexes */
- vac_close_indexes(nindexes, Irel, NoLock);
+ vac_close_indexes(vacrel->nindexes, vacrel->indrels, NoLock);
/*
* Compute whether we actually scanned all of the unfrozen pages. If we did,
@@ -556,8 +577,8 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
* NB: We need to check this before truncating the relation, because that
* will change ->rel_pages.
*/
- if ((vacrelstats->scanned_pages + vacrelstats->frozenskipped_pages)
- < vacrelstats->rel_pages)
+ if ((vacrel->scanned_pages + vacrel->frozenskipped_pages)
+ < vacrel->rel_pages)
{
Assert(!aggressive);
scanned_all_unfrozen = false;
@@ -568,17 +589,17 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
/*
* Optionally truncate the relation.
*/
- if (should_attempt_truncation(params, vacrelstats))
+ if (should_attempt_truncation(vacrel, params))
{
/*
* Update error traceback information. This is the last phase during
* which we add context information to errors, so we don't need to
* revert to the previous phase.
*/
- update_vacuum_error_info(vacrelstats, NULL, VACUUM_ERRCB_PHASE_TRUNCATE,
- vacrelstats->nonempty_pages,
+ update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_TRUNCATE,
+ vacrel->nonempty_pages,
InvalidOffsetNumber);
- lazy_truncate_heap(onerel, vacrelstats);
+ lazy_truncate_heap(vacrel);
}
/* Pop the error context stack */
@@ -602,30 +623,30 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
* Also, don't change relfrozenxid/relminmxid if we skipped any pages,
* since then we don't know for certain that all tuples have a newer xmin.
*/
- new_rel_pages = vacrelstats->rel_pages;
- new_live_tuples = vacrelstats->new_live_tuples;
+ new_rel_pages = vacrel->rel_pages;
+ new_live_tuples = vacrel->new_live_tuples;
- visibilitymap_count(onerel, &new_rel_allvisible, NULL);
+ visibilitymap_count(rel, &new_rel_allvisible, NULL);
if (new_rel_allvisible > new_rel_pages)
new_rel_allvisible = new_rel_pages;
new_frozen_xid = scanned_all_unfrozen ? FreezeLimit : InvalidTransactionId;
new_min_multi = scanned_all_unfrozen ? MultiXactCutoff : InvalidMultiXactId;
- vac_update_relstats(onerel,
+ vac_update_relstats(rel,
new_rel_pages,
new_live_tuples,
new_rel_allvisible,
- nindexes > 0,
+ vacrel->nindexes > 0,
new_frozen_xid,
new_min_multi,
false);
/* report results to the stats collector, too */
- pgstat_report_vacuum(RelationGetRelid(onerel),
- onerel->rd_rel->relisshared,
+ pgstat_report_vacuum(RelationGetRelid(rel),
+ rel->rd_rel->relisshared,
Max(new_live_tuples, 0),
- vacrelstats->new_dead_tuples);
+ vacrel->new_dead_tuples);
pgstat_progress_end_command();
/* and log the action if appropriate */
@@ -662,6 +683,12 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
initStringInfo(&buf);
if (params->is_wraparound)
{
+ /*
+ * While it's possible for a VACUUM to be both is_wraparound
+ * and !aggressive, that's just a corner-case -- is_wraparound
+ * implies aggressive. Produce distinct output for the corner
+ * case all the same, just in case.
+ */
if (aggressive)
msgfmt = _("automatic aggressive vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n");
else
@@ -676,39 +703,39 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
}
appendStringInfo(&buf, msgfmt,
get_database_name(MyDatabaseId),
- vacrelstats->relnamespace,
- vacrelstats->relname,
- vacrelstats->num_index_scans);
+ vacrel->relnamespace,
+ vacrel->relname,
+ vacrel->num_index_scans);
appendStringInfo(&buf, _("pages: %u removed, %u remain, %u skipped due to pins, %u skipped frozen\n"),
- vacrelstats->pages_removed,
- vacrelstats->rel_pages,
- vacrelstats->pinskipped_pages,
- vacrelstats->frozenskipped_pages);
+ vacrel->pages_removed,
+ vacrel->rel_pages,
+ vacrel->pinskipped_pages,
+ vacrel->frozenskipped_pages);
appendStringInfo(&buf,
- _("tuples: %.0f removed, %.0f remain, %.0f are dead but not yet removable, oldest xmin: %u\n"),
- vacrelstats->tuples_deleted,
- vacrelstats->new_rel_tuples,
- vacrelstats->new_dead_tuples,
+ _("tuples: %lld removed, %lld remain, %lld are dead but not yet removable, oldest xmin: %u\n"),
+ (long long) vacrel->tuples_deleted,
+ (long long) vacrel->new_rel_tuples,
+ (long long) vacrel->new_dead_tuples,
OldestXmin);
appendStringInfo(&buf,
_("buffer usage: %lld hits, %lld misses, %lld dirtied\n"),
(long long) VacuumPageHit,
(long long) VacuumPageMiss,
(long long) VacuumPageDirty);
- for (int i = 0; i < vacrelstats->nindexes; i++)
+ for (int i = 0; i < vacrel->nindexes; i++)
{
- IndexBulkDeleteResult *stats = vacrelstats->indstats[i];
+ IndexBulkDeleteResult *istat = vacrel->indstats[i];
- if (!stats)
+ if (!istat)
continue;
appendStringInfo(&buf,
_("index \"%s\": pages: %u in total, %u newly deleted, %u currently deleted, %u reusable\n"),
indnames[i],
- stats->num_pages,
- stats->pages_newly_deleted,
- stats->pages_deleted,
- stats->pages_free);
+ istat->num_pages,
+ istat->pages_newly_deleted,
+ istat->pages_deleted,
+ istat->pages_free);
}
appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
read_rate, write_rate);
@@ -737,10 +764,10 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
}
/* Cleanup index statistics and index names */
- for (int i = 0; i < vacrelstats->nindexes; i++)
+ for (int i = 0; i < vacrel->nindexes; i++)
{
- if (vacrelstats->indstats[i])
- pfree(vacrelstats->indstats[i]);
+ if (vacrel->indstats[i])
+ pfree(vacrel->indstats[i]);
if (indnames && indnames[i])
pfree(indnames[i]);
@@ -764,20 +791,21 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
* which would be after the rows have become inaccessible.
*/
static void
-vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
+vacuum_log_cleanup_info(LVRelState *vacrel)
{
/*
* Skip this for relations for which no WAL is to be written, or if we're
* not trying to support archive recovery.
*/
- if (!RelationNeedsWAL(rel) || !XLogIsNeeded())
+ if (!RelationNeedsWAL(vacrel->rel) || !XLogIsNeeded())
return;
/*
* No need to write the record at all unless it contains a valid value
*/
- if (TransactionIdIsValid(vacrelstats->latestRemovedXid))
- (void) log_heap_cleanup_info(rel->rd_node, vacrelstats->latestRemovedXid);
+ if (TransactionIdIsValid(vacrel->latestRemovedXid))
+ (void) log_heap_cleanup_info(vacrel->rel->rd_node,
+ vacrel->latestRemovedXid);
}
/*
@@ -788,9 +816,9 @@ vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
* page, and set commit status bits (see heap_page_prune). It also builds
* lists of dead tuples and pages with free space, calculates statistics
* on the number of live tuples in the heap, and marks pages as
- * all-visible if appropriate. When done, or when we run low on space for
- * dead-tuple TIDs, invoke vacuuming of indexes and call lazy_vacuum_heap
- * to reclaim dead line pointers.
+ * all-visible if appropriate. When done, or when we run low on space
+ * for dead-tuple TIDs, invoke vacuuming of indexes and reclaim dead line
+ * pointers.
*
* If the table has at least two indexes, we execute both index vacuum
* and index cleanup with parallel workers unless parallel vacuum is
@@ -809,16 +837,12 @@ vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
* reference them have been killed.
*/
static void
-lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
- Relation *Irel, int nindexes, bool aggressive)
+lazy_scan_heap(LVRelState *vacrel, VacuumParams *params, bool aggressive)
{
- LVParallelState *lps = NULL;
LVDeadTuples *dead_tuples;
BlockNumber nblocks,
blkno;
HeapTupleData tuple;
- TransactionId relfrozenxid = onerel->rd_rel->relfrozenxid;
- TransactionId relminmxid = onerel->rd_rel->relminmxid;
BlockNumber empty_pages,
vacuumed_pages,
next_fsm_block_to_vacuum;
@@ -847,63 +871,47 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
if (aggressive)
ereport(elevel,
(errmsg("aggressively vacuuming \"%s.%s\"",
- vacrelstats->relnamespace,
- vacrelstats->relname)));
+ vacrel->relnamespace,
+ vacrel->relname)));
else
ereport(elevel,
(errmsg("vacuuming \"%s.%s\"",
- vacrelstats->relnamespace,
- vacrelstats->relname)));
+ vacrel->relnamespace,
+ vacrel->relname)));
empty_pages = vacuumed_pages = 0;
next_fsm_block_to_vacuum = (BlockNumber) 0;
num_tuples = live_tuples = tups_vacuumed = nkeep = nunused = 0;
- nblocks = RelationGetNumberOfBlocks(onerel);
- vacrelstats->rel_pages = nblocks;
- vacrelstats->scanned_pages = 0;
- vacrelstats->tupcount_pages = 0;
- vacrelstats->nonempty_pages = 0;
- vacrelstats->latestRemovedXid = InvalidTransactionId;
+ nblocks = RelationGetNumberOfBlocks(vacrel->rel);
+ vacrel->rel_pages = nblocks;
+ vacrel->scanned_pages = 0;
+ vacrel->pinskipped_pages = 0;
+ vacrel->frozenskipped_pages = 0;
+ vacrel->tupcount_pages = 0;
+ vacrel->pages_removed = 0;
+ vacrel->nonempty_pages = 0;
+ vacrel->lock_waiter_detected = false;
- vistest = GlobalVisTestFor(onerel);
+ /* Initialize instrumentation counters */
+ vacrel->num_index_scans = 0;
+ vacrel->tuples_deleted = 0;
+ vacrel->new_dead_tuples = 0;
+ vacrel->num_tuples = 0;
+ vacrel->live_tuples = 0;
- /*
- * Initialize state for a parallel vacuum. As of now, only one worker can
- * be used for an index, so we invoke parallelism only if there are at
- * least two indexes on a table.
- */
- if (params->nworkers >= 0 && vacrelstats->useindex && nindexes > 1)
- {
- /*
- * Since parallel workers cannot access data in temporary tables, we
- * can't perform parallel vacuum on them.
- */
- if (RelationUsesLocalBuffers(onerel))
- {
- /*
- * Give warning only if the user explicitly tries to perform a
- * parallel vacuum on the temporary table.
- */
- if (params->nworkers > 0)
- ereport(WARNING,
- (errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
- vacrelstats->relname)));
- }
- else
- lps = begin_parallel_vacuum(RelationGetRelid(onerel), Irel,
- vacrelstats, nblocks, nindexes,
- params->nworkers);
- }
+ vistest = GlobalVisTestFor(vacrel->rel);
+
+ vacrel->indstats = (IndexBulkDeleteResult **)
+ palloc0(vacrel->nindexes * sizeof(IndexBulkDeleteResult *));
/*
- * Allocate the space for dead tuples in case parallel vacuum is not
- * initialized.
+ * Allocate the space for dead tuples. Note that this handles parallel
+ * VACUUM initialization as part of allocating shared memory space used
+ * for dead_tuples.
*/
- if (!ParallelVacuumIsActive(lps))
- lazy_space_alloc(vacrelstats, nblocks);
-
- dead_tuples = vacrelstats->dead_tuples;
+ lazy_space_alloc(vacrel, params->nworkers, nblocks);
+ dead_tuples = vacrel->dead_tuples;
frozen = palloc(sizeof(xl_heap_freeze_tuple) * MaxHeapTuplesPerPage);
/* Report that we're scanning the heap, advertising total # of blocks */
@@ -963,7 +971,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
{
uint8 vmstatus;
- vmstatus = visibilitymap_get_status(onerel, next_unskippable_block,
+ vmstatus = visibilitymap_get_status(vacrel->rel,
+ next_unskippable_block,
&vmbuffer);
if (aggressive)
{
@@ -1004,11 +1013,11 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
/* see note above about forcing scanning of last page */
#define FORCE_CHECK_PAGE() \
- (blkno == nblocks - 1 && should_attempt_truncation(params, vacrelstats))
+ (blkno == nblocks - 1 && should_attempt_truncation(vacrel, params))
pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
- update_vacuum_error_info(vacrelstats, NULL, VACUUM_ERRCB_PHASE_SCAN_HEAP,
+ update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_SCAN_HEAP,
blkno, InvalidOffsetNumber);
if (blkno == next_unskippable_block)
@@ -1021,7 +1030,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
{
uint8 vmskipflags;
- vmskipflags = visibilitymap_get_status(onerel,
+ vmskipflags = visibilitymap_get_status(vacrel->rel,
next_unskippable_block,
&vmbuffer);
if (aggressive)
@@ -1053,7 +1062,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
* it's not all-visible. But in an aggressive vacuum we know only
* that it's not all-frozen, so it might still be all-visible.
*/
- if (aggressive && VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
+ if (aggressive && VM_ALL_VISIBLE(vacrel->rel, blkno, &vmbuffer))
all_visible_according_to_vm = true;
}
else
@@ -1077,8 +1086,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
* know whether it was all-frozen, so we have to recheck; but
* in this case an approximate answer is OK.
*/
- if (aggressive || VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
- vacrelstats->frozenskipped_pages++;
+ if (aggressive || VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
+ vacrel->frozenskipped_pages++;
continue;
}
all_visible_according_to_vm = true;
@@ -1106,10 +1115,10 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
}
/* Work on all the indexes, then the heap */
- lazy_vacuum_all_indexes(onerel, Irel, vacrelstats, lps, nindexes);
+ lazy_vacuum_all_indexes(vacrel);
/* Remove tuples from heap */
- lazy_vacuum_heap(onerel, vacrelstats);
+ lazy_vacuum_heap_rel(vacrel);
/*
* Forget the now-vacuumed tuples, and press on, but be careful
@@ -1122,7 +1131,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
* Vacuum the Free Space Map to make newly-freed space visible on
* upper-level FSM pages. Note we have not yet processed blkno.
*/
- FreeSpaceMapVacuumRange(onerel, next_fsm_block_to_vacuum, blkno);
+ FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
+ blkno);
next_fsm_block_to_vacuum = blkno;
/* Report that we are once again scanning the heap */
@@ -1137,12 +1147,11 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
* possible that (a) next_unskippable_block is covered by a different
* VM page than the current block or (b) we released our pin and did a
* cycle of index vacuuming.
- *
*/
- visibilitymap_pin(onerel, blkno, &vmbuffer);
+ visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
- buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
- RBM_NORMAL, vac_strategy);
+ buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno,
+ RBM_NORMAL, vacrel->bstrategy);
/* We need buffer cleanup lock so that we can prune HOT chains. */
if (!ConditionalLockBufferForCleanup(buf))
@@ -1156,7 +1165,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
if (!aggressive && !FORCE_CHECK_PAGE())
{
ReleaseBuffer(buf);
- vacrelstats->pinskipped_pages++;
+ vacrel->pinskipped_pages++;
continue;
}
@@ -1177,13 +1186,13 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
* to use lazy_check_needs_freeze() for both situations, though.
*/
LockBuffer(buf, BUFFER_LOCK_SHARE);
- if (!lazy_check_needs_freeze(buf, &hastup, vacrelstats))
+ if (!lazy_check_needs_freeze(buf, &hastup, vacrel))
{
UnlockReleaseBuffer(buf);
- vacrelstats->scanned_pages++;
- vacrelstats->pinskipped_pages++;
+ vacrel->scanned_pages++;
+ vacrel->pinskipped_pages++;
if (hastup)
- vacrelstats->nonempty_pages = blkno + 1;
+ vacrel->nonempty_pages = blkno + 1;
continue;
}
if (!aggressive)
@@ -1193,9 +1202,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
* to claiming that the page contains no freezable tuples.
*/
UnlockReleaseBuffer(buf);
- vacrelstats->pinskipped_pages++;
+ vacrel->pinskipped_pages++;
if (hastup)
- vacrelstats->nonempty_pages = blkno + 1;
+ vacrel->nonempty_pages = blkno + 1;
continue;
}
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
@@ -1203,8 +1212,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
/* drop through to normal processing */
}
- vacrelstats->scanned_pages++;
- vacrelstats->tupcount_pages++;
+ vacrel->scanned_pages++;
+ vacrel->tupcount_pages++;
page = BufferGetPage(buf);
@@ -1233,12 +1242,12 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
empty_pages++;
- if (GetRecordedFreeSpace(onerel, blkno) == 0)
+ if (GetRecordedFreeSpace(vacrel->rel, blkno) == 0)
{
Size freespace;
freespace = BufferGetPageSize(buf) - SizeOfPageHeaderData;
- RecordPageWithFreeSpace(onerel, blkno, freespace);
+ RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
}
continue;
}
@@ -1269,19 +1278,19 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
* page has been previously WAL-logged, and if not, do that
* now.
*/
- if (RelationNeedsWAL(onerel) &&
+ if (RelationNeedsWAL(vacrel->rel) &&
PageGetLSN(page) == InvalidXLogRecPtr)
log_newpage_buffer(buf, true);
PageSetAllVisible(page);
- visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
+ visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
vmbuffer, InvalidTransactionId,
VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
END_CRIT_SECTION();
}
UnlockReleaseBuffer(buf);
- RecordPageWithFreeSpace(onerel, blkno, freespace);
+ RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
continue;
}
@@ -1291,10 +1300,10 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
* We count tuples removed by the pruning step as removed by VACUUM
* (existing LP_DEAD line pointers don't count).
*/
- tups_vacuumed += heap_page_prune(onerel, buf, vistest,
+ tups_vacuumed += heap_page_prune(vacrel->rel, buf, vistest,
InvalidTransactionId, 0, false,
- &vacrelstats->latestRemovedXid,
- &vacrelstats->offnum);
+ &vacrel->latestRemovedXid,
+ &vacrel->offnum);
/*
* Now scan the page to collect vacuumable items and check for tuples
@@ -1321,7 +1330,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
* Set the offset number so that we can display it along with any
* error that occurred while processing this tuple.
*/
- vacrelstats->offnum = offnum;
+ vacrel->offnum = offnum;
itemid = PageGetItemId(page, offnum);
/* Unused items require no processing, but we count 'em */
@@ -1361,7 +1370,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
tuple.t_len = ItemIdGetLength(itemid);
- tuple.t_tableOid = RelationGetRelid(onerel);
+ tuple.t_tableOid = RelationGetRelid(vacrel->rel);
tupgone = false;
@@ -1376,7 +1385,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
* cases impossible (e.g. in-progress insert from the same
* transaction).
*/
- switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
+ switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->OldestXmin, buf))
{
case HEAPTUPLE_DEAD:
@@ -1446,7 +1455,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
* enough that everyone sees it as committed?
*/
xmin = HeapTupleHeaderGetXmin(tuple.t_data);
- if (!TransactionIdPrecedes(xmin, OldestXmin))
+ if (!TransactionIdPrecedes(xmin, vacrel->OldestXmin))
{
all_visible = false;
break;
@@ -1500,7 +1509,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
{
lazy_record_dead_tuple(dead_tuples, &(tuple.t_self));
HeapTupleHeaderAdvanceLatestRemovedXid(tuple.t_data,
- &vacrelstats->latestRemovedXid);
+ &vacrel->latestRemovedXid);
tups_vacuumed += 1;
has_dead_items = true;
}
@@ -1516,8 +1525,10 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
* freezing. Note we already have exclusive buffer lock.
*/
if (heap_prepare_freeze_tuple(tuple.t_data,
- relfrozenxid, relminmxid,
- FreezeLimit, MultiXactCutoff,
+ vacrel->relfrozenxid,
+ vacrel->relminmxid,
+ vacrel->FreezeLimit,
+ vacrel->MultiXactCutoff,
&frozen[nfrozen],
&tuple_totally_frozen))
frozen[nfrozen++].offset = offnum;
@@ -1531,7 +1542,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
* Clear the offset information once we have processed all the tuples
* on the page.
*/
- vacrelstats->offnum = InvalidOffsetNumber;
+ vacrel->offnum = InvalidOffsetNumber;
/*
* If we froze any tuples, mark the buffer dirty, and write a WAL
@@ -1557,12 +1568,12 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
}
/* Now WAL-log freezing if necessary */
- if (RelationNeedsWAL(onerel))
+ if (RelationNeedsWAL(vacrel->rel))
{
XLogRecPtr recptr;
- recptr = log_heap_freeze(onerel, buf, FreezeLimit,
- frozen, nfrozen);
+ recptr = log_heap_freeze(vacrel->rel, buf,
+ vacrel->FreezeLimit, frozen, nfrozen);
PageSetLSN(page, recptr);
}
@@ -1574,12 +1585,12 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
* doing a second scan. Also we don't do that but forget dead tuples
* when index cleanup is disabled.
*/
- if (!vacrelstats->useindex && dead_tuples->num_tuples > 0)
+ if (!vacrel->useindex && dead_tuples->num_tuples > 0)
{
- if (nindexes == 0)
+ if (vacrel->nindexes == 0)
{
/* Remove tuples from heap if the table has no index */
- lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats, &vmbuffer);
+ lazy_vacuum_heap_page(vacrel, blkno, buf, 0, &vmbuffer);
vacuumed_pages++;
has_dead_items = false;
}
@@ -1613,7 +1624,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
*/
if (blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
{
- FreeSpaceMapVacuumRange(onerel, next_fsm_block_to_vacuum,
+ FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
blkno);
next_fsm_block_to_vacuum = blkno;
}
@@ -1644,7 +1655,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
*/
PageSetAllVisible(page);
MarkBufferDirty(buf);
- visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
+ visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
vmbuffer, visibility_cutoff_xid, flags);
}
@@ -1656,11 +1667,11 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
* that something bad has happened.
*/
else if (all_visible_according_to_vm && !PageIsAllVisible(page)
- && VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
+ && VM_ALL_VISIBLE(vacrel->rel, blkno, &vmbuffer))
{
elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
- vacrelstats->relname, blkno);
- visibilitymap_clear(onerel, blkno, vmbuffer,
+ vacrel->relname, blkno);
+ visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
VISIBILITYMAP_VALID_BITS);
}
@@ -1682,10 +1693,10 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
else if (PageIsAllVisible(page) && has_dead_items)
{
elog(WARNING, "page containing dead tuples is marked as all-visible in relation \"%s\" page %u",
- vacrelstats->relname, blkno);
+ vacrel->relname, blkno);
PageClearAllVisible(page);
MarkBufferDirty(buf);
- visibilitymap_clear(onerel, blkno, vmbuffer,
+ visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
VISIBILITYMAP_VALID_BITS);
}
@@ -1695,14 +1706,14 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
* all_visible is true, so we must check both.
*/
else if (all_visible_according_to_vm && all_visible && all_frozen &&
- !VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
+ !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
{
/*
* We can pass InvalidTransactionId as the cutoff XID here,
* because setting the all-frozen bit doesn't cause recovery
* conflicts.
*/
- visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
+ visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
vmbuffer, InvalidTransactionId,
VISIBILITYMAP_ALL_FROZEN);
}
@@ -1711,43 +1722,42 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
/* Remember the location of the last page with nonremovable tuples */
if (hastup)
- vacrelstats->nonempty_pages = blkno + 1;
+ vacrel->nonempty_pages = blkno + 1;
/*
* If we remembered any tuples for deletion, then the page will be
- * visited again by lazy_vacuum_heap, which will compute and record
+ * visited again by lazy_vacuum_heap_rel, which will compute and record
* its post-compaction free space. If not, then we're done with this
* page, so remember its free space as-is. (This path will always be
* taken if there are no indexes.)
*/
if (dead_tuples->num_tuples == prev_dead_count)
- RecordPageWithFreeSpace(onerel, blkno, freespace);
+ RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
}
/* report that everything is scanned and vacuumed */
pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
/* Clear the block number information */
- vacrelstats->blkno = InvalidBlockNumber;
+ vacrel->blkno = InvalidBlockNumber;
pfree(frozen);
/* save stats for use later */
- vacrelstats->tuples_deleted = tups_vacuumed;
- vacrelstats->new_dead_tuples = nkeep;
+ vacrel->tuples_deleted = tups_vacuumed;
+ vacrel->new_dead_tuples = nkeep;
/* now we can compute the new value for pg_class.reltuples */
- vacrelstats->new_live_tuples = vac_estimate_reltuples(onerel,
- nblocks,
- vacrelstats->tupcount_pages,
- live_tuples);
+ vacrel->new_live_tuples = vac_estimate_reltuples(vacrel->rel, nblocks,
+ vacrel->tupcount_pages,
+ live_tuples);
/*
* Also compute the total number of surviving heap entries. In the
* (unlikely) scenario that new_live_tuples is -1, take it as zero.
*/
- vacrelstats->new_rel_tuples =
- Max(vacrelstats->new_live_tuples, 0) + vacrelstats->new_dead_tuples;
+ vacrel->new_rel_tuples =
+ Max(vacrel->new_live_tuples, 0) + vacrel->new_dead_tuples;
/*
* Release any remaining pin on visibility map page.
@@ -1763,10 +1773,10 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
if (dead_tuples->num_tuples > 0)
{
/* Work on all the indexes, and then the heap */
- lazy_vacuum_all_indexes(onerel, Irel, vacrelstats, lps, nindexes);
+ lazy_vacuum_all_indexes(vacrel);
/* Remove tuples from heap */
- lazy_vacuum_heap(onerel, vacrelstats);
+ lazy_vacuum_heap_rel(vacrel);
}
/*
@@ -1774,47 +1784,47 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
* not there were indexes.
*/
if (blkno > next_fsm_block_to_vacuum)
- FreeSpaceMapVacuumRange(onerel, next_fsm_block_to_vacuum, blkno);
+ FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum, blkno);
/* report all blocks vacuumed */
pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
/* Do post-vacuum cleanup */
- if (vacrelstats->useindex)
- lazy_cleanup_all_indexes(Irel, vacrelstats, lps, nindexes);
+ if (vacrel->useindex)
+ lazy_cleanup_all_indexes(vacrel);
/*
- * End parallel mode before updating index statistics as we cannot write
- * during parallel mode.
+ * Free resources managed by lazy_space_alloc(). (We must end parallel
+ * mode/free shared memory before updating index statistics. We cannot
+ * write while in parallel mode.)
*/
- if (ParallelVacuumIsActive(lps))
- end_parallel_vacuum(vacrelstats->indstats, lps, nindexes);
+ lazy_space_free(vacrel);
/* Update index statistics */
- if (vacrelstats->useindex)
- update_index_statistics(Irel, vacrelstats->indstats, nindexes);
+ if (vacrel->useindex)
+ update_index_statistics(vacrel);
- /* If no indexes, make log report that lazy_vacuum_heap would've made */
+ /* If no indexes, make log report that lazy_vacuum_heap_rel would've made */
if (vacuumed_pages)
ereport(elevel,
(errmsg("\"%s\": removed %.0f row versions in %u pages",
- vacrelstats->relname,
+ vacrel->relname,
tups_vacuumed, vacuumed_pages)));
initStringInfo(&buf);
appendStringInfo(&buf,
_("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"),
- nkeep, OldestXmin);
+ nkeep, vacrel->OldestXmin);
appendStringInfo(&buf, _("There were %.0f unused item identifiers.\n"),
nunused);
appendStringInfo(&buf, ngettext("Skipped %u page due to buffer pins, ",
"Skipped %u pages due to buffer pins, ",
- vacrelstats->pinskipped_pages),
- vacrelstats->pinskipped_pages);
+ vacrel->pinskipped_pages),
+ vacrel->pinskipped_pages);
appendStringInfo(&buf, ngettext("%u frozen page.\n",
"%u frozen pages.\n",
- vacrelstats->frozenskipped_pages),
- vacrelstats->frozenskipped_pages);
+ vacrel->frozenskipped_pages),
+ vacrel->frozenskipped_pages);
appendStringInfo(&buf, ngettext("%u page is entirely empty.\n",
"%u pages are entirely empty.\n",
empty_pages),
@@ -1823,82 +1833,71 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
ereport(elevel,
(errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages",
- vacrelstats->relname,
+ vacrel->relname,
tups_vacuumed, num_tuples,
- vacrelstats->scanned_pages, nblocks),
+ vacrel->scanned_pages, nblocks),
errdetail_internal("%s", buf.data)));
pfree(buf.data);
}
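Stripped to its control flow, the two-pass strategy that lazy_scan_heap() drives can be sketched as follows (condensed outline; "dead_tuples_space_is_low" stands in for the real free-space check):

    /* Outline only, not literal code from this function */
    lazy_space_alloc(vacrel, params->nworkers, nblocks);  /* dead_tuples, maybe DSM */
    for (blkno = 0; blkno < nblocks; blkno++)
    {
        /* prune the page, remember LP_DEAD items in vacrel->dead_tuples ... */
        if (dead_tuples_space_is_low)
        {
            lazy_vacuum_all_indexes(vacrel);   /* one pass over every index */
            lazy_vacuum_heap_rel(vacrel);      /* second heap pass: mark items unused */
        }
    }
    if (vacrel->dead_tuples->num_tuples > 0)
    {
        lazy_vacuum_all_indexes(vacrel);       /* final round */
        lazy_vacuum_heap_rel(vacrel);
    }
    if (vacrel->useindex)
        lazy_cleanup_all_indexes(vacrel);
    lazy_space_free(vacrel);                   /* also ends parallel mode, if any */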
/*
- * lazy_vacuum_all_indexes() -- vacuum all indexes of relation.
- *
- * We process the indexes serially unless we are doing parallel vacuum.
+ * lazy_vacuum_all_indexes() -- Main entry for index vacuuming
*/
static void
-lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
- LVRelStats *vacrelstats, LVParallelState *lps,
- int nindexes)
+lazy_vacuum_all_indexes(LVRelState *vacrel)
{
Assert(!IsParallelWorker());
- Assert(nindexes > 0);
+ Assert(vacrel->nindexes > 0);
+ Assert(TransactionIdIsNormal(vacrel->relfrozenxid));
+ Assert(MultiXactIdIsValid(vacrel->relminmxid));
/* Log cleanup info before we touch indexes */
- vacuum_log_cleanup_info(onerel, vacrelstats);
+ vacuum_log_cleanup_info(vacrel);
/* Report that we are now vacuuming indexes */
pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
- /* Perform index vacuuming with parallel workers for parallel vacuum. */
- if (ParallelVacuumIsActive(lps))
+ if (!ParallelVacuumIsActive(vacrel))
{
- /* Tell parallel workers to do index vacuuming */
- lps->lvshared->for_cleanup = false;
- lps->lvshared->first_time = false;
-
- /*
- * We can only provide an approximate value of num_heap_tuples in
- * vacuum cases.
- */
- lps->lvshared->reltuples = vacrelstats->old_live_tuples;
- lps->lvshared->estimated_count = true;
+ for (int idx = 0; idx < vacrel->nindexes; idx++)
+ {
+ Relation indrel = vacrel->indrels[idx];
+ IndexBulkDeleteResult *istat = vacrel->indstats[idx];
- lazy_parallel_vacuum_indexes(Irel, vacrelstats, lps, nindexes);
+ vacrel->indstats[idx] =
+ lazy_vacuum_one_index(indrel, istat, vacrel->old_live_tuples,
+ vacrel);
+ }
}
else
{
- int idx;
-
- for (idx = 0; idx < nindexes; idx++)
- lazy_vacuum_index(Irel[idx], &(vacrelstats->indstats[idx]),
- vacrelstats->dead_tuples,
- vacrelstats->old_live_tuples, vacrelstats);
+ /* Outsource everything to parallel variant */
+ do_parallel_lazy_vacuum_all_indexes(vacrel);
}
/* Increase and report the number of index scans */
- vacrelstats->num_index_scans++;
+ vacrel->num_index_scans++;
pgstat_progress_update_param(PROGRESS_VACUUM_NUM_INDEX_VACUUMS,
- vacrelstats->num_index_scans);
+ vacrel->num_index_scans);
}
-
/*
- * lazy_vacuum_heap() -- second pass over the heap
+ * lazy_vacuum_heap_rel() -- second pass over the heap for two pass strategy
*
- * This routine marks dead tuples as unused and compacts out free
- * space on their pages. Pages not having dead tuples recorded from
- * lazy_scan_heap are not visited at all.
+ * This routine marks dead tuples as unused and compacts out free space on
+ * their pages. Pages not having dead tuples recorded from lazy_scan_heap are
+ * not visited at all.
*
- * Note: the reason for doing this as a second pass is we cannot remove
- * the tuples until we've removed their index entries, and we want to
- * process index entry removal in batches as large as possible.
+ * Note: the reason for doing this as a second pass is we cannot remove the
+ * tuples until we've removed their index entries, and we want to process
+ * index entry removal in batches as large as possible.
*/
static void
-lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
+lazy_vacuum_heap_rel(LVRelState *vacrel)
{
int tupindex;
- int npages;
+ int vacuumed_pages;
PGRUsage ru0;
Buffer vmbuffer = InvalidBuffer;
LVSavedErrInfo saved_err_info;
@@ -1908,14 +1907,15 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
/* Update error traceback information */
- update_vacuum_error_info(vacrelstats, &saved_err_info, VACUUM_ERRCB_PHASE_VACUUM_HEAP,
+ update_vacuum_error_info(vacrel, &saved_err_info,
+ VACUUM_ERRCB_PHASE_VACUUM_HEAP,
InvalidBlockNumber, InvalidOffsetNumber);
pg_rusage_init(&ru0);
- npages = 0;
+ vacuumed_pages = 0;
tupindex = 0;
- while (tupindex < vacrelstats->dead_tuples->num_tuples)
+ while (tupindex < vacrel->dead_tuples->num_tuples)
{
BlockNumber tblk;
Buffer buf;
@@ -1924,30 +1924,30 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
vacuum_delay_point();
- tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples->itemptrs[tupindex]);
- vacrelstats->blkno = tblk;
- buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
- vac_strategy);
+ tblk = ItemPointerGetBlockNumber(&vacrel->dead_tuples->itemptrs[tupindex]);
+ vacrel->blkno = tblk;
+ buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, tblk, RBM_NORMAL,
+ vacrel->bstrategy);
if (!ConditionalLockBufferForCleanup(buf))
{
ReleaseBuffer(buf);
++tupindex;
continue;
}
- tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats,
- &vmbuffer);
+ tupindex = lazy_vacuum_heap_page(vacrel, tblk, buf, tupindex,
+ &vmbuffer);
/* Now that we've compacted the page, record its available space */
page = BufferGetPage(buf);
freespace = PageGetHeapFreeSpace(page);
UnlockReleaseBuffer(buf);
- RecordPageWithFreeSpace(onerel, tblk, freespace);
- npages++;
+ RecordPageWithFreeSpace(vacrel->rel, tblk, freespace);
+ vacuumed_pages++;
}
/* Clear the block number information */
- vacrelstats->blkno = InvalidBlockNumber;
+ vacrel->blkno = InvalidBlockNumber;
if (BufferIsValid(vmbuffer))
{
@@ -1956,32 +1956,31 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
}
ereport(elevel,
- (errmsg("\"%s\": removed %d row versions in %d pages",
- vacrelstats->relname,
- tupindex, npages),
+ (errmsg("\"%s\": removed %d dead item identifiers in %u pages",
+ vacrel->relname, tupindex, vacuumed_pages),
errdetail_internal("%s", pg_rusage_show(&ru0))));
/* Revert to the previous phase information for error traceback */
- restore_vacuum_error_info(vacrelstats, &saved_err_info);
+ restore_vacuum_error_info(vacrel, &saved_err_info);
}
/*
- * lazy_vacuum_page() -- free dead tuples on a page
- * and repair its fragmentation.
+ * lazy_vacuum_heap_page() -- free dead tuples on a page
+ * and repair its fragmentation.
*
* Caller must hold pin and buffer cleanup lock on the buffer.
*
- * tupindex is the index in vacrelstats->dead_tuples of the first dead
- * tuple for this page. We assume the rest follow sequentially.
- * The return value is the first tupindex after the tuples of this page.
+ * tupindex is the index in vacrel->dead_tuples of the first dead tuple for
+ * this page. We assume the rest follow sequentially. The return value is
+ * the first tupindex after the tuples of this page.
*/
static int
-lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
- int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer)
+lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
+ int tupindex, Buffer *vmbuffer)
{
- LVDeadTuples *dead_tuples = vacrelstats->dead_tuples;
+ LVDeadTuples *dead_tuples = vacrel->dead_tuples;
Page page = BufferGetPage(buffer);
- OffsetNumber unused[MaxOffsetNumber];
+ OffsetNumber unused[MaxHeapTuplesPerPage];
int uncnt = 0;
TransactionId visibility_cutoff_xid;
bool all_frozen;
@@ -1990,8 +1989,9 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
/* Update error traceback information */
- update_vacuum_error_info(vacrelstats, &saved_err_info, VACUUM_ERRCB_PHASE_VACUUM_HEAP,
- blkno, InvalidOffsetNumber);
+ update_vacuum_error_info(vacrel, &saved_err_info,
+ VACUUM_ERRCB_PHASE_VACUUM_HEAP, blkno,
+ InvalidOffsetNumber);
START_CRIT_SECTION();
@@ -2018,14 +2018,14 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
MarkBufferDirty(buffer);
/* XLOG stuff */
- if (RelationNeedsWAL(onerel))
+ if (RelationNeedsWAL(vacrel->rel))
{
XLogRecPtr recptr;
- recptr = log_heap_clean(onerel, buffer,
+ recptr = log_heap_clean(vacrel->rel, buffer,
NULL, 0, NULL, 0,
unused, uncnt,
- vacrelstats->latestRemovedXid);
+ vacrel->latestRemovedXid);
PageSetLSN(page, recptr);
}
@@ -2043,8 +2043,7 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
* dirty, exclusively locked, and, if needed, a full page image has been
* emitted in the log_heap_clean() above.
*/
- if (heap_page_is_all_visible(onerel, buffer, vacrelstats,
- &visibility_cutoff_xid,
+ if (heap_page_is_all_visible(vacrel, buffer, &visibility_cutoff_xid,
&all_frozen))
PageSetAllVisible(page);
@@ -2055,8 +2054,9 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
*/
if (PageIsAllVisible(page))
{
- uint8 vm_status = visibilitymap_get_status(onerel, blkno, vmbuffer);
uint8 flags = 0;
+ uint8 vm_status = visibilitymap_get_status(vacrel->rel,
+ blkno, vmbuffer);
/* Set the VM all-frozen bit to flag, if needed */
if ((vm_status & VISIBILITYMAP_ALL_VISIBLE) == 0)
@@ -2066,12 +2066,12 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
Assert(BufferIsValid(*vmbuffer));
if (flags != 0)
- visibilitymap_set(onerel, blkno, buffer, InvalidXLogRecPtr,
+ visibilitymap_set(vacrel->rel, blkno, buffer, InvalidXLogRecPtr,
*vmbuffer, visibility_cutoff_xid, flags);
}
/* Revert to the previous phase information for error traceback */
- restore_vacuum_error_info(vacrelstats, &saved_err_info);
+ restore_vacuum_error_info(vacrel, &saved_err_info);
return tupindex;
}
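As a worked example of the tupindex contract (hypothetical TIDs): if vacrel->dead_tuples holds (5,1), (5,7) and (9,2), a call with tupindex = 0 for block 5 marks the first two items unused and returns 2, which lazy_vacuum_heap_rel() then uses as the starting tupindex when it reaches block 9.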
@@ -2083,7 +2083,7 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
* Also returns a flag indicating whether page contains any tuples at all.
*/
static bool
-lazy_check_needs_freeze(Buffer buf, bool *hastup, LVRelStats *vacrelstats)
+lazy_check_needs_freeze(Buffer buf, bool *hastup, LVRelState *vacrel)
{
Page page = BufferGetPage(buf);
OffsetNumber offnum,
@@ -2112,7 +2112,7 @@ lazy_check_needs_freeze(Buffer buf, bool *hastup, LVRelStats *vacrelstats)
* Set the offset number so that we can display it along with any
* error that occurred while processing this tuple.
*/
- vacrelstats->offnum = offnum;
+ vacrel->offnum = offnum;
itemid = PageGetItemId(page, offnum);
/* this should match hastup test in count_nondeletable_pages() */
@@ -2125,44 +2125,88 @@ lazy_check_needs_freeze(Buffer buf, bool *hastup, LVRelStats *vacrelstats)
tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
- if (heap_tuple_needs_freeze(tupleheader, FreezeLimit,
- MultiXactCutoff, buf))
+ if (heap_tuple_needs_freeze(tupleheader, vacrel->FreezeLimit,
+ vacrel->MultiXactCutoff, buf))
break;
} /* scan along page */
/* Clear the offset information once we have processed the given page. */
- vacrelstats->offnum = InvalidOffsetNumber;
+ vacrel->offnum = InvalidOffsetNumber;
return (offnum <= maxoff);
}
/*
+ * Perform lazy_vacuum_all_indexes() steps in parallel
+ */
+static void
+do_parallel_lazy_vacuum_all_indexes(LVRelState *vacrel)
+{
+ /* Tell parallel workers to do index vacuuming */
+ vacrel->lps->lvshared->for_cleanup = false;
+ vacrel->lps->lvshared->first_time = false;
+
+ /*
+ * We can only provide an approximate value of num_heap_tuples in vacuum
+ * cases.
+ */
+ vacrel->lps->lvshared->reltuples = vacrel->old_live_tuples;
+ vacrel->lps->lvshared->estimated_count = true;
+
+ do_parallel_vacuum_or_cleanup(vacrel,
+ vacrel->lps->nindexes_parallel_bulkdel);
+}
+
+/*
+ * Perform lazy_cleanup_all_indexes() steps in parallel
+ */
+static void
+do_parallel_lazy_cleanup_all_indexes(LVRelState *vacrel)
+{
+ int nworkers;
+
+ /*
+ * If parallel vacuum is active we perform index cleanup with parallel
+ * workers.
+ *
+ * Tell parallel workers to do index cleanup.
+ */
+ vacrel->lps->lvshared->for_cleanup = true;
+ vacrel->lps->lvshared->first_time = (vacrel->num_index_scans == 0);
+
+ /*
+ * Now we can provide a better estimate of total number of surviving
+ * tuples (we assume indexes are more interested in that than in the
+ * number of nominally live tuples).
+ */
+ vacrel->lps->lvshared->reltuples = vacrel->new_rel_tuples;
+ vacrel->lps->lvshared->estimated_count =
+ (vacrel->tupcount_pages < vacrel->rel_pages);
+
+ /* Determine the number of parallel workers to launch */
+ if (vacrel->lps->lvshared->first_time)
+ nworkers = vacrel->lps->nindexes_parallel_cleanup +
+ vacrel->lps->nindexes_parallel_condcleanup;
+ else
+ nworkers = vacrel->lps->nindexes_parallel_cleanup;
+
+ do_parallel_vacuum_or_cleanup(vacrel, nworkers);
+}
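For example (hypothetical index mix): with two indexes that support parallel cleanup unconditionally and one that supports it only on the first pass, the first cleanup call (num_index_scans == 0) requests nindexes_parallel_cleanup + nindexes_parallel_condcleanup = 2 + 1 = 3 workers, while any later cleanup pass requests only 2.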
+
+/*
* Perform index vacuum or index cleanup with parallel workers. This function
* must be used by the parallel vacuum leader process. The caller must set
* lps->lvshared->for_cleanup to indicate whether to perform vacuum or
* cleanup.
*/
static void
-lazy_parallel_vacuum_indexes(Relation *Irel, LVRelStats *vacrelstats,
- LVParallelState *lps, int nindexes)
+do_parallel_vacuum_or_cleanup(LVRelState *vacrel, int nworkers)
{
- int nworkers;
+ LVParallelState *lps = vacrel->lps;
Assert(!IsParallelWorker());
- Assert(ParallelVacuumIsActive(lps));
- Assert(nindexes > 0);
-
- /* Determine the number of parallel workers to launch */
- if (lps->lvshared->for_cleanup)
- {
- if (lps->lvshared->first_time)
- nworkers = lps->nindexes_parallel_cleanup +
- lps->nindexes_parallel_condcleanup;
- else
- nworkers = lps->nindexes_parallel_cleanup;
- }
- else
- nworkers = lps->nindexes_parallel_bulkdel;
+ Assert(ParallelVacuumIsActive(vacrel));
+ Assert(vacrel->nindexes > 0);
/* The leader process will participate */
nworkers--;
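+ /* (the leader itself joins in below, via do_parallel_processing()) */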
@@ -2177,7 +2221,7 @@ lazy_parallel_vacuum_indexes(Relation *Irel, LVRelStats *vacrelstats,
/* Setup the shared cost-based vacuum delay and launch workers */
if (nworkers > 0)
{
- if (vacrelstats->num_index_scans > 0)
+ if (vacrel->num_index_scans > 0)
{
/* Reset the parallel index processing counter */
pg_atomic_write_u32(&(lps->lvshared->idx), 0);
@@ -2232,14 +2276,13 @@ lazy_parallel_vacuum_indexes(Relation *Irel, LVRelStats *vacrelstats,
}
/* Process the indexes that can be processed by only leader process */
- vacuum_indexes_leader(Irel, vacrelstats, lps, nindexes);
+ do_serial_processing_for_unsafe_indexes(vacrel, lps->lvshared);
/*
* Join as a parallel worker. The leader process alone processes all the
* indexes in the case where no workers are launched.
*/
- parallel_vacuum_index(Irel, lps->lvshared, vacrelstats->dead_tuples,
- nindexes, vacrelstats);
+ do_parallel_processing(vacrel, lps->lvshared);
/*
* Next, accumulate buffer and WAL usage. (This must wait for the workers
@@ -2247,12 +2290,10 @@ lazy_parallel_vacuum_indexes(Relation *Irel, LVRelStats *vacrelstats,
*/
if (nworkers > 0)
{
- int i;
-
/* Wait for all vacuum workers to finish */
WaitForParallelWorkersToFinish(lps->pcxt);
- for (i = 0; i < lps->pcxt->nworkers_launched; i++)
+ for (int i = 0; i < lps->pcxt->nworkers_launched; i++)
InstrAccumParallelQuery(&lps->buffer_usage[i], &lps->wal_usage[i]);
}
@@ -2272,9 +2313,7 @@ lazy_parallel_vacuum_indexes(Relation *Irel, LVRelStats *vacrelstats,
* vacuum worker processes to process the indexes in parallel.
*/
static void
-parallel_vacuum_index(Relation *Irel, LVShared *lvshared,
- LVDeadTuples *dead_tuples, int nindexes,
- LVRelStats *vacrelstats)
+do_parallel_processing(LVRelState *vacrel, LVShared *lvshared)
{
/*
* Increment the active worker count if we are able to launch any worker.
@@ -2286,29 +2325,39 @@ parallel_vacuum_index(Relation *Irel, LVShared *lvshared,
for (;;)
{
int idx;
- LVSharedIndStats *shared_indstats;
+ LVSharedIndStats *shared_istat;
+ Relation indrel;
+ IndexBulkDeleteResult *istat;
/* Get an index number to process */
idx = pg_atomic_fetch_add_u32(&(lvshared->idx), 1);
/* Done for all indexes? */
- if (idx >= nindexes)
+ if (idx >= vacrel->nindexes)
break;
/* Get the index statistics of this index from DSM */
- shared_indstats = get_indstats(lvshared, idx);
+ shared_istat = parallel_stats_for_idx(lvshared, idx);
+
+ /* Skip indexes not participating in parallelism */
+ if (shared_istat == NULL)
+ continue;
+
+ indrel = vacrel->indrels[idx];
/*
- * Skip processing indexes that don't participate in parallel
- * operation
+ * Skip processing indexes that are unsafe for workers (these are
+ * processed in do_serial_processing_for_unsafe_indexes() by the leader)
*/
- if (shared_indstats == NULL ||
- skip_parallel_vacuum_index(Irel[idx], lvshared))
+ if (!parallel_processing_is_safe(indrel, lvshared))
continue;
/* Do vacuum or cleanup of the index */
- vacuum_one_index(Irel[idx], &(vacrelstats->indstats[idx]), lvshared,
- shared_indstats, dead_tuples, vacrelstats);
+ istat = (vacrel->indstats[idx]);
+ vacrel->indstats[idx] = parallel_process_one_index(indrel, istat,
+ lvshared,
+ shared_istat,
+ vacrel);
}
/*
@@ -2324,11 +2373,8 @@ parallel_vacuum_index(Relation *Irel, LVShared *lvshared,
* because these indexes don't support parallel operation at that phase.
*/
static void
-vacuum_indexes_leader(Relation *Irel, LVRelStats *vacrelstats,
- LVParallelState *lps, int nindexes)
+do_serial_processing_for_unsafe_indexes(LVRelState *vacrel, LVShared *lvshared)
{
- int i;
-
Assert(!IsParallelWorker());
/*
@@ -2337,18 +2383,32 @@ vacuum_indexes_leader(Relation *Irel, LVRelStats *vacrelstats,
if (VacuumActiveNWorkers)
pg_atomic_add_fetch_u32(VacuumActiveNWorkers, 1);
- for (i = 0; i < nindexes; i++)
+ for (int idx = 0; idx < vacrel->nindexes; idx++)
{
- LVSharedIndStats *shared_indstats;
+ LVSharedIndStats *shared_istat;
+ Relation indrel;
+ IndexBulkDeleteResult *istat;
+
+ shared_istat = parallel_stats_for_idx(lvshared, idx);
+ indrel = vacrel->indrels[idx];
- shared_indstats = get_indstats(lps->lvshared, i);
+
+ /*
+ * The leader must process any index that has no stats slot in shared
+ * memory (it isn't participating in parallelism at all), as well as
+ * any index that is unsafe for workers to process.
+ */
+ if (shared_istat != NULL &&
+ parallel_processing_is_safe(indrel, lvshared))
+ continue;
- /* Process the indexes skipped by parallel workers */
- if (shared_indstats == NULL ||
- skip_parallel_vacuum_index(Irel[i], lps->lvshared))
- vacuum_one_index(Irel[i], &(vacrelstats->indstats[i]), lps->lvshared,
- shared_indstats, vacrelstats->dead_tuples,
- vacrelstats);
+ /* Do vacuum or cleanup of the index */
+ istat = (vacrel->indstats[idx]);
+ vacrel->indstats[idx] = parallel_process_one_index(indrel, istat,
+ lvshared,
+ shared_istat,
+ vacrel);
}
/*
@@ -2365,33 +2425,29 @@ vacuum_indexes_leader(Relation *Irel, LVRelStats *vacrelstats,
* statistics returned from ambulkdelete and amvacuumcleanup to the DSM
* segment.
*/
-static void
-vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
- LVShared *lvshared, LVSharedIndStats *shared_indstats,
- LVDeadTuples *dead_tuples, LVRelStats *vacrelstats)
+static IndexBulkDeleteResult *
+parallel_process_one_index(Relation indrel,
+ IndexBulkDeleteResult *istat,
+ LVShared *lvshared,
+ LVSharedIndStats *shared_istat,
+ LVRelState *vacrel)
{
- IndexBulkDeleteResult *bulkdelete_res = NULL;
-
- if (shared_indstats)
- {
- /* Get the space for IndexBulkDeleteResult */
- bulkdelete_res = &(shared_indstats->stats);
+ IndexBulkDeleteResult *istat_res;
- /*
- * Update the pointer to the corresponding bulk-deletion result if
- * someone has already updated it.
- */
- if (shared_indstats->updated && *stats == NULL)
- *stats = bulkdelete_res;
- }
+ /*
+ * Update the pointer to the corresponding bulk-deletion result if someone
+ * has already updated it
+ */
+ if (shared_istat && shared_istat->updated && istat == NULL)
+ istat = &shared_istat->istat;
/* Do vacuum or cleanup of the index */
if (lvshared->for_cleanup)
- lazy_cleanup_index(indrel, stats, lvshared->reltuples,
- lvshared->estimated_count, vacrelstats);
+ istat_res = lazy_cleanup_one_index(indrel, istat, lvshared->reltuples,
+ lvshared->estimated_count, vacrel);
else
- lazy_vacuum_index(indrel, stats, dead_tuples,
- lvshared->reltuples, vacrelstats);
+ istat_res = lazy_vacuum_one_index(indrel, istat, lvshared->reltuples,
+ vacrel);
/*
* Copy the index bulk-deletion result returned from ambulkdelete and
@@ -2405,83 +2461,71 @@ vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
* Since all vacuum workers write the bulk-deletion result at different
* slots we can write them without locking.
*/
- if (shared_indstats && !shared_indstats->updated && *stats != NULL)
+ if (shared_istat && !shared_istat->updated && istat_res != NULL)
{
- memcpy(bulkdelete_res, *stats, sizeof(IndexBulkDeleteResult));
- shared_indstats->updated = true;
+ memcpy(&shared_istat->istat, istat_res, sizeof(IndexBulkDeleteResult));
+ shared_istat->updated = true;
- /*
- * Now that stats[idx] points to the DSM segment, we don't need the
- * locally allocated results.
- */
- pfree(*stats);
- *stats = bulkdelete_res;
+ /* Free the locally-allocated bulk-deletion result */
+ pfree(istat_res);
+
+ /* Return the pointer to the result stored in shared memory */
+ return &shared_istat->istat;
}
+
+ return istat_res;
}
/*
* lazy_cleanup_all_indexes() -- cleanup all indexes of relation.
- *
- * Cleanup indexes. We process the indexes serially unless we are doing
- * parallel vacuum.
*/
static void
-lazy_cleanup_all_indexes(Relation *Irel, LVRelStats *vacrelstats,
- LVParallelState *lps, int nindexes)
+lazy_cleanup_all_indexes(LVRelState *vacrel)
{
- int idx;
-
Assert(!IsParallelWorker());
- Assert(nindexes > 0);
+ Assert(vacrel->nindexes > 0);
/* Report that we are now cleaning up indexes */
pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
PROGRESS_VACUUM_PHASE_INDEX_CLEANUP);
- /*
- * If parallel vacuum is active we perform index cleanup with parallel
- * workers.
- */
- if (ParallelVacuumIsActive(lps))
+ if (!ParallelVacuumIsActive(vacrel))
{
- /* Tell parallel workers to do index cleanup */
- lps->lvshared->for_cleanup = true;
- lps->lvshared->first_time =
- (vacrelstats->num_index_scans == 0);
+ double reltuples = vacrel->new_rel_tuples;
+ bool estimated_count =
+ vacrel->tupcount_pages < vacrel->rel_pages;
- /*
- * Now we can provide a better estimate of total number of surviving
- * tuples (we assume indexes are more interested in that than in the
- * number of nominally live tuples).
- */
- lps->lvshared->reltuples = vacrelstats->new_rel_tuples;
- lps->lvshared->estimated_count =
- (vacrelstats->tupcount_pages < vacrelstats->rel_pages);
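+
+ /*
+ * Process each index serially. estimated_count tells the index AM
+ * whether reltuples is just an estimate (true whenever this VACUUM
+ * skipped any pages).
+ */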
+ for (int idx = 0; idx < vacrel->nindexes; idx++)
+ {
+ Relation indrel = vacrel->indrels[idx];
+ IndexBulkDeleteResult *istat = vacrel->indstats[idx];
- lazy_parallel_vacuum_indexes(Irel, vacrelstats, lps, nindexes);
+ vacrel->indstats[idx] =
+ lazy_cleanup_one_index(indrel, istat, reltuples,
+ estimated_count, vacrel);
+ }
}
else
{
- for (idx = 0; idx < nindexes; idx++)
- lazy_cleanup_index(Irel[idx], &(vacrelstats->indstats[idx]),
- vacrelstats->new_rel_tuples,
- vacrelstats->tupcount_pages < vacrelstats->rel_pages,
- vacrelstats);
+ /* Outsource everything to parallel variant */
+ do_parallel_lazy_cleanup_all_indexes(vacrel);
}
}
/*
- * lazy_vacuum_index() -- vacuum one index relation.
+ * lazy_vacuum_one_index() -- vacuum index relation.
*
* Delete all the index entries pointing to tuples listed in
* dead_tuples, and update running statistics.
*
* reltuples is the number of heap tuples to be passed to the
* bulkdelete callback. It's always assumed to be estimated.
+ *
+ * Returns bulk delete stats derived from input stats
*/
-static void
-lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
- LVDeadTuples *dead_tuples, double reltuples, LVRelStats *vacrelstats)
+static IndexBulkDeleteResult *
+lazy_vacuum_one_index(Relation indrel, IndexBulkDeleteResult *istat,
+ double reltuples, LVRelState *vacrel)
{
IndexVacuumInfo ivinfo;
PGRUsage ru0;
@@ -2495,7 +2539,7 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
ivinfo.estimated_count = true;
ivinfo.message_level = elevel;
ivinfo.num_heap_tuples = reltuples;
- ivinfo.strategy = vac_strategy;
+ ivinfo.strategy = vacrel->bstrategy;
/*
* Update error traceback information.
@@ -2503,38 +2547,41 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
* The index name is saved during this phase and restored immediately
* after this phase. See vacuum_error_callback.
*/
- Assert(vacrelstats->indname == NULL);
- vacrelstats->indname = pstrdup(RelationGetRelationName(indrel));
- update_vacuum_error_info(vacrelstats, &saved_err_info,
+ Assert(vacrel->indname == NULL);
+ vacrel->indname = pstrdup(RelationGetRelationName(indrel));
+ update_vacuum_error_info(vacrel, &saved_err_info,
VACUUM_ERRCB_PHASE_VACUUM_INDEX,
InvalidBlockNumber, InvalidOffsetNumber);
/* Do bulk deletion */
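+ /* lazy_tid_reaped() tells ambulkdelete which index entries point to dead_tuples items */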
- *stats = index_bulk_delete(&ivinfo, *stats,
- lazy_tid_reaped, (void *) dead_tuples);
+ istat = index_bulk_delete(&ivinfo, istat, lazy_tid_reaped,
+ (void *) vacrel->dead_tuples);
ereport(elevel,
(errmsg("scanned index \"%s\" to remove %d row versions",
- vacrelstats->indname,
- dead_tuples->num_tuples),
+ vacrel->indname, vacrel->dead_tuples->num_tuples),
errdetail_internal("%s", pg_rusage_show(&ru0))));
/* Revert to the previous phase information for error traceback */
- restore_vacuum_error_info(vacrelstats, &saved_err_info);
- pfree(vacrelstats->indname);
- vacrelstats->indname = NULL;
+ restore_vacuum_error_info(vacrel, &saved_err_info);
+ pfree(vacrel->indname);
+ vacrel->indname = NULL;
+
+ return istat;
}
/*
- * lazy_cleanup_index() -- do post-vacuum cleanup for one index relation.
+ * lazy_cleanup_one_index() -- do post-vacuum cleanup for index relation.
*
* reltuples is the number of heap tuples and estimated_count is true
* if reltuples is an estimated value.
+ *
+ * Returns bulk delete stats derived from input stats
*/
-static void
-lazy_cleanup_index(Relation indrel,
- IndexBulkDeleteResult **stats,
- double reltuples, bool estimated_count, LVRelStats *vacrelstats)
+static IndexBulkDeleteResult *
+lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat,
+ double reltuples, bool estimated_count,
+ LVRelState *vacrel)
{
IndexVacuumInfo ivinfo;
PGRUsage ru0;
@@ -2549,7 +2596,7 @@ lazy_cleanup_index(Relation indrel,
ivinfo.message_level = elevel;
ivinfo.num_heap_tuples = reltuples;
- ivinfo.strategy = vac_strategy;
+ ivinfo.strategy = vacrel->bstrategy;
/*
* Update error traceback information.
@@ -2557,35 +2604,37 @@ lazy_cleanup_index(Relation indrel,
* The index name is saved during this phase and restored immediately
* after this phase. See vacuum_error_callback.
*/
- Assert(vacrelstats->indname == NULL);
- vacrelstats->indname = pstrdup(RelationGetRelationName(indrel));
- update_vacuum_error_info(vacrelstats, &saved_err_info,
+ Assert(vacrel->indname == NULL);
+ vacrel->indname = pstrdup(RelationGetRelationName(indrel));
+ update_vacuum_error_info(vacrel, &saved_err_info,
VACUUM_ERRCB_PHASE_INDEX_CLEANUP,
InvalidBlockNumber, InvalidOffsetNumber);
- *stats = index_vacuum_cleanup(&ivinfo, *stats);
+ istat = index_vacuum_cleanup(&ivinfo, istat);
- if (*stats)
+ if (istat)
{
ereport(elevel,
(errmsg("index \"%s\" now contains %.0f row versions in %u pages",
RelationGetRelationName(indrel),
- (*stats)->num_index_tuples,
- (*stats)->num_pages),
+ (istat)->num_index_tuples,
+ (istat)->num_pages),
errdetail("%.0f index row versions were removed.\n"
"%u index pages were newly deleted.\n"
"%u index pages are currently deleted, of which %u are currently reusable.\n"
"%s.",
- (*stats)->tuples_removed,
- (*stats)->pages_newly_deleted,
- (*stats)->pages_deleted, (*stats)->pages_free,
+ (istat)->tuples_removed,
+ (istat)->pages_newly_deleted,
+ (istat)->pages_deleted, (istat)->pages_free,
pg_rusage_show(&ru0))));
}
/* Revert to the previous phase information for error traceback */
- restore_vacuum_error_info(vacrelstats, &saved_err_info);
- pfree(vacrelstats->indname);
- vacrelstats->indname = NULL;
+ restore_vacuum_error_info(vacrel, &saved_err_info);
+ pfree(vacrel->indname);
+ vacrel->indname = NULL;
+
+ return istat;
}
/*
@@ -2608,17 +2657,17 @@ lazy_cleanup_index(Relation indrel,
* careful to depend only on fields that lazy_scan_heap updates on-the-fly.
*/
static bool
-should_attempt_truncation(VacuumParams *params, LVRelStats *vacrelstats)
+should_attempt_truncation(LVRelState *vacrel, VacuumParams *params)
{
BlockNumber possibly_freeable;
if (params->truncate == VACOPT_TERNARY_DISABLED)
return false;
- possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
+ possibly_freeable = vacrel->rel_pages - vacrel->nonempty_pages;
if (possibly_freeable > 0 &&
(possibly_freeable >= REL_TRUNCATE_MINIMUM ||
- possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) &&
+ possibly_freeable >= vacrel->rel_pages / REL_TRUNCATE_FRACTION) &&
old_snapshot_threshold < 0)
return true;
else
@@ -2629,9 +2678,9 @@ should_attempt_truncation(VacuumParams *params, LVRelStats *vacrelstats)
* lazy_truncate_heap - try to truncate off any empty pages at the end
*/
static void
-lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
+lazy_truncate_heap(LVRelState *vacrel)
{
- BlockNumber old_rel_pages = vacrelstats->rel_pages;
+ BlockNumber old_rel_pages = vacrel->rel_pages;
BlockNumber new_rel_pages;
int lock_retry;
@@ -2655,11 +2704,11 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
* (which is quite possible considering we already hold a lower-grade
* lock).
*/
- vacrelstats->lock_waiter_detected = false;
+ vacrel->lock_waiter_detected = false;
lock_retry = 0;
while (true)
{
- if (ConditionalLockRelation(onerel, AccessExclusiveLock))
+ if (ConditionalLockRelation(vacrel->rel, AccessExclusiveLock))
break;
/*
@@ -2675,10 +2724,10 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
* We failed to establish the lock in the specified number of
* retries. This means we give up truncating.
*/
- vacrelstats->lock_waiter_detected = true;
+ vacrel->lock_waiter_detected = true;
ereport(elevel,
(errmsg("\"%s\": stopping truncate due to conflicting lock request",
- vacrelstats->relname)));
+ vacrel->relname)));
return;
}
@@ -2690,17 +2739,17 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
* whilst we were vacuuming with non-exclusive lock. If so, give up;
* the newly added pages presumably contain non-deletable tuples.
*/
- new_rel_pages = RelationGetNumberOfBlocks(onerel);
+ new_rel_pages = RelationGetNumberOfBlocks(vacrel->rel);
if (new_rel_pages != old_rel_pages)
{
/*
- * Note: we intentionally don't update vacrelstats->rel_pages with
- * the new rel size here. If we did, it would amount to assuming
- * that the new pages are empty, which is unlikely. Leaving the
- * numbers alone amounts to assuming that the new pages have the
- * same tuple density as existing ones, which is less unlikely.
+ * Note: we intentionally don't update vacrel->rel_pages with the
+ * new rel size here. If we did, it would amount to assuming that
+ * the new pages are empty, which is unlikely. Leaving the numbers
+ * alone amounts to assuming that the new pages have the same
+ * tuple density as existing ones, which is less unlikely.
*/
- UnlockRelation(onerel, AccessExclusiveLock);
+ UnlockRelation(vacrel->rel, AccessExclusiveLock);
return;
}
@@ -2710,20 +2759,20 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
* other backends could have added tuples to these pages whilst we
* were vacuuming.
*/
- new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);
- vacrelstats->blkno = new_rel_pages;
+ new_rel_pages = count_nondeletable_pages(vacrel);
+ vacrel->blkno = new_rel_pages;
if (new_rel_pages >= old_rel_pages)
{
/* can't do anything after all */
- UnlockRelation(onerel, AccessExclusiveLock);
+ UnlockRelation(vacrel->rel, AccessExclusiveLock);
return;
}
/*
* Okay to truncate.
*/
- RelationTruncate(onerel, new_rel_pages);
+ RelationTruncate(vacrel->rel, new_rel_pages);
/*
* We can release the exclusive lock as soon as we have truncated.
@@ -2732,25 +2781,25 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
* that should happen as part of standard invalidation processing once
* they acquire lock on the relation.
*/
- UnlockRelation(onerel, AccessExclusiveLock);
+ UnlockRelation(vacrel->rel, AccessExclusiveLock);
/*
* Update statistics. Here, it *is* correct to adjust rel_pages
* without also touching reltuples, since the tuple count wasn't
* changed by the truncation.
*/
- vacrelstats->pages_removed += old_rel_pages - new_rel_pages;
- vacrelstats->rel_pages = new_rel_pages;
+ vacrel->pages_removed += old_rel_pages - new_rel_pages;
+ vacrel->rel_pages = new_rel_pages;
ereport(elevel,
(errmsg("\"%s\": truncated %u to %u pages",
- vacrelstats->relname,
+ vacrel->relname,
old_rel_pages, new_rel_pages),
errdetail_internal("%s",
pg_rusage_show(&ru0))));
old_rel_pages = new_rel_pages;
- } while (new_rel_pages > vacrelstats->nonempty_pages &&
- vacrelstats->lock_waiter_detected);
+ } while (new_rel_pages > vacrel->nonempty_pages &&
+ vacrel->lock_waiter_detected);
}
/*
@@ -2759,7 +2808,7 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
* Returns number of nondeletable pages (last nonempty page + 1).
*/
static BlockNumber
-count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
+count_nondeletable_pages(LVRelState *vacrel)
{
BlockNumber blkno;
BlockNumber prefetchedUntil;
@@ -2774,11 +2823,11 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
* unsigned.) To make the scan faster, we prefetch a few blocks at a time
* in forward direction, so that OS-level readahead can kick in.
*/
- blkno = vacrelstats->rel_pages;
+ blkno = vacrel->rel_pages;
StaticAssertStmt((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
"prefetch size must be power of 2");
prefetchedUntil = InvalidBlockNumber;
- while (blkno > vacrelstats->nonempty_pages)
+ while (blkno > vacrel->nonempty_pages)
{
Buffer buf;
Page page;
@@ -2805,13 +2854,13 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000)
>= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL)
{
- if (LockHasWaitersRelation(onerel, AccessExclusiveLock))
+ if (LockHasWaitersRelation(vacrel->rel, AccessExclusiveLock))
{
ereport(elevel,
(errmsg("\"%s\": suspending truncate due to conflicting lock request",
- vacrelstats->relname)));
+ vacrel->relname)));
- vacrelstats->lock_waiter_detected = true;
+ vacrel->lock_waiter_detected = true;
return blkno;
}
starttime = currenttime;
@@ -2836,14 +2885,14 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
{
- PrefetchBuffer(onerel, MAIN_FORKNUM, pblkno);
+ PrefetchBuffer(vacrel->rel, MAIN_FORKNUM, pblkno);
CHECK_FOR_INTERRUPTS();
}
prefetchedUntil = prefetchStart;
}
- buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
- RBM_NORMAL, vac_strategy);
+ buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
+ vacrel->bstrategy);
/* In this phase we only need shared access to the buffer */
LockBuffer(buf, BUFFER_LOCK_SHARE);
@@ -2891,7 +2940,7 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
* pages still are; we need not bother to look at the last known-nonempty
* page.
*/
- return vacrelstats->nonempty_pages;
+ return vacrel->nonempty_pages;
}
/*
@@ -2930,18 +2979,64 @@ compute_max_dead_tuples(BlockNumber relblocks, bool useindex)
* See the comments at the head of this file for rationale.
*/
static void
-lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
+lazy_space_alloc(LVRelState *vacrel, int nworkers, BlockNumber nblocks)
{
- LVDeadTuples *dead_tuples = NULL;
+ LVDeadTuples *dead_tuples;
long maxtuples;
- maxtuples = compute_max_dead_tuples(relblocks, vacrelstats->useindex);
+ /*
+ * Initialize state for a parallel vacuum. As of now, only one worker can
+ * be used for an index, so we invoke parallelism only if there are at
+ * least two indexes on a table.
+ */
+ if (nworkers >= 0 && vacrel->nindexes > 1)
+ {
+ /*
+ * Since parallel workers cannot access data in temporary tables, we
+ * can't perform parallel vacuum on them.
+ */
+ if (RelationUsesLocalBuffers(vacrel->rel))
+ {
+ /*
+ * Give warning only if the user explicitly tries to perform a
+ * parallel vacuum on a temporary table.
+ */
+ if (nworkers > 0)
+ ereport(WARNING,
+ (errmsg("disabling parallel option of vacuum on \"%s\" --- cannot vacuum temporary tables in parallel",
+ vacrel->relname)));
+ }
+ else
+ vacrel->lps = begin_parallel_vacuum(vacrel, nblocks, nworkers);
+
+ /* If parallel mode started, we're done */
+ if (ParallelVacuumIsActive(vacrel))
+ return;
+ }
+
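+ /* Parallel vacuum is not active: allocate dead_tuples space locally */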
+ maxtuples = compute_max_dead_tuples(nblocks, vacrel->nindexes > 0);
dead_tuples = (LVDeadTuples *) palloc(SizeOfDeadTuples(maxtuples));
dead_tuples->num_tuples = 0;
dead_tuples->max_tuples = (int) maxtuples;
- vacrelstats->dead_tuples = dead_tuples;
+ vacrel->dead_tuples = dead_tuples;
+}
+
+/*
+ * lazy_space_free - free space allocated in lazy_space_alloc
+ */
+static void
+lazy_space_free(LVRelState *vacrel)
+{
+ if (!ParallelVacuumIsActive(vacrel))
+ return;
+
+ /*
+ * End parallel mode before updating index statistics as we cannot write
+ * during parallel mode.
+ */
+ end_parallel_vacuum(vacrel);
}
/*
@@ -3039,8 +3134,7 @@ vac_cmp_itemptr(const void *left, const void *right)
* on this page is frozen.
*/
static bool
-heap_page_is_all_visible(Relation rel, Buffer buf,
- LVRelStats *vacrelstats,
+heap_page_is_all_visible(LVRelState *vacrel, Buffer buf,
TransactionId *visibility_cutoff_xid,
bool *all_frozen)
{
@@ -3069,7 +3163,7 @@ heap_page_is_all_visible(Relation rel, Buffer buf,
* Set the offset number so that we can display it along with any
* error that occurred while processing this tuple.
*/
- vacrelstats->offnum = offnum;
+ vacrel->offnum = offnum;
itemid = PageGetItemId(page, offnum);
/* Unused or redirect line pointers are of no interest */
@@ -3093,9 +3187,9 @@ heap_page_is_all_visible(Relation rel, Buffer buf,
tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
tuple.t_len = ItemIdGetLength(itemid);
- tuple.t_tableOid = RelationGetRelid(rel);
+ tuple.t_tableOid = RelationGetRelid(vacrel->rel);
- switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf))
+ switch (HeapTupleSatisfiesVacuum(&tuple, vacrel->OldestXmin, buf))
{
case HEAPTUPLE_LIVE:
{
@@ -3114,7 +3208,7 @@ heap_page_is_all_visible(Relation rel, Buffer buf,
* that everyone sees it as committed?
*/
xmin = HeapTupleHeaderGetXmin(tuple.t_data);
- if (!TransactionIdPrecedes(xmin, OldestXmin))
+ if (!TransactionIdPrecedes(xmin, vacrel->OldestXmin))
{
all_visible = false;
*all_frozen = false;
@@ -3148,7 +3242,7 @@ heap_page_is_all_visible(Relation rel, Buffer buf,
} /* scan along page */
/* Clear the offset information once we have processed the given page. */
- vacrelstats->offnum = InvalidOffsetNumber;
+ vacrel->offnum = InvalidOffsetNumber;
return all_visible;
}
@@ -3167,14 +3261,13 @@ heap_page_is_all_visible(Relation rel, Buffer buf,
* vacuum.
*/
static int
-compute_parallel_vacuum_workers(Relation *Irel, int nindexes, int nrequested,
+compute_parallel_vacuum_workers(LVRelState *vacrel, int nrequested,
bool *can_parallel_vacuum)
{
int nindexes_parallel = 0;
int nindexes_parallel_bulkdel = 0;
int nindexes_parallel_cleanup = 0;
int parallel_workers;
- int i;
/*
* We don't allow performing parallel operation in standalone backend or
@@ -3186,15 +3279,16 @@ compute_parallel_vacuum_workers(Relation *Irel, int nindexes, int nrequested,
/*
* Compute the number of indexes that can participate in parallel vacuum.
*/
- for (i = 0; i < nindexes; i++)
+ for (int idx = 0; idx < vacrel->nindexes; idx++)
{
- uint8 vacoptions = Irel[i]->rd_indam->amparallelvacuumoptions;
+ Relation indrel = vacrel->indrels[idx];
+ uint8 vacoptions = indrel->rd_indam->amparallelvacuumoptions;
if (vacoptions == VACUUM_OPTION_NO_PARALLEL ||
- RelationGetNumberOfBlocks(Irel[i]) < min_parallel_index_scan_size)
+ RelationGetNumberOfBlocks(indrel) < min_parallel_index_scan_size)
continue;
- can_parallel_vacuum[i] = true;
+ can_parallel_vacuum[idx] = true;
if ((vacoptions & VACUUM_OPTION_PARALLEL_BULKDEL) != 0)
nindexes_parallel_bulkdel++;
@@ -3224,51 +3318,29 @@ compute_parallel_vacuum_workers(Relation *Irel, int nindexes, int nrequested,
}
/*
- * Initialize variables for shared index statistics, set NULL bitmap and the
- * size of stats for each index.
- */
-static void
-prepare_index_statistics(LVShared *lvshared, bool *can_parallel_vacuum,
- int nindexes)
-{
- int i;
-
- /* Currently, we don't support parallel vacuum for autovacuum */
- Assert(!IsAutoVacuumWorkerProcess());
-
- /* Set NULL for all indexes */
- memset(lvshared->bitmap, 0x00, BITMAPLEN(nindexes));
-
- for (i = 0; i < nindexes; i++)
- {
- if (!can_parallel_vacuum[i])
- continue;
-
- /* Set NOT NULL as this index does support parallelism */
- lvshared->bitmap[i >> 3] |= 1 << (i & 0x07);
- }
-}
-
-/*
* Update index statistics in pg_class if the statistics are accurate.
*/
static void
-update_index_statistics(Relation *Irel, IndexBulkDeleteResult **stats,
- int nindexes)
+update_index_statistics(LVRelState *vacrel)
{
- int i;
+ Relation *indrels = vacrel->indrels;
+ int nindexes = vacrel->nindexes;
+ IndexBulkDeleteResult **indstats = vacrel->indstats;
Assert(!IsInParallelMode());
- for (i = 0; i < nindexes; i++)
+ for (int idx = 0; idx < nindexes; idx++)
{
- if (stats[i] == NULL || stats[i]->estimated_count)
+ Relation indrel = indrels[idx];
+ IndexBulkDeleteResult *istat = indstats[idx];
+
+ if (istat == NULL || istat->estimated_count)
continue;
/* Update index statistics */
- vac_update_relstats(Irel[i],
- stats[i]->num_pages,
- stats[i]->num_index_tuples,
+ vac_update_relstats(indrel,
+ istat->num_pages,
+ istat->num_index_tuples,
0,
false,
InvalidTransactionId,
@@ -3283,10 +3355,12 @@ update_index_statistics(Relation *Irel, IndexBulkDeleteResult **stats,
* create a parallel context, and then initialize the DSM segment.
*/
static LVParallelState *
-begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats,
- BlockNumber nblocks, int nindexes, int nrequested)
+begin_parallel_vacuum(LVRelState *vacrel, BlockNumber nblocks,
+ int nrequested)
{
LVParallelState *lps = NULL;
+ Relation *indrels = vacrel->indrels;
+ int nindexes = vacrel->nindexes;
ParallelContext *pcxt;
LVShared *shared;
LVDeadTuples *dead_tuples;
@@ -3299,7 +3373,6 @@ begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats,
int nindexes_mwm = 0;
int parallel_workers = 0;
int querylen;
- int i;
/*
* A parallel vacuum must be requested and there must be indexes on the
@@ -3312,7 +3385,7 @@ begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats,
* Compute the number of parallel vacuum workers to launch
*/
can_parallel_vacuum = (bool *) palloc0(sizeof(bool) * nindexes);
- parallel_workers = compute_parallel_vacuum_workers(Irel, nindexes,
+ parallel_workers = compute_parallel_vacuum_workers(vacrel,
nrequested,
can_parallel_vacuum);
@@ -3333,9 +3406,10 @@ begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats,
/* Estimate size for shared information -- PARALLEL_VACUUM_KEY_SHARED */
est_shared = MAXALIGN(add_size(SizeOfLVShared, BITMAPLEN(nindexes)));
- for (i = 0; i < nindexes; i++)
+ for (int idx = 0; idx < nindexes; idx++)
{
- uint8 vacoptions = Irel[i]->rd_indam->amparallelvacuumoptions;
+ Relation indrel = indrels[idx];
+ uint8 vacoptions = indrel->rd_indam->amparallelvacuumoptions;
/*
* Cleanup option should be either disabled, always performing in
@@ -3346,10 +3420,10 @@ begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats,
Assert(vacoptions <= VACUUM_OPTION_MAX_VALID_VALUE);
/* Skip indexes that don't participate in parallel vacuum */
- if (!can_parallel_vacuum[i])
+ if (!can_parallel_vacuum[idx])
continue;
- if (Irel[i]->rd_indam->amusemaintenanceworkmem)
+ if (indrel->rd_indam->amusemaintenanceworkmem)
nindexes_mwm++;
est_shared = add_size(est_shared, sizeof(LVSharedIndStats));
@@ -3404,7 +3478,7 @@ begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats,
/* Prepare shared information */
shared = (LVShared *) shm_toc_allocate(pcxt->toc, est_shared);
MemSet(shared, 0, est_shared);
- shared->relid = relid;
+ shared->relid = RelationGetRelid(vacrel->rel);
shared->elevel = elevel;
shared->maintenance_work_mem_worker =
(nindexes_mwm > 0) ?
@@ -3415,7 +3489,20 @@ begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats,
pg_atomic_init_u32(&(shared->active_nworkers), 0);
pg_atomic_init_u32(&(shared->idx), 0);
shared->offset = MAXALIGN(add_size(SizeOfLVShared, BITMAPLEN(nindexes)));
- prepare_index_statistics(shared, can_parallel_vacuum, nindexes);
+
+ /*
+ * Initialize shared index statistics: set the NULL bitmap so that only
+ * indexes participating in parallel vacuum get a stats slot.
+ */
+ memset(shared->bitmap, 0x00, BITMAPLEN(nindexes));
+ for (int idx = 0; idx < nindexes; idx++)
+ {
+ if (!can_parallel_vacuum[idx])
+ continue;
+
+ /* Set NOT NULL as this index does support parallelism */
+ shared->bitmap[idx >> 3] |= 1 << (idx & 0x07);
+ }
shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_SHARED, shared);
lps->lvshared = shared;
@@ -3426,7 +3513,7 @@ begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats,
dead_tuples->num_tuples = 0;
MemSet(dead_tuples->itemptrs, 0, sizeof(ItemPointerData) * maxtuples);
shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_DEAD_TUPLES, dead_tuples);
- vacrelstats->dead_tuples = dead_tuples;
+ vacrel->dead_tuples = dead_tuples;
/*
* Allocate space for each worker's BufferUsage and WalUsage; no need to
@@ -3467,32 +3554,35 @@ begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats,
* context, but that won't be safe (see ExitParallelMode).
*/
static void
-end_parallel_vacuum(IndexBulkDeleteResult **stats, LVParallelState *lps,
- int nindexes)
+end_parallel_vacuum(LVRelState *vacrel)
{
- int i;
+ IndexBulkDeleteResult **indstats = vacrel->indstats;
+ LVParallelState *lps = vacrel->lps;
+ int nindexes = vacrel->nindexes;
Assert(!IsParallelWorker());
/* Copy the updated statistics */
- for (i = 0; i < nindexes; i++)
+ for (int idx = 0; idx < nindexes; idx++)
{
- LVSharedIndStats *indstats = get_indstats(lps->lvshared, i);
+ LVSharedIndStats *shared_istat;
+
+ shared_istat = parallel_stats_for_idx(lps->lvshared, idx);
/*
* Skip unused slot. The statistics of this index are already stored
* in local memory.
*/
- if (indstats == NULL)
+ if (shared_istat == NULL)
continue;
- if (indstats->updated)
+ if (shared_istat->updated)
{
- stats[i] = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
- memcpy(stats[i], &(indstats->stats), sizeof(IndexBulkDeleteResult));
+ indstats[idx] = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
+ memcpy(indstats[idx], &(shared_istat->istat), sizeof(IndexBulkDeleteResult));
}
else
- stats[i] = NULL;
+ indstats[idx] = NULL;
}
DestroyParallelContext(lps->pcxt);
@@ -3500,23 +3590,24 @@ end_parallel_vacuum(IndexBulkDeleteResult **stats, LVParallelState *lps,
/* Deactivate parallel vacuum */
pfree(lps);
- lps = NULL;
+ vacrel->lps = NULL;
}
-/* Return the Nth index statistics or NULL */
+/*
+ * Return shared memory statistics for index at offset 'getidx', if any
+ */
static LVSharedIndStats *
-get_indstats(LVShared *lvshared, int n)
+parallel_stats_for_idx(LVShared *lvshared, int getidx)
{
- int i;
char *p;
- if (IndStatsIsNull(lvshared, n))
+ if (IndStatsIsNull(lvshared, getidx))
return NULL;
p = (char *) GetSharedIndStats(lvshared);
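+ /* Walk past the stats slots of lower-numbered indexes that have one */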
- for (i = 0; i < n; i++)
+ for (int idx = 0; idx < getidx; idx++)
{
- if (IndStatsIsNull(lvshared, i))
+ if (IndStatsIsNull(lvshared, idx))
continue;
p += sizeof(LVSharedIndStats);
@@ -3526,11 +3617,11 @@ get_indstats(LVShared *lvshared, int n)
}
/*
- * Returns true, if the given index can't participate in parallel index vacuum
- * or parallel index cleanup, false, otherwise.
+ * Returns false if the given index can't participate in parallel index
+ * vacuum or parallel index cleanup.
*/
static bool
-skip_parallel_vacuum_index(Relation indrel, LVShared *lvshared)
+parallel_processing_is_safe(Relation indrel, LVShared *lvshared)
{
uint8 vacoptions = indrel->rd_indam->amparallelvacuumoptions;
@@ -3552,15 +3643,15 @@ skip_parallel_vacuum_index(Relation indrel, LVShared *lvshared)
*/
if (!lvshared->first_time &&
((vacoptions & VACUUM_OPTION_PARALLEL_COND_CLEANUP) != 0))
- return true;
+ return false;
}
else if ((vacoptions & VACUUM_OPTION_PARALLEL_BULKDEL) == 0)
{
/* Skip if the index does not support parallel bulk deletion */
- return true;
+ return false;
}
- return false;
+ return true;
}
/*
@@ -3572,7 +3663,7 @@ skip_parallel_vacuum_index(Relation indrel, LVShared *lvshared)
void
parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
{
- Relation onerel;
+ Relation rel;
Relation *indrels;
LVShared *lvshared;
LVDeadTuples *dead_tuples;
@@ -3580,7 +3671,7 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
WalUsage *wal_usage;
int nindexes;
char *sharedquery;
- LVRelStats vacrelstats;
+ LVRelState vacrel;
ErrorContextCallback errcallback;
lvshared = (LVShared *) shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_SHARED,
@@ -3602,13 +3693,13 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
* okay because the lock mode does not conflict among the parallel
* workers.
*/
- onerel = table_open(lvshared->relid, ShareUpdateExclusiveLock);
+ rel = table_open(lvshared->relid, ShareUpdateExclusiveLock);
/*
* Open all indexes. indrels are sorted in order by OID, which should be
* matched to the leader's one.
*/
- vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &indrels);
+ vac_open_indexes(rel, RowExclusiveLock, &nindexes, &indrels);
Assert(nindexes > 0);
/* Set dead tuple space */
@@ -3626,24 +3717,27 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
VacuumSharedCostBalance = &(lvshared->cost_balance);
VacuumActiveNWorkers = &(lvshared->active_nworkers);
- vacrelstats.indstats = (IndexBulkDeleteResult **)
+ vacrel.rel = rel;
+ vacrel.indrels = indrels;
+ vacrel.nindexes = nindexes;
+ vacrel.indstats = (IndexBulkDeleteResult **)
palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
if (lvshared->maintenance_work_mem_worker > 0)
maintenance_work_mem = lvshared->maintenance_work_mem_worker;
/*
- * Initialize vacrelstats for use as error callback arg by parallel
- * worker.
+ * Initialize vacrel for use as error callback arg by parallel worker.
*/
- vacrelstats.relnamespace = get_namespace_name(RelationGetNamespace(onerel));
- vacrelstats.relname = pstrdup(RelationGetRelationName(onerel));
- vacrelstats.indname = NULL;
- vacrelstats.phase = VACUUM_ERRCB_PHASE_UNKNOWN; /* Not yet processing */
+ vacrel.relnamespace = get_namespace_name(RelationGetNamespace(rel));
+ vacrel.relname = pstrdup(RelationGetRelationName(rel));
+ vacrel.indname = NULL;
+ vacrel.phase = VACUUM_ERRCB_PHASE_UNKNOWN; /* Not yet processing */
+ vacrel.dead_tuples = dead_tuples;
/* Setup error traceback support for ereport() */
errcallback.callback = vacuum_error_callback;
- errcallback.arg = &vacrelstats;
+ errcallback.arg = &vacrel;
errcallback.previous = error_context_stack;
error_context_stack = &errcallback;
@@ -3651,8 +3745,7 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
InstrStartParallelQuery();
/* Process indexes to perform vacuum/cleanup */
- parallel_vacuum_index(indrels, lvshared, dead_tuples, nindexes,
- &vacrelstats);
+ do_parallel_processing(&vacrel, lvshared);
/* Report buffer/WAL usage during parallel execution */
buffer_usage = shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_BUFFER_USAGE, false);
@@ -3664,8 +3757,8 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
error_context_stack = errcallback.previous;
vac_close_indexes(nindexes, indrels, RowExclusiveLock);
- table_close(onerel, ShareUpdateExclusiveLock);
- pfree(vacrelstats.indstats);
+ table_close(rel, ShareUpdateExclusiveLock);
+ pfree(vacrel.indstats);
}
/*
@@ -3674,7 +3767,7 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
static void
vacuum_error_callback(void *arg)
{
- LVRelStats *errinfo = arg;
+ LVRelState *errinfo = arg;
switch (errinfo->phase)
{
@@ -3736,28 +3829,29 @@ vacuum_error_callback(void *arg)
* the current information which can be later restored via restore_vacuum_error_info.
*/
static void
-update_vacuum_error_info(LVRelStats *errinfo, LVSavedErrInfo *saved_err_info, int phase,
- BlockNumber blkno, OffsetNumber offnum)
+update_vacuum_error_info(LVRelState *vacrel, LVSavedErrInfo *saved_vacrel,
+ int phase, BlockNumber blkno, OffsetNumber offnum)
{
- if (saved_err_info)
+ if (saved_vacrel)
{
- saved_err_info->offnum = errinfo->offnum;
- saved_err_info->blkno = errinfo->blkno;
- saved_err_info->phase = errinfo->phase;
+ saved_vacrel->offnum = vacrel->offnum;
+ saved_vacrel->blkno = vacrel->blkno;
+ saved_vacrel->phase = vacrel->phase;
}
- errinfo->blkno = blkno;
- errinfo->offnum = offnum;
- errinfo->phase = phase;
+ vacrel->blkno = blkno;
+ vacrel->offnum = offnum;
+ vacrel->phase = phase;
}
/*
* Restores the vacuum information saved via a prior call to update_vacuum_error_info.
*/
static void
-restore_vacuum_error_info(LVRelStats *errinfo, const LVSavedErrInfo *saved_err_info)
+restore_vacuum_error_info(LVRelState *vacrel,
+ const LVSavedErrInfo *saved_vacrel)
{
- errinfo->blkno = saved_err_info->blkno;
- errinfo->offnum = saved_err_info->offnum;
- errinfo->phase = saved_err_info->phase;
+ vacrel->blkno = saved_vacrel->blkno;
+ vacrel->offnum = saved_vacrel->offnum;
+ vacrel->phase = saved_vacrel->phase;
}
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index 3d2dbed7083..9b5afa12ad6 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -689,7 +689,7 @@ index_getbitmap(IndexScanDesc scan, TIDBitmap *bitmap)
*/
IndexBulkDeleteResult *
index_bulk_delete(IndexVacuumInfo *info,
- IndexBulkDeleteResult *stats,
+ IndexBulkDeleteResult *istat,
IndexBulkDeleteCallback callback,
void *callback_state)
{
@@ -698,7 +698,7 @@ index_bulk_delete(IndexVacuumInfo *info,
RELATION_CHECKS;
CHECK_REL_PROCEDURE(ambulkdelete);
- return indexRelation->rd_indam->ambulkdelete(info, stats,
+ return indexRelation->rd_indam->ambulkdelete(info, istat,
callback, callback_state);
}
@@ -710,14 +710,14 @@ index_bulk_delete(IndexVacuumInfo *info,
*/
IndexBulkDeleteResult *
index_vacuum_cleanup(IndexVacuumInfo *info,
- IndexBulkDeleteResult *stats)
+ IndexBulkDeleteResult *istat)
{
Relation indexRelation = info->index;
RELATION_CHECKS;
CHECK_REL_PROCEDURE(amvacuumcleanup);
- return indexRelation->rd_indam->amvacuumcleanup(info, stats);
+ return indexRelation->rd_indam->amvacuumcleanup(info, istat);
}
/* ----------------
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 662aff04b45..25465b05ddb 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -617,7 +617,7 @@ Relation
vacuum_open_relation(Oid relid, RangeVar *relation, bits32 options,
bool verbose, LOCKMODE lmode)
{
- Relation onerel;
+ Relation rel;
bool rel_lock = true;
int elevel;
@@ -633,18 +633,18 @@ vacuum_open_relation(Oid relid, RangeVar *relation, bits32 options,
* in non-blocking mode, before calling try_relation_open().
*/
if (!(options & VACOPT_SKIP_LOCKED))
- onerel = try_relation_open(relid, lmode);
+ rel = try_relation_open(relid, lmode);
else if (ConditionalLockRelationOid(relid, lmode))
- onerel = try_relation_open(relid, NoLock);
+ rel = try_relation_open(relid, NoLock);
else
{
- onerel = NULL;
+ rel = NULL;
rel_lock = false;
}
/* if relation is opened, leave */
- if (onerel)
- return onerel;
+ if (rel)
+ return rel;
/*
* Relation could not be opened, hence generate if possible a log
@@ -1726,8 +1726,8 @@ static bool
vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params)
{
LOCKMODE lmode;
- Relation onerel;
- LockRelId onerelid;
+ Relation rel;
+ LockRelId lockrelid;
Oid toast_relid;
Oid save_userid;
int save_sec_context;
@@ -1792,11 +1792,11 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params)
AccessExclusiveLock : ShareUpdateExclusiveLock;
/* open the relation and get the appropriate lock on it */
- onerel = vacuum_open_relation(relid, relation, params->options,
- params->log_min_duration >= 0, lmode);
+ rel = vacuum_open_relation(relid, relation, params->options,
+ params->log_min_duration >= 0, lmode);
/* leave if relation could not be opened or locked */
- if (!onerel)
+ if (!rel)
{
PopActiveSnapshot();
CommitTransactionCommand();
@@ -1811,11 +1811,11 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params)
* changed in-between. Make sure to only generate logs for VACUUM in this
* case.
*/
- if (!vacuum_is_relation_owner(RelationGetRelid(onerel),
- onerel->rd_rel,
+ if (!vacuum_is_relation_owner(RelationGetRelid(rel),
+ rel->rd_rel,
params->options & VACOPT_VACUUM))
{
- relation_close(onerel, lmode);
+ relation_close(rel, lmode);
PopActiveSnapshot();
CommitTransactionCommand();
return false;
@@ -1824,15 +1824,15 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params)
/*
* Check that it's of a vacuumable relkind.
*/
- if (onerel->rd_rel->relkind != RELKIND_RELATION &&
- onerel->rd_rel->relkind != RELKIND_MATVIEW &&
- onerel->rd_rel->relkind != RELKIND_TOASTVALUE &&
- onerel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
+ if (rel->rd_rel->relkind != RELKIND_RELATION &&
+ rel->rd_rel->relkind != RELKIND_MATVIEW &&
+ rel->rd_rel->relkind != RELKIND_TOASTVALUE &&
+ rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
{
ereport(WARNING,
(errmsg("skipping \"%s\" --- cannot vacuum non-tables or special system tables",
- RelationGetRelationName(onerel))));
- relation_close(onerel, lmode);
+ RelationGetRelationName(rel))));
+ relation_close(rel, lmode);
PopActiveSnapshot();
CommitTransactionCommand();
return false;
@@ -1845,9 +1845,9 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params)
* warning here; it would just lead to chatter during a database-wide
* VACUUM.)
*/
- if (RELATION_IS_OTHER_TEMP(onerel))
+ if (RELATION_IS_OTHER_TEMP(rel))
{
- relation_close(onerel, lmode);
+ relation_close(rel, lmode);
PopActiveSnapshot();
CommitTransactionCommand();
return false;
@@ -1858,9 +1858,9 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params)
* useful work is on their child partitions, which have been queued up for
* us separately.
*/
- if (onerel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
+ if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
{
- relation_close(onerel, lmode);
+ relation_close(rel, lmode);
PopActiveSnapshot();
CommitTransactionCommand();
/* It's OK to proceed with ANALYZE on this table */
@@ -1877,14 +1877,14 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params)
* because the lock manager knows that both lock requests are from the
* same process.
*/
- onerelid = onerel->rd_lockInfo.lockRelId;
- LockRelationIdForSession(&onerelid, lmode);
+ lockrelid = rel->rd_lockInfo.lockRelId;
+ LockRelationIdForSession(&lockrelid, lmode);
/* Set index cleanup option based on reloptions if not yet */
if (params->index_cleanup == VACOPT_TERNARY_DEFAULT)
{
- if (onerel->rd_options == NULL ||
- ((StdRdOptions *) onerel->rd_options)->vacuum_index_cleanup)
+ if (rel->rd_options == NULL ||
+ ((StdRdOptions *) rel->rd_options)->vacuum_index_cleanup)
params->index_cleanup = VACOPT_TERNARY_ENABLED;
else
params->index_cleanup = VACOPT_TERNARY_DISABLED;
@@ -1893,8 +1893,8 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params)
/* Set truncate option based on reloptions if not yet */
if (params->truncate == VACOPT_TERNARY_DEFAULT)
{
- if (onerel->rd_options == NULL ||
- ((StdRdOptions *) onerel->rd_options)->vacuum_truncate)
+ if (rel->rd_options == NULL ||
+ ((StdRdOptions *) rel->rd_options)->vacuum_truncate)
params->truncate = VACOPT_TERNARY_ENABLED;
else
params->truncate = VACOPT_TERNARY_DISABLED;
@@ -1907,7 +1907,7 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params)
*/
if ((params->options & VACOPT_PROCESS_TOAST) != 0 &&
(params->options & VACOPT_FULL) == 0)
- toast_relid = onerel->rd_rel->reltoastrelid;
+ toast_relid = rel->rd_rel->reltoastrelid;
else
toast_relid = InvalidOid;
@@ -1918,7 +1918,7 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params)
* unnecessary, but harmless, for lazy VACUUM.)
*/
GetUserIdAndSecContext(&save_userid, &save_sec_context);
- SetUserIdAndSecContext(onerel->rd_rel->relowner,
+ SetUserIdAndSecContext(rel->rd_rel->relowner,
save_sec_context | SECURITY_RESTRICTED_OPERATION);
save_nestlevel = NewGUCNestLevel();
@@ -1930,8 +1930,8 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params)
ClusterParams cluster_params = {0};
/* close relation before vacuuming, but hold lock until commit */
- relation_close(onerel, NoLock);
- onerel = NULL;
+ relation_close(rel, NoLock);
+ rel = NULL;
if ((params->options & VACOPT_VERBOSE) != 0)
cluster_params.options |= CLUOPT_VERBOSE;
@@ -1940,7 +1940,7 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params)
cluster_rel(relid, InvalidOid, &cluster_params);
}
else
- table_relation_vacuum(onerel, params, vac_strategy);
+ table_relation_vacuum(rel, params, vac_strategy);
/* Roll back any GUC changes executed by index functions */
AtEOXact_GUC(false, save_nestlevel);
@@ -1949,8 +1949,8 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params)
SetUserIdAndSecContext(save_userid, save_sec_context);
/* all done with this class, but hold lock until commit */
- if (onerel)
- relation_close(onerel, NoLock);
+ if (rel)
+ relation_close(rel, NoLock);
/*
* Complete the transaction and free all temporary memory used.
@@ -1971,7 +1971,7 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params)
/*
* Now release the session-level lock on the main table.
*/
- UnlockRelationIdForSession(&onerelid, lmode);
+ UnlockRelationIdForSession(&lockrelid, lmode);
/* Report that we really did it. */
return true;
diff --git a/src/include/access/genam.h b/src/include/access/genam.h
index 4515401869f..480a4762f52 100644
--- a/src/include/access/genam.h
+++ b/src/include/access/genam.h
@@ -177,11 +177,11 @@ extern bool index_getnext_slot(IndexScanDesc scan, ScanDirection direction,
extern int64 index_getbitmap(IndexScanDesc scan, TIDBitmap *bitmap);
extern IndexBulkDeleteResult *index_bulk_delete(IndexVacuumInfo *info,
- IndexBulkDeleteResult *stats,
+ IndexBulkDeleteResult *istat,
IndexBulkDeleteCallback callback,
void *callback_state);
extern IndexBulkDeleteResult *index_vacuum_cleanup(IndexVacuumInfo *info,
- IndexBulkDeleteResult *stats);
+ IndexBulkDeleteResult *istat);
extern bool index_can_return(Relation indexRelation, int attno);
extern RegProcedure index_getprocid(Relation irel, AttrNumber attnum,
uint16 procnum);
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index d803f277877..ceb625e13ac 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -196,7 +196,7 @@ extern void heap_get_root_tuples(Page page, OffsetNumber *root_offsets);
/* in heap/vacuumlazy.c */
struct VacuumParams;
-extern void heap_vacuum_rel(Relation onerel,
+extern void heap_vacuum_rel(Relation rel,
struct VacuumParams *params, BufferAccessStrategy bstrategy);
extern void parallel_vacuum_main(dsm_segment *seg, shm_toc *toc);
diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h
index 414b6b4d578..9f1e4a1ac96 100644
--- a/src/include/access/tableam.h
+++ b/src/include/access/tableam.h
@@ -625,7 +625,7 @@ typedef struct TableAmRoutine
* There probably, in the future, needs to be a separate callback to
* integrate with autovacuum's scheduling.
*/
- void (*relation_vacuum) (Relation onerel,
+ void (*relation_vacuum) (Relation rel,
struct VacuumParams *params,
BufferAccessStrategy bstrategy);