Diffstat (limited to 'src/backend/commands/vacuum.c')
-rw-r--r--  src/backend/commands/vacuum.c  154
1 file changed, 150 insertions, 4 deletions
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 5c4bc15b441..3b481bcf860 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -3,10 +3,12 @@
* vacuum.c
* The postgres vacuum cleaner.
*
- * This file now includes only control and dispatch code for VACUUM and
- * ANALYZE commands. Regular VACUUM is implemented in vacuumlazy.c,
- * ANALYZE in analyze.c, and VACUUM FULL is a variant of CLUSTER, handled
- * in cluster.c.
+ * This file includes (a) control and dispatch code for VACUUM and ANALYZE
+ * commands, (b) code to compute various vacuum thresholds, and (c) index
+ * vacuum code.
+ *
+ * VACUUM for heap AM is implemented in vacuumlazy.c, ANALYZE in analyze.c, and
+ * VACUUM FULL is a variant of CLUSTER, handled in cluster.c.
*
*
* Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
@@ -32,6 +34,7 @@
#include "access/transam.h"
#include "access/xact.h"
#include "catalog/namespace.h"
+#include "catalog/index.h"
#include "catalog/pg_database.h"
#include "catalog/pg_inherits.h"
#include "catalog/pg_namespace.h"
@@ -51,6 +54,7 @@
#include "utils/fmgroids.h"
#include "utils/guc.h"
#include "utils/memutils.h"
+#include "utils/pg_rusage.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
@@ -89,6 +93,8 @@ static void vac_truncate_clog(TransactionId frozenXID,
static bool vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params);
static double compute_parallel_delay(void);
static VacOptValue get_vacoptval_from_boolean(DefElem *def);
+static bool vac_tid_reaped(ItemPointer itemptr, void *state);
+static int vac_cmp_itemptr(const void *left, const void *right);
 
/*
* Primary entry point for manual VACUUM and ANALYZE commands
@@ -2258,3 +2264,143 @@ get_vacoptval_from_boolean(DefElem *def)
{
return defGetBoolean(def) ? VACOPTVALUE_ENABLED : VACOPTVALUE_DISABLED;
}
+
+/*
+ * vac_bulkdel_one_index() -- bulk-deletion for index relation.
+ *
+ * Returns bulk delete stats derived from input stats
+ */
+IndexBulkDeleteResult *
+vac_bulkdel_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat,
+ VacDeadItems *dead_items)
+{
+ PGRUsage ru0;
+
+ pg_rusage_init(&ru0);
+
+ /* Do bulk deletion */
+ istat = index_bulk_delete(ivinfo, istat, vac_tid_reaped,
+ (void *) dead_items);
+
+ ereport(ivinfo->message_level,
+ (errmsg("scanned index \"%s\" to remove %d row versions",
+ RelationGetRelationName(ivinfo->index),
+ dead_items->num_items),
+ errdetail_internal("%s", pg_rusage_show(&ru0))));
+
+ return istat;
+}
+
+/*
+ * vac_cleanup_one_index() -- do post-vacuum cleanup for index relation.
+ *
+ * Returns bulk delete stats derived from input stats
+ */
+IndexBulkDeleteResult *
+vac_cleanup_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat)
+{
+ PGRUsage ru0;
+
+ pg_rusage_init(&ru0);
+
+ istat = index_vacuum_cleanup(ivinfo, istat);
+
+ if (istat)
+ {
+ ereport(ivinfo->message_level,
+ (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
+ RelationGetRelationName(ivinfo->index),
+ istat->num_index_tuples,
+ istat->num_pages),
+ errdetail("%.0f index row versions were removed.\n"
+ "%u index pages were newly deleted.\n"
+ "%u index pages are currently deleted, of which %u are currently reusable.\n"
+ "%s.",
+ istat->tuples_removed,
+ istat->pages_newly_deleted,
+ istat->pages_deleted, istat->pages_free,
+ pg_rusage_show(&ru0))));
+ }
+
+ return istat;
+}
+
+/*
+ * Returns the total required space for VACUUM's dead_items array given a
+ * max_items value.
+ */
+Size
+vac_max_items_to_alloc_size(int max_items)
+{
+ Assert(max_items <= MAXDEADITEMS(MaxAllocSize));
+
+ return offsetof(VacDeadItems, items) + sizeof(ItemPointerData) * max_items;
+}
+
+/*
+ * vac_tid_reaped() -- is a particular tid deletable?
+ *
+ * This has the right signature to be an IndexBulkDeleteCallback.
+ *
+ * Assumes dead_items array is sorted (in ascending TID order).
+ */
+static bool
+vac_tid_reaped(ItemPointer itemptr, void *state)
+{
+ VacDeadItems *dead_items = (VacDeadItems *) state;
+ int64 litem,
+ ritem,
+ item;
+ ItemPointer res;
+
+ litem = itemptr_encode(&dead_items->items[0]);
+ ritem = itemptr_encode(&dead_items->items[dead_items->num_items - 1]);
+ item = itemptr_encode(itemptr);
+
+ /*
+ * Doing a simple bound check before bsearch() is useful to avoid the
+ * extra cost of bsearch(), especially if dead items on the heap are
+ * concentrated in a certain range. Since this function is called for
+ * every index tuple, it pays to be really fast.
+ */
+ if (item < litem || item > ritem)
+ return false;
+
+ res = (ItemPointer) bsearch((void *) itemptr,
+ (void *) dead_items->items,
+ dead_items->num_items,
+ sizeof(ItemPointerData),
+ vac_cmp_itemptr);
+
+ return (res != NULL);
+}
+
+/*
+ * Comparator routines for use with qsort() and bsearch().
+ */
+static int
+vac_cmp_itemptr(const void *left, const void *right)
+{
+ BlockNumber lblk,
+ rblk;
+ OffsetNumber loff,
+ roff;
+
+ lblk = ItemPointerGetBlockNumber((ItemPointer) left);
+ rblk = ItemPointerGetBlockNumber((ItemPointer) right);
+
+ if (lblk < rblk)
+ return -1;
+ if (lblk > rblk)
+ return 1;
+
+ loff = ItemPointerGetOffsetNumber((ItemPointer) left);
+ roff = ItemPointerGetOffsetNumber((ItemPointer) right);
+
+ if (loff < roff)
+ return -1;
+ if (loff > roff)
+ return 1;
+
+ return 0;
+}
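
Beyond the PostgreSQL-specific machinery above, the new helpers lean on two plain-C idioms: vac_max_items_to_alloc_size() sizes the flexible-array VacDeadItems struct with offsetof() plus an element count, and vac_tid_reaped() tests membership in the sorted dead-TID array with a cheap min/max bound check before falling back to bsearch(). The following standalone sketch illustrates both idioms outside the server; the tid and dead_items_array types and all helper names are simplified, hypothetical stand-ins rather than PostgreSQL's ItemPointerData or VacDeadItems.

/*
 * Standalone sketch (not PostgreSQL code) of two C idioms the new helpers
 * rely on: sizing a flexible-array struct with offsetof(), as
 * vac_max_items_to_alloc_size() does for VacDeadItems, and probing a
 * sorted TID array with a cheap bound check before bsearch(), as
 * vac_tid_reaped() does.  The tid type and encoding are simplified
 * stand-ins, not the server's ItemPointerData.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct
{
	uint32_t	block;			/* heap block number */
	uint16_t	offset;			/* line pointer number within the block */
} tid;

typedef struct
{
	int			max_items;		/* allocated length of items[] */
	int			num_items;		/* current number of entries, kept sorted */
	tid			items[];		/* flexible array member */
} dead_items_array;

/* Space needed for a dead_items_array holding max_items TIDs. */
static size_t
dead_items_size(int max_items)
{
	return offsetof(dead_items_array, items) + sizeof(tid) * max_items;
}

/* Encode a tid as one integer that preserves (block, offset) order. */
static int64_t
tid_encode(const tid *t)
{
	return ((int64_t) t->block << 16) | t->offset;
}

/* qsort()/bsearch() comparator over tids. */
static int
tid_cmp(const void *a, const void *b)
{
	int64_t		l = tid_encode((const tid *) a);
	int64_t		r = tid_encode((const tid *) b);

	return (l > r) - (l < r);
}

/* Is "key" in the sorted dead-items array?  Mirrors vac_tid_reaped(). */
static bool
tid_is_dead(const tid *key, const dead_items_array *dead)
{
	int64_t		item = tid_encode(key);

	/* Cheap bound check skips bsearch() for keys outside the stored range. */
	if (dead->num_items == 0 ||
		item < tid_encode(&dead->items[0]) ||
		item > tid_encode(&dead->items[dead->num_items - 1]))
		return false;

	return bsearch(key, dead->items, dead->num_items,
				   sizeof(tid), tid_cmp) != NULL;
}

int
main(void)
{
	dead_items_array *dead = malloc(dead_items_size(4));
	tid			hit = {4, 2};
	tid			miss = {42, 1};

	dead->max_items = 4;
	dead->num_items = 4;
	dead->items[0] = (tid) {1, 3};
	dead->items[1] = (tid) {1, 7};
	dead->items[2] = (tid) {4, 2};
	dead->items[3] = (tid) {9, 1};
	qsort(dead->items, dead->num_items, sizeof(tid), tid_cmp);

	printf("(4,2) dead? %d\n", tid_is_dead(&hit, dead));	/* prints 1 */
	printf("(42,1) dead? %d\n", tid_is_dead(&miss, dead));	/* prints 0 */

	free(dead);
	return 0;
}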