about summary refs log tree commit diff
path: root/src/backend/commands/vacuumlazy.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/backend/commands/vacuumlazy.c')
-rw-r--r--  src/backend/commands/vacuumlazy.c | 67
1 file changed, 66 insertions(+), 1 deletion(-)
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 14c66b498d3..c2711528770 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -31,7 +31,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.34 2004/02/03 17:34:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.35 2004/02/06 19:36:17 wieck Exp $
*
*-------------------------------------------------------------------------
*/
@@ -148,6 +148,11 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
vac_open_indexes(onerel, &nindexes, &Irel);
hasindex = (nindexes > 0);
+ /* Turn on vacuum cost accounting */
+ if (VacuumCostNaptime > 0)
+ VacuumCostActive = true;
+ VacuumCostBalance = 0;
+
/* Do the vacuuming */
lazy_scan_heap(onerel, vacrelstats, Irel, nindexes);
@@ -168,6 +173,9 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
/* Update shared free space map with final free space info */
lazy_update_fsm(onerel, vacrelstats);
+ /* Turn off vacuum cost accounting */
+ VacuumCostActive = false;
+
/* Update statistics in pg_class */
vac_update_relstats(RelationGetRelid(onerel), vacrelstats->rel_pages,
vacrelstats->rel_tuples, hasindex);
@@ -229,6 +237,25 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
CHECK_FOR_INTERRUPTS();
/*
+ * Do the napping in a cost based vacuum.
+ */
+ if (VacuumCostActive && !InterruptPending &&
+ VacuumCostBalance >= VacuumCostLimit)
+ {
+ int msec;
+
+ msec = VacuumCostNaptime * VacuumCostBalance / VacuumCostLimit;
+ if (msec < VacuumCostNaptime * 4)
+ PG_MSLEEP(msec);
+ else
+ PG_MSLEEP(VacuumCostNaptime * 4);
+
+ VacuumCostBalance = 0;
+
+ CHECK_FOR_INTERRUPTS();
+ }
+
+ /*
* If we are close to overrunning the available space for
* dead-tuple TIDs, pause and do a cycle of vacuuming before we
* tackle this page.
@@ -469,6 +496,25 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
CHECK_FOR_INTERRUPTS();
+ /*
+ * Do the napping in a cost based vacuum.
+ */
+ if (VacuumCostActive && !InterruptPending &&
+ VacuumCostBalance >= VacuumCostLimit)
+ {
+ int msec;
+
+ msec = VacuumCostNaptime * VacuumCostBalance / VacuumCostLimit;
+ if (msec < VacuumCostNaptime * 4)
+ PG_MSLEEP(msec);
+ else
+ PG_MSLEEP(VacuumCostNaptime * 4);
+
+ VacuumCostBalance = 0;
+
+ CHECK_FOR_INTERRUPTS();
+ }
+
tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
buf = ReadBuffer(onerel, tblk);
LockBufferForCleanup(buf);
@@ -800,6 +846,25 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
CHECK_FOR_INTERRUPTS();
+ /*
+ * Do the napping in a cost based vacuum.
+ */
+ if (VacuumCostActive && !InterruptPending &&
+ VacuumCostBalance >= VacuumCostLimit)
+ {
+ int msec;
+
+ msec = VacuumCostNaptime * VacuumCostBalance / VacuumCostLimit;
+ if (msec < VacuumCostNaptime * 4)
+ PG_MSLEEP(msec);
+ else
+ PG_MSLEEP(VacuumCostNaptime * 4);
+
+ VacuumCostBalance = 0;
+
+ CHECK_FOR_INTERRUPTS();
+ }
+
blkno--;
buf = ReadBuffer(onerel, blkno);