Diffstat (limited to 'src')
-rw-r--r--  src/backend/access/heap/hio.c         31
-rw-r--r--  src/backend/access/heap/vacuumlazy.c  77
2 files changed, 59 insertions(+), 49 deletions(-)
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 3da0b49ccc4..985c4c47779 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -204,7 +204,8 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
/*
* Extend by one page. This should generally match the main-line
* extension code in RelationGetBufferForTuple, except that we hold
- * the relation extension lock throughout.
+ * the relation extension lock throughout, and we don't immediately
+ * initialize the page (see below).
*/
buffer = ReadBufferBI(relation, P_NEW, bistate);
@@ -216,18 +217,16 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
BufferGetBlockNumber(buffer),
RelationGetRelationName(relation));
- PageInit(page, BufferGetPageSize(buffer), 0);
-
/*
- * We mark all the new buffers dirty, but do nothing to write them
- * out; they'll probably get used soon, and even if they are not, a
- * crash will leave an okay all-zeroes page on disk.
+ * Add the page to the FSM without initializing it. If we were to
+ * initialize it here, the page would potentially get flushed out to
+ * disk before we add any useful content. There's no guarantee that'd
+ * happen before a potential crash, so we have to be able to deal with
+ * uninitialized pages anyway; skipping the initialization here thus
+ * avoids potentially unnecessary writes.
*/
- MarkBufferDirty(buffer);
-
- /* we'll need this info below */
blockNum = BufferGetBlockNumber(buffer);
- freespace = PageGetHeapFreeSpace(page);
+ freespace = BufferGetPageSize(buffer) - SizeOfPageHeaderData;
UnlockReleaseBuffer(buffer);
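The replaced freespace computation deserves a note: PageGetHeapFreeSpace()
only gives a meaningful answer for an initialized page, and this page
deliberately stays all-zeroes, so the hunk computes the space by hand. A
standalone sketch of the arithmetic, assuming the default build values
(8 kB pages, 24-byte page header, 4-byte line pointers); the constants
stand in for BLCKSZ, SizeOfPageHeaderData and sizeof(ItemIdData):

#include <stdio.h>

/* assumed defaults; in the backend these come from BLCKSZ,
 * SizeOfPageHeaderData and sizeof(ItemIdData) */
#define PAGE_SIZE        8192
#define PAGE_HEADER_SIZE 24
#define LINE_POINTER     4

int
main(void)
{
	/* what the hunk above records in the FSM for a still-zeroed page */
	int		recorded = PAGE_SIZE - PAGE_HEADER_SIZE;

	/*
	 * what PageGetHeapFreeSpace() would report right after PageInit(),
	 * which reserves room for one new line pointer
	 */
	int		initialized = PAGE_SIZE - PAGE_HEADER_SIZE - LINE_POINTER;

	printf("recorded: %d, after PageInit: %d\n", recorded, initialized);
	return 0;
}

The recorded value (8168) slightly overestimates what an initialized page
can hold (8164); that's harmless, since the FSM stores free space in
coarse categories to begin with.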
@@ -479,6 +478,18 @@ loop:
* we're done.
*/
page = BufferGetPage(buffer);
+
+ /*
+ * Initialize the page; it'll be used soon. We could avoid dirtying
+ * the buffer here and rely on the caller to do so whenever it puts a
+ * tuple onto the page, but there seems to be little benefit in that.
+ */
+ if (PageIsNew(page))
+ {
+ PageInit(page, BufferGetPageSize(buffer), 0);
+ MarkBufferDirty(buffer);
+ }
+
pageFreeSpace = PageGetHeapFreeSpace(page);
if (len + saveFreeSpace <= pageFreeSpace)
{
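The initialize-on-first-use logic added above can be read as a small,
self-contained pattern. A minimal sketch, assuming backend context and a
hypothetical helper name; the caller must hold an exclusive content lock
on the buffer, as RelationGetBufferForTuple does here:

#include "postgres.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"

/* hypothetical helper: set up a just-extended page on first use */
static Size
prepare_new_page(Buffer buffer)
{
	Page		page = BufferGetPage(buffer);

	/* caller holds an exclusive content lock on the buffer */
	if (PageIsNew(page))
	{
		PageInit(page, BufferGetPageSize(buffer), 0);
		MarkBufferDirty(buffer);
	}

	return PageGetHeapFreeSpace(page);
}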
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 37aa484ec3a..7f60d688686 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -861,42 +861,38 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
if (PageIsNew(page))
{
/*
- * An all-zeroes page could be left over if a backend extends the
- * relation but crashes before initializing the page. Reclaim such
- * pages for use.
+ * All-zeroes pages can be left over either if a backend extends the
+ * relation by a single page, but crashes before the newly initialized
+ * page has been written out, or if the relation was bulk-extended
+ * (which creates a number of uninitialized pages at the tail end of
+ * the relation, but does enter them into the FSM).
*
- * We have to be careful here because we could be looking at a
- * page that someone has just added to the relation and not yet
- * been able to initialize (see RelationGetBufferForTuple). To
- * protect against that, release the buffer lock, grab the
- * relation extension lock momentarily, and re-lock the buffer. If
- * the page is still uninitialized by then, it must be left over
- * from a crashed backend, and we can initialize it.
+ * Make sure these pages are in the FSM so that they can be reused.
+ * Do that by testing whether there's any space recorded for the
+ * page; if not, enter it into the FSM.
*
- * We don't really need the relation lock when this is a new or
- * temp relation, but it's probably not worth the code space to
- * check that, since this surely isn't a critical path.
- *
- * Note: the comparable code in vacuum.c need not worry because
- * it's got exclusive lock on the whole relation.
+ * Note we do not enter the page into the visibilitymap. That has the
+ * downside that we repeatedly visit this page in subsequent vacuums,
+ * but otherwise we'd never discover the space on a promoted standby.
+ * The harm of repeated checking ought normally to be small - the
+ * space usually should get used at some point, otherwise there
+ * wouldn't be any regular vacuums.
+ */
+ empty_pages++;
+
+ /*
+ * Check the FSM only after releasing the buffer lock; the FSM is
+ * approximate, after all.
*/
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- LockRelationForExtension(onerel, ExclusiveLock);
- UnlockRelationForExtension(onerel, ExclusiveLock);
- LockBufferForCleanup(buf);
- if (PageIsNew(page))
- {
- ereport(WARNING,
- (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
- relname, blkno)));
- PageInit(page, BufferGetPageSize(buf), 0);
- empty_pages++;
- }
- freespace = PageGetHeapFreeSpace(page);
- MarkBufferDirty(buf);
UnlockReleaseBuffer(buf);
- RecordPageWithFreeSpace(onerel, blkno, freespace);
+ if (GetRecordedFreeSpace(onerel, blkno) == 0)
+ {
+ Size freespace;
+
+ freespace = BufferGetPageSize(buf) - SizeOfPageHeaderData;
+ RecordPageWithFreeSpace(onerel, blkno, freespace);
+ }
continue;
}
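Condensed, the new FSM handling amounts to the sketch below (hypothetical
helper name; it uses BLCKSZ directly, so nothing touches the buffer after
UnlockReleaseBuffer()). GetRecordedFreeSpace() returns 0 both when the FSM
records no space and when the relation has no FSM entry for the block yet,
so a single test covers both cases:

#include "postgres.h"
#include "storage/bufpage.h"
#include "storage/freespace.h"
#include "utils/rel.h"

/* hypothetical helper: keep an all-zeroes heap page discoverable */
static void
ensure_free_space_recorded(Relation onerel, BlockNumber blkno)
{
	if (GetRecordedFreeSpace(onerel, blkno) == 0)
		RecordPageWithFreeSpace(onerel, blkno,
								BLCKSZ - SizeOfPageHeaderData);
}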
@@ -905,7 +901,10 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
empty_pages++;
freespace = PageGetHeapFreeSpace(page);
- /* empty pages are always all-visible and all-frozen */
+ /*
+ * Empty pages are always all-visible and all-frozen (note that
+ * the same is currently not true for new pages, see above).
+ */
if (!PageIsAllVisible(page))
{
START_CRIT_SECTION();
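For reference, the two visibilitymap bits behind that comment are plain
flag bits from access/visibilitymap.h: an empty, initialized page can get
both at once (no tuples means no unfrozen tuples either), while a
still-new page gets neither and is therefore revisited by later vacuums.
A one-line illustration, assuming backend context:

#include "postgres.h"
#include "access/visibilitymap.h"

/* an empty page qualifies for both bits simultaneously */
uint8		vmflags = VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN;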
@@ -1639,12 +1638,13 @@ lazy_check_needs_freeze(Buffer buf, bool *hastup)
*hastup = false;
- /* If we hit an uninitialized page, we want to force vacuuming it. */
- if (PageIsNew(page))
- return true;
-
- /* Quick out for ordinary empty page. */
- if (PageIsEmpty(page))
+ /*
+ * New and empty pages, obviously, don't contain tuples. We could make
+ * sure that the page is registered in the FSM, but it doesn't seem worth
+ * waiting for a cleanup lock just for that, especially because it's
+ * likely that the pin holder will do so.
+ */
+ if (PageIsNew(page) || PageIsEmpty(page))
return false;
maxoff = PageGetMaxOffsetNumber(page);
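Both tests in the rewritten early-exit are cheap header checks; simplified
equivalents of the bufpage.h macros, for reference (function names are
illustrative, not the backend's):

#include "postgres.h"
#include "storage/bufpage.h"

/* all-zeroes page that has never been through PageInit() */
static inline bool
page_is_new(Page page)
{
	return ((PageHeader) page)->pd_upper == 0;
}

/* initialized page on which no line pointers have been allocated */
static inline bool
page_is_empty(Page page)
{
	return ((PageHeader) page)->pd_lower <= SizeOfPageHeaderData;
}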
@@ -2029,7 +2029,6 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
if (PageIsNew(page) || PageIsEmpty(page))
{
- /* PageIsNew probably shouldn't happen... */
UnlockReleaseBuffer(buf);
continue;
}