about summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/backend/access/nbtree/nbtpage.c7
-rw-r--r--src/backend/access/nbtree/nbtree.c3
-rw-r--r--src/include/access/nbtree.h8
3 files changed, 8 insertions, 10 deletions
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 151ad37a542..0144c3ab571 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -935,7 +935,7 @@ _bt_getbuf(Relation rel, Relation heaprel, BlockNumber blkno, int access)
return buf;
}
- if (BTPageIsRecyclable(page))
+ if (BTPageIsRecyclable(page, heaprel))
{
/*
* If we are generating WAL for Hot Standby then create a
@@ -2963,6 +2963,7 @@ void
_bt_pendingfsm_finalize(Relation rel, BTVacState *vstate)
{
IndexBulkDeleteResult *stats = vstate->stats;
+ Relation heaprel = vstate->info->heaprel;
Assert(stats->pages_newly_deleted >= vstate->npendingpages);
@@ -2995,7 +2996,7 @@ _bt_pendingfsm_finalize(Relation rel, BTVacState *vstate)
* essential; GlobalVisCheckRemovableFullXid() will not reliably recognize
* that it is now safe to recycle newly deleted pages without this step.
*/
- GetOldestNonRemovableTransactionId(NULL);
+ GetOldestNonRemovableTransactionId(heaprel);
for (int i = 0; i < vstate->npendingpages; i++)
{
@@ -3010,7 +3011,7 @@ _bt_pendingfsm_finalize(Relation rel, BTVacState *vstate)
* must be non-recyclable too, since _bt_pendingfsm_add() adds pages
* to the array in safexid order.
*/
- if (!GlobalVisCheckRemovableFullXid(NULL, safexid))
+ if (!GlobalVisCheckRemovableFullXid(heaprel, safexid))
break;
RecordFreeIndexPage(rel, target);
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 97a39b0f658..409a2c12100 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -1039,6 +1039,7 @@ btvacuumpage(BTVacState *vstate, BlockNumber scanblkno)
IndexBulkDeleteCallback callback = vstate->callback;
void *callback_state = vstate->callback_state;
Relation rel = info->index;
+ Relation heaprel = info->heaprel;
bool attempt_pagedel;
BlockNumber blkno,
backtrack_to;
@@ -1124,7 +1125,7 @@ backtrack:
}
}
- if (!opaque || BTPageIsRecyclable(page))
+ if (!opaque || BTPageIsRecyclable(page, heaprel))
{
/* Okay to recycle this page (which could be leaf or internal) */
RecordFreeIndexPage(rel, blkno);
diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h
index 6dee3070420..953bf6586b9 100644
--- a/src/include/access/nbtree.h
+++ b/src/include/access/nbtree.h
@@ -288,7 +288,7 @@ BTPageGetDeleteXid(Page page)
* well need special handling for new pages anyway.
*/
static inline bool
-BTPageIsRecyclable(Page page)
+BTPageIsRecyclable(Page page, Relation heaprel)
{
BTPageOpaque opaque;
@@ -307,12 +307,8 @@ BTPageIsRecyclable(Page page)
* For that check if the deletion XID could still be visible to
* anyone. If not, then no scan that's still in progress could have
* seen its downlink, and we can recycle it.
- *
- * XXX: If we had the heap relation we could be more aggressive about
- * recycling deleted pages in non-catalog relations. For now we just
- * pass NULL. That is at least simple and consistent.
*/
- return GlobalVisCheckRemovableFullXid(NULL, BTPageGetDeleteXid(page));
+ return GlobalVisCheckRemovableFullXid(heaprel, BTPageGetDeleteXid(page));
}
return false;