author     Peter Eisentraut <peter@eisentraut.org>    2022-11-16 16:17:18 +0100
committer  Peter Eisentraut <peter@eisentraut.org>    2022-11-16 16:40:34 +0100
commit     8e1db29cdbbd218ab6ba53eea56624553c3bef8c (patch)
tree       3476d40c9a60ac02c620004171106aa0bb0ca2c0 /src
parent     d1cb4e9f92c3db5c8fb52ccd1d502c73ffcc47e0 (diff)
Variable renaming in preparation for refactoring
Rename page -> block and dp -> page where appropriate.  The old naming
mixed up block and page in confusing ways.

Author: Melanie Plageman <melanieplageman@gmail.com>
Discussion: https://www.postgresql.org/message-id/flat/CAAKRu_YSOnhKsDyFcqJsKtBSrd32DP-jjXmv7hL0BPD-z0TGXQ@mail.gmail.com
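For context, a BlockNumber names a block of the relation on disk, while a Page is a pointer to that block's image in a shared buffer; the old code used "page" for the former and "dp" for the latter. The sketch below (illustrative only, not part of the commit; count_normal_items() is a hypothetical helper) shows the convention the rename establishes when reading one heap block:

/*
 * Illustrative sketch of the naming convention after the rename:
 * "block" is a BlockNumber (which block of the relation to read),
 * "page" is a Page (pointer to that block's in-memory copy in a buffer).
 */
#include "postgres.h"

#include "storage/bufmgr.h"
#include "storage/bufpage.h"
#include "utils/rel.h"

static int
count_normal_items(Relation rel, BlockNumber block)
{
	Buffer		buffer;
	Page		page;
	OffsetNumber maxoff;
	OffsetNumber offnum;
	int			nnormal = 0;

	/* block identifies the on-disk block; read it into a shared buffer */
	buffer = ReadBufferExtended(rel, MAIN_FORKNUM, block, RBM_NORMAL, NULL);
	LockBuffer(buffer, BUFFER_LOCK_SHARE);

	/* page is the in-memory image of that block */
	page = BufferGetPage(buffer);
	maxoff = PageGetMaxOffsetNumber(page);

	for (offnum = FirstOffsetNumber; offnum <= maxoff;
		 offnum = OffsetNumberNext(offnum))
	{
		ItemId		lpp = PageGetItemId(page, offnum);

		/* count only normal line pointers; skip unused, dead, redirected */
		if (ItemIdIsNormal(lpp))
			nnormal++;
	}

	UnlockReleaseBuffer(buffer);
	return nnormal;
}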
Diffstat (limited to 'src')
-rw-r--r--   src/backend/access/heap/heapam.c           216
-rw-r--r--   src/backend/access/heap/heapam_handler.c    28
-rw-r--r--   src/include/access/heapam.h                   2
3 files changed, 123 insertions(+), 123 deletions(-)
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 6eff40a315d..2e5bb7e9c2e 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -373,19 +373,19 @@ heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlk
* which tuples on the page are visible.
*/
void
-heapgetpage(TableScanDesc sscan, BlockNumber page)
+heapgetpage(TableScanDesc sscan, BlockNumber block)
{
HeapScanDesc scan = (HeapScanDesc) sscan;
Buffer buffer;
Snapshot snapshot;
- Page dp;
+ Page page;
int lines;
int ntup;
OffsetNumber lineoff;
ItemId lpp;
bool all_visible;
- Assert(page < scan->rs_nblocks);
+ Assert(block < scan->rs_nblocks);
/* release previous scan buffer, if any */
if (BufferIsValid(scan->rs_cbuf))
@@ -402,9 +402,9 @@ heapgetpage(TableScanDesc sscan, BlockNumber page)
CHECK_FOR_INTERRUPTS();
/* read page using selected strategy */
- scan->rs_cbuf = ReadBufferExtended(scan->rs_base.rs_rd, MAIN_FORKNUM, page,
+ scan->rs_cbuf = ReadBufferExtended(scan->rs_base.rs_rd, MAIN_FORKNUM, block,
RBM_NORMAL, scan->rs_strategy);
- scan->rs_cblock = page;
+ scan->rs_cblock = block;
if (!(scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE))
return;
@@ -424,9 +424,9 @@ heapgetpage(TableScanDesc sscan, BlockNumber page)
*/
LockBuffer(buffer, BUFFER_LOCK_SHARE);
- dp = BufferGetPage(buffer);
- TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, dp);
- lines = PageGetMaxOffsetNumber(dp);
+ page = BufferGetPage(buffer);
+ TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, page);
+ lines = PageGetMaxOffsetNumber(page);
ntup = 0;
/*
@@ -449,9 +449,9 @@ heapgetpage(TableScanDesc sscan, BlockNumber page)
* full page write. Until we can prove that beyond doubt, let's check each
* tuple for visibility the hard way.
*/
- all_visible = PageIsAllVisible(dp) && !snapshot->takenDuringRecovery;
+ all_visible = PageIsAllVisible(page) && !snapshot->takenDuringRecovery;
- for (lineoff = FirstOffsetNumber, lpp = PageGetItemId(dp, lineoff);
+ for (lineoff = FirstOffsetNumber, lpp = PageGetItemId(page, lineoff);
lineoff <= lines;
lineoff++, lpp++)
{
@@ -461,9 +461,9 @@ heapgetpage(TableScanDesc sscan, BlockNumber page)
bool valid;
loctup.t_tableOid = RelationGetRelid(scan->rs_base.rs_rd);
- loctup.t_data = (HeapTupleHeader) PageGetItem(dp, lpp);
+ loctup.t_data = (HeapTupleHeader) PageGetItem(page, lpp);
loctup.t_len = ItemIdGetLength(lpp);
- ItemPointerSet(&(loctup.t_self), page, lineoff);
+ ItemPointerSet(&(loctup.t_self), block, lineoff);
if (all_visible)
valid = true;
@@ -516,9 +516,9 @@ heapgettup(HeapScanDesc scan,
HeapTuple tuple = &(scan->rs_ctup);
Snapshot snapshot = scan->rs_base.rs_snapshot;
bool backward = ScanDirectionIsBackward(dir);
- BlockNumber page;
+ BlockNumber block;
bool finished;
- Page dp;
+ Page page;
int lines;
OffsetNumber lineoff;
int linesleft;
@@ -550,11 +550,11 @@ heapgettup(HeapScanDesc scan,
table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
pbscanwork, pbscan);
- page = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
- pbscanwork, pbscan);
+ block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
+ pbscanwork, pbscan);
/* Other processes might have already finished the scan. */
- if (page == InvalidBlockNumber)
+ if (block == InvalidBlockNumber)
{
Assert(!BufferIsValid(scan->rs_cbuf));
tuple->t_data = NULL;
@@ -562,25 +562,25 @@ heapgettup(HeapScanDesc scan,
}
}
else
- page = scan->rs_startblock; /* first page */
- heapgetpage((TableScanDesc) scan, page);
+ block = scan->rs_startblock; /* first page */
+ heapgetpage((TableScanDesc) scan, block);
lineoff = FirstOffsetNumber; /* first offnum */
scan->rs_inited = true;
}
else
{
/* continue from previously returned page/tuple */
- page = scan->rs_cblock; /* current page */
+ block = scan->rs_cblock; /* current page */
lineoff = /* next offnum */
OffsetNumberNext(ItemPointerGetOffsetNumber(&(tuple->t_self)));
}
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
- dp = BufferGetPage(scan->rs_cbuf);
- TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, dp);
- lines = PageGetMaxOffsetNumber(dp);
- /* page and lineoff now reference the physically next tid */
+ page = BufferGetPage(scan->rs_cbuf);
+ TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, page);
+ lines = PageGetMaxOffsetNumber(page);
+ /* block and lineoff now reference the physically next tid */
linesleft = lines - lineoff + 1;
}
@@ -614,24 +614,24 @@ heapgettup(HeapScanDesc scan,
* rs_numblocks if it's been adjusted by heap_setscanlimits().
*/
if (scan->rs_numblocks != InvalidBlockNumber)
- page = (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks;
+ block = (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks;
else if (scan->rs_startblock > 0)
- page = scan->rs_startblock - 1;
+ block = scan->rs_startblock - 1;
else
- page = scan->rs_nblocks - 1;
- heapgetpage((TableScanDesc) scan, page);
+ block = scan->rs_nblocks - 1;
+ heapgetpage((TableScanDesc) scan, block);
}
else
{
/* continue from previously returned page/tuple */
- page = scan->rs_cblock; /* current page */
+ block = scan->rs_cblock; /* current page */
}
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
- dp = BufferGetPage(scan->rs_cbuf);
- TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, dp);
- lines = PageGetMaxOffsetNumber(dp);
+ page = BufferGetPage(scan->rs_cbuf);
+ TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, page);
+ lines = PageGetMaxOffsetNumber(page);
if (!scan->rs_inited)
{
@@ -643,14 +643,14 @@ heapgettup(HeapScanDesc scan,
/*
* The previous returned tuple may have been vacuumed since the
* previous scan when we use a non-MVCC snapshot, so we must
- * re-establish the lineoff <= PageGetMaxOffsetNumber(dp)
+ * re-establish the lineoff <= PageGetMaxOffsetNumber(page)
* invariant
*/
lineoff = /* previous offnum */
Min(lines,
OffsetNumberPrev(ItemPointerGetOffsetNumber(&(tuple->t_self))));
}
- /* page and lineoff now reference the physically previous tid */
+ /* block and lineoff now reference the physically previous tid */
linesleft = lineoff;
}
@@ -666,18 +666,18 @@ heapgettup(HeapScanDesc scan,
return;
}
- page = ItemPointerGetBlockNumber(&(tuple->t_self));
- if (page != scan->rs_cblock)
- heapgetpage((TableScanDesc) scan, page);
+ block = ItemPointerGetBlockNumber(&(tuple->t_self));
+ if (block != scan->rs_cblock)
+ heapgetpage((TableScanDesc) scan, block);
/* Since the tuple was previously fetched, needn't lock page here */
- dp = BufferGetPage(scan->rs_cbuf);
- TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, dp);
+ page = BufferGetPage(scan->rs_cbuf);
+ TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, page);
lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
- lpp = PageGetItemId(dp, lineoff);
+ lpp = PageGetItemId(page, lineoff);
Assert(ItemIdIsNormal(lpp));
- tuple->t_data = (HeapTupleHeader) PageGetItem(dp, lpp);
+ tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
tuple->t_len = ItemIdGetLength(lpp);
return;
@@ -687,7 +687,7 @@ heapgettup(HeapScanDesc scan,
* advance the scan until we find a qualifying tuple or run out of stuff
* to scan
*/
- lpp = PageGetItemId(dp, lineoff);
+ lpp = PageGetItemId(page, lineoff);
for (;;)
{
/*
@@ -703,9 +703,9 @@ heapgettup(HeapScanDesc scan,
{
bool valid;
- tuple->t_data = (HeapTupleHeader) PageGetItem(dp, lpp);
+ tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
tuple->t_len = ItemIdGetLength(lpp);
- ItemPointerSet(&(tuple->t_self), page, lineoff);
+ ItemPointerSet(&(tuple->t_self), block, lineoff);
/*
* if current tuple qualifies, return it.
@@ -756,11 +756,11 @@ heapgettup(HeapScanDesc scan,
*/
if (backward)
{
- finished = (page == scan->rs_startblock) ||
+ finished = (block == scan->rs_startblock) ||
(scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
- if (page == 0)
- page = scan->rs_nblocks;
- page--;
+ if (block == 0)
+ block = scan->rs_nblocks;
+ block--;
}
else if (scan->rs_base.rs_parallel != NULL)
{
@@ -769,16 +769,16 @@ heapgettup(HeapScanDesc scan,
ParallelBlockTableScanWorker pbscanwork =
scan->rs_parallelworkerdata;
- page = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
+ block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
pbscanwork, pbscan);
- finished = (page == InvalidBlockNumber);
+ finished = (block == InvalidBlockNumber);
}
else
{
- page++;
- if (page >= scan->rs_nblocks)
- page = 0;
- finished = (page == scan->rs_startblock) ||
+ block++;
+ if (block >= scan->rs_nblocks)
+ block = 0;
+ finished = (block == scan->rs_startblock) ||
(scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
/*
@@ -794,7 +794,7 @@ heapgettup(HeapScanDesc scan,
* We don't guarantee any specific ordering in general, though.
*/
if (scan->rs_base.rs_flags & SO_ALLOW_SYNC)
- ss_report_location(scan->rs_base.rs_rd, page);
+ ss_report_location(scan->rs_base.rs_rd, block);
}
/*
@@ -811,23 +811,23 @@ heapgettup(HeapScanDesc scan,
return;
}
- heapgetpage((TableScanDesc) scan, page);
+ heapgetpage((TableScanDesc) scan, block);
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
- dp = BufferGetPage(scan->rs_cbuf);
- TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, dp);
- lines = PageGetMaxOffsetNumber((Page) dp);
+ page = BufferGetPage(scan->rs_cbuf);
+ TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, page);
+ lines = PageGetMaxOffsetNumber(page);
linesleft = lines;
if (backward)
{
lineoff = lines;
- lpp = PageGetItemId(dp, lines);
+ lpp = PageGetItemId(page, lines);
}
else
{
lineoff = FirstOffsetNumber;
- lpp = PageGetItemId(dp, FirstOffsetNumber);
+ lpp = PageGetItemId(page, FirstOffsetNumber);
}
}
}
@@ -853,9 +853,9 @@ heapgettup_pagemode(HeapScanDesc scan,
{
HeapTuple tuple = &(scan->rs_ctup);
bool backward = ScanDirectionIsBackward(dir);
- BlockNumber page;
+ BlockNumber block;
bool finished;
- Page dp;
+ Page page;
int lines;
int lineindex;
OffsetNumber lineoff;
@@ -888,11 +888,11 @@ heapgettup_pagemode(HeapScanDesc scan,
table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
pbscanwork, pbscan);
- page = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
- pbscanwork, pbscan);
+ block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
+ pbscanwork, pbscan);
/* Other processes might have already finished the scan. */
- if (page == InvalidBlockNumber)
+ if (block == InvalidBlockNumber)
{
Assert(!BufferIsValid(scan->rs_cbuf));
tuple->t_data = NULL;
@@ -900,22 +900,22 @@ heapgettup_pagemode(HeapScanDesc scan,
}
}
else
- page = scan->rs_startblock; /* first page */
- heapgetpage((TableScanDesc) scan, page);
+ block = scan->rs_startblock; /* first page */
+ heapgetpage((TableScanDesc) scan, block);
lineindex = 0;
scan->rs_inited = true;
}
else
{
/* continue from previously returned page/tuple */
- page = scan->rs_cblock; /* current page */
+ block = scan->rs_cblock; /* current page */
lineindex = scan->rs_cindex + 1;
}
- dp = BufferGetPage(scan->rs_cbuf);
- TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, dp);
+ page = BufferGetPage(scan->rs_cbuf);
+ TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
lines = scan->rs_ntuples;
- /* page and lineindex now reference the next visible tid */
+ /* block and lineindex now reference the next visible tid */
linesleft = lines - lineindex;
}
@@ -949,21 +949,21 @@ heapgettup_pagemode(HeapScanDesc scan,
* rs_numblocks if it's been adjusted by heap_setscanlimits().
*/
if (scan->rs_numblocks != InvalidBlockNumber)
- page = (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks;
+ block = (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks;
else if (scan->rs_startblock > 0)
- page = scan->rs_startblock - 1;
+ block = scan->rs_startblock - 1;
else
- page = scan->rs_nblocks - 1;
- heapgetpage((TableScanDesc) scan, page);
+ block = scan->rs_nblocks - 1;
+ heapgetpage((TableScanDesc) scan, block);
}
else
{
/* continue from previously returned page/tuple */
- page = scan->rs_cblock; /* current page */
+ block = scan->rs_cblock; /* current page */
}
- dp = BufferGetPage(scan->rs_cbuf);
- TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, dp);
+ page = BufferGetPage(scan->rs_cbuf);
+ TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
lines = scan->rs_ntuples;
if (!scan->rs_inited)
@@ -975,7 +975,7 @@ heapgettup_pagemode(HeapScanDesc scan,
{
lineindex = scan->rs_cindex - 1;
}
- /* page and lineindex now reference the previous visible tid */
+ /* block and lineindex now reference the previous visible tid */
linesleft = lineindex + 1;
}
@@ -991,18 +991,18 @@ heapgettup_pagemode(HeapScanDesc scan,
return;
}
- page = ItemPointerGetBlockNumber(&(tuple->t_self));
- if (page != scan->rs_cblock)
- heapgetpage((TableScanDesc) scan, page);
+ block = ItemPointerGetBlockNumber(&(tuple->t_self));
+ if (block != scan->rs_cblock)
+ heapgetpage((TableScanDesc) scan, block);
/* Since the tuple was previously fetched, needn't lock page here */
- dp = BufferGetPage(scan->rs_cbuf);
- TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, dp);
+ page = BufferGetPage(scan->rs_cbuf);
+ TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
- lpp = PageGetItemId(dp, lineoff);
+ lpp = PageGetItemId(page, lineoff);
Assert(ItemIdIsNormal(lpp));
- tuple->t_data = (HeapTupleHeader) PageGetItem(dp, lpp);
+ tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
tuple->t_len = ItemIdGetLength(lpp);
/* check that rs_cindex is in sync */
@@ -1021,12 +1021,12 @@ heapgettup_pagemode(HeapScanDesc scan,
while (linesleft > 0)
{
lineoff = scan->rs_vistuples[lineindex];
- lpp = PageGetItemId(dp, lineoff);
+ lpp = PageGetItemId(page, lineoff);
Assert(ItemIdIsNormal(lpp));
- tuple->t_data = (HeapTupleHeader) PageGetItem(dp, lpp);
+ tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
tuple->t_len = ItemIdGetLength(lpp);
- ItemPointerSet(&(tuple->t_self), page, lineoff);
+ ItemPointerSet(&(tuple->t_self), block, lineoff);
/*
* if current tuple qualifies, return it.
@@ -1065,11 +1065,11 @@ heapgettup_pagemode(HeapScanDesc scan,
*/
if (backward)
{
- finished = (page == scan->rs_startblock) ||
+ finished = (block == scan->rs_startblock) ||
(scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
- if (page == 0)
- page = scan->rs_nblocks;
- page--;
+ if (block == 0)
+ block = scan->rs_nblocks;
+ block--;
}
else if (scan->rs_base.rs_parallel != NULL)
{
@@ -1078,16 +1078,16 @@ heapgettup_pagemode(HeapScanDesc scan,
ParallelBlockTableScanWorker pbscanwork =
scan->rs_parallelworkerdata;
- page = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
+ block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
pbscanwork, pbscan);
- finished = (page == InvalidBlockNumber);
+ finished = (block == InvalidBlockNumber);
}
else
{
- page++;
- if (page >= scan->rs_nblocks)
- page = 0;
- finished = (page == scan->rs_startblock) ||
+ block++;
+ if (block >= scan->rs_nblocks)
+ block = 0;
+ finished = (block == scan->rs_startblock) ||
(scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
/*
@@ -1103,7 +1103,7 @@ heapgettup_pagemode(HeapScanDesc scan,
* We don't guarantee any specific ordering in general, though.
*/
if (scan->rs_base.rs_flags & SO_ALLOW_SYNC)
- ss_report_location(scan->rs_base.rs_rd, page);
+ ss_report_location(scan->rs_base.rs_rd, block);
}
/*
@@ -1120,10 +1120,10 @@ heapgettup_pagemode(HeapScanDesc scan,
return;
}
- heapgetpage((TableScanDesc) scan, page);
+ heapgetpage((TableScanDesc) scan, block);
- dp = BufferGetPage(scan->rs_cbuf);
- TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, dp);
+ page = BufferGetPage(scan->rs_cbuf);
+ TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
lines = scan->rs_ntuples;
linesleft = lines;
if (backward)
@@ -1680,7 +1680,7 @@ heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
Snapshot snapshot, HeapTuple heapTuple,
bool *all_dead, bool first_call)
{
- Page dp = (Page) BufferGetPage(buffer);
+ Page page = BufferGetPage(buffer);
TransactionId prev_xmax = InvalidTransactionId;
BlockNumber blkno;
OffsetNumber offnum;
@@ -1708,10 +1708,10 @@ heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
ItemId lp;
/* check for bogus TID */
- if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
+ if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
break;
- lp = PageGetItemId(dp, offnum);
+ lp = PageGetItemId(page, offnum);
/* check for unused, dead, or redirected items */
if (!ItemIdIsNormal(lp))
@@ -1734,7 +1734,7 @@ heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
* because the SSI checks and the *Satisfies routine for historical
* MVCC snapshots need the correct tid to decide about the visibility.
*/
- heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
+ heapTuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
heapTuple->t_len = ItemIdGetLength(lp);
heapTuple->t_tableOid = RelationGetRelid(relation);
ItemPointerSet(&heapTuple->t_self, blkno, offnum);
diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c
index 103ddbd9788..ab1bcf3522d 100644
--- a/src/backend/access/heap/heapam_handler.c
+++ b/src/backend/access/heap/heapam_handler.c
@@ -2109,7 +2109,7 @@ heapam_scan_bitmap_next_block(TableScanDesc scan,
TBMIterateResult *tbmres)
{
HeapScanDesc hscan = (HeapScanDesc) scan;
- BlockNumber page = tbmres->blockno;
+ BlockNumber block = tbmres->blockno;
Buffer buffer;
Snapshot snapshot;
int ntup;
@@ -2123,7 +2123,7 @@ heapam_scan_bitmap_next_block(TableScanDesc scan,
* only hold an AccessShareLock, and it could be inserts from this
* backend).
*/
- if (page >= hscan->rs_nblocks)
+ if (block >= hscan->rs_nblocks)
return false;
/*
@@ -2131,8 +2131,8 @@ heapam_scan_bitmap_next_block(TableScanDesc scan,
*/
hscan->rs_cbuf = ReleaseAndReadBuffer(hscan->rs_cbuf,
scan->rs_rd,
- page);
- hscan->rs_cblock = page;
+ block);
+ hscan->rs_cblock = block;
buffer = hscan->rs_cbuf;
snapshot = scan->rs_snapshot;
@@ -2168,7 +2168,7 @@ heapam_scan_bitmap_next_block(TableScanDesc scan,
ItemPointerData tid;
HeapTupleData heapTuple;
- ItemPointerSet(&tid, page, offnum);
+ ItemPointerSet(&tid, block, offnum);
if (heap_hot_search_buffer(&tid, scan->rs_rd, buffer, snapshot,
&heapTuple, NULL, true))
hscan->rs_vistuples[ntup++] = ItemPointerGetOffsetNumber(&tid);
@@ -2180,8 +2180,8 @@ heapam_scan_bitmap_next_block(TableScanDesc scan,
* Bitmap is lossy, so we must examine each line pointer on the page.
* But we can ignore HOT chains, since we'll check each tuple anyway.
*/
- Page dp = (Page) BufferGetPage(buffer);
- OffsetNumber maxoff = PageGetMaxOffsetNumber(dp);
+ Page page = BufferGetPage(buffer);
+ OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
OffsetNumber offnum;
for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum))
@@ -2190,13 +2190,13 @@ heapam_scan_bitmap_next_block(TableScanDesc scan,
HeapTupleData loctup;
bool valid;
- lp = PageGetItemId(dp, offnum);
+ lp = PageGetItemId(page, offnum);
if (!ItemIdIsNormal(lp))
continue;
- loctup.t_data = (HeapTupleHeader) PageGetItem(dp, lp);
+ loctup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
loctup.t_len = ItemIdGetLength(lp);
loctup.t_tableOid = scan->rs_rd->rd_id;
- ItemPointerSet(&loctup.t_self, page, offnum);
+ ItemPointerSet(&loctup.t_self, block, offnum);
valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
if (valid)
{
@@ -2224,7 +2224,7 @@ heapam_scan_bitmap_next_tuple(TableScanDesc scan,
{
HeapScanDesc hscan = (HeapScanDesc) scan;
OffsetNumber targoffset;
- Page dp;
+ Page page;
ItemId lp;
/*
@@ -2234,11 +2234,11 @@ heapam_scan_bitmap_next_tuple(TableScanDesc scan,
return false;
targoffset = hscan->rs_vistuples[hscan->rs_cindex];
- dp = (Page) BufferGetPage(hscan->rs_cbuf);
- lp = PageGetItemId(dp, targoffset);
+ page = BufferGetPage(hscan->rs_cbuf);
+ lp = PageGetItemId(page, targoffset);
Assert(ItemIdIsNormal(lp));
- hscan->rs_ctup.t_data = (HeapTupleHeader) PageGetItem(dp, lp);
+ hscan->rs_ctup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
hscan->rs_ctup.t_len = ItemIdGetLength(lp);
hscan->rs_ctup.t_tableOid = scan->rs_rd->rd_id;
ItemPointerSet(&hscan->rs_ctup.t_self, hscan->rs_cblock, targoffset);
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index ebe723abb06..810baaf9d08 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -133,7 +133,7 @@ extern TableScanDesc heap_beginscan(Relation relation, Snapshot snapshot,
uint32 flags);
extern void heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk,
BlockNumber numBlks);
-extern void heapgetpage(TableScanDesc sscan, BlockNumber page);
+extern void heapgetpage(TableScanDesc sscan, BlockNumber block);
extern void heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
bool allow_strat, bool allow_sync, bool allow_pagemode);
extern void heap_endscan(TableScanDesc sscan);