author     Tom Lane <tgl@sss.pgh.pa.us>  2015-01-16 13:28:30 -0500
committer  Tom Lane <tgl@sss.pgh.pa.us>  2015-01-16 13:28:30 -0500
commit     779fdcdeeeb9cdbfd271f8dc5bde76ed0c7b0813 (patch)
tree       0e909dc966a3dece21d861e4cb841c61da832ca7
parent     f5ae3ba4828ece02bae2d16b4cbce847fbcea850 (diff)
Improve new caching logic in tbm_add_tuples().
For no significant extra complexity, we can cache knowledge that the target page is lossy, and save a hash_search per iteration in that case as well. This probably makes little difference, since the extra rechecks that must occur when pages are lossy are way more expensive than anything we can save here ... but we might as well do it if we're going to cache anything.
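As a rough standalone illustration of the pattern (not the PostgreSQL sources themselves), the sketch below remembers the block number of the last hash lookup and also caches a negative result by setting the page pointer to NULL for a lossy block, so consecutive tuples on the same block cost no further lookups. The names here (PageEntry, lookup_page, page_is_lossy, add_tuples) are hypothetical stand-ins, not PostgreSQL APIs; the real change is in the diff below.

/*
 * Minimal sketch of the "remember the last page looked up" idea, written
 * as a self-contained program.  Hypothetical stand-ins, not PostgreSQL code.
 */
#include <stdio.h>
#include <stdbool.h>

#define INVALID_BLOCK   ((unsigned) ~0u)
#define NPAGES          8

typedef struct PageEntry
{
    unsigned    blockno;
    unsigned    nbits;          /* how many tuple bits are set on this page */
} PageEntry;

static PageEntry pages[NPAGES];
static bool      lossy[NPAGES]; /* pages whose exact bits were discarded */

/* stand-in for "is the whole page already marked lossy?" */
static bool
page_is_lossy(unsigned blk)
{
    return lossy[blk];
}

/* stand-in for the (pretend) expensive hash-table lookup */
static PageEntry *
lookup_page(unsigned blk)
{
    printf("  hash lookup for block %u\n", blk);
    pages[blk].blockno = blk;
    return &pages[blk];
}

static void
add_tuples(const unsigned *blks, int n)
{
    unsigned    currblk = INVALID_BLOCK;    /* block whose entry is cached */
    PageEntry  *page = NULL;                /* NULL means "currblk is lossy" */

    for (int i = 0; i < n; i++)
    {
        unsigned    blk = blks[i];

        /* Re-do the lookup only when the target block changes. */
        if (blk != currblk)
        {
            if (page_is_lossy(blk))
                page = NULL;    /* remember the negative result too */
            else
                page = lookup_page(blk);
            currblk = blk;
        }

        if (page == NULL)
            continue;           /* whole page is already marked */

        page->nbits++;          /* pretend to set one tuple bit */
    }
}

int
main(void)
{
    /* consecutive tuples on the same block, plus one lossy block (3) */
    unsigned    input[] = {1, 1, 1, 3, 3, 2, 2, 2};

    lossy[3] = true;
    add_tuples(input, 8);

    for (unsigned b = 0; b < NPAGES; b++)
    {
        if (pages[b].nbits > 0)
            printf("block %u: %u bits set\n", b, pages[b].nbits);
    }
    return 0;
}

Built with any C99 compiler, the demo performs only two lookups for the eight input tuples; the tuples on the lossy block 3 are skipped without re-checking its status, which is the extra case this commit caches.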
-rw-r--r--  src/backend/nodes/tidbitmap.c  31
1 file changed, 18 insertions(+), 13 deletions(-)
diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c
index dd1c6657fa7..e8b25eb9e14 100644
--- a/src/backend/nodes/tidbitmap.c
+++ b/src/backend/nodes/tidbitmap.c
@@ -268,8 +268,9 @@ void
 tbm_add_tuples(TIDBitmap *tbm, const ItemPointer tids, int ntids,
                bool recheck)
 {
-    int         i;
-    PagetableEntry *page = NULL;
+    BlockNumber currblk = InvalidBlockNumber;
+    PagetableEntry *page = NULL;    /* only valid when currblk is valid */
+    int         i;
 
     Assert(!tbm->iterating);
     for (i = 0; i < ntids; i++)
@@ -283,19 +284,23 @@ tbm_add_tuples(TIDBitmap *tbm, const ItemPointer tids, int ntids,
         if (off < 1 || off > MAX_TUPLES_PER_PAGE)
             elog(ERROR, "tuple offset out of range: %u", off);
 
-        if (page == NULL || page->blockno != blk)
+        /*
+         * Look up target page unless we already did.  This saves cycles when
+         * the input includes consecutive tuples on the same page, which is
+         * common enough to justify an extra test here.
+         */
+        if (blk != currblk)
         {
             if (tbm_page_is_lossy(tbm, blk))
-                continue;       /* whole page is already marked */
-
-            /*
-             * Cache this page as it's quite likely that we'll see the same
-             * page again in the next iteration. This will save having to
-             * lookup the page in the hashtable again.
-             */
-            page = tbm_get_pageentry(tbm, blk);
+                page = NULL;    /* remember page is lossy */
+            else
+                page = tbm_get_pageentry(tbm, blk);
+            currblk = blk;
         }
 
+        if (page == NULL)
+            continue;           /* whole page is already marked */
+
         if (page->ischunk)
         {
             /* The page is a lossy chunk header, set bit for itself */
@@ -313,8 +318,8 @@ tbm_add_tuples(TIDBitmap *tbm, const ItemPointer tids, int ntids,
         if (tbm->nentries > tbm->maxentries)
         {
             tbm_lossify(tbm);
-            /* Cached page could become lossy or freed */
-            page = NULL;
+            /* Page could have been converted to lossy, so force new lookup */
+            currblk = InvalidBlockNumber;
         }
     }
 }