| author | Tom Lane <tgl@sss.pgh.pa.us> | 2011-10-14 17:23:01 -0400 |
|---|---|---|
| committer | Tom Lane <tgl@sss.pgh.pa.us> | 2011-10-14 17:23:46 -0400 |
| commit | e6858e665731c0f56d3ecc9fbb245c32d24f8ef7 (patch) | |
| tree | 4df2705d53d53b1bbd7a14d7017cb519d82ee227 /src/backend/access/heap/visibilitymap.c | |
| parent | dea95c7a7beb5ef66ce89269dd0e84d0c26e5523 (diff) | |
| download | postgresql-e6858e665731c0f56d3ecc9fbb245c32d24f8ef7.tar.gz, postgresql-e6858e665731c0f56d3ecc9fbb245c32d24f8ef7.zip | |
Measure the number of all-visible pages for use in index-only scan costing.
Add a column pg_class.relallvisible to remember the number of pages that
were all-visible according to the visibility map as of the last VACUUM
(or ANALYZE, or some other operations that update pg_class.relpages).
Use relallvisible/relpages, instead of an arbitrary constant, to estimate
how many heap page fetches can be avoided during an index-only scan.

This is pretty primitive and will no doubt see refinements once we've
acquired more field experience with the index-only scan mechanism, but
it's way better than using a constant.
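
For illustration only, here is a minimal standalone C sketch of turning relallvisible/relpages into a fraction of heap fetches an index-only scan might skip. The function name, the clamping, and the zero-relpages handling are assumptions for the example, not the costing code this commit adds to the planner:

```c
#include <stdio.h>

/*
 * Illustrative sketch (not PostgreSQL planner code): convert
 * relallvisible/relpages into a fraction in [0, 1] that a cost model
 * could use as "share of heap fetches avoided by an index-only scan".
 */
static double
all_visible_fraction(double relallvisible, double relpages)
{
	double		frac;

	if (relpages <= 0)
		return 0.0;				/* never-vacuumed/analyzed table: assume nothing is all-visible */

	frac = relallvisible / relpages;

	/* relallvisible can lag behind a growing relpages; clamp to be safe */
	if (frac > 1.0)
		frac = 1.0;

	return frac;
}

int
main(void)
{
	/* e.g. 900 of 1000 heap pages were all-visible as of the last VACUUM */
	printf("estimated fraction of heap fetches avoided: %.2f\n",
		   all_visible_fraction(900.0, 1000.0));
	return 0;
}
```

The point of the ratio is simply that it is driven by observed table state rather than an arbitrary constant; the estimate is only as fresh as the last VACUUM or ANALYZE that updated pg_class.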
Note: I had to adjust an underspecified query in the window.sql regression
test, because it was changing answers when the plan changed to use an
index-only scan. Some of the adjacent tests perhaps should be adjusted
as well, but I didn't do that here.
Diffstat (limited to 'src/backend/access/heap/visibilitymap.c')
-rw-r--r-- | src/backend/access/heap/visibilitymap.c | 68 |
1 file changed, 68 insertions, 0 deletions
diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c
index 5a0511f1988..919e8de0426 100644
--- a/src/backend/access/heap/visibilitymap.c
+++ b/src/backend/access/heap/visibilitymap.c
@@ -16,6 +16,8 @@
  *	visibilitymap_pin_ok - check whether correct map page is already pinned
  *	visibilitymap_set    - set a bit in a previously pinned page
  *	visibilitymap_test   - test if a bit is set
+ *	visibilitymap_count  - count number of bits set in visibility map
+ *	visibilitymap_truncate - truncate the visibility map
  *
  * NOTES
  *
@@ -110,6 +112,26 @@
 #define HEAPBLK_TO_MAPBYTE(x) (((x) % HEAPBLOCKS_PER_PAGE) / HEAPBLOCKS_PER_BYTE)
 #define HEAPBLK_TO_MAPBIT(x) ((x) % HEAPBLOCKS_PER_BYTE)
 
+/* table for fast counting of set bits */
+static const uint8 number_of_ones[256] = {
+	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
+	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+	4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
+};
+
 /* prototypes for internal routines */
 static Buffer vm_readbuf(Relation rel, BlockNumber blkno, bool extend);
 static void vm_extend(Relation rel, BlockNumber nvmblocks);
@@ -308,6 +330,52 @@ visibilitymap_test(Relation rel, BlockNumber heapBlk, Buffer *buf)
 }
 
 /*
+ * visibilitymap_count  - count number of bits set in visibility map
+ *
+ * Note: we ignore the possibility of race conditions when the table is being
+ * extended concurrently with the call.  New pages added to the table aren't
+ * going to be marked all-visible, so they won't affect the result.
+ */
+BlockNumber
+visibilitymap_count(Relation rel)
+{
+	BlockNumber result = 0;
+	BlockNumber mapBlock;
+
+	for (mapBlock = 0; ; mapBlock++)
+	{
+		Buffer		mapBuffer;
+		unsigned char *map;
+		int			i;
+
+		/*
+		 * Read till we fall off the end of the map.  We assume that any
+		 * extra bytes in the last page are zeroed, so we don't bother
+		 * excluding them from the count.
+		 */
+		mapBuffer = vm_readbuf(rel, mapBlock, false);
+		if (!BufferIsValid(mapBuffer))
+			break;
+
+		/*
+		 * We choose not to lock the page, since the result is going to be
+		 * immediately stale anyway if anyone is concurrently setting or
+		 * clearing bits, and we only really need an approximate value.
+		 */
+		map = (unsigned char *) PageGetContents(BufferGetPage(mapBuffer));
+
+		for (i = 0; i < MAPSIZE; i++)
+		{
+			result += number_of_ones[map[i]];
+		}
+
+		ReleaseBuffer(mapBuffer);
+	}
+
+	return result;
+}
+
+/*
  * visibilitymap_truncate - truncate the visibility map
  *
  * The caller must hold AccessExclusiveLock on the relation, to ensure that
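
The added visibilitymap_count() counts set bits one byte at a time by indexing the 256-entry number_of_ones table, so each map byte costs a single lookup instead of an eight-iteration bit loop. A small standalone sketch of the same technique, outside PostgreSQL (the helper names and the table-building shortcut here are illustrative, not taken from the patch):

```c
#include <stdio.h>

/* number_of_ones[b] = number of set bits in byte b, as in the patch above */
static unsigned char number_of_ones[256];

/* Build the table at startup instead of spelling out all 256 entries. */
static void
init_popcount_table(void)
{
	int			i;

	for (i = 1; i < 256; i++)
		number_of_ones[i] = number_of_ones[i / 2] + (i & 1);
}

/* Sum the per-byte counts over an arbitrary buffer. */
static unsigned long
count_set_bits(const unsigned char *buf, size_t len)
{
	unsigned long result = 0;
	size_t		i;

	for (i = 0; i < len; i++)
		result += number_of_ones[buf[i]];

	return result;
}

int
main(void)
{
	unsigned char map[] = {0xFF, 0x0F, 0x01, 0x00};

	init_popcount_table();
	printf("%lu bits set\n", count_set_bits(map, sizeof(map)));	/* prints 13 */
	return 0;
}
```

This mirrors how the patch treats the visibility map: each set bit corresponds to one all-visible heap page, and the page is read without locking because an approximate, immediately-stale count is acceptable for relallvisible.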