about summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--  src/backend/access/hash/hash_xlog.c          5
-rw-r--r--  src/backend/access/hash/hashovfl.c          19
-rw-r--r--  src/test/regress/expected/hash_index.out    29
-rw-r--r--  src/test/regress/sql/hash_index.sql         40
4 files changed, 91 insertions(+), 2 deletions(-)
diff --git a/src/backend/access/hash/hash_xlog.c b/src/backend/access/hash/hash_xlog.c
index e8e06c62a95..40debf40288 100644
--- a/src/backend/access/hash/hash_xlog.c
+++ b/src/backend/access/hash/hash_xlog.c
@@ -655,7 +655,10 @@ hash_xlog_squeeze_page(XLogReaderState *record)
*/
(void) XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &bucketbuf);
- action = XLogReadBufferForRedo(record, 1, &writebuf);
+ if (xldata->ntups > 0 || xldata->is_prev_bucket_same_wrt)
+ action = XLogReadBufferForRedo(record, 1, &writebuf);
+ else
+ action = BLK_NOTFOUND;
}
/* replay the record for adding entries in overflow buffer */
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index 9d1ff20b922..2bd4432265c 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -668,14 +668,31 @@ _hash_freeovflpage(Relation rel, Buffer bucketbuf, Buffer ovflbuf,
XLogRegisterBuffer(0, bucketbuf, flags);
}
- XLogRegisterBuffer(1, wbuf, REGBUF_STANDARD);
if (xlrec.ntups > 0)
{
+ XLogRegisterBuffer(1, wbuf, REGBUF_STANDARD);
XLogRegisterBufData(1, (char *) itup_offsets,
nitups * sizeof(OffsetNumber));
for (i = 0; i < nitups; i++)
XLogRegisterBufData(1, (char *) itups[i], tups_size[i]);
}
+ else if (xlrec.is_prim_bucket_same_wrt || xlrec.is_prev_bucket_same_wrt)
+ {
+ uint8 wbuf_flags;
+
+ /*
+ * A write buffer needs to be registered even if no tuples are
+ * added to it to ensure that we can acquire a cleanup lock on it
+ * if it is the same as primary bucket buffer or update the
+ * nextblkno if it is same as the previous bucket buffer.
+ */
+ Assert(xlrec.ntups == 0);
+
+ wbuf_flags = REGBUF_STANDARD;
+ if (!xlrec.is_prev_bucket_same_wrt)
+ wbuf_flags |= REGBUF_NO_CHANGE;
+ XLogRegisterBuffer(1, wbuf, wbuf_flags);
+ }
XLogRegisterBuffer(2, ovflbuf, REGBUF_STANDARD);
diff --git a/src/test/regress/expected/hash_index.out b/src/test/regress/expected/hash_index.out
index a2036a15972..0df348b5dd8 100644
--- a/src/test/regress/expected/hash_index.out
+++ b/src/test/regress/expected/hash_index.out
@@ -271,6 +271,35 @@ ALTER INDEX hash_split_index SET (fillfactor = 10);
REINDEX INDEX hash_split_index;
-- Clean up.
DROP TABLE hash_split_heap;
+-- Testcases for removing overflow pages.
+CREATE TABLE hash_cleanup_heap(keycol INT);
+CREATE INDEX hash_cleanup_index on hash_cleanup_heap USING HASH (keycol);
+-- Insert tuples to both the primary bucket page and overflow pages.
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
+-- Fill overflow pages by "dead" tuples.
+BEGIN;
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1000) as i;
+ROLLBACK;
+-- Checkpoint will ensure that all hash buffers are cleaned before we try
+-- to remove overflow pages.
+CHECKPOINT;
+-- This will squeeze the bucket and remove overflow pages.
+VACUUM hash_cleanup_heap;
+TRUNCATE hash_cleanup_heap;
+-- Insert a few tuples so that the primary bucket page doesn't get full and
+-- tuples can be moved to it.
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 50) as i;
+-- Fill overflow pages by "dead" tuples.
+BEGIN;
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1500) as i;
+ROLLBACK;
+-- And insert some tuples again. During squeeze operation, these will be moved
+-- to the primary bucket allowing to test freeing intermediate overflow pages.
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
+CHECKPOINT;
+VACUUM hash_cleanup_heap;
+-- Clean up.
+DROP TABLE hash_cleanup_heap;
-- Index on temp table.
CREATE TEMP TABLE hash_temp_heap (x int, y int);
INSERT INTO hash_temp_heap VALUES (1,1);
diff --git a/src/test/regress/sql/hash_index.sql b/src/test/regress/sql/hash_index.sql
index 527024f7109..943bd0ecf17 100644
--- a/src/test/regress/sql/hash_index.sql
+++ b/src/test/regress/sql/hash_index.sql
@@ -247,6 +247,46 @@ REINDEX INDEX hash_split_index;
-- Clean up.
DROP TABLE hash_split_heap;
+-- Testcases for removing overflow pages.
+CREATE TABLE hash_cleanup_heap(keycol INT);
+CREATE INDEX hash_cleanup_index on hash_cleanup_heap USING HASH (keycol);
+
+-- Insert tuples to both the primary bucket page and overflow pages.
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
+
+-- Fill overflow pages by "dead" tuples.
+BEGIN;
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1000) as i;
+ROLLBACK;
+
+-- Checkpoint will ensure that all hash buffers are cleaned before we try
+-- to remove overflow pages.
+CHECKPOINT;
+
+-- This will squeeze the bucket and remove overflow pages.
+VACUUM hash_cleanup_heap;
+
+TRUNCATE hash_cleanup_heap;
+
+-- Insert a few tuples so that the primary bucket page doesn't get full and
+-- tuples can be moved to it.
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 50) as i;
+
+-- Fill overflow pages by "dead" tuples.
+BEGIN;
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1500) as i;
+ROLLBACK;
+
+-- And insert some tuples again. During squeeze operation, these will be moved
+-- to the primary bucket allowing to test freeing intermediate overflow pages.
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
+
+CHECKPOINT;
+VACUUM hash_cleanup_heap;
+
+-- Clean up.
+DROP TABLE hash_cleanup_heap;
+
-- Index on temp table.
CREATE TEMP TABLE hash_temp_heap (x int, y int);
INSERT INTO hash_temp_heap VALUES (1,1);