Fix an uninitialized access in hash_xlog_squeeze_page().
Commit 861f86beea changed hash_xlog_squeeze_page() to start reading the write buffer conditionally, but forgot to initialize it, leading to an uninitialized access.

Reported-by: Alexander Lakhin
Author: Hayato Kuroda
Reviewed-by: Alexander Lakhin, Amit Kapila
Discussion: http://postgr.es/m/62ed1a9f-746a-8e86-904b-51b9b806a1d9@gmail.com
This commit is contained in:
parent
aa11a9c149
commit
f66fcc5cd6
@ -632,7 +632,7 @@ hash_xlog_squeeze_page(XLogReaderState *record)
|
|||||||
XLogRecPtr lsn = record->EndRecPtr;
|
XLogRecPtr lsn = record->EndRecPtr;
|
||||||
xl_hash_squeeze_page *xldata = (xl_hash_squeeze_page *) XLogRecGetData(record);
|
xl_hash_squeeze_page *xldata = (xl_hash_squeeze_page *) XLogRecGetData(record);
|
||||||
Buffer bucketbuf = InvalidBuffer;
|
Buffer bucketbuf = InvalidBuffer;
|
||||||
Buffer writebuf;
|
Buffer writebuf = InvalidBuffer;
|
||||||
Buffer ovflbuf;
|
Buffer ovflbuf;
|
||||||
Buffer prevbuf = InvalidBuffer;
|
Buffer prevbuf = InvalidBuffer;
|
||||||
Buffer mapbuf;
|
Buffer mapbuf;
|
||||||
|
@ -298,6 +298,20 @@ ROLLBACK;
|
|||||||
INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
|
INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
|
||||||
CHECKPOINT;
|
CHECKPOINT;
|
||||||
VACUUM hash_cleanup_heap;
|
VACUUM hash_cleanup_heap;
|
||||||
|
TRUNCATE hash_cleanup_heap;
|
||||||
|
-- Insert tuples to both the primary bucket page and overflow pages.
|
||||||
|
INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
|
||||||
|
-- Fill overflow pages by "dead" tuples.
|
||||||
|
BEGIN;
|
||||||
|
INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1500) as i;
|
||||||
|
ROLLBACK;
|
||||||
|
-- And insert some tuples again. During squeeze operation, these will be moved
|
||||||
|
-- to other overflow pages and also allow overflow pages filled by dead tuples
|
||||||
|
-- to be freed. Note the main purpose of this test is to test the case where
|
||||||
|
-- we don't need to move any tuple from the overflow page being freed.
|
||||||
|
INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 50) as i;
|
||||||
|
CHECKPOINT;
|
||||||
|
VACUUM hash_cleanup_heap;
|
||||||
-- Clean up.
|
-- Clean up.
|
||||||
DROP TABLE hash_cleanup_heap;
|
DROP TABLE hash_cleanup_heap;
|
||||||
-- Index on temp table.
|
-- Index on temp table.
|
||||||
|
@ -284,6 +284,23 @@ INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
|
|||||||
CHECKPOINT;
|
CHECKPOINT;
|
||||||
VACUUM hash_cleanup_heap;
|
VACUUM hash_cleanup_heap;
|
||||||
|
|
||||||
|
TRUNCATE hash_cleanup_heap;
|
||||||
|
|
||||||
|
-- Insert tuples to both the primary bucket page and overflow pages.
|
||||||
|
INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
|
||||||
|
-- Fill overflow pages by "dead" tuples.
|
||||||
|
BEGIN;
|
||||||
|
INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1500) as i;
|
||||||
|
ROLLBACK;
|
||||||
|
-- And insert some tuples again. During squeeze operation, these will be moved
|
||||||
|
-- to other overflow pages and also allow overflow pages filled by dead tuples
|
||||||
|
-- to be freed. Note the main purpose of this test is to test the case where
|
||||||
|
-- we don't need to move any tuple from the overflow page being freed.
|
||||||
|
INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 50) as i;
|
||||||
|
|
||||||
|
CHECKPOINT;
|
||||||
|
VACUUM hash_cleanup_heap;
|
||||||
|
|
||||||
-- Clean up.
|
-- Clean up.
|
||||||
DROP TABLE hash_cleanup_heap;
|
DROP TABLE hash_cleanup_heap;
|
||||||
|
|
||||||
|
Loading…
x
Reference in New Issue
Block a user