diff --git a/src/backend/access/hash/hash_xlog.c b/src/backend/access/hash/hash_xlog.c
index e8e06c62a9..40debf4028 100644
--- a/src/backend/access/hash/hash_xlog.c
+++ b/src/backend/access/hash/hash_xlog.c
@@ -655,7 +655,10 @@ hash_xlog_squeeze_page(XLogReaderState *record)
 		 */
 		(void) XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &bucketbuf);
 
-		action = XLogReadBufferForRedo(record, 1, &writebuf);
+		if (xldata->ntups > 0 || xldata->is_prev_bucket_same_wrt)
+			action = XLogReadBufferForRedo(record, 1, &writebuf);
+		else
+			action = BLK_NOTFOUND;
 	}
 
 	/* replay the record for adding entries in overflow buffer */
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index 9d1ff20b92..40647a7cb3 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -646,7 +646,6 @@ _hash_freeovflpage(Relation rel, Buffer bucketbuf, Buffer ovflbuf,
 	{
 		xl_hash_squeeze_page xlrec;
 		XLogRecPtr	recptr;
-		int			i;
 
 		xlrec.prevblkno = prevblkno;
 		xlrec.nextblkno = nextblkno;
@@ -668,14 +667,31 @@ _hash_freeovflpage(Relation rel, Buffer bucketbuf, Buffer ovflbuf,
 			XLogRegisterBuffer(0, bucketbuf, flags);
 		}
 
-		XLogRegisterBuffer(1, wbuf, REGBUF_STANDARD);
 		if (xlrec.ntups > 0)
 		{
+			XLogRegisterBuffer(1, wbuf, REGBUF_STANDARD);
 			XLogRegisterBufData(1, (char *) itup_offsets,
 								nitups * sizeof(OffsetNumber));
-			for (i = 0; i < nitups; i++)
+			for (int i = 0; i < nitups; i++)
 				XLogRegisterBufData(1, (char *) itups[i], tups_size[i]);
 		}
+		/*
+		 * The write buffer must be registered even when no tuples are added:
+		 * if it is the same as the primary bucket buffer, redo needs to take
+		 * a cleanup lock on it (unmodified, hence REGBUF_NO_CHANGE); if it is
+		 * the same as the previous bucket buffer, its nextblkno is updated.
+		 */
+		else if (xlrec.is_prim_bucket_same_wrt || xlrec.is_prev_bucket_same_wrt)
+		{
+			uint8		wbuf_flags;
+
+			Assert(xlrec.ntups == 0);
+
+			wbuf_flags = REGBUF_STANDARD;
+			if (!xlrec.is_prev_bucket_same_wrt)
+				wbuf_flags |= REGBUF_NO_CHANGE;
+			XLogRegisterBuffer(1, wbuf, wbuf_flags);
+		}
 
 		XLogRegisterBuffer(2, ovflbuf, REGBUF_STANDARD);
 
diff --git a/src/test/regress/expected/hash_index.out b/src/test/regress/expected/hash_index.out
index a2036a1597..bff84c16f9 100644
--- a/src/test/regress/expected/hash_index.out
+++ b/src/test/regress/expected/hash_index.out
@@ -271,6 +271,31 @@ ALTER INDEX hash_split_index SET (fillfactor = 10);
 REINDEX INDEX hash_split_index;
 -- Clean up.
 DROP TABLE hash_split_heap;
+-- Testcases for removing overflow pages.
+CREATE TABLE hash_cleanup_heap(keycol INT);
+CREATE INDEX hash_cleanup_index on hash_cleanup_heap USING HASH (keycol);
+-- Insert many tuples to both the primary bucket page and overflow pages.
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
+-- Fill overflow pages by "dead" tuples.
+BEGIN;
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1000) as i;
+ABORT;
+-- And do CHECKPOINT and vacuum. Some overflow pages will be removed.
+CHECKPOINT;
+VACUUM hash_cleanup_heap;
+TRUNCATE hash_cleanup_heap;
+-- Insert few tuples, the primary bucket page will not satisfy
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 50) as i;
+-- Fill overflow pages by "dead" tuples.
+BEGIN;
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1500) as i;
+ABORT;
+-- And insert some tuples again. Only intermediate buckets are dead.
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
+CHECKPOINT;
+VACUUM hash_cleanup_heap;
+-- Clean up.
+DROP TABLE hash_cleanup_heap;
 -- Index on temp table.
 CREATE TEMP TABLE hash_temp_heap (x int, y int);
 INSERT INTO hash_temp_heap VALUES (1,1);
diff --git a/src/test/regress/sql/hash_index.sql b/src/test/regress/sql/hash_index.sql
index 527024f710..165dc18f0f 100644
--- a/src/test/regress/sql/hash_index.sql
+++ b/src/test/regress/sql/hash_index.sql
@@ -247,6 +247,37 @@ REINDEX INDEX hash_split_index;
 -- Clean up.
 DROP TABLE hash_split_heap;
 
+-- Testcases for removing overflow pages.
+CREATE TABLE hash_cleanup_heap(keycol INT);
+CREATE INDEX hash_cleanup_index on hash_cleanup_heap USING HASH (keycol);
+
+-- Insert many tuples to both the primary bucket page and overflow pages.
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
+-- Fill overflow pages by "dead" tuples.
+BEGIN;
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1000) as i;
+ABORT;
+-- And do CHECKPOINT and vacuum. Some overflow pages will be removed.
+CHECKPOINT;
+VACUUM hash_cleanup_heap;
+
+TRUNCATE hash_cleanup_heap;
+
+-- Insert few tuples, the primary bucket page will not satisfy
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 50) as i;
+-- Fill overflow pages by "dead" tuples.
+BEGIN;
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 1500) as i;
+ABORT;
+-- And insert some tuples again. Only intermediate buckets are dead.
+INSERT INTO hash_cleanup_heap SELECT 1 FROM generate_series(1, 500) as i;
+
+CHECKPOINT;
+VACUUM hash_cleanup_heap;
+
+-- Clean up.
+DROP TABLE hash_cleanup_heap;
+
 -- Index on temp table.
 CREATE TEMP TABLE hash_temp_heap (x int, y int);
 INSERT INTO hash_temp_heap VALUES (1,1);
