From 3228c906ce1aa3d8de2b2f3c0c10e6e509510e4a Mon Sep 17 00:00:00 2001
From: Thomas Munro <thomas.munro@enterprisedb.com>
Date: Wed, 3 Jan 2018 16:01:40 +1300
Subject: [PATCH] Fix some fencepost errors in Parallel Hash memory allocation.

For consistency, ExecParallelHashTupleAlloc() shouldn't treat tuples of size
HASH_CHUNK_THRESHOLD as large tuples (only tuples larger than that).

ExecParallelHashTuplePrealloc() should check the whole size being preallocated
(i.e. typically a chunk), not the size requested by the caller, otherwise memory
budget exhaustion won't be detected until the next call.

Thomas Munro, based on code review from Tom Lane.
---
 src/backend/executor/nodeHash.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 52f5c0c26e0..7c94938ff8c 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -2740,7 +2740,7 @@ ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
 	 */
 	chunk = hashtable->current_chunk;
 	if (chunk != NULL &&
-		size < HASH_CHUNK_THRESHOLD &&
+		size <= HASH_CHUNK_THRESHOLD &&
 		chunk->maxlen - chunk->used >= size)
 	{
 
@@ -3280,7 +3280,8 @@ ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
 
 	if (pstate->growth != PHJ_GROWTH_DISABLED &&
 		batch->at_least_one_chunk &&
-		(batch->shared->estimated_size + size > pstate->space_allowed))
+		(batch->shared->estimated_size + want + HASH_CHUNK_HEADER_SIZE
+		 > pstate->space_allowed))
 	{
 		/*
 		 * We have determined that this batch would exceed the space budget if
-- 
2.15.0

