Fix some minor errors in new PHJ code.

Correct ExecParallelHashTuplePrealloc's estimate of whether the
space_allowed limit is exceeded.  Be more consistent about tuples that
are exactly HASH_CHUNK_THRESHOLD in size (they're "small", not "large").
Neither of these things explains the current buildfarm unhappiness, but
they're still bugs.

Thomas Munro, per gripe by me

Discussion: https://postgr.es/m/CAEepm=34PDuR69kfYVhmZPgMdy8pSA-MYbpesEN1SR+2oj3Y+w@mail.gmail.com
This commit is contained in:
parent
3decd150a2
commit
6fcde24063
@@ -2740,7 +2740,7 @@ ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
 	 */
 	chunk = hashtable->current_chunk;
 	if (chunk != NULL &&
-		size < HASH_CHUNK_THRESHOLD &&
+		size <= HASH_CHUNK_THRESHOLD &&
 		chunk->maxlen - chunk->used >= size)
 	{
@@ -3260,6 +3260,7 @@ ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
 
 	Assert(batchno > 0);
 	Assert(batchno < hashtable->nbatch);
+	Assert(size == MAXALIGN(size));
 
 	LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
 
@@ -3280,7 +3281,8 @@ ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
 
 	if (pstate->growth != PHJ_GROWTH_DISABLED &&
 		batch->at_least_one_chunk &&
-		(batch->shared->estimated_size + size > pstate->space_allowed))
+		(batch->shared->estimated_size + want + HASH_CHUNK_HEADER_SIZE
+		 > pstate->space_allowed))
 	{
 		/*
 		 * We have determined that this batch would exceed the space budget if
Loading…
x
Reference in New Issue
Block a user