diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 5ca1751f95..586509c50b 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -1839,15 +1839,15 @@ hash_agg_check_limits(AggState *aggstate)
 	uint64		ngroups = aggstate->hash_ngroups_current;
 	Size		meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt,
 													 true);
-	Size		hash_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory,
-													 true);
+	Size		hashkey_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory,
+														true);
 
 	/*
 	 * Don't spill unless there's at least one group in the hash table so we
 	 * can be sure to make progress even in edge cases.
 	 */
 	if (aggstate->hash_ngroups_current > 0 &&
-		(meta_mem + hash_mem > aggstate->hash_mem_limit ||
+		(meta_mem + hashkey_mem > aggstate->hash_mem_limit ||
 		 ngroups > aggstate->hash_ngroups_limit))
 	{
 		hash_agg_enter_spill_mode(aggstate);
@@ -1898,7 +1898,7 @@ static void
 hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
 {
 	Size		meta_mem;
-	Size		hash_mem;
+	Size		hashkey_mem;
 	Size		buffer_mem;
 	Size		total_mem;
 
@@ -1910,7 +1910,7 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
 	meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt, true);
 
 	/* memory for the group keys and transition states */
-	hash_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory, true);
+	hashkey_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory, true);
 
 	/* memory for read/write tape buffers, if spilled */
 	buffer_mem = npartitions * HASHAGG_WRITE_BUFFER_SIZE;
@@ -1918,7 +1918,7 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
 		buffer_mem += HASHAGG_READ_BUFFER_SIZE;
 
 	/* update peak mem */
-	total_mem = meta_mem + hash_mem + buffer_mem;
+	total_mem = meta_mem + hashkey_mem + buffer_mem;
 	if (total_mem > aggstate->hash_mem_peak)
 		aggstate->hash_mem_peak = total_mem;
 
@@ -1936,7 +1936,7 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
 	{
 		aggstate->hashentrysize = sizeof(TupleHashEntryData) +
-			(hash_mem / (double) aggstate->hash_ngroups_current);
+			(hashkey_mem / (double) aggstate->hash_ngroups_current);
 	}
 }