mirror of https://github.com/postgres/postgres
heapam: Pass number of required pages to RelationGetBufferForTuple()
A future commit will use this information to determine how aggressively to
extend the relation. In heap_multi_insert() we know accurately how many pages
we need once we need to extend the relation, providing an accurate lower bound
for how much to extend.

Reviewed-by: Melanie Plageman <melanieplageman@gmail.com>
Discussion: https://postgr.es/m/20221029025420.eplyow6k7tgu6he3@awork3.anarazel.de
parent 7d71d3dd08
commit 5279e9db8e
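As a concrete illustration of the estimate the commit message describes, here is a minimal standalone sketch (not PostgreSQL code) of the worst-case page computation performed by the new heap_multi_insert_pages() helper added below. The block size, page header size, line-pointer size, alignment macro and tuple lengths are hard-coded stand-ins assuming a default 8kB-page build.

/*
 * pages.c - standalone sketch of the worst-case page estimate.
 * The MOCK_* constants stand in for BLCKSZ, SizeOfPageHeaderData,
 * sizeof(ItemIdData) and MAXALIGN on a default build; they are not
 * the real PostgreSQL macros.  Build: cc -o pages pages.c && ./pages
 */
#include <stdio.h>
#include <stddef.h>

#define MOCK_BLCKSZ             8192    /* default block size */
#define MOCK_PAGE_HEADER_SZ     24      /* typical SizeOfPageHeaderData */
#define MOCK_LINE_POINTER_SZ    4       /* sizeof(ItemIdData) */
#define MOCK_MAXALIGN(len)      (((len) + 7) & ~(size_t) 7)

/* Worst-case number of fresh pages needed for the remaining tuples. */
static int
pages_needed(const size_t *tuple_lens, int done, int ntuples, size_t save_free_space)
{
    size_t      page_avail = MOCK_BLCKSZ - MOCK_PAGE_HEADER_SZ - save_free_space;
    int         npages = 1;

    for (int i = done; i < ntuples; i++)
    {
        size_t      tup_sz = MOCK_LINE_POINTER_SZ + MOCK_MAXALIGN(tuple_lens[i]);

        /* tuple does not fit on the current page: start a fresh one */
        if (page_avail < tup_sz)
        {
            npages++;
            page_avail = MOCK_BLCKSZ - MOCK_PAGE_HEADER_SZ - save_free_space;
        }
        page_avail -= tup_sz;
    }

    return npages;
}

int
main(void)
{
    /* made-up tuple lengths for a COPY-style batch */
    size_t      lens[] = {121, 4000, 4000, 121, 4000, 121, 4000, 121};
    int         ntuples = (int) (sizeof(lens) / sizeof(lens[0]));

    printf("worst case: %d page(s) for %d tuples\n",
           pages_needed(lens, 0, ntuples, 0), ntuples);
    return 0;
}

With these made-up lengths the batch is estimated at three pages; heap_multi_insert() feeds exactly this kind of lower bound to the relation-extension code.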
src/backend/access/heap/heapam.c

@@ -1847,7 +1847,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 	 */
 	buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
 									   InvalidBuffer, options, bistate,
-									   &vmbuffer, NULL);
+									   &vmbuffer, NULL,
+									   0);
 
 	/*
 	 * We're about to do the actual insert -- but check for conflict first, to
@@ -2050,6 +2051,32 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
 	return tup;
 }
 
+/*
+ * Helper for heap_multi_insert() that computes the number of entire pages
+ * that inserting the remaining heaptuples requires. Used to determine how
+ * much the relation needs to be extended by.
+ */
+static int
+heap_multi_insert_pages(HeapTuple *heaptuples, int done, int ntuples, Size saveFreeSpace)
+{
+	size_t		page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
+	int			npages = 1;
+
+	for (int i = done; i < ntuples; i++)
+	{
+		size_t		tup_sz = sizeof(ItemIdData) + MAXALIGN(heaptuples[i]->t_len);
+
+		if (page_avail < tup_sz)
+		{
+			npages++;
+			page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
+		}
+		page_avail -= tup_sz;
+	}
+
+	return npages;
+}
+
 /*
  * heap_multi_insert	- insert multiple tuples into a heap
  *
@@ -2076,6 +2103,9 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
 	Size		saveFreeSpace;
 	bool		need_tuple_data = RelationIsLogicallyLogged(relation);
 	bool		need_cids = RelationIsAccessibleInLogicalDecoding(relation);
+	bool		starting_with_empty_page = false;
+	int			npages = 0;
+	int			npages_used = 0;
 
 	/* currently not needed (thus unsupported) for heap_multi_insert() */
 	Assert(!(options & HEAP_INSERT_NO_LOGICAL));
@@ -2126,13 +2156,31 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
 	while (ndone < ntuples)
 	{
 		Buffer		buffer;
-		bool		starting_with_empty_page;
 		bool		all_visible_cleared = false;
 		bool		all_frozen_set = false;
 		int			nthispage;
 
 		CHECK_FOR_INTERRUPTS();
 
+		/*
+		 * Compute number of pages needed to fit the to-be-inserted tuples in
+		 * the worst case. This will be used to determine how much to extend
+		 * the relation by in RelationGetBufferForTuple(), if needed. If we
+		 * filled a prior page from scratch, we can just update our last
+		 * computation, but if we started with a partially filled page,
+		 * recompute from scratch, the number of potentially required pages
+		 * can vary due to tuples needing to fit onto the page, page headers
+		 * etc.
+		 */
+		if (ndone == 0 || !starting_with_empty_page)
+		{
+			npages = heap_multi_insert_pages(heaptuples, ndone, ntuples,
+											 saveFreeSpace);
+			npages_used = 0;
+		}
+		else
+			npages_used++;
+
 		/*
 		 * Find buffer where at least the next tuple will fit.  If the page is
 		 * all-visible, this will also pin the requisite visibility map page.
@@ -2142,7 +2190,8 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
 		 */
 		buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
 										   InvalidBuffer, options, bistate,
-										   &vmbuffer, NULL);
+										   &vmbuffer, NULL,
+										   npages - npages_used);
 		page = BufferGetPage(buffer);
 
 		starting_with_empty_page = PageGetMaxOffsetNumber(page) == 0;
@@ -3576,7 +3625,8 @@ l2:
 			/* It doesn't fit, must use RelationGetBufferForTuple. */
 			newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
 											   buffer, 0, NULL,
-											   &vmbuffer_new, &vmbuffer);
+											   &vmbuffer_new, &vmbuffer,
+											   0);
 			/* We're all done. */
 			break;
 		}
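To make the recompute-vs-reuse bookkeeping in the heap_multi_insert() hunks above concrete, the following standalone sketch (again not PostgreSQL code) replays the loop with synthetic numbers: the estimate is redone on the first iteration and whenever the previous iteration landed on a partially filled page, otherwise the freshly filled page is simply counted against the earlier estimate. The value printed each iteration corresponds to the npages - npages_used hint passed to RelationGetBufferForTuple(); the four-tuples-per-page figure and the half-full first page are invented assumptions.

/*
 * hint_trace.c - standalone illustration of the npages/npages_used
 * bookkeeping; the estimator and page-filling behaviour below are
 * synthetic stand-ins.  Build: cc -o hint_trace hint_trace.c && ./hint_trace
 */
#include <stdio.h>
#include <stdbool.h>

#define TUPLES_PER_FRESH_PAGE 4     /* assumption: fixed-size tuples */

/* stand-in for heap_multi_insert_pages(): worst-case pages for what is left */
static int
estimate_pages(int remaining)
{
    return (remaining + TUPLES_PER_FRESH_PAGE - 1) / TUPLES_PER_FRESH_PAGE;
}

int
main(void)
{
    int         ntuples = 10;
    int         ndone = 0;
    int         npages = 0;
    int         npages_used = 0;
    bool        starting_with_empty_page = false;

    while (ndone < ntuples)
    {
        int         nthispage;

        /* first iteration, or previous page was partially filled: recompute */
        if (ndone == 0 || !starting_with_empty_page)
        {
            npages = estimate_pages(ntuples - ndone);
            npages_used = 0;
        }
        else
            npages_used++;

        printf("ndone=%2d  hint passed: %d page(s)\n",
               ndone, npages - npages_used);

        /*
         * Pretend the first page we are handed is half full (2 tuples fit)
         * and every later page is brand new (TUPLES_PER_FRESH_PAGE fit).
         */
        starting_with_empty_page = (ndone != 0);
        nthispage = starting_with_empty_page ? TUPLES_PER_FRESH_PAGE : 2;
        if (nthispage > ntuples - ndone)
            nthispage = ntuples - ndone;
        ndone += nthispage;
    }
    return 0;
}

With these numbers the hints come out as 3, 2 and 1 pages across the three iterations.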
src/backend/access/heap/hio.c

@@ -301,6 +301,11 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
  * Returns pinned and exclusive-locked buffer of a page in given relation
  * with free space >= given len.
  *
+ * If num_pages is > 1, we will try to extend the relation by at least that
+ * many pages when we decide to extend the relation. This is more efficient
+ * for callers that know they will need multiple pages
+ * (e.g. heap_multi_insert()).
+ *
  * If otherBuffer is not InvalidBuffer, then it references a previously
  * pinned buffer of another page in the same relation; on return, this
  * buffer will also be exclusive-locked.  (This case is used by heap_update;
@@ -359,7 +364,8 @@ Buffer
 RelationGetBufferForTuple(Relation relation, Size len,
 						  Buffer otherBuffer, int options,
 						  BulkInsertState bistate,
-						  Buffer *vmbuffer, Buffer *vmbuffer_other)
+						  Buffer *vmbuffer, Buffer *vmbuffer_other,
+						  int num_pages)
 {
 	bool		use_fsm = !(options & HEAP_INSERT_SKIP_FSM);
 	Buffer		buffer = InvalidBuffer;
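The new paragraph in the RelationGetBufferForTuple() header comment above promises only that num_pages acts as a lower bound when the relation does get extended; how that bound is actually folded into the extension amount is left to the future commit the message refers to. Purely as a hypothetical illustration of that contract (the waiters input and the base heuristic are invented here), it could look roughly like the sketch below, which also shows why the existing single-tuple callers that pass 0 are unaffected.

/*
 * extend_sketch.c - hypothetical sketch only; the real extension logic
 * arrives in a later commit and is not reproduced here.
 */
#include <stdio.h>

static int
choose_extend_by(int waiters, int num_pages)
{
    int         extend_by = 1 + waiters;    /* invented base heuristic */

    /* honour the caller's hint as a lower bound; 0 means "no hint" */
    if (extend_by < num_pages)
        extend_by = num_pages;

    return extend_by;
}

int
main(void)
{
    printf("heap_insert (num_pages = 0):        extend by %d\n", choose_extend_by(0, 0));
    printf("heap_multi_insert (num_pages = 16): extend by %d\n", choose_extend_by(0, 16));
    return 0;
}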
src/include/access/hio.h

@@ -38,6 +38,7 @@ extern void RelationPutHeapTuple(Relation relation, Buffer buffer,
 extern Buffer RelationGetBufferForTuple(Relation relation, Size len,
 										Buffer otherBuffer, int options,
 										BulkInsertStateData *bistate,
-										Buffer *vmbuffer, Buffer *vmbuffer_other);
+										Buffer *vmbuffer, Buffer *vmbuffer_other,
+										int num_pages);
 
 #endif							/* HIO_H */