Avoid access to uninitialized memory in shared tidbitmap iteration.
Primarily, this didn't work correctly when the tidbitmap ended up empty.

Dilip Kumar, per a report from Emre Hasegeli

Discussion: http://postgr.es/m/CAFiTN-ujHFKb8WSLhK54rfqQT3r2yiPQOyeBrCDsA4p9Fwp_jw@mail.gmail.com
commit 80824ddda3
parent befd73c50f
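The crash is easiest to see from the caller's side. A minimal sketch of the failing sequence (hypothetical driver code, not part of this commit; the signatures are taken from the hunks below, and maxbytes/dsa stand for whatever the executor passes in):

    /* Build a DSA-backed tidbitmap but never add any tuples to it. */
    TIDBitmap  *tbm = tbm_create(maxbytes, dsa);

    /*
     * Before this fix, preparing shared iteration over the empty bitmap
     * read tbm->dsapagetable and friends, which tbm_create() had never
     * initialized, and the iterator later chased page-table pointers
     * that were never allocated.
     */
    dsa_pointer dp = tbm_prepare_shared_iterate(tbm);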
@@ -302,6 +302,10 @@ tbm_create(long maxbytes, dsa_area *dsa)
 	tbm->maxentries = (int) nbuckets;
 	tbm->lossify_start = 0;
 	tbm->dsa = dsa;
+	tbm->dsapagetable = InvalidDsaPointer;
+	tbm->dsapagetableold = InvalidDsaPointer;
+	tbm->ptpages = InvalidDsaPointer;
+	tbm->ptchunks = InvalidDsaPointer;
 
 	return tbm;
 }
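Seeding the dsa_pointer fields with InvalidDsaPointer gives later code a reliable "never allocated" marker, analogous to NULL for ordinary pointers. A sketch of the guard pattern this enables (illustrative only, using the same macros the next hunk relies on):

    /* Safe cleanup: only free what was actually allocated. */
    if (DsaPointerIsValid(tbm->dsapagetable))
        dsa_free(tbm->dsa, tbm->dsapagetable);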
@@ -363,20 +367,23 @@ void
 tbm_free_shared_area(dsa_area *dsa, dsa_pointer dp)
 {
 	TBMSharedIteratorState *istate = dsa_get_address(dsa, dp);
-	PTEntryArray *ptbase = dsa_get_address(dsa, istate->pagetable);
+	PTEntryArray *ptbase;
 	PTIterationArray *ptpages;
 	PTIterationArray *ptchunks;
 
-	if (pg_atomic_sub_fetch_u32(&ptbase->refcount, 1) == 0)
-		dsa_free(dsa, istate->pagetable);
-
-	if (istate->spages)
+	if (DsaPointerIsValid(istate->pagetable))
+	{
+		ptbase = dsa_get_address(dsa, istate->pagetable);
+		if (pg_atomic_sub_fetch_u32(&ptbase->refcount, 1) == 0)
+			dsa_free(dsa, istate->pagetable);
+	}
+	if (DsaPointerIsValid(istate->spages))
 	{
 		ptpages = dsa_get_address(dsa, istate->spages);
 		if (pg_atomic_sub_fetch_u32(&ptpages->refcount, 1) == 0)
 			dsa_free(dsa, istate->spages);
 	}
-	if (istate->schunks)
+	if (DsaPointerIsValid(istate->schunks))
 	{
 		ptchunks = dsa_get_address(dsa, istate->schunks);
 		if (pg_atomic_sub_fetch_u32(&ptchunks->refcount, 1) == 0)
@@ -786,7 +793,7 @@ tbm_prepare_shared_iterate(TIDBitmap *tbm)
 {
 	dsa_pointer dp;
 	TBMSharedIteratorState *istate;
-	PTEntryArray *ptbase;
+	PTEntryArray *ptbase = NULL;
 	PTIterationArray *ptpages = NULL;
 	PTIterationArray *ptchunks = NULL;
 
@@ -797,7 +804,7 @@ tbm_prepare_shared_iterate(TIDBitmap *tbm)
 	 * Allocate TBMSharedIteratorState from DSA to hold the shared members and
 	 * lock, this will also be used by multiple worker for shared iterate.
 	 */
-	dp = dsa_allocate(tbm->dsa, sizeof(TBMSharedIteratorState));
+	dp = dsa_allocate0(tbm->dsa, sizeof(TBMSharedIteratorState));
 	istate = dsa_get_address(tbm->dsa, dp);
 
 	/*
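dsa_allocate0() differs from dsa_allocate() only in zero-filling the new chunk, so the dsa_pointer members an empty bitmap never assigns (pagetable, spages, schunks) come out as zeroes, i.e. InvalidDsaPointer, rather than as stale heap bytes. Assuming that equivalence, the change amounts to:

    /* What dsa_allocate0() folds into one call. */
    dp = dsa_allocate(tbm->dsa, sizeof(TBMSharedIteratorState));
    istate = dsa_get_address(tbm->dsa, dp);
    memset(istate, 0, sizeof(TBMSharedIteratorState));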
@@ -856,7 +863,7 @@ tbm_prepare_shared_iterate(TIDBitmap *tbm)
 		Assert(npages == tbm->npages);
 		Assert(nchunks == tbm->nchunks);
 	}
-	else
+	else if (tbm->status == TBM_ONE_PAGE)
 	{
 		/*
 		 * In one page mode allocate the space for one pagetable entry and
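The narrowed else matters because a tidbitmap has three states, not two. With an empty bitmap neither allocation branch should run, leaving ptbase, ptpages and ptchunks NULL for the guards added below. Schematically (branch bodies elided; the TBM_HASH condition on the first branch is a paraphrase of the surrounding code, not verbatim source):

    if (tbm->status == TBM_HASH)
    {
        /* many pages: snapshot the shared page table into DSA */
    }
    else if (tbm->status == TBM_ONE_PAGE)
    {
        /* exactly one page: allocate a single pagetable entry */
    }
    /* TBM_EMPTY: fall through both branches and allocate nothing */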
@@ -868,8 +875,8 @@ tbm_prepare_shared_iterate(TIDBitmap *tbm)
 		ptpages->index[0] = 0;
 	}
 
-	pg_atomic_init_u32(&ptbase->refcount, 0);
-
+	if (ptbase != NULL)
+		pg_atomic_init_u32(&ptbase->refcount, 0);
 	if (npages > 1)
 		qsort_arg((void *) (ptpages->index), npages, sizeof(int),
 				  tbm_shared_comparator, (void *) ptbase->ptentry);
@@ -899,10 +906,11 @@ tbm_prepare_shared_iterate(TIDBitmap *tbm)
 	 * increase the refcount by 1 so that while freeing the shared iterator
 	 * we don't free pagetable and iterator array until its refcount becomes 0.
 	 */
-	pg_atomic_add_fetch_u32(&ptbase->refcount, 1);
-	if (ptpages)
+	if (ptbase != NULL)
+		pg_atomic_add_fetch_u32(&ptbase->refcount, 1);
+	if (ptpages != NULL)
 		pg_atomic_add_fetch_u32(&ptpages->refcount, 1);
-	if (ptchunks)
+	if (ptchunks != NULL)
 		pg_atomic_add_fetch_u32(&ptchunks->refcount, 1);
 
 	/* Initialize the iterator lock */
@@ -1069,9 +1077,16 @@ tbm_shared_iterate(TBMSharedIterator *iterator)
 {
 	TBMIterateResult *output = &iterator->output;
 	TBMSharedIteratorState *istate = iterator->state;
-	PagetableEntry *ptbase = iterator->ptbase->ptentry;
-	int		   *idxpages = iterator->ptpages->index;
-	int		   *idxchunks = iterator->ptchunks->index;
+	PagetableEntry *ptbase = NULL;
+	int		   *idxpages = NULL;
+	int		   *idxchunks = NULL;
+
+	if (iterator->ptbase != NULL)
+		ptbase = iterator->ptbase->ptentry;
+	if (iterator->ptpages != NULL)
+		idxpages = iterator->ptpages->index;
+	if (iterator->ptchunks != NULL)
+		idxchunks = iterator->ptchunks->index;
 
 	/* Acquire the LWLock before accessing the shared members */
 	LWLockAcquire(&istate->lock, LW_EXCLUSIVE);
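With these guards, iterating an empty shared bitmap is well-defined: the iterator finds zero pages and zero chunks and reports end-of-scan on the first call. A hypothetical worker-side loop for context (siterator being the TBMSharedIterator obtained from tbm_attach_shared_iterate below):

    TBMIterateResult *tbmres;

    while ((tbmres = tbm_shared_iterate(siterator)) != NULL)
    {
        /* process tbmres->blockno; never reached for an empty bitmap */
    }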
@@ -1480,7 +1495,7 @@ tbm_attach_shared_iterate(dsa_area *dsa, dsa_pointer dp)
 	 * Create the TBMSharedIterator struct, with enough trailing space to
 	 * serve the needs of the TBMIterateResult sub-struct.
 	 */
-	iterator = (TBMSharedIterator *) palloc(sizeof(TBMSharedIterator) +
+	iterator = (TBMSharedIterator *) palloc0(sizeof(TBMSharedIterator) +
 										MAX_TUPLES_PER_PAGE * sizeof(OffsetNumber));
 
 	istate = (TBMSharedIteratorState *) dsa_get_address(dsa, dp);
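palloc0() here plays the same role for the backend-local iterator that dsa_allocate0() plays for the shared state: the iterator's ptbase, ptpages and ptchunks fields start out NULL, which is exactly what the new guards in tbm_shared_iterate() test. Spelled out as an equivalent two-step form (a sketch, not the committed code):

    Size        size = sizeof(TBMSharedIterator) +
                       MAX_TUPLES_PER_PAGE * sizeof(OffsetNumber);

    iterator = (TBMSharedIterator *) palloc(size);
    MemSet(iterator, 0, size);      /* what palloc0() does in one call */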