Revert "For inplace update, send nontransactional invalidations."

This reverts commit 95c5acb3fc261067ab65ddc0b2dca8e162f09442 (v17) and
counterparts in each other non-master branch.  If released, that commit
would have caused a worst-in-years minor release regression, via
undetected LWLock self-deadlock.  This commit and its self-deadlock fix
warrant more bake time in the master branch.

Reported by Alexander Lakhin.

Discussion: https://postgr.es/m/10ec0bc3-5933-1189-6bb8-5dec4114558e@gmail.com
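For context on the failure mode: unlike heavyweight locks, LWLocks have no deadlock detector, so a backend that re-acquires an LWLock it already holds waits on itself forever and nothing reports the cycle. A minimal sketch of the hazard (the function name is hypothetical; LWLockAcquire() is the real API):

#include "postgres.h"
#include "storage/lwlock.h"

/*
 * Hypothetical illustration only: an undetected LWLock self-deadlock.
 * The second acquire blocks forever, because the holder is ourselves.
 */
static void
lwlock_self_deadlock_sketch(LWLock *lock)
{
	LWLockAcquire(lock, LW_EXCLUSIVE);	/* first acquire succeeds */
	/* ... any code reached from here while "lock" is still held ... */
	LWLockAcquire(lock, LW_EXCLUSIVE);	/* hangs: waits on ourselves */
}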
Noah Misch, 2024-11-02 09:05:00 -07:00 (committed by Muhammad Usama)
parent f343d8f9a4
commit dd5b957c5a
13 changed files with 137 additions and 320 deletions
src/backend/access/heap
src/backend/access/transam
src/backend/catalog
src/backend/commands
src/backend/replication/logical
src/backend/utils/cache
src/include/utils
src/test/isolation
src/tools/pgindent

@@ -6355,24 +6355,6 @@ heap_inplace_update_and_unlock(Relation relation,
if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
elog(ERROR, "wrong tuple length");
/*
* Construct shared cache inval if necessary. Note that because we only
* pass the new version of the tuple, this mustn't be used for any
* operations that could change catcache lookup keys. But we aren't
* bothering with index updates either, so that's true a fortiori.
*/
CacheInvalidateHeapTupleInplace(relation, tuple, NULL);
/*
* Unlink relcache init files as needed. If unlinking, acquire
* RelCacheInitLock until after associated invalidations. By doing this
* in advance, if we checkpoint and then crash between inplace
* XLogInsert() and inval, we don't rely on StartupXLOG() ->
* RelationCacheInitFileRemove(). That uses elevel==LOG, so replay would
* neglect to PANIC on EIO.
*/
PreInplace_Inval();
/* NO EREPORT(ERROR) from here till changes are logged */
START_CRIT_SECTION();
@@ -6416,28 +6398,17 @@ heap_inplace_update_and_unlock(Relation relation,
PageSetLSN(BufferGetPage(buffer), recptr);
}
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
/*
* Send invalidations to shared queue. SearchSysCacheLocked1() assumes we
* do this before UnlockTuple().
*
* If we're mutating a tuple visible only to this transaction, there's an
* equivalent transactional inval from the action that created the tuple,
* and this inval is superfluous.
*/
AtInplace_Inval();
END_CRIT_SECTION();
UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
AcceptInvalidationMessages(); /* local processing of just-sent inval */
heap_inplace_unlock(relation, oldtup, buffer);
/*
* Queue a transactional inval. The immediate invalidation we just sent
* is the only one known to be necessary. To reduce risk from the
* transition to immediate invalidation, continue sending a transactional
* invalidation like we've long done. Third-party code might rely on it.
* Send out shared cache inval if necessary. Note that because we only
* pass the new version of the tuple, this mustn't be used for any
* operations that could change catcache lookup keys. But we aren't
* bothering with index updates either, so that's true a fortiori.
*
* XXX ROLLBACK discards the invalidation. See test inplace-inval.spec.
*/
if (!IsBootstrapProcessingMode())
CacheInvalidateHeapTuple(relation, tuple, NULL);
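Distilled from the hunk above, the ordering the reverted heap_inplace_update_and_unlock() enforced looks like this (a sketch in a hypothetical wrapper; the real function interleaves the tuple mutation, WAL logging, and error checks, so this is not meant to compile standalone):

static void
inplace_inval_ordering_sketch(Relation relation, HeapTuple tuple,
							  Buffer buffer)
{
	/* build inval messages while ereport(ERROR) is still permitted */
	CacheInvalidateHeapTupleInplace(relation, tuple, NULL);
	/* init-file unlink can fail, so it precedes the critical section */
	PreInplace_Inval();
	START_CRIT_SECTION();
	/* ... mutate the tuple in place, XLogInsert(), PageSetLSN() ... */
	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
	AtInplace_Inval();			/* send to shared queue, still critical */
	END_CRIT_SECTION();
	UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
	AcceptInvalidationMessages();	/* absorb our own messages locally */
}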

@@ -1358,24 +1358,14 @@ RecordTransactionCommit(void)
/*
* Transactions without an assigned xid can contain invalidation
* messages. While inplace updates do this, this is not known to be
* necessary; see comment at inplace CacheInvalidateHeapTuple().
* Extensions might still rely on this capability, and standbys may
* need to process those invals. We can't emit a commit record
* without an xid, and we don't want to force assigning an xid,
* because that'd be problematic for e.g. vacuum. Hence we emit a
* bespoke record for the invalidations. We don't want to use that in
* case a commit record is emitted, so they happen synchronously with
* commits (besides not wanting to emit more WAL records).
*
* XXX Every known use of this capability is a defect. Since an XID
* isn't controlling visibility of the change that prompted invals,
* other sessions need the inval even if this transaction aborts.
*
* ON COMMIT DELETE ROWS does a nontransactional index_build(), which
* queues a relcache inval, including in transactions without an xid
* that had read the (empty) table. Standbys don't need any ON COMMIT
* DELETE ROWS invals, but we've not done the work to withhold them.
* messages (e.g. explicit relcache invalidations or catcache
* invalidations for inplace updates); standbys need to process those.
* We can't emit a commit record without an xid, and we don't want to
* force assigning an xid, because that'd be problematic for e.g.
* vacuum. Hence we emit a bespoke record for the invalidations. We
* don't want to use that in case a commit record is emitted, so they
* happen synchronously with commits (besides not wanting to emit more
* WAL records).
*/
if (nmsgs != 0)
{
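For reference, the xid-less branch this comment introduces survives the revert in roughly the following shape (a sketch completing the truncated hunk above; LogStandbyInvalidations() is the routine that writes the standalone XLOG_INVALIDATIONS record):

	if (nmsgs != 0)
	{
		/* no commit record will carry these, so log them on their own */
		LogStandbyInvalidations(nmsgs, invalMessages,
								RelcacheInitFileInval);
		wrote_xlog = true;	/* not strictly necessary */
	}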

@@ -2889,19 +2889,12 @@ index_update_stats(Relation rel,
if (dirty)
{
systable_inplace_update_finish(state, tuple);
/* the above sends transactional and immediate cache inval messages */
/* the above sends a cache inval message */
}
else
{
systable_inplace_update_cancel(state);
/*
* While we didn't change relhasindex, CREATE INDEX needs a
* transactional inval for when the new index's catalog rows become
* visible. Other CREATE INDEX and REINDEX code happens to also queue
* this inval, but keep this in case rare callers rely on this part of
* our API contract.
*/
/* no need to change tuple, but force relcache inval anyway */
CacheInvalidateRelcacheByTuple(tuple);
}
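The begin/finish-or-cancel protocol used above looks roughly like this from a caller's perspective (a sketch: scan-key setup is elided, and decide_to_change() stands in for whatever test the caller applies):

	void	   *state;
	HeapTuple	oldtup;

	systable_inplace_update_begin(relation, indexId, true, NULL,
								  1, key, &oldtup, &state);
	if (oldtup != NULL && decide_to_change(oldtup))
		systable_inplace_update_finish(state, oldtup);	/* WAL + inval */
	else
		systable_inplace_update_cancel(state);	/* unlock, change nothing */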

@@ -975,6 +975,11 @@ EventTriggerOnLogin(void)
* this instead of regular updates serves two purposes. First,
* that avoids possible waiting on the row-level lock. Second,
* that avoids dealing with TOAST.
*
* Changes made by inplace update may be lost due to
* concurrent normal updates; see inplace-inval.spec. However,
* we are OK with that. The subsequent connections will still
* have a chance to set "dathasloginevt" to false.
*/
systable_inplace_update_finish(state, tuple);
}

@@ -508,19 +508,23 @@ heap_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
/*
* Inplace updates are only ever performed on catalog tuples and
* can, per definition, not change tuple visibility. Inplace
* updates don't affect storage or interpretation of table rows,
* so they don't affect logicalrep_write_tuple() outcomes. Hence,
* we don't process invalidations from the original operation. If
* inplace updates did affect those things, invalidations wouldn't
* make it work, since there are no snapshot-specific versions of
* inplace-updated values. Since we also don't decode catalog
* tuples, we're not interested in the record's contents.
* can, per definition, not change tuple visibility. Since we
* don't decode catalog tuples, we're not interested in the
* record's contents.
*
* WAL contains likely-unnecessary commit-time invals from the
* CacheInvalidateHeapTuple() call in heap_inplace_update().
* Excess invalidation is safe.
* In-place updates can be used either by XID-bearing transactions
* (e.g. in CREATE INDEX CONCURRENTLY) or by XID-less
* transactions (e.g. VACUUM). In the former case, the commit
* record will include cache invalidations, so we mark the
* transaction as catalog modifying here. Currently that's
* redundant because the commit will do that as well, but once we
* support decoding in-progress relations, this will be important.
*/
if (!TransactionIdIsValid(xid))
break;
(void) SnapBuildProcessChange(builder, xid, buf->origptr);
ReorderBufferXidSetCatalogChanges(ctx->reorder, xid, buf->origptr);
break;
case XLOG_HEAP_CONFIRM:

@@ -2288,8 +2288,7 @@ void
PrepareToInvalidateCacheTuple(Relation relation,
HeapTuple tuple,
HeapTuple newtuple,
void (*function) (int, uint32, Oid, void *),
void *context)
void (*function) (int, uint32, Oid))
{
slist_iter iter;
Oid reloid;
@@ -2330,7 +2329,7 @@ PrepareToInvalidateCacheTuple(Relation relation,
hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
(*function) (ccp->id, hashvalue, dbid, context);
(*function) (ccp->id, hashvalue, dbid);
if (newtuple)
{
@@ -2339,7 +2338,7 @@ PrepareToInvalidateCacheTuple(Relation relation,
newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);
if (newhashvalue != hashvalue)
(*function) (ccp->id, newhashvalue, dbid, context);
(*function) (ccp->id, newhashvalue, dbid);
}
}
}
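The two callback shapes capture the whole API change above (typedef names here are hypothetical, shown only for comparison):

/* Reverted form: caller state travels through an explicit pointer. */
typedef void (*inval_callback_ctx) (int cacheId, uint32 hashValue,
									Oid dbId, void *context);

/* Restored form: callbacks reach the global transInvalInfo instead. */
typedef void (*inval_callback) (int cacheId, uint32 hashValue, Oid dbId);

The context pointer existed so PrepareToInvalidateCacheTuple() could route messages to either a transactional or an inplace InvalidationInfo; with inplace invalidations reverted, global state suffices again.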

@@ -94,10 +94,6 @@
* worth trying to avoid sending such inval traffic in the future, if those
* problems can be overcome cheaply.
*
* When making a nontransactional change to a cacheable object, we must
* likewise send the invalidation immediately, before ending the change's
* critical section. This includes inplace heap updates, relmap, and smgr.
*
* When wal_level=logical, write invalidations into WAL at each command end to
* support the decoding of the in-progress transactions. See
* CommandEndInvalidationMessages.
@@ -134,15 +130,13 @@
/*
* Pending requests are stored as ready-to-send SharedInvalidationMessages.
* We keep the messages themselves in arrays in TopTransactionContext (there
* are separate arrays for catcache and relcache messages). For transactional
* messages, control information is kept in a chain of TransInvalidationInfo
* structs, also allocated in TopTransactionContext. (We could keep a
* subtransaction's TransInvalidationInfo in its CurTransactionContext; but
* that's more wasteful not less so, since in very many scenarios it'd be the
* only allocation in the subtransaction's CurTransactionContext.) For
* inplace update messages, control information appears in an
* InvalidationInfo, allocated in CurrentMemoryContext.
* We keep the messages themselves in arrays in TopTransactionContext
* (there are separate arrays for catcache and relcache messages). Control
* information is kept in a chain of TransInvalidationInfo structs, also
* allocated in TopTransactionContext. (We could keep a subtransaction's
* TransInvalidationInfo in its CurTransactionContext; but that's more
* wasteful not less so, since in very many scenarios it'd be the only
* allocation in the subtransaction's CurTransactionContext.)
*
* We can store the message arrays densely, and yet avoid moving data around
* within an array, because within any one subtransaction we need only
@@ -153,9 +147,7 @@
* struct. Similarly, we need to distinguish messages of prior subtransactions
* from those of the current subtransaction only until the subtransaction
* completes, after which we adjust the array indexes in the parent's
* TransInvalidationInfo to include the subtransaction's messages. Inplace
* invalidations don't need a concept of command or subtransaction boundaries,
* since we send them during the WAL insertion critical section.
* TransInvalidationInfo to include the subtransaction's messages.
*
* The ordering of the individual messages within a command's or
* subtransaction's output is not considered significant, although this
@@ -208,7 +200,7 @@ typedef struct InvalidationMsgsGroup
/*----------------
* Transactional invalidation messages are divided into two groups:
* Invalidation messages are divided into two groups:
* 1) events so far in current command, not yet reflected to caches.
* 2) events in previous commands of current transaction; these have
* been reflected to local caches, and must be either broadcast to
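As background for the index-juggling these comments describe: a group is just a pair of cursors per message class into the dense arrays, so re-labeling messages means moving a boundary rather than copying data. A simplified sketch of the struct (the real code sizes the arrays by its message-class constants, CatCacheMsgs and RelCacheMsgs):

typedef struct InvalidationMsgsGroup
{
	int		firstmsg[2];	/* first message in group, per array */
	int		nextmsg[2];		/* last message in group + 1, per array */
} InvalidationMsgsGroup;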
@@ -224,36 +216,26 @@ typedef struct InvalidationMsgsGroup
*----------------
*/
/* fields common to both transactional and inplace invalidation */
typedef struct InvalidationInfo
{
/* Events emitted by current command */
InvalidationMsgsGroup CurrentCmdInvalidMsgs;
/* init file must be invalidated? */
bool RelcacheInitFileInval;
} InvalidationInfo;
/* subclass adding fields specific to transactional invalidation */
typedef struct TransInvalidationInfo
{
/* Base class */
struct InvalidationInfo ii;
/* Events emitted by previous commands of this (sub)transaction */
InvalidationMsgsGroup PriorCmdInvalidMsgs;
/* Back link to parent transaction's info */
struct TransInvalidationInfo *parent;
/* Subtransaction nesting depth */
int my_level;
/* Events emitted by current command */
InvalidationMsgsGroup CurrentCmdInvalidMsgs;
/* Events emitted by previous commands of this (sub)transaction */
InvalidationMsgsGroup PriorCmdInvalidMsgs;
/* init file must be invalidated? */
bool RelcacheInitFileInval;
} TransInvalidationInfo;
static TransInvalidationInfo *transInvalInfo = NULL;
static InvalidationInfo *inplaceInvalInfo = NULL;
/* GUC storage */
int debug_discard_caches = 0;
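The struct split being reverted above is C's usual single-inheritance idiom: with the base struct as the first member, a TransInvalidationInfo * is safely usable as an InvalidationInfo *, which is why the reverted PrepareInvalidationState() could end with "return (InvalidationInfo *) myInfo;". A minimal self-contained sketch of the pattern:

typedef struct Base
{
	int		shared_field;
} Base;

typedef struct Derived
{
	Base	base;			/* must be the first member */
	int		extra_field;
} Derived;

static void
takes_base(Base *b)
{
	b->shared_field = 1;	/* works for a Derived passed as (Base *) */
}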
@@ -561,12 +543,9 @@ ProcessInvalidationMessagesMulti(InvalidationMsgsGroup *group,
static void
RegisterCatcacheInvalidation(int cacheId,
uint32 hashValue,
Oid dbId,
void *context)
Oid dbId)
{
InvalidationInfo *info = (InvalidationInfo *) context;
AddCatcacheInvalidationMessage(&info->CurrentCmdInvalidMsgs,
AddCatcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
cacheId, hashValue, dbId);
}
@@ -576,9 +555,10 @@ RegisterCatcacheInvalidation(int cacheId,
* Register an invalidation event for all catcache entries from a catalog.
*/
static void
RegisterCatalogInvalidation(InvalidationInfo *info, Oid dbId, Oid catId)
RegisterCatalogInvalidation(Oid dbId, Oid catId)
{
AddCatalogInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, catId);
AddCatalogInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
dbId, catId);
}
/*
@@ -587,9 +567,10 @@ RegisterCatalogInvalidation(InvalidationInfo *info, Oid dbId, Oid catId)
* As above, but register a relcache invalidation event.
*/
static void
RegisterRelcacheInvalidation(InvalidationInfo *info, Oid dbId, Oid relId)
RegisterRelcacheInvalidation(Oid dbId, Oid relId)
{
AddRelcacheInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, relId);
AddRelcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
dbId, relId);
/*
* Most of the time, relcache invalidation is associated with system
@@ -606,7 +587,7 @@ RegisterRelcacheInvalidation(InvalidationInfo *info, Oid dbId, Oid relId)
* as well. Also zap when we are invalidating whole relcache.
*/
if (relId == InvalidOid || RelationIdIsInInitFile(relId))
info->RelcacheInitFileInval = true;
transInvalInfo->RelcacheInitFileInval = true;
}
/*
@@ -616,27 +597,24 @@ RegisterRelcacheInvalidation(InvalidationInfo *info, Oid dbId, Oid relId)
* Only needed for catalogs that don't have catcaches.
*/
static void
RegisterSnapshotInvalidation(InvalidationInfo *info, Oid dbId, Oid relId)
RegisterSnapshotInvalidation(Oid dbId, Oid relId)
{
AddSnapshotInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, relId);
AddSnapshotInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
dbId, relId);
}
/*
* PrepareInvalidationState
* Initialize inval data for the current (sub)transaction.
*/
static InvalidationInfo *
static void
PrepareInvalidationState(void)
{
TransInvalidationInfo *myInfo;
Assert(IsTransactionState());
/* Can't queue transactional message while collecting inplace messages. */
Assert(inplaceInvalInfo == NULL);
if (transInvalInfo != NULL &&
transInvalInfo->my_level == GetCurrentTransactionNestLevel())
return (InvalidationInfo *) transInvalInfo;
return;
myInfo = (TransInvalidationInfo *)
MemoryContextAllocZero(TopTransactionContext,
@@ -659,7 +637,7 @@ PrepareInvalidationState(void)
* counter. This is a convenient place to check for that, as well as
* being important to keep management of the message arrays simple.
*/
if (NumMessagesInGroup(&transInvalInfo->ii.CurrentCmdInvalidMsgs) != 0)
if (NumMessagesInGroup(&transInvalInfo->CurrentCmdInvalidMsgs) != 0)
elog(ERROR, "cannot start a subtransaction when there are unprocessed inval messages");
/*
@@ -668,8 +646,8 @@ PrepareInvalidationState(void)
* to update them to follow whatever is already in the arrays.
*/
SetGroupToFollow(&myInfo->PriorCmdInvalidMsgs,
&transInvalInfo->ii.CurrentCmdInvalidMsgs);
SetGroupToFollow(&myInfo->ii.CurrentCmdInvalidMsgs,
&transInvalInfo->CurrentCmdInvalidMsgs);
SetGroupToFollow(&myInfo->CurrentCmdInvalidMsgs,
&myInfo->PriorCmdInvalidMsgs);
}
else
@@ -685,41 +663,6 @@ PrepareInvalidationState(void)
}
transInvalInfo = myInfo;
return (InvalidationInfo *) myInfo;
}
/*
* PrepareInplaceInvalidationState
* Initialize inval data for an inplace update.
*
* See previous function for more background.
*/
static InvalidationInfo *
PrepareInplaceInvalidationState(void)
{
InvalidationInfo *myInfo;
Assert(IsTransactionState());
/* limit of one inplace update under assembly */
Assert(inplaceInvalInfo == NULL);
/* gone after WAL insertion CritSection ends, so use current context */
myInfo = (InvalidationInfo *) palloc0(sizeof(InvalidationInfo));
/* Stash our messages past end of the transactional messages, if any. */
if (transInvalInfo != NULL)
SetGroupToFollow(&myInfo->CurrentCmdInvalidMsgs,
&transInvalInfo->ii.CurrentCmdInvalidMsgs);
else
{
InvalMessageArrays[CatCacheMsgs].msgs = NULL;
InvalMessageArrays[CatCacheMsgs].maxmsgs = 0;
InvalMessageArrays[RelCacheMsgs].msgs = NULL;
InvalMessageArrays[RelCacheMsgs].maxmsgs = 0;
}
inplaceInvalInfo = myInfo;
return myInfo;
}
/* ----------------------------------------------------------------
@@ -959,7 +902,7 @@ xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
* after we send the SI messages. However, we need not do anything unless
* we committed.
*/
*RelcacheInitFileInval = transInvalInfo->ii.RelcacheInitFileInval;
*RelcacheInitFileInval = transInvalInfo->RelcacheInitFileInval;
/*
* Collect all the pending messages into a single contiguous array of
@@ -970,7 +913,7 @@ xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
* not new ones.
*/
nummsgs = NumMessagesInGroup(&transInvalInfo->PriorCmdInvalidMsgs) +
NumMessagesInGroup(&transInvalInfo->ii.CurrentCmdInvalidMsgs);
NumMessagesInGroup(&transInvalInfo->CurrentCmdInvalidMsgs);
*msgs = msgarray = (SharedInvalidationMessage *)
MemoryContextAlloc(CurTransactionContext,
@@ -983,7 +926,7 @@ xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
msgs,
n * sizeof(SharedInvalidationMessage)),
nmsgs += n));
ProcessMessageSubGroupMulti(&transInvalInfo->ii.CurrentCmdInvalidMsgs,
ProcessMessageSubGroupMulti(&transInvalInfo->CurrentCmdInvalidMsgs,
CatCacheMsgs,
(memcpy(msgarray + nmsgs,
msgs,
@@ -995,7 +938,7 @@ xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
msgs,
n * sizeof(SharedInvalidationMessage)),
nmsgs += n));
ProcessMessageSubGroupMulti(&transInvalInfo->ii.CurrentCmdInvalidMsgs,
ProcessMessageSubGroupMulti(&transInvalInfo->CurrentCmdInvalidMsgs,
RelCacheMsgs,
(memcpy(msgarray + nmsgs,
msgs,
@@ -1081,9 +1024,7 @@ ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
void
AtEOXact_Inval(bool isCommit)
{
inplaceInvalInfo = NULL;
/* Quick exit if no transactional messages */
/* Quick exit if no messages */
if (transInvalInfo == NULL)
return;
@@ -1097,16 +1038,16 @@ AtEOXact_Inval(bool isCommit)
* after we send the SI messages. However, we need not do anything
* unless we committed.
*/
if (transInvalInfo->ii.RelcacheInitFileInval)
if (transInvalInfo->RelcacheInitFileInval)
RelationCacheInitFilePreInvalidate();
AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
&transInvalInfo->ii.CurrentCmdInvalidMsgs);
&transInvalInfo->CurrentCmdInvalidMsgs);
ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
SendSharedInvalidMessages);
if (transInvalInfo->ii.RelcacheInitFileInval)
if (transInvalInfo->RelcacheInitFileInval)
RelationCacheInitFilePostInvalidate();
}
else
@@ -1119,44 +1060,6 @@
transInvalInfo = NULL;
}
/*
* PreInplace_Inval
* Process queued-up invalidation before inplace update critical section.
*
* Tasks belong here if they are safe even if the inplace update does not
* complete. Currently, this just unlinks a cache file, which can fail. The
* sum of this and AtInplace_Inval() mirrors AtEOXact_Inval(isCommit=true).
*/
void
PreInplace_Inval(void)
{
Assert(CritSectionCount == 0);
if (inplaceInvalInfo && inplaceInvalInfo->RelcacheInitFileInval)
RelationCacheInitFilePreInvalidate();
}
/*
* AtInplace_Inval
* Process queued-up invalidations after inplace update buffer mutation.
*/
void
AtInplace_Inval(void)
{
Assert(CritSectionCount > 0);
if (inplaceInvalInfo == NULL)
return;
ProcessInvalidationMessagesMulti(&inplaceInvalInfo->CurrentCmdInvalidMsgs,
SendSharedInvalidMessages);
if (inplaceInvalInfo->RelcacheInitFileInval)
RelationCacheInitFilePostInvalidate();
inplaceInvalInfo = NULL;
}
/*
* AtEOSubXact_Inval
* Process queued-up invalidation messages at end of subtransaction.
@@ -1179,20 +1082,9 @@ void
AtEOSubXact_Inval(bool isCommit)
{
int my_level;
TransInvalidationInfo *myInfo;
TransInvalidationInfo *myInfo = transInvalInfo;
/*
* Successful inplace update must clear this, but we clear it on abort.
* Inplace updates allocate this in CurrentMemoryContext, which has
* lifespan <= subtransaction lifespan. Hence, don't free it explicitly.
*/
if (isCommit)
Assert(inplaceInvalInfo == NULL);
else
inplaceInvalInfo = NULL;
/* Quick exit if no transactional messages. */
myInfo = transInvalInfo;
/* Quick exit if no messages. */
if (myInfo == NULL)
return;
@@ -1233,12 +1125,12 @@ AtEOSubXact_Inval(bool isCommit)
&myInfo->PriorCmdInvalidMsgs);
/* Must readjust parent's CurrentCmdInvalidMsgs indexes now */
SetGroupToFollow(&myInfo->parent->ii.CurrentCmdInvalidMsgs,
SetGroupToFollow(&myInfo->parent->CurrentCmdInvalidMsgs,
&myInfo->parent->PriorCmdInvalidMsgs);
/* Pending relcache inval becomes parent's problem too */
if (myInfo->ii.RelcacheInitFileInval)
myInfo->parent->ii.RelcacheInitFileInval = true;
if (myInfo->RelcacheInitFileInval)
myInfo->parent->RelcacheInitFileInval = true;
/* Pop the transaction state stack */
transInvalInfo = myInfo->parent;
@@ -1285,7 +1177,7 @@ CommandEndInvalidationMessages(void)
if (transInvalInfo == NULL)
return;
ProcessInvalidationMessages(&transInvalInfo->ii.CurrentCmdInvalidMsgs,
ProcessInvalidationMessages(&transInvalInfo->CurrentCmdInvalidMsgs,
LocalExecuteInvalidationMessage);
/* WAL Log per-command invalidation messages for wal_level=logical */
@@ -1293,21 +1185,26 @@ CommandEndInvalidationMessages(void)
LogLogicalInvalidations();
AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
&transInvalInfo->ii.CurrentCmdInvalidMsgs);
&transInvalInfo->CurrentCmdInvalidMsgs);
}
/*
* CacheInvalidateHeapTupleCommon
* Common logic for end-of-command and inplace variants.
* CacheInvalidateHeapTuple
* Register the given tuple for invalidation at end of command
* (ie, current command is creating or outdating this tuple).
* Also, detect whether a relcache invalidation is implied.
*
* For an insert or delete, tuple is the target tuple and newtuple is NULL.
* For an update, we are called just once, with tuple being the old tuple
* version and newtuple the new version. This allows avoidance of duplicate
* effort during an update.
*/
static void
CacheInvalidateHeapTupleCommon(Relation relation,
HeapTuple tuple,
HeapTuple newtuple,
InvalidationInfo *(*prepare_callback) (void))
void
CacheInvalidateHeapTuple(Relation relation,
HeapTuple tuple,
HeapTuple newtuple)
{
InvalidationInfo *info;
Oid tupleRelId;
Oid databaseId;
Oid relationId;
@@ -1331,8 +1228,11 @@ CacheInvalidateHeapTupleCommon(Relation relation,
if (IsToastRelation(relation))
return;
/* Allocate any required resources. */
info = prepare_callback();
/*
* If we're not prepared to queue invalidation messages for this
* subtransaction level, get ready now.
*/
PrepareInvalidationState();
/*
* First let the catcache do its thing
@@ -1341,12 +1241,11 @@ CacheInvalidateHeapTupleCommon(Relation relation,
if (RelationInvalidatesSnapshotsOnly(tupleRelId))
{
databaseId = IsSharedRelation(tupleRelId) ? InvalidOid : MyDatabaseId;
RegisterSnapshotInvalidation(info, databaseId, tupleRelId);
RegisterSnapshotInvalidation(databaseId, tupleRelId);
}
else
PrepareToInvalidateCacheTuple(relation, tuple, newtuple,
RegisterCatcacheInvalidation,
(void *) info);
RegisterCatcacheInvalidation);
/*
* Now, is this tuple one of the primary definers of a relcache entry? See
@@ -1419,44 +1318,7 @@ CacheInvalidateHeapTupleCommon(Relation relation,
/*
* Yes. We need to register a relcache invalidation event.
*/
RegisterRelcacheInvalidation(info, databaseId, relationId);
}
/*
* CacheInvalidateHeapTuple
* Register the given tuple for invalidation at end of command
* (ie, current command is creating or outdating this tuple) and end of
* transaction. Also, detect whether a relcache invalidation is implied.
*
* For an insert or delete, tuple is the target tuple and newtuple is NULL.
* For an update, we are called just once, with tuple being the old tuple
* version and newtuple the new version. This allows avoidance of duplicate
* effort during an update.
*/
void
CacheInvalidateHeapTuple(Relation relation,
HeapTuple tuple,
HeapTuple newtuple)
{
CacheInvalidateHeapTupleCommon(relation, tuple, newtuple,
PrepareInvalidationState);
}
/*
* CacheInvalidateHeapTupleInplace
* Register the given tuple for nontransactional invalidation pertaining
* to an inplace update. Also, detect whether a relcache invalidation is
* implied.
*
* Like CacheInvalidateHeapTuple(), but for inplace updates.
*/
void
CacheInvalidateHeapTupleInplace(Relation relation,
HeapTuple tuple,
HeapTuple newtuple)
{
CacheInvalidateHeapTupleCommon(relation, tuple, newtuple,
PrepareInplaceInvalidationState);
RegisterRelcacheInvalidation(databaseId, relationId);
}
/*
@@ -1475,13 +1337,14 @@ CacheInvalidateCatalog(Oid catalogId)
{
Oid databaseId;
PrepareInvalidationState();
if (IsSharedRelation(catalogId))
databaseId = InvalidOid;
else
databaseId = MyDatabaseId;
RegisterCatalogInvalidation(PrepareInvalidationState(),
databaseId, catalogId);
RegisterCatalogInvalidation(databaseId, catalogId);
}
/*
@@ -1499,14 +1362,15 @@ CacheInvalidateRelcache(Relation relation)
Oid databaseId;
Oid relationId;
PrepareInvalidationState();
relationId = RelationGetRelid(relation);
if (relation->rd_rel->relisshared)
databaseId = InvalidOid;
else
databaseId = MyDatabaseId;
RegisterRelcacheInvalidation(PrepareInvalidationState(),
databaseId, relationId);
RegisterRelcacheInvalidation(databaseId, relationId);
}
/*
@@ -1519,8 +1383,9 @@ CacheInvalidateRelcache(Relation relation)
void
CacheInvalidateRelcacheAll(void)
{
RegisterRelcacheInvalidation(PrepareInvalidationState(),
InvalidOid, InvalidOid);
PrepareInvalidationState();
RegisterRelcacheInvalidation(InvalidOid, InvalidOid);
}
/*
@@ -1534,13 +1399,14 @@ CacheInvalidateRelcacheByTuple(HeapTuple classTuple)
Oid databaseId;
Oid relationId;
PrepareInvalidationState();
relationId = classtup->oid;
if (classtup->relisshared)
databaseId = InvalidOid;
else
databaseId = MyDatabaseId;
RegisterRelcacheInvalidation(PrepareInvalidationState(),
databaseId, relationId);
RegisterRelcacheInvalidation(databaseId, relationId);
}
/*
@@ -1554,6 +1420,8 @@ CacheInvalidateRelcacheByRelid(Oid relid)
{
HeapTuple tup;
PrepareInvalidationState();
tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
if (!HeapTupleIsValid(tup))
elog(ERROR, "cache lookup failed for relation %u", relid);
@@ -1743,7 +1611,7 @@ LogLogicalInvalidations(void)
if (transInvalInfo == NULL)
return;
group = &transInvalInfo->ii.CurrentCmdInvalidMsgs;
group = &transInvalInfo->CurrentCmdInvalidMsgs;
nmsgs = NumMessagesInGroup(group);
if (nmsgs > 0)

@@ -351,7 +351,8 @@ SearchSysCacheLocked1(int cacheId,
/*
* If an inplace update just finished, ensure we process the syscache
* inval.
* inval. XXX this is insufficient: the inplace updater may not yet
* have reached AtEOXact_Inval(). See test at inplace-inval.spec.
*
* If a heap_update() call just released its LOCKTAG_TUPLE, we'll
* probably find the old tuple and reach "tuple concurrently updated".

@@ -225,7 +225,6 @@ extern void CatCacheInvalidate(CatCache *cache, uint32 hashValue);
extern void PrepareToInvalidateCacheTuple(Relation relation,
HeapTuple tuple,
HeapTuple newtuple,
void (*function) (int, uint32, Oid, void *),
void *context);
void (*function) (int, uint32, Oid));
#endif /* CATCACHE_H */

@@ -28,9 +28,6 @@ extern void AcceptInvalidationMessages(void);
extern void AtEOXact_Inval(bool isCommit);
extern void PreInplace_Inval(void);
extern void AtInplace_Inval(void);
extern void AtEOSubXact_Inval(bool isCommit);
extern void PostPrepare_Inval(void);
@@ -40,9 +37,6 @@ extern void CommandEndInvalidationMessages(void);
extern void CacheInvalidateHeapTuple(Relation relation,
HeapTuple tuple,
HeapTuple newtuple);
extern void CacheInvalidateHeapTupleInplace(Relation relation,
HeapTuple tuple,
HeapTuple newtuple);
extern void CacheInvalidateCatalog(Oid catalogId);

@@ -1,6 +1,6 @@
Parsed test spec with 3 sessions
starting permutation: cachefill3 cir1 cic2 ddl3 read1
starting permutation: cachefill3 cir1 cic2 ddl3
step cachefill3: TABLE newly_indexed;
c
-
@@ -9,14 +9,6 @@ c
step cir1: BEGIN; CREATE INDEX i1 ON newly_indexed (c); ROLLBACK;
step cic2: CREATE INDEX i2 ON newly_indexed (c);
step ddl3: ALTER TABLE newly_indexed ADD extra int;
step read1:
SELECT relhasindex FROM pg_class WHERE oid = 'newly_indexed'::regclass;
relhasindex
-----------
t
(1 row)
starting permutation: cir1 cic2 ddl3 read1
step cir1: BEGIN; CREATE INDEX i1 ON newly_indexed (c); ROLLBACK;

@@ -1,7 +1,7 @@
# An inplace update had been able to abort before sending the inplace
# invalidation message to the shared queue. If a heap_update() caller then
# retrieved its oldtup from a cache, the heap_update() could revert the
# inplace update.
# If a heap_update() caller retrieves its oldtup from a cache, it's possible
# for that cache entry to predate an inplace update, causing loss of that
# inplace update. This arises because the transaction may abort before
# sending the inplace invalidation message to the shared queue.
setup
{
@@ -27,12 +27,14 @@ step cachefill3 { TABLE newly_indexed; }
step ddl3 { ALTER TABLE newly_indexed ADD extra int; }
# XXX shows an extant bug. Adding step read1 at the end would usually print
# relhasindex=f (not wanted). This does not reach the unwanted behavior under
# -DCATCACHE_FORCE_RELEASE and friends.
permutation
cachefill3 # populates the pg_class row in the catcache
cir1 # sets relhasindex=true; rollback discards cache inval
cic2 # sees relhasindex=true, skips changing it (so no inval)
ddl3 # cached row as the oldtup of an update, losing relhasindex
read1 # observe damage
# without cachefill3, no bug
permutation cir1 cic2 ddl3 read1

@@ -1253,7 +1253,6 @@ Interval
IntervalAggState
IntoClause
InvalMessageArray
InvalidationInfo
InvalidationMsgsGroup
IpcMemoryId
IpcMemoryKey