
To allow table accesses to not depend directly on heap, several new abstractions are needed. Specifically:

1) Heap scans need to be generalized into table scans. Do this by introducing TableScanDesc, which will be the "base class" for individual AMs. This contains the AM-independent fields from HeapScanDesc.

   The previous heap_{beginscan, rescan, endscan} et al. have been replaced with table_ versions. There is no direct replacement for heap_getnext(), as that returns a HeapTuple, which is undesirable for other AMs. Instead there is table_scan_getnextslot() (see the first sketch below). But note that heap_getnext() lives on; it is still widely used to access catalog tables.

   This is achieved by new scan_begin, scan_end, scan_rescan and scan_getnextslot callbacks.

2) The portion of parallel scans that is shared between backends needs to be set up and coordinated without the caller doing per-AM work. To achieve that, new parallelscan_{estimate, initialize, reinitialize} callbacks are introduced, which operate on a new ParallelTableScanDesc, which again can be subclassed by AMs.

   As several AMs are likely to be block oriented, block-oriented callbacks that can be shared between such AMs are provided and used by heap: table_block_parallelscan_{estimate, initialize, reinitialize} as callbacks, and table_block_parallelscan_{nextpage, init} for use inside AMs. These operate on a ParallelBlockTableScanDesc.

3) Index scans need to be able to access tables to return a tuple, and there needs to be state kept across individual table accesses (e.g. buffers). That's now handled by introducing a sort-of-scan, IndexFetchTable, which again is intended to be subclassed by individual AMs (for heap, IndexFetchHeap).

   The relevant callbacks for an AM are index_fetch_{begin, reset, end} to manage the necessary state, and index_fetch_tuple to retrieve an indexed tuple. Note that index_fetch_tuple implementations need to be smarter than blindly fetching tuples for AMs that have optimizations similar to heap's HOT - the currently live tuple in the update chain needs to be fetched if appropriate.

   As with table_scan_getnextslot(), it is undesirable to continue returning HeapTuples. Thus index_fetch_heap (might want to rename that later) now accepts a slot as an argument (the second sketch below shows the resulting caller-side pattern). Core code doesn't have a lot of call sites performing index scans without going through the systable_* API (in contrast to loads of heap_getnext() calls working directly with HeapTuples).

   Index scans now store the result of a search in IndexScanDesc->xs_heaptid, rather than xs_ctup->t_self. As the target is not generally a HeapTuple anymore, that seems cleaner.

To be able to sensibly adapt code to use the above, two further callbacks have been introduced:

a) slot_callbacks returns a TupleTableSlotOps * suitable for creating slots capable of holding a tuple of the AM's type. table_slot_callbacks() and table_slot_create() are based upon that, but have additional logic to deal with views, foreign tables, etc.

   While this change could have been done separately, nearly all the call sites that needed to be adapted for the rest of this commit would also have needed to be adapted for table_slot_callbacks(), making separation not worthwhile.

b) tuple_satisfies_snapshot checks whether the tuple in a slot is currently visible according to a snapshot. That's required because a few places now don't have a buffer + HeapTuple around, but a slot (which, in heap's case, internally has that information).
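As a concrete illustration of 1) and a), here is a minimal sketch of how a caller walks a table through the new AM-independent wrappers, with rows arriving in a slot created via table_slot_create() rather than as HeapTuples. This is editorial illustration, not part of the commit's diff: scan_all_rows and rel are placeholder names, and the caller is assumed to have opened and locked the relation and to have an active snapshot.

/*
 * Sketch only: iterate over a table via the AM-independent scan wrappers.
 * "rel" is assumed to be an already opened and locked Relation.
 */
#include "postgres.h"

#include "access/sdir.h"
#include "access/tableam.h"
#include "executor/tuptable.h"
#include "utils/snapmgr.h"

static void
scan_all_rows(Relation rel)
{
	/* slot whose TupleTableSlotOps match the relation's AM (slot_callbacks) */
	TupleTableSlot *slot = table_slot_create(rel, NULL);
	TableScanDesc scan = table_beginscan(rel, GetActiveSnapshot(), 0, NULL);

	while (table_scan_getnextslot(scan, ForwardScanDirection, slot))
	{
		/* process one row; the slot, not a HeapTuple, carries the data */
	}

	table_endscan(scan);
	ExecDropSingleTupleTableSlot(slot);
}

pgrowlocks.c below is an example of a caller that still needs heap specifics (buffer locks, infomask bits), so it keeps heap_getnext() while switching to TableScanDesc, table_beginscan() and table_endscan().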
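For 3), the caller-side pattern for an index lookup that returns its result in a slot ends up looking roughly as follows. Again an illustrative sketch rather than code from this commit: index_lookup, rel, irel, keys and nkeys are placeholders, and index_getnext_slot()/xs_heaptid are written as the API settled in PostgreSQL 12 (at this point in the series the per-TID fetch is still spelled index_fetch_heap(), as noted above).

/*
 * Sketch only: index scan whose matching table rows land in an
 * AM-appropriate slot instead of being returned as HeapTuples.
 */
#include "postgres.h"

#include "access/genam.h"
#include "access/sdir.h"
#include "access/tableam.h"
#include "executor/tuptable.h"
#include "utils/snapmgr.h"

static void
index_lookup(Relation rel, Relation irel, ScanKey keys, int nkeys)
{
	TupleTableSlot *slot = table_slot_create(rel, NULL);
	IndexScanDesc iscan = index_beginscan(rel, irel,
										  GetActiveSnapshot(), nkeys, 0);

	index_rescan(iscan, keys, nkeys, NULL, 0);

	/* each call fetches the next visible matching row into the slot */
	while (index_getnext_slot(iscan, ForwardScanDirection, slot))
	{
		/* iscan->xs_heaptid holds the TID of the row just fetched */
	}

	index_endscan(iscan);
	ExecDropSingleTupleTableSlot(slot);
}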
Additionally, a few infrastructure changes were needed:

I) SysScanDesc, as used by systable_{beginscan, getnext} et al., now internally uses a slot to keep track of tuples. While systable_getnext() still returns HeapTuples, and will do so for the foreseeable future, the underlying scan and index APIs (see 1) and 3) above) now only deal with slots (a short sketch at the end of this message shows the unchanged caller-visible pattern).

The remainder, and largest part, of this commit is then adjusting all scans in postgres to use the new APIs.

Author: Andres Freund, Haribabu Kommi, Alvaro Herrera
Discussion: https://postgr.es/m/20180703070645.wchpu5muyto5n647@alap3.anarazel.de
    https://postgr.es/m/20160812231527.GA690404@alvherre.pgsql
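To illustrate point I): callers of the systable_* API are unaffected by the internal switch to slots and keep seeing HeapTuples. The following sketch is editorial and illustrative only; count_relations_named and the particular catalog, index and scan key are arbitrary examples.

/*
 * Sketch only: catalog access through systable_* still hands back
 * HeapTuples, even though SysScanDesc now carries a slot internally.
 */
#include "postgres.h"

#include "access/genam.h"
#include "access/htup_details.h"
#include "access/table.h"
#include "catalog/indexing.h"
#include "catalog/pg_class.h"
#include "utils/fmgroids.h"
#include "utils/rel.h"

static void
count_relations_named(const char *relname)
{
	Relation	pg_class = table_open(RelationRelationId, AccessShareLock);
	ScanKeyData key;
	SysScanDesc sscan;
	HeapTuple	tup;

	ScanKeyInit(&key,
				Anum_pg_class_relname,
				BTEqualStrategyNumber, F_NAMEEQ,
				CStringGetDatum(relname));

	sscan = systable_beginscan(pg_class, ClassNameNspIndexId, true,
							   NULL, 1, &key);

	while ((tup = systable_getnext(sscan)) != NULL)
	{
		/* callers still see a HeapTuple here, as before this commit */
		Form_pg_class classform = (Form_pg_class) GETSTRUCT(tup);

		(void) classform;		/* inspect fields as needed */
	}

	systable_endscan(sscan);
	table_close(pg_class, AccessShareLock);
}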
/*
 * contrib/pgrowlocks/pgrowlocks.c
 *
 * Copyright (c) 2005-2006 Tatsuo Ishii
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose, without fee, and without a
 * written agreement is hereby granted, provided that the above
 * copyright notice and this paragraph and the following two
 * paragraphs appear in all copies.
 *
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT,
 * INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
 * LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
 * DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS
 * IS" BASIS, AND THE AUTHOR HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE,
 * SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 */

#include "postgres.h"

#include "access/heapam.h"
#include "access/multixact.h"
#include "access/relscan.h"
#include "access/tableam.h"
#include "access/xact.h"
#include "catalog/namespace.h"
#include "catalog/pg_authid.h"
#include "funcapi.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
#include "storage/procarray.h"
#include "utils/acl.h"
#include "utils/builtins.h"
#include "utils/rel.h"
#include "utils/snapmgr.h"
#include "utils/varlena.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(pgrowlocks);

/* ----------
 * pgrowlocks:
 * returns tids of rows being locked
 * ----------
 */

#define NCHARS 32

typedef struct
{
	Relation	rel;
	TableScanDesc scan;
	int			ncolumns;
} MyData;

#define		Atnum_tid		0
#define		Atnum_xmax		1
#define		Atnum_ismulti	2
#define		Atnum_xids		3
#define		Atnum_modes		4
#define		Atnum_pids		5

Datum
pgrowlocks(PG_FUNCTION_ARGS)
{
	FuncCallContext *funcctx;
	TableScanDesc scan;
	HeapScanDesc hscan;
	HeapTuple	tuple;
	TupleDesc	tupdesc;
	AttInMetadata *attinmeta;
	Datum		result;
	MyData	   *mydata;
	Relation	rel;

	if (SRF_IS_FIRSTCALL())
	{
		text	   *relname;
		RangeVar   *relrv;
		MemoryContext oldcontext;
		AclResult	aclresult;

		funcctx = SRF_FIRSTCALL_INIT();
		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

		/* Build a tuple descriptor for our result type */
		if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
			elog(ERROR, "return type must be a row type");

		attinmeta = TupleDescGetAttInMetadata(tupdesc);
		funcctx->attinmeta = attinmeta;

		relname = PG_GETARG_TEXT_PP(0);
		relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
		rel = relation_openrv(relrv, AccessShareLock);

		if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("\"%s\" is a partitioned table",
							RelationGetRelationName(rel)),
					 errdetail("Partitioned tables do not contain rows.")));
		else if (rel->rd_rel->relkind != RELKIND_RELATION)
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("\"%s\" is not a table",
							RelationGetRelationName(rel))));

		/*
		 * check permissions: must have SELECT on table or be in
		 * pg_stat_scan_tables
		 */
		aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
									  ACL_SELECT);
		if (aclresult != ACLCHECK_OK)
			aclresult = is_member_of_role(GetUserId(), DEFAULT_ROLE_STAT_SCAN_TABLES) ? ACLCHECK_OK : ACLCHECK_NO_PRIV;

		if (aclresult != ACLCHECK_OK)
			aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind),
						   RelationGetRelationName(rel));

		scan = table_beginscan(rel, GetActiveSnapshot(), 0, NULL);
		hscan = (HeapScanDesc) scan;
		mydata = palloc(sizeof(*mydata));
		mydata->rel = rel;
		mydata->scan = scan;
		mydata->ncolumns = tupdesc->natts;
		funcctx->user_fctx = mydata;

		MemoryContextSwitchTo(oldcontext);
	}

	funcctx = SRF_PERCALL_SETUP();
	attinmeta = funcctx->attinmeta;
	mydata = (MyData *) funcctx->user_fctx;
	scan = mydata->scan;
	hscan = (HeapScanDesc) scan;

	/* scan the relation */
	while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
	{
		HTSU_Result htsu;
		TransactionId xmax;
		uint16		infomask;

		/* must hold a buffer lock to call HeapTupleSatisfiesUpdate */
		LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);

		htsu = HeapTupleSatisfiesUpdate(tuple,
										GetCurrentCommandId(false),
										hscan->rs_cbuf);
		xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
		infomask = tuple->t_data->t_infomask;

		/*
		 * A tuple is locked if HTSU returns BeingUpdated.
		 */
		if (htsu == HeapTupleBeingUpdated)
		{
			char	  **values;

			values = (char **) palloc(mydata->ncolumns * sizeof(char *));

			values[Atnum_tid] = (char *) DirectFunctionCall1(tidout,
															 PointerGetDatum(&tuple->t_self));

			values[Atnum_xmax] = palloc(NCHARS * sizeof(char));
			snprintf(values[Atnum_xmax], NCHARS, "%d", xmax);
			if (infomask & HEAP_XMAX_IS_MULTI)
			{
				MultiXactMember *members;
				int			nmembers;
				bool		first = true;
				bool		allow_old;

				values[Atnum_ismulti] = pstrdup("true");

				allow_old = HEAP_LOCKED_UPGRADED(infomask);
				nmembers = GetMultiXactIdMembers(xmax, &members, allow_old,
												 false);
				if (nmembers == -1)
				{
					values[Atnum_xids] = "{0}";
					values[Atnum_modes] = "{transient upgrade status}";
					values[Atnum_pids] = "{0}";
				}
				else
				{
					int			j;

					values[Atnum_xids] = palloc(NCHARS * nmembers);
					values[Atnum_modes] = palloc(NCHARS * nmembers);
					values[Atnum_pids] = palloc(NCHARS * nmembers);

					strcpy(values[Atnum_xids], "{");
					strcpy(values[Atnum_modes], "{");
					strcpy(values[Atnum_pids], "{");

					for (j = 0; j < nmembers; j++)
					{
						char		buf[NCHARS];

						if (!first)
						{
							strcat(values[Atnum_xids], ",");
							strcat(values[Atnum_modes], ",");
							strcat(values[Atnum_pids], ",");
						}
						snprintf(buf, NCHARS, "%d", members[j].xid);
						strcat(values[Atnum_xids], buf);
						switch (members[j].status)
						{
							case MultiXactStatusUpdate:
								snprintf(buf, NCHARS, "Update");
								break;
							case MultiXactStatusNoKeyUpdate:
								snprintf(buf, NCHARS, "No Key Update");
								break;
							case MultiXactStatusForUpdate:
								snprintf(buf, NCHARS, "For Update");
								break;
							case MultiXactStatusForNoKeyUpdate:
								snprintf(buf, NCHARS, "For No Key Update");
								break;
							case MultiXactStatusForShare:
								snprintf(buf, NCHARS, "Share");
								break;
							case MultiXactStatusForKeyShare:
								snprintf(buf, NCHARS, "Key Share");
								break;
						}
						strcat(values[Atnum_modes], buf);
						snprintf(buf, NCHARS, "%d",
								 BackendXidGetPid(members[j].xid));
						strcat(values[Atnum_pids], buf);

						first = false;
					}

					strcat(values[Atnum_xids], "}");
					strcat(values[Atnum_modes], "}");
					strcat(values[Atnum_pids], "}");
				}
			}
			else
			{
				values[Atnum_ismulti] = pstrdup("false");

				values[Atnum_xids] = palloc(NCHARS * sizeof(char));
				snprintf(values[Atnum_xids], NCHARS, "{%d}", xmax);

				values[Atnum_modes] = palloc(NCHARS);
				if (infomask & HEAP_XMAX_LOCK_ONLY)
				{
					if (HEAP_XMAX_IS_SHR_LOCKED(infomask))
						snprintf(values[Atnum_modes], NCHARS, "{For Share}");
					else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
						snprintf(values[Atnum_modes], NCHARS, "{For Key Share}");
					else if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
					{
						if (tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED)
							snprintf(values[Atnum_modes], NCHARS, "{For Update}");
						else
							snprintf(values[Atnum_modes], NCHARS, "{For No Key Update}");
					}
					else
						/* neither keyshare nor exclusive bit it set */
						snprintf(values[Atnum_modes], NCHARS,
								 "{transient upgrade status}");
				}
				else
				{
					if (tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED)
						snprintf(values[Atnum_modes], NCHARS, "{Update}");
					else
						snprintf(values[Atnum_modes], NCHARS, "{No Key Update}");
				}

				values[Atnum_pids] = palloc(NCHARS * sizeof(char));
				snprintf(values[Atnum_pids], NCHARS, "{%d}",
						 BackendXidGetPid(xmax));
			}

			LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);

			/* build a tuple */
			tuple = BuildTupleFromCStrings(attinmeta, values);

			/* make the tuple into a datum */
			result = HeapTupleGetDatum(tuple);

			/*
			 * no need to pfree what we allocated; it's on a short-lived
			 * memory context anyway
			 */

			SRF_RETURN_NEXT(funcctx, result);
		}
		else
		{
			LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK);
		}
	}

	table_endscan(scan);
	table_close(mydata->rel, AccessShareLock);

	SRF_RETURN_DONE(funcctx);
}