
This reverts commit cb2fd7eac285b1b0a24eeb2b8ed4456b66c5a09f. Per numerous buildfarm members, it was incompatible with parallel query, and a test case assumed LP64. Back-patch to 9.5 (all supported versions). Discussion: https://postgr.es/m/20200321224920.GB1763544@rfd.leadboat.com
222 lines
8.5 KiB
C
/*-------------------------------------------------------------------------
|
|
*
|
|
* heapam.h
|
|
* POSTGRES heap access method definitions.
|
|
*
|
|
*
|
|
* Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
|
|
* Portions Copyright (c) 1994, Regents of the University of California
|
|
*
|
|
* src/include/access/heapam.h
|
|
*
|
|
*-------------------------------------------------------------------------
|
|
*/
|
|
#ifndef HEAPAM_H
|
|
#define HEAPAM_H
|
|
|
|
#include "access/relation.h" /* for backward compatibility */
|
|
#include "access/relscan.h"
|
|
#include "access/sdir.h"
|
|
#include "access/skey.h"
|
|
#include "access/table.h" /* for backward compatibility */
|
|
#include "access/tableam.h"
|
|
#include "nodes/lockoptions.h"
|
|
#include "nodes/primnodes.h"
|
|
#include "storage/bufpage.h"
|
|
#include "storage/lockdefs.h"
|
|
#include "utils/relcache.h"
|
|
#include "utils/snapshot.h"
|
|
|
|
|
|
/*
 * "options" flag bits for heap_insert.
 *
 * Most are direct aliases for the table-AM-level TABLE_INSERT_* flags so
 * that heap-level and tableam-level callers can share the same bit values;
 * HEAP_INSERT_SPECULATIVE is heap-specific.
 */
#define HEAP_INSERT_SKIP_WAL TABLE_INSERT_SKIP_WAL
#define HEAP_INSERT_SKIP_FSM TABLE_INSERT_SKIP_FSM
#define HEAP_INSERT_FROZEN TABLE_INSERT_FROZEN
#define HEAP_INSERT_NO_LOGICAL TABLE_INSERT_NO_LOGICAL
#define HEAP_INSERT_SPECULATIVE 0x0010	/* heap-only: speculative insertion */

/* Opaque handle for bulk-insert state; struct definition is private to heapam.c */
typedef struct BulkInsertStateData *BulkInsertState;

/* forward declaration, avoids pulling executor headers into this one */
struct TupleTableSlot;

/* strongest available tuple lock mode (see LockTupleMode in nodes/lockoptions.h) */
#define MaxLockTupleMode	LockTupleExclusive
|
|
|
|
/*
 * Descriptor for heap table scans.
 *
 * rs_base (the AM-independent part) is the first member, so a pointer to
 * this struct can be used wherever a TableScanDesc is expected.
 */
typedef struct HeapScanDescData
{
	TableScanDescData rs_base;	/* AM independent part of the descriptor */

	/* state set up at initscan time */
	BlockNumber rs_nblocks;		/* total number of blocks in rel */
	BlockNumber rs_startblock;	/* block # to start at */
	BlockNumber rs_numblocks;	/* max number of blocks to scan */
	/* rs_numblocks is usually InvalidBlockNumber, meaning "scan whole rel" */

	/* scan current state */
	bool		rs_inited;		/* false = scan not init'd yet */
	BlockNumber rs_cblock;		/* current block # in scan, if any */
	Buffer		rs_cbuf;		/* current buffer in scan, if any */
	/* NB: if rs_cbuf is not InvalidBuffer, we hold a pin on that buffer */

	BufferAccessStrategy rs_strategy;	/* access strategy for reads */

	HeapTupleData rs_ctup;		/* current tuple in scan, if any */

	/* these fields only used in page-at-a-time mode and for bitmap scans */
	int			rs_cindex;		/* current tuple's index in vistuples */
	int			rs_ntuples;		/* number of visible tuples on page */
	OffsetNumber rs_vistuples[MaxHeapTuplesPerPage];	/* their offsets */
} HeapScanDescData;

typedef struct HeapScanDescData *HeapScanDesc;
|
|
|
|
/*
 * Descriptor for fetches from heap via an index.
 *
 * xs_base (the AM-independent part) is the first member, so a pointer to
 * this struct can be used wherever an IndexFetchTableData is expected.
 */
typedef struct IndexFetchHeapData
{
	IndexFetchTableData xs_base;	/* AM independent part of the descriptor */

	Buffer		xs_cbuf;		/* current heap buffer in scan, if any */
	/* NB: if xs_cbuf is not InvalidBuffer, we hold a pin on that buffer */
} IndexFetchHeapData;
|
|
|
|
/*
 * Result codes for HeapTupleSatisfiesVacuum.
 *
 * These classify a tuple's visibility from VACUUM's point of view: whether
 * it can be removed now, must be kept, or is still being inserted/deleted
 * by an in-progress transaction.
 */
typedef enum
{
	HEAPTUPLE_DEAD,				/* tuple is dead and deletable */
	HEAPTUPLE_LIVE,				/* tuple is live (committed, no deleter) */
	HEAPTUPLE_RECENTLY_DEAD,	/* tuple is dead, but not deletable yet */
	HEAPTUPLE_INSERT_IN_PROGRESS,	/* inserting xact is still in progress */
	HEAPTUPLE_DELETE_IN_PROGRESS	/* deleting xact is still in progress */
} HTSV_Result;
|
|
|
|
/* ----------------
 *		function prototypes for heap access method
 *
 * heap_create, heap_create_with_catalog, and heap_drop_with_catalog
 * are declared in catalog/heap.h
 * ----------------
 */


/*
 * HeapScanIsValid
 *		True iff the heap scan is valid (i.e., non-NULL).
 */
#define HeapScanIsValid(scan) PointerIsValid(scan)
|
|
|
|
/* scan lifecycle: begin, limit, fetch-page, rescan, end, iterate */
extern TableScanDesc heap_beginscan(Relation relation, Snapshot snapshot,
									int nkeys, ScanKey key,
									ParallelTableScanDesc parallel_scan,
									uint32 flags);
extern void heap_setscanlimits(TableScanDesc scan, BlockNumber startBlk,
							   BlockNumber endBlk);
extern void heapgetpage(TableScanDesc scan, BlockNumber page);
extern void heap_rescan(TableScanDesc scan, ScanKey key, bool set_params,
						bool allow_strat, bool allow_sync, bool allow_pagemode);
extern void heap_endscan(TableScanDesc scan);
extern HeapTuple heap_getnext(TableScanDesc scan, ScanDirection direction);
extern bool heap_getnextslot(TableScanDesc sscan,
							 ScanDirection direction, struct TupleTableSlot *slot);

/* single-tuple fetch and HOT-chain search within an already-pinned buffer */
extern bool heap_fetch(Relation relation, Snapshot snapshot,
					   HeapTuple tuple, Buffer *userbuf);
extern bool heap_hot_search_buffer(ItemPointer tid, Relation relation,
								   Buffer buffer, Snapshot snapshot, HeapTuple heapTuple,
								   bool *all_dead, bool first_call);

extern void heap_get_latest_tid(TableScanDesc scan, ItemPointer tid);
extern void setLastTid(const ItemPointer tid);

/* bulk-insert state management (used with heap_insert/heap_multi_insert) */
extern BulkInsertState GetBulkInsertState(void);
extern void FreeBulkInsertState(BulkInsertState);
extern void ReleaseBulkInsertStatePin(BulkInsertState bistate);
|
|
|
|
/* tuple modification: insert, multi-insert, delete, update, lock */
extern void heap_insert(Relation relation, HeapTuple tup, CommandId cid,
						int options, BulkInsertState bistate);
extern void heap_multi_insert(Relation relation, struct TupleTableSlot **slots,
							  int ntuples, CommandId cid, int options,
							  BulkInsertState bistate);
extern TM_Result heap_delete(Relation relation, ItemPointer tid,
							 CommandId cid, Snapshot crosscheck, bool wait,
							 struct TM_FailureData *tmfd, bool changingPart);
/* resolution of speculative (ON CONFLICT) insertions */
extern void heap_finish_speculative(Relation relation, ItemPointer tid);
extern void heap_abort_speculative(Relation relation, ItemPointer tid);
extern TM_Result heap_update(Relation relation, ItemPointer otid,
							 HeapTuple newtup,
							 CommandId cid, Snapshot crosscheck, bool wait,
							 struct TM_FailureData *tmfd, LockTupleMode *lockmode);
extern TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
								 CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
								 bool follow_update,
								 Buffer *buffer, struct TM_FailureData *tmfd);

/* non-transactional in-place update (catalog use) and freeze support */
extern void heap_inplace_update(Relation relation, HeapTuple tuple);
extern bool heap_freeze_tuple(HeapTupleHeader tuple,
							  TransactionId relfrozenxid, TransactionId relminmxid,
							  TransactionId cutoff_xid, TransactionId cutoff_multi);
extern bool heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
									MultiXactId cutoff_multi, Buffer buf);
extern bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple);

/* "simple" variants: error out on concurrent-update failure instead of
 * returning a TM_Result for the caller to handle */
extern void simple_heap_insert(Relation relation, HeapTuple tup);
extern void simple_heap_delete(Relation relation, ItemPointer tid);
extern void simple_heap_update(Relation relation, ItemPointer otid,
							   HeapTuple tup);

extern void heap_sync(Relation relation);

extern TransactionId heap_compute_xid_horizon_for_tuples(Relation rel,
														 ItemPointerData *items,
														 int nitems);
|
|
|
|
/* in heap/pruneheap.c */
extern void heap_page_prune_opt(Relation relation, Buffer buffer);
extern int	heap_page_prune(Relation relation, Buffer buffer,
							TransactionId OldestXmin,
							bool report_stats, TransactionId *latestRemovedXid);
extern void heap_page_prune_execute(Buffer buffer,
									OffsetNumber *redirected, int nredirected,
									OffsetNumber *nowdead, int ndead,
									OffsetNumber *nowunused, int nunused);
extern void heap_get_root_tuples(Page page, OffsetNumber *root_offsets);

/* in heap/syncscan.c -- synchronized-scan start-point tracking */
extern void ss_report_location(Relation rel, BlockNumber location);
extern BlockNumber ss_get_location(Relation rel, BlockNumber relnblocks);
extern void SyncScanShmemInit(void);
extern Size SyncScanShmemSize(void);

/* in heap/vacuumlazy.c */
struct VacuumParams;			/* forward declaration; see commands/vacuum.h */
extern void heap_vacuum_rel(Relation onerel,
							struct VacuumParams *params, BufferAccessStrategy bstrategy);
|
|
|
|
/* in heap/heapam_visibility.c -- tuple visibility checks against snapshots */
extern bool HeapTupleSatisfiesVisibility(HeapTuple stup, Snapshot snapshot,
										 Buffer buffer);
extern TM_Result HeapTupleSatisfiesUpdate(HeapTuple stup, CommandId curcid,
										  Buffer buffer);
extern HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple stup, TransactionId OldestXmin,
											Buffer buffer);
extern void HeapTupleSetHintBits(HeapTupleHeader tuple, Buffer buffer,
								 uint16 infomask, TransactionId xid);
extern bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple);
extern bool XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot);
extern bool HeapTupleIsSurelyDead(HeapTuple htup, TransactionId OldestXmin);

/*
 * To avoid leaking too much knowledge about reorderbuffer implementation
 * details this is implemented in reorderbuffer.c not heapam_visibility.c
 */
struct HTAB;
extern bool ResolveCminCmaxDuringDecoding(struct HTAB *tuplecid_data,
										  Snapshot snapshot,
										  HeapTuple htup,
										  Buffer buffer,
										  CommandId *cmin, CommandId *cmax);
|
|
|
|
#endif /* HEAPAM_H */
|