Initial pgindent run for v12.
This is still using the 2.0 version of pg_bsd_indent. I thought it would be
good to commit this separately, so as to document the differences between
2.0 and 2.1 behavior.

Discussion: https://postgr.es/m/16296.1558103386@sss.pgh.pa.us
parent 66a4bad83a
commit be76af171c
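Every hunk below is a mechanical whitespace change. As a reader's guide (this
sketch is illustrative only, not part of the commit), the main rules
pg_bsd_indent applies in this run are: binary operators gain surrounding
spaces, casts gain a trailing space, pointer declarators lose the space after
the star, and comment blocks are re-wrapped to the standard width. A minimal
compilable example, mirroring the g_int_compress(), internal_size(), and
mp_int_div_value() hunks below (stdint.h's int64_t stands in for PostgreSQL's
int64):

    #include <stdint.h>

    /* "int64_t * out" becomes "int64_t *out": no space after the star */
    static void
    pgindent_style_sketch(int *dr, int j, int64_t *out)
    {
        /* "dr[2*j+1]" becomes "dr[2 * j + 1]": spaces around binary operators */
        dr[2 * j + 1] = dr[2 * j];

        /* "(int64)dr[j]" becomes "(int64) dr[j]": a space after each cast */
        *out = (int64_t) dr[j] - (int64_t) dr[j + 1];
    }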
@@ -160,7 +160,7 @@ static inline bool invariant_l_nontarget_offset(BtreeCheckState *state,
OffsetNumber upperbound);
static Page palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum);
static inline BTScanInsert bt_mkscankey_pivotsearch(Relation rel,
IndexTuple itup);
IndexTuple itup);
static ItemId PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block,
Page page, OffsetNumber offset);
static inline ItemPointer BTreeTupleGetHeapTIDCareful(BtreeCheckState *state,
@@ -1029,7 +1029,7 @@ bt_target_page_check(BtreeCheckState *state)
/* Fingerprint leaf page tuples (those that point to the heap) */
if (state->heapallindexed && P_ISLEAF(topaque) && !ItemIdIsDead(itemid))
{
IndexTuple norm;
IndexTuple norm;

norm = bt_normalize_tuple(state, itup);
bloom_add_element(state->filter, (unsigned char *) norm,
@@ -1174,7 +1174,7 @@ bt_target_page_check(BtreeCheckState *state)
*/
else if (offset == max)
{
BTScanInsert rightkey;
BTScanInsert rightkey;

/* Get item in next/right page */
rightkey = bt_right_page_check_scankey(state);
@@ -1851,7 +1851,8 @@ bt_tuple_present_callback(Relation index, HeapTuple htup, Datum *values,
bool *isnull, bool tupleIsAlive, void *checkstate)
{
BtreeCheckState *state = (BtreeCheckState *) checkstate;
IndexTuple itup, norm;
IndexTuple itup,
norm;

Assert(state->heapallindexed);

@@ -1931,7 +1932,7 @@ bt_normalize_tuple(BtreeCheckState *state, IndexTuple itup)

for (i = 0; i < tupleDescriptor->natts; i++)
{
Form_pg_attribute att;
Form_pg_attribute att;

att = TupleDescAttr(tupleDescriptor, i);

@@ -156,7 +156,7 @@ cube_a_f8_f8(PG_FUNCTION_ARGS)
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("can't extend cube"),
errdetail("A cube cannot have more than %d dimensions.",
CUBE_MAX_DIM)));
CUBE_MAX_DIM)));

if (ARRNELEMS(ll) != dim)
ereport(ERROR,
@@ -220,7 +220,7 @@ cube_a_f8(PG_FUNCTION_ARGS)
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("array is too long"),
errdetail("A cube cannot have more than %d dimensions.",
CUBE_MAX_DIM)));
CUBE_MAX_DIM)));

dur = ARRPTR(ur);

@@ -260,7 +260,7 @@ cube_subset(PG_FUNCTION_ARGS)
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("array is too long"),
errdetail("A cube cannot have more than %d dimensions.",
CUBE_MAX_DIM)));
CUBE_MAX_DIM)));

size = IS_POINT(c) ? POINT_SIZE(dim) : CUBE_SIZE(dim);
result = (NDBOX *) palloc0(size);
@@ -1780,7 +1780,7 @@ cube_c_f8(PG_FUNCTION_ARGS)
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("can't extend cube"),
errdetail("A cube cannot have more than %d dimensions.",
CUBE_MAX_DIM)));
CUBE_MAX_DIM)));

if (IS_POINT(cube))
{
@@ -1828,7 +1828,7 @@ cube_c_f8_f8(PG_FUNCTION_ARGS)
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("can't extend cube"),
errdetail("A cube cannot have more than %d dimensions.",
CUBE_MAX_DIM)));
CUBE_MAX_DIM)));

if (IS_POINT(cube) && (x1 == x2))
{

@@ -922,8 +922,11 @@ check_selective_binary_conversion(RelOptInfo *baserel,
/* Skip dropped attributes (probably shouldn't see any here). */
if (attr->attisdropped)
continue;
/* Skip generated columns (COPY won't accept them in the column
* list) */

/*
* Skip generated columns (COPY won't accept them in the column
* list)
*/
if (attr->attgenerated)
continue;
*columns = lappend(*columns, makeString(pstrdup(attname)));

@@ -212,41 +212,44 @@ g_int_compress(PG_FUNCTION_ARGS)
*/
for (j = i = len - 1; i > 0 && lenr > 0; i--, j--)
{
int r_end = dr[i];
int r_start = r_end;
while (i > 0 && lenr > 0 && dr[i-1] == r_start - 1)
int r_end = dr[i];
int r_start = r_end;

while (i > 0 && lenr > 0 && dr[i - 1] == r_start - 1)
--r_start, --i, --lenr;
dr[2*j] = r_start;
dr[2*j+1] = r_end;
dr[2 * j] = r_start;
dr[2 * j + 1] = r_end;
}
/* just copy the rest, if any, as trivial ranges */
for (; i >= 0; i--, j--)
dr[2*j] = dr[2*j + 1] = dr[i];
dr[2 * j] = dr[2 * j + 1] = dr[i];

if (++j)
{
/*
* shunt everything down to start at the right place
*/
memmove((void *) &dr[0], (void *) &dr[2*j], 2*(len - j) * sizeof(int32));
memmove((void *) &dr[0], (void *) &dr[2 * j], 2 * (len - j) * sizeof(int32));
}

/*
* make "len" be number of array elements, not ranges
*/
len = 2*(len - j);
len = 2 * (len - j);
cand = 1;
while (len > MAXNUMRANGE * 2)
{
min = PG_INT64_MAX;
for (i = 2; i < len; i += 2)
if (min > ((int64)dr[i] - (int64)dr[i - 1]))
if (min > ((int64) dr[i] - (int64) dr[i - 1]))
{
min = ((int64)dr[i] - (int64)dr[i - 1]);
min = ((int64) dr[i] - (int64) dr[i - 1]);
cand = i;
}
memmove((void *) &dr[cand - 1], (void *) &dr[cand + 1], (len - cand - 1) * sizeof(int32));
len -= 2;
}

/*
* check sparseness of result
*/

@@ -298,10 +298,10 @@ internal_size(int *a, int len)
for (i = 0; i < len; i += 2)
{
if (!i || a[i] != a[i - 1]) /* do not count repeated range */
size += (int64)(a[i + 1]) - (int64)(a[i]) + 1;
size += (int64) (a[i + 1]) - (int64) (a[i]) + 1;
}

if (size > (int64)INT_MAX || size < (int64)INT_MIN)
if (size > (int64) INT_MAX || size < (int64) INT_MIN)
return -1; /* overflow */
return (int) size;
}

@@ -1153,8 +1153,9 @@ pgss_store(const char *query, uint64 queryId,
queryId = pgss_hash_string(query, query_len);

/*
* If we are unlucky enough to get a hash of zero(invalid), use queryID
* as 2 instead, queryID 1 is already in use for normal statements.
* If we are unlucky enough to get a hash of zero(invalid), use
* queryID as 2 instead, queryID 1 is already in use for normal
* statements.
*/
if (queryId == UINT64CONST(0))
queryId = UINT64CONST(2);

@@ -1138,7 +1138,7 @@ mp_int_mod(mp_int a, mp_int m, mp_int c)
}

mp_result
mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small * r)
mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small *r)
{
mpz_t vtmp;
mp_digit vbuf[MP_VALUE_DIGITS(value)];
@@ -1819,7 +1819,7 @@ mp_int_root(mp_int a, mp_small b, mp_int c)
}

mp_result
mp_int_to_int(mp_int z, mp_small * out)
mp_int_to_int(mp_int z, mp_small *out)
{
assert(z != NULL);

@@ -1850,7 +1850,7 @@ mp_int_to_int(mp_int z, mp_small * out)
}

mp_result
mp_int_to_uint(mp_int z, mp_usmall * out)
mp_int_to_uint(mp_int z, mp_usmall *out)
{
assert(z != NULL);

@@ -218,7 +218,7 @@ mp_result mp_int_div(mp_int a, mp_int b, mp_int q, mp_int r);
/** Sets `q` and `*r` to the quotent and remainder of `a / value`. Division by
powers of 2 is detected and handled efficiently. The remainder is pinned to
`0 <= *r < b`. Either of `q` or `r` may be NULL. */
mp_result mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small * r);
mp_result mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small *r);

/** Sets `q` and `r` to the quotient and remainder of `a / 2^p2`. This is a
special case for division by powers of two that is more efficient than
@@ -246,7 +246,7 @@ mp_result mp_int_expt_full(mp_int a, mp_int b, mp_int c);
The remainder is pinned to `0 <= r < value`. */
static inline
mp_result
mp_int_mod_value(mp_int a, mp_small value, mp_small * r)
mp_int_mod_value(mp_int a, mp_small value, mp_small *r)
{
return mp_int_div_value(a, value, 0, r);
}
@@ -339,11 +339,11 @@ mp_int_sqrt(mp_int a, mp_int c)

/** Returns `MP_OK` if `z` is representable as `mp_small`, else `MP_RANGE`.
If `out` is not NULL, `*out` is set to the value of `z` when `MP_OK`. */
mp_result mp_int_to_int(mp_int z, mp_small * out);
mp_result mp_int_to_int(mp_int z, mp_small *out);

/** Returns `MP_OK` if `z` is representable as `mp_usmall`, or `MP_RANGE`.
If `out` is not NULL, `*out` is set to the value of `z` when `MP_OK`. */
mp_result mp_int_to_uint(mp_int z, mp_usmall * out);
mp_result mp_int_to_uint(mp_int z, mp_usmall *out);

/** Converts `z` to a zero-terminated string of characters in the specified
`radix`, writing at most `limit` characters to `str` including the

@@ -339,7 +339,7 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo)

InitDirtySnapshot(SnapshotDirty);

nblocks = hscan->rs_nblocks; /* # blocks to be scanned */
nblocks = hscan->rs_nblocks; /* # blocks to be scanned */

/* scan the relation */
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)

@@ -1858,7 +1858,7 @@ postgresExecForeignInsert(EState *estate,
if (fmstate->aux_fmstate)
resultRelInfo->ri_FdwState = fmstate->aux_fmstate;
rslot = execute_foreign_modify(estate, resultRelInfo, CMD_INSERT,
slot, planSlot);
slot, planSlot);
/* Revert that change */
if (fmstate->aux_fmstate)
resultRelInfo->ri_FdwState = fmstate;
@@ -1934,11 +1934,11 @@ postgresBeginForeignInsert(ModifyTableState *mtstate,
bool doNothing = false;

/*
* If the foreign table we are about to insert routed rows into is also
* an UPDATE subplan result rel that will be updated later, proceeding
* with the INSERT will result in the later UPDATE incorrectly modifying
* those routed rows, so prevent the INSERT --- it would be nice if we
* could handle this case; but for now, throw an error for safety.
* If the foreign table we are about to insert routed rows into is also an
* UPDATE subplan result rel that will be updated later, proceeding with
* the INSERT will result in the later UPDATE incorrectly modifying those
* routed rows, so prevent the INSERT --- it would be nice if we could
* handle this case; but for now, throw an error for safety.
*/
if (plan && plan->operation == CMD_UPDATE &&
(resultRelInfo->ri_usesFdwDirectModify ||
@@ -3169,7 +3169,7 @@ adjust_foreign_grouping_path_cost(PlannerInfo *root,
if (!grouping_is_sortable(root->parse->groupClause) ||
!pathkeys_contained_in(pathkeys, root->group_pathkeys))
{
Path sort_path; /* dummy for result of cost_sort */
Path sort_path; /* dummy for result of cost_sort */

cost_sort(&sort_path,
root,
@@ -3191,7 +3191,7 @@ adjust_foreign_grouping_path_cost(PlannerInfo *root,
* add 1/4th of that default.
*/
double sort_multiplier = 1.0 + (DEFAULT_FDW_SORT_MULTIPLIER
- 1.0) * 0.25;
- 1.0) * 0.25;

*p_startup_cost *= sort_multiplier;
*p_run_cost *= sort_multiplier;
@@ -3773,6 +3773,7 @@ store_returning_result(PgFdwModifyState *fmstate,
fmstate->retrieved_attrs,
NULL,
fmstate->temp_cxt);

/*
* The returning slot will not necessarily be suitable to store
* heaptuples directly, so allow for conversion.
@@ -6059,8 +6060,8 @@ add_foreign_final_paths(PlannerInfo *root, RelOptInfo *input_rel,
/*
* Grouping and aggregation are not supported with FOR UPDATE/SHARE,
* so the input_rel should be a base, join, or ordered relation; and
* if it's an ordered relation, its input relation should be a base
* or join relation.
* if it's an ordered relation, its input relation should be a base or
* join relation.
*/
Assert(input_rel->reloptkind == RELOPT_BASEREL ||
input_rel->reloptkind == RELOPT_JOINREL ||

@@ -787,8 +787,8 @@ expand_tuple(HeapTuple *targetHeapTuple,
}

/*
* Now walk the missing attributes. If there is a missing value
* make space for it. Otherwise, it's going to be NULL.
* Now walk the missing attributes. If there is a missing value make
* space for it. Otherwise, it's going to be NULL.
*/
for (attnum = firstmissingnum;
attnum < natts;

@@ -394,17 +394,17 @@ ginVacuumPostingTree(GinVacuumState *gvs, BlockNumber rootBlkno)
* There is at least one empty page. So we have to rescan the tree
* deleting empty pages.
*/
Buffer buffer;
Buffer buffer;
DataPageDeleteStack root,
*ptr,
*tmp;
*ptr,
*tmp;

buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, rootBlkno,
RBM_NORMAL, gvs->strategy);

/*
* Lock posting tree root for cleanup to ensure there are no concurrent
* inserts.
* Lock posting tree root for cleanup to ensure there are no
* concurrent inserts.
*/
LockBufferForCleanup(buffer);

@@ -205,8 +205,8 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data)
while (segno < a_segno)
{
/*
* Once modification is started and page tail is copied, we've
* to copy unmodified segments.
* Once modification is started and page tail is copied, we've to
* copy unmodified segments.
*/
segsize = SizeOfGinPostingList(oldseg);
if (tailCopy)
@@ -257,12 +257,12 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data)
}

/*
* We're about to start modification of the page. So, copy tail of the
* page if it's not done already.
* We're about to start modification of the page. So, copy tail of
* the page if it's not done already.
*/
if (!tailCopy && segptr != segmentend)
{
int tailSize = segmentend - segptr;
int tailSize = segmentend - segptr;

tailCopy = (Pointer) palloc(tailSize);
memcpy(tailCopy, segptr, tailSize);
@@ -304,7 +304,7 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data)
segptr = (Pointer) oldseg;
if (segptr != segmentend && tailCopy)
{
int restSize = segmentend - segptr;
int restSize = segmentend - segptr;

Assert(writePtr + restSize <= PageGetSpecialPointer(page));
memcpy(writePtr, segptr, restSize);

@@ -839,16 +839,16 @@ gistNewBuffer(Relation r)
gistcheckpage(r, buffer);

/*
* Otherwise, recycle it if deleted, and too old to have any processes
* interested in it.
* Otherwise, recycle it if deleted, and too old to have any
* processes interested in it.
*/
if (gistPageRecyclable(page))
{
/*
* If we are generating WAL for Hot Standby then create a
* WAL record that will allow us to conflict with queries
* running on standby, in case they have snapshots older
* than the page's deleteXid.
* If we are generating WAL for Hot Standby then create a WAL
* record that will allow us to conflict with queries running
* on standby, in case they have snapshots older than the
* page's deleteXid.
*/
if (XLogStandbyInfoActive() && RelationNeedsWAL(r))
gistXLogPageReuse(r, blkno, GistPageGetDeleteXid(page));

@@ -246,7 +246,7 @@ hashtext(PG_FUNCTION_ARGS)
{
text *key = PG_GETARG_TEXT_PP(0);
Oid collid = PG_GET_COLLATION();
pg_locale_t mylocale = 0;
pg_locale_t mylocale = 0;
Datum result;

if (!collid)
@@ -271,7 +271,7 @@ hashtext(PG_FUNCTION_ARGS)
int32_t ulen = -1;
UChar *uchar = NULL;
Size bsize;
uint8_t *buf;
uint8_t *buf;

ulen = icu_to_uchar(&uchar, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));

@@ -302,7 +302,7 @@ hashtextextended(PG_FUNCTION_ARGS)
{
text *key = PG_GETARG_TEXT_PP(0);
Oid collid = PG_GET_COLLATION();
pg_locale_t mylocale = 0;
pg_locale_t mylocale = 0;
Datum result;

if (!collid)
@@ -328,7 +328,7 @@ hashtextextended(PG_FUNCTION_ARGS)
int32_t ulen = -1;
UChar *uchar = NULL;
Size bsize;
uint8_t *buf;
uint8_t *buf;

ulen = icu_to_uchar(&uchar, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));

@@ -1684,8 +1684,8 @@ void
heap_get_latest_tid(TableScanDesc sscan,
ItemPointer tid)
{
Relation relation = sscan->rs_rd;
Snapshot snapshot = sscan->rs_snapshot;
Relation relation = sscan->rs_rd;
Snapshot snapshot = sscan->rs_snapshot;
ItemPointerData ctid;
TransactionId priorXmax;

@@ -474,6 +474,7 @@ tuple_lock_retry:
HeapTupleHeaderGetCmin(tuple->t_data) >= cid)
{
tmfd->xmax = priorXmax;

/*
* Cmin is the problematic value, so store that. See
* above.
@@ -1172,7 +1173,7 @@ heapam_index_build_range_scan(Relation heapRelation,
Snapshot snapshot;
bool need_unregister_snapshot = false;
TransactionId OldestXmin;
BlockNumber previous_blkno = InvalidBlockNumber;
BlockNumber previous_blkno = InvalidBlockNumber;
BlockNumber root_blkno = InvalidBlockNumber;
OffsetNumber root_offsets[MaxHeapTuplesPerPage];

@@ -1263,7 +1264,7 @@ heapam_index_build_range_scan(Relation heapRelation,
/* Publish number of blocks to scan */
if (progress)
{
BlockNumber nblocks;
BlockNumber nblocks;

if (hscan->rs_base.rs_parallel != NULL)
{
@@ -1314,7 +1315,7 @@ heapam_index_build_range_scan(Relation heapRelation,
/* Report scan progress, if asked to. */
if (progress)
{
BlockNumber blocks_done = heapam_scan_get_blocks_done(hscan);
BlockNumber blocks_done = heapam_scan_get_blocks_done(hscan);

if (blocks_done != previous_blkno)
{
@@ -1668,7 +1669,7 @@ heapam_index_build_range_scan(Relation heapRelation,
/* Report scan progress one last time. */
if (progress)
{
BlockNumber blks_done;
BlockNumber blks_done;

if (hscan->rs_base.rs_parallel != NULL)
{
@@ -1720,7 +1721,7 @@ heapam_index_validate_scan(Relation heapRelation,
BlockNumber root_blkno = InvalidBlockNumber;
OffsetNumber root_offsets[MaxHeapTuplesPerPage];
bool in_index[MaxHeapTuplesPerPage];
BlockNumber previous_blkno = InvalidBlockNumber;
BlockNumber previous_blkno = InvalidBlockNumber;

/* state variables for the merge */
ItemPointer indexcursor = NULL;
@@ -1955,8 +1956,8 @@ static BlockNumber
heapam_scan_get_blocks_done(HeapScanDesc hscan)
{
ParallelBlockTableScanDesc bpscan = NULL;
BlockNumber startblock;
BlockNumber blocks_done;
BlockNumber startblock;
BlockNumber blocks_done;

if (hscan->rs_base.rs_parallel != NULL)
{
@@ -1974,7 +1975,7 @@ heapam_scan_get_blocks_done(HeapScanDesc hscan)
blocks_done = hscan->rs_cblock - startblock;
else
{
BlockNumber nblocks;
BlockNumber nblocks;

nblocks = bpscan != NULL ? bpscan->phs_nblocks : hscan->rs_nblocks;
blocks_done = nblocks - startblock +

@@ -652,7 +652,7 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
}
else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
{
int options = HEAP_INSERT_SKIP_FSM;
int options = HEAP_INSERT_SKIP_FSM;

if (!state->rs_use_wal)
options |= HEAP_INSERT_SKIP_WAL;

@@ -2295,16 +2295,16 @@ static struct varlena *
toast_decompress_datum_slice(struct varlena *attr, int32 slicelength)
{
struct varlena *result;
int32 rawsize;
int32 rawsize;

Assert(VARATT_IS_COMPRESSED(attr));

result = (struct varlena *) palloc(slicelength + VARHDRSZ);

rawsize = pglz_decompress(TOAST_COMPRESS_RAWDATA(attr),
VARSIZE(attr) - TOAST_COMPRESS_HDRSZ,
VARDATA(result),
slicelength, false);
VARSIZE(attr) - TOAST_COMPRESS_HDRSZ,
VARDATA(result),
slicelength, false);
if (rawsize < 0)
elog(ERROR, "compressed data is corrupted");

@@ -164,7 +164,7 @@ static void lazy_cleanup_index(Relation indrel,
static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
static bool should_attempt_truncation(VacuumParams *params,
LVRelStats *vacrelstats);
LVRelStats *vacrelstats);
static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
static BlockNumber count_nondeletable_pages(Relation onerel,
LVRelStats *vacrelstats);
@@ -1067,9 +1067,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
* cheaper to get rid of it in the next pruning pass than
* to treat it like an indexed tuple. Finally, if index
* cleanup is disabled, the second heap pass will not
* execute, and the tuple will not get removed, so we
* must treat it like any other dead tuple that we choose
* to keep.
* execute, and the tuple will not get removed, so we must
* treat it like any other dead tuple that we choose to
* keep.
*
* If this were to happen for a tuple that actually needed
* to be deleted, we'd be in trouble, because it'd
@@ -1087,6 +1087,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
all_visible = false;
break;
case HEAPTUPLE_LIVE:

/*
* Count it as live. Not only is this natural, but it's
* also what acquire_sample_rows() does.
@@ -1251,13 +1252,14 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
else
{
/*
* Here, we have indexes but index cleanup is disabled. Instead of
* vacuuming the dead tuples on the heap, we just forget them.
* Here, we have indexes but index cleanup is disabled.
* Instead of vacuuming the dead tuples on the heap, we just
* forget them.
*
* Note that vacrelstats->dead_tuples could have tuples which
* became dead after HOT-pruning but are not marked dead yet.
* We do not process them because it's a very rare condition, and
* the next vacuum will process them anyway.
* We do not process them because it's a very rare condition,
* and the next vacuum will process them anyway.
*/
Assert(params->index_cleanup == VACOPT_TERNARY_DISABLED);
}

@@ -1811,11 +1811,11 @@ _bt_insert_parent(Relation rel,
/*
* Re-find and write lock the parent of buf.
*
* It's possible that the location of buf's downlink has changed
* since our initial _bt_search() descent. _bt_getstackbuf() will
* detect and recover from this, updating the stack, which ensures
* that the new downlink will be inserted at the correct offset.
* Even buf's parent may have changed.
* It's possible that the location of buf's downlink has changed since
* our initial _bt_search() descent. _bt_getstackbuf() will detect
* and recover from this, updating the stack, which ensures that the
* new downlink will be inserted at the correct offset. Even buf's
* parent may have changed.
*/
stack->bts_btentry = bknum;
pbuf = _bt_getstackbuf(rel, stack);

@@ -166,8 +166,8 @@ _bt_search(Relation rel, BTScanInsert key, Buffer *bufP, int access,
new_stack->bts_parent = stack_in;

/*
* Page level 1 is lowest non-leaf page level prior to leaves. So,
* if we're on the level 1 and asked to lock leaf page in write mode,
* Page level 1 is lowest non-leaf page level prior to leaves. So, if
* we're on the level 1 and asked to lock leaf page in write mode,
* then lock next page in write mode, because it must be a leaf.
*/
if (opaque->btpo.level == 1 && access == BT_WRITE)
@@ -1235,7 +1235,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)

/* Initialize remaining insertion scan key fields */
inskey.heapkeyspace = _bt_heapkeyspace(rel);
inskey.anynullkeys = false; /* unusued */
inskey.anynullkeys = false; /* unused */
inskey.nextkey = nextkey;
inskey.pivotsearch = false;
inskey.scantid = NULL;

@@ -962,10 +962,10 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
* much smaller.
*
* Since the truncated tuple is often smaller than the original
* tuple, it cannot just be copied in place (besides, we want
* to actually save space on the leaf page). We delete the
* original high key, and add our own truncated high key at the
* same offset.
* tuple, it cannot just be copied in place (besides, we want to
* actually save space on the leaf page). We delete the original
* high key, and add our own truncated high key at the same
* offset.
*
* Note that the page layout won't be changed very much. oitup is
* already located at the physical beginning of tuple space, so we

@@ -107,7 +107,7 @@ _bt_mkscankey(Relation rel, IndexTuple itup)
key = palloc(offsetof(BTScanInsertData, scankeys) +
sizeof(ScanKeyData) * indnkeyatts);
key->heapkeyspace = itup == NULL || _bt_heapkeyspace(rel);
key->anynullkeys = false; /* initial assumption */
key->anynullkeys = false; /* initial assumption */
key->nextkey = false;
key->pivotsearch = false;
key->keysz = Min(indnkeyatts, tupnatts);

@@ -39,8 +39,8 @@ static int
pairingheap_SpGistSearchItem_cmp(const pairingheap_node *a,
const pairingheap_node *b, void *arg)
{
const SpGistSearchItem *sa = (const SpGistSearchItem *) a;
const SpGistSearchItem *sb = (const SpGistSearchItem *) b;
const SpGistSearchItem *sa = (const SpGistSearchItem *) a;
const SpGistSearchItem *sb = (const SpGistSearchItem *) b;
SpGistScanOpaque so = (SpGistScanOpaque) arg;
int i;

@@ -79,7 +79,7 @@ pairingheap_SpGistSearchItem_cmp(const pairingheap_node *a,
}

static void
spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem * item)
spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem *item)
{
if (!so->state.attLeafType.attbyval &&
DatumGetPointer(item->value) != NULL)
@@ -97,7 +97,7 @@ spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem * item)
* Called in queue context
*/
static void
spgAddSearchItemToQueue(SpGistScanOpaque so, SpGistSearchItem * item)
spgAddSearchItemToQueue(SpGistScanOpaque so, SpGistSearchItem *item)
{
pairingheap_add(so->scanQueue, &item->phNode);
}
@@ -439,7 +439,7 @@ spgNewHeapItem(SpGistScanOpaque so, int level, ItemPointer heapPtr,
* the scan is not ordered AND the item satisfies the scankeys
*/
static bool
spgLeafTest(SpGistScanOpaque so, SpGistSearchItem * item,
spgLeafTest(SpGistScanOpaque so, SpGistSearchItem *item,
SpGistLeafTuple leafTuple, bool isnull,
bool *reportedSome, storeRes_func storeRes)
{
@@ -530,7 +530,7 @@ spgLeafTest(SpGistScanOpaque so, SpGistSearchItem * item,
static void
spgInitInnerConsistentIn(spgInnerConsistentIn *in,
SpGistScanOpaque so,
SpGistSearchItem * item,
SpGistSearchItem *item,
SpGistInnerTuple innerTuple)
{
in->scankeys = so->keyData;
@@ -551,7 +551,7 @@ spgInitInnerConsistentIn(spgInnerConsistentIn *in,

static SpGistSearchItem *
spgMakeInnerItem(SpGistScanOpaque so,
SpGistSearchItem * parentItem,
SpGistSearchItem *parentItem,
SpGistNodeTuple tuple,
spgInnerConsistentOut *out, int i, bool isnull,
double *distances)
@@ -585,7 +585,7 @@ spgMakeInnerItem(SpGistScanOpaque so,
}

static void
spgInnerTest(SpGistScanOpaque so, SpGistSearchItem * item,
spgInnerTest(SpGistScanOpaque so, SpGistSearchItem *item,
SpGistInnerTuple innerTuple, bool isnull)
{
MemoryContext oldCxt = MemoryContextSwitchTo(so->tempCxt);
@@ -683,7 +683,7 @@ enum SpGistSpecialOffsetNumbers

static OffsetNumber
spgTestLeafTuple(SpGistScanOpaque so,
SpGistSearchItem * item,
SpGistSearchItem *item,
Page page, OffsetNumber offset,
bool isnull, bool isroot,
bool *reportedSome,

@@ -632,8 +632,8 @@ spg_text_leaf_consistent(PG_FUNCTION_ARGS)
res = (level >= queryLen) ||
DatumGetBool(DirectFunctionCall2Coll(text_starts_with,
PG_GET_COLLATION(),
out->leafValue,
PointerGetDatum(query)));
out->leafValue,
PointerGetDatum(query)));

if (!res) /* no need to consider remaining conditions */
break;

@@ -192,9 +192,9 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
* happened since VACUUM started.
*
* Note: we could make a tighter test by seeing if the xid is
* "running" according to the active snapshot; but snapmgr.c doesn't
* currently export a suitable API, and it's not entirely clear
* that a tighter test is worth the cycles anyway.
* "running" according to the active snapshot; but snapmgr.c
* doesn't currently export a suitable API, and it's not entirely
* clear that a tighter test is worth the cycles anyway.
*/
if (TransactionIdFollowsOrEquals(dt->xid, bds->myXmin))
spgAddPendingTID(bds, &dt->pointer);

@@ -94,7 +94,7 @@ TableScanDesc
table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key)
{
uint32 flags = SO_TYPE_SEQSCAN |
SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
Oid relid = RelationGetRelid(relation);
Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));

@@ -158,7 +158,7 @@ table_beginscan_parallel(Relation relation, ParallelTableScanDesc parallel_scan)
{
Snapshot snapshot;
uint32 flags = SO_TYPE_SEQSCAN |
SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;

Assert(RelationGetRelid(relation) == parallel_scan->phs_relid);

@@ -223,7 +223,7 @@ table_index_fetch_tuple_check(Relation rel,
void
table_get_latest_tid(TableScanDesc scan, ItemPointer tid)
{
Relation rel = scan->rs_rd;
Relation rel = scan->rs_rd;
const TableAmRoutine *tableam = rel->rd_tableam;

/*

@@ -570,9 +570,9 @@ AssignTransactionId(TransactionState s)

/*
* Ensure parent(s) have XIDs, so that a child always has an XID later
* than its parent. Mustn't recurse here, or we might get a stack overflow
* if we're at the bottom of a huge stack of subtransactions none of which
* have XIDs yet.
* than its parent. Mustn't recurse here, or we might get a stack
* overflow if we're at the bottom of a huge stack of subtransactions none
* of which have XIDs yet.
*/
if (isSubXact && !FullTransactionIdIsValid(s->parent->fullTransactionId))
{
@@ -2868,8 +2868,8 @@ StartTransactionCommand(void)
* just skipping the reset in StartTransaction() won't work.)
*/
static int save_XactIsoLevel;
static bool save_XactReadOnly;
static bool save_XactDeferrable;
static bool save_XactReadOnly;
static bool save_XactDeferrable;

void
SaveTransactionCharacteristics(void)
@@ -5193,7 +5193,7 @@ SerializeTransactionState(Size maxsize, char *start_address)
nxids = add_size(nxids, s->nChildXids);
}
Assert(SerializedTransactionStateHeaderSize + nxids * sizeof(TransactionId)
<= maxsize);
<= maxsize);

/* Copy them to our scratch space. */
workspace = palloc(nxids * sizeof(TransactionId));

@@ -6397,9 +6397,9 @@ StartupXLOG(void)
ereport(FATAL,
(errmsg("could not find redo location referenced by checkpoint record"),
errhint("If you are restoring from a backup, touch \"%s/recovery.signal\" and add required recovery options.\n"
"If you are not restoring from a backup, try removing the file \"%s/backup_label\".\n"
"Be careful: removing \"%s/backup_label\" will result in a corrupt cluster if restoring from a backup.",
DataDir, DataDir, DataDir)));
"If you are not restoring from a backup, try removing the file \"%s/backup_label\".\n"
"Be careful: removing \"%s/backup_label\" will result in a corrupt cluster if restoring from a backup.",
DataDir, DataDir, DataDir)));
}
}
else

@@ -848,7 +848,7 @@ objectsInSchemaToOids(ObjectType objtype, List *nspnames)

while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
Oid oid = ((Form_pg_proc) GETSTRUCT(tuple))->oid;
Oid oid = ((Form_pg_proc) GETSTRUCT(tuple))->oid;

objects = lappend_oid(objects, oid);
}
@@ -895,7 +895,7 @@ getRelationsInNamespace(Oid namespaceId, char relkind)

while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
Oid oid = ((Form_pg_class) GETSTRUCT(tuple))->oid;
Oid oid = ((Form_pg_class) GETSTRUCT(tuple))->oid;

relations = lappend_oid(relations, oid);
}
@@ -1311,7 +1311,7 @@ SetDefaultACL(InternalDefaultACL *iacls)
}
else
{
Oid defAclOid;
Oid defAclOid;

/* Prepare to insert or update pg_default_acl entry */
MemSet(values, 0, sizeof(values));
@@ -1384,7 +1384,7 @@ SetDefaultACL(InternalDefaultACL *iacls)
if (isNew)
InvokeObjectPostCreateHook(DefaultAclRelationId, defAclOid, 0);
else
InvokeObjectPostAlterHook(DefaultAclRelationId, defAclOid, 0);
InvokeObjectPostAlterHook(DefaultAclRelationId, defAclOid, 0);
}

if (HeapTupleIsValid(tuple))

@@ -476,15 +476,15 @@ GetNewRelFileNode(Oid reltablespace, Relation pg_class, char relpersistence)
Datum
pg_nextoid(PG_FUNCTION_ARGS)
{
Oid reloid = PG_GETARG_OID(0);
Name attname = PG_GETARG_NAME(1);
Oid idxoid = PG_GETARG_OID(2);
Relation rel;
Relation idx;
HeapTuple atttuple;
Oid reloid = PG_GETARG_OID(0);
Name attname = PG_GETARG_NAME(1);
Oid idxoid = PG_GETARG_OID(2);
Relation rel;
Relation idx;
HeapTuple atttuple;
Form_pg_attribute attform;
AttrNumber attno;
Oid newoid;
AttrNumber attno;
Oid newoid;

/*
* As this function is not intended to be used during normal running, and

@@ -2550,8 +2550,8 @@ AddRelationNewConstraints(Relation rel,
/*
* If the expression is just a NULL constant, we do not bother to make
* an explicit pg_attrdef entry, since the default behavior is
* equivalent. This applies to column defaults, but not for generation
* expressions.
* equivalent. This applies to column defaults, but not for
* generation expressions.
*
* Note a nonobvious property of this test: if the column is of a
* domain type, what we'll get is not a bare null Const but a

@@ -1236,8 +1236,7 @@ index_concurrently_create_copy(Relation heapRelation, Oid oldIndexId, const char
Anum_pg_class_reloptions, &isnull);

/*
* Extract the list of column names to be used for the index
* creation.
* Extract the list of column names to be used for the index creation.
*/
for (int i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
{
@@ -1270,8 +1269,8 @@ index_concurrently_create_copy(Relation heapRelation, Oid oldIndexId, const char
optionDatum,
INDEX_CREATE_SKIP_BUILD | INDEX_CREATE_CONCURRENT,
0,
true, /* allow table to be a system catalog? */
false, /* is_internal? */
true, /* allow table to be a system catalog? */
false, /* is_internal? */
NULL);

/* Close the relations used and clean up */
@@ -1540,7 +1539,7 @@ index_concurrently_swap(Oid newIndexId, Oid oldIndexId, const char *oldName)
values, nulls, replaces);
CatalogTupleUpdate(description, &tuple->t_self, tuple);

break; /* Assume there can be only one match */
break; /* Assume there can be only one match */
}

systable_endscan(sd);
@@ -1552,8 +1551,8 @@ index_concurrently_swap(Oid newIndexId, Oid oldIndexId, const char *oldName)
*/
if (get_rel_relispartition(oldIndexId))
{
List *ancestors = get_partition_ancestors(oldIndexId);
Oid parentIndexRelid = linitial_oid(ancestors);
List *ancestors = get_partition_ancestors(oldIndexId);
Oid parentIndexRelid = linitial_oid(ancestors);

DeleteInheritsTuple(oldIndexId, parentIndexRelid);
StoreSingleInheritance(newIndexId, parentIndexRelid, 1);
@@ -1583,7 +1582,11 @@ index_concurrently_swap(Oid newIndexId, Oid oldIndexId, const char *oldName)
newClassRel->pgstat_info->t_counts.t_tuples_fetched = tabentry->tuples_fetched;
newClassRel->pgstat_info->t_counts.t_blocks_fetched = tabentry->blocks_fetched;
newClassRel->pgstat_info->t_counts.t_blocks_hit = tabentry->blocks_hit;
/* The data will be sent by the next pgstat_report_stat() call. */

/*
* The data will be sent by the next pgstat_report_stat()
* call.
*/
}
}
}
@@ -1614,27 +1617,26 @@ index_concurrently_set_dead(Oid heapId, Oid indexId)
Relation userIndexRelation;

/*
* No more predicate locks will be acquired on this index, and we're
* about to stop doing inserts into the index which could show
* conflicts with existing predicate locks, so now is the time to move
* them to the heap relation.
* No more predicate locks will be acquired on this index, and we're about
* to stop doing inserts into the index which could show conflicts with
* existing predicate locks, so now is the time to move them to the heap
* relation.
*/
userHeapRelation = table_open(heapId, ShareUpdateExclusiveLock);
userIndexRelation = index_open(indexId, ShareUpdateExclusiveLock);
TransferPredicateLocksToHeapRelation(userIndexRelation);

/*
* Now we are sure that nobody uses the index for queries; they just
* might have it open for updating it. So now we can unset indisready
* and indislive, then wait till nobody could be using it at all
* anymore.
* Now we are sure that nobody uses the index for queries; they just might
* have it open for updating it. So now we can unset indisready and
* indislive, then wait till nobody could be using it at all anymore.
*/
index_set_state_flags(indexId, INDEX_DROP_SET_DEAD);

/*
* Invalidate the relcache for the table, so that after this commit
* all sessions will refresh the table's index list. Forgetting just
* the index's relcache entry is not enough.
* Invalidate the relcache for the table, so that after this commit all
* sessions will refresh the table's index list. Forgetting just the
* index's relcache entry is not enough.
*/
CacheInvalidateRelcache(userHeapRelation);

@@ -1786,7 +1788,7 @@ index_constraint_create(Relation heapRelation,
*/
if (OidIsValid(parentConstraintId))
{
ObjectAddress referenced;
ObjectAddress referenced;

ObjectAddressSet(referenced, ConstraintRelationId, parentConstraintId);
recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
@@ -2709,7 +2711,7 @@ index_build(Relation heapRelation,
PROGRESS_SCAN_BLOCKS_DONE,
PROGRESS_SCAN_BLOCKS_TOTAL
};
const int64 val[] = {
const int64 val[] = {
PROGRESS_CREATEIDX_PHASE_BUILD,
PROGRESS_CREATEIDX_SUBPHASE_INITIALIZE,
0, 0, 0, 0
@@ -3014,10 +3016,11 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
PROGRESS_SCAN_BLOCKS_DONE,
PROGRESS_SCAN_BLOCKS_TOTAL
};
const int64 val[] = {
const int64 val[] = {
PROGRESS_CREATEIDX_PHASE_VALIDATE_IDXSCAN,
0, 0, 0, 0
};

pgstat_progress_update_multi_param(5, index, val);
}

@@ -3080,7 +3083,7 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
PROGRESS_SCAN_BLOCKS_DONE,
PROGRESS_SCAN_BLOCKS_TOTAL
};
const int64 val[] = {
const int64 val[] = {
PROGRESS_CREATEIDX_PHASE_VALIDATE_SORT,
0, 0
};

@@ -3050,7 +3050,7 @@ getObjectDescription(const ObjectAddress *object)
StringInfoData opfam;

amprocDesc = table_open(AccessMethodProcedureRelationId,
AccessShareLock);
AccessShareLock);

ScanKeyInit(&skey[0],
Anum_pg_amproc_oid,

@@ -612,7 +612,7 @@ AggregateCreate(const char *aggName,

myself = ProcedureCreate(aggName,
aggNamespace,
replace, /* maybe replacement */
replace, /* maybe replacement */
false, /* doesn't return a set */
finaltype, /* returnType */
GetUserId(), /* proowner */
@@ -693,10 +693,9 @@ AggregateCreate(const char *aggName,

/*
* If we're replacing an existing entry, we need to validate that
* we're not changing anything that would break callers.
* Specifically we must not change aggkind or aggnumdirectargs,
* which affect how an aggregate call is treated in parse
* analysis.
* we're not changing anything that would break callers. Specifically
* we must not change aggkind or aggnumdirectargs, which affect how an
* aggregate call is treated in parse analysis.
*/
if (aggKind != oldagg->aggkind)
ereport(ERROR,

@@ -423,7 +423,11 @@ ProcedureCreate(const char *procedureName,
prokind == PROKIND_PROCEDURE
? errmsg("cannot change whether a procedure has output parameters")
: errmsg("cannot change return type of existing function"),
/* translator: first %s is DROP FUNCTION, DROP PROCEDURE or DROP AGGREGATE */

/*
* translator: first %s is DROP FUNCTION, DROP PROCEDURE or DROP
* AGGREGATE
*/
errhint("Use %s %s first.",
dropcmd,
format_procedure(oldproc->oid))));
@@ -450,7 +454,7 @@ ProcedureCreate(const char *procedureName,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot change return type of existing function"),
errdetail("Row type defined by OUT parameters is different."),
/* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
/* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
errhint("Use %s %s first.",
dropcmd,
format_procedure(oldproc->oid))));
@@ -495,7 +499,7 @@ ProcedureCreate(const char *procedureName,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot change name of input parameter \"%s\"",
old_arg_names[j]),
/* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
/* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
errhint("Use %s %s first.",
dropcmd,
format_procedure(oldproc->oid))));
@@ -521,7 +525,7 @@ ProcedureCreate(const char *procedureName,
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot remove parameter defaults from existing function"),
/* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
/* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
errhint("Use %s %s first.",
dropcmd,
format_procedure(oldproc->oid))));
@@ -549,7 +553,7 @@ ProcedureCreate(const char *procedureName,
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot change data type of existing parameter default value"),
/* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
/* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
errhint("Use %s %s first.",
dropcmd,
format_procedure(oldproc->oid))));
@@ -575,7 +579,7 @@ ProcedureCreate(const char *procedureName,
else
{
/* Creating a new procedure */
Oid newOid;
Oid newOid;

/* First, get default permissions and set up proacl */
proacl = get_user_default_acl(OBJECT_FUNCTION, proowner,

@@ -317,7 +317,7 @@ GetAllTablesPublications(void)
result = NIL;
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
Oid oid = ((Form_pg_publication) GETSTRUCT(tup))->oid;
Oid oid = ((Form_pg_publication) GETSTRUCT(tup))->oid;

result = lappend_oid(result, oid);
}

@@ -99,7 +99,7 @@ RelationCreateStorage(RelFileNode rnode, char relpersistence)
break;
default:
elog(ERROR, "invalid relpersistence: %c", relpersistence);
return NULL; /* placate compiler */
return NULL; /* placate compiler */
}

srel = smgropen(rnode, backend);

@@ -61,7 +61,7 @@ CreateAccessMethod(CreateAmStmt *stmt)
errhint("Must be superuser to create an access method.")));

/* Check if name is used */
amoid = GetSysCacheOid1(AMNAME, Anum_pg_am_oid,
amoid = GetSysCacheOid1(AMNAME, Anum_pg_am_oid,
CStringGetDatum(stmt->amname));
if (OidIsValid(amoid))
{

@@ -70,8 +70,8 @@ typedef struct

static void rebuild_relation(Relation OldHeap, Oid indexOid, bool verbose);
static void copy_table_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
bool verbose, bool *pSwapToastByContent,
TransactionId *pFreezeXid, MultiXactId *pCutoffMulti);
bool verbose, bool *pSwapToastByContent,
TransactionId *pFreezeXid, MultiXactId *pCutoffMulti);
static List *get_tables_to_cluster(MemoryContext cluster_context);

@@ -614,7 +614,7 @@ rebuild_relation(Relation OldHeap, Oid indexOid, bool verbose)

/* Copy the heap data into the new table in the desired order */
copy_table_data(OIDNewHeap, tableOid, indexOid, verbose,
&swap_toast_by_content, &frozenXid, &cutoffMulti);
&swap_toast_by_content, &frozenXid, &cutoffMulti);

/*
* Swap the physical files of the target and transient tables, then

@@ -83,7 +83,7 @@ unique_key_recheck(PG_FUNCTION_ARGS)
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
errmsg("function \"%s\" must be fired for INSERT or UPDATE",
funcname)));
ItemPointerSetInvalid(&checktid); /* keep compiler quiet */
ItemPointerSetInvalid(&checktid); /* keep compiler quiet */
}

slot = table_slot_create(trigdata->tg_relation, NULL);
@@ -109,7 +109,7 @@ unique_key_recheck(PG_FUNCTION_ARGS)
tmptid = checktid;
{
IndexFetchTableData *scan = table_index_fetch_begin(trigdata->tg_relation);
bool call_again = false;
bool call_again = false;

if (!table_index_fetch_tuple(scan, &tmptid, SnapshotSelf, slot,
&call_again, NULL))

@@ -2033,7 +2033,7 @@ get_database_oid(const char *dbname, bool missing_ok)

/* We assume that there can be at most one matching tuple */
if (HeapTupleIsValid(dbtuple))
oid = ((Form_pg_database)GETSTRUCT(dbtuple))->oid;
oid = ((Form_pg_database) GETSTRUCT(dbtuple))->oid;
else
oid = InvalidOid;

@@ -606,7 +606,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
static void
ExplainPrintSettings(ExplainState *es)
{
int num;
int num;
struct config_generic **gucs;

/* bail out if information about settings not requested */
@@ -622,13 +622,13 @@ ExplainPrintSettings(ExplainState *es)

if (es->format != EXPLAIN_FORMAT_TEXT)
{
int i;
int i;

ExplainOpenGroup("Settings", "Settings", true, es);

for (i = 0; i < num; i++)
{
char *setting;
char *setting;
struct config_generic *conf = gucs[i];

setting = GetConfigOptionByName(conf->name, NULL, true);
@@ -640,14 +640,14 @@ ExplainPrintSettings(ExplainState *es)
}
else
{
int i;
StringInfoData str;
int i;
StringInfoData str;

initStringInfo(&str);

for (i = 0; i < num; i++)
{
char *setting;
char *setting;
struct config_generic *conf = gucs[i];

if (i > 0)
@@ -705,8 +705,8 @@ ExplainPrintPlan(ExplainState *es, QueryDesc *queryDesc)
ExplainNode(ps, NIL, NULL, NULL, es);

/*
* If requested, include information about GUC parameters with values
* that don't match the built-in defaults.
* If requested, include information about GUC parameters with values that
* don't match the built-in defaults.
*/
ExplainPrintSettings(es);
}
@@ -1674,7 +1674,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
if (es->costs && es->verbose &&
outerPlanState(planstate)->worker_jit_instrument)
{
PlanState *child = outerPlanState(planstate);
PlanState *child = outerPlanState(planstate);
int n;
SharedJitInstrumentation *w = child->worker_jit_instrument;

@@ -903,9 +903,9 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,

t_sql = DirectFunctionCall3Coll(replace_text,
C_COLLATION_OID,
t_sql,
CStringGetTextDatum("@extschema@"),
CStringGetTextDatum(qSchemaName));
t_sql,
CStringGetTextDatum("@extschema@"),
CStringGetTextDatum(qSchemaName));
}

/*
@@ -916,9 +916,9 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
{
t_sql = DirectFunctionCall3Coll(replace_text,
C_COLLATION_OID,
t_sql,
CStringGetTextDatum("MODULE_PATHNAME"),
CStringGetTextDatum(control->module_pathname));
t_sql,
CStringGetTextDatum("MODULE_PATHNAME"),
CStringGetTextDatum(control->module_pathname));
}

/* And now back to C string */

@@ -96,8 +96,8 @@ static void update_relispartition(Oid relationId, bool newval);
*/
struct ReindexIndexCallbackState
{
bool concurrent; /* flag from statement */
Oid locked_table_oid; /* tracks previously locked table */
bool concurrent; /* flag from statement */
Oid locked_table_oid; /* tracks previously locked table */
};

/*
@@ -396,7 +396,7 @@ WaitForOlderSnapshots(TransactionId limitXmin, bool progress)
{
if (progress)
{
PGPROC *holder = BackendIdGetProc(old_snapshots[i].backendId);
PGPROC *holder = BackendIdGetProc(old_snapshots[i].backendId);

pgstat_progress_update_param(PROGRESS_WAITFOR_CURRENT_PID,
holder->pid);
@@ -984,7 +984,7 @@ DefineIndex(Oid relationId,
*/
if (partitioned && stmt->relation && !stmt->relation->inh)
{
PartitionDesc pd = RelationGetPartitionDesc(rel);
PartitionDesc pd = RelationGetPartitionDesc(rel);

if (pd->nparts != 0)
flags |= INDEX_CREATE_INVALID;
@@ -3003,7 +3003,7 @@ ReindexRelationConcurrently(Oid relationOid, int options)
/* Get a session-level lock on each table. */
foreach(lc, relationLocks)
{
LockRelId *lockrelid = (LockRelId *) lfirst(lc);
LockRelId *lockrelid = (LockRelId *) lfirst(lc);

LockRelationIdForSession(lockrelid, ShareUpdateExclusiveLock);
}
@@ -3112,8 +3112,8 @@ ReindexRelationConcurrently(Oid relationOid, int options)

/*
* The index is now valid in the sense that it contains all currently
* interesting tuples. But since it might not contain tuples deleted just
* before the reference snap was taken, we have to wait out any
* interesting tuples. But since it might not contain tuples deleted
* just before the reference snap was taken, we have to wait out any
* transactions that might have older snapshots.
*/
pgstat_progress_update_param(PROGRESS_CREATEIDX_PHASE,
@@ -3250,7 +3250,7 @@ ReindexRelationConcurrently(Oid relationOid, int options)
*/
foreach(lc, relationLocks)
{
LockRelId *lockrelid = (LockRelId *) lfirst(lc);
LockRelId *lockrelid = (LockRelId *) lfirst(lc);

UnlockRelationIdForSession(lockrelid, ShareUpdateExclusiveLock);
}

@@ -465,9 +465,9 @@ UpdateStatisticsForTypeChange(Oid statsOid, Oid relationOid, int attnum,
elog(ERROR, "cache lookup failed for statistics object %u", statsOid);

/*
* When none of the defined statistics types contain datum values
* from the table's columns then there's no need to reset the stats.
* Functional dependencies and ndistinct stats should still hold true.
* When none of the defined statistics types contain datum values from the
* table's columns then there's no need to reset the stats. Functional
* dependencies and ndistinct stats should still hold true.
*/
if (!statext_is_kind_built(oldtup, STATS_EXT_MCV))
{

@ -379,7 +379,7 @@ static void ATExecCheckNotNull(AlteredTableInfo *tab, Relation rel,
const char *colName, LOCKMODE lockmode);
static bool NotNullImpliedByRelConstraints(Relation rel, Form_pg_attribute attr);
static bool ConstraintImpliedByRelConstraint(Relation scanrel,
List *partConstraint, List *existedConstraints);
List *partConstraint, List *existedConstraints);
static ObjectAddress ATExecColumnDefault(Relation rel, const char *colName,
Node *newDefault, LOCKMODE lockmode);
static ObjectAddress ATExecAddIdentity(Relation rel, const char *colName,
@ -1099,9 +1099,9 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId,
}

/*
* Now add any newly specified CHECK constraints to the new relation.
* Same as for defaults above, but these need to come after partitioning
* is set up.
* Now add any newly specified CHECK constraints to the new relation. Same
* as for defaults above, but these need to come after partitioning is set
* up.
*/
if (stmt->constraints)
AddRelationNewConstraints(rel, NIL, stmt->constraints,
@ -1401,9 +1401,9 @@ RangeVarCallbackForDropRelation(const RangeVar *rel, Oid relOid, Oid oldRelOid,
*/
if (IsSystemClass(relOid, classform) && relkind == RELKIND_INDEX)
{
HeapTuple locTuple;
Form_pg_index indexform;
bool indisvalid;
HeapTuple locTuple;
Form_pg_index indexform;
bool indisvalid;

locTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(relOid));
if (!HeapTupleIsValid(locTuple))
@ -1786,6 +1786,7 @@ ExecuteTruncateGuts(List *explicit_rels, List *relids, List *relids_logged,
{
Relation toastrel = relation_open(toast_relid,
AccessExclusiveLock);

RelationSetNewRelfilenode(toastrel,
toastrel->rd_rel->relpersistence);
table_close(toastrel, NoLock);
@ -4336,6 +4337,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
/* nothing to do here, oid columns don't exist anymore */
break;
case AT_SetTableSpace: /* SET TABLESPACE */

/*
* Only do this for partitioned tables and indexes, for which this
* is just a catalog change. Other relation types which have
@ -4626,8 +4628,8 @@ ATRewriteTables(AlterTableStmt *parsetree, List **wqueue, LOCKMODE lockmode)
{
/*
* If required, test the current data within the table against new
* constraints generated by ALTER TABLE commands, but don't rebuild
* data.
* constraints generated by ALTER TABLE commands, but don't
* rebuild data.
*/
if (tab->constraints != NIL || tab->verify_new_notnull ||
tab->partition_constraint != NULL)
@ -4798,8 +4800,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
{
/*
* If we are rebuilding the tuples OR if we added any new but not
* verified NOT NULL constraints, check all not-null constraints.
* This is a bit of overkill but it minimizes risk of bugs, and
* verified NOT NULL constraints, check all not-null constraints. This
* is a bit of overkill but it minimizes risk of bugs, and
* heap_attisnull is a pretty cheap test anyway.
*/
for (i = 0; i < newTupDesc->natts; i++)
@ -4941,8 +4943,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
{
/*
* If there's no rewrite, old and new table are guaranteed to
* have the same AM, so we can just use the old slot to
* verify new constraints etc.
* have the same AM, so we can just use the old slot to verify
* new constraints etc.
*/
insertslot = oldslot;
}
@ -6209,9 +6211,8 @@ ATExecSetNotNull(AlteredTableInfo *tab, Relation rel,
/*
* Ordinarily phase 3 must ensure that no NULLs exist in columns that
* are set NOT NULL; however, if we can find a constraint which proves
* this then we can skip that. We needn't bother looking if
* we've already found that we must verify some other NOT NULL
* constraint.
* this then we can skip that. We needn't bother looking if we've
* already found that we must verify some other NOT NULL constraint.
*/
if (!tab->verify_new_notnull &&
!NotNullImpliedByRelConstraints(rel, (Form_pg_attribute) GETSTRUCT(tuple)))
@ -10503,7 +10504,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
*/
if (tab->rewrite)
{
Relation newrel;
Relation newrel;

newrel = table_open(RelationGetRelid(rel), NoLock);
RelationClearMissing(newrel);
@ -10657,8 +10658,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
{
/*
* Changing the type of a column that is used by a
* generated column is not allowed by SQL standard.
* It might be doable with some thinking and effort.
* generated column is not allowed by SQL standard. It
* might be doable with some thinking and effort.
*/
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
@ -10862,13 +10863,13 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,

/*
* Here we go --- change the recorded column type and collation. (Note
* heapTup is a copy of the syscache entry, so okay to scribble on.)
* First fix up the missing value if any.
* heapTup is a copy of the syscache entry, so okay to scribble on.) First
* fix up the missing value if any.
*/
if (attTup->atthasmissing)
{
Datum missingval;
bool missingNull;
Datum missingval;
bool missingNull;

/* if rewrite is true the missing value should already be cleared */
Assert(tab->rewrite == 0);
@ -10881,7 +10882,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,

/* if it's a null array there is nothing to do */

if (! missingNull)
if (!missingNull)
{
/*
* Get the datum out of the array and repack it in a new array
@ -10890,12 +10891,12 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
* changed, only the array metadata.
*/

int one = 1;
bool isNull;
Datum valuesAtt[Natts_pg_attribute];
bool nullsAtt[Natts_pg_attribute];
bool replacesAtt[Natts_pg_attribute];
HeapTuple newTup;
int one = 1;
bool isNull;
Datum valuesAtt[Natts_pg_attribute];
bool nullsAtt[Natts_pg_attribute];
bool replacesAtt[Natts_pg_attribute];
HeapTuple newTup;

MemSet(valuesAtt, 0, sizeof(valuesAtt));
MemSet(nullsAtt, false, sizeof(nullsAtt));
@ -10910,12 +10911,12 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
attTup->attalign,
&isNull);
missingval = PointerGetDatum(
construct_array(&missingval,
1,
targettype,
tform->typlen,
tform->typbyval,
tform->typalign));
construct_array(&missingval,
1,
targettype,
tform->typlen,
tform->typbyval,
tform->typalign));

valuesAtt[Anum_pg_attribute_attmissingval - 1] = missingval;
replacesAtt[Anum_pg_attribute_attmissingval - 1] = true;
@ -12311,16 +12312,16 @@ ATExecSetTableSpaceNoStorage(Relation rel, Oid newTableSpace)
Oid reloid = RelationGetRelid(rel);

/*
* Shouldn't be called on relations having storage; these are processed
* in phase 3.
* Shouldn't be called on relations having storage; these are processed in
* phase 3.
*/
Assert(!RELKIND_HAS_STORAGE(rel->rd_rel->relkind));

/* Can't allow a non-shared relation in pg_global */
if (newTableSpace == GLOBALTABLESPACE_OID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("only shared relations can be placed in pg_global tablespace")));
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("only shared relations can be placed in pg_global tablespace")));

/*
* No work if no change in tablespace.
@ -15044,7 +15045,7 @@ ComputePartitionAttrs(ParseState *pstate, Relation rel, List *partParams, AttrNu
i = -1;
while ((i = bms_next_member(expr_attrs, i)) >= 0)
{
AttrNumber attno = i + FirstLowInvalidHeapAttributeNumber;
AttrNumber attno = i + FirstLowInvalidHeapAttributeNumber;

if (TupleDescAttr(RelationGetDescr(rel), attno - 1)->attgenerated)
ereport(ERROR,
@ -15202,7 +15203,7 @@ PartConstraintImpliedByRelConstraint(Relation scanrel,
bool
ConstraintImpliedByRelConstraint(Relation scanrel, List *testConstraint, List *provenConstraint)
{
List *existConstraint = list_copy(provenConstraint);
List *existConstraint = list_copy(provenConstraint);
TupleConstr *constr = RelationGetDescr(scanrel)->constr;
int num_check,
i;
@ -15240,8 +15241,8 @@ ConstraintImpliedByRelConstraint(Relation scanrel, List *testConstraint, List *p
* not-false and try to prove the same for testConstraint.
*
* Note that predicate_implied_by assumes its first argument is known
* immutable. That should always be true for both NOT NULL and
* partition constraints, so we don't test it here.
* immutable. That should always be true for both NOT NULL and partition
* constraints, so we don't test it here.
*/
return predicate_implied_by(testConstraint, existConstraint, true);
}

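Among the whitespace churn above, the `if (! missingNull)` hunk in ATExecAlterColumnType is a genuine token-spacing fix: pgindent drops the space between a unary operator and its operand. A small compilable sketch with a hypothetical flag:

    #include <stdbool.h>

    static int
    example_unary_spacing(bool missing_null)
    {
        /* written as "if (! missing_null)", pgindent emits the form below */
        if (!missing_null)
            return 1;
        return 0;
    }
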
@ -1143,9 +1143,9 @@ GetDefaultTablespace(char relpersistence, bool partitioned)

/*
* Allow explicit specification of database's default tablespace in
* default_tablespace without triggering permissions checks. Don't
* allow specifying that when creating a partitioned table, however,
* since the result is confusing.
* default_tablespace without triggering permissions checks. Don't allow
* specifying that when creating a partitioned table, however, since the
* result is confusing.
*/
if (result == MyDatabaseTableSpace)
{

@ -4245,9 +4245,9 @@ AfterTriggerExecute(EState *estate,
case AFTER_TRIGGER_FDW_REUSE:

/*
* Store tuple in the slot so that tg_trigtuple does not
* reference tuplestore memory. (It is formally possible for the
* trigger function to queue trigger events that add to the same
* Store tuple in the slot so that tg_trigtuple does not reference
* tuplestore memory. (It is formally possible for the trigger
* function to queue trigger events that add to the same
* tuplestore, which can push other tuples out of memory.) The
* distinction is academic, because we start with a minimal tuple
* that is stored as a heap tuple, constructed in different memory

@ -88,13 +88,13 @@ void
ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel)
{
VacuumParams params;
bool verbose = false;
bool skip_locked = false;
bool analyze = false;
bool freeze = false;
bool full = false;
bool disable_page_skipping = false;
ListCell *lc;
bool verbose = false;
bool skip_locked = false;
bool analyze = false;
bool freeze = false;
bool full = false;
bool disable_page_skipping = false;
ListCell *lc;

/* Set default value */
params.index_cleanup = VACOPT_TERNARY_DEFAULT;
@ -103,7 +103,7 @@ ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel)
/* Parse options list */
foreach(lc, vacstmt->options)
{
DefElem *opt = (DefElem *) lfirst(lc);
DefElem *opt = (DefElem *) lfirst(lc);

/* Parse common options for VACUUM and ANALYZE */
if (strcmp(opt->defname, "verbose") == 0)
@ -593,8 +593,9 @@ vacuum_open_relation(Oid relid, RangeVar *relation, int options,
/*
* Determine the log level.
*
* For manual VACUUM or ANALYZE, we emit a WARNING to match the log statements
* in the permission checks; otherwise, only log if the caller so requested.
* For manual VACUUM or ANALYZE, we emit a WARNING to match the log
* statements in the permission checks; otherwise, only log if the caller
* so requested.
*/
if (!IsAutoVacuumWorkerProcess())
elevel = WARNING;
@ -1328,9 +1329,9 @@ vac_update_datfrozenxid(void)
}

/*
* Some table AMs might not need per-relation xid / multixid
* horizons. It therefore seems reasonable to allow relfrozenxid and
* relminmxid to not be set (i.e. set to their respective Invalid*Id)
* Some table AMs might not need per-relation xid / multixid horizons.
* It therefore seems reasonable to allow relfrozenxid and relminmxid
* to not be set (i.e. set to their respective Invalid*Id)
* independently. Thus validate and compute horizon for each only if
* set.
*

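The ExecVacuum hunk above re-tabs a block of local declarations; pgindent pads the gap between type and variable name so every name in the block starts at a common tab stop. A sketch with hypothetical locals:

    #include <stdbool.h>

    static void
    example_decl_alignment(void)
    {
        bool        verbose = false;
        bool        skip_locked = false;
        int         noptions = 0;

        /* silence unused-variable warnings in this standalone sketch */
        (void) verbose;
        (void) skip_locked;
        (void) noptions;
    }
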
@ -2367,10 +2367,10 @@ get_last_attnums_walker(Node *node, LastAttnumInfo *info)
static void
ExecComputeSlotInfo(ExprState *state, ExprEvalStep *op)
{
PlanState *parent = state->parent;
PlanState *parent = state->parent;
TupleDesc desc = NULL;
const TupleTableSlotOps *tts_ops = NULL;
bool isfixed = false;
bool isfixed = false;

if (op->d.fetch.known_desc != NULL)
{
@ -3313,7 +3313,7 @@ ExecBuildAggTransCall(ExprState *state, AggState *aggstate,
*/
ExprState *
ExecBuildGroupingEqual(TupleDesc ldesc, TupleDesc rdesc,
const TupleTableSlotOps * lops, const TupleTableSlotOps * rops,
const TupleTableSlotOps *lops, const TupleTableSlotOps *rops,
int numCols,
const AttrNumber *keyColIdx,
const Oid *eqfunctions,

@ -4038,7 +4038,7 @@ void
ExecEvalSysVar(ExprState *state, ExprEvalStep *op, ExprContext *econtext,
TupleTableSlot *slot)
{
Datum d;
Datum d;

/* slot_getsysattr has sufficient defenses against bad attnums */
d = slot_getsysattr(slot,

@ -2551,7 +2551,7 @@ EvalPlanQualSlot(EPQState *epqstate,

if (relation)
*slot = table_slot_create(relation,
&epqstate->estate->es_tupleTable);
&epqstate->estate->es_tupleTable);
else
*slot = ExecAllocTableSlot(&epqstate->estate->es_tupleTable,
epqstate->origslot->tts_tupleDescriptor,

@ -1058,7 +1058,7 @@ ExecParallelRetrieveJitInstrumentation(PlanState *planstate,
* instrumentation in per-query context.
*/
ibytes = offsetof(SharedJitInstrumentation, jit_instr)
+ mul_size(shared_jit->num_workers, sizeof(JitInstrumentation));
+ mul_size(shared_jit->num_workers, sizeof(JitInstrumentation));
planstate->worker_jit_instrument =
MemoryContextAlloc(planstate->state->es_query_cxt, ibytes);

@ -1133,7 +1133,7 @@ ExecParallelCleanup(ParallelExecutorInfo *pei)
/* Accumulate JIT instrumentation, if any. */
if (pei->jit_instrumentation)
ExecParallelRetrieveJitInstrumentation(pei->planstate,
pei->jit_instrumentation);
pei->jit_instrumentation);

/* Free any serialized parameters. */
if (DsaPointerIsValid(pei->param_exec))

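The ExecBuildGroupingEqual hunk above (`const TupleTableSlotOps * lops` becoming `*lops`) shows why pgindent depends on an up-to-date typedefs list: only names known to be types get the asterisk attached to the variable. A minimal sketch with a hypothetical typedef:

    typedef struct ExampleOps
    {
        int         flags;
    } ExampleOps;

    /* with ExampleOps in the typedefs list, pgindent renders "*ops", not "* ops" */
    static int
    example_pointer_spacing(const ExampleOps *ops)
    {
        return ops->flags;
    }
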
@ -145,12 +145,12 @@ typedef struct PartitionDispatchData
TupleTableSlot *tupslot;
AttrNumber *tupmap;
int indexes[FLEXIBLE_ARRAY_MEMBER];
} PartitionDispatchData;
} PartitionDispatchData;

/* struct to hold result relations coming from UPDATE subplans */
typedef struct SubplanResultRelHashElem
{
Oid relid; /* hash key -- must be first */
Oid relid; /* hash key -- must be first */
ResultRelInfo *rri;
} SubplanResultRelHashElem;

@ -375,7 +375,7 @@ ExecFindPartition(ModifyTableState *mtstate,
if (proute->subplan_resultrel_htab)
{
Oid partoid = partdesc->oids[partidx];
SubplanResultRelHashElem *elem;
SubplanResultRelHashElem *elem;

elem = hash_search(proute->subplan_resultrel_htab,
&partoid, HASH_FIND, NULL);
@ -474,7 +474,7 @@ ExecHashSubPlanResultRelsByOid(ModifyTableState *mtstate,
ResultRelInfo *rri = &mtstate->resultRelInfo[i];
bool found;
Oid partoid = RelationGetRelid(rri->ri_RelationDesc);
SubplanResultRelHashElem *elem;
SubplanResultRelHashElem *elem;

elem = (SubplanResultRelHashElem *)
hash_search(htab, &partoid, HASH_ENTER, &found);
@ -762,9 +762,9 @@ ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate,
* It's safe to reuse these from the partition root, as we
* only process one tuple at a time (therefore we won't
* overwrite needed data in slots), and the results of
* projections are independent of the underlying
* storage. Projections and where clauses themselves don't
* store state / are independent of the underlying storage.
* projections are independent of the underlying storage.
* Projections and where clauses themselves don't store state
* / are independent of the underlying storage.
*/
leaf_part_rri->ri_onConflict->oc_ProjSlot =
rootResultRelInfo->ri_onConflict->oc_ProjSlot;
@ -892,7 +892,7 @@ ExecInitRoutingInfo(ModifyTableState *mtstate,
{
MemoryContext oldcxt;
PartitionRoutingInfo *partrouteinfo;
int rri_index;
int rri_index;

oldcxt = MemoryContextSwitchTo(proute->memcxt);

@ -1668,16 +1668,16 @@ ExecCreatePartitionPruneState(PlanState *planstate,
}
else
{
int pd_idx = 0;
int pp_idx;
int pd_idx = 0;
int pp_idx;

/*
* Some new partitions have appeared since plan time, and
* those are reflected in our PartitionDesc but were not
* present in the one used to construct subplan_map and
* subpart_map. So we must construct new and longer arrays
* where the partitions that were originally present map to the
* same place, and any added indexes map to -1, as if the
* where the partitions that were originally present map to
* the same place, and any added indexes map to -1, as if the
* new partitions had been pruned.
*/
pprune->subpart_map = palloc(sizeof(int) * partdesc->nparts);

@ -227,7 +227,7 @@ retry:
static bool
tuples_equal(TupleTableSlot *slot1, TupleTableSlot *slot2)
{
int attrnum;
int attrnum;

Assert(slot1->tts_tupleDescriptor->natts ==
slot2->tts_tupleDescriptor->natts);
@ -265,8 +265,8 @@ tuples_equal(TupleTableSlot *slot1, TupleTableSlot *slot2)

if (!DatumGetBool(FunctionCall2Coll(&typentry->eq_opr_finfo,
att->attcollation,
slot1->tts_values[attrnum],
slot2->tts_values[attrnum])))
slot1->tts_values[attrnum],
slot2->tts_values[attrnum])))
return false;
}

@ -406,7 +406,7 @@ ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot)
resultRelInfo->ri_TrigDesc->trig_insert_before_row)
{
if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
skip_tuple = true; /* "do nothing" */
skip_tuple = true; /* "do nothing" */
}

if (!skip_tuple)
@ -471,7 +471,7 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
{
if (!ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
tid, NULL, slot))
skip_tuple = true; /* "do nothing" */
skip_tuple = true; /* "do nothing" */
}

if (!skip_tuple)
@ -490,7 +490,7 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
if (resultRelInfo->ri_PartitionCheck)
ExecPartitionCheck(resultRelInfo, slot, estate, true);

simple_table_update(rel, tid, slot,estate->es_snapshot,
simple_table_update(rel, tid, slot, estate->es_snapshot,
&update_indexes);

if (resultRelInfo->ri_NumIndices > 0 && update_indexes)
@ -591,8 +591,8 @@ CheckSubscriptionRelkind(char relkind, const char *nspname,
const char *relname)
{
/*
* We currently only support writing to regular tables. However, give
* a more specific error for partitioned and foreign tables.
* We currently only support writing to regular tables. However, give a
* more specific error for partitioned and foreign tables.
*/
if (relkind == RELKIND_PARTITIONED_TABLE)
ereport(ERROR,
@ -600,14 +600,14 @@ CheckSubscriptionRelkind(char relkind, const char *nspname,
errmsg("cannot use relation \"%s.%s\" as logical replication target",
nspname, relname),
errdetail("\"%s.%s\" is a partitioned table.",
nspname, relname)));
nspname, relname)));
else if (relkind == RELKIND_FOREIGN_TABLE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot use relation \"%s.%s\" as logical replication target",
nspname, relname),
errdetail("\"%s.%s\" is a foreign table.",
nspname, relname)));
nspname, relname)));

if (relkind != RELKIND_RELATION)
ereport(ERROR,
@ -615,5 +615,5 @@ CheckSubscriptionRelkind(char relkind, const char *nspname,
errmsg("cannot use relation \"%s.%s\" as logical replication target",
nspname, relname),
errdetail("\"%s.%s\" is not a table.",
nspname, relname)));
nspname, relname)));
}

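The simple_table_update hunk above also fixes token spacing: `slot,estate->es_snapshot` gains a space after the comma, the same normalization pgindent applies to any argument list. A trivial compilable sketch:

    static int example_sum(int a, int b);

    static int
    example_call(void)
    {
        /* "example_sum(1,2)" comes out of pgindent as below */
        return example_sum(1, 2);
    }

    static int
    example_sum(int a, int b)
    {
        return a + b;
    }
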
@ -81,7 +81,8 @@ ExecScanFetch(ScanState *node,

/* Check if it meets the access-method conditions */
if (!(*recheckMtd) (node, slot))
return ExecClearTuple(slot); /* would not be returned by scan */
return ExecClearTuple(slot); /* would not be returned by
* scan */

return slot;
}

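The ExecScanFetch hunk shows a quirk of the 2.0 indent worth noting: a trailing comment pushed past the right margin is broken onto a continuation line. A hypothetical sketch of the same shape:

    static int
    example_trailing_comment(int value)
    {
        if (value < 0)
            return 0;           /* would not be returned by
                                 * callers */
        return value;
    }
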
@ -71,13 +71,12 @@

static TupleDesc ExecTypeFromTLInternal(List *targetList,
bool skipjunk);
static pg_attribute_always_inline void
slot_deform_heap_tuple(TupleTableSlot *slot, HeapTuple tuple, uint32 *offp,
static pg_attribute_always_inline void slot_deform_heap_tuple(TupleTableSlot *slot, HeapTuple tuple, uint32 *offp,
int natts);
static inline void tts_buffer_heap_store_tuple(TupleTableSlot *slot,
HeapTuple tuple,
Buffer buffer,
bool transfer_pin);
HeapTuple tuple,
Buffer buffer,
bool transfer_pin);
static void tts_heap_store_tuple(TupleTableSlot *slot, HeapTuple tuple, bool shouldFree);


@ -138,7 +137,7 @@ tts_virtual_getsysattr(TupleTableSlot *slot, int attnum, bool *isnull)
{
elog(ERROR, "virtual tuple table slot does not have system attributes");

return 0; /* silence compiler warnings */
return 0; /* silence compiler warnings */
}

/*
@ -164,7 +163,7 @@ tts_virtual_materialize(TupleTableSlot *slot)
for (int natt = 0; natt < desc->natts; natt++)
{
Form_pg_attribute att = TupleDescAttr(desc, natt);
Datum val;
Datum val;

if (att->attbyval || slot->tts_isnull[natt])
continue;
@ -200,7 +199,7 @@ tts_virtual_materialize(TupleTableSlot *slot)
for (int natt = 0; natt < desc->natts; natt++)
{
Form_pg_attribute att = TupleDescAttr(desc, natt);
Datum val;
Datum val;

if (att->attbyval || slot->tts_isnull[natt])
continue;
@ -210,7 +209,7 @@ tts_virtual_materialize(TupleTableSlot *slot)
if (att->attlen == -1 &&
VARATT_IS_EXTERNAL_EXPANDED(DatumGetPointer(val)))
{
Size data_length;
Size data_length;

/*
* We want to flatten the expanded value so that the materialized
@ -228,7 +227,7 @@ tts_virtual_materialize(TupleTableSlot *slot)
}
else
{
Size data_length = 0;
Size data_length = 0;

data = (char *) att_align_nominal(data, att->attalign);
data_length = att_addlength_datum(data_length, att->attlen, val);
@ -382,7 +381,7 @@ tts_heap_materialize(TupleTableSlot *slot)
static void
tts_heap_copyslot(TupleTableSlot *dstslot, TupleTableSlot *srcslot)
{
HeapTuple tuple;
HeapTuple tuple;
MemoryContext oldcontext;

oldcontext = MemoryContextSwitchTo(dstslot->tts_mcxt);
@ -499,7 +498,7 @@ tts_minimal_getsysattr(TupleTableSlot *slot, int attnum, bool *isnull)
{
elog(ERROR, "minimal tuple table slot does not have system attributes");

return 0; /* silence compiler warnings */
return 0; /* silence compiler warnings */
}

static void
@ -1077,8 +1076,10 @@ TupleTableSlot *
MakeTupleTableSlot(TupleDesc tupleDesc,
const TupleTableSlotOps *tts_ops)
{
Size basesz, allocsz;
Size basesz,
allocsz;
TupleTableSlot *slot;

basesz = tts_ops->base_slot_size;

/*
@ -1866,7 +1867,7 @@ void
slot_getsomeattrs_int(TupleTableSlot *slot, int attnum)
{
/* Check for caller errors */
Assert(slot->tts_nvalid < attnum); /* slot_getsomeattr checked */
Assert(slot->tts_nvalid < attnum); /* slot_getsomeattr checked */
Assert(attnum > 0);

if (unlikely(attnum > slot->tts_tupleDescriptor->natts))
@ -1876,8 +1877,8 @@ slot_getsomeattrs_int(TupleTableSlot *slot, int attnum)
slot->tts_ops->getsomeattrs(slot, attnum);

/*
* If the underlying tuple doesn't have enough attributes, tuple descriptor
* must have the missing attributes.
* If the underlying tuple doesn't have enough attributes, tuple
* descriptor must have the missing attributes.
*/
if (unlikely(slot->tts_nvalid < attnum))
{

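The MakeTupleTableSlot hunk (`Size basesz, allocsz;` split across two lines) shows the one-declarator-per-line layout pgindent enforces when several variables share a declaration. A compilable sketch with hypothetical sizes:

    #include <stddef.h>

    static size_t
    example_split_declarators(size_t base)
    {
        size_t      basesz,
                    allocsz;

        basesz = base;
        allocsz = basesz * 2;
        return allocsz;
    }
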
@ -1762,7 +1762,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
if (junkFilter)
{
TupleTableSlot *slot =
MakeSingleTupleTableSlot(NULL, &TTSOpsMinimalTuple);
MakeSingleTupleTableSlot(NULL, &TTSOpsMinimalTuple);

*junkFilter = ExecInitJunkFilter(tlist, slot);
}
@ -1929,7 +1929,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
if (junkFilter)
{
TupleTableSlot *slot =
MakeSingleTupleTableSlot(NULL, &TTSOpsMinimalTuple);
MakeSingleTupleTableSlot(NULL, &TTSOpsMinimalTuple);

*junkFilter = ExecInitJunkFilterConversion(tlist,
CreateTupleDescCopy(tupdesc),

@ -754,7 +754,7 @@ process_ordered_aggregate_single(AggState *aggstate,
oldAbbrevVal == newAbbrevVal &&
DatumGetBool(FunctionCall2Coll(&pertrans->equalfnOne,
pertrans->aggCollation,
oldVal, *newVal)))))
oldVal, *newVal)))))
{
/* equal to prior, so forget this one */
if (!pertrans->inputtypeByVal && !*isNull)

@ -700,10 +700,10 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
Assert(HeapTupleIsValid(tup));

/* Build the TupleTableSlot for the given tuple */
ExecStoreHeapTuple(tup, /* tuple to store */
ExecStoreHeapTuple(tup, /* tuple to store */
gm_state->gm_slots[reader], /* slot in which to store
* the tuple */
true); /* pfree tuple when done with it */
true); /* pfree tuple when done with it */

return true;
}

@ -750,7 +750,7 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags)
rclauses = lappend(rclauses, ExecInitExpr(lsecond(hclause->args),
(PlanState *) hjstate));
rhclauses = lappend(rhclauses, ExecInitExpr(lsecond(hclause->args),
innerPlanState(hjstate)));
innerPlanState(hjstate)));
hoperators = lappend_oid(hoperators, hclause->opno);
hcollations = lappend_oid(hcollations, hclause->inputcollid);
}

@ -192,9 +192,9 @@ IndexOnlyNext(IndexOnlyScanState *node)

/*
* Fill the scan tuple slot with data from the index. This might be
* provided in either HeapTuple or IndexTuple format. Conceivably
* an index AM might fill both fields, in which case we prefer the
* heap format, since it's probably a bit cheaper to fill a slot from.
* provided in either HeapTuple or IndexTuple format. Conceivably an
* index AM might fill both fields, in which case we prefer the heap
* format, since it's probably a bit cheaper to fill a slot from.
*/
if (scandesc->xs_hitup)
{

@ -242,7 +242,7 @@ IndexNextWithReorder(IndexScanState *node)
scandesc->xs_orderbynulls,
node) <= 0)
{
HeapTuple tuple;
HeapTuple tuple;

tuple = reorderqueue_pop(node);


@ -327,7 +327,7 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags)
/* node returns unmodified slots from the outer plan */
lrstate->ps.resultopsset = true;
lrstate->ps.resultops = ExecGetResultSlotOps(outerPlanState(lrstate),
&lrstate->ps.resultopsfixed);
&lrstate->ps.resultopsfixed);

/*
* LockRows nodes do no projections, so initialize projection info for

@ -865,6 +865,7 @@ ldelete:;
goto ldelete;

case TM_SelfModified:

/*
* This can be reached when following an update
* chain from a tuple updated by another session,
@ -1070,7 +1071,7 @@ ExecUpdate(ModifyTableState *mtstate,
{
if (!ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
tupleid, oldtuple, slot))
return NULL; /* "do nothing" */
return NULL; /* "do nothing" */
}

/* INSTEAD OF ROW UPDATE Triggers */
@ -1079,7 +1080,7 @@ ExecUpdate(ModifyTableState *mtstate,
{
if (!ExecIRUpdateTriggers(estate, resultRelInfo,
oldtuple, slot))
return NULL; /* "do nothing" */
return NULL; /* "do nothing" */
}
else if (resultRelInfo->ri_FdwRoutine)
{
@ -1401,6 +1402,7 @@ lreplace:;
return NULL;

case TM_SelfModified:

/*
* This can be reached when following an update
* chain from a tuple updated by another session,

@ -228,8 +228,8 @@ ExecReScanSeqScan(SeqScanState *node)
scan = node->ss.ss_currentScanDesc;

if (scan != NULL)
table_rescan(scan, /* scan desc */
NULL); /* new scan keys */
table_rescan(scan, /* scan desc */
NULL); /* new scan keys */

ExecScanReScan((ScanState *) node);
}

@ -684,7 +684,7 @@ execTuplesUnequal(TupleTableSlot *slot1,
/* Apply the type-specific equality function */
if (!DatumGetBool(FunctionCall2Coll(&eqfunctions[i],
collations[i],
attr1, attr2)))
attr1, attr2)))
{
result = true; /* they are unequal */
break;

@ -131,6 +131,7 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate, int eflags)
ExecInitScanTupleSlot(estate, &subquerystate->ss,
ExecGetResultType(subquerystate->subplan),
ExecGetResultSlotOps(subquerystate->subplan, NULL));

/*
* The slot used as the scantuple isn't the slot above (outside of EPQ),
* but the one from the node below.

@ -2154,8 +2154,8 @@ CheckPAMAuth(Port *port, const char *user, const char *password)
* later used inside the PAM conversation to pass the password to the
* authentication module.
*/
pam_passw_conv.appdata_ptr = unconstify(char *, password); /* from password above,
* not allocated */
pam_passw_conv.appdata_ptr = unconstify(char *, password); /* from password above,
* not allocated */

/* Optionally, one can set the service name in pg_hba.conf */
if (port->hba->pamservice && port->hba->pamservice[0] != '\0')

@ -68,10 +68,10 @@ static bool dummy_ssl_passwd_cb_called = false;
static bool ssl_is_server_start;

static int ssl_protocol_version_to_openssl(int v, const char *guc_name,
int loglevel);
int loglevel);
#ifndef SSL_CTX_set_min_proto_version
static int SSL_CTX_set_min_proto_version(SSL_CTX *ctx, int version);
static int SSL_CTX_set_max_proto_version(SSL_CTX *ctx, int version);
static int SSL_CTX_set_min_proto_version(SSL_CTX *ctx, int version);
static int SSL_CTX_set_max_proto_version(SSL_CTX *ctx, int version);
#endif


@ -192,9 +192,10 @@ be_tls_init(bool isServerStart)

if (ssl_min_protocol_version)
{
int ssl_ver = ssl_protocol_version_to_openssl(ssl_min_protocol_version,
"ssl_min_protocol_version",
isServerStart ? FATAL : LOG);
int ssl_ver = ssl_protocol_version_to_openssl(ssl_min_protocol_version,
"ssl_min_protocol_version",
isServerStart ? FATAL : LOG);

if (ssl_ver == -1)
goto error;
SSL_CTX_set_min_proto_version(context, ssl_ver);
@ -202,9 +203,10 @@ be_tls_init(bool isServerStart)

if (ssl_max_protocol_version)
{
int ssl_ver = ssl_protocol_version_to_openssl(ssl_max_protocol_version,
"ssl_max_protocol_version",
isServerStart ? FATAL : LOG);
int ssl_ver = ssl_protocol_version_to_openssl(ssl_max_protocol_version,
"ssl_max_protocol_version",
isServerStart ? FATAL : LOG);

if (ssl_ver == -1)
goto error;
SSL_CTX_set_max_proto_version(context, ssl_ver);
@ -1150,6 +1152,7 @@ be_tls_get_peer_serial(Port *port, char *ptr, size_t len)
serial = X509_get_serialNumber(port->peer);
b = ASN1_INTEGER_to_BN(serial, NULL);
decimal = BN_bn2dec(b);

BN_free(b);
strlcpy(ptr, decimal, len);
OPENSSL_free(decimal);

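In the be_tls_init hunks above, the long initializer's continuation arguments are re-indented so they line up under the opening parenthesis of the call. A sketch with hypothetical names (example_lookup stands in for any multi-argument call):

    static int example_lookup(int version, const char *name, int level);

    static int
    example_continuation_alignment(int requested)
    {
        int         ver = example_lookup(requested,
                                         "example_min_version",
                                         2);

        return ver;
    }

    static int
    example_lookup(int version, const char *name, int level)
    {
        (void) name;
        (void) level;
        return version;
    }
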
@ -476,10 +476,10 @@ bms_member_index(Bitmapset *a, int x)
}

/*
* Now add bits of the last word, but only those before the item.
* We can do that by applying a mask and then using popcount again.
* To get 0-based index, we want to count only preceding bits, not
* the item itself, so we subtract 1.
* Now add bits of the last word, but only those before the item. We can
* do that by applying a mask and then using popcount again. To get
* 0-based index, we want to count only preceding bits, not the item
* itself, so we subtract 1.
*/
mask = ((bitmapword) 1 << bitnum) - 1;
result += bmw_popcount(a->words[wordnum] & mask);

@ -161,9 +161,9 @@ clauselist_selectivity_simple(PlannerInfo *root,
int listidx;

/*
* If there's exactly one clause (and it was not estimated yet), just
* go directly to clause_selectivity(). None of what we might do below
* is relevant.
* If there's exactly one clause (and it was not estimated yet), just go
* directly to clause_selectivity(). None of what we might do below is
* relevant.
*/
if ((list_length(clauses) == 1) &&
bms_num_members(estimatedclauses) == 0)

@ -311,6 +311,7 @@ expand_partitioned_rtentry(PlannerInfo *root, RelOptInfo *relinfo,
if (!root->partColsUpdated)
root->partColsUpdated =
has_partition_attrs(parentrel, parentrte->updatedCols, NULL);

/*
* There shouldn't be any generated columns in the partition key.
*/

@ -3621,7 +3621,7 @@ create_limit_path(PlannerInfo *root, RelOptInfo *rel,
*/
void
adjust_limit_rows_costs(double *rows, /* in/out parameter */
Cost *startup_cost, /* in/out parameter */
Cost *startup_cost, /* in/out parameter */
Cost *total_cost, /* in/out parameter */
int64 offset_est,
int64 count_est)

@ -1053,8 +1053,8 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
InvalidOid, &found_whole_row);

/*
* Prevent this for the same reason as for constraints below.
* Note that defaults cannot contain any vars, so it's OK that the
* Prevent this for the same reason as for constraints below. Note
* that defaults cannot contain any vars, so it's OK that the
* error message refers to generated columns.
*/
if (found_whole_row)
@ -3845,11 +3845,11 @@ transformPartitionBound(ParseState *pstate, Relation parent,
* any necessary validation.
*/
result_spec->lowerdatums =
transformPartitionRangeBounds(pstate, spec->lowerdatums,
parent);
transformPartitionRangeBounds(pstate, spec->lowerdatums,
parent);
result_spec->upperdatums =
transformPartitionRangeBounds(pstate, spec->upperdatums,
parent);
transformPartitionRangeBounds(pstate, spec->upperdatums,
parent);
}
else
elog(ERROR, "unexpected partition strategy: %d", (int) strategy);
@ -3876,17 +3876,17 @@ transformPartitionRangeBounds(ParseState *pstate, List *blist,
i = j = 0;
foreach(lc, blist)
{
Node *expr = lfirst(lc);
Node *expr = lfirst(lc);
PartitionRangeDatum *prd = NULL;

/*
* Infinite range bounds -- "minvalue" and "maxvalue" -- get passed
* in as ColumnRefs.
* Infinite range bounds -- "minvalue" and "maxvalue" -- get passed in
* as ColumnRefs.
*/
if (IsA(expr, ColumnRef))
{
ColumnRef *cref = (ColumnRef *) expr;
char *cname = NULL;
ColumnRef *cref = (ColumnRef *) expr;
char *cname = NULL;

/*
* There should be a single field named either "minvalue" or
@ -3899,8 +3899,8 @@ transformPartitionRangeBounds(ParseState *pstate, List *blist,
if (cname == NULL)
{
/*
* ColumnRef is not in the desired single-field-name form.
* For consistency between all partition strategies, let the
* ColumnRef is not in the desired single-field-name form. For
* consistency between all partition strategies, let the
* expression transformation report any errors rather than
* doing it ourselves.
*/
@ -3965,8 +3965,8 @@ transformPartitionRangeBounds(ParseState *pstate, List *blist,
}

/*
* Once we see MINVALUE or MAXVALUE for one column, the remaining
* columns must be the same.
* Once we see MINVALUE or MAXVALUE for one column, the remaining columns
* must be the same.
*/
validateInfiniteBounds(pstate, result);

@ -4030,13 +4030,13 @@ transformPartitionBoundValue(ParseState *pstate, Node *val,

/*
* Check that the input expression's collation is compatible with one
* specified for the parent's partition key (partcollation). Don't
* throw an error if it's the default collation which we'll replace with
* the parent's collation anyway.
* specified for the parent's partition key (partcollation). Don't throw
* an error if it's the default collation which we'll replace with the
* parent's collation anyway.
*/
if (IsA(value, CollateExpr))
{
Oid exprCollOid = exprCollation(value);
Oid exprCollOid = exprCollation(value);

if (OidIsValid(exprCollOid) &&
exprCollOid != DEFAULT_COLLATION_OID &&

@ -38,7 +38,7 @@ typedef struct PartitionDirectoryData
{
MemoryContext pdir_mcxt;
HTAB *pdir_hash;
} PartitionDirectoryData;
} PartitionDirectoryData;

typedef struct PartitionDirectoryEntry
{
@ -74,9 +74,9 @@ RelationBuildPartitionDesc(Relation rel)

/*
* Get partition oids from pg_inherits. This uses a single snapshot to
* fetch the list of children, so while more children may be getting
* added concurrently, whatever this function returns will be accurate
* as of some well-defined point in time.
* fetch the list of children, so while more children may be getting added
* concurrently, whatever this function returns will be accurate as of
* some well-defined point in time.
*/
inhoids = find_inheritance_children(RelationGetRelid(rel), NoLock);
nparts = list_length(inhoids);
@ -122,14 +122,14 @@ RelationBuildPartitionDesc(Relation rel)
*
* Note that this algorithm assumes that PartitionBoundSpec we manage
* to fetch is the right one -- so this is only good enough for
* concurrent ATTACH PARTITION, not concurrent DETACH PARTITION
* or some hypothetical operation that changes the partition bounds.
* concurrent ATTACH PARTITION, not concurrent DETACH PARTITION or
* some hypothetical operation that changes the partition bounds.
*/
if (boundspec == NULL)
{
Relation pg_class;
SysScanDesc scan;
ScanKeyData key[1];
SysScanDesc scan;
ScanKeyData key[1];
Datum datum;
bool isnull;

@ -301,7 +301,7 @@ PartitionDirectoryLookup(PartitionDirectory pdir, Relation rel)
void
DestroyPartitionDirectory(PartitionDirectory pdir)
{
HASH_SEQ_STATUS status;
HASH_SEQ_STATUS status;
PartitionDirectoryEntry *pde;

hash_seq_init(&status, pdir->pdir_hash);

@ -57,7 +57,7 @@ static void libpqrcv_get_senderinfo(WalReceiverConn *conn,
char **sender_host, int *sender_port);
static char *libpqrcv_identify_system(WalReceiverConn *conn,
TimeLineID *primary_tli);
static int libpqrcv_server_version(WalReceiverConn *conn);
static int libpqrcv_server_version(WalReceiverConn *conn);
static void libpqrcv_readtimelinehistoryfile(WalReceiverConn *conn,
TimeLineID tli, char **filename,
char **content, int *len);

@ -468,8 +468,8 @@ ReorderBufferReturnTupleBuf(ReorderBuffer *rb, ReorderBufferTupleBuf *tuple)
Oid *
ReorderBufferGetRelids(ReorderBuffer *rb, int nrelids)
{
Oid *relids;
Size alloc_len;
Oid *relids;
Size alloc_len;

alloc_len = sizeof(Oid) * nrelids;

@ -1327,8 +1327,8 @@ ReorderBufferBuildTupleCidHash(ReorderBuffer *rb, ReorderBufferTXN *txn)
else
{
/*
* Maybe we already saw this tuple before in this transaction,
* but if so it must have the same cmin.
* Maybe we already saw this tuple before in this transaction, but
* if so it must have the same cmin.
*/
Assert(ent->cmin == change->data.tuplecid.cmin);

@ -2464,8 +2464,8 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
}
case REORDER_BUFFER_CHANGE_TRUNCATE:
{
Size size;
char *data;
Size size;
char *data;

/* account for the OIDs of truncated relations */
size = sizeof(Oid) * change->data.truncate.nrelids;
@ -2767,7 +2767,7 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
/* the base struct contains all the data, easy peasy */
case REORDER_BUFFER_CHANGE_TRUNCATE:
{
Oid *relids;
Oid *relids;

relids = ReorderBufferGetRelids(rb,
change->data.truncate.nrelids);

@ -730,11 +730,11 @@ copy_replication_slot(FunctionCallInfo fcinfo, bool logical_slot)
SpinLockRelease(&src->mutex);

/*
* Check if the source slot still exists and is valid. We regard it
* as invalid if the type of replication slot or name has been
* changed, or the restart_lsn either is invalid or has gone backward.
* (The restart_lsn could go backwards if the source slot is dropped
* and copied from an older slot during installation.)
* Check if the source slot still exists and is valid. We regard it as
* invalid if the type of replication slot or name has been changed,
* or the restart_lsn either is invalid or has gone backward. (The
* restart_lsn could go backwards if the source slot is dropped and
* copied from an older slot during installation.)
*
* Since erroring out will release and drop the destination slot we
* don't need to release it here.

@ -276,9 +276,8 @@ SyncRepWaitForLSN(XLogRecPtr lsn, bool commit)
WAIT_EVENT_SYNC_REP);

/*
* If the postmaster dies, we'll probably never get an
* acknowledgment, because all the wal sender processes will exit. So
* just bail out.
* If the postmaster dies, we'll probably never get an acknowledgment,
* because all the wal sender processes will exit. So just bail out.
*/
if (rc & WL_POSTMASTER_DEATH)
{

@ -808,11 +808,11 @@ WalRcvQuickDieHandler(SIGNAL_ARGS)
* anyway.
*
* Note we use _exit(2) not _exit(0). This is to force the postmaster
* into a system reset cycle if someone sends a manual SIGQUIT to a
* random backend. This is necessary precisely because we don't clean up
* our shared memory state. (The "dead man switch" mechanism in
* pmsignal.c should ensure the postmaster sees this as a crash, too, but
* no harm in being doubly sure.)
* into a system reset cycle if someone sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
* should ensure the postmaster sees this as a crash, too, but no harm in
* being doubly sure.)
*/
_exit(2);
}

@ -218,7 +218,7 @@ typedef struct
int write_head;
int read_heads[NUM_SYNC_REP_WAIT_MODE];
WalTimeSample last_read[NUM_SYNC_REP_WAIT_MODE];
} LagTracker;
} LagTracker;

static LagTracker *lag_tracker;

@ -1407,7 +1407,7 @@ WalSndWaitForWal(XLogRecPtr loc)
sleeptime = WalSndComputeSleeptime(GetCurrentTimestamp());

wakeEvents = WL_LATCH_SET | WL_EXIT_ON_PM_DEATH |
WL_SOCKET_READABLE | WL_TIMEOUT;
WL_SOCKET_READABLE | WL_TIMEOUT;

if (pq_is_send_pending())
wakeEvents |= WL_SOCKET_WRITEABLE;
@ -2255,7 +2255,7 @@ WalSndLoop(WalSndSendDataCallback send_data)
int wakeEvents;

wakeEvents = WL_LATCH_SET | WL_EXIT_ON_PM_DEATH | WL_TIMEOUT |
WL_SOCKET_READABLE;
WL_SOCKET_READABLE;

/*
* Use fresh timestamp, not last_processed, to reduce the chance

@ -279,8 +279,8 @@ dependency_degree(int numrows, HeapTuple *rows, int k, AttrNumber *dependency,
* build an array of SortItem(s) sorted using the multi-sort support
*
* XXX This relies on all stats entries pointing to the same tuple
* descriptor. For now that assumption holds, but it might change in
* the future for example if we support statistics on multiple tables.
* descriptor. For now that assumption holds, but it might change in the
* future for example if we support statistics on multiple tables.
*/
items = build_sorted_items(numrows, &nitems, rows, stats[0]->tupDesc,
mss, k, attnums_dep);
@ -300,8 +300,8 @@ dependency_degree(int numrows, HeapTuple *rows, int k, AttrNumber *dependency,
{
/*
* Check if the group ended, which may be either because we processed
* all the items (i==nitems), or because the i-th item is not equal
* to the preceding one.
* all the items (i==nitems), or because the i-th item is not equal to
* the preceding one.
*/
if (i == nitems ||
multi_sort_compare_dims(0, k - 2, &items[i - 1], &items[i], mss) != 0)

@ -67,7 +67,7 @@ static VacAttrStats **lookup_var_attr_stats(Relation rel, Bitmapset *attrs,
int nvacatts, VacAttrStats **vacatts);
static void statext_store(Relation pg_stext, Oid relid,
MVNDistinct *ndistinct, MVDependencies *dependencies,
MCVList * mcvlist, VacAttrStats **stats);
MCVList *mcvlist, VacAttrStats **stats);


/*
@ -317,7 +317,7 @@ lookup_var_attr_stats(Relation rel, Bitmapset *attrs,
static void
statext_store(Relation pg_stext, Oid statOid,
MVNDistinct *ndistinct, MVDependencies *dependencies,
MCVList * mcv, VacAttrStats **stats)
MCVList *mcv, VacAttrStats **stats)
{
HeapTuple stup,
oldtup;
@ -538,9 +538,9 @@ build_attnums_array(Bitmapset *attrs, int *numattrs)
{
/*
* Make sure the bitmap contains only user-defined attributes. As
* bitmaps can't contain negative values, this can be violated in
* two ways. Firstly, the bitmap might contain 0 as a member, and
* secondly the integer value might be larger than MaxAttrNumber.
* bitmaps can't contain negative values, this can be violated in two
* ways. Firstly, the bitmap might contain 0 as a member, and secondly
* the integer value might be larger than MaxAttrNumber.
*/
Assert(AttrNumberIsForUserDefinedAttr(j));
Assert(j <= MaxAttrNumber);
@ -600,7 +600,7 @@ build_sorted_items(int numrows, int *nitems, HeapTuple *rows, TupleDesc tdesc,
idx = 0;
for (i = 0; i < numrows; i++)
{
bool toowide = false;
bool toowide = false;

items[idx].values = &values[idx * numattrs];
items[idx].isnull = &isnull[idx * numattrs];
@ -608,8 +608,8 @@ build_sorted_items(int numrows, int *nitems, HeapTuple *rows, TupleDesc tdesc,
/* load the values/null flags from sample rows */
for (j = 0; j < numattrs; j++)
{
Datum value;
bool isnull;
Datum value;
bool isnull;

value = heap_getattr(rows[i], attnums[j], tdesc, &isnull);

@ -988,7 +988,7 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli
int listidx;
StatisticExtInfo *stat;
List *stat_clauses;
Selectivity simple_sel,
Selectivity simple_sel,
mcv_sel,
mcv_basesel,
mcv_totalsel,
@ -1006,9 +1006,9 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli
* Pre-process the clauses list to extract the attnums seen in each item.
* We need to determine if there's any clauses which will be useful for
* selectivity estimations with extended stats. Along the way we'll record
* all of the attnums for each clause in a list which we'll reference later
* so we don't need to repeat the same work again. We'll also keep track of
* all attnums seen.
* all of the attnums for each clause in a list which we'll reference
* later so we don't need to repeat the same work again. We'll also keep
* track of all attnums seen.
*
* We also skip clauses that we already estimated using different types of
* statistics (we treat them as incompatible).
@ -1066,9 +1066,10 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli
}

/*
* First compute "simple" selectivity, i.e. without the extended statistics,
* and essentially assuming independence of the columns/clauses. We'll then
* use the various selectivities computed from MCV list to improve it.
* First compute "simple" selectivity, i.e. without the extended
* statistics, and essentially assuming independence of the
* columns/clauses. We'll then use the various selectivities computed from
* MCV list to improve it.
*/
simple_sel = clauselist_selectivity_simple(root, stat_clauses, varRelid,
jointype, sjinfo, NULL);
@ -1105,16 +1106,16 @@ statext_clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid,
JoinType jointype, SpecialJoinInfo *sjinfo,
RelOptInfo *rel, Bitmapset **estimatedclauses)
{
Selectivity sel;
Selectivity sel;

/* First, try estimating clauses using a multivariate MCV list. */
sel = statext_mcv_clauselist_selectivity(root, clauses, varRelid, jointype,
sjinfo, rel, estimatedclauses);

/*
* Then, apply functional dependencies on the remaining clauses by
* calling dependencies_clauselist_selectivity. Pass 'estimatedclauses'
* so the function can properly skip clauses already estimated above.
* Then, apply functional dependencies on the remaining clauses by calling
* dependencies_clauselist_selectivity. Pass 'estimatedclauses' so the
* function can properly skip clauses already estimated above.
*
* The reasoning for applying dependencies last is that the more complex
* stats can track more complex correlations between the attributes, and

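The `MCVList * mcvlist` to `MCVList *mcvlist` hunks here are the typedefs-list effect again, and among the clearest examples in this run: while MCVList was missing from the typedefs list, pgindent treated it as an ordinary identifier and floated the asterisk; once it is known as a type, the pointer hugs the parameter name. Sketch with a hypothetical type:

    typedef struct ExampleList
    {
        int         nitems;
    } ExampleList;

    /* once ExampleList is in the typedefs list: "ExampleList *list" */
    static int
    example_count(const ExampleList *list)
    {
        return list->nitems;
    }
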
@ -209,20 +209,20 @@ statext_mcv_build(int numrows, HeapTuple *rows, Bitmapset *attrs,
|
||||
*
|
||||
* Using the same algorithm might exclude items that are close to the
|
||||
* "average" frequency of the sample. But that does not say whether the
|
||||
* observed frequency is close to the base frequency or not. We also
|
||||
* need to consider unexpectedly uncommon items (again, compared to the
|
||||
* base frequency), and the single-column algorithm does not have to.
|
||||
* observed frequency is close to the base frequency or not. We also need
|
||||
* to consider unexpectedly uncommon items (again, compared to the base
|
||||
* frequency), and the single-column algorithm does not have to.
|
||||
*
|
||||
* We simply decide how many items to keep by computing minimum count
|
||||
* using get_mincount_for_mcv_list() and then keep all items that seem
|
||||
* to be more common than that.
|
||||
* using get_mincount_for_mcv_list() and then keep all items that seem to
|
||||
* be more common than that.
|
||||
*/
|
||||
mincount = get_mincount_for_mcv_list(numrows, totalrows);
|
||||
|
||||
/*
|
||||
* Walk the groups until we find the first group with a count below
|
||||
* the mincount threshold (the index of that group is the number of
|
||||
* groups we want to keep).
|
||||
* Walk the groups until we find the first group with a count below the
|
||||
* mincount threshold (the index of that group is the number of groups we
|
||||
* want to keep).
|
||||
*/
|
||||
for (i = 0; i < nitems; i++)
|
||||
{
|
||||
@ -240,7 +240,7 @@ statext_mcv_build(int numrows, HeapTuple *rows, Bitmapset *attrs,
*/
if (nitems > 0)
{
int j;
int j;

/*
* Allocate the MCV list structure, set the global parameters.
@ -485,7 +485,7 @@ statext_mcv_load(Oid mvoid)
* (or a longer type) instead of using an array of bool items.
*/
bytea *
statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats)
statext_mcv_serialize(MCVList *mcvlist, VacAttrStats **stats)
{
int i;
int dim;
@ -603,7 +603,7 @@ statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats)
info[dim].nbytes = 0;
for (i = 0; i < info[dim].nvalues; i++)
{
Size len;
Size len;

values[dim][i] = PointerGetDatum(PG_DETOAST_DATUM(values[dim][i]));

@ -616,7 +616,7 @@ statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats)
info[dim].nbytes = 0;
for (i = 0; i < info[dim].nvalues; i++)
{
Size len;
Size len;

/* c-strings include terminator, so +1 byte */
values[dim][i] = PointerGetDatum(PG_DETOAST_DATUM(values[dim][i]));
@ -636,11 +636,11 @@ statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats)
* for each attribute, deduplicated values and items).
*
* The header fields are copied one by one, so that we don't need any
* explicit alignment (we copy them while deserializing). All fields
* after this need to be properly aligned, for direct access.
* explicit alignment (we copy them while deserializing). All fields after
* this need to be properly aligned, for direct access.
*/
total_length = MAXALIGN(VARHDRSZ + (3 * sizeof(uint32))
+ sizeof(AttrNumber) + (ndims * sizeof(Oid)));
+ sizeof(AttrNumber) + (ndims * sizeof(Oid)));

/* dimension info */
total_length += MAXALIGN(ndims * sizeof(DimensionInfo));
@ -650,14 +650,14 @@ statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats)
total_length += MAXALIGN(info[i].nbytes);

/*
* And finally the items (no additional alignment needed, we start
* at proper alignment and the itemsize formula uses MAXALIGN)
* And finally the items (no additional alignment needed, we start at
* proper alignment and the itemsize formula uses MAXALIGN)
*/
total_length += mcvlist->nitems * itemsize;

/*
* Allocate space for the whole serialized MCV list (we'll skip bytes,
* so we set them to zero to make the result more compressible).
* Allocate space for the whole serialized MCV list (we'll skip bytes, so
* we set them to zero to make the result more compressible).
*/
raw = palloc0(total_length);
SET_VARSIZE(raw, total_length);
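The sizing arithmetic in that hunk is easier to follow written out on its own. A sketch of the scheme the comments describe, with invented names; MY_MAXALIGN stands in for PostgreSQL's MAXALIGN macro, assuming 8-byte maximum alignment:

#include <stddef.h>

#define MY_MAXALIGN(s) (((s) + 7) & ~((size_t) 7)) /* stand-in for MAXALIGN */

/* Each variable-length part is rounded up so the part after it starts
 * at an aligned offset; item entries are assumed pre-aligned. */
static size_t
serialized_size(size_t header, size_t diminfo, const size_t *nbytes,
                int ndims, int nitems, size_t itemsize)
{
    size_t total = MY_MAXALIGN(header);     /* copied field by field */
    int    i;

    total += MY_MAXALIGN(diminfo);          /* dimension info */
    for (i = 0; i < ndims; i++)
        total += MY_MAXALIGN(nbytes[i]);    /* deduplicated values */
    total += (size_t) nitems * itemsize;    /* itemsize already aligned */
    return total;
}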
@ -1189,8 +1189,8 @@ pg_stats_ext_mcvlist_items(PG_FUNCTION_ARGS)
HeapTuple tuple;
Datum result;

StringInfoData itemValues;
StringInfoData itemNulls;
StringInfoData itemValues;
StringInfoData itemNulls;

int i;

@ -1213,9 +1213,9 @@ pg_stats_ext_mcvlist_items(PG_FUNCTION_ARGS)
*/
values = (char **) palloc0(5 * sizeof(char *));

values[0] = (char *) palloc(64 * sizeof(char)); /* item index */
values[3] = (char *) palloc(64 * sizeof(char)); /* frequency */
values[4] = (char *) palloc(64 * sizeof(char)); /* base frequency */
values[0] = (char *) palloc(64 * sizeof(char)); /* item index */
values[3] = (char *) palloc(64 * sizeof(char)); /* frequency */
values[4] = (char *) palloc(64 * sizeof(char)); /* base frequency */

outfuncs = (Oid *) palloc0(sizeof(Oid) * mcvlist->ndimensions);
fmgrinfo = (FmgrInfo *) palloc0(sizeof(FmgrInfo) * mcvlist->ndimensions);
@ -1376,7 +1376,7 @@ pg_mcv_list_send(PG_FUNCTION_ARGS)
*/
static bool *
mcv_get_match_bitmap(PlannerInfo *root, List *clauses,
Bitmapset *keys, MCVList * mcvlist, bool is_or)
Bitmapset *keys, MCVList *mcvlist, bool is_or)
{
int i;
ListCell *l;
@ -2813,12 +2813,12 @@ RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
case RELKIND_MATVIEW:
{
/*
* Not every table AM uses BLCKSZ wide fixed size
* blocks. Therefore tableam returns the size in bytes - but
* for the purpose of this routine, we want the number of
* blocks. Therefore divide, rounding up.
* Not every table AM uses BLCKSZ wide fixed size blocks.
* Therefore tableam returns the size in bytes - but for the
* purpose of this routine, we want the number of blocks.
* Therefore divide, rounding up.
*/
uint64 szbytes;
uint64 szbytes;

szbytes = table_relation_size(relation, forkNum);

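The "divide, rounding up" step mentioned in that comment is the standard integer idiom: bias the numerator by one less than the divisor before dividing. A small self-contained sketch; BLCKSZ is fixed here for illustration and the helper name is invented:

#include <stdint.h>

#define BLCKSZ 8192

/* Integer division truncates, so adding BLCKSZ - 1 first rounds the
 * byte count up to whole blocks. */
static uint64_t
bytes_to_blocks(uint64_t szbytes)
{
    return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
}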
@ -1731,7 +1731,7 @@ FileClose(File file)
* see LruDelete.
*/
elog(vfdP->fdstate & FD_TEMP_FILE_LIMIT ? LOG : data_sync_elevel(LOG),
"could not close file \"%s\": %m", vfdP->fileName);
"could not close file \"%s\": %m", vfdP->fileName);
}

--nfile;

@ -856,7 +856,7 @@ WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action)
if (rc < 0)
ereport(ERROR,
(errcode_for_socket_access(),
/* translator: %s is a syscall name, such as "poll()" */
/* translator: %s is a syscall name, such as "poll()" */
errmsg("%s failed: %m",
"epoll_ctl()")));
}
@ -1089,7 +1089,7 @@ WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
waiting = false;
ereport(ERROR,
(errcode_for_socket_access(),
/* translator: %s is a syscall name, such as "poll()" */
/* translator: %s is a syscall name, such as "poll()" */
errmsg("%s failed: %m",
"epoll_wait()")));
}
@ -1215,7 +1215,7 @@ WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
waiting = false;
ereport(ERROR,
(errcode_for_socket_access(),
/* translator: %s is a syscall name, such as "poll()" */
/* translator: %s is a syscall name, such as "poll()" */
errmsg("%s failed: %m",
"poll()")));
}

@ -370,7 +370,7 @@ void
PostmasterDeathSignalInit(void)
{
#ifdef USE_POSTMASTER_DEATH_SIGNAL
int signum = POSTMASTER_DEATH_SIGNAL;
int signum = POSTMASTER_DEATH_SIGNAL;

/* Register our signal handler. */
pqsignal(signum, postmaster_death_handler);

@ -181,7 +181,7 @@ pg_rotate_logfile(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to rotate log files with adminpack 1.0"),
/* translator: %s is a SQL function name */
/* translator: %s is a SQL function name */
errhint("Consider using %s, which is part of core, instead.",
"pg_logfile_rotate()"))));

@ -906,7 +906,7 @@ WaitForLockersMultiple(List *locktags, LOCKMODE lockmode, bool progress)
*/
if (progress)
{
PGPROC *holder = BackendIdGetProc(lockholders->backendId);
PGPROC *holder = BackendIdGetProc(lockholders->backendId);

pgstat_progress_update_param(PROGRESS_WAITFOR_CURRENT_PID,
holder->pid);
@ -925,9 +925,10 @@ WaitForLockersMultiple(List *locktags, LOCKMODE lockmode, bool progress)
PROGRESS_WAITFOR_DONE,
PROGRESS_WAITFOR_CURRENT_PID
};
const int64 values[] = {
const int64 values[] = {
0, 0, 0
};

pgstat_progress_update_multi_param(3, index, values);
}

@ -91,7 +91,7 @@ static const int NSmgr = lengthof(smgrsw);
*/
static HTAB *SMgrRelationHash = NULL;

static dlist_head unowned_relns;
static dlist_head unowned_relns;

/* local function prototypes */
static void smgrshutdown(int code, Datum arg);
@ -713,7 +713,7 @@ smgrimmedsync(SMgrRelation reln, ForkNumber forknum)
void
AtEOXact_SMgr(void)
{
dlist_mutable_iter iter;
dlist_mutable_iter iter;

/*
* Zap all unowned SMgrRelations. We rely on smgrclose() to remove each
@ -721,8 +721,8 @@ AtEOXact_SMgr(void)
*/
dlist_foreach_modify(iter, &unowned_relns)
{
SMgrRelation rel = dlist_container(SMgrRelationData, node,
iter.cur);
SMgrRelation rel = dlist_container(SMgrRelationData, node,
iter.cur);

Assert(rel->smgr_owner == NULL);

@ -548,8 +548,8 @@ RegisterSyncRequest(const FileTag *ftag, SyncRequestType type,
for (;;)
{
/*
* Notify the checkpointer about it. If we fail to queue a message
* in retryOnError mode, we have to sleep and try again ... ugly, but
* Notify the checkpointer about it. If we fail to queue a message in
* retryOnError mode, we have to sleep and try again ... ugly, but
* hopefully won't happen often.
*
* XXX should we CHECK_FOR_INTERRUPTS in this loop? Escaping with an
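The RegisterSyncRequest comment above describes a simple retry shape: attempt to queue the request, and if the queue is full while the caller demanded success, sleep briefly and try again. A hedged sketch of that shape only; try_forward_request() is an invented stand-in for the real checkpointer call, and the sleep interval is illustrative:

#include <stdbool.h>
#include <unistd.h>

extern bool try_forward_request(void);  /* assumed: true once queued */

static void
forward_with_retry(bool retryOnError)
{
    for (;;)
    {
        if (try_forward_request())
            break;              /* queued successfully */
        if (!retryOnError)
            break;              /* caller accepts failure */
        usleep(10000);          /* sleep and try again ... ugly */
    }
}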
@ -113,8 +113,8 @@ DestReceiver *
CreateDestReceiver(CommandDest dest)
{
/*
* It's ok to cast the constness away as any modification of the none receiver
* would be a bug (which gets easier to catch this way).
* It's ok to cast the constness away as any modification of the none
* receiver would be a bug (which gets easier to catch this way).
*/

switch (dest)
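The constness cast that comment defends can be shown in miniature: the shared receiver object is declared const so accidental writes fail to compile, and the cast is confined to the one place that hands it out. Names here are invented for illustration, not the file's own:

typedef struct DestRecv { int kind; } DestRecv;

static const DestRecv donothing_recv = {0};

/* Casting constness away is acceptable here because any write through
 * the returned pointer would be a bug the compiler helps us catch. */
static DestRecv *
get_none_receiver(void)
{
    return (DestRecv *) &donothing_recv;
}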
@ -3023,6 +3023,7 @@ DCH_from_char(FormatNode *node, char *in, TmFromChar *out)
int len,
value;
bool fx_mode = false;

/* number of extra skipped characters (more than given in format string) */
int extra_skip = 0;

@ -3049,8 +3050,8 @@ DCH_from_char(FormatNode *node, char *in, TmFromChar *out)
/*
* In non FX (fixed format) mode one format string space or
* separator match to one space or separator in input string.
* Or match nothing if there is no space or separator in
* the current position of input string.
* Or match nothing if there is no space or separator in the
* current position of input string.
*/
extra_skip--;
if (isspace((unsigned char) *s) || is_separator_char(s))
@ -3176,11 +3177,13 @@ DCH_from_char(FormatNode *node, char *in, TmFromChar *out)
n->key->name)));
break;
case DCH_TZH:

/*
* Value of TZH might be negative. And the issue is that we
* might swallow minus sign as the separator. So, if we have
* skipped more characters than specified in the format string,
* then we consider prepending last skipped minus to TZH.
* skipped more characters than specified in the format
* string, then we consider prepending last skipped minus to
* TZH.
*/
if (*s == '+' || *s == '-' || *s == ' ')
{
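The TZH comment describes a subtle rule: a '-' just before the field may already have been consumed as a separator, so when more characters were skipped than the format string accounts for, the last skipped minus is reinterpreted as the sign. A simplified sketch of that rule only, with an invented helper name; the real parser tracks the skip count in extra_skip:

/* s points at the TZH digits; extra_skip counts characters skipped
 * beyond what the format string specified. The caller is assumed to
 * guarantee at least one character precedes s. */
static int
tzh_sign(const char *s, int extra_skip)
{
    if (*s == '+' || *s == '-')
        return (*s == '-') ? -1 : +1;   /* explicit sign present */

    /* no sign here: maybe the minus was swallowed as a separator */
    if (extra_skip > 0 && s[-1] == '-')
        return -1;
    return +1;
}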
@ -219,7 +219,7 @@ pg_read_file(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to read files with adminpack 1.0"),
/* translator: %s is a SQL function name */
/* translator: %s is a SQL function name */
errhint("Consider using %s, which is part of core, instead.",
"pg_file_read()"))));

@ -88,7 +88,7 @@ static int point_inside(Point *p, int npts, Point *plist);
static inline void line_construct(LINE *result, Point *pt, float8 m);
static inline float8 line_sl(LINE *line);
static inline float8 line_invsl(LINE *line);
static bool line_interpt_line(Point *result, LINE *l1, LINE *l2);
static bool line_interpt_line(Point *result, LINE *l1, LINE *l2);
static bool line_contain_point(LINE *line, Point *point);
static float8 line_closept_point(Point *result, LINE *line, Point *pt);

@ -96,10 +96,10 @@ static float8 line_closept_point(Point *result, LINE *line, Point *pt);
static inline void statlseg_construct(LSEG *lseg, Point *pt1, Point *pt2);
static inline float8 lseg_sl(LSEG *lseg);
static inline float8 lseg_invsl(LSEG *lseg);
static bool lseg_interpt_line(Point *result, LSEG *lseg, LINE *line);
static bool lseg_interpt_lseg(Point *result, LSEG *l1, LSEG *l2);
static bool lseg_interpt_line(Point *result, LSEG *lseg, LINE *line);
static bool lseg_interpt_lseg(Point *result, LSEG *l1, LSEG *l2);
static int lseg_crossing(float8 x, float8 y, float8 px, float8 py);
static bool lseg_contain_point(LSEG *lseg, Point *point);
static bool lseg_contain_point(LSEG *lseg, Point *point);
static float8 lseg_closept_point(Point *result, LSEG *lseg, Point *pt);
static float8 lseg_closept_line(Point *result, LSEG *lseg, LINE *line);
static float8 lseg_closept_lseg(Point *result, LSEG *on_lseg, LSEG *to_lseg);
@ -692,9 +692,9 @@ static bool
box_contain_box(BOX *contains_box, BOX *contained_box)
{
return FPge(contains_box->high.x, contained_box->high.x) &&
FPle(contains_box->low.x, contained_box->low.x) &&
FPge(contains_box->high.y, contained_box->high.y) &&
FPle(contains_box->low.y, contained_box->low.y);
FPle(contains_box->low.x, contained_box->low.x) &&
FPge(contains_box->high.y, contained_box->high.y) &&
FPle(contains_box->low.y, contained_box->low.y);
}

@ -2378,8 +2378,8 @@ dist_ppath(PG_FUNCTION_ARGS)
Assert(path->npts > 0);

/*
* The distance from a point to a path is the smallest distance
* from the point to any of its constituent segments.
* The distance from a point to a path is the smallest distance from the
* point to any of its constituent segments.
*/
for (i = 0; i < path->npts; i++)
{
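The reflowed comment states the definition the loop that follows implements: point-to-path distance is the minimum over the path's segments. A geometry-only sketch of that minimization for an open path, using plain structs and an invented helper; the real code instead reuses lseg_closept_point and handles closed paths too:

#include <float.h>
#include <math.h>

typedef struct { double x, y; } Pt;

/* Distance from p to segment ab: project p onto ab, clamp to [0, 1]. */
static double
seg_dist(Pt p, Pt a, Pt b)
{
    double dx = b.x - a.x, dy = b.y - a.y;
    double len2 = dx * dx + dy * dy;
    double t = len2 > 0 ? ((p.x - a.x) * dx + (p.y - a.y) * dy) / len2 : 0;

    t = fmax(0.0, fmin(1.0, t));
    return hypot(p.x - (a.x + t * dx), p.y - (a.y + t * dy));
}

/* Point-to-path distance: the smallest distance to any segment. */
static double
path_dist(Pt p, const Pt *pts, int npts)
{
    double best = DBL_MAX;
    int    i;

    for (i = 0; i < npts - 1; i++)
        best = fmin(best, seg_dist(p, pts[i], pts[i + 1]));
    return best;
}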
@ -2553,9 +2553,9 @@ lseg_interpt_line(Point *result, LSEG *lseg, LINE *line)
LINE tmp;

/*
* First, we promote the line segment to a line, because we know how
* to find the intersection point of two lines. If they don't have
* an intersection point, we are done.
* First, we promote the line segment to a line, because we know how to
* find the intersection point of two lines. If they don't have an
* intersection point, we are done.
*/
line_construct(&tmp, &lseg->p[0], lseg_sl(lseg));
if (!line_interpt_line(&interpt, &tmp, line))
@ -2602,8 +2602,8 @@ line_closept_point(Point *result, LINE *line, Point *point)
LINE tmp;

/*
* We drop a perpendicular to find the intersection point. Ordinarily
* we should always find it, but that can fail in the presence of NaN
* We drop a perpendicular to find the intersection point. Ordinarily we
* should always find it, but that can fail in the presence of NaN
* coordinates, and perhaps even from simple roundoff issues.
*/
line_construct(&tmp, point, line_invsl(line));
@ -2693,8 +2693,8 @@ lseg_closept_lseg(Point *result, LSEG *on_lseg, LSEG *to_lseg)
return 0.0;

/*
* Then, we find the closest points from the endpoints of the second
* line segment, and keep the closest one.
* Then, we find the closest points from the endpoints of the second line
* segment, and keep the closest one.
*/
dist = lseg_closept_point(result, on_lseg, &to_lseg->p[0]);
d = lseg_closept_point(&point, on_lseg, &to_lseg->p[1]);
@ -3063,7 +3063,7 @@ static bool
box_contain_point(BOX *box, Point *point)
{
return box->high.x >= point->x && box->low.x <= point->x &&
box->high.y >= point->y && box->low.y <= point-> y;
box->high.y >= point->y && box->low.y <= point->y;
}

Datum
@ -3150,7 +3150,7 @@ static bool
box_contain_lseg(BOX *box, LSEG *lseg)
{
return box_contain_point(box, &lseg->p[0]) &&
box_contain_point(box, &lseg->p[1]);
box_contain_point(box, &lseg->p[1]);
}

Datum
@ -207,7 +207,7 @@ IsValidJsonNumber(const char *str, int len)
*/
if (*str == '-')
{
dummy_lex.input = unconstify(char *, str) + 1;
dummy_lex.input = unconstify(char *, str) +1;
dummy_lex.input_length = len - 1;
}
else
@ -2192,7 +2192,7 @@ json_build_object(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("argument list must have even number of elements"),
/* translator: %s is a SQL function name */
/* translator: %s is a SQL function name */
errhint("The arguments of %s must consist of alternating keys and values.",
"json_build_object()")));
