diff --git a/contrib/auto_explain/auto_explain.c b/contrib/auto_explain/auto_explain.c index fcd11a0071..4ccd2aa849 100644 --- a/contrib/auto_explain/auto_explain.c +++ b/contrib/auto_explain/auto_explain.c @@ -165,16 +165,16 @@ _PG_init(void) DefineCustomRealVariable("auto_explain.sample_rate", "Fraction of queries to process.", - NULL, - &auto_explain_sample_rate, - 1.0, - 0.0, - 1.0, - PGC_SUSET, - 0, - NULL, - NULL, - NULL); + NULL, + &auto_explain_sample_rate, + 1.0, + 0.0, + 1.0, + PGC_SUSET, + 0, + NULL, + NULL, + NULL); EmitWarningsOnPlaceholders("auto_explain"); @@ -209,12 +209,12 @@ static void explain_ExecutorStart(QueryDesc *queryDesc, int eflags) { /* - * For rate sampling, randomly choose top-level statement. Either - * all nested statements will be explained or none will. + * For rate sampling, randomly choose top-level statement. Either all + * nested statements will be explained or none will. */ if (auto_explain_log_min_duration >= 0 && nesting_level == 0) current_query_sampled = (random() < auto_explain_sample_rate * - MAX_RANDOM_VALUE); + MAX_RANDOM_VALUE); if (auto_explain_enabled() && current_query_sampled) { diff --git a/contrib/bloom/blinsert.c b/contrib/bloom/blinsert.c index 15ac30d55f..3953af996b 100644 --- a/contrib/bloom/blinsert.c +++ b/contrib/bloom/blinsert.c @@ -33,11 +33,11 @@ PG_MODULE_MAGIC; typedef struct { BloomState blstate; /* bloom index state */ - MemoryContext tmpCtx; /* temporary memory context reset after - * each tuple */ + MemoryContext tmpCtx; /* temporary memory context reset after each + * tuple */ char data[BLCKSZ]; /* cached page */ int64 count; /* number of tuples in cached page */ -} BloomBuildState; +} BloomBuildState; /* * Flush page cached in BloomBuildState. @@ -140,8 +140,8 @@ blbuild(Relation heap, Relation index, IndexInfo *indexInfo) bloomBuildCallback, (void *) &buildstate); /* - * There are could be some items in cached page. Flush this page - * if needed. + * There are could be some items in cached page. Flush this page if + * needed. */ if (buildstate.count > 0) flushCachedPage(index, &buildstate); diff --git a/contrib/bloom/bloom.h b/contrib/bloom/bloom.h index c6091a8dd6..bc451a00db 100644 --- a/contrib/bloom/bloom.h +++ b/contrib/bloom/bloom.h @@ -31,14 +31,13 @@ /* Opaque for bloom pages */ typedef struct BloomPageOpaqueData { - OffsetNumber maxoff; /* number of index tuples on page */ - uint16 flags; /* see bit definitions below */ - uint16 unused; /* placeholder to force maxaligning of size - * of BloomPageOpaqueData and to place - * bloom_page_id exactly at the end of page - */ - uint16 bloom_page_id; /* for identification of BLOOM indexes */ -} BloomPageOpaqueData; + OffsetNumber maxoff; /* number of index tuples on page */ + uint16 flags; /* see bit definitions below */ + uint16 unused; /* placeholder to force maxaligning of size of + * BloomPageOpaqueData and to place + * bloom_page_id exactly at the end of page */ + uint16 bloom_page_id; /* for identification of BLOOM indexes */ +} BloomPageOpaqueData; typedef BloomPageOpaqueData *BloomPageOpaque; @@ -102,9 +101,9 @@ typedef struct BloomOptions { int32 vl_len_; /* varlena header (do not touch directly!) */ int bloomLength; /* length of signature in words (not bits!) 
*/ - int bitSize[INDEX_MAX_KEYS]; /* # of bits generated for each - * index key */ -} BloomOptions; + int bitSize[INDEX_MAX_KEYS]; /* # of bits generated for + * each index key */ +} BloomOptions; /* * FreeBlockNumberArray - array of block numbers sized so that metadata fill @@ -125,7 +124,7 @@ typedef struct BloomMetaPageData uint16 nEnd; BloomOptions opts; FreeBlockNumberArray notFullPage; -} BloomMetaPageData; +} BloomMetaPageData; /* Magic number to distinguish bloom pages among anothers */ #define BLOOM_MAGICK_NUMBER (0xDBAC0DED) @@ -146,7 +145,7 @@ typedef struct BloomState * precompute it */ Size sizeOfBloomTuple; -} BloomState; +} BloomState; #define BloomPageGetFreeSpace(state, page) \ (BLCKSZ - MAXALIGN(SizeOfPageHeaderData) \ @@ -160,30 +159,30 @@ typedef struct BloomTuple { ItemPointerData heapPtr; BloomSignatureWord sign[FLEXIBLE_ARRAY_MEMBER]; -} BloomTuple; +} BloomTuple; #define BLOOMTUPLEHDRSZ offsetof(BloomTuple, sign) /* Opaque data structure for bloom index scan */ typedef struct BloomScanOpaqueData { - BloomSignatureWord *sign; /* Scan signature */ + BloomSignatureWord *sign; /* Scan signature */ BloomState state; -} BloomScanOpaqueData; +} BloomScanOpaqueData; typedef BloomScanOpaqueData *BloomScanOpaque; /* blutils.c */ extern void _PG_init(void); extern Datum blhandler(PG_FUNCTION_ARGS); -extern void initBloomState(BloomState * state, Relation index); +extern void initBloomState(BloomState *state, Relation index); extern void BloomFillMetapage(Relation index, Page metaPage); extern void BloomInitMetapage(Relation index); extern void BloomInitPage(Page page, uint16 flags); extern Buffer BloomNewBuffer(Relation index); -extern void signValue(BloomState * state, BloomSignatureWord * sign, Datum value, int attno); -extern BloomTuple *BloomFormTuple(BloomState * state, ItemPointer iptr, Datum *values, bool *isnull); -extern bool BloomPageAddItem(BloomState * state, Page page, BloomTuple * tuple); +extern void signValue(BloomState *state, BloomSignatureWord *sign, Datum value, int attno); +extern BloomTuple *BloomFormTuple(BloomState *state, ItemPointer iptr, Datum *values, bool *isnull); +extern bool BloomPageAddItem(BloomState *state, Page page, BloomTuple *tuple); /* blvalidate.c */ extern bool blvalidate(Oid opclassoid); diff --git a/contrib/bloom/blutils.c b/contrib/bloom/blutils.c index 876952f2d5..317bcb3dec 100644 --- a/contrib/bloom/blutils.c +++ b/contrib/bloom/blutils.c @@ -37,6 +37,7 @@ PG_FUNCTION_INFO_V1(blhandler); /* Kind of relation options for bloom index */ static relopt_kind bl_relopt_kind; + /* parse table for fillRelOptions */ static relopt_parse_elt bl_relopt_tab[INDEX_MAX_KEYS + 1]; @@ -215,7 +216,9 @@ myRand(void) * October 1988, p. 1195. *---------- */ - int32 hi, lo, x; + int32 hi, + lo, + x; /* Must be in [1, 0x7ffffffe] range at this point. */ hi = next / 127773; diff --git a/contrib/bloom/blvacuum.c b/contrib/bloom/blvacuum.c index 5ae3349938..7c355463a2 100644 --- a/contrib/bloom/blvacuum.c +++ b/contrib/bloom/blvacuum.c @@ -78,7 +78,7 @@ blbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, /* Iterate over the tuples */ itup = itupPtr = BloomPageGetTuple(&state, page, FirstOffsetNumber); itupEnd = BloomPageGetTuple(&state, page, - OffsetNumberNext(BloomPageGetMaxOffset(page))); + OffsetNumberNext(BloomPageGetMaxOffset(page))); while (itup < itupEnd) { /* Do we have to delete this tuple? 
*/ @@ -106,11 +106,11 @@ blbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, } Assert(itupPtr == BloomPageGetTuple(&state, page, - OffsetNumberNext(BloomPageGetMaxOffset(page)))); + OffsetNumberNext(BloomPageGetMaxOffset(page)))); /* - * Add page to notFullPage list if we will not mark page as deleted and - * there is a free space on it + * Add page to notFullPage list if we will not mark page as deleted + * and there is a free space on it */ if (BloomPageGetMaxOffset(page) != 0 && BloomPageGetFreeSpace(&state, page) > state.sizeOfBloomTuple && diff --git a/contrib/file_fdw/file_fdw.c b/contrib/file_fdw/file_fdw.c index bc4d2d7082..c0491318c0 100644 --- a/contrib/file_fdw/file_fdw.c +++ b/contrib/file_fdw/file_fdw.c @@ -132,7 +132,7 @@ static bool fileAnalyzeForeignTable(Relation relation, AcquireSampleRowsFunc *func, BlockNumber *totalpages); static bool fileIsForeignScanParallelSafe(PlannerInfo *root, RelOptInfo *rel, - RangeTblEntry *rte); + RangeTblEntry *rte); /* * Helper functions @@ -767,12 +767,12 @@ fileAnalyzeForeignTable(Relation relation, /* * fileIsForeignScanParallelSafe - * Reading a file in a parallel worker should work just the same as - * reading it in the leader, so mark scans safe. + * Reading a file in a parallel worker should work just the same as + * reading it in the leader, so mark scans safe. */ static bool fileIsForeignScanParallelSafe(PlannerInfo *root, RelOptInfo *rel, - RangeTblEntry *rte) + RangeTblEntry *rte) { return true; } diff --git a/contrib/isn/isn.c b/contrib/isn/isn.c index 49d088ffa8..c622a4ef07 100644 --- a/contrib/isn/isn.c +++ b/contrib/isn/isn.c @@ -444,9 +444,9 @@ ean2ISBN(char *isn) unsigned check; /* - * The number should come in this format: 978-0-000-00000-0 - * or may be an ISBN-13 number, 979-..., which does not have a short - * representation. Do the short output version if possible. + * The number should come in this format: 978-0-000-00000-0 or may be an + * ISBN-13 number, 979-..., which does not have a short representation. Do + * the short output version if possible. 
*/ if (strncmp("978-", isn, 4) == 0) { diff --git a/contrib/pageinspect/heapfuncs.c b/contrib/pageinspect/heapfuncs.c index d0c28865c3..904eaef2da 100644 --- a/contrib/pageinspect/heapfuncs.c +++ b/contrib/pageinspect/heapfuncs.c @@ -82,7 +82,7 @@ text_to_bits(char *str, int len) else ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("illegal character '%c' in t_bits string", str[off]))); + errmsg("illegal character '%c' in t_bits string", str[off]))); if (off % 8 == 7) bits[off / 8] = byte; @@ -192,9 +192,9 @@ heap_page_items(PG_FUNCTION_ARGS) lp_offset == MAXALIGN(lp_offset) && lp_offset + lp_len <= raw_page_size) { - HeapTupleHeader tuphdr; - bytea *tuple_data_bytea; - int tuple_data_len; + HeapTupleHeader tuphdr; + bytea *tuple_data_bytea; + int tuple_data_len; /* Extract information from the tuple header */ @@ -214,7 +214,7 @@ heap_page_items(PG_FUNCTION_ARGS) tuple_data_bytea = (bytea *) palloc(tuple_data_len + VARHDRSZ); SET_VARSIZE(tuple_data_bytea, tuple_data_len + VARHDRSZ); memcpy(VARDATA(tuple_data_bytea), (char *) tuphdr + tuphdr->t_hoff, - tuple_data_len); + tuple_data_len); values[13] = PointerGetDatum(tuple_data_bytea); /* @@ -284,16 +284,16 @@ heap_page_items(PG_FUNCTION_ARGS) */ static Datum tuple_data_split_internal(Oid relid, char *tupdata, - uint16 tupdata_len, uint16 t_infomask, - uint16 t_infomask2, bits8 *t_bits, - bool do_detoast) + uint16 tupdata_len, uint16 t_infomask, + uint16 t_infomask2, bits8 *t_bits, + bool do_detoast) { - ArrayBuildState *raw_attrs; - int nattrs; - int i; - int off = 0; - Relation rel; - TupleDesc tupdesc; + ArrayBuildState *raw_attrs; + int nattrs; + int i; + int off = 0; + Relation rel; + TupleDesc tupdesc; /* Get tuple descriptor from relation OID */ rel = relation_open(relid, NoLock); @@ -310,30 +310,31 @@ tuple_data_split_internal(Oid relid, char *tupdata, for (i = 0; i < nattrs; i++) { - Form_pg_attribute attr; - bool is_null; - bytea *attr_data = NULL; + Form_pg_attribute attr; + bool is_null; + bytea *attr_data = NULL; attr = tupdesc->attrs[i]; is_null = (t_infomask & HEAP_HASNULL) && att_isnull(i, t_bits); /* - * Tuple header can specify less attributes than tuple descriptor - * as ALTER TABLE ADD COLUMN without DEFAULT keyword does not - * actually change tuples in pages, so attributes with numbers greater - * than (t_infomask2 & HEAP_NATTS_MASK) should be treated as NULL. + * Tuple header can specify less attributes than tuple descriptor as + * ALTER TABLE ADD COLUMN without DEFAULT keyword does not actually + * change tuples in pages, so attributes with numbers greater than + * (t_infomask2 & HEAP_NATTS_MASK) should be treated as NULL. 
*/ if (i >= (t_infomask2 & HEAP_NATTS_MASK)) is_null = true; if (!is_null) { - int len; + int len; if (attr->attlen == -1) { off = att_align_pointer(off, tupdesc->attrs[i]->attalign, -1, tupdata + off); + /* * As VARSIZE_ANY throws an exception if it can't properly * detect the type of external storage in macros VARTAG_SIZE, @@ -343,8 +344,8 @@ tuple_data_split_internal(Oid relid, char *tupdata, !VARATT_IS_EXTERNAL_ONDISK(tupdata + off) && !VARATT_IS_EXTERNAL_INDIRECT(tupdata + off)) ereport(ERROR, - (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("first byte of varlena attribute is incorrect for attribute %d", i))); + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("first byte of varlena attribute is incorrect for attribute %d", i))); len = VARSIZE_ANY(tupdata + off); } @@ -381,7 +382,7 @@ tuple_data_split_internal(Oid relid, char *tupdata, if (tupdata_len != off) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("end of tuple reached without looking at all its data"))); + errmsg("end of tuple reached without looking at all its data"))); return makeArrayResult(raw_attrs, CurrentMemoryContext); } @@ -397,14 +398,14 @@ PG_FUNCTION_INFO_V1(tuple_data_split); Datum tuple_data_split(PG_FUNCTION_ARGS) { - Oid relid; - bytea *raw_data; - uint16 t_infomask; - uint16 t_infomask2; - char *t_bits_str; - bool do_detoast = false; - bits8 *t_bits = NULL; - Datum res; + Oid relid; + bytea *raw_data; + uint16 t_infomask; + uint16 t_infomask2; + char *t_bits_str; + bool do_detoast = false; + bits8 *t_bits = NULL; + Datum res; relid = PG_GETARG_OID(0); raw_data = PG_ARGISNULL(1) ? NULL : PG_GETARG_BYTEA_P(1); @@ -430,8 +431,8 @@ tuple_data_split(PG_FUNCTION_ARGS) */ if (t_infomask & HEAP_HASNULL) { - int bits_str_len; - int bits_len; + int bits_str_len; + int bits_len; bits_len = (t_infomask2 & HEAP_NATTS_MASK) / 8 + 1; if (!t_bits_str) diff --git a/contrib/pg_trgm/trgm_gin.c b/contrib/pg_trgm/trgm_gin.c index 8c19041591..ead33ef544 100644 --- a/contrib/pg_trgm/trgm_gin.c +++ b/contrib/pg_trgm/trgm_gin.c @@ -265,13 +265,13 @@ gin_trgm_consistent(PG_FUNCTION_ARGS) Datum gin_trgm_triconsistent(PG_FUNCTION_ARGS) { - GinTernaryValue *check = (GinTernaryValue *) PG_GETARG_POINTER(0); + GinTernaryValue *check = (GinTernaryValue *) PG_GETARG_POINTER(0); StrategyNumber strategy = PG_GETARG_UINT16(1); /* text *query = PG_GETARG_TEXT_P(2); */ int32 nkeys = PG_GETARG_INT32(3); Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); - GinTernaryValue res = GIN_MAYBE; + GinTernaryValue res = GIN_MAYBE; int32 i, ntrue; bool *boolcheck; @@ -293,11 +293,12 @@ gin_trgm_triconsistent(PG_FUNCTION_ARGS) } /* - * See comment in gin_trgm_consistent() about * upper bound formula + * See comment in gin_trgm_consistent() about * upper bound + * formula */ res = (nkeys == 0) ? GIN_FALSE : (((((float4) ntrue) / ((float4) nkeys)) >= nlimit) - ? GIN_MAYBE : GIN_FALSE); + ? GIN_MAYBE : GIN_FALSE); break; case ILikeStrategyNumber: #ifndef IGNORECASE @@ -330,9 +331,9 @@ gin_trgm_triconsistent(PG_FUNCTION_ARGS) else { /* - * As trigramsMatchGraph implements a monotonic boolean function, - * promoting all GIN_MAYBE keys to GIN_TRUE will give a - * conservative result. + * As trigramsMatchGraph implements a monotonic boolean + * function, promoting all GIN_MAYBE keys to GIN_TRUE will + * give a conservative result. 
*/ boolcheck = (bool *) palloc(sizeof(bool) * nkeys); for (i = 0; i < nkeys; i++) @@ -345,7 +346,7 @@ gin_trgm_triconsistent(PG_FUNCTION_ARGS) break; default: elog(ERROR, "unrecognized strategy number: %d", strategy); - res = GIN_FALSE; /* keep compiler quiet */ + res = GIN_FALSE; /* keep compiler quiet */ break; } diff --git a/contrib/pg_trgm/trgm_gist.c b/contrib/pg_trgm/trgm_gist.c index 2181c2623f..3a5aff9ede 100644 --- a/contrib/pg_trgm/trgm_gist.c +++ b/contrib/pg_trgm/trgm_gist.c @@ -296,6 +296,7 @@ gtrgm_consistent(PG_FUNCTION_ARGS) if (GIST_LEAF(entry)) { /* all leafs contains orig trgm */ + /* * Prevent gcc optimizing the tmpsml variable using volatile * keyword. Otherwise comparison of nlimit and tmpsml may give @@ -476,12 +477,14 @@ gtrgm_distance(PG_FUNCTION_ARGS) *recheck = strategy == WordDistanceStrategyNumber; if (GIST_LEAF(entry)) { /* all leafs contains orig trgm */ + /* * Prevent gcc optimizing the sml variable using volatile * keyword. Otherwise res can differ from the * word_similarity_dist_op() function. */ float4 volatile sml = cnt_sml(qtrg, key, *recheck); + res = 1.0 - sml; } else if (ISALLTRUE(key)) diff --git a/contrib/pg_trgm/trgm_op.c b/contrib/pg_trgm/trgm_op.c index b728cc1abd..dd0f492cfa 100644 --- a/contrib/pg_trgm/trgm_op.c +++ b/contrib/pg_trgm/trgm_op.c @@ -16,8 +16,8 @@ PG_MODULE_MAGIC; /* GUC variables */ -double similarity_threshold = 0.3f; -double word_similarity_threshold = 0.6f; +double similarity_threshold = 0.3f; +double word_similarity_threshold = 0.6f; void _PG_init(void); @@ -36,8 +36,8 @@ PG_FUNCTION_INFO_V1(word_similarity_dist_commutator_op); /* Trigram with position */ typedef struct { - trgm trg; - int index; + trgm trg; + int index; } pos_trgm; /* @@ -48,29 +48,29 @@ _PG_init(void) { /* Define custom GUC variables. */ DefineCustomRealVariable("pg_trgm.similarity_threshold", - "Sets the threshold used by the %% operator.", - "Valid range is 0.0 .. 1.0.", - &similarity_threshold, - 0.3, - 0.0, - 1.0, - PGC_USERSET, - 0, - NULL, - NULL, - NULL); + "Sets the threshold used by the %% operator.", + "Valid range is 0.0 .. 1.0.", + &similarity_threshold, + 0.3, + 0.0, + 1.0, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); DefineCustomRealVariable("pg_trgm.word_similarity_threshold", - "Sets the threshold used by the <%% operator.", - "Valid range is 0.0 .. 1.0.", - &word_similarity_threshold, - 0.6, - 0.0, - 1.0, - PGC_USERSET, - 0, - NULL, - NULL, - NULL); + "Sets the threshold used by the <%% operator.", + "Valid range is 0.0 .. 1.0.", + &word_similarity_threshold, + 0.6, + 0.0, + 1.0, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); } /* @@ -352,9 +352,9 @@ generate_trgm(char *str, int slen) * Make array of positional trigrams from two trigram arrays trg1 and trg2. * * trg1: trigram array of search pattern, of length len1. trg1 is required - * word which positions don't matter and replaced with -1. + * word which positions don't matter and replaced with -1. * trg2: trigram array of text, of length len2. trg2 is haystack where we - * search and have to store its positions. + * search and have to store its positions. * * Returns concatenated trigram array. 
*/ @@ -362,7 +362,8 @@ static pos_trgm * make_positional_trgm(trgm *trg1, int len1, trgm *trg2, int len2) { pos_trgm *result; - int i, len = len1 + len2; + int i, + len = len1 + len2; result = (pos_trgm *) palloc(sizeof(pos_trgm) * len); @@ -387,9 +388,9 @@ make_positional_trgm(trgm *trg1, int len1, trgm *trg2, int len2) static int comp_ptrgm(const void *v1, const void *v2) { - const pos_trgm *p1 = (const pos_trgm *)v1; - const pos_trgm *p2 = (const pos_trgm *)v2; - int cmp; + const pos_trgm *p1 = (const pos_trgm *) v1; + const pos_trgm *p2 = (const pos_trgm *) v2; + int cmp; cmp = CMPTRGM(p1->trg, p2->trg); if (cmp != 0) @@ -413,7 +414,7 @@ comp_ptrgm(const void *v1, const void *v2) * len2: length of array "trg2" and array "trg2indexes". * len: length of the array "found". * check_only: if true then only check existaince of similar search pattern in - * text. + * text. * * Returns word similarity. */ @@ -441,7 +442,7 @@ iterate_word_similarity(int *trg2indexes, for (i = 0; i < len2; i++) { /* Get index of next trigram */ - int trgindex = trg2indexes[i]; + int trgindex = trg2indexes[i]; /* Update last position of this trigram */ if (lower >= 0 || found[trgindex]) @@ -458,10 +459,10 @@ iterate_word_similarity(int *trg2indexes, /* Adjust lower bound if this trigram is present in required substing */ if (found[trgindex]) { - int prev_lower, - tmp_ulen2, - tmp_lower, - tmp_count; + int prev_lower, + tmp_ulen2, + tmp_lower, + tmp_count; upper = i; if (lower == -1) @@ -478,8 +479,8 @@ iterate_word_similarity(int *trg2indexes, prev_lower = lower; for (tmp_lower = lower; tmp_lower <= upper; tmp_lower++) { - float smlr_tmp = CALCSML(tmp_count, ulen1, tmp_ulen2); - int tmp_trgindex; + float smlr_tmp = CALCSML(tmp_count, ulen1, tmp_ulen2); + int tmp_trgindex; if (smlr_tmp > smlr_cur) { @@ -488,10 +489,11 @@ iterate_word_similarity(int *trg2indexes, lower = tmp_lower; count = tmp_count; } + /* * if we only check that word similarity is greater than - * pg_trgm.word_similarity_threshold we do not need to calculate - * a maximum similarity. + * pg_trgm.word_similarity_threshold we do not need to + * calculate a maximum similarity. */ if (check_only && smlr_cur >= word_similarity_threshold) break; @@ -506,6 +508,7 @@ iterate_word_similarity(int *trg2indexes, } smlr_max = Max(smlr_max, smlr_cur); + /* * if we only check that word similarity is greater than * pg_trgm.word_similarity_threshold we do not need to calculate a @@ -516,7 +519,8 @@ iterate_word_similarity(int *trg2indexes, for (tmp_lower = prev_lower; tmp_lower < lower; tmp_lower++) { - int tmp_trgindex; + int tmp_trgindex; + tmp_trgindex = trg2indexes[tmp_lower]; if (lastpos[tmp_trgindex] == tmp_lower) lastpos[tmp_trgindex] = -1; @@ -544,13 +548,13 @@ iterate_word_similarity(int *trg2indexes, * str1: search pattern string, of length slen1 bytes. * str2: text in which we are looking for a word, of length slen2 bytes. * check_only: if true then only check existaince of similar search pattern in - * text. + * text. * * Returns word similarity. 
*/ static float4 calc_word_similarity(char *str1, int slen1, char *str2, int slen2, - bool check_only) + bool check_only) { bool *found; pos_trgm *ptrg; @@ -568,8 +572,8 @@ calc_word_similarity(char *str1, int slen1, char *str2, int slen2, protect_out_of_mem(slen1 + slen2); /* Make positional trigrams */ - trg1 = (trgm *) palloc(sizeof(trgm) * (slen1 / 2 + 1) * 3); - trg2 = (trgm *) palloc(sizeof(trgm) * (slen2 / 2 + 1) * 3); + trg1 = (trgm *) palloc(sizeof(trgm) * (slen1 / 2 + 1) *3); + trg2 = (trgm *) palloc(sizeof(trgm) * (slen2 / 2 + 1) *3); len1 = generate_trgm_only(trg1, str1, slen1); len2 = generate_trgm_only(trg2, str2, slen2); @@ -594,7 +598,8 @@ calc_word_similarity(char *str1, int slen1, char *str2, int slen2, { if (i > 0) { - int cmp = CMPTRGM(ptrg[i - 1].trg, ptrg[i].trg); + int cmp = CMPTRGM(ptrg[i - 1].trg, ptrg[i].trg); + if (cmp != 0) { if (found[j]) @@ -617,7 +622,7 @@ calc_word_similarity(char *str1, int slen1, char *str2, int slen2, /* Run iterative procedure to find maximum similarity with word */ result = iterate_word_similarity(trg2indexes, found, ulen1, len2, len, - check_only); + check_only); pfree(trg2indexes); pfree(found); @@ -1075,8 +1080,8 @@ word_similarity(PG_FUNCTION_ARGS) float4 res; res = calc_word_similarity(VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1), - VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2), - false); + VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2), + false); PG_FREE_IF_COPY(in1, 0); PG_FREE_IF_COPY(in2, 1); @@ -1111,8 +1116,8 @@ word_similarity_op(PG_FUNCTION_ARGS) float4 res; res = calc_word_similarity(VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1), - VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2), - true); + VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2), + true); PG_FREE_IF_COPY(in1, 0); PG_FREE_IF_COPY(in2, 1); @@ -1127,8 +1132,8 @@ word_similarity_commutator_op(PG_FUNCTION_ARGS) float4 res; res = calc_word_similarity(VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2), - VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1), - true); + VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1), + true); PG_FREE_IF_COPY(in1, 0); PG_FREE_IF_COPY(in2, 1); @@ -1143,8 +1148,8 @@ word_similarity_dist_op(PG_FUNCTION_ARGS) float4 res; res = calc_word_similarity(VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1), - VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2), - false); + VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2), + false); PG_FREE_IF_COPY(in1, 0); PG_FREE_IF_COPY(in2, 1); @@ -1159,8 +1164,8 @@ word_similarity_dist_commutator_op(PG_FUNCTION_ARGS) float4 res; res = calc_word_similarity(VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2), - VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1), - false); + VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1), + false); PG_FREE_IF_COPY(in1, 0); PG_FREE_IF_COPY(in2, 1); diff --git a/contrib/pg_visibility/pg_visibility.c b/contrib/pg_visibility/pg_visibility.c index 5e5c7cce24..9edf239819 100644 --- a/contrib/pg_visibility/pg_visibility.c +++ b/contrib/pg_visibility/pg_visibility.c @@ -20,8 +20,8 @@ PG_MODULE_MAGIC; typedef struct vbits { - BlockNumber next; - BlockNumber count; + BlockNumber next; + BlockNumber count; uint8 bits[FLEXIBLE_ARRAY_MEMBER]; } vbits; @@ -129,7 +129,7 @@ pg_visibility_map_rel(PG_FUNCTION_ARGS) if (SRF_IS_FIRSTCALL()) { Oid relid = PG_GETARG_OID(0); - MemoryContext oldcontext; + MemoryContext oldcontext; funcctx = SRF_FIRSTCALL_INIT(); oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); @@ -173,7 +173,7 @@ pg_visibility_rel(PG_FUNCTION_ARGS) if (SRF_IS_FIRSTCALL()) { Oid relid = PG_GETARG_OID(0); - MemoryContext oldcontext; + MemoryContext oldcontext; funcctx = SRF_FIRSTCALL_INIT(); 
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); @@ -214,8 +214,8 @@ pg_visibility_map_summary(PG_FUNCTION_ARGS) { Oid relid = PG_GETARG_OID(0); Relation rel; - BlockNumber nblocks; - BlockNumber blkno; + BlockNumber nblocks; + BlockNumber blkno; Buffer vmbuffer = InvalidBuffer; int64 all_visible = 0; int64 all_frozen = 0; @@ -292,16 +292,16 @@ static vbits * collect_visibility_data(Oid relid, bool include_pd) { Relation rel; - BlockNumber nblocks; + BlockNumber nblocks; vbits *info; - BlockNumber blkno; + BlockNumber blkno; Buffer vmbuffer = InvalidBuffer; - BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD); + BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD); rel = relation_open(relid, AccessShareLock); nblocks = RelationGetNumberOfBlocks(rel); - info = palloc0(offsetof(vbits, bits) + nblocks); + info = palloc0(offsetof(vbits, bits) +nblocks); info->next = 0; info->count = nblocks; @@ -320,8 +320,8 @@ collect_visibility_data(Oid relid, bool include_pd) info->bits[blkno] |= (1 << 1); /* - * Page-level data requires reading every block, so only get it if - * the caller needs it. Use a buffer access strategy, too, to prevent + * Page-level data requires reading every block, so only get it if the + * caller needs it. Use a buffer access strategy, too, to prevent * cache-trashing. */ if (include_pd) diff --git a/contrib/pgcrypto/pgp.h b/contrib/pgcrypto/pgp.h index 88f7f8dc48..804a27018a 100644 --- a/contrib/pgcrypto/pgp.h +++ b/contrib/pgcrypto/pgp.h @@ -124,7 +124,7 @@ struct PGP_S2K uint8 mode; uint8 digest_algo; uint8 salt[8]; - uint8 iter; /* encoded (one-octet) count */ + uint8 iter; /* encoded (one-octet) count */ /* calculated: */ uint8 key[PGP_MAX_KEY]; uint8 key_len; diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c index 43c7fc9e08..8ca1c1c898 100644 --- a/contrib/postgres_fdw/connection.c +++ b/contrib/postgres_fdw/connection.c @@ -486,11 +486,11 @@ pgfdw_get_result(PGconn *conn, const char *query) for (;;) { - PGresult *res; + PGresult *res; while (PQisBusy(conn)) { - int wc; + int wc; /* Sleep until there's something to do */ wc = WaitLatchOrSocket(MyLatch, @@ -675,9 +675,9 @@ pgfdw_xact_callback(XactEvent event, void *arg) /* * If a command has been submitted to the remote server by * using an asynchronous execution function, the command - * might not have yet completed. Check to see if a command - * is still being processed by the remote server, and if so, - * request cancellation of the command. + * might not have yet completed. Check to see if a + * command is still being processed by the remote server, + * and if so, request cancellation of the command. */ if (PQtransactionStatus(entry->conn) == PQTRANS_ACTIVE) { @@ -689,8 +689,8 @@ pgfdw_xact_callback(XactEvent event, void *arg) if (!PQcancel(cancel, errbuf, sizeof(errbuf))) ereport(WARNING, (errcode(ERRCODE_CONNECTION_FAILURE), - errmsg("could not send cancel request: %s", - errbuf))); + errmsg("could not send cancel request: %s", + errbuf))); PQfreeCancel(cancel); } } @@ -798,11 +798,11 @@ pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid, entry->have_error = true; /* - * If a command has been submitted to the remote server by using an - * asynchronous execution function, the command might not have yet - * completed. Check to see if a command is still being processed by - * the remote server, and if so, request cancellation of the - * command. 
+ * If a command has been submitted to the remote server by using + * an asynchronous execution function, the command might not have + * yet completed. Check to see if a command is still being + * processed by the remote server, and if so, request cancellation + * of the command. */ if (PQtransactionStatus(entry->conn) == PQTRANS_ACTIVE) { diff --git a/contrib/postgres_fdw/deparse.c b/contrib/postgres_fdw/deparse.c index 35c27e7fb3..7d2512cf04 100644 --- a/contrib/postgres_fdw/deparse.c +++ b/contrib/postgres_fdw/deparse.c @@ -1583,10 +1583,10 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root, /* * All other system attributes are fetched as 0, except for table OID, * which is fetched as the local table OID. However, we must be - * careful; the table could be beneath an outer join, in which case - * it must go to NULL whenever the rest of the row does. + * careful; the table could be beneath an outer join, in which case it + * must go to NULL whenever the rest of the row does. */ - Oid fetchval = 0; + Oid fetchval = 0; if (varattno == TableOidAttributeNumber) { @@ -1633,10 +1633,10 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root, 0 - FirstLowInvalidHeapAttributeNumber); /* - * In case the whole-row reference is under an outer join then it has to - * go NULL whenver the rest of the row goes NULL. Deparsing a join query - * would always involve multiple relations, thus qualify_col would be - * true. + * In case the whole-row reference is under an outer join then it has + * to go NULL whenver the rest of the row goes NULL. Deparsing a join + * query would always involve multiple relations, thus qualify_col + * would be true. */ if (qualify_col) { @@ -1652,7 +1652,7 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root, /* Complete the CASE WHEN statement started above. */ if (qualify_col) - appendStringInfo(buf," END"); + appendStringInfo(buf, " END"); heap_close(rel, NoLock); bms_free(attrs_used); diff --git a/contrib/postgres_fdw/option.c b/contrib/postgres_fdw/option.c index f89de2f694..224aed948e 100644 --- a/contrib/postgres_fdw/option.c +++ b/contrib/postgres_fdw/option.c @@ -133,9 +133,9 @@ postgres_fdw_validator(PG_FUNCTION_ARGS) } else if (strcmp(def->defname, "fetch_size") == 0) { - int fetch_size; + int fetch_size; - fetch_size = strtol(defGetString(def), NULL,10); + fetch_size = strtol(defGetString(def), NULL, 10); if (fetch_size <= 0) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c index 4d17272674..4e31b8e40d 100644 --- a/contrib/postgres_fdw/postgres_fdw.c +++ b/contrib/postgres_fdw/postgres_fdw.c @@ -4063,19 +4063,20 @@ foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype, /* * Pull the other remote conditions from the joining relations into join - * clauses or other remote clauses (remote_conds) of this relation wherever - * possible. This avoids building subqueries at every join step, which is - * not currently supported by the deparser logic. + * clauses or other remote clauses (remote_conds) of this relation + * wherever possible. This avoids building subqueries at every join step, + * which is not currently supported by the deparser logic. * * For an inner join, clauses from both the relations are added to the - * other remote clauses. 
For LEFT and RIGHT OUTER join, the clauses from the - * outer side are added to remote_conds since those can be evaluated after - * the join is evaluated. The clauses from inner side are added to the - * joinclauses, since they need to evaluated while constructing the join. + * other remote clauses. For LEFT and RIGHT OUTER join, the clauses from + * the outer side are added to remote_conds since those can be evaluated + * after the join is evaluated. The clauses from inner side are added to + * the joinclauses, since they need to evaluated while constructing the + * join. * - * For a FULL OUTER JOIN, the other clauses from either relation can not be - * added to the joinclauses or remote_conds, since each relation acts as an - * outer relation for the other. Consider such full outer join as + * For a FULL OUTER JOIN, the other clauses from either relation can not + * be added to the joinclauses or remote_conds, since each relation acts + * as an outer relation for the other. Consider such full outer join as * unshippable because of the reasons mentioned above in this comment. * * The joining sides can not have local conditions, thus no need to test diff --git a/contrib/postgres_fdw/postgres_fdw.h b/contrib/postgres_fdw/postgres_fdw.h index 574b07d16c..67126bc421 100644 --- a/contrib/postgres_fdw/postgres_fdw.h +++ b/contrib/postgres_fdw/postgres_fdw.h @@ -78,7 +78,7 @@ typedef struct PgFdwRelationInfo ForeignServer *server; UserMapping *user; /* only set in use_remote_estimate mode */ - int fetch_size; /* fetch size for this remote table */ + int fetch_size; /* fetch size for this remote table */ /* * Name of the relation while EXPLAINing ForeignScan. It is used for join @@ -133,23 +133,23 @@ extern void deparseUpdateSql(StringInfo buf, PlannerInfo *root, List *targetAttrs, List *returningList, List **retrieved_attrs); extern void deparseDirectUpdateSql(StringInfo buf, PlannerInfo *root, - Index rtindex, Relation rel, - List *targetlist, - List *targetAttrs, - List *remote_conds, - List **params_list, - List *returningList, - List **retrieved_attrs); + Index rtindex, Relation rel, + List *targetlist, + List *targetAttrs, + List *remote_conds, + List **params_list, + List *returningList, + List **retrieved_attrs); extern void deparseDeleteSql(StringInfo buf, PlannerInfo *root, Index rtindex, Relation rel, List *returningList, List **retrieved_attrs); extern void deparseDirectDeleteSql(StringInfo buf, PlannerInfo *root, - Index rtindex, Relation rel, - List *remote_conds, - List **params_list, - List *returningList, - List **retrieved_attrs); + Index rtindex, Relation rel, + List *remote_conds, + List **params_list, + List *returningList, + List **retrieved_attrs); extern void deparseAnalyzeSizeSql(StringInfo buf, Relation rel); extern void deparseAnalyzeSql(StringInfo buf, Relation rel, List **retrieved_attrs); diff --git a/contrib/sslinfo/sslinfo.c b/contrib/sslinfo/sslinfo.c index 237af6cfd2..82a4c1bd70 100644 --- a/contrib/sslinfo/sslinfo.c +++ b/contrib/sslinfo/sslinfo.c @@ -494,8 +494,8 @@ ssl_extension_info(PG_FUNCTION_ARGS) if (nid == NID_undef) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("unknown OpenSSL extension in certificate at position %d", - call_cntr))); + errmsg("unknown OpenSSL extension in certificate at position %d", + call_cntr))); values[0] = CStringGetTextDatum(OBJ_nid2sn(nid)); nulls[0] = false; diff --git a/contrib/test_decoding/test_decoding.c b/contrib/test_decoding/test_decoding.c index 3336e1e16e..c3508f0e13 100644 --- 
a/contrib/test_decoding/test_decoding.c +++ b/contrib/test_decoding/test_decoding.c @@ -65,9 +65,9 @@ static void pg_decode_change(LogicalDecodingContext *ctx, static bool pg_decode_filter(LogicalDecodingContext *ctx, RepOriginId origin_id); static void pg_decode_message(LogicalDecodingContext *ctx, - ReorderBufferTXN *txn, XLogRecPtr message_lsn, - bool transactional, const char *prefix, - Size sz, const char *message); + ReorderBufferTXN *txn, XLogRecPtr message_lsn, + bool transactional, const char *prefix, + Size sz, const char *message); void _PG_init(void) diff --git a/src/backend/access/brin/brin_xlog.c b/src/backend/access/brin/brin_xlog.c index 6f3e37cc37..27ba0a97f8 100644 --- a/src/backend/access/brin/brin_xlog.c +++ b/src/backend/access/brin/brin_xlog.c @@ -47,7 +47,7 @@ brin_xlog_insert_update(XLogReaderState *record, { XLogRecPtr lsn = record->EndRecPtr; Buffer buffer; - BlockNumber regpgno; + BlockNumber regpgno; Page page; XLogRedoAction action; diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c index cdf074fc98..ba1f3aafed 100644 --- a/src/backend/access/common/reloptions.c +++ b/src/backend/access/common/reloptions.c @@ -101,7 +101,8 @@ static relopt_int intRelOpts[] = "fillfactor", "Packs table pages only to this percentage", RELOPT_KIND_HEAP, - ShareUpdateExclusiveLock /* since it applies only to later inserts */ + ShareUpdateExclusiveLock /* since it applies only to later + * inserts */ }, HEAP_DEFAULT_FILLFACTOR, HEAP_MIN_FILLFACTOR, 100 }, @@ -110,7 +111,8 @@ static relopt_int intRelOpts[] = "fillfactor", "Packs btree index pages only to this percentage", RELOPT_KIND_BTREE, - ShareUpdateExclusiveLock /* since it applies only to later inserts */ + ShareUpdateExclusiveLock /* since it applies only to later + * inserts */ }, BTREE_DEFAULT_FILLFACTOR, BTREE_MIN_FILLFACTOR, 100 }, @@ -119,7 +121,8 @@ static relopt_int intRelOpts[] = "fillfactor", "Packs hash index pages only to this percentage", RELOPT_KIND_HASH, - ShareUpdateExclusiveLock /* since it applies only to later inserts */ + ShareUpdateExclusiveLock /* since it applies only to later + * inserts */ }, HASH_DEFAULT_FILLFACTOR, HASH_MIN_FILLFACTOR, 100 }, @@ -128,7 +131,8 @@ static relopt_int intRelOpts[] = "fillfactor", "Packs gist index pages only to this percentage", RELOPT_KIND_GIST, - ShareUpdateExclusiveLock /* since it applies only to later inserts */ + ShareUpdateExclusiveLock /* since it applies only to later + * inserts */ }, GIST_DEFAULT_FILLFACTOR, GIST_MIN_FILLFACTOR, 100 }, @@ -137,7 +141,8 @@ static relopt_int intRelOpts[] = "fillfactor", "Packs spgist index pages only to this percentage", RELOPT_KIND_SPGIST, - ShareUpdateExclusiveLock /* since it applies only to later inserts */ + ShareUpdateExclusiveLock /* since it applies only to later + * inserts */ }, SPGIST_DEFAULT_FILLFACTOR, SPGIST_MIN_FILLFACTOR, 100 }, @@ -1475,8 +1480,8 @@ tablespace_reloptions(Datum reloptions, bool validate) LOCKMODE AlterTableGetRelOptionsLockLevel(List *defList) { - LOCKMODE lockmode = NoLock; - ListCell *cell; + LOCKMODE lockmode = NoLock; + ListCell *cell; if (defList == NIL) return AccessExclusiveLock; @@ -1486,8 +1491,8 @@ AlterTableGetRelOptionsLockLevel(List *defList) foreach(cell, defList) { - DefElem *def = (DefElem *) lfirst(cell); - int i; + DefElem *def = (DefElem *) lfirst(cell); + int i; for (i = 0; relOpts[i]; i++) { diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c index 5cf737f621..59a63f28d0 100644 --- 
a/src/backend/access/gin/ginfast.c +++ b/src/backend/access/gin/ginfast.c @@ -524,7 +524,7 @@ shiftList(Relation index, Buffer metabuffer, BlockNumber newHead, int64 nDeletedHeapTuples = 0; ginxlogDeleteListPages data; Buffer buffers[GIN_NDELETE_AT_ONCE]; - BlockNumber freespace[GIN_NDELETE_AT_ONCE]; + BlockNumber freespace[GIN_NDELETE_AT_ONCE]; data.ndeleted = 0; while (data.ndeleted < GIN_NDELETE_AT_ONCE && blknoToDelete != newHead) @@ -745,30 +745,29 @@ ginInsertCleanup(GinState *ginstate, bool full_clean, bool inVacuum = (stats == NULL); /* - * We would like to prevent concurrent cleanup process. For - * that we will lock metapage in exclusive mode using LockPage() - * call. Nobody other will use that lock for metapage, so - * we keep possibility of concurrent insertion into pending list + * We would like to prevent concurrent cleanup process. For that we will + * lock metapage in exclusive mode using LockPage() call. Nobody other + * will use that lock for metapage, so we keep possibility of concurrent + * insertion into pending list */ if (inVacuum) { /* - * We are called from [auto]vacuum/analyze or - * gin_clean_pending_list() and we would like to wait - * concurrent cleanup to finish. + * We are called from [auto]vacuum/analyze or gin_clean_pending_list() + * and we would like to wait concurrent cleanup to finish. */ LockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock); workMemory = (IsAutoVacuumWorkerProcess() && autovacuum_work_mem != -1) ? - autovacuum_work_mem : maintenance_work_mem; + autovacuum_work_mem : maintenance_work_mem; } else { /* - * We are called from regular insert and if we see - * concurrent cleanup just exit in hope that concurrent - * process will clean up pending list. + * We are called from regular insert and if we see concurrent cleanup + * just exit in hope that concurrent process will clean up pending + * list. */ if (!ConditionalLockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock)) return; @@ -829,9 +828,10 @@ ginInsertCleanup(GinState *ginstate, bool full_clean, Assert(!GinPageIsDeleted(page)); /* - * Are we walk through the page which as we remember was a tail when we - * start our cleanup? But if caller asks us to clean up whole pending - * list then ignore old tail, we will work until list becomes empty. + * Are we walk through the page which as we remember was a tail when + * we start our cleanup? But if caller asks us to clean up whole + * pending list then ignore old tail, we will work until list becomes + * empty. */ if (blkno == blknoFinish && full_clean == false) cleanupFinish = true; @@ -917,8 +917,8 @@ ginInsertCleanup(GinState *ginstate, bool full_clean, * locking */ /* - * remove read pages from pending list, at this point all - * content of read pages is in regular structure + * remove read pages from pending list, at this point all content + * of read pages is in regular structure */ shiftList(index, metabuffer, blkno, fill_fsm, stats); @@ -961,9 +961,9 @@ ginInsertCleanup(GinState *ginstate, bool full_clean, ReleaseBuffer(metabuffer); /* - * As pending list pages can have a high churn rate, it is - * desirable to recycle them immediately to the FreeSpace Map when - * ordinary backends clean the list. + * As pending list pages can have a high churn rate, it is desirable to + * recycle them immediately to the FreeSpace Map when ordinary backends + * clean the list. 
*/ if (fsm_vac && fill_fsm) IndexFreeSpaceMapVacuum(index); @@ -989,7 +989,7 @@ gin_clean_pending_list(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("recovery is in progress"), - errhint("GIN pending list cannot be cleaned up during recovery."))); + errhint("GIN pending list cannot be cleaned up during recovery."))); /* Must be a GIN index */ if (indexRel->rd_rel->relkind != RELKIND_INDEX || diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c index 7a9c67aca8..9f784bf48d 100644 --- a/src/backend/access/gin/gininsert.c +++ b/src/backend/access/gin/gininsert.c @@ -281,7 +281,7 @@ ginBuildCallback(Relation index, HeapTuple htup, Datum *values, &htup->t_self); /* If we've maxed out our available memory, dump everything to the index */ - if (buildstate->accum.allocatedMemory >= (Size)maintenance_work_mem * 1024L) + if (buildstate->accum.allocatedMemory >= (Size) maintenance_work_mem * 1024L) { ItemPointerData *list; Datum key; diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c index 1934c37534..c258478f23 100644 --- a/src/backend/access/gin/ginvacuum.c +++ b/src/backend/access/gin/ginvacuum.c @@ -540,8 +540,10 @@ ginbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, { /* Yes, so initialize stats to zeroes */ stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult)); + /* - * and cleanup any pending inserts */ + * and cleanup any pending inserts + */ ginInsertCleanup(&gvs.ginstate, !IsAutoVacuumWorkerProcess(), false, stats); } diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index a29088728d..fdf0c5a5cf 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -1498,8 +1498,9 @@ static void gistvacuumpage(Relation rel, Page page, Buffer buffer) { OffsetNumber deletable[MaxIndexTuplesPerPage]; - int ndeletable = 0; - OffsetNumber offnum, maxoff; + int ndeletable = 0; + OffsetNumber offnum, + maxoff; Assert(GistPageIsLeaf(page)); diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c index affd63535a..5ba7d0a793 100644 --- a/src/backend/access/gist/gistget.c +++ b/src/backend/access/gist/gistget.c @@ -36,13 +36,13 @@ static void gistkillitems(IndexScanDesc scan) { - GISTScanOpaque so = (GISTScanOpaque) scan->opaque; - Buffer buffer; - Page page; - OffsetNumber offnum; - ItemId iid; - int i; - bool killedsomething = false; + GISTScanOpaque so = (GISTScanOpaque) scan->opaque; + Buffer buffer; + Page page; + OffsetNumber offnum; + ItemId iid; + int i; + bool killedsomething = false; Assert(so->curBlkno != InvalidBlockNumber); Assert(!XLogRecPtrIsInvalid(so->curPageLSN)); @@ -57,21 +57,22 @@ gistkillitems(IndexScanDesc scan) page = BufferGetPage(buffer); /* - * If page LSN differs it means that the page was modified since the last read. - * killedItems could be not valid so LP_DEAD hints applying is not safe. + * If page LSN differs it means that the page was modified since the last + * read. killedItems could be not valid so LP_DEAD hints applying is not + * safe. */ - if(PageGetLSN(page) != so->curPageLSN) + if (PageGetLSN(page) != so->curPageLSN) { UnlockReleaseBuffer(buffer); - so->numKilled = 0; /* reset counter */ + so->numKilled = 0; /* reset counter */ return; } Assert(GistPageIsLeaf(page)); /* - * Mark all killedItems as dead. We need no additional recheck, - * because, if page was modified, pageLSN must have changed. + * Mark all killedItems as dead. 
We need no additional recheck, because, + * if page was modified, pageLSN must have changed. */ for (i = 0; i < so->numKilled; i++) { @@ -390,7 +391,7 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances, maxoff = PageGetMaxOffsetNumber(page); for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) { - ItemId iid = PageGetItemId(page, i); + ItemId iid = PageGetItemId(page, i); IndexTuple it; bool match; bool recheck; @@ -400,10 +401,11 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances, * If the scan specifies not to return killed tuples, then we treat a * killed tuple as not passing the qual. */ - if(scan->ignore_killed_tuples && ItemIdIsDead(iid)) + if (scan->ignore_killed_tuples && ItemIdIsDead(iid)) continue; it = (IndexTuple) PageGetItem(page, iid); + /* * Must call gistindex_keytest in tempCxt, and clean up any leftover * junk afterward. @@ -665,11 +667,11 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir) if (so->killedItems == NULL) { MemoryContext oldCxt = - MemoryContextSwitchTo(so->giststate->scanCxt); + MemoryContextSwitchTo(so->giststate->scanCxt); so->killedItems = (OffsetNumber *) palloc(MaxIndexTuplesPerPage - * sizeof(OffsetNumber)); + * sizeof(OffsetNumber)); MemoryContextSwitchTo(oldCxt); } @@ -702,11 +704,11 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir) if (so->killedItems == NULL) { MemoryContext oldCxt = - MemoryContextSwitchTo(so->giststate->scanCxt); + MemoryContextSwitchTo(so->giststate->scanCxt); so->killedItems = (OffsetNumber *) palloc(MaxIndexTuplesPerPage - * sizeof(OffsetNumber)); + * sizeof(OffsetNumber)); MemoryContextSwitchTo(oldCxt); } diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c index 328e54b85e..6f07cd8d46 100644 --- a/src/backend/access/gist/gistscan.c +++ b/src/backend/access/gist/gistscan.c @@ -230,8 +230,8 @@ gistrescan(IndexScanDesc scan, ScanKey key, int nkeys, ScanKey skey = scan->keyData + i; /* - * Copy consistent support function to ScanKey structure - * instead of function implementing filtering operator. + * Copy consistent support function to ScanKey structure instead + * of function implementing filtering operator. */ fmgr_info_copy(&(skey->sk_func), &(so->giststate->consistentFn[skey->sk_attno - 1]), @@ -303,8 +303,8 @@ gistrescan(IndexScanDesc scan, ScanKey key, int nkeys, so->orderByTypes[i] = get_func_rettype(skey->sk_func.fn_oid); /* - * Copy distance support function to ScanKey structure - * instead of function implementing ordering operator. + * Copy distance support function to ScanKey structure instead of + * function implementing ordering operator. 
*/ fmgr_info_copy(&(skey->sk_func), finfo, so->giststate->scanCxt); diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 4041f9cc5a..6db6241097 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -1687,7 +1687,7 @@ heap_parallelscan_nextpage(HeapScanDesc scan) { BlockNumber page = InvalidBlockNumber; BlockNumber sync_startpage = InvalidBlockNumber; - BlockNumber report_page = InvalidBlockNumber; + BlockNumber report_page = InvalidBlockNumber; ParallelHeapScanDesc parallel_scan; Assert(scan->rs_parallel); diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c index 6b850920c7..c90fb71965 100644 --- a/src/backend/access/heap/hio.c +++ b/src/backend/access/heap/hio.c @@ -178,7 +178,7 @@ static void RelationAddExtraBlocks(Relation relation, BulkInsertState bistate) { Page page; - BlockNumber blockNum = InvalidBlockNumber, + BlockNumber blockNum = InvalidBlockNumber, firstBlock = InvalidBlockNumber; int extraBlocks = 0; int lockWaiters = 0; @@ -191,10 +191,10 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate) return; /* - * It might seem like multiplying the number of lock waiters by as much - * as 20 is too aggressive, but benchmarking revealed that smaller numbers - * were insufficient. 512 is just an arbitrary cap to prevent pathological - * results. + * It might seem like multiplying the number of lock waiters by as much as + * 20 is too aggressive, but benchmarking revealed that smaller numbers + * were insufficient. 512 is just an arbitrary cap to prevent + * pathological results. */ extraBlocks = Min(512, lockWaiters * 20); @@ -225,10 +225,10 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate) } /* - * Updating the upper levels of the free space map is too expensive - * to do for every block, but it's worth doing once at the end to make - * sure that subsequent insertion activity sees all of those nifty free - * pages we just inserted. + * Updating the upper levels of the free space map is too expensive to do + * for every block, but it's worth doing once at the end to make sure that + * subsequent insertion activity sees all of those nifty free pages we + * just inserted. * * Note that we're using the freespace value that was reported for the * last block we added as if it were the freespace value for every block @@ -547,8 +547,8 @@ loop: } /* - * In addition to whatever extension we performed above, we always add - * at least one block to satisfy our own request. + * In addition to whatever extension we performed above, we always add at + * least one block to satisfy our own request. * * XXX This does an lseek - rather expensive - but at the moment it is the * only way to accurately determine how many blocks are in a relation. 
Is diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c index eb7ae8f226..6ff92516ed 100644 --- a/src/backend/access/heap/pruneheap.c +++ b/src/backend/access/heap/pruneheap.c @@ -105,8 +105,8 @@ heap_page_prune_opt(Relation relation, Buffer buffer) OldestXmin = RecentGlobalXmin; else OldestXmin = - TransactionIdLimitedForOldSnapshots(RecentGlobalDataXmin, - relation); + TransactionIdLimitedForOldSnapshots(RecentGlobalDataXmin, + relation); Assert(TransactionIdIsValid(OldestXmin)); diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c index 05422f1079..b472d31a03 100644 --- a/src/backend/access/heap/visibilitymap.c +++ b/src/backend/access/heap/visibilitymap.c @@ -272,7 +272,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk); uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk); Page page; - uint8 *map; + uint8 *map; #ifdef TRACE_VISIBILITYMAP elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk); @@ -291,7 +291,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, elog(ERROR, "wrong VM buffer passed to visibilitymap_set"); page = BufferGetPage(vmBuf); - map = (uint8 *)PageGetContents(page); + map = (uint8 *) PageGetContents(page); LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE); if (flags != (map[mapByte] >> mapOffset & VISIBILITYMAP_VALID_BITS)) diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c index ecc43e51c9..ef69290b6c 100644 --- a/src/backend/access/nbtree/nbtinsert.c +++ b/src/backend/access/nbtree/nbtinsert.c @@ -395,7 +395,8 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel, * Check for a conflict-in as we would if we were going to * write to this page. We aren't actually going to write, * but we want a chance to report SSI conflicts that would - * otherwise be masked by this unique constraint violation. + * otherwise be masked by this unique constraint + * violation. */ CheckForSerializableConflictIn(rel, NULL, buf); diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index 3a7942997c..1f479735c2 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -813,8 +813,8 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, /* * Check to see if we need to issue one final WAL record for this index, - * which may be needed for correctness on a hot standby node when - * non-MVCC index scans could take place. + * which may be needed for correctness on a hot standby node when non-MVCC + * index scans could take place. * * If the WAL is replayed in hot standby, the replay process needs to get * cleanup locks on all index leaf pages, just as we've been doing here. @@ -1025,13 +1025,13 @@ restart: if (ndeletable > 0) { /* - * Notice that the issued XLOG_BTREE_VACUUM WAL record includes all - * information to the replay code to allow it to get a cleanup lock - * on all pages between the previous lastBlockVacuumed and this page. - * This ensures that WAL replay locks all leaf pages at some point, - * which is important should non-MVCC scans be requested. - * This is currently unused on standby, but we record it anyway, so - * that the WAL contains the required information. + * Notice that the issued XLOG_BTREE_VACUUM WAL record includes + * all information to the replay code to allow it to get a cleanup + * lock on all pages between the previous lastBlockVacuumed and + * this page. 
This ensures that WAL replay locks all leaf pages at + * some point, which is important should non-MVCC scans be + * requested. This is currently unused on standby, but we record + * it anyway, so that the WAL contains the required information. * * Since we can visit leaf pages out-of-order when recursing, * replay might end up locking such pages an extra time, but it diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c index f8691bbc44..c536e22432 100644 --- a/src/backend/access/nbtree/nbtxlog.c +++ b/src/backend/access/nbtree/nbtxlog.c @@ -392,15 +392,15 @@ btree_xlog_vacuum(XLogReaderState *record) xl_btree_vacuum *xlrec = (xl_btree_vacuum *) XLogRecGetData(record); /* - * This section of code is thought to be no longer needed, after - * analysis of the calling paths. It is retained to allow the code - * to be reinstated if a flaw is revealed in that thinking. + * This section of code is thought to be no longer needed, after analysis + * of the calling paths. It is retained to allow the code to be reinstated + * if a flaw is revealed in that thinking. * * If we are running non-MVCC scans using this index we need to do some * additional work to ensure correctness, which is known as a "pin scan" * described in more detail in next paragraphs. We used to do the extra - * work in all cases, whereas we now avoid that work in most cases. - * If lastBlockVacuumed is set to InvalidBlockNumber then we skip the + * work in all cases, whereas we now avoid that work in most cases. If + * lastBlockVacuumed is set to InvalidBlockNumber then we skip the * additional work required for the pin scan. * * Avoiding this extra work is important since it requires us to touch diff --git a/src/backend/access/rmgrdesc/genericdesc.c b/src/backend/access/rmgrdesc/genericdesc.c index 0796bb8741..22f81570a5 100644 --- a/src/backend/access/rmgrdesc/genericdesc.c +++ b/src/backend/access/rmgrdesc/genericdesc.c @@ -29,8 +29,8 @@ generic_desc(StringInfo buf, XLogReaderState *record) while (ptr < end) { - OffsetNumber offset, - length; + OffsetNumber offset, + length; memcpy(&offset, ptr, sizeof(offset)); ptr += sizeof(offset); diff --git a/src/backend/access/rmgrdesc/logicalmsgdesc.c b/src/backend/access/rmgrdesc/logicalmsgdesc.c index b194e1424d..525826efd3 100644 --- a/src/backend/access/rmgrdesc/logicalmsgdesc.c +++ b/src/backend/access/rmgrdesc/logicalmsgdesc.c @@ -26,7 +26,7 @@ logicalmsg_desc(StringInfo buf, XLogReaderState *record) xl_logical_message *xlrec = (xl_logical_message *) rec; appendStringInfo(buf, "%s message size %zu bytes", - xlrec->transactional ? "transactional" : "nontransactional", + xlrec->transactional ? 
"transactional" : "nontransactional", xlrec->message_size); } } diff --git a/src/backend/access/rmgrdesc/standbydesc.c b/src/backend/access/rmgrdesc/standbydesc.c index e6172ccdf7..13797a3d2f 100644 --- a/src/backend/access/rmgrdesc/standbydesc.c +++ b/src/backend/access/rmgrdesc/standbydesc.c @@ -100,7 +100,7 @@ standby_desc_invalidations(StringInfo buf, Oid dbId, Oid tsId, bool relcacheInitFileInval) { - int i; + int i; if (relcacheInitFileInval) appendStringInfo(buf, "; relcache init file inval dbid %u tsid %u", diff --git a/src/backend/access/rmgrdesc/xactdesc.c b/src/backend/access/rmgrdesc/xactdesc.c index 6f07c5cfaa..91d27d0654 100644 --- a/src/backend/access/rmgrdesc/xactdesc.c +++ b/src/backend/access/rmgrdesc/xactdesc.c @@ -205,8 +205,8 @@ xact_desc_commit(StringInfo buf, uint8 info, xl_xact_commit *xlrec, RepOriginId if (parsed.nmsgs > 0) { standby_desc_invalidations( - buf, parsed.nmsgs, parsed.msgs, parsed.dbId, parsed.tsId, - XactCompletionRelcacheInitFileInval(parsed.xinfo)); + buf, parsed.nmsgs, parsed.msgs, parsed.dbId, parsed.tsId, + XactCompletionRelcacheInitFileInval(parsed.xinfo)); } if (XactCompletionForceSyncCommit(parsed.xinfo)) diff --git a/src/backend/access/rmgrdesc/xlogdesc.c b/src/backend/access/rmgrdesc/xlogdesc.c index 022bd44eff..62ed1dc04b 100644 --- a/src/backend/access/rmgrdesc/xlogdesc.c +++ b/src/backend/access/rmgrdesc/xlogdesc.c @@ -26,8 +26,8 @@ const struct config_enum_entry wal_level_options[] = { {"minimal", WAL_LEVEL_MINIMAL, false}, {"replica", WAL_LEVEL_REPLICA, false}, - {"archive", WAL_LEVEL_REPLICA, true}, /* deprecated */ - {"hot_standby", WAL_LEVEL_REPLICA, true}, /* deprecated */ + {"archive", WAL_LEVEL_REPLICA, true}, /* deprecated */ + {"hot_standby", WAL_LEVEL_REPLICA, true}, /* deprecated */ {"logical", WAL_LEVEL_LOGICAL, false}, {NULL, 0, false} }; diff --git a/src/backend/access/transam/commit_ts.c b/src/backend/access/transam/commit_ts.c index 17134396a4..e330105217 100644 --- a/src/backend/access/transam/commit_ts.c +++ b/src/backend/access/transam/commit_ts.c @@ -92,7 +92,7 @@ typedef struct CommitTimestampShared { TransactionId xidLastCommit; CommitTimestampEntry dataLastCommit; - bool commitTsActive; + bool commitTsActive; } CommitTimestampShared; CommitTimestampShared *commitTsShared; @@ -153,9 +153,9 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids, * No-op if the module is not active. * * An unlocked read here is fine, because in a standby (the only place - * where the flag can change in flight) this routine is only called by - * the recovery process, which is also the only process which can change - * the flag. + * where the flag can change in flight) this routine is only called by the + * recovery process, which is also the only process which can change the + * flag. */ if (!commitTsShared->commitTsActive) return; @@ -767,8 +767,8 @@ ExtendCommitTs(TransactionId newestXact) int pageno; /* - * Nothing to do if module not enabled. Note we do an unlocked read of the - * flag here, which is okay because this routine is only called from + * Nothing to do if module not enabled. Note we do an unlocked read of + * the flag here, which is okay because this routine is only called from * GetNewTransactionId, which is never called in a standby. 
*/ Assert(!InRecovery); @@ -855,7 +855,7 @@ AdvanceOldestCommitTsXid(TransactionId oldestXact) { LWLockAcquire(CommitTsLock, LW_EXCLUSIVE); if (ShmemVariableCache->oldestCommitTsXid != InvalidTransactionId && - TransactionIdPrecedes(ShmemVariableCache->oldestCommitTsXid, oldestXact)) + TransactionIdPrecedes(ShmemVariableCache->oldestCommitTsXid, oldestXact)) ShmemVariableCache->oldestCommitTsXid = oldestXact; LWLockRelease(CommitTsLock); } diff --git a/src/backend/access/transam/generic_xlog.c b/src/backend/access/transam/generic_xlog.c index c33e7beb6a..1926d98de0 100644 --- a/src/backend/access/transam/generic_xlog.c +++ b/src/backend/access/transam/generic_xlog.c @@ -52,9 +52,8 @@ typedef struct Buffer buffer; /* registered buffer */ int flags; /* flags for this buffer */ int deltaLen; /* space consumed in delta field */ - char *image; /* copy of page image for modification, - * do not do it in-place to have aligned - * memory chunk */ + char *image; /* copy of page image for modification, do not + * do it in-place to have aligned memory chunk */ char delta[MAX_DELTA_SIZE]; /* delta between page images */ } PageData; diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index a677af0049..7bccca8a17 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -988,8 +988,8 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) char *oldest_datname = get_database_name(oldest_datoid); /* - * Immediately kick autovacuum into action as we're already - * in ERROR territory. + * Immediately kick autovacuum into action as we're already in + * ERROR territory. */ SendPostmasterSignal(PMSIGNAL_START_AUTOVAC_LAUNCHER); @@ -1134,8 +1134,8 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg_plural("database with OID %u must be vacuumed before %d more multixact member is used", "database with OID %u must be vacuumed before %d more multixact members are used", - MultiXactState->offsetStopLimit - nextOffset + nmembers, - MultiXactState->oldestMultiXactDB, + MultiXactState->offsetStopLimit - nextOffset + nmembers, + MultiXactState->oldestMultiXactDB, MultiXactState->offsetStopLimit - nextOffset + nmembers), errhint("Execute a database-wide VACUUM in that database with reduced vacuum_multixact_freeze_min_age and vacuum_multixact_freeze_table_age settings."))); diff --git a/src/backend/access/transam/parallel.c b/src/backend/access/transam/parallel.c index 934dba88c6..74a483e0fd 100644 --- a/src/backend/access/transam/parallel.c +++ b/src/backend/access/transam/parallel.c @@ -134,9 +134,9 @@ CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers) nworkers = 0; /* - * If we are running under serializable isolation, we can't use - * parallel workers, at least not until somebody enhances that mechanism - * to be parallel-aware. + * If we are running under serializable isolation, we can't use parallel + * workers, at least not until somebody enhances that mechanism to be + * parallel-aware. */ if (IsolationIsSerializable()) nworkers = 0; @@ -646,9 +646,9 @@ DestroyParallelContext(ParallelContext *pcxt) } /* - * We can't finish transaction commit or abort until all of the - * workers have exited. This means, in particular, that we can't respond - * to interrupts at this stage. + * We can't finish transaction commit or abort until all of the workers + * have exited. This means, in particular, that we can't respond to + * interrupts at this stage. 
*/ HOLD_INTERRUPTS(); WaitForParallelWorkersToExit(pcxt); @@ -918,7 +918,7 @@ ParallelWorkerMain(Datum main_arg) if (toc == NULL) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("invalid magic number in dynamic shared memory segment"))); + errmsg("invalid magic number in dynamic shared memory segment"))); /* Look up fixed parallel state. */ fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED); @@ -958,9 +958,9 @@ ParallelWorkerMain(Datum main_arg) */ /* - * Join locking group. We must do this before anything that could try - * to acquire a heavyweight lock, because any heavyweight locks acquired - * to this point could block either directly against the parallel group + * Join locking group. We must do this before anything that could try to + * acquire a heavyweight lock, because any heavyweight locks acquired to + * this point could block either directly against the parallel group * leader or against some process which in turn waits for a lock that * conflicts with the parallel group leader, causing an undetected * deadlock. (If we can't join the lock group, the leader has gone away, diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c index 36a011cc94..bbae5847f2 100644 --- a/src/backend/access/transam/slru.c +++ b/src/backend/access/transam/slru.c @@ -152,7 +152,7 @@ SimpleLruShmemSize(int nslots, int nlsns) sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */ sz += MAXALIGN(nslots * sizeof(int)); /* page_number[] */ sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */ - sz += MAXALIGN(nslots * sizeof(LWLockPadded)); /* buffer_locks[] */ + sz += MAXALIGN(nslots * sizeof(LWLockPadded)); /* buffer_locks[] */ if (nlsns > 0) sz += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr)); /* group_lsn[] */ @@ -224,7 +224,7 @@ SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns, for (slotno = 0; slotno < nslots; slotno++) { LWLockInitialize(&shared->buffer_locks[slotno].lock, - shared->lwlock_tranche_id); + shared->lwlock_tranche_id); shared->page_buffer[slotno] = ptr; shared->page_status[slotno] = SLRU_PAGE_EMPTY; diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c index c02046c073..908fe2d533 100644 --- a/src/backend/access/transam/subtrans.c +++ b/src/backend/access/transam/subtrans.c @@ -257,7 +257,7 @@ StartupSUBTRANS(TransactionId oldestActiveXID) startPage++; /* must account for wraparound */ if (startPage > TransactionIdToPage(MaxTransactionId)) - startPage=0; + startPage = 0; } (void) ZeroSUBTRANSPage(startPage); diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c index a65048b683..06aedd40bf 100644 --- a/src/backend/access/transam/twophase.c +++ b/src/backend/access/transam/twophase.c @@ -140,13 +140,13 @@ typedef struct GlobalTransactionData TimestampTz prepared_at; /* time of preparation */ /* - * Note that we need to keep track of two LSNs for each GXACT. - * We keep track of the start LSN because this is the address we must - * use to read state data back from WAL when committing a prepared GXACT. - * We keep track of the end LSN because that is the LSN we need to wait - * for prior to commit. + * Note that we need to keep track of two LSNs for each GXACT. We keep + * track of the start LSN because this is the address we must use to read + * state data back from WAL when committing a prepared GXACT. We keep + * track of the end LSN because that is the LSN we need to wait for prior + * to commit. 
*/ - XLogRecPtr prepare_start_lsn; /* XLOG offset of prepare record start */ + XLogRecPtr prepare_start_lsn; /* XLOG offset of prepare record start */ XLogRecPtr prepare_end_lsn; /* XLOG offset of prepare record end */ Oid owner; /* ID of user that executed the xact */ @@ -980,7 +980,7 @@ StartPrepare(GlobalTransaction gxact) hdr.nabortrels = smgrGetPendingDeletes(false, &abortrels); hdr.ninvalmsgs = xactGetCommittedInvalidationMessages(&invalmsgs, &hdr.initfileinval); - hdr.gidlen = strlen(gxact->gid) + 1; /* Include '\0' */ + hdr.gidlen = strlen(gxact->gid) + 1; /* Include '\0' */ save_state_data(&hdr, sizeof(TwoPhaseFileHeader)); save_state_data(gxact->gid, hdr.gidlen); @@ -1259,28 +1259,28 @@ XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"), - errdetail("Failed while allocating an XLog reading processor."))); + errdetail("Failed while allocating an XLog reading processor."))); record = XLogReadRecord(xlogreader, lsn, &errormsg); if (record == NULL) ereport(ERROR, (errcode_for_file_access(), errmsg("could not read two-phase state from xlog at %X/%X", - (uint32) (lsn >> 32), - (uint32) lsn))); + (uint32) (lsn >> 32), + (uint32) lsn))); if (XLogRecGetRmid(xlogreader) != RM_XACT_ID || (XLogRecGetInfo(xlogreader) & XLOG_XACT_OPMASK) != XLOG_XACT_PREPARE) ereport(ERROR, (errcode_for_file_access(), errmsg("expected two-phase state data is not present in xlog at %X/%X", - (uint32) (lsn >> 32), - (uint32) lsn))); + (uint32) (lsn >> 32), + (uint32) lsn))); if (len != NULL) *len = XLogRecGetDataLen(xlogreader); - *buf = palloc(sizeof(char)*XLogRecGetDataLen(xlogreader)); + *buf = palloc(sizeof(char) * XLogRecGetDataLen(xlogreader)); memcpy(*buf, XLogRecGetData(xlogreader), sizeof(char) * XLogRecGetDataLen(xlogreader)); XLogReaderFree(xlogreader); @@ -1347,10 +1347,9 @@ FinishPreparedTransaction(const char *gid, bool isCommit) xid = pgxact->xid; /* - * Read and validate 2PC state data. - * State data will typically be stored in WAL files if the LSN is after the - * last checkpoint record, or moved to disk if for some reason they have - * lived for a long time. + * Read and validate 2PC state data. State data will typically be stored + * in WAL files if the LSN is after the last checkpoint record, or moved + * to disk if for some reason they have lived for a long time. */ if (gxact->ondisk) buf = ReadTwoPhaseFile(xid, true); @@ -1605,22 +1604,20 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon) TRACE_POSTGRESQL_TWOPHASE_CHECKPOINT_START(); /* - * We are expecting there to be zero GXACTs that need to be - * copied to disk, so we perform all I/O while holding - * TwoPhaseStateLock for simplicity. This prevents any new xacts - * from preparing while this occurs, which shouldn't be a problem - * since the presence of long-lived prepared xacts indicates the - * transaction manager isn't active. + * We are expecting there to be zero GXACTs that need to be copied to + * disk, so we perform all I/O while holding TwoPhaseStateLock for + * simplicity. This prevents any new xacts from preparing while this + * occurs, which shouldn't be a problem since the presence of long-lived + * prepared xacts indicates the transaction manager isn't active. * - * It's also possible to move I/O out of the lock, but on - * every error we should check whether somebody committed our - * transaction in different backend. Let's leave this optimisation - * for future, if somebody will spot that this place cause - * bottleneck. 
+ * It's also possible to move I/O out of the lock, but on every error we + * should check whether somebody committed our transaction in different + * backend. Let's leave this optimisation for future, if somebody will + * spot that this place cause bottleneck. * - * Note that it isn't possible for there to be a GXACT with - * a prepare_end_lsn set prior to the last checkpoint yet - * is marked invalid, because of the efforts with delayChkpt. + * Note that it isn't possible for there to be a GXACT with a + * prepare_end_lsn set prior to the last checkpoint yet is marked invalid, + * because of the efforts with delayChkpt. */ LWLockAcquire(TwoPhaseStateLock, LW_SHARED); for (i = 0; i < TwoPhaseState->numPrepXacts; i++) @@ -1633,7 +1630,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon) gxact->prepare_end_lsn <= redo_horizon) { char *buf; - int len; + int len; XlogReadTwoPhaseData(gxact->prepare_start_lsn, &buf, &len); RecreateTwoPhaseFile(pgxact->xid, buf, len); @@ -1920,7 +1917,7 @@ RecoverPreparedTransactions(void) TwoPhaseFileHeader *hdr; TransactionId *subxids; GlobalTransaction gxact; - const char *gid; + const char *gid; int i; xid = (TransactionId) strtoul(clde->d_name, NULL, 16); diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index 95690ff36c..23f36ead7e 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -1166,19 +1166,19 @@ RecordTransactionCommit(void) /* * Transactions without an assigned xid can contain invalidation * messages (e.g. explicit relcache invalidations or catcache - * invalidations for inplace updates); standbys need to process - * those. We can't emit a commit record without an xid, and we don't - * want to force assigning an xid, because that'd be problematic for - * e.g. vacuum. Hence we emit a bespoke record for the - * invalidations. We don't want to use that in case a commit record is - * emitted, so they happen synchronously with commits (besides not - * wanting to emit more WAL recoreds). + * invalidations for inplace updates); standbys need to process those. + * We can't emit a commit record without an xid, and we don't want to + * force assigning an xid, because that'd be problematic for e.g. + * vacuum. Hence we emit a bespoke record for the invalidations. We + * don't want to use that in case a commit record is emitted, so they + * happen synchronously with commits (besides not wanting to emit more + * WAL recoreds). */ if (nmsgs != 0) { LogStandbyInvalidations(nmsgs, invalMessages, RelcacheInitFileInval); - wrote_xlog = true; /* not strictly necessary */ + wrote_xlog = true; /* not strictly necessary */ } /* @@ -1272,8 +1272,8 @@ RecordTransactionCommit(void) * this case, but we don't currently try to do that. It would certainly * cause problems at least in Hot Standby mode, where the * KnownAssignedXids machinery requires tracking every XID assignment. It - * might be OK to skip it only when wal_level < replica, but for now - * we don't.) + * might be OK to skip it only when wal_level < replica, but for now we + * don't.) * * However, if we're doing cleanup of any non-temp rels or committing any * command that wanted to force sync commit, then we must flush XLOG @@ -5486,8 +5486,8 @@ xact_redo_commit(xl_xact_parsed_commit *parsed, /* * If asked by the primary (because someone is waiting for a synchronous - * commit = remote_apply), we will need to ask walreceiver to send a - * reply immediately. 
+ * commit = remote_apply), we will need to ask walreceiver to send a reply + * immediately. */ if (XactCompletionApplyFeedback(parsed->xinfo)) XLogRequestWalReceiverReply(); diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index b473f1914e..e4645a3169 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -5004,9 +5004,9 @@ readRecoveryCommandFile(void) else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("invalid value for recovery parameter \"%s\": \"%s\"", - "recovery_target_action", - item->value), + errmsg("invalid value for recovery parameter \"%s\": \"%s\"", + "recovery_target_action", + item->value), errhint("Valid values are \"pause\", \"promote\", and \"shutdown\"."))); ereport(DEBUG2, @@ -5087,9 +5087,9 @@ readRecoveryCommandFile(void) else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("invalid value for recovery parameter \"%s\": \"%s\"", - "recovery_target", - item->value), + errmsg("invalid value for recovery parameter \"%s\": \"%s\"", + "recovery_target", + item->value), errhint("The only allowed value is \"immediate\"."))); ereport(DEBUG2, (errmsg_internal("recovery_target = '%s'", @@ -5880,8 +5880,8 @@ CheckRequiredParameterValues(void) } /* - * For Hot Standby, the WAL must be generated with 'replica' mode, and - * we must have at least as many backend slots as the primary. + * For Hot Standby, the WAL must be generated with 'replica' mode, and we + * must have at least as many backend slots as the primary. */ if (ArchiveRecoveryRequested && EnableHotStandby) { @@ -6163,26 +6163,26 @@ StartupXLOG(void) * is no use of such file. There is no harm in retaining it, but it * is better to get rid of the map file so that we don't have any * redundant file in data directory and it will avoid any sort of - * confusion. It seems prudent though to just rename the file out - * of the way rather than delete it completely, also we ignore any - * error that occurs in rename operation as even if map file is - * present without backup_label file, it is harmless. + * confusion. It seems prudent though to just rename the file out of + * the way rather than delete it completely, also we ignore any error + * that occurs in rename operation as even if map file is present + * without backup_label file, it is harmless. */ if (stat(TABLESPACE_MAP, &st) == 0) { unlink(TABLESPACE_MAP_OLD); if (durable_rename(TABLESPACE_MAP, TABLESPACE_MAP_OLD, DEBUG1) == 0) ereport(LOG, - (errmsg("ignoring file \"%s\" because no file \"%s\" exists", - TABLESPACE_MAP, BACKUP_LABEL_FILE), - errdetail("File \"%s\" was renamed to \"%s\".", - TABLESPACE_MAP, TABLESPACE_MAP_OLD))); + (errmsg("ignoring file \"%s\" because no file \"%s\" exists", + TABLESPACE_MAP, BACKUP_LABEL_FILE), + errdetail("File \"%s\" was renamed to \"%s\".", + TABLESPACE_MAP, TABLESPACE_MAP_OLD))); else ereport(LOG, - (errmsg("ignoring file \"%s\" because no file \"%s\" exists", - TABLESPACE_MAP, BACKUP_LABEL_FILE), - errdetail("Could not rename file \"%s\" to \"%s\": %m.", - TABLESPACE_MAP, TABLESPACE_MAP_OLD))); + (errmsg("ignoring file \"%s\" because no file \"%s\" exists", + TABLESPACE_MAP, BACKUP_LABEL_FILE), + errdetail("Could not rename file \"%s\" to \"%s\": %m.", + TABLESPACE_MAP, TABLESPACE_MAP_OLD))); } /* @@ -6314,24 +6314,24 @@ StartupXLOG(void) ereport(DEBUG1, (errmsg_internal("redo record is at %X/%X; shutdown %s", (uint32) (checkPoint.redo >> 32), (uint32) checkPoint.redo, - wasShutdown ? "TRUE" : "FALSE"))); + wasShutdown ? 
"TRUE" : "FALSE"))); ereport(DEBUG1, (errmsg_internal("next transaction ID: %u:%u; next OID: %u", - checkPoint.nextXidEpoch, checkPoint.nextXid, - checkPoint.nextOid))); + checkPoint.nextXidEpoch, checkPoint.nextXid, + checkPoint.nextOid))); ereport(DEBUG1, (errmsg_internal("next MultiXactId: %u; next MultiXactOffset: %u", - checkPoint.nextMulti, checkPoint.nextMultiOffset))); + checkPoint.nextMulti, checkPoint.nextMultiOffset))); ereport(DEBUG1, - (errmsg_internal("oldest unfrozen transaction ID: %u, in database %u", - checkPoint.oldestXid, checkPoint.oldestXidDB))); + (errmsg_internal("oldest unfrozen transaction ID: %u, in database %u", + checkPoint.oldestXid, checkPoint.oldestXidDB))); ereport(DEBUG1, (errmsg_internal("oldest MultiXactId: %u, in database %u", - checkPoint.oldestMulti, checkPoint.oldestMultiDB))); + checkPoint.oldestMulti, checkPoint.oldestMultiDB))); ereport(DEBUG1, (errmsg_internal("commit timestamp Xid oldest/newest: %u/%u", - checkPoint.oldestCommitTsXid, - checkPoint.newestCommitTsXid))); + checkPoint.oldestCommitTsXid, + checkPoint.newestCommitTsXid))); if (!TransactionIdIsNormal(checkPoint.nextXid)) ereport(PANIC, (errmsg("invalid next transaction ID"))); @@ -6883,8 +6883,8 @@ StartupXLOG(void) SpinLockRelease(&XLogCtl->info_lck); /* - * If rm_redo called XLogRequestWalReceiverReply, then we - * wake up the receiver so that it notices the updated + * If rm_redo called XLogRequestWalReceiverReply, then we wake + * up the receiver so that it notices the updated * lastReplayedEndRecPtr and sends a reply to the master. */ if (doRequestWalReceiverReply) diff --git a/src/backend/access/transam/xlogfuncs.c b/src/backend/access/transam/xlogfuncs.c index de493fad7a..33383b4dcc 100644 --- a/src/backend/access/transam/xlogfuncs.c +++ b/src/backend/access/transam/xlogfuncs.c @@ -104,8 +104,8 @@ pg_start_backup(PG_FUNCTION_ARGS) MemoryContext oldcontext; /* - * Label file and tablespace map file need to be long-lived, since they - * are read in pg_stop_backup. + * Label file and tablespace map file need to be long-lived, since + * they are read in pg_stop_backup. */ oldcontext = MemoryContextSwitchTo(TopMemoryContext); label_file = makeStringInfo(); @@ -113,7 +113,7 @@ pg_start_backup(PG_FUNCTION_ARGS) MemoryContextSwitchTo(oldcontext); startpoint = do_pg_start_backup(backupidstr, fast, NULL, label_file, - dir, NULL, tblspc_map_file, false, true); + dir, NULL, tblspc_map_file, false, true); nonexclusive_backup_running = true; before_shmem_exit(nonexclusive_base_backup_cleanup, (Datum) 0); @@ -138,8 +138,8 @@ pg_start_backup(PG_FUNCTION_ARGS) * Note: different from CancelBackup which just cancels online backup mode. * * Note: this version is only called to stop an exclusive backup. The function - * pg_stop_backup_v2 (overloaded as pg_stop_backup in SQL) is called to - * stop non-exclusive backups. + * pg_stop_backup_v2 (overloaded as pg_stop_backup in SQL) is called to + * stop non-exclusive backups. * * Permission checking for this function is managed through the normal * GRANT system. @@ -156,10 +156,10 @@ pg_stop_backup(PG_FUNCTION_ARGS) errhint("Did you mean to use pg_stop_backup('f')?"))); /* - * Exclusive backups were typically started in a different connection, - * so don't try to verify that exclusive_backup_running is set in this one. - * Actual verification that an exclusive backup is in fact running is handled - * inside do_pg_stop_backup. 
+ * Exclusive backups were typically started in a different connection, so + * don't try to verify that exclusive_backup_running is set in this one. + * Actual verification that an exclusive backup is in fact running is + * handled inside do_pg_stop_backup. */ stoppoint = do_pg_stop_backup(NULL, true, NULL); @@ -182,16 +182,16 @@ pg_stop_backup(PG_FUNCTION_ARGS) Datum pg_stop_backup_v2(PG_FUNCTION_ARGS) { - ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; + ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; + TupleDesc tupdesc; Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; - Datum values[3]; - bool nulls[3]; + MemoryContext per_query_ctx; + MemoryContext oldcontext; + Datum values[3]; + bool nulls[3]; - bool exclusive = PG_GETARG_BOOL(0); - XLogRecPtr stoppoint; + bool exclusive = PG_GETARG_BOOL(0); + XLogRecPtr stoppoint; /* check to see if caller supports us returning a tuplestore */ if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) @@ -248,9 +248,8 @@ pg_stop_backup_v2(PG_FUNCTION_ARGS) errhint("Did you mean to use pg_stop_backup('t')?"))); /* - * Stop the non-exclusive backup. Return a copy of the backup - * label and tablespace map so they can be written to disk by - * the caller. + * Stop the non-exclusive backup. Return a copy of the backup label + * and tablespace map so they can be written to disk by the caller. */ stoppoint = do_pg_stop_backup(label_file->data, true, NULL); nonexclusive_backup_running = false; @@ -269,7 +268,7 @@ pg_stop_backup_v2(PG_FUNCTION_ARGS) } /* Stoppoint is included on both exclusive and nonexclusive backups */ - values[0] = LSNGetDatum(stoppoint); + values[0] = LSNGetDatum(stoppoint); tuplestore_putvalues(tupstore, tupdesc, values, nulls); tuplestore_donestoring(typstore); diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c index c3aecc7574..dcf747c633 100644 --- a/src/backend/access/transam/xlogreader.c +++ b/src/backend/access/transam/xlogreader.c @@ -322,7 +322,7 @@ XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg) if (total_len < SizeOfXLogRecord) { report_invalid_record(state, - "invalid record length at %X/%X: wanted %u, got %u", + "invalid record length at %X/%X: wanted %u, got %u", (uint32) (RecPtr >> 32), (uint32) RecPtr, (uint32) SizeOfXLogRecord, total_len); goto err; @@ -621,7 +621,7 @@ ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr, if (record->xl_tot_len < SizeOfXLogRecord) { report_invalid_record(state, - "invalid record length at %X/%X: wanted %u, got %u", + "invalid record length at %X/%X: wanted %u, got %u", (uint32) (RecPtr >> 32), (uint32) RecPtr, (uint32) SizeOfXLogRecord, record->xl_tot_len); return false; diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c index 13244610db..8068b82eab 100644 --- a/src/backend/catalog/objectaddress.c +++ b/src/backend/catalog/objectaddress.c @@ -1792,7 +1792,7 @@ get_object_address_defacl(List *objname, List *objargs, bool missing_ok) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("unrecognized default ACL object type %c", objtype), - errhint("Valid object types are \"r\", \"S\", \"f\", and \"T\"."))); + errhint("Valid object types are \"r\", \"S\", \"f\", and \"T\"."))); } /* diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c index bcc941104f..73d19ec394 100644 --- a/src/backend/catalog/pg_aggregate.c +++ b/src/backend/catalog/pg_aggregate.c @@ 
-82,9 +82,9 @@ AggregateCreate(const char *aggName, Form_pg_proc proc; Oid transfn; Oid finalfn = InvalidOid; /* can be omitted */ - Oid combinefn = InvalidOid; /* can be omitted */ + Oid combinefn = InvalidOid; /* can be omitted */ Oid serialfn = InvalidOid; /* can be omitted */ - Oid deserialfn = InvalidOid; /* can be omitted */ + Oid deserialfn = InvalidOid; /* can be omitted */ Oid mtransfn = InvalidOid; /* can be omitted */ Oid minvtransfn = InvalidOid; /* can be omitted */ Oid mfinalfn = InvalidOid; /* can be omitted */ @@ -407,11 +407,11 @@ AggregateCreate(const char *aggName, /* handle the combinefn, if supplied */ if (aggcombinefnName) { - Oid combineType; + Oid combineType; /* - * Combine function must have 2 argument, each of which is the - * trans type + * Combine function must have 2 argument, each of which is the trans + * type */ fnArgs[0] = aggTransType; fnArgs[1] = aggTransType; @@ -423,9 +423,9 @@ AggregateCreate(const char *aggName, if (combineType != aggTransType) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("return type of combine function %s is not %s", - NameListToString(aggcombinefnName), - format_type_be(aggTransType)))); + errmsg("return type of combine function %s is not %s", + NameListToString(aggcombinefnName), + format_type_be(aggTransType)))); /* * A combine function to combine INTERNAL states must accept nulls and @@ -440,8 +440,9 @@ AggregateCreate(const char *aggName, } /* - * Validate the serialization function, if present. We must ensure that the - * return type of this function is the same as the specified serialType. + * Validate the serialization function, if present. We must ensure that + * the return type of this function is the same as the specified + * serialType. */ if (aggserialfnName) { @@ -454,9 +455,9 @@ AggregateCreate(const char *aggName, if (rettype != aggSerialType) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("return type of serialization function %s is not %s", - NameListToString(aggserialfnName), - format_type_be(aggSerialType)))); + errmsg("return type of serialization function %s is not %s", + NameListToString(aggserialfnName), + format_type_be(aggSerialType)))); } /* @@ -474,9 +475,9 @@ AggregateCreate(const char *aggName, if (rettype != aggTransType) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("return type of deserialization function %s is not %s", - NameListToString(aggdeserialfnName), - format_type_be(aggTransType)))); + errmsg("return type of deserialization function %s is not %s", + NameListToString(aggdeserialfnName), + format_type_be(aggTransType)))); } /* diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c index 7f45ba9407..f1fdc1a360 100644 --- a/src/backend/commands/aggregatecmds.c +++ b/src/backend/commands/aggregatecmds.c @@ -338,14 +338,14 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters, /* * There's little point in having a serialization/deserialization * function on aggregates that don't have an internal state, so let's - * just disallow this as it may help clear up any confusion or needless - * authoring of these functions. + * just disallow this as it may help clear up any confusion or + * needless authoring of these functions. 
*/ if (transTypeId != INTERNALOID) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("a serialization type must only be specified when the aggregate transition data type is %s", - format_type_be(INTERNALOID)))); + format_type_be(INTERNALOID)))); serialTypeId = typenameTypeId(NULL, serialType); @@ -358,15 +358,15 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters, /* * We disallow INTERNAL serialType as the whole point of the - * serialized types is to allow the aggregate state to be output, - * and we cannot output INTERNAL. This check, combined with the one - * above ensures that the trans type and serialization type are not the + * serialized types is to allow the aggregate state to be output, and + * we cannot output INTERNAL. This check, combined with the one above + * ensures that the trans type and serialization type are not the * same. */ if (serialTypeId == INTERNALOID) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("aggregate serialization data type cannot be %s", + errmsg("aggregate serialization data type cannot be %s", format_type_be(serialTypeId)))); /* @@ -392,14 +392,14 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters, */ if (serialfuncName != NIL) ereport(ERROR, - (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("must specify serialization type when specifying serialization function"))); + (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), + errmsg("must specify serialization type when specifying serialization function"))); /* likewise for the deserialization function */ if (deserialfuncName != NIL) ereport(ERROR, - (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("must specify serialization type when specifying deserialization function"))); + (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), + errmsg("must specify serialization type when specifying deserialization function"))); } /* @@ -493,7 +493,7 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters, mfinalfuncExtraArgs, sortoperatorName, /* sort operator name */ transTypeId, /* transition data type */ - serialTypeId, /* serialization data type */ + serialTypeId, /* serialization data type */ transSpace, /* transition space */ mtransTypeId, /* transition data type */ mtransSpace, /* transition space */ diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c index 4b08cb832e..1301bcb5e8 100644 --- a/src/backend/commands/alter.c +++ b/src/backend/commands/alter.c @@ -400,18 +400,17 @@ ExecRenameStmt(RenameStmt *stmt) ObjectAddress ExecAlterObjectDependsStmt(AlterObjectDependsStmt *stmt, ObjectAddress *refAddress) { - ObjectAddress address; - ObjectAddress refAddr; - Relation rel; + ObjectAddress address; + ObjectAddress refAddr; + Relation rel; address = get_object_address_rv(stmt->objectType, stmt->relation, stmt->objname, - stmt->objargs, &rel, AccessExclusiveLock, false); + stmt->objargs, &rel, AccessExclusiveLock, false); /* - * If a relation was involved, it would have been opened and locked. - * We don't need the relation here, but we'll retain the lock until - * commit. + * If a relation was involved, it would have been opened and locked. We + * don't need the relation here, but we'll retain the lock until commit. 
*/ if (rel) heap_close(rel, NoLock); @@ -630,8 +629,8 @@ AlterObjectNamespace_internal(Relation rel, Oid objid, Oid nspOid) oldNspOid = DatumGetObjectId(namespace); /* - * If the object is already in the correct namespace, we don't need - * to do anything except fire the object access hook. + * If the object is already in the correct namespace, we don't need to do + * anything except fire the object access hook. */ if (oldNspOid == nspOid) { diff --git a/src/backend/commands/amcmds.c b/src/backend/commands/amcmds.c index 904dc1cbd1..9ac930ea8b 100644 --- a/src/backend/commands/amcmds.c +++ b/src/backend/commands/amcmds.c @@ -138,7 +138,7 @@ RemoveAccessMethodById(Oid amOid) /* * get_am_type_oid - * Worker for various get_am_*_oid variants + * Worker for various get_am_*_oid variants * * If missing_ok is false, throw an error if access method not found. If * true, just return InvalidOid. @@ -188,7 +188,7 @@ get_index_am_oid(const char *amname, bool missing_ok) /* * get_am_oid - given an access method name, look up its OID. - * The type is not checked. + * The type is not checked. */ Oid get_am_oid(const char *amname, bool missing_ok) diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c index 97059e59c8..5fcedd7855 100644 --- a/src/backend/commands/analyze.c +++ b/src/backend/commands/analyze.c @@ -570,7 +570,7 @@ do_analyze_rel(Relation onerel, int options, VacuumParams *params, */ if (!inh) { - BlockNumber relallvisible; + BlockNumber relallvisible; visibilitymap_count(onerel, &relallvisible, NULL); diff --git a/src/backend/commands/conversioncmds.c b/src/backend/commands/conversioncmds.c index ad9b8ba156..175d4ab685 100644 --- a/src/backend/commands/conversioncmds.c +++ b/src/backend/commands/conversioncmds.c @@ -85,8 +85,8 @@ CreateConversionCommand(CreateConversionStmt *stmt) if (get_func_rettype(funcoid) != VOIDOID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("encoding conversion function %s must return type %s", - NameListToString(func_name), "void"))); + errmsg("encoding conversion function %s must return type %s", + NameListToString(func_name), "void"))); /* Check we have EXECUTE rights for the function */ aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE); diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index 28dcd34001..f45b3304ae 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -875,7 +875,7 @@ DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed) if (is_from) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("COPY FROM not supported with row-level security"), + errmsg("COPY FROM not supported with row-level security"), errhint("Use INSERT statements instead."))); /* Build target list */ @@ -1399,16 +1399,16 @@ BeginCopy(bool is_from, { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("DO INSTEAD NOTHING rules are not supported for COPY"))); + errmsg("DO INSTEAD NOTHING rules are not supported for COPY"))); } else if (list_length(rewritten) > 1) { - ListCell *lc; + ListCell *lc; /* examine queries to determine which error message to issue */ foreach(lc, rewritten) { - Query *q = (Query *) lfirst(lc); + Query *q = (Query *) lfirst(lc); if (q->querySource == QSRC_QUAL_INSTEAD_RULE) ereport(ERROR, @@ -1417,7 +1417,7 @@ BeginCopy(bool is_from, if (q->querySource == QSRC_NON_INSTEAD_RULE) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("DO ALSO rules are not supported for the COPY"))); + errmsg("DO ALSO 
rules are not supported for the COPY"))); } ereport(ERROR, @@ -1448,8 +1448,8 @@ BeginCopy(bool is_from, query->commandType == CMD_DELETE); ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("COPY query must have a RETURNING clause"))); + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("COPY query must have a RETURNING clause"))); } /* plan the query */ diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c index 9d84b79ea0..e78e3b5b74 100644 --- a/src/backend/commands/extension.c +++ b/src/backend/commands/extension.c @@ -1419,7 +1419,7 @@ CreateExtensionInternal(CreateExtensionStmt *stmt, List *parents) CreateExtensionStmt *ces; ListCell *lc; ObjectAddress addr; - List *cascade_parents; + List *cascade_parents; /* Check extension name validity before trying to cascade */ check_valid_extension_name(curreq); diff --git a/src/backend/commands/foreigncmds.c b/src/backend/commands/foreigncmds.c index 804bab2e1f..eb531afd49 100644 --- a/src/backend/commands/foreigncmds.c +++ b/src/backend/commands/foreigncmds.c @@ -487,7 +487,7 @@ lookup_fdw_handler_func(DefElem *handler) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("function %s must return type %s", - NameListToString((List *) handler->arg), "fdw_handler"))); + NameListToString((List *) handler->arg), "fdw_handler"))); return handlerOid; } diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c index 62e61a2674..6cddcbd02c 100644 --- a/src/backend/commands/matview.c +++ b/src/backend/commands/matview.c @@ -217,21 +217,20 @@ ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString, RelationGetRelationName(matviewRel)); /* - * Check that there is a unique index with no WHERE clause on - * one or more columns of the materialized view if CONCURRENTLY - * is specified. + * Check that there is a unique index with no WHERE clause on one or more + * columns of the materialized view if CONCURRENTLY is specified. */ if (concurrent) { - List *indexoidlist = RelationGetIndexList(matviewRel); - ListCell *indexoidscan; + List *indexoidlist = RelationGetIndexList(matviewRel); + ListCell *indexoidscan; bool hasUniqueIndex = false; foreach(indexoidscan, indexoidlist) { Oid indexoid = lfirst_oid(indexoidscan); Relation indexRel; - Form_pg_index indexStruct; + Form_pg_index indexStruct; indexRel = index_open(indexoid, AccessShareLock); indexStruct = indexRel->rd_index; @@ -255,9 +254,9 @@ ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString, if (!hasUniqueIndex) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("cannot refresh materialized view \"%s\" concurrently", - quote_qualified_identifier(get_namespace_name(RelationGetNamespace(matviewRel)), - RelationGetRelationName(matviewRel))), + errmsg("cannot refresh materialized view \"%s\" concurrently", + quote_qualified_identifier(get_namespace_name(RelationGetNamespace(matviewRel)), + RelationGetRelationName(matviewRel))), errhint("Create a unique index with no WHERE clause on one or more columns of the materialized view."))); } @@ -745,8 +744,8 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner, /* * There must be at least one unique index on the matview. * - * ExecRefreshMatView() checks that after taking the exclusive lock on - * the matview. So at least one unique index is guaranteed to exist here + * ExecRefreshMatView() checks that after taking the exclusive lock on the + * matview. 
So at least one unique index is guaranteed to exist here * because the lock is still being held. */ Assert(foundUniqueIndex); diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c index 1103624121..67d08d862b 100644 --- a/src/backend/commands/operatorcmds.c +++ b/src/backend/commands/operatorcmds.c @@ -275,8 +275,8 @@ ValidateRestrictionEstimator(List *restrictionName) if (get_func_rettype(restrictionOid) != FLOAT8OID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("restriction estimator function %s must return type %s", - NameListToString(restrictionName), "float8"))); + errmsg("restriction estimator function %s must return type %s", + NameListToString(restrictionName), "float8"))); /* Require EXECUTE rights for the estimator */ aclresult = pg_proc_aclcheck(restrictionOid, GetUserId(), ACL_EXECUTE); @@ -321,8 +321,8 @@ ValidateJoinEstimator(List *joinName) if (get_func_rettype(joinOid) != FLOAT8OID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("join estimator function %s must return type %s", - NameListToString(joinName), "float8"))); + errmsg("join estimator function %s must return type %s", + NameListToString(joinName), "float8"))); /* Require EXECUTE rights for the estimator */ aclresult = pg_proc_aclcheck(joinOid, GetUserId(), ACL_EXECUTE); diff --git a/src/backend/commands/policy.c b/src/backend/commands/policy.c index 93d15e477a..bc2e4af82a 100644 --- a/src/backend/commands/policy.c +++ b/src/backend/commands/policy.c @@ -496,7 +496,7 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id) /* Must own relation. */ if (pg_class_ownercheck(relid, GetUserId())) - noperm = false; /* user is allowed to modify this policy */ + noperm = false; /* user is allowed to modify this policy */ else ereport(WARNING, (errcode(ERRCODE_WARNING_PRIVILEGE_NOT_REVOKED), @@ -511,15 +511,16 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id) */ if (!noperm && num_roles > 0) { - int i, j; + int i, + j; Oid *roles = (Oid *) ARR_DATA_PTR(policy_roles); Datum *role_oids; char *qual_value; Node *qual_expr; - List *qual_parse_rtable = NIL; + List *qual_parse_rtable = NIL; char *with_check_value; Node *with_check_qual; - List *with_check_parse_rtable = NIL; + List *with_check_parse_rtable = NIL; Datum values[Natts_pg_policy]; bool isnull[Natts_pg_policy]; bool replaces[Natts_pg_policy]; @@ -536,15 +537,14 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id) /* * All of the dependencies will be removed from the policy and then - * re-added. In order to get them correct, we need to extract out - * the expressions in the policy and construct a parsestate just - * enough to build the range table(s) to then pass to - * recordDependencyOnExpr(). + * re-added. In order to get them correct, we need to extract out the + * expressions in the policy and construct a parsestate just enough to + * build the range table(s) to then pass to recordDependencyOnExpr(). 
*/ /* Get policy qual, to update dependencies */ value_datum = heap_getattr(tuple, Anum_pg_policy_polqual, - RelationGetDescr(pg_policy_rel), &attr_isnull); + RelationGetDescr(pg_policy_rel), &attr_isnull); if (!attr_isnull) { ParseState *qual_pstate; @@ -566,7 +566,7 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id) /* Get WITH CHECK qual, to update dependencies */ value_datum = heap_getattr(tuple, Anum_pg_policy_polwithcheck, - RelationGetDescr(pg_policy_rel), &attr_isnull); + RelationGetDescr(pg_policy_rel), &attr_isnull); if (!attr_isnull) { ParseState *with_check_pstate; @@ -665,7 +665,7 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id) heap_close(pg_policy_rel, RowExclusiveLock); - return(noperm || num_roles > 0); + return (noperm || num_roles > 0); } /* @@ -996,8 +996,8 @@ AlterPolicy(AlterPolicyStmt *stmt) /* Get policy command */ polcmd_datum = heap_getattr(policy_tuple, Anum_pg_policy_polcmd, - RelationGetDescr(pg_policy_rel), - &polcmd_isnull); + RelationGetDescr(pg_policy_rel), + &polcmd_isnull); Assert(!polcmd_isnull); polcmd = DatumGetChar(polcmd_datum); @@ -1029,15 +1029,15 @@ AlterPolicy(AlterPolicyStmt *stmt) } else { - Oid *roles; + Oid *roles; Datum roles_datum; bool attr_isnull; ArrayType *policy_roles; /* - * We need to pull the set of roles this policy applies to from - * what's in the catalog, so that we can recreate the dependencies - * correctly for the policy. + * We need to pull the set of roles this policy applies to from what's + * in the catalog, so that we can recreate the dependencies correctly + * for the policy. */ roles_datum = heap_getattr(policy_tuple, Anum_pg_policy_polroles, @@ -1065,13 +1065,13 @@ AlterPolicy(AlterPolicyStmt *stmt) } else { - Datum value_datum; - bool attr_isnull; + Datum value_datum; + bool attr_isnull; /* * We need to pull the USING expression and build the range table for - * the policy from what's in the catalog, so that we can recreate - * the dependencies correctly for the policy. + * the policy from what's in the catalog, so that we can recreate the + * dependencies correctly for the policy. 
*/ /* Check if the policy has a USING expr */ @@ -1106,8 +1106,8 @@ AlterPolicy(AlterPolicyStmt *stmt) } else { - Datum value_datum; - bool attr_isnull; + Datum value_datum; + bool attr_isnull; /* * We need to pull the WITH CHECK expression and build the range table diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c index 0515f4d3df..761d08f604 100644 --- a/src/backend/commands/proclang.c +++ b/src/backend/commands/proclang.c @@ -114,8 +114,8 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) if (funcrettype != LANGUAGE_HANDLEROID) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("function %s must return type %s", - NameListToString(funcname), "language_handler"))); + errmsg("function %s must return type %s", + NameListToString(funcname), "language_handler"))); } else { @@ -285,8 +285,8 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) else ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("function %s must return type %s", - NameListToString(stmt->plhandler), "language_handler"))); + errmsg("function %s must return type %s", + NameListToString(stmt->plhandler), "language_handler"))); } /* validate the inline function */ diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index 6f728ff0fc..99a659a102 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -532,8 +532,8 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, * can skip this for internally generated triggers, since the name * modification above should be sufficient. * - * NOTE that this is cool only because we have ShareRowExclusiveLock on the - * relation, so the trigger set won't be changing underneath us. + * NOTE that this is cool only because we have ShareRowExclusiveLock on + * the relation, so the trigger set won't be changing underneath us. 
*/ if (!isInternal) { diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c index 71d4df9c79..ce04211067 100644 --- a/src/backend/commands/typecmds.c +++ b/src/backend/commands/typecmds.c @@ -450,8 +450,8 @@ DefineType(List *names, List *parameters) { /* backwards-compatibility hack */ ereport(WARNING, - (errmsg("changing return type of function %s from %s to %s", - NameListToString(inputName), "opaque", typeName))); + (errmsg("changing return type of function %s from %s to %s", + NameListToString(inputName), "opaque", typeName))); SetFunctionReturnType(inputOid, typoid); } else @@ -467,15 +467,15 @@ DefineType(List *names, List *parameters) { /* backwards-compatibility hack */ ereport(WARNING, - (errmsg("changing return type of function %s from %s to %s", - NameListToString(outputName), "opaque", "cstring"))); + (errmsg("changing return type of function %s from %s to %s", + NameListToString(outputName), "opaque", "cstring"))); SetFunctionReturnType(outputOid, CSTRINGOID); } else ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("type output function %s must return type %s", - NameListToString(outputName), "cstring"))); + errmsg("type output function %s must return type %s", + NameListToString(outputName), "cstring"))); } if (receiveOid) { @@ -492,8 +492,8 @@ DefineType(List *names, List *parameters) if (resulttype != BYTEAOID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("type send function %s must return type %s", - NameListToString(sendName), "bytea"))); + errmsg("type send function %s must return type %s", + NameListToString(sendName), "bytea"))); } /* @@ -1888,8 +1888,8 @@ findTypeAnalyzeFunction(List *procname, Oid typeOid) if (get_func_rettype(procOid) != BOOLOID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("type analyze function %s must return type %s", - NameListToString(procname), "boolean"))); + errmsg("type analyze function %s must return type %s", + NameListToString(procname), "boolean"))); return procOid; } @@ -3313,9 +3313,9 @@ AlterTypeOwner_oid(Oid typeOid, Oid newOwnerId, bool hasDependEntry) typTup = (Form_pg_type) GETSTRUCT(tup); /* - * If it's a composite type, invoke ATExecChangeOwner so that we fix up the - * pg_class entry properly. That will call back to AlterTypeOwnerInternal - * to take care of the pg_type entry(s). + * If it's a composite type, invoke ATExecChangeOwner so that we fix up + * the pg_class entry properly. That will call back to + * AlterTypeOwnerInternal to take care of the pg_type entry(s). 
*/ if (typTup->typtype == TYPTYPE_COMPOSITE) ATExecChangeOwner(typTup->typrelid, newOwnerId, true, AccessExclusiveLock); diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c index f0ac636b9b..b6ea95061d 100644 --- a/src/backend/commands/user.c +++ b/src/backend/commands/user.c @@ -302,7 +302,7 @@ CreateRole(CreateRoleStmt *stmt) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to change bypassrls attribute"))); + errmsg("must be superuser to change bypassrls attribute"))); } else { @@ -320,8 +320,8 @@ CreateRole(CreateRoleStmt *stmt) ereport(ERROR, (errcode(ERRCODE_RESERVED_NAME), errmsg("role name \"%s\" is reserved", - stmt->role), - errdetail("Role names starting with \"pg_\" are reserved."))); + stmt->role), + errdetail("Role names starting with \"pg_\" are reserved."))); /* * Check the pg_authid relation to be certain the role doesn't already @@ -977,7 +977,7 @@ DropRole(DropRoleStmt *stmt) if (rolspec->roletype != ROLESPEC_CSTRING) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("cannot use special role specifier in DROP ROLE"))); + errmsg("cannot use special role specifier in DROP ROLE"))); role = rolspec->rolename; tuple = SearchSysCache1(AUTHNAME, PointerGetDatum(role)); @@ -1167,22 +1167,22 @@ RenameRole(const char *oldname, const char *newname) errmsg("current user cannot be renamed"))); /* - * Check that the user is not trying to rename a system role and - * not trying to rename a role into the reserved "pg_" namespace. + * Check that the user is not trying to rename a system role and not + * trying to rename a role into the reserved "pg_" namespace. */ if (IsReservedName(NameStr(authform->rolname))) ereport(ERROR, (errcode(ERRCODE_RESERVED_NAME), errmsg("role name \"%s\" is reserved", - NameStr(authform->rolname)), - errdetail("Role names starting with \"pg_\" are reserved."))); + NameStr(authform->rolname)), + errdetail("Role names starting with \"pg_\" are reserved."))); if (IsReservedName(newname)) ereport(ERROR, (errcode(ERRCODE_RESERVED_NAME), errmsg("role name \"%s\" is reserved", - newname), - errdetail("Role names starting with \"pg_\" are reserved."))); + newname), + errdetail("Role names starting with \"pg_\" are reserved."))); /* make sure the new name doesn't exist */ if (SearchSysCacheExists1(AUTHNAME, CStringGetDatum(newname))) diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c index 784c3e9356..0010ca9a80 100644 --- a/src/backend/commands/vacuumlazy.c +++ b/src/backend/commands/vacuumlazy.c @@ -1192,9 +1192,9 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, } /* - * If the all-visible page is turned out to be all-frozen but not marked, - * we should so mark it. Note that all_frozen is only valid if all_visible - * is true, so we must check both. + * If the all-visible page is turned out to be all-frozen but not + * marked, we should so mark it. Note that all_frozen is only valid + * if all_visible is true, so we must check both. 
*/ else if (all_visible_according_to_vm && all_visible && all_frozen && !VM_ALL_FROZEN(onerel, blkno, &vmbuffer)) @@ -1660,7 +1660,7 @@ should_attempt_truncation(LVRelStats *vacrelstats) possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages; if (possibly_freeable > 0 && (possibly_freeable >= REL_TRUNCATE_MINIMUM || - possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) && + possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) && old_snapshot_threshold < 0) return true; else diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c index f801faacd2..962d75db6e 100644 --- a/src/backend/commands/variable.c +++ b/src/backend/commands/variable.c @@ -880,9 +880,9 @@ check_role(char **newval, void **extra, GucSource source) ReleaseSysCache(roleTup); /* - * Verify that session user is allowed to become this role, but - * skip this in parallel mode, where we must blindly recreate the - * parallel leader's state. + * Verify that session user is allowed to become this role, but skip + * this in parallel mode, where we must blindly recreate the parallel + * leader's state. */ if (!InitializingParallelWorker && !is_member_of_role(GetSessionUserId(), roleid)) diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c index 0c8e939905..4a978adea7 100644 --- a/src/backend/executor/execAmi.c +++ b/src/backend/executor/execAmi.c @@ -444,10 +444,9 @@ ExecSupportsBackwardScan(Plan *node) return false; /* - * Parallel-aware nodes return a subset of the tuples in each worker, - * and in general we can't expect to have enough bookkeeping state to - * know which ones we returned in this worker as opposed to some other - * worker. + * Parallel-aware nodes return a subset of the tuples in each worker, and + * in general we can't expect to have enough bookkeeping state to know + * which ones we returned in this worker as opposed to some other worker. 
*/ if (node->parallel_aware) return false; diff --git a/src/backend/executor/execIndexing.c b/src/backend/executor/execIndexing.c index a2eeeb6f6c..c819d19db4 100644 --- a/src/backend/executor/execIndexing.c +++ b/src/backend/executor/execIndexing.c @@ -725,7 +725,7 @@ retry: { TransactionId xwait; ItemPointerData ctid_wait; - XLTW_Oper reason_wait; + XLTW_Oper reason_wait; Datum existing_values[INDEX_MAX_KEYS]; bool existing_isnull[INDEX_MAX_KEYS]; char *error_new; diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index b5ced388d2..32bb3f9205 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -1851,25 +1851,25 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo, if (wco->polname != NULL) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("new row violates row-level security policy \"%s\" for table \"%s\"", - wco->polname, wco->relname))); + errmsg("new row violates row-level security policy \"%s\" for table \"%s\"", + wco->polname, wco->relname))); else ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("new row violates row-level security policy for table \"%s\"", - wco->relname))); + errmsg("new row violates row-level security policy for table \"%s\"", + wco->relname))); break; case WCO_RLS_CONFLICT_CHECK: if (wco->polname != NULL) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"", - wco->polname, wco->relname))); + errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"", + wco->polname, wco->relname))); else ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("new row violates row-level security policy (USING expression) for table \"%s\"", - wco->relname))); + errmsg("new row violates row-level security policy (USING expression) for table \"%s\"", + wco->relname))); break; default: elog(ERROR, "unrecognized WCO kind: %u", wco->kind); diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c index f03cd9b07b..6de90705e4 100644 --- a/src/backend/executor/execParallel.c +++ b/src/backend/executor/execParallel.c @@ -83,7 +83,7 @@ struct SharedExecutorInstrumentation typedef struct ExecParallelEstimateContext { ParallelContext *pcxt; - int nnodes; + int nnodes; } ExecParallelEstimateContext; /* Context object for ExecParallelInitializeDSM. */ @@ -91,7 +91,7 @@ typedef struct ExecParallelInitializeDSMContext { ParallelContext *pcxt; SharedExecutorInstrumentation *instrumentation; - int nnodes; + int nnodes; } ExecParallelInitializeDSMContext; /* Helper functions that run in the parallel leader. */ @@ -99,11 +99,11 @@ static char *ExecSerializePlan(Plan *plan, EState *estate); static bool ExecParallelEstimate(PlanState *node, ExecParallelEstimateContext *e); static bool ExecParallelInitializeDSM(PlanState *node, - ExecParallelInitializeDSMContext *d); + ExecParallelInitializeDSMContext *d); static shm_mq_handle **ExecParallelSetupTupleQueues(ParallelContext *pcxt, bool reinitialize); static bool ExecParallelRetrieveInstrumentation(PlanState *planstate, - SharedExecutorInstrumentation *instrumentation); + SharedExecutorInstrumentation *instrumentation); /* Helper functions that run in the parallel worker. */ static void ParallelQueryMain(dsm_segment *seg, shm_toc *toc); @@ -387,12 +387,12 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) /* Estimate space for tuple queues. 
*/ shm_toc_estimate_chunk(&pcxt->estimator, - mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers)); + mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers)); shm_toc_estimate_keys(&pcxt->estimator, 1); /* - * Give parallel-aware nodes a chance to add to the estimates, and get - * a count of how many PlanState nodes there are. + * Give parallel-aware nodes a chance to add to the estimates, and get a + * count of how many PlanState nodes there are. */ e.pcxt = pcxt; e.nnodes = 0; @@ -444,14 +444,14 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) pei->tqueue = ExecParallelSetupTupleQueues(pcxt, false); /* - * If instrumentation options were supplied, allocate space for the - * data. It only gets partially initialized here; the rest happens - * during ExecParallelInitializeDSM. + * If instrumentation options were supplied, allocate space for the data. + * It only gets partially initialized here; the rest happens during + * ExecParallelInitializeDSM. */ if (estate->es_instrument) { Instrumentation *instrument; - int i; + int i; instrumentation = shm_toc_allocate(pcxt->toc, instrumentation_len); instrumentation->instrument_options = estate->es_instrument; @@ -493,13 +493,13 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) */ static bool ExecParallelRetrieveInstrumentation(PlanState *planstate, - SharedExecutorInstrumentation *instrumentation) + SharedExecutorInstrumentation *instrumentation) { Instrumentation *instrument; - int i; - int n; - int ibytes; - int plan_node_id = planstate->plan->plan_node_id; + int i; + int n; + int ibytes; + int plan_node_id = planstate->plan->plan_node_id; /* Find the instumentation for this node. */ for (i = 0; i < instrumentation->num_plan_nodes; ++i) @@ -532,7 +532,7 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate, void ExecParallelFinish(ParallelExecutorInfo *pei) { - int i; + int i; if (pei->finished) return; @@ -626,19 +626,19 @@ ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver, */ static bool ExecParallelReportInstrumentation(PlanState *planstate, - SharedExecutorInstrumentation *instrumentation) + SharedExecutorInstrumentation *instrumentation) { - int i; - int plan_node_id = planstate->plan->plan_node_id; + int i; + int plan_node_id = planstate->plan->plan_node_id; Instrumentation *instrument; InstrEndLoop(planstate->instrument); /* * If we shuffled the plan_node_id values in ps_instrument into sorted - * order, we could use binary search here. This might matter someday - * if we're pushing down sufficiently large plan trees. For now, do it - * the slow, dumb way. + * order, we could use binary search here. This might matter someday if + * we're pushing down sufficiently large plan trees. For now, do it the + * slow, dumb way. */ for (i = 0; i < instrumentation->num_plan_nodes; ++i) if (instrumentation->plan_node_id[i] == plan_node_id) diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c index cd93c045dc..e02fba5232 100644 --- a/src/backend/executor/functions.c +++ b/src/backend/executor/functions.c @@ -497,8 +497,8 @@ init_execution_state(List *queryTree_list, stmt = queryTree->utilityStmt; else stmt = (Node *) pg_plan_query(queryTree, - fcache->readonly_func ? CURSOR_OPT_PARALLEL_OK : 0, - NULL); + fcache->readonly_func ? 
CURSOR_OPT_PARALLEL_OK : 0, + NULL); /* Precheck all commands for validity in a function */ if (IsA(stmt, TransactionStmt)) diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 0c1e4a3cb6..c3a04ef7da 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -491,9 +491,9 @@ static void finalize_aggregate(AggState *aggstate, AggStatePerGroup pergroupstate, Datum *resultVal, bool *resultIsNull); static void finalize_partialaggregate(AggState *aggstate, - AggStatePerAgg peragg, - AggStatePerGroup pergroupstate, - Datum *resultVal, bool *resultIsNull); + AggStatePerAgg peragg, + AggStatePerGroup pergroupstate, + Datum *resultVal, bool *resultIsNull); static void prepare_projection_slot(AggState *aggstate, TupleTableSlot *slot, int currentSet); @@ -981,17 +981,18 @@ combine_aggregates(AggState *aggstate, AggStatePerGroup pergroup) if (OidIsValid(pertrans->deserialfn_oid)) { /* - * Don't call a strict deserialization function with NULL input. - * A strict deserialization function and a null value means we skip - * calling the combine function for this state. We assume that this - * would be a waste of time and effort anyway so just skip it. + * Don't call a strict deserialization function with NULL input. A + * strict deserialization function and a null value means we skip + * calling the combine function for this state. We assume that + * this would be a waste of time and effort anyway so just skip + * it. */ if (pertrans->deserialfn.fn_strict && slot->tts_isnull[0]) continue; else { - FunctionCallInfo dsinfo = &pertrans->deserialfn_fcinfo; - MemoryContext oldContext; + FunctionCallInfo dsinfo = &pertrans->deserialfn_fcinfo; + MemoryContext oldContext; dsinfo->arg[0] = slot->tts_values[0]; dsinfo->argnull[0] = slot->tts_isnull[0]; @@ -1423,14 +1424,14 @@ finalize_partialaggregate(AggState *aggstate, AggStatePerGroup pergroupstate, Datum *resultVal, bool *resultIsNull) { - AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno]; - MemoryContext oldContext; + AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno]; + MemoryContext oldContext; oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory); /* - * serialfn_oid will be set if we must serialize the input state - * before calling the combine function on the state. + * serialfn_oid will be set if we must serialize the input state before + * calling the combine function on the state. */ if (OidIsValid(pertrans->serialfn_oid)) { @@ -1443,6 +1444,7 @@ finalize_partialaggregate(AggState *aggstate, else { FunctionCallInfo fcinfo = &pertrans->serialfn_fcinfo; + fcinfo->arg[0] = pergroupstate->transValue; fcinfo->argnull[0] = pergroupstate->transValueIsNull; @@ -1459,7 +1461,7 @@ finalize_partialaggregate(AggState *aggstate, /* If result is pass-by-ref, make sure it is in the right context. */ if (!peragg->resulttypeByVal && !*resultIsNull && !MemoryContextContains(CurrentMemoryContext, - DatumGetPointer(*resultVal))) + DatumGetPointer(*resultVal))) *resultVal = datumCopy(*resultVal, peragg->resulttypeByVal, peragg->resulttypeLen); @@ -2627,21 +2629,21 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) * * 1. An aggregate function appears more than once in query: * - * SELECT SUM(x) FROM ... HAVING SUM(x) > 0 + * SELECT SUM(x) FROM ... HAVING SUM(x) > 0 * - * Since the aggregates are the identical, we only need to calculate - * the calculate it once. Both aggregates will share the same 'aggno' - * value. 
+ * Since the aggregates are identical, we only need to calculate it + * once. Both aggregates will share the same 'aggno' + * value. * * 2. Two different aggregate functions appear in the query, but the - * aggregates have the same transition function and initial value, but - * different final function: + * aggregates have the same transition function and initial value, but + * different final function: * - * SELECT SUM(x), AVG(x) FROM ... + * SELECT SUM(x), AVG(x) FROM ... * - * In this case we must create a new peragg for the varying aggregate, - * and need to call the final functions separately, but can share the - * same transition state. + * In this case we must create a new peragg for the varying aggregate, + * and need to call the final functions separately, but can share the + * same transition state. * * For either of these optimizations to be valid, the aggregate's * arguments must be the same, including any modifiers such as ORDER BY, @@ -2889,8 +2891,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) */ existing_transno = find_compatible_pertrans(aggstate, aggref, transfn_oid, aggtranstype, - serialfn_oid, deserialfn_oid, - initValue, initValueIsNull, + serialfn_oid, deserialfn_oid, + initValue, initValueIsNull, same_input_transnos); if (existing_transno != -1) { @@ -3366,9 +3368,9 @@ find_compatible_pertrans(AggState *aggstate, Aggref *newagg, /* * The serialization and deserialization functions must match, if * present, as we're unable to share the trans state for aggregates - * which will serialize or deserialize into different formats. Remember - * that these will be InvalidOid if they're not required for this agg - * node. + * which will serialize or deserialize into different formats. + * Remember that these will be InvalidOid if they're not required for + * this agg node. */ if (aggserialfn != pertrans->serialfn_oid || aggdeserialfn != pertrans->deserialfn_oid) diff --git a/src/backend/executor/nodeForeignscan.c b/src/backend/executor/nodeForeignscan.c index 300f947d43..d886aaf64d 100644 --- a/src/backend/executor/nodeForeignscan.c +++ b/src/backend/executor/nodeForeignscan.c @@ -285,8 +285,8 @@ ExecReScanForeignScan(ForeignScanState *node) /* * If chgParam of subnode is not null then plan will be re-scanned by - * first ExecProcNode. outerPlan may also be NULL, in which case there - * is nothing to rescan at all. + * first ExecProcNode. outerPlan may also be NULL, in which case there is + * nothing to rescan at all. */ if (outerPlan != NULL && outerPlan->chgParam == NULL) ExecReScan(outerPlan); diff --git a/src/backend/executor/nodeGather.c b/src/backend/executor/nodeGather.c index 3834ed678c..313b234454 100644 --- a/src/backend/executor/nodeGather.c +++ b/src/backend/executor/nodeGather.c @@ -138,8 +138,8 @@ ExecGather(GatherState *node) /* * Initialize the parallel context and workers on first execution. We do * this on first execution rather than during node initialization, as it - * needs to allocate large dynamic segment, so it is better to do if it - * is really needed. + * needs to allocate large dynamic segment, so it is better to do if it is + * really needed. */ if (!node->initialized) { @@ -147,8 +147,8 @@ ExecGather(GatherState *node) Gather *gather = (Gather *) node->ps.plan; /* - * Sometimes we might have to run without parallelism; but if - * parallel mode is active then we can try to fire up some workers. 
+ * Sometimes we might have to run without parallelism; but if parallel + * mode is active then we can try to fire up some workers. */ if (gather->num_workers > 0 && IsInParallelMode()) { @@ -186,7 +186,7 @@ ExecGather(GatherState *node) } else { - /* No workers? Then never mind. */ + /* No workers? Then never mind. */ ExecShutdownGatherWorkers(node); } } @@ -314,7 +314,7 @@ gather_getnext(GatherState *gatherstate) static HeapTuple gather_readnext(GatherState *gatherstate) { - int waitpos = gatherstate->nextreader; + int waitpos = gatherstate->nextreader; for (;;) { @@ -330,8 +330,8 @@ gather_readnext(GatherState *gatherstate) tup = TupleQueueReaderNext(reader, true, &readerdone); /* - * If this reader is done, remove it. If all readers are done, - * clean up remaining worker state. + * If this reader is done, remove it. If all readers are done, clean + * up remaining worker state. */ if (readerdone) { @@ -402,7 +402,7 @@ ExecShutdownGatherWorkers(GatherState *node) /* Shut down tuple queue readers before shutting down workers. */ if (node->reader != NULL) { - int i; + int i; for (i = 0; i < node->nreaders; ++i) DestroyTupleQueueReader(node->reader[i]); @@ -452,10 +452,10 @@ void ExecReScanGather(GatherState *node) { /* - * Re-initialize the parallel workers to perform rescan of relation. - * We want to gracefully shutdown all the workers so that they - * should be able to propagate any error or other information to master - * backend before dying. Parallel context will be reused for rescan. + * Re-initialize the parallel workers to perform rescan of relation. We + * want to gracefully shutdown all the workers so that they should be able + * to propagate any error or other information to master backend before + * dying. Parallel context will be reused for rescan. */ ExecShutdownGatherWorkers(node); diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index e62c8aad65..af7b26c0ef 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -1221,10 +1221,10 @@ ExecOnConflictUpdate(ModifyTableState *mtstate, /* * Note that it is possible that the target tuple has been modified in * this session, after the above heap_lock_tuple. We choose to not error - * out in that case, in line with ExecUpdate's treatment of similar - * cases. This can happen if an UPDATE is triggered from within - * ExecQual(), ExecWithCheckOptions() or ExecProject() above, e.g. by - * selecting from a wCTE in the ON CONFLICT's SET. + * out in that case, in line with ExecUpdate's treatment of similar cases. + * This can happen if an UPDATE is triggered from within ExecQual(), + * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a + * wCTE in the ON CONFLICT's SET. */ /* Execute UPDATE with projection */ @@ -1595,7 +1595,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) /* Initialize the usesFdwDirectModify flag */ resultRelInfo->ri_usesFdwDirectModify = bms_is_member(i, - node->fdwDirectModifyPlans); + node->fdwDirectModifyPlans); /* * Verify result relation is a valid target for the current operation diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c index f12921d188..00bf3a58b1 100644 --- a/src/backend/executor/nodeSeqscan.c +++ b/src/backend/executor/nodeSeqscan.c @@ -65,8 +65,8 @@ SeqNext(SeqScanState *node) if (scandesc == NULL) { /* - * We reach here if the scan is not parallel, or if we're executing - * a scan that was intended to be parallel serially. 
+ * We reach here if the scan is not parallel, or if we're executing a + * scan that was intended to be parallel serially. */ scandesc = heap_beginscan(node->ss.ss_currentRelation, estate->es_snapshot, @@ -145,7 +145,7 @@ InitScanRelation(SeqScanState *node, EState *estate, int eflags) * open that relation and acquire appropriate lock on it. */ currentRelation = ExecOpenScanRelation(estate, - ((SeqScan *) node->ss.ps.plan)->scanrelid, + ((SeqScan *) node->ss.ps.plan)->scanrelid, eflags); node->ss.ss_currentRelation = currentRelation; @@ -277,8 +277,8 @@ ExecReScanSeqScan(SeqScanState *node) scan = node->ss.ss_currentScanDesc; if (scan != NULL) - heap_rescan(scan, /* scan desc */ - NULL); /* new scan keys */ + heap_rescan(scan, /* scan desc */ + NULL); /* new scan keys */ ExecScanReScan((ScanState *) node); } @@ -316,7 +316,7 @@ ExecSeqScanInitializeDSM(SeqScanState *node, ParallelContext *pcxt) { EState *estate = node->ss.ps.state; - ParallelHeapScanDesc pscan; + ParallelHeapScanDesc pscan; pscan = shm_toc_allocate(pcxt->toc, node->pscan_len); heap_parallelscan_initialize(pscan, @@ -336,7 +336,7 @@ ExecSeqScanInitializeDSM(SeqScanState *node, void ExecSeqScanInitializeWorker(SeqScanState *node, shm_toc *toc) { - ParallelHeapScanDesc pscan; + ParallelHeapScanDesc pscan; pscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id); node->ss.ss_currentScanDesc = diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c index f06eebee0c..d4c88a1f0e 100644 --- a/src/backend/executor/nodeWindowAgg.c +++ b/src/backend/executor/nodeWindowAgg.c @@ -2220,8 +2220,8 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, /* build expression trees using actual argument & result types */ build_aggregate_transfn_expr(inputTypes, numArguments, - 0, /* no ordered-set window functions yet */ - false, /* no variadic window functions yet */ + 0, /* no ordered-set window functions yet */ + false, /* no variadic window functions yet */ wfunc->wintype, wfunc->inputcollid, transfn_oid, diff --git a/src/backend/executor/tqueue.c b/src/backend/executor/tqueue.c index 8abb1f16e4..a729372c74 100644 --- a/src/backend/executor/tqueue.c +++ b/src/backend/executor/tqueue.c @@ -44,13 +44,13 @@ typedef enum TQUEUE_REMAP_ARRAY, /* array */ TQUEUE_REMAP_RANGE, /* range */ TQUEUE_REMAP_RECORD /* composite type, named or anonymous */ -} RemapClass; +} RemapClass; typedef struct { int natts; RemapClass mapping[FLEXIBLE_ARRAY_MEMBER]; -} RemapInfo; +} RemapInfo; typedef struct { @@ -61,13 +61,13 @@ typedef struct char mode; TupleDesc tupledesc; RemapInfo *remapinfo; -} TQueueDestReceiver; +} TQueueDestReceiver; typedef struct RecordTypemodMap { int remotetypmod; int localtypmod; -} RecordTypemodMap; +} RecordTypemodMap; struct TupleQueueReader { @@ -81,19 +81,19 @@ struct TupleQueueReader #define TUPLE_QUEUE_MODE_CONTROL 'c' #define TUPLE_QUEUE_MODE_DATA 'd' -static void tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype, +static void tqueueWalk(TQueueDestReceiver *tqueue, RemapClass walktype, Datum value); -static void tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value); -static void tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value); -static void tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value); -static void tqueueSendTypmodInfo(TQueueDestReceiver * tqueue, int typmod, +static void tqueueWalkRecord(TQueueDestReceiver *tqueue, Datum value); +static void tqueueWalkArray(TQueueDestReceiver *tqueue, Datum value); +static void tqueueWalkRange(TQueueDestReceiver 
*tqueue, Datum value); +static void tqueueSendTypmodInfo(TQueueDestReceiver *tqueue, int typmod, TupleDesc tupledesc); static void TupleQueueHandleControlMessage(TupleQueueReader *reader, Size nbytes, char *data); static HeapTuple TupleQueueHandleDataMessage(TupleQueueReader *reader, Size nbytes, HeapTupleHeader data); static HeapTuple TupleQueueRemapTuple(TupleQueueReader *reader, - TupleDesc tupledesc, RemapInfo * remapinfo, + TupleDesc tupledesc, RemapInfo *remapinfo, HeapTuple tuple); static Datum TupleQueueRemap(TupleQueueReader *reader, RemapClass remapclass, Datum value); @@ -212,7 +212,7 @@ tqueueReceiveSlot(TupleTableSlot *slot, DestReceiver *self) * Invoke the appropriate walker function based on the given RemapClass. */ static void -tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype, Datum value) +tqueueWalk(TQueueDestReceiver *tqueue, RemapClass walktype, Datum value) { check_stack_depth(); @@ -237,7 +237,7 @@ tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype, Datum value) * contained therein. */ static void -tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value) +tqueueWalkRecord(TQueueDestReceiver *tqueue, Datum value) { HeapTupleHeader tup; Oid typeid; @@ -304,7 +304,7 @@ tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value) * contained therein. */ static void -tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value) +tqueueWalkArray(TQueueDestReceiver *tqueue, Datum value) { ArrayType *arr = DatumGetArrayTypeP(value); Oid typeid = ARR_ELEMTYPE(arr); @@ -342,7 +342,7 @@ tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value) * contained therein. */ static void -tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value) +tqueueWalkRange(TQueueDestReceiver *tqueue, Datum value) { RangeType *range = DatumGetRangeType(value); Oid typeid = RangeTypeGetOid(range); @@ -386,7 +386,7 @@ tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value) * already done so previously. */ static void -tqueueSendTypmodInfo(TQueueDestReceiver * tqueue, int typmod, +tqueueSendTypmodInfo(TQueueDestReceiver *tqueue, int typmod, TupleDesc tupledesc) { StringInfoData buf; @@ -613,7 +613,7 @@ TupleQueueHandleDataMessage(TupleQueueReader *reader, */ static HeapTuple TupleQueueRemapTuple(TupleQueueReader *reader, TupleDesc tupledesc, - RemapInfo * remapinfo, HeapTuple tuple) + RemapInfo *remapinfo, HeapTuple tuple) { Datum *values; bool *isnull; diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c index 43bb134355..7d8fc3e54d 100644 --- a/src/backend/libpq/auth.c +++ b/src/backend/libpq/auth.c @@ -1875,7 +1875,7 @@ CheckPAMAuth(Port *port, char *user, char *password) retval = pg_getnameinfo_all(&port->raddr.addr, port->raddr.salen, hostinfo, sizeof(hostinfo), NULL, 0, - port->hba->pam_use_hostname ? 0 : NI_NUMERICHOST | NI_NUMERICSERV); + port->hba->pam_use_hostname ? 
0 : NI_NUMERICHOST | NI_NUMERICSERV); if (retval != 0) { ereport(WARNING, @@ -1934,7 +1934,7 @@ CheckPAMAuth(Port *port, char *user, char *password) { ereport(LOG, (errmsg("pam_set_item(PAM_RHOST) failed: %s", - pam_strerror(pamh, retval)))); + pam_strerror(pamh, retval)))); pam_passwd = NULL; return STATUS_ERROR; } @@ -1996,8 +1996,8 @@ CheckPAMAuth(Port *port, char *user, char *password) static int CheckBSDAuth(Port *port, char *user) { - char *passwd; - int retval; + char *passwd; + int retval; /* Send regular password request to client, and get the response */ sendAuthRequest(port, AUTH_REQ_PASSWORD); @@ -2539,11 +2539,10 @@ CheckRADIUSAuth(Port *port) radius_add_attribute(packet, RADIUS_NAS_IDENTIFIER, (unsigned char *) identifier, strlen(identifier)); /* - * RADIUS password attributes are calculated as: - * e[0] = p[0] XOR MD5(secret + Request Authenticator) - * for the first group of 16 octets, and then: - * e[i] = p[i] XOR MD5(secret + e[i-1]) - * for the following ones (if necessary) + * RADIUS password attributes are calculated as: e[0] = p[0] XOR + * MD5(secret + Request Authenticator) for the first group of 16 octets, + * and then: e[i] = p[i] XOR MD5(secret + e[i-1]) for the following ones + * (if necessary) */ encryptedpasswordlen = ((strlen(passwd) + RADIUS_VECTOR_LENGTH - 1) / RADIUS_VECTOR_LENGTH) * RADIUS_VECTOR_LENGTH; cryptvector = palloc(strlen(port->hba->radiussecret) + RADIUS_VECTOR_LENGTH); @@ -2554,7 +2553,11 @@ CheckRADIUSAuth(Port *port) for (i = 0; i < encryptedpasswordlen; i += RADIUS_VECTOR_LENGTH) { memcpy(cryptvector + strlen(port->hba->radiussecret), md5trailer, RADIUS_VECTOR_LENGTH); - /* .. and for subsequent iterations the result of the previous XOR (calculated below) */ + + /* + * .. and for subsequent iterations the result of the previous XOR + * (calculated below) + */ md5trailer = encryptedpassword + i; if (!pg_md5_binary(cryptvector, strlen(port->hba->radiussecret) + RADIUS_VECTOR_LENGTH, encryptedpassword + i)) @@ -2565,7 +2568,7 @@ CheckRADIUSAuth(Port *port) return STATUS_ERROR; } - for (j = i; j < i+RADIUS_VECTOR_LENGTH; j++) + for (j = i; j < i + RADIUS_VECTOR_LENGTH; j++) { if (j < strlen(passwd)) encryptedpassword[j] = passwd[j] ^ encryptedpassword[j]; diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c index 95cceeed7a..f6adb155c6 100644 --- a/src/backend/libpq/be-secure-openssl.c +++ b/src/backend/libpq/be-secure-openssl.c @@ -241,8 +241,8 @@ be_tls_init(void) (buf.st_uid == 0 && buf.st_mode & (S_IWGRP | S_IXGRP | S_IRWXO))) ereport(FATAL, (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("private key file \"%s\" has group or world access", - ssl_key_file), + errmsg("private key file \"%s\" has group or world access", + ssl_key_file), errdetail("File must have permissions u=rw (0600) or less if owned by the database user, or permissions u=rw,g=r (0640) or less if owned by root."))); #endif @@ -316,7 +316,7 @@ be_tls_init(void) else ereport(FATAL, (errmsg("could not load SSL certificate revocation list file \"%s\": %s", - ssl_crl_file, SSLerrmessage(ERR_get_error())))); + ssl_crl_file, SSLerrmessage(ERR_get_error())))); } } @@ -377,11 +377,12 @@ be_tls_open_server(Port *port) port->ssl_in_use = true; aloop: + /* * Prepare to call SSL_get_error() by clearing thread's OpenSSL error * queue. In general, the current thread's error queue must be empty - * before the TLS/SSL I/O operation is attempted, or SSL_get_error() - * will not work reliably. 
An extension may have failed to clear the + * before the TLS/SSL I/O operation is attempted, or SSL_get_error() will + * not work reliably. An extension may have failed to clear the * per-thread error queue following another call to an OpenSSL I/O * routine. */ @@ -393,12 +394,11 @@ aloop: /* * Other clients of OpenSSL in the backend may fail to call - * ERR_get_error(), but we always do, so as to not cause problems - * for OpenSSL clients that don't call ERR_clear_error() - * defensively. Be sure that this happens by calling now. - * SSL_get_error() relies on the OpenSSL per-thread error queue - * being intact, so this is the earliest possible point - * ERR_get_error() may be called. + * ERR_get_error(), but we always do, so as to not cause problems for + * OpenSSL clients that don't call ERR_clear_error() defensively. Be + * sure that this happens by calling now. SSL_get_error() relies on + * the OpenSSL per-thread error queue being intact, so this is the + * earliest possible point ERR_get_error() may be called. */ ecode = ERR_get_error(); switch (err) diff --git a/src/backend/libpq/be-secure.c b/src/backend/libpq/be-secure.c index 29297e7299..cdd07d577b 100644 --- a/src/backend/libpq/be-secure.c +++ b/src/backend/libpq/be-secure.c @@ -140,26 +140,26 @@ retry: /* In blocking mode, wait until the socket is ready */ if (n < 0 && !port->noblock && (errno == EWOULDBLOCK || errno == EAGAIN)) { - WaitEvent event; + WaitEvent event; Assert(waitfor); ModifyWaitEvent(FeBeWaitSet, 0, waitfor, NULL); - WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */, &event, 1); + WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */ , &event, 1); /* * If the postmaster has died, it's not safe to continue running, * because it is the postmaster's job to kill us if some other backend * exists uncleanly. Moreover, we won't run very well in this state; * helper processes like walwriter and the bgwriter will exit, so - * performance may be poor. Finally, if we don't exit, pg_ctl will - * be unable to restart the postmaster without manual intervention, - * so no new connections can be accepted. Exiting clears the deck - * for a postmaster restart. + * performance may be poor. Finally, if we don't exit, pg_ctl will be + * unable to restart the postmaster without manual intervention, so no + * new connections can be accepted. Exiting clears the deck for a + * postmaster restart. * - * (Note that we only make this check when we would otherwise sleep - * on our latch. We might still continue running for a while if the + * (Note that we only make this check when we would otherwise sleep on + * our latch. We might still continue running for a while if the * postmaster is killed in mid-query, or even through multiple queries * if we never have to wait for read. We don't want to burn too many * cycles checking for this very rare condition, and this should cause @@ -168,7 +168,7 @@ retry: if (event.events & WL_POSTMASTER_DEATH) ereport(FATAL, (errcode(ERRCODE_ADMIN_SHUTDOWN), - errmsg("terminating connection due to unexpected postmaster exit"))); + errmsg("terminating connection due to unexpected postmaster exit"))); /* Handle interrupt. 
*/ if (event.events & WL_LATCH_SET) @@ -241,19 +241,19 @@ retry: if (n < 0 && !port->noblock && (errno == EWOULDBLOCK || errno == EAGAIN)) { - WaitEvent event; + WaitEvent event; Assert(waitfor); ModifyWaitEvent(FeBeWaitSet, 0, waitfor, NULL); - WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */, &event, 1); + WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */ , &event, 1); /* See comments in secure_read. */ if (event.events & WL_POSTMASTER_DEATH) ereport(FATAL, (errcode(ERRCODE_ADMIN_SHUTDOWN), - errmsg("terminating connection due to unexpected postmaster exit"))); + errmsg("terminating connection due to unexpected postmaster exit"))); /* Handle interrupt. */ if (event.events & WL_LATCH_SET) diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c index 8d6eb0b7bb..ba42753c06 100644 --- a/src/backend/libpq/pqcomm.c +++ b/src/backend/libpq/pqcomm.c @@ -1174,7 +1174,7 @@ pq_startmsgread(void) if (PqCommReadingMsg) ereport(FATAL, (errcode(ERRCODE_PROTOCOL_VIOLATION), - errmsg("terminating connection because protocol synchronization was lost"))); + errmsg("terminating connection because protocol synchronization was lost"))); PqCommReadingMsg = true; } diff --git a/src/backend/libpq/pqmq.c b/src/backend/libpq/pqmq.c index 350210b006..3225c1fa0e 100644 --- a/src/backend/libpq/pqmq.c +++ b/src/backend/libpq/pqmq.c @@ -143,9 +143,9 @@ mq_putmessage(char msgtype, const char *s, size_t len) /* * If the message queue is already gone, just ignore the message. This - * doesn't necessarily indicate a problem; for example, DEBUG messages - * can be generated late in the shutdown sequence, after all DSMs have - * already been detached. + * doesn't necessarily indicate a problem; for example, DEBUG messages can + * be generated late in the shutdown sequence, after all DSMs have already + * been detached. */ if (pq_mq == NULL) return 0; diff --git a/src/backend/main/main.c b/src/backend/main/main.c index da86c6243f..a13c446f89 100644 --- a/src/backend/main/main.c +++ b/src/backend/main/main.c @@ -270,19 +270,22 @@ startup_hacks(const char *progname) SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX); #if defined(_M_AMD64) && _MSC_VER == 1800 + /* - * Avoid crashing in certain floating-point operations if - * we were compiled for x64 with MS Visual Studio 2013 and - * are running on Windows prior to 7/2008R2 SP1 on an - * AVX2-capable CPU. + * Avoid crashing in certain floating-point operations if we were + * compiled for x64 with MS Visual Studio 2013 and are running on + * Windows prior to 7/2008R2 SP1 on an AVX2-capable CPU. 
* - * Ref: https://connect.microsoft.com/VisualStudio/feedback/details/811093/visual-studio-2013-rtm-c-x64-code-generation-bug-for-avx2-instructions + * Ref: + * https://connect.microsoft.com/VisualStudio/feedback/details/811093/v + * isual-studio-2013-rtm-c-x64-code-generation-bug-for-avx2-instruction + * s */ if (!IsWindows7SP1OrGreater()) { _set_FMA3_enable(0); } -#endif /* defined(_M_AMD64) && _MSC_VER == 1800 */ +#endif /* defined(_M_AMD64) && _MSC_VER == 1800 */ } #endif /* WIN32 */ diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c index 1ac51a7b2f..5facd439ca 100644 --- a/src/backend/nodes/nodeFuncs.c +++ b/src/backend/nodes/nodeFuncs.c @@ -3499,7 +3499,7 @@ planstate_tree_walker(PlanState *planstate, return true; break; case T_CustomScan: - foreach (lc, ((CustomScanState *) planstate)->custom_ps) + foreach(lc, ((CustomScanState *) planstate)->custom_ps) { if (walker((PlanState *) lfirst(lc), context)) return true; diff --git a/src/backend/nodes/params.c b/src/backend/nodes/params.c index d07974d3b0..d7d513e78f 100644 --- a/src/backend/nodes/params.c +++ b/src/backend/nodes/params.c @@ -94,8 +94,8 @@ copyParamList(ParamListInfo from) Size EstimateParamListSpace(ParamListInfo paramLI) { - int i; - Size sz = sizeof(int); + int i; + Size sz = sizeof(int); if (paramLI == NULL || paramLI->numParams <= 0) return sz; @@ -119,7 +119,7 @@ EstimateParamListSpace(ParamListInfo paramLI) typeOid = prm->ptype; } - sz = add_size(sz, sizeof(Oid)); /* space for type OID */ + sz = add_size(sz, sizeof(Oid)); /* space for type OID */ sz = add_size(sz, sizeof(uint16)); /* space for pflags */ /* space for datum/isnull */ @@ -132,7 +132,7 @@ EstimateParamListSpace(ParamListInfo paramLI) typByVal = true; } sz = add_size(sz, - datumEstimateSpace(prm->value, prm->isnull, typByVal, typLen)); + datumEstimateSpace(prm->value, prm->isnull, typByVal, typLen)); } return sz; diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c index 6f28047d84..c401762a39 100644 --- a/src/backend/nodes/readfuncs.c +++ b/src/backend/nodes/readfuncs.c @@ -1836,8 +1836,8 @@ _readCustomScan(void) READ_BITMAPSET_FIELD(custom_relids); /* Lookup CustomScanMethods by CustomName */ - token = pg_strtok(&length); /* skip methods: */ - token = pg_strtok(&length); /* CustomName */ + token = pg_strtok(&length); /* skip methods: */ + token = pg_strtok(&length); /* CustomName */ custom_name = nullable_string(token, length); methods = GetCustomScanMethods(custom_name, false); local_node->methods = methods; @@ -2227,11 +2227,12 @@ _readExtensibleNode(void) { const ExtensibleNodeMethods *methods; ExtensibleNode *local_node; - const char *extnodename; + const char *extnodename; + READ_TEMP_LOCALS(); - token = pg_strtok(&length); /* skip: extnodename */ - token = pg_strtok(&length); /* get extnodename */ + token = pg_strtok(&length); /* skip: extnodename */ + token = pg_strtok(&length); /* get extnodename */ extnodename = nullable_string(token, length); if (!extnodename) diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c index 6deb2cf0c9..ff5e39c1aa 100644 --- a/src/backend/optimizer/path/allpaths.c +++ b/src/backend/optimizer/path/allpaths.c @@ -163,8 +163,8 @@ make_one_rel(PlannerInfo *root, List *joinlist) set_base_rel_consider_startup(root); /* - * Generate access paths for the base rels. set_base_rel_sizes also - * sets the consider_parallel flag for each baserel, if appropriate. + * Generate access paths for the base rels. 
set_base_rel_sizes also sets + * the consider_parallel flag for each baserel, if appropriate. */ set_base_rel_sizes(root); set_base_rel_pathlists(root); @@ -228,7 +228,7 @@ set_base_rel_consider_startup(PlannerInfo *root) /* * set_base_rel_sizes * Set the size estimates (rows and widths) for each base-relation entry. - * Also determine whether to consider parallel paths for base relations. + * Also determine whether to consider parallel paths for base relations. * * We do this in a separate pass over the base rels so that rowcount * estimates are available for parameterized path generation, and also so @@ -509,6 +509,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, switch (rte->rtekind) { case RTE_RELATION: + /* * Currently, parallel workers can't access the leader's temporary * tables. We could possibly relax this if the wrote all of its @@ -528,7 +529,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, */ if (rte->tablesample != NULL) { - Oid proparallel = func_parallel(rte->tablesample->tsmhandler); + Oid proparallel = func_parallel(rte->tablesample->tsmhandler); if (proparallel != PROPARALLEL_SAFE) return; @@ -557,14 +558,15 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, break; case RTE_SUBQUERY: + /* * Subplans currently aren't passed to workers. Even if they - * were, the subplan might be using parallelism internally, and - * we can't support nested Gather nodes at present. Finally, - * we don't have a good way of knowing whether the subplan - * involves any parallel-restricted operations. It would be - * nice to relax this restriction some day, but it's going to - * take a fair amount of work. + * were, the subplan might be using parallelism internally, and we + * can't support nested Gather nodes at present. Finally, we + * don't have a good way of knowing whether the subplan involves + * any parallel-restricted operations. It would be nice to relax + * this restriction some day, but it's going to take a fair amount + * of work. */ return; @@ -580,6 +582,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, break; case RTE_VALUES: + /* * The data for a VALUES clause is stored in the plan tree itself, * so scanning it in a worker is fine. @@ -587,6 +590,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, break; case RTE_CTE: + /* * CTE tuplestores aren't shared among parallel workers, so we * force all CTE scans to happen in the leader. Also, populating @@ -598,8 +602,8 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, } /* - * If there's anything in baserestrictinfo that's parallel-restricted, - * we give up on parallelizing access to this relation. We could consider + * If there's anything in baserestrictinfo that's parallel-restricted, we + * give up on parallelizing access to this relation. We could consider * instead postponing application of the restricted quals until we're * above all the parallelism in the plan tree, but it's not clear that * this would be a win in very many cases, and it might be tricky to make @@ -609,8 +613,8 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, return; /* - * If the relation's outputs are not parallel-safe, we must give up. - * In the common case where the relation only outputs Vars, this check is + * If the relation's outputs are not parallel-safe, we must give up. In + * the common case where the relation only outputs Vars, this check is * very cheap; otherwise, we have to do more work. 
*/ if (rel->reltarget_has_non_vars && @@ -1251,8 +1255,8 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, int parallel_workers = 0; /* - * Decide on the numebr of workers to request for this append path. For - * now, we just use the maximum value from among the members. It + * Decide on the number of workers to request for this append path. + * For now, we just use the maximum value from among the members. It * might be useful to use a higher number if the Append node were * smart enough to spread out the workers, but it currently isn't. */ @@ -2160,8 +2164,8 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels) * Run generate_gather_paths() for each just-processed joinrel. We * could not do this earlier because both regular and partial paths * can get added to a particular joinrel at multiple times within - * join_search_one_level. After that, we're done creating paths - * for the joinrel, so run set_cheapest(). + * join_search_one_level. After that, we're done creating paths for + * the joinrel, so run set_cheapest(). */ foreach(lc, root->join_rel_level[lev]) { diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c index 52df17fe69..ab8df76a6e 100644 --- a/src/backend/optimizer/plan/createplan.c +++ b/src/backend/optimizer/plan/createplan.c @@ -1428,15 +1428,14 @@ create_projection_plan(PlannerInfo *root, ProjectionPath *best_path) * We might not really need a Result node here. There are several ways * that this can happen. For example, MergeAppend doesn't project, so we * would have thought that we needed a projection to attach resjunk sort - * columns to its output ... but create_merge_append_plan might have - * added those same resjunk sort columns to both MergeAppend and its - * children. Alternatively, apply_projection_to_path might have created - * a projection path as the subpath of a Gather node even though the - * subpath was projection-capable. So, if the subpath is capable of - * projection or the desired tlist is the same expression-wise as the - * subplan's, just jam it in there. We'll have charged for a Result that - * doesn't actually appear in the plan, but that's better than having a - * Result we don't need. + * columns to its output ... but create_merge_append_plan might have added + * those same resjunk sort columns to both MergeAppend and its children. + * Alternatively, apply_projection_to_path might have created a projection + * path as the subpath of a Gather node even though the subpath was + * projection-capable. So, if the subpath is capable of projection or the + * desired tlist is the same expression-wise as the subplan's, just jam it + * in there. We'll have charged for a Result that doesn't actually appear + * in the plan, but that's better than having a Result we don't need. */ if (is_projection_capable_path(best_path->subpath) || tlist_same_exprs(tlist, subplan->targetlist)) @@ -3248,8 +3247,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path, /* * If a join between foreign relations was pushed down, remember it. The * push-down safety of the join depends upon the server and user mapping - * being same. That can change between planning and execution time, in which - * case the plan should be invalidated. + * being same. That can change between planning and execution time, in + * which case the plan should be invalidated. 
*/ if (scan_relid == 0) root->glob->hasForeignJoin = true; @@ -3257,8 +3256,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path, /* * Replace any outer-relation variables with nestloop params in the qual, * fdw_exprs and fdw_recheck_quals expressions. We do this last so that - * the FDW doesn't have to be involved. (Note that parts of fdw_exprs - * or fdw_recheck_quals could have come from join clauses, so doing this + * the FDW doesn't have to be involved. (Note that parts of fdw_exprs or + * fdw_recheck_quals could have come from join clauses, so doing this * beforehand on the scan_clauses wouldn't work.) We assume * fdw_scan_tlist contains no such variables. */ @@ -3279,8 +3278,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path, * 0, but there can be no Var with relid 0 in the rel's targetlist or the * restriction clauses, so we skip this in that case. Note that any such * columns in base relations that were joined are assumed to be contained - * in fdw_scan_tlist.) This is a bit of a kluge and might go away someday, - * so we intentionally leave it out of the API presented to FDWs. + * in fdw_scan_tlist.) This is a bit of a kluge and might go away + * someday, so we intentionally leave it out of the API presented to FDWs. */ scan_plan->fsSystemCol = false; if (scan_relid > 0) @@ -5899,7 +5898,7 @@ make_gather(List *qptlist, plan->righttree = NULL; node->num_workers = nworkers; node->single_copy = single_copy; - node->invisible = false; + node->invisible = false; return node; } diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index ba0c0ecae9..54c0440436 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -108,10 +108,10 @@ static double get_number_of_groups(PlannerInfo *root, List *rollup_lists, List *rollup_groupclauses); static void set_grouped_rel_consider_parallel(PlannerInfo *root, - RelOptInfo *grouped_rel, - PathTarget *target); + RelOptInfo *grouped_rel, + PathTarget *target); static Size estimate_hashagg_tablesize(Path *path, AggClauseCosts *agg_costs, - double dNumGroups); + double dNumGroups); static RelOptInfo *create_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, @@ -141,7 +141,7 @@ static RelOptInfo *create_ordered_paths(PlannerInfo *root, static PathTarget *make_group_input_target(PlannerInfo *root, PathTarget *final_target); static PathTarget *make_partialgroup_input_target(PlannerInfo *root, - PathTarget *final_target); + PathTarget *final_target); static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist); static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists); static PathTarget *make_window_input_target(PlannerInfo *root, @@ -1777,8 +1777,8 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, * findable from the PlannerInfo struct; anything else the FDW wants * to know should be obtainable via "root". * - * Note: CustomScan providers, as well as FDWs that don't want to - * use this hook, can use the create_upper_paths_hook; see below. + * Note: CustomScan providers, as well as FDWs that don't want to use + * this hook, can use the create_upper_paths_hook; see below. */ if (current_rel->fdwroutine && current_rel->fdwroutine->GetForeignUpperPaths) @@ -3196,8 +3196,8 @@ set_grouped_rel_consider_parallel(PlannerInfo *root, RelOptInfo *grouped_rel, /* * All that's left to check now is to make sure all aggregate functions - * support partial mode. 
If there's no aggregates then we can skip checking - * that. + * support partial mode. If there's no aggregates then we can skip + * checking that. */ if (!parse->hasAggs) grouped_rel->consider_parallel = true; @@ -3370,9 +3370,10 @@ create_grouping_paths(PlannerInfo *root, /* * Determine whether it's possible to perform sort-based implementations - * of grouping. (Note that if groupClause is empty, grouping_is_sortable() - * is trivially true, and all the pathkeys_contained_in() tests will - * succeed too, so that we'll consider every surviving input path.) + * of grouping. (Note that if groupClause is empty, + * grouping_is_sortable() is trivially true, and all the + * pathkeys_contained_in() tests will succeed too, so that we'll consider + * every surviving input path.) */ can_sort = grouping_is_sortable(parse->groupClause); @@ -3408,7 +3409,7 @@ create_grouping_paths(PlannerInfo *root, */ if (grouped_rel->consider_parallel) { - Path *cheapest_partial_path = linitial(input_rel->partial_pathlist); + Path *cheapest_partial_path = linitial(input_rel->partial_pathlist); /* * Build target list for partial aggregate paths. We cannot reuse the @@ -3471,27 +3472,27 @@ create_grouping_paths(PlannerInfo *root, if (parse->hasAggs) add_partial_path(grouped_rel, (Path *) - create_agg_path(root, - grouped_rel, - path, - partial_grouping_target, - parse->groupClause ? AGG_SORTED : AGG_PLAIN, - parse->groupClause, - NIL, - &agg_partial_costs, - dNumPartialGroups, - false, - false, - true)); + create_agg_path(root, + grouped_rel, + path, + partial_grouping_target, + parse->groupClause ? AGG_SORTED : AGG_PLAIN, + parse->groupClause, + NIL, + &agg_partial_costs, + dNumPartialGroups, + false, + false, + true)); else add_partial_path(grouped_rel, (Path *) - create_group_path(root, - grouped_rel, - path, - partial_grouping_target, - parse->groupClause, - NIL, - dNumPartialGroups)); + create_group_path(root, + grouped_rel, + path, + partial_grouping_target, + parse->groupClause, + NIL, + dNumPartialGroups)); } } } @@ -3513,18 +3514,18 @@ create_grouping_paths(PlannerInfo *root, if (hashaggtablesize < work_mem * 1024L) { add_partial_path(grouped_rel, (Path *) - create_agg_path(root, - grouped_rel, - cheapest_partial_path, - partial_grouping_target, - AGG_HASHED, - parse->groupClause, - NIL, - &agg_partial_costs, - dNumPartialGroups, - false, - false, - true)); + create_agg_path(root, + grouped_rel, + cheapest_partial_path, + partial_grouping_target, + AGG_HASHED, + parse->groupClause, + NIL, + &agg_partial_costs, + dNumPartialGroups, + false, + false, + true)); } } } @@ -3616,13 +3617,13 @@ create_grouping_paths(PlannerInfo *root, /* * Now generate a complete GroupAgg Path atop of the cheapest partial - * path. We need only bother with the cheapest path here, as the output - * of Gather is never sorted. + * path. We need only bother with the cheapest path here, as the + * output of Gather is never sorted. */ if (grouped_rel->partial_pathlist) { - Path *path = (Path *) linitial(grouped_rel->partial_pathlist); - double total_groups = path->rows * path->parallel_workers; + Path *path = (Path *) linitial(grouped_rel->partial_pathlist); + double total_groups = path->rows * path->parallel_workers; path = (Path *) create_gather_path(root, grouped_rel, @@ -3632,9 +3633,9 @@ create_grouping_paths(PlannerInfo *root, &total_groups); /* - * Gather is always unsorted, so we'll need to sort, unless there's - * no GROUP BY clause, in which case there will only be a single - * group. 
+ * Gather is always unsorted, so we'll need to sort, unless + * there's no GROUP BY clause, in which case there will only be a + * single group. */ if (parse->groupClause) path = (Path *) create_sort_path(root, @@ -3645,27 +3646,27 @@ create_grouping_paths(PlannerInfo *root, if (parse->hasAggs) add_path(grouped_rel, (Path *) - create_agg_path(root, - grouped_rel, - path, - target, - parse->groupClause ? AGG_SORTED : AGG_PLAIN, - parse->groupClause, - (List *) parse->havingQual, - &agg_final_costs, - dNumGroups, - true, - true, - true)); + create_agg_path(root, + grouped_rel, + path, + target, + parse->groupClause ? AGG_SORTED : AGG_PLAIN, + parse->groupClause, + (List *) parse->havingQual, + &agg_final_costs, + dNumGroups, + true, + true, + true)); else add_path(grouped_rel, (Path *) - create_group_path(root, - grouped_rel, - path, - target, - parse->groupClause, - (List *) parse->havingQual, - dNumGroups)); + create_group_path(root, + grouped_rel, + path, + target, + parse->groupClause, + (List *) parse->havingQual, + dNumGroups)); } } @@ -3678,15 +3679,15 @@ create_grouping_paths(PlannerInfo *root, /* * Provided that the estimated size of the hashtable does not exceed * work_mem, we'll generate a HashAgg Path, although if we were unable - * to sort above, then we'd better generate a Path, so that we at least - * have one. + * to sort above, then we'd better generate a Path, so that we at + * least have one. */ if (hashaggtablesize < work_mem * 1024L || grouped_rel->pathlist == NIL) { /* - * We just need an Agg over the cheapest-total input path, since input - * order won't matter. + * We just need an Agg over the cheapest-total input path, since + * input order won't matter. */ add_path(grouped_rel, (Path *) create_agg_path(root, grouped_rel, @@ -3704,12 +3705,12 @@ create_grouping_paths(PlannerInfo *root, /* * Generate a HashAgg Path atop of the cheapest partial path. Once - * again, we'll only do this if it looks as though the hash table won't - * exceed work_mem. + * again, we'll only do this if it looks as though the hash table + * won't exceed work_mem. 
*/ if (grouped_rel->partial_pathlist) { - Path *path = (Path *) linitial(grouped_rel->partial_pathlist); + Path *path = (Path *) linitial(grouped_rel->partial_pathlist); hashaggtablesize = estimate_hashagg_tablesize(path, &agg_final_costs, @@ -3717,7 +3718,7 @@ create_grouping_paths(PlannerInfo *root, if (hashaggtablesize < work_mem * 1024L) { - double total_groups = path->rows * path->parallel_workers; + double total_groups = path->rows * path->parallel_workers; path = (Path *) create_gather_path(root, grouped_rel, @@ -3727,18 +3728,18 @@ create_grouping_paths(PlannerInfo *root, &total_groups); add_path(grouped_rel, (Path *) - create_agg_path(root, - grouped_rel, - path, - target, - AGG_HASHED, - parse->groupClause, - (List *) parse->havingQual, - &agg_final_costs, - dNumGroups, - true, - true, - true)); + create_agg_path(root, + grouped_rel, + path, + target, + AGG_HASHED, + parse->groupClause, + (List *) parse->havingQual, + &agg_final_costs, + dNumGroups, + true, + true, + true)); } } } diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c index 266e83055b..9b690cf66e 100644 --- a/src/backend/optimizer/plan/setrefs.c +++ b/src/backend/optimizer/plan/setrefs.c @@ -2100,6 +2100,7 @@ search_indexed_tlist_for_partial_aggref(Aggref *aggref, indexed_tlist *itlist, continue; if (aggref->aggvariadic != tlistaggref->aggvariadic) continue; + /* * it would be harmless to compare aggcombine and aggpartial, but * it's also unnecessary diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c index 759566ad46..e7909eb5d5 100644 --- a/src/backend/optimizer/util/clauses.c +++ b/src/backend/optimizer/util/clauses.c @@ -101,7 +101,7 @@ typedef struct } has_parallel_hazard_arg; static bool aggregates_allow_partial_walker(Node *node, - partial_agg_context *context); + partial_agg_context *context); static bool contain_agg_clause_walker(Node *node, void *context); static bool count_agg_clauses_walker(Node *node, count_agg_clauses_context *context); @@ -112,9 +112,9 @@ static bool contain_mutable_functions_walker(Node *node, void *context); static bool contain_volatile_functions_walker(Node *node, void *context); static bool contain_volatile_functions_not_nextval_walker(Node *node, void *context); static bool has_parallel_hazard_walker(Node *node, - has_parallel_hazard_arg *context); + has_parallel_hazard_arg *context); static bool parallel_too_dangerous(char proparallel, - has_parallel_hazard_arg *context); + has_parallel_hazard_arg *context); static bool typeid_is_temp(Oid typeid); static bool contain_nonstrict_functions_walker(Node *node, void *context); static bool contain_leaked_vars_walker(Node *node, void *context); @@ -446,7 +446,7 @@ aggregates_allow_partial_walker(Node *node, partial_agg_context *context) if (aggref->aggdistinct || aggref->aggorder) { context->allowedtype = PAT_DISABLED; - return true; /* abort search */ + return true; /* abort search */ } aggTuple = SearchSysCache1(AGGFNOID, ObjectIdGetDatum(aggref->aggfnoid)); @@ -463,7 +463,7 @@ aggregates_allow_partial_walker(Node *node, partial_agg_context *context) { ReleaseSysCache(aggTuple); context->allowedtype = PAT_DISABLED; - return true; /* abort search */ + return true; /* abort search */ } /* @@ -479,7 +479,7 @@ aggregates_allow_partial_walker(Node *node, partial_agg_context *context) context->allowedtype = PAT_INTERNAL_ONLY; ReleaseSysCache(aggTuple); - return false; /* continue searching */ + return false; /* continue searching */ } return 
expression_tree_walker(node, aggregates_allow_partial_walker, (void *) context); @@ -1354,7 +1354,7 @@ contain_volatile_functions_not_nextval_walker(Node *node, void *context) bool has_parallel_hazard(Node *node, bool allow_restricted) { - has_parallel_hazard_arg context; + has_parallel_hazard_arg context; context.allow_restricted = allow_restricted; return has_parallel_hazard_walker(node, &context); @@ -1371,16 +1371,16 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context) * recurse through Query objects to as to locate parallel-unsafe * constructs anywhere in the tree. * - * Later, we'll be called again for specific quals, possibly after - * some planning has been done, we may encounter SubPlan, SubLink, - * or AlternativeSubLink nodes. Currently, there's no need to recurse - * through these; they can't be unsafe, since we've already cleared - * the entire query of unsafe operations, and they're definitely + * Later, we'll be called again for specific quals, possibly after some + * planning has been done, we may encounter SubPlan, SubLink, or + * AlternativeSubLink nodes. Currently, there's no need to recurse + * through these; they can't be unsafe, since we've already cleared the + * entire query of unsafe operations, and they're definitely * parallel-restricted. */ if (IsA(node, Query)) { - Query *query = (Query *) node; + Query *query = (Query *) node; if (query->rowMarks != NULL) return true; @@ -1390,12 +1390,12 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context) has_parallel_hazard_walker, context, 0); } - else if (IsA(node, SubPlan) || IsA(node, SubLink) || - IsA(node, AlternativeSubPlan) || IsA(node, Param)) + else if (IsA(node, SubPlan) ||IsA(node, SubLink) || + IsA(node, AlternativeSubPlan) ||IsA(node, Param)) { /* - * Since we don't have the ability to push subplans down to workers - * at present, we treat subplan references as parallel-restricted. + * Since we don't have the ability to push subplans down to workers at + * present, we treat subplan references as parallel-restricted. */ if (!context->allow_restricted) return true; @@ -1405,12 +1405,14 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context) if (IsA(node, RestrictInfo)) { RestrictInfo *rinfo = (RestrictInfo *) node; + return has_parallel_hazard_walker((Node *) rinfo->clause, context); } /* * It is an error for a parallel worker to touch a temporary table in any - * way, so we can't handle nodes whose type is the rowtype of such a table. + * way, so we can't handle nodes whose type is the rowtype of such a + * table. */ if (!context->allow_restricted) { @@ -1534,7 +1536,8 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context) foreach(opid, rcexpr->opnos) { - Oid opfuncid = get_opcode(lfirst_oid(opid)); + Oid opfuncid = get_opcode(lfirst_oid(opid)); + if (parallel_too_dangerous(func_parallel(opfuncid), context)) return true; } @@ -1558,7 +1561,7 @@ parallel_too_dangerous(char proparallel, has_parallel_hazard_arg *context) static bool typeid_is_temp(Oid typeid) { - Oid relid = get_typ_typrelid(typeid); + Oid relid = get_typ_typrelid(typeid); if (!OidIsValid(relid)) return false; @@ -1870,8 +1873,8 @@ contain_leaked_vars_walker(Node *node, void *context) /* * WHERE CURRENT OF doesn't contain function calls. Moreover, it * is important that this can be pushed down into a - * security_barrier view, since the planner must always generate - * a TID scan when CURRENT OF is present -- c.f. cost_tidscan. 
+ * security_barrier view, since the planner must always generate a + * TID scan when CURRENT OF is present -- c.f. cost_tidscan. */ return false; diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c index de849808c3..6aa8192180 100644 --- a/src/backend/optimizer/util/plancat.c +++ b/src/backend/optimizer/util/plancat.c @@ -709,7 +709,7 @@ infer_collation_opclass_match(InferenceElem *elem, Relation idxRel, AttrNumber natt; Oid inferopfamily = InvalidOid; /* OID of opclass opfamily */ Oid inferopcinputtype = InvalidOid; /* OID of opclass input type */ - int nplain = 0; /* # plain attrs observed */ + int nplain = 0; /* # plain attrs observed */ /* * If inference specification element lacks collation/opclass, then no diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c index 2def06dd92..91cd2b506f 100644 --- a/src/backend/optimizer/util/relnode.c +++ b/src/backend/optimizer/util/relnode.c @@ -107,7 +107,7 @@ build_simple_rel(PlannerInfo *root, int relid, RelOptKind reloptkind) rel->consider_startup = (root->tuple_fraction > 0); rel->consider_param_startup = false; /* might get changed later */ rel->consider_parallel = false; /* might get changed later */ - rel->rel_parallel_workers = -1; /* set up in GetRelationInfo */ + rel->rel_parallel_workers = -1; /* set up in GetRelationInfo */ rel->reltarget = create_empty_pathtarget(); rel->reltarget_has_non_vars = false; rel->pathlist = NIL; diff --git a/src/backend/optimizer/util/tlist.c b/src/backend/optimizer/util/tlist.c index 465cb9e851..339a5b3f25 100644 --- a/src/backend/optimizer/util/tlist.c +++ b/src/backend/optimizer/util/tlist.c @@ -776,11 +776,11 @@ apply_pathtarget_labeling_to_tlist(List *tlist, PathTarget *target) void apply_partialaggref_adjustment(PathTarget *target) { - ListCell *lc; + ListCell *lc; foreach(lc, target->exprs) { - Aggref *aggref = (Aggref *) lfirst(lc); + Aggref *aggref = (Aggref *) lfirst(lc); if (IsA(aggref, Aggref)) { diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c index 81332b57d9..1e3ecbc51e 100644 --- a/src/backend/parser/parse_relation.c +++ b/src/backend/parser/parse_relation.c @@ -3083,8 +3083,8 @@ errorMissingColumn(ParseState *pstate, errmsg("column %s.%s does not exist", relname, colname) : errmsg("column \"%s\" does not exist", colname), state->rfirst ? closestfirst ? 
- errhint("Perhaps you meant to reference the column \"%s.%s\".", - state->rfirst->eref->aliasname, closestfirst) : + errhint("Perhaps you meant to reference the column \"%s.%s\".", + state->rfirst->eref->aliasname, closestfirst) : errhint("There is a column named \"%s\" in table \"%s\", but it cannot be referenced from this part of the query.", colname, state->rfirst->eref->aliasname) : 0, parser_errposition(pstate, location))); diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c index 65284941ed..6313087174 100644 --- a/src/backend/parser/parse_utilcmd.c +++ b/src/backend/parser/parse_utilcmd.c @@ -124,7 +124,7 @@ static void transformFKConstraints(CreateStmtContext *cxt, bool skipValidation, bool isAddConstraint); static void transformCheckConstraints(CreateStmtContext *cxt, - bool skipValidation); + bool skipValidation); static void transformConstraintAttrs(CreateStmtContext *cxt, List *constraintList); static void transformColumnType(CreateStmtContext *cxt, ColumnDef *column); @@ -287,15 +287,14 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString) if (like_found) { /* - * To match INHERITS, the existence of any LIKE table with OIDs - * causes the new table to have oids. For the same reason, - * WITH/WITHOUT OIDs is also ignored with LIKE. We prepend - * because the first oid option list entry is honored. Our - * prepended WITHOUT OIDS clause will be overridden if an - * inherited table has oids. + * To match INHERITS, the existence of any LIKE table with OIDs causes + * the new table to have oids. For the same reason, WITH/WITHOUT OIDs + * is also ignored with LIKE. We prepend because the first oid option + * list entry is honored. Our prepended WITHOUT OIDS clause will be + * overridden if an inherited table has oids. */ stmt->options = lcons(makeDefElem("oids", - (Node *)makeInteger(cxt.hasoids)), stmt->options); + (Node *) makeInteger(cxt.hasoids)), stmt->options); } foreach(elements, stmt->tableElts) @@ -305,6 +304,7 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString) if (nodeTag(element) == T_Constraint) transformTableConstraint(&cxt, (Constraint *) element); } + /* * transformIndexConstraints wants cxt.alist to contain only index * statements, so transfer anything we already have into save_alist. @@ -1949,8 +1949,8 @@ transformCheckConstraints(CreateStmtContext *cxt, bool skipValidation) /* * If creating a new table, we can safely skip validation of check - * constraints, and nonetheless mark them valid. (This will override - * any user-supplied NOT VALID flag.) + * constraints, and nonetheless mark them valid. (This will override any + * user-supplied NOT VALID flag.) */ if (skipValidation) { diff --git a/src/backend/port/atomics.c b/src/backend/port/atomics.c index 4972c3031f..42169a33cf 100644 --- a/src/backend/port/atomics.c +++ b/src/backend/port/atomics.c @@ -35,8 +35,7 @@ pg_spinlock_barrier(void) * * We use kill(0) for the fallback barrier as we assume that kernels on * systems old enough to require fallback barrier support will include an - * appropriate barrier while checking the existence of the postmaster - * pid. + * appropriate barrier while checking the existence of the postmaster pid. 
*/ (void) kill(PostmasterPid, 0); } diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c index 6bdaac50e0..2c7446b4a4 100644 --- a/src/backend/postmaster/autovacuum.c +++ b/src/backend/postmaster/autovacuum.c @@ -204,7 +204,7 @@ typedef struct autovac_table * wi_links entry into free list or running list * wi_dboid OID of the database this worker is supposed to work on * wi_tableoid OID of the table currently being vacuumed, if any - * wi_sharedrel flag indicating whether table is marked relisshared + * wi_sharedrel flag indicating whether table is marked relisshared * wi_proc pointer to PGPROC of the running worker, NULL if not started * wi_launchtime Time at which this worker was launched * wi_cost_* Vacuum cost-based delay parameters current in this worker @@ -672,9 +672,9 @@ AutoVacLauncherMain(int argc, char *argv[]) /* * There are some conditions that we need to check before trying to - * start a worker. First, we need to make sure that there is a - * worker slot available. Second, we need to make sure that no - * other worker failed while starting up. + * start a worker. First, we need to make sure that there is a worker + * slot available. Second, we need to make sure that no other worker + * failed while starting up. */ current_time = GetCurrentTimestamp(); diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c index d655fbcd83..8fa9edbf72 100644 --- a/src/backend/postmaster/pgstat.c +++ b/src/backend/postmaster/pgstat.c @@ -2727,6 +2727,7 @@ pgstat_bestart(void) beentry->st_activity[pgstat_track_activity_query_size - 1] = '\0'; beentry->st_progress_command = PROGRESS_COMMAND_INVALID; beentry->st_progress_command_target = InvalidOid; + /* * we don't zero st_progress_param here to save cycles; nobody should * examine it until st_progress_command has been set to something other @@ -2909,7 +2910,7 @@ pgstat_progress_update_multi_param(int nparam, const int *index, const int64 *val) { volatile PgBackendStatus *beentry = MyBEEntry; - int i; + int i; if (!beentry || !pgstat_track_activities || nparam == 0) return; diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c index 6cf51e1b64..6421c8601b 100644 --- a/src/backend/postmaster/postmaster.c +++ b/src/backend/postmaster/postmaster.c @@ -1182,23 +1182,22 @@ PostmasterMain(int argc, char *argv[]) RemovePgTempFiles(); /* - * Forcibly remove the files signaling a standby promotion - * request. Otherwise, the existence of those files triggers - * a promotion too early, whether a user wants that or not. + * Forcibly remove the files signaling a standby promotion request. + * Otherwise, the existence of those files triggers a promotion too early, + * whether a user wants that or not. * - * This removal of files is usually unnecessary because they - * can exist only during a few moments during a standby - * promotion. However there is a race condition: if pg_ctl promote - * is executed and creates the files during a promotion, - * the files can stay around even after the server is brought up - * to new master. Then, if new standby starts by using the backup - * taken from that master, the files can exist at the server + * This removal of files is usually unnecessary because they can exist + * only during a few moments during a standby promotion. However there is + * a race condition: if pg_ctl promote is executed and creates the files + * during a promotion, the files can stay around even after the server is + * brought up to new master. 
Then, if new standby starts by using the + * backup taken from that master, the files can exist at the server * startup and should be removed in order to avoid an unexpected * promotion. * - * Note that promotion signal files need to be removed before - * the startup process is invoked. Because, after that, they can - * be used by postmaster's SIGUSR1 signal handler. + * Note that promotion signal files need to be removed before the startup + * process is invoked. Because, after that, they can be used by + * postmaster's SIGUSR1 signal handler. */ RemovePromoteSignalFiles(); @@ -2053,9 +2052,9 @@ retry1: else if (!parse_bool(valptr, &am_walsender)) ereport(FATAL, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("invalid value for parameter \"%s\": \"%s\"", - "replication", - valptr), + errmsg("invalid value for parameter \"%s\": \"%s\"", + "replication", + valptr), errhint("Valid values are: \"false\", 0, \"true\", 1, \"database\"."))); } else @@ -2607,6 +2606,7 @@ pmdie(SIGNAL_ARGS) if (pmState == PM_RECOVERY) { SignalSomeChildren(SIGTERM, BACKEND_TYPE_BGWORKER); + /* * Only startup, bgwriter, walreceiver, possibly bgworkers, * and/or checkpointer should be active in this state; we just @@ -3074,9 +3074,9 @@ CleanupBackgroundWorker(int pid, /* * It's possible that this background worker started some OTHER - * background worker and asked to be notified when that worker - * started or stopped. If so, cancel any notifications destined - * for the now-dead backend. + * background worker and asked to be notified when that worker started + * or stopped. If so, cancel any notifications destined for the + * now-dead backend. */ if (rw->rw_backend->bgworker_notify) BackgroundWorkerStopNotifications(rw->rw_pid); @@ -5696,9 +5696,8 @@ maybe_start_bgworker(void) rw->rw_crashed_at = 0; /* - * Allocate and assign the Backend element. Note we - * must do this before forking, so that we can handle out of - * memory properly. + * Allocate and assign the Backend element. Note we must do this + * before forking, so that we can handle out of memory properly. 
*/ if (!assign_backendlist_entry(rw)) return; diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c index 100887337c..da9b7a6f0d 100644 --- a/src/backend/replication/basebackup.c +++ b/src/backend/replication/basebackup.c @@ -117,8 +117,8 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir) TimeLineID starttli; XLogRecPtr endptr; TimeLineID endtli; - StringInfo labelfile; - StringInfo tblspc_map_file = NULL; + StringInfo labelfile; + StringInfo tblspc_map_file = NULL; int datadirpathlen; List *tablespaces = NIL; diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c index 0c248f07e8..46cd5ba1f2 100644 --- a/src/backend/replication/logical/decode.c +++ b/src/backend/replication/logical/decode.c @@ -330,7 +330,7 @@ DecodeStandbyOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) case XLOG_INVALIDATIONS: { xl_invalidations *invalidations = - (xl_invalidations *) XLogRecGetData(r); + (xl_invalidations *) XLogRecGetData(r); ReorderBufferImmediateInvalidation( ctx->reorder, invalidations->nmsgs, invalidations->msgs); @@ -488,12 +488,12 @@ FilterByOrigin(LogicalDecodingContext *ctx, RepOriginId origin_id) static void DecodeLogicalMsgOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) { - SnapBuild *builder = ctx->snapshot_builder; + SnapBuild *builder = ctx->snapshot_builder; XLogReaderState *r = buf->record; - TransactionId xid = XLogRecGetXid(r); - uint8 info = XLogRecGetInfo(r) & ~XLR_INFO_MASK; - RepOriginId origin_id = XLogRecGetOrigin(r); - Snapshot snapshot; + TransactionId xid = XLogRecGetXid(r); + uint8 info = XLogRecGetInfo(r) & ~XLR_INFO_MASK; + RepOriginId origin_id = XLogRecGetOrigin(r); + Snapshot snapshot; xl_logical_message *message; if (info != XLOG_LOGICAL_MESSAGE) @@ -522,7 +522,8 @@ DecodeLogicalMsgOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) snapshot = SnapBuildGetOrBuildSnapshot(builder, xid); ReorderBufferQueueMessage(ctx->reorder, xid, snapshot, buf->endptr, message->transactional, - message->message, /* first part of message is prefix */ + message->message, /* first part of message is + * prefix */ message->message_size, message->message + message->prefix_size); } @@ -536,8 +537,8 @@ DecodeCommit(LogicalDecodingContext *ctx, XLogRecordBuffer *buf, xl_xact_parsed_commit *parsed, TransactionId xid) { XLogRecPtr origin_lsn = InvalidXLogRecPtr; - TimestampTz commit_time = parsed->xact_time; - RepOriginId origin_id = XLogRecGetOrigin(buf->record); + TimestampTz commit_time = parsed->xact_time; + RepOriginId origin_id = XLogRecGetOrigin(buf->record); int i; if (parsed->xinfo & XACT_XINFO_HAS_ORIGIN) diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c index 5ccfd3105f..7c8a777b33 100644 --- a/src/backend/replication/logical/logical.c +++ b/src/backend/replication/logical/logical.c @@ -63,8 +63,8 @@ static void commit_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, static void change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, Relation relation, ReorderBufferChange *change); static void message_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, - XLogRecPtr message_lsn, bool transactional, - const char *prefix, Size message_size, const char *message); + XLogRecPtr message_lsn, bool transactional, + const char *prefix, Size message_size, const char *message); static void LoadOutputPlugin(OutputPluginCallbacks *callbacks, char *plugin); diff --git a/src/backend/replication/logical/message.c 
b/src/backend/replication/logical/message.c index efcc25ae95..8f9dc2f47c 100644 --- a/src/backend/replication/logical/message.c +++ b/src/backend/replication/logical/message.c @@ -51,7 +51,7 @@ XLogRecPtr LogLogicalMessage(const char *prefix, const char *message, size_t size, bool transactional) { - xl_logical_message xlrec; + xl_logical_message xlrec; /* * Force xid to be allocated if we're emitting a transactional message. @@ -87,7 +87,7 @@ logicalmsg_redo(XLogReaderState *record) uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; if (info != XLOG_LOGICAL_MESSAGE) - elog(PANIC, "logicalmsg_redo: unknown op code %u", info); + elog(PANIC, "logicalmsg_redo: unknown op code %u", info); /* This is only interesting for logical decoding, see decode.c. */ } diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c index 9aeb2d8597..cc2b513236 100644 --- a/src/backend/replication/logical/origin.c +++ b/src/backend/replication/logical/origin.c @@ -148,7 +148,7 @@ typedef struct ReplicationStateCtl } ReplicationStateCtl; /* external variables */ -RepOriginId replorigin_session_origin = InvalidRepOriginId; /* assumed identity */ +RepOriginId replorigin_session_origin = InvalidRepOriginId; /* assumed identity */ XLogRecPtr replorigin_session_origin_lsn = InvalidXLogRecPtr; TimestampTz replorigin_session_origin_timestamp = 0; diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index 52b0d41fa6..00e31a2d74 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -666,8 +666,8 @@ ReorderBufferQueueMessage(ReorderBuffer *rb, TransactionId xid, } else { - ReorderBufferTXN *txn = NULL; - volatile Snapshot snapshot_now = snapshot; + ReorderBufferTXN *txn = NULL; + volatile Snapshot snapshot_now = snapshot; if (xid != InvalidTransactionId) txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true); @@ -1836,10 +1836,10 @@ ReorderBufferImmediateInvalidation(ReorderBuffer *rb, uint32 ninvalidations, BeginInternalSubTransaction("replay"); /* - * Force invalidations to happen outside of a valid transaction - that - * way entries will just be marked as invalid without accessing the - * catalog. That's advantageous because we don't need to setup the - * full state necessary for catalog access. + * Force invalidations to happen outside of a valid transaction - that way + * entries will just be marked as invalid without accessing the catalog. + * That's advantageous because we don't need to setup the full state + * necessary for catalog access. 
*/ if (use_subtxn) AbortCurrentTransaction(); @@ -2543,14 +2543,14 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn, change->data.msg.prefix = MemoryContextAlloc(rb->context, prefix_size); memcpy(change->data.msg.prefix, data, prefix_size); - Assert(change->data.msg.prefix[prefix_size-1] == '\0'); + Assert(change->data.msg.prefix[prefix_size - 1] == '\0'); data += prefix_size; /* read the messsage */ memcpy(&change->data.msg.message_size, data, sizeof(Size)); data += sizeof(Size); change->data.msg.message = MemoryContextAlloc(rb->context, - change->data.msg.message_size); + change->data.msg.message_size); memcpy(change->data.msg.message, data, change->data.msg.message_size); data += change->data.msg.message_size; diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c index 644c52ea41..2fb7c17d7d 100644 --- a/src/backend/replication/slot.c +++ b/src/backend/replication/slot.c @@ -230,11 +230,11 @@ ReplicationSlotCreate(const char *name, bool db_specific, ReplicationSlotValidateName(name, ERROR); /* - * If some other backend ran this code concurrently with us, we'd likely both - * allocate the same slot, and that would be bad. We'd also be at risk of - * missing a name collision. Also, we don't want to try to create a new - * slot while somebody's busy cleaning up an old one, because we might - * both be monkeying with the same directory. + * If some other backend ran this code concurrently with us, we'd likely + * both allocate the same slot, and that would be bad. We'd also be at + * risk of missing a name collision. Also, we don't want to try to create + * a new slot while somebody's busy cleaning up an old one, because we + * might both be monkeying with the same directory. */ LWLockAcquire(ReplicationSlotAllocationLock, LW_EXCLUSIVE); @@ -352,8 +352,8 @@ ReplicationSlotAcquire(const char *name) if (active_pid != 0) ereport(ERROR, (errcode(ERRCODE_OBJECT_IN_USE), - errmsg("replication slot \"%s\" is active for PID %d", - name, active_pid))); + errmsg("replication slot \"%s\" is active for PID %d", + name, active_pid))); /* We made this slot active, so it's ours now. 
*/ MyReplicationSlot = slot; @@ -533,6 +533,7 @@ void ReplicationSlotMarkDirty(void) { ReplicationSlot *slot = MyReplicationSlot; + Assert(MyReplicationSlot != NULL); SpinLockAcquire(&slot->mutex); diff --git a/src/backend/replication/slotfuncs.c b/src/backend/replication/slotfuncs.c index 9cc24eadf2..f9087619d2 100644 --- a/src/backend/replication/slotfuncs.c +++ b/src/backend/replication/slotfuncs.c @@ -40,7 +40,7 @@ Datum pg_create_physical_replication_slot(PG_FUNCTION_ARGS) { Name name = PG_GETARG_NAME(0); - bool immediately_reserve = PG_GETARG_BOOL(1); + bool immediately_reserve = PG_GETARG_BOOL(1); Datum values[2]; bool nulls[2]; TupleDesc tupdesc; diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c index 959ca78a1e..67249d80c8 100644 --- a/src/backend/replication/syncrep.c +++ b/src/backend/replication/syncrep.c @@ -86,9 +86,9 @@ static void SyncRepCancelWait(void); static int SyncRepWakeQueue(bool all, int mode); static bool SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, - XLogRecPtr *flushPtr, - XLogRecPtr *applyPtr, - bool *am_sync); + XLogRecPtr *flushPtr, + XLogRecPtr *applyPtr, + bool *am_sync); static int SyncRepGetStandbyPriority(void); #ifdef USE_ASSERT_CHECKING @@ -212,8 +212,8 @@ SyncRepWaitForLSN(XLogRecPtr lsn, bool commit) /* * If a wait for synchronous replication is pending, we can neither * acknowledge the commit nor raise ERROR or FATAL. The latter would - * lead the client to believe that the transaction aborted, which - * is not true: it's already committed locally. The former is no good + * lead the client to believe that the transaction aborted, which is + * not true: it's already committed locally. The former is no good * either: the client has requested synchronous replication, and is * entitled to assume that an acknowledged commit is also replicated, * which might not be true. So in this case we issue a WARNING (which @@ -400,8 +400,8 @@ SyncRepReleaseWaiters(void) /* * If this WALSender is serving a standby that is not on the list of * potential sync standbys then we have nothing to do. If we are still - * starting up, still running base backup or the current flush position - * is still invalid, then leave quickly also. + * starting up, still running base backup or the current flush position is + * still invalid, then leave quickly also. */ if (MyWalSnd->sync_standby_priority == 0 || MyWalSnd->state < WALSNDSTATE_STREAMING || @@ -412,21 +412,21 @@ SyncRepReleaseWaiters(void) } /* - * We're a potential sync standby. Release waiters if there are - * enough sync standbys and we are considered as sync. + * We're a potential sync standby. Release waiters if there are enough + * sync standbys and we are considered as sync. */ LWLockAcquire(SyncRepLock, LW_EXCLUSIVE); /* - * Check whether we are a sync standby or not, and calculate - * the oldest positions among all sync standbys. + * Check whether we are a sync standby or not, and calculate the oldest + * positions among all sync standbys. */ got_oldest = SyncRepGetOldestSyncRecPtr(&writePtr, &flushPtr, &applyPtr, &am_sync); /* - * If we are managing a sync standby, though we weren't - * prior to this, then announce we are now a sync standby. + * If we are managing a sync standby, though we weren't prior to this, + * then announce we are now a sync standby. 
*/ if (announce_next_takeover && am_sync) { @@ -489,8 +489,8 @@ static bool SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr, XLogRecPtr *applyPtr, bool *am_sync) { - List *sync_standbys; - ListCell *cell; + List *sync_standbys; + ListCell *cell; *writePtr = InvalidXLogRecPtr; *flushPtr = InvalidXLogRecPtr; @@ -513,12 +513,12 @@ SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr, } /* - * Scan through all sync standbys and calculate the oldest - * Write, Flush and Apply positions. + * Scan through all sync standbys and calculate the oldest Write, Flush + * and Apply positions. */ - foreach (cell, sync_standbys) + foreach(cell, sync_standbys) { - WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)]; + WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)]; XLogRecPtr write; XLogRecPtr flush; XLogRecPtr apply; @@ -554,16 +554,16 @@ SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr, List * SyncRepGetSyncStandbys(bool *am_sync) { - List *result = NIL; - List *pending = NIL; - int lowest_priority; - int next_highest_priority; - int this_priority; - int priority; - int i; - bool am_in_pending = false; - volatile WalSnd *walsnd; /* Use volatile pointer to prevent - * code rearrangement */ + List *result = NIL; + List *pending = NIL; + int lowest_priority; + int next_highest_priority; + int this_priority; + int priority; + int i; + bool am_in_pending = false; + volatile WalSnd *walsnd; /* Use volatile pointer to prevent code + * rearrangement */ /* Set default result */ if (am_sync != NULL) @@ -577,9 +577,9 @@ SyncRepGetSyncStandbys(bool *am_sync) next_highest_priority = lowest_priority + 1; /* - * Find the sync standbys which have the highest priority (i.e, 1). - * Also store all the other potential sync standbys into the pending list, - * in order to scan it later and find other sync standbys from it quickly. + * Find the sync standbys which have the highest priority (i.e, 1). Also + * store all the other potential sync standbys into the pending list, in + * order to scan it later and find other sync standbys from it quickly. */ for (i = 0; i < max_wal_senders; i++) { @@ -603,9 +603,9 @@ SyncRepGetSyncStandbys(bool *am_sync) continue; /* - * If the priority is equal to 1, consider this standby as sync - * and append it to the result. Otherwise append this standby - * to the pending list to check if it's actually sync or not later. + * If the priority is equal to 1, consider this standby as sync and + * append it to the result. Otherwise append this standby to the + * pending list to check if it's actually sync or not later. */ if (this_priority == 1) { @@ -615,7 +615,7 @@ SyncRepGetSyncStandbys(bool *am_sync) if (list_length(result) == SyncRepConfig->num_sync) { list_free(pending); - return result; /* Exit if got enough sync standbys */ + return result; /* Exit if got enough sync standbys */ } } else @@ -626,10 +626,10 @@ SyncRepGetSyncStandbys(bool *am_sync) /* * Track the highest priority among the standbys in the pending - * list, in order to use it as the starting priority for later scan - * of the list. This is useful to find quickly the sync standbys - * from the pending list later because we can skip unnecessary - * scans for the unused priorities. + * list, in order to use it as the starting priority for later + * scan of the list. This is useful to find quickly the sync + * standbys from the pending list later because we can skip + * unnecessary scans for the unused priorities. 
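A minimal standalone sketch of the selection rule the SyncRepGetSyncStandbys() comments above describe: standbys with priority 1 become sync immediately, and remaining slots are filled in increasing priority order until num_sync candidates are found. The plain priority array, choose_sync_standbys(), and NSENDERS are hypothetical illustrations only; the real code walks WalSndCtl->walsnds, keeps a pending list, and returns a List of walsender indexes.

#include <stdio.h>

#define NSENDERS 6

/* Returns the number of chosen senders; chosen[] receives their indexes. */
static int
choose_sync_standbys(const int priority[], int nsenders, int num_sync,
                     int chosen[])
{
    int     nchosen = 0;
    int     maxprio = 0;

    for (int i = 0; i < nsenders; i++)
        if (priority[i] > maxprio)
            maxprio = priority[i];

    /* First pass: priority 1 standbys are sync outright. */
    for (int i = 0; i < nsenders && nchosen < num_sync; i++)
        if (priority[i] == 1)
            chosen[nchosen++] = i;

    /* Fill the rest in increasing priority order (2, 3, ...). */
    for (int p = 2; p <= maxprio && nchosen < num_sync; p++)
        for (int i = 0; i < nsenders && nchosen < num_sync; i++)
            if (priority[i] == p)
                chosen[nchosen++] = i;

    return nchosen;
}

int
main(void)
{
    /* 0 means "not listed in synchronous_standby_names". */
    int     priority[NSENDERS] = {0, 2, 1, 3, 1, 2};
    int     chosen[NSENDERS];
    int     n = choose_sync_standbys(priority, NSENDERS, 3, chosen);

    for (int i = 0; i < n; i++)
        printf("sync standby: walsender %d (priority %d)\n",
               chosen[i], priority[chosen[i]]);
    return 0;
}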
*/ if (this_priority < next_highest_priority) next_highest_priority = this_priority; @@ -663,9 +663,9 @@ SyncRepGetSyncStandbys(bool *am_sync) priority = next_highest_priority; while (priority <= lowest_priority) { - ListCell *cell; - ListCell *prev = NULL; - ListCell *next; + ListCell *cell; + ListCell *prev = NULL; + ListCell *next; next_highest_priority = lowest_priority + 1; @@ -685,8 +685,8 @@ SyncRepGetSyncStandbys(bool *am_sync) /* * We should always exit here after the scan of pending list - * starts because we know that the list has enough elements - * to reach SyncRepConfig->num_sync. + * starts because we know that the list has enough elements to + * reach SyncRepConfig->num_sync. */ if (list_length(result) == SyncRepConfig->num_sync) { @@ -695,8 +695,8 @@ SyncRepGetSyncStandbys(bool *am_sync) } /* - * Remove the entry for this sync standby from the list - * to prevent us from looking at the same entry again. + * Remove the entry for this sync standby from the list to + * prevent us from looking at the same entry again. */ pending = list_delete_cell(pending, cell, prev); diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c index 6fd5952be7..ce311cb897 100644 --- a/src/backend/replication/walreceiver.c +++ b/src/backend/replication/walreceiver.c @@ -463,7 +463,7 @@ WalReceiverMain(void) */ Assert(wait_fd != PGINVALID_SOCKET); rc = WaitLatchOrSocket(&walrcv->latch, - WL_POSTMASTER_DEATH | WL_SOCKET_READABLE | + WL_POSTMASTER_DEATH | WL_SOCKET_READABLE | WL_TIMEOUT | WL_LATCH_SET, wait_fd, NAPTIME_PER_CYCLE); @@ -475,8 +475,8 @@ WalReceiverMain(void) /* * The recovery process has asked us to send apply * feedback now. Make sure the flag is really set to - * false in shared memory before sending the reply, - * so we don't miss a new request for a reply. + * false in shared memory before sending the reply, so + * we don't miss a new request for a reply. */ walrcv->force_reply = false; pg_memory_barrier(); @@ -1318,10 +1318,10 @@ pg_stat_get_wal_receiver(PG_FUNCTION_ARGS) TimeLineID receive_start_tli; XLogRecPtr received_lsn; TimeLineID received_tli; - TimestampTz last_send_time; - TimestampTz last_receipt_time; + TimestampTz last_send_time; + TimestampTz last_receipt_time; XLogRecPtr latest_end_lsn; - TimestampTz latest_end_time; + TimestampTz latest_end_time; char *slotname; /* No WAL receiver, just return a tuple with NULL values */ @@ -1379,8 +1379,8 @@ pg_stat_get_wal_receiver(PG_FUNCTION_ARGS) if (!superuser()) { /* - * Only superusers can see details. Other users only get the pid - * value to know whether it is a WAL receiver, but no details. + * Only superusers can see details. Other users only get the pid value + * to know whether it is a WAL receiver, but no details. 
*/ MemSet(&nulls[1], true, PG_STAT_GET_WAL_RECEIVER_COLS - 1); } @@ -1422,5 +1422,5 @@ pg_stat_get_wal_receiver(PG_FUNCTION_ARGS) /* Returns the record as Datum */ PG_RETURN_DATUM(HeapTupleGetDatum( - heap_form_tuple(tupdesc, values, nulls))); + heap_form_tuple(tupdesc, values, nulls))); } diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c index 5cd4d51865..a0dba194a6 100644 --- a/src/backend/replication/walsender.c +++ b/src/backend/replication/walsender.c @@ -464,7 +464,7 @@ SendTimeLineHistory(TimeLineHistoryCmd *cmd) pq_beginmessage(&buf, 'D'); pq_sendint(&buf, 2, 2); /* # of columns */ len = strlen(histfname); - pq_sendint(&buf, len, 4); /* col1 len */ + pq_sendint(&buf, len, 4); /* col1 len */ pq_sendbytes(&buf, histfname, len); fd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0666); @@ -657,7 +657,7 @@ StartReplication(StartReplicationCmd *cmd) /* Initialize shared memory status, too */ { - WalSnd *walsnd = MyWalSnd; + WalSnd *walsnd = MyWalSnd; SpinLockAcquire(&walsnd->mutex); walsnd->sentPtr = sentPtr; @@ -728,7 +728,7 @@ StartReplication(StartReplicationCmd *cmd) pq_sendint(&buf, 2, 2); /* number of columns */ len = strlen(tli_str); - pq_sendint(&buf, len, 4); /* length */ + pq_sendint(&buf, len, 4); /* length */ pq_sendbytes(&buf, tli_str, len); len = strlen(startpos_str); @@ -901,7 +901,7 @@ CreateReplicationSlot(CreateReplicationSlotCmd *cmd) /* slot_name */ len = strlen(NameStr(MyReplicationSlot->data.name)); - pq_sendint(&buf, len, 4); /* col1 len */ + pq_sendint(&buf, len, 4); /* col1 len */ pq_sendbytes(&buf, NameStr(MyReplicationSlot->data.name), len); /* consistent wal location */ @@ -1008,7 +1008,7 @@ StartLogicalReplication(StartReplicationCmd *cmd) /* Also update the sent position status in shared memory */ { - WalSnd *walsnd = MyWalSnd; + WalSnd *walsnd = MyWalSnd; SpinLockAcquire(&walsnd->mutex); walsnd->sentPtr = MyReplicationSlot->data.restart_lsn; @@ -1569,7 +1569,7 @@ ProcessStandbyReplyMessage(void) * standby. 
*/ { - WalSnd *walsnd = MyWalSnd; + WalSnd *walsnd = MyWalSnd; SpinLockAcquire(&walsnd->mutex); walsnd->write = writePtr; @@ -1948,7 +1948,7 @@ InitWalSenderSlot(void) */ for (i = 0; i < max_wal_senders; i++) { - WalSnd *walsnd = &WalSndCtl->walsnds[i]; + WalSnd *walsnd = &WalSndCtl->walsnds[i]; SpinLockAcquire(&walsnd->mutex); @@ -2161,7 +2161,7 @@ retry: */ if (am_cascading_walsender) { - WalSnd *walsnd = MyWalSnd; + WalSnd *walsnd = MyWalSnd; bool reload; SpinLockAcquire(&walsnd->mutex); @@ -2399,7 +2399,7 @@ XLogSendPhysical(void) /* Update shared memory status */ { - WalSnd *walsnd = MyWalSnd; + WalSnd *walsnd = MyWalSnd; SpinLockAcquire(&walsnd->mutex); walsnd->sentPtr = sentPtr; @@ -2461,7 +2461,7 @@ XLogSendLogical(void) /* Update shared memory status */ { - WalSnd *walsnd = MyWalSnd; + WalSnd *walsnd = MyWalSnd; SpinLockAcquire(&walsnd->mutex); walsnd->sentPtr = sentPtr; @@ -2556,7 +2556,7 @@ WalSndRqstFileReload(void) for (i = 0; i < max_wal_senders; i++) { - WalSnd *walsnd = &WalSndCtl->walsnds[i]; + WalSnd *walsnd = &WalSndCtl->walsnds[i]; if (walsnd->pid == 0) continue; @@ -2708,7 +2708,7 @@ WalSndWakeup(void) void WalSndSetState(WalSndState state) { - WalSnd *walsnd = MyWalSnd; + WalSnd *walsnd = MyWalSnd; Assert(am_walsender); @@ -2792,7 +2792,7 @@ pg_stat_get_wal_senders(PG_FUNCTION_ARGS) for (i = 0; i < max_wal_senders; i++) { - WalSnd *walsnd = &WalSndCtl->walsnds[i]; + WalSnd *walsnd = &WalSndCtl->walsnds[i]; XLogRecPtr sentPtr; XLogRecPtr write; XLogRecPtr flush; diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c index 96f3f37667..f82d891c34 100644 --- a/src/backend/rewrite/rewriteDefine.c +++ b/src/backend/rewrite/rewriteDefine.c @@ -414,8 +414,8 @@ DefineQueryRewrite(char *rulename, * any triggers, indexes, child tables, policies, or RLS enabled. * (Note: these tests are too strict, because they will reject * relations that once had such but don't anymore. But we don't - * really care, because this whole business of converting relations - * to views is just a kluge to allow dump/reload of views that + * really care, because this whole business of converting relations to + * views is just a kluge to allow dump/reload of views that * participate in circular dependencies.) 
*/ if (event_relation->rd_rel->relkind != RELKIND_VIEW && diff --git a/src/backend/rewrite/rowsecurity.c b/src/backend/rewrite/rowsecurity.c index 970fa33843..e02911656a 100644 --- a/src/backend/rewrite/rowsecurity.c +++ b/src/backend/rewrite/rowsecurity.c @@ -64,21 +64,21 @@ static void get_policies_for_relation(Relation relation, static List *sort_policies_by_name(List *policies); -static int row_security_policy_cmp(const void *a, const void *b); +static int row_security_policy_cmp(const void *a, const void *b); static void add_security_quals(int rt_index, - List *permissive_policies, - List *restrictive_policies, - List **securityQuals, - bool *hasSubLinks); + List *permissive_policies, + List *restrictive_policies, + List **securityQuals, + bool *hasSubLinks); static void add_with_check_options(Relation rel, - int rt_index, - WCOKind kind, - List *permissive_policies, - List *restrictive_policies, - List **withCheckOptions, - bool *hasSubLinks); + int rt_index, + WCOKind kind, + List *permissive_policies, + List *restrictive_policies, + List **withCheckOptions, + bool *hasSubLinks); static bool check_role_for_policy(ArrayType *policy_roles, Oid user_id); @@ -163,29 +163,31 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index, rel = heap_open(rte->relid, NoLock); commandType = rt_index == root->resultRelation ? - root->commandType : CMD_SELECT; + root->commandType : CMD_SELECT; /* * In some cases, we need to apply USING policies (which control the * visibility of records) associated with multiple command types (see * specific cases below). * - * When considering the order in which to apply these USING policies, - * we prefer to apply higher privileged policies, those which allow the - * user to lock records (UPDATE and DELETE), first, followed by policies - * which don't (SELECT). + * When considering the order in which to apply these USING policies, we + * prefer to apply higher privileged policies, those which allow the user + * to lock records (UPDATE and DELETE), first, followed by policies which + * don't (SELECT). * * Note that the optimizer is free to push down and reorder quals which * use leakproof functions. * * In all cases, if there are no policy clauses allowing access to rows in - * the table for the specific type of operation, then a single always-false - * clause (a default-deny policy) will be added (see add_security_quals). + * the table for the specific type of operation, then a single + * always-false clause (a default-deny policy) will be added (see + * add_security_quals). */ /* * For a SELECT, if UPDATE privileges are required (eg: the user has - * specified FOR [KEY] UPDATE/SHARE), then add the UPDATE USING quals first. + * specified FOR [KEY] UPDATE/SHARE), then add the UPDATE USING quals + * first. * * This way, we filter out any records from the SELECT FOR SHARE/UPDATE * which the user does not have access to via the UPDATE USING policies, @@ -232,8 +234,8 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index, * a WHERE clause which involves columns from the relation), we collect up * CMD_SELECT policies and add them via add_security_quals first. * - * This way, we filter out any records which are not visible through an ALL - * or SELECT USING policy. + * This way, we filter out any records which are not visible through an + * ALL or SELECT USING policy. 
*/ if ((commandType == CMD_UPDATE || commandType == CMD_DELETE) && rte->requiredPerms & ACL_SELECT) @@ -272,9 +274,9 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index, hasSubLinks); /* - * Get and add ALL/SELECT policies, if SELECT rights are required - * for this relation (eg: when RETURNING is used). These are added as - * WCO policies rather than security quals to ensure that an error is + * Get and add ALL/SELECT policies, if SELECT rights are required for + * this relation (eg: when RETURNING is used). These are added as WCO + * policies rather than security quals to ensure that an error is * raised if a policy is violated; otherwise, we might end up silently * dropping rows to be added. */ @@ -288,7 +290,7 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index, &select_restrictive_policies); add_with_check_options(rel, rt_index, commandType == CMD_INSERT ? - WCO_RLS_INSERT_CHECK : WCO_RLS_UPDATE_CHECK, + WCO_RLS_INSERT_CHECK : WCO_RLS_UPDATE_CHECK, select_permissive_policies, select_restrictive_policies, withCheckOptions, @@ -324,11 +326,11 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index, hasSubLinks); /* - * Get and add ALL/SELECT policies, as WCO_RLS_CONFLICT_CHECK - * WCOs to ensure they are considered when taking the UPDATE - * path of an INSERT .. ON CONFLICT DO UPDATE, if SELECT - * rights are required for this relation, also as WCO policies, - * again, to avoid silently dropping data. See above. + * Get and add ALL/SELECT policies, as WCO_RLS_CONFLICT_CHECK WCOs + * to ensure they are considered when taking the UPDATE path of an + * INSERT .. ON CONFLICT DO UPDATE, if SELECT rights are required + * for this relation, also as WCO policies, again, to avoid + * silently dropping data. See above. */ if (rte->requiredPerms & ACL_SELECT) { @@ -336,7 +338,7 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index, List *conflict_select_restrictive_policies = NIL; get_policies_for_relation(rel, CMD_SELECT, user_id, - &conflict_select_permissive_policies, + &conflict_select_permissive_policies, &conflict_select_restrictive_policies); add_with_check_options(rel, rt_index, WCO_RLS_CONFLICT_CHECK, @@ -392,8 +394,8 @@ get_policies_for_relation(Relation relation, CmdType cmd, Oid user_id, */ foreach(item, relation->rd_rsdesc->policies) { - bool cmd_matches = false; - RowSecurityPolicy *policy = (RowSecurityPolicy *) lfirst(item); + bool cmd_matches = false; + RowSecurityPolicy *policy = (RowSecurityPolicy *) lfirst(item); /* Always add ALL policies, if they exist. */ if (policy->polcmd == '*') @@ -427,8 +429,8 @@ get_policies_for_relation(Relation relation, CmdType cmd, Oid user_id, } /* - * Add this policy to the list of permissive policies if it - * applies to the specified role. + * Add this policy to the list of permissive policies if it applies to + * the specified role. 
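The rowsecurity.c hunks in this region (get_policies_for_relation() above, add_security_quals() just below) describe how the collected policies combine: permissive USING quals are OR'ed together, restrictive quals are AND'ed on top, and with no applicable permissive policy the result is a default deny. A minimal standalone sketch of that visibility rule, assuming a plain boolean-array representation and the hypothetical name row_is_visible(); the real code builds Expr trees that the executor evaluates per row.

#include <stdbool.h>
#include <stdio.h>

static bool
row_is_visible(const bool permissive[], int npermissive,
               const bool restrictive[], int nrestrictive)
{
    bool    any_permissive = false;

    /* No permissive policies at all: default-deny ("false" qual). */
    if (npermissive == 0)
        return false;

    /* Permissive quals are OR'ed: one passing qual is enough. */
    for (int i = 0; i < npermissive; i++)
        if (permissive[i])
        {
            any_permissive = true;
            break;
        }
    if (!any_permissive)
        return false;

    /* Restrictive quals are AND'ed: every one must pass. */
    for (int i = 0; i < nrestrictive; i++)
        if (!restrictive[i])
            return false;

    return true;
}

int
main(void)
{
    bool    permissive[] = {false, true};   /* one of two permissive quals passes */
    bool    restrictive[] = {true};         /* the single restrictive qual passes */

    printf("visible: %d\n",
           row_is_visible(permissive, 2, restrictive, 1));  /* prints 1 */
    return 0;
}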
*/ if (cmd_matches && check_role_for_policy(policy->roles, user_id)) *permissive_policies = lappend(*permissive_policies, policy); @@ -442,7 +444,7 @@ get_policies_for_relation(Relation relation, CmdType cmd, Oid user_id, if (row_security_policy_hook_restrictive) { List *hook_policies = - (*row_security_policy_hook_restrictive) (cmd, relation); + (*row_security_policy_hook_restrictive) (cmd, relation); /* * We sort restrictive policies by name so that any WCOs they generate @@ -462,7 +464,7 @@ get_policies_for_relation(Relation relation, CmdType cmd, Oid user_id, if (row_security_policy_hook_permissive) { List *hook_policies = - (*row_security_policy_hook_permissive) (cmd, relation); + (*row_security_policy_hook_permissive) (cmd, relation); foreach(item, hook_policies) { @@ -498,6 +500,7 @@ sort_policies_by_name(List *policies) foreach(item, policies) { RowSecurityPolicy *policy = (RowSecurityPolicy *) lfirst(item); + pols[ii++] = *policy; } @@ -551,8 +554,8 @@ add_security_quals(int rt_index, Expr *rowsec_expr; /* - * First collect up the permissive quals. If we do not find any permissive - * policies then no rows are visible (this is handled below). + * First collect up the permissive quals. If we do not find any + * permissive policies then no rows are visible (this is handled below). */ foreach(item, permissive_policies) { @@ -577,8 +580,8 @@ add_security_quals(int rt_index, /* * We now know that permissive policies exist, so we can now add * security quals based on the USING clauses from the restrictive - * policies. Since these need to be "AND"d together, we can - * just add them one at a time. + * policies. Since these need to be "AND"d together, we can just add + * them one at a time. */ foreach(item, restrictive_policies) { @@ -608,6 +611,7 @@ add_security_quals(int rt_index, *securityQuals = list_append_unique(*securityQuals, rowsec_expr); } else + /* * A permissive policy must exist for rows to be visible at all. * Therefore, if there were no permissive policies found, return a @@ -647,7 +651,7 @@ add_with_check_options(Relation rel, List *permissive_quals = NIL; #define QUAL_FOR_WCO(policy) \ - ( kind != WCO_RLS_CONFLICT_CHECK && \ + ( kind != WCO_RLS_CONFLICT_CHECK && \ (policy)->with_check_qual != NULL ? \ (policy)->with_check_qual : (policy)->qual ) @@ -668,11 +672,11 @@ add_with_check_options(Relation rel, } /* - * There must be at least one permissive qual found or no rows are - * allowed to be added. This is the same as in add_security_quals. + * There must be at least one permissive qual found or no rows are allowed + * to be added. This is the same as in add_security_quals. * - * If there are no permissive_quals then we fall through and return a single - * 'false' WCO, preventing all new rows. + * If there are no permissive_quals then we fall through and return a + * single 'false' WCO, preventing all new rows. */ if (permissive_quals != NIL) { diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c index 5804870ad4..a4163cf717 100644 --- a/src/backend/storage/buffer/buf_init.c +++ b/src/backend/storage/buffer/buf_init.c @@ -187,11 +187,12 @@ BufferShmemSize(void) /* * It would be nice to include the I/O locks in the BufferDesc, but that - * would increase the size of a BufferDesc to more than one cache line, and - * benchmarking has shown that keeping every BufferDesc aligned on a cache - * line boundary is important for performance. So, instead, the array of - * I/O locks is allocated in a separate tranche. 
Because those locks are - * not highly contentended, we lay out the array with minimal padding. + * would increase the size of a BufferDesc to more than one cache line, + * and benchmarking has shown that keeping every BufferDesc aligned on a + * cache line boundary is important for performance. So, instead, the + * array of I/O locks is allocated in a separate tranche. Because those + * locks are not highly contentended, we lay out the array with minimal + * padding. */ size = add_size(size, mul_size(NBuffers, sizeof(LWLockMinimallyPadded))); /* to allow aligning the above */ diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index 8a830d4f21..59a8a85dfc 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -4291,8 +4291,8 @@ void TestForOldSnapshot_impl(Snapshot snapshot, Relation relation) { if (!IsCatalogRelation(relation) - && !RelationIsAccessibleInLogicalDecoding(relation) - && (snapshot)->whenTaken < GetOldSnapshotThresholdTimestamp()) + && !RelationIsAccessibleInLogicalDecoding(relation) + && (snapshot)->whenTaken < GetOldSnapshotThresholdTimestamp()) ereport(ERROR, (errcode(ERRCODE_SNAPSHOT_TOO_OLD), errmsg("snapshot too old"))); diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c index 2ffa8ff24d..bbd90c911a 100644 --- a/src/backend/storage/freespace/freespace.c +++ b/src/backend/storage/freespace/freespace.c @@ -199,13 +199,13 @@ RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk, Size spaceAvail) */ void UpdateFreeSpaceMap(Relation rel, BlockNumber startBlkNum, - BlockNumber endBlkNum, Size freespace) + BlockNumber endBlkNum, Size freespace) { int new_cat = fsm_space_avail_to_cat(freespace); FSMAddress addr; uint16 slot; - BlockNumber blockNum; - BlockNumber lastBlkOnPage; + BlockNumber blockNum; + BlockNumber lastBlkOnPage; blockNum = startBlkNum; @@ -219,9 +219,9 @@ UpdateFreeSpaceMap(Relation rel, BlockNumber startBlkNum, fsm_update_recursive(rel, addr, new_cat); /* - * Get the last block number on this FSM page. If that's greater - * than or equal to our endBlkNum, we're done. Otherwise, advance - * to the first block on the next page. + * Get the last block number on this FSM page. If that's greater than + * or equal to our endBlkNum, we're done. Otherwise, advance to the + * first block on the next page. */ lastBlkOnPage = fsm_get_lastblckno(rel, addr); if (lastBlkOnPage >= endBlkNum) @@ -841,8 +841,8 @@ fsm_get_lastblckno(Relation rel, FSMAddress addr) int slot; /* - * Get the last slot number on the given address and convert that to - * block number + * Get the last slot number on the given address and convert that to block + * number */ slot = SlotsPerFSMPage - 1; return fsm_get_heap_blk(addr, slot); @@ -862,8 +862,8 @@ fsm_update_recursive(Relation rel, FSMAddress addr, uint8 new_cat) return; /* - * Get the parent page and our slot in the parent page, and - * update the information in that. + * Get the parent page and our slot in the parent page, and update the + * information in that. 
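A rough standalone sketch of the page-at-a-time loop the UpdateFreeSpaceMap() comments above describe: update the FSM page covering the current heap block, stop once that page reaches the end block, otherwise continue with the first block on the next page. The helper last_heap_block_on_fsm_page() and the HEAP_BLOCKS_PER_FSM_PAGE constant are illustrative assumptions; the real code uses the FSM address machinery (fsm_get_lastblckno, fsm_update_recursive) and updates upper FSM levels as well.

#include <stdio.h>

#define HEAP_BLOCKS_PER_FSM_PAGE 4096   /* illustrative constant only */

static unsigned
last_heap_block_on_fsm_page(unsigned heap_blk)
{
    /* Last heap block whose free space is tracked by the same FSM page. */
    return (heap_blk / HEAP_BLOCKS_PER_FSM_PAGE + 1) * HEAP_BLOCKS_PER_FSM_PAGE - 1;
}

static void
update_freespace_range(unsigned start_blk, unsigned end_blk, int category)
{
    unsigned    blk = start_blk;

    for (;;)
    {
        unsigned    last_on_page = last_heap_block_on_fsm_page(blk);

        printf("set category %d for FSM page covering blocks %u..%u\n",
               category, blk, last_on_page);

        /* Done once this FSM page reaches or passes the end block. */
        if (last_on_page >= end_blk)
            break;

        /* Otherwise continue with the first block on the next FSM page. */
        blk = last_on_page + 1;
    }
}

int
main(void)
{
    update_freespace_range(1000, 10000, 3);
    return 0;
}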
*/ parent = fsm_get_parent(addr, &parentslot); fsm_set_and_search(rel, parent, parentslot, new_cat, 0); diff --git a/src/backend/storage/ipc/dsm.c b/src/backend/storage/ipc/dsm.c index cd13a6284c..47f2bea0be 100644 --- a/src/backend/storage/ipc/dsm.c +++ b/src/backend/storage/ipc/dsm.c @@ -245,8 +245,8 @@ dsm_cleanup_using_control_segment(dsm_handle old_control_handle) } /* - * OK, the control segment looks basically valid, so we can use it to - * get a list of segments that need to be removed. + * OK, the control segment looks basically valid, so we can use it to get + * a list of segments that need to be removed. */ nitems = old_control->nitems; for (i = 0; i < nitems; ++i) diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c index e9de51bdfa..e5d487dbb7 100644 --- a/src/backend/storage/ipc/procarray.c +++ b/src/backend/storage/ipc/procarray.c @@ -460,7 +460,7 @@ ProcArrayEndTransactionInternal(PGPROC *proc, PGXACT *pgxact, pgxact->xmin = InvalidTransactionId; /* must be cleared with xid/xmin: */ pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK; - pgxact->delayChkpt = false; /* be sure this is cleared in abort */ + pgxact->delayChkpt = false; /* be sure this is cleared in abort */ proc->recoveryConflictPending = false; /* Clear the subtransaction-XID cache too while holding the lock */ @@ -559,8 +559,8 @@ ProcArrayGroupClearXid(PGPROC *proc, TransactionId latestXid) /* Walk the list and clear all XIDs. */ while (nextidx != INVALID_PGPROCNO) { - PGPROC *proc = &allProcs[nextidx]; - PGXACT *pgxact = &allPgXact[nextidx]; + PGPROC *proc = &allProcs[nextidx]; + PGXACT *pgxact = &allPgXact[nextidx]; ProcArrayEndTransactionInternal(proc, pgxact, proc->procArrayGroupMemberXid); @@ -580,7 +580,7 @@ ProcArrayGroupClearXid(PGPROC *proc, TransactionId latestXid) */ while (wakeidx != INVALID_PGPROCNO) { - PGPROC *proc = &allProcs[wakeidx]; + PGPROC *proc = &allProcs[wakeidx]; wakeidx = pg_atomic_read_u32(&proc->procArrayGroupNext); pg_atomic_write_u32(&proc->procArrayGroupNext, INVALID_PGPROCNO); @@ -642,8 +642,8 @@ ProcArrayInitRecovery(TransactionId initializedUptoXID) Assert(TransactionIdIsNormal(initializedUptoXID)); /* - * we set latestObservedXid to the xid SUBTRANS has been initialized up to, - * so we can extend it from that point onwards in + * we set latestObservedXid to the xid SUBTRANS has been initialized up + * to, so we can extend it from that point onwards in * RecordKnownAssignedTransactionIds, and when we get consistent in * ProcArrayApplyRecoveryInfo(). */ @@ -2591,8 +2591,8 @@ GetConflictingVirtualXIDs(TransactionId limitXmin, Oid dbOid) /* * We ignore an invalid pxmin because this means that backend has * no snapshot currently. We hold a Share lock to avoid contention - * with users taking snapshots. That is not a problem because - * the current xmin is always at least one higher than the latest + * with users taking snapshots. That is not a problem because the + * current xmin is always at least one higher than the latest * removed xid, so any new snapshot would never conflict with the * test here. 
*/ diff --git a/src/backend/storage/ipc/shm_mq.c b/src/backend/storage/ipc/shm_mq.c index 03ca79b5e3..5f6226c9bb 100644 --- a/src/backend/storage/ipc/shm_mq.c +++ b/src/backend/storage/ipc/shm_mq.c @@ -1007,8 +1007,8 @@ shm_mq_receive_bytes(shm_mq *mq, Size bytes_needed, bool nowait, static bool shm_mq_counterparty_gone(volatile shm_mq *mq, BackgroundWorkerHandle *handle) { - bool detached; - pid_t pid; + bool detached; + pid_t pid; /* Acquire the lock just long enough to check the pointer. */ SpinLockAcquire(&mq->mq_mutex); diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c index 762dfa65eb..7a512b3f81 100644 --- a/src/backend/storage/ipc/standby.c +++ b/src/backend/storage/ipc/standby.c @@ -370,6 +370,7 @@ ResolveRecoveryConflictWithLock(LOCKTAG locktag) * We're already behind, so clear a path as quickly as possible. */ VirtualTransactionId *backends; + backends = GetLockConflicts(&locktag, AccessExclusiveLock); ResolveRecoveryConflictWithVirtualXIDs(backends, PROCSIG_RECOVERY_CONFLICT_LOCK); diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c index 41f6930645..dba3809e74 100644 --- a/src/backend/storage/lmgr/lock.c +++ b/src/backend/storage/lmgr/lock.c @@ -1153,13 +1153,13 @@ SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc, uint32 partition = LockHashPartition(hashcode); /* - * It might seem unsafe to access proclock->groupLeader without a lock, - * but it's not really. Either we are initializing a proclock on our - * own behalf, in which case our group leader isn't changing because - * the group leader for a process can only ever be changed by the - * process itself; or else we are transferring a fast-path lock to the - * main lock table, in which case that process can't change it's lock - * group leader without first releasing all of its locks (and in + * It might seem unsafe to access proclock->groupLeader without a + * lock, but it's not really. Either we are initializing a proclock + * on our own behalf, in which case our group leader isn't changing + * because the group leader for a process can only ever be changed by + * the process itself; or else we are transferring a fast-path lock to + * the main lock table, in which case that process can't change it's + * lock group leader without first releasing all of its locks (and in * particular the one we are currently transferring). */ proclock->groupLeader = proc->lockGroupLeader != NULL ? @@ -1319,10 +1319,9 @@ LockCheckConflicts(LockMethod lockMethodTable, } /* - * Rats. Something conflicts. But it could still be my own lock, or - * a lock held by another member of my locking group. First, figure out - * how many conflicts remain after subtracting out any locks I hold - * myself. + * Rats. Something conflicts. But it could still be my own lock, or a + * lock held by another member of my locking group. First, figure out how + * many conflicts remain after subtracting out any locks I hold myself. */ myLocks = proclock->holdMask; for (i = 1; i <= numLockModes; i++) @@ -1357,9 +1356,10 @@ LockCheckConflicts(LockMethod lockMethodTable, /* * Locks held in conflicting modes by members of our own lock group are * not real conflicts; we can subtract those out and see if we still have - * a conflict. This is O(N) in the number of processes holding or awaiting - * locks on this object. We could improve that by making the shared memory - * state more complex (and larger) but it doesn't seem worth it. + * a conflict. 
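A small standalone sketch of the "subtract out our own group's locks" idea from the LockCheckConflicts() comments above, reduced to per-mode counters: a requested mode truly conflicts only if some conflicting mode is held by a process outside the requester's lock group. The mode numbering, NUM_MODES, and has_real_conflict() are hypothetical; the real code works with holdMask bitmasks and walks the PROCLOCK list for the lock.

#include <stdbool.h>
#include <stdio.h>

#define NUM_MODES 8

static bool
has_real_conflict(unsigned conflict_mask,   /* bit i set: mode i conflicts */
                  const int granted[],      /* granted locks per mode, all holders */
                  const int group_held[])   /* of those, held by our lock group */
{
    for (int mode = 0; mode < NUM_MODES; mode++)
    {
        if ((conflict_mask & (1u << mode)) == 0)
            continue;           /* this mode doesn't conflict with the request */

        /* Locks held by our own group don't count as conflicts. */
        if (granted[mode] - group_held[mode] > 0)
            return true;
    }
    return false;
}

int
main(void)
{
    int     granted[NUM_MODES] = {0, 0, 0, 2, 0, 0, 0, 0};
    int     group_held[NUM_MODES] = {0, 0, 0, 2, 0, 0, 0, 0};

    /* Mode 3 conflicts with the request, but only our group holds it. */
    printf("conflict: %d\n", has_real_conflict(1u << 3, granted, group_held));
    return 0;
}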
This is O(N) in the number of processes holding or + * awaiting locks on this object. We could improve that by making the + * shared memory state more complex (and larger) but it doesn't seem worth + * it. */ procLocks = &(lock->procLocks); otherproclock = (PROCLOCK *) @@ -1370,7 +1370,7 @@ LockCheckConflicts(LockMethod lockMethodTable, proclock->groupLeader == otherproclock->groupLeader && (otherproclock->holdMask & conflictMask) != 0) { - int intersectMask = otherproclock->holdMask & conflictMask; + int intersectMask = otherproclock->holdMask & conflictMask; for (i = 1; i <= numLockModes; i++) { @@ -2583,8 +2583,8 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag * * proc->databaseId is set at backend startup time and never changes * thereafter, so it might be safe to perform this test before - * acquiring &proc->backendLock. In particular, it's certainly safe to - * assume that if the target backend holds any fast-path locks, it + * acquiring &proc->backendLock. In particular, it's certainly safe + * to assume that if the target backend holds any fast-path locks, it * must have performed a memory-fencing operation (in particular, an * LWLock acquisition) since setting proc->databaseId. However, it's * less clear that our backend is certain to have performed a memory diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c index 25eec9800d..7ffa87d914 100644 --- a/src/backend/storage/lmgr/lwlock.c +++ b/src/backend/storage/lmgr/lwlock.c @@ -208,25 +208,25 @@ PRINT_LWDEBUG(const char *where, LWLock *lock, LWLockMode mode) (errhidestmt(true), errhidecontext(true), errmsg_internal("%d: %s(%s): excl %u shared %u haswaiters %u waiters %u rOK %d", - MyProcPid, - where, MainLWLockNames[id], - (state & LW_VAL_EXCLUSIVE) != 0, - state & LW_SHARED_MASK, - (state & LW_FLAG_HAS_WAITERS) != 0, - pg_atomic_read_u32(&lock->nwaiters), - (state & LW_FLAG_RELEASE_OK) != 0))); + MyProcPid, + where, MainLWLockNames[id], + (state & LW_VAL_EXCLUSIVE) != 0, + state & LW_SHARED_MASK, + (state & LW_FLAG_HAS_WAITERS) != 0, + pg_atomic_read_u32(&lock->nwaiters), + (state & LW_FLAG_RELEASE_OK) != 0))); else ereport(LOG, (errhidestmt(true), errhidecontext(true), errmsg_internal("%d: %s(%s %d): excl %u shared %u haswaiters %u waiters %u rOK %d", - MyProcPid, - where, T_NAME(lock), id, - (state & LW_VAL_EXCLUSIVE) != 0, - state & LW_SHARED_MASK, - (state & LW_FLAG_HAS_WAITERS) != 0, - pg_atomic_read_u32(&lock->nwaiters), - (state & LW_FLAG_RELEASE_OK) != 0))); + MyProcPid, + where, T_NAME(lock), id, + (state & LW_VAL_EXCLUSIVE) != 0, + state & LW_SHARED_MASK, + (state & LW_FLAG_HAS_WAITERS) != 0, + pg_atomic_read_u32(&lock->nwaiters), + (state & LW_FLAG_RELEASE_OK) != 0))); } } @@ -243,13 +243,13 @@ LOG_LWDEBUG(const char *where, LWLock *lock, const char *msg) (errhidestmt(true), errhidecontext(true), errmsg_internal("%s(%s): %s", where, - MainLWLockNames[id], msg))); + MainLWLockNames[id], msg))); else ereport(LOG, (errhidestmt(true), errhidecontext(true), errmsg_internal("%s(%s %d): %s", where, - T_NAME(lock), id, msg))); + T_NAME(lock), id, msg))); } } @@ -760,8 +760,8 @@ GetLWLockIdentifier(uint8 classId, uint16 eventId) /* * It is quite possible that user has registered tranche in one of the - * backends (e.g. by allocating lwlocks in dynamic shared memory) but - * not all of them, so we can't assume the tranche is registered here. + * backends (e.g. 
by allocating lwlocks in dynamic shared memory) but not + * all of them, so we can't assume the tranche is registered here. */ if (eventId >= LWLockTranchesAllocated || LWLockTrancheArray[eventId]->name == NULL) diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c index a66e07b766..9a758bd916 100644 --- a/src/backend/storage/lmgr/proc.c +++ b/src/backend/storage/lmgr/proc.c @@ -288,7 +288,7 @@ InitProcGlobal(void) void InitProcess(void) { - PGPROC * volatile * procgloballist; + PGPROC *volatile * procgloballist; /* * ProcGlobal should be set up already (if we are a backend, we inherit @@ -342,8 +342,8 @@ InitProcess(void) MyPgXact = &ProcGlobal->allPgXact[MyProc->pgprocno]; /* - * Cross-check that the PGPROC is of the type we expect; if this were - * not the case, it would get returned to the wrong list. + * Cross-check that the PGPROC is of the type we expect; if this were not + * the case, it would get returned to the wrong list. */ Assert(MyProc->procgloballist == procgloballist); @@ -781,7 +781,7 @@ static void ProcKill(int code, Datum arg) { PGPROC *proc; - PGPROC * volatile * procgloballist; + PGPROC *volatile * procgloballist; Assert(MyProc != NULL); diff --git a/src/backend/tsearch/spell.c b/src/backend/tsearch/spell.c index 8b46ea5bf9..c43c206c03 100644 --- a/src/backend/tsearch/spell.c +++ b/src/backend/tsearch/spell.c @@ -25,23 +25,23 @@ * * A compiled dictionary is stored in the IspellDict structure. Compilation of * a dictionary is divided into the several steps: - * - NIImportDictionary() - stores each word of a .dict file in the - * temporary Spell field. - * - NIImportAffixes() - stores affix rules of an .affix file in the - * Affix field (not temporary) if an .affix file has the Ispell format. - * -> NIImportOOAffixes() - stores affix rules if an .affix file has the - * Hunspell format. The AffixData field is initialized if AF parameter - * is defined. - * - NISortDictionary() - builds a prefix tree (Trie) from the words list - * and stores it in the Dictionary field. The words list is got from the - * Spell field. The AffixData field is initialized if AF parameter is not - * defined. - * - NISortAffixes(): - * - builds a list of compond affixes from the affix list and stores it - * in the CompoundAffix. - * - builds prefix trees (Trie) from the affix list for prefixes and suffixes - * and stores them in Suffix and Prefix fields. - * The affix list is got from the Affix field. + * - NIImportDictionary() - stores each word of a .dict file in the + * temporary Spell field. + * - NIImportAffixes() - stores affix rules of an .affix file in the + * Affix field (not temporary) if an .affix file has the Ispell format. + * -> NIImportOOAffixes() - stores affix rules if an .affix file has the + * Hunspell format. The AffixData field is initialized if AF parameter + * is defined. + * - NISortDictionary() - builds a prefix tree (Trie) from the words list + * and stores it in the Dictionary field. The words list is got from the + * Spell field. The AffixData field is initialized if AF parameter is not + * defined. + * - NISortAffixes(): + * - builds a list of compond affixes from the affix list and stores it + * in the CompoundAffix. + * - builds prefix trees (Trie) from the affix list for prefixes and suffixes + * and stores them in Suffix and Prefix fields. + * The affix list is got from the Affix field. 
* * Memory management * ----------------- @@ -204,14 +204,14 @@ static int cmpspellaffix(const void *s1, const void *s2) { return (strcmp((*(SPELL *const *) s1)->p.flag, - (*(SPELL *const *) s2)->p.flag)); + (*(SPELL *const *) s2)->p.flag)); } static int cmpcmdflag(const void *f1, const void *f2) { - CompoundAffixFlag *fv1 = (CompoundAffixFlag *) f1, - *fv2 = (CompoundAffixFlag *) f2; + CompoundAffixFlag *fv1 = (CompoundAffixFlag *) f1, + *fv2 = (CompoundAffixFlag *) f2; Assert(fv1->flagMode == fv2->flagMode); @@ -335,15 +335,15 @@ cmpaffix(const void *s1, const void *s2) * * Depending on the flagMode an affix string can have the following format: * - FM_CHAR: ABCD - * Here we have 4 flags: A, B, C and D + * Here we have 4 flags: A, B, C and D * - FM_LONG: ABCDE* - * Here we have 3 flags: AB, CD and E* + * Here we have 3 flags: AB, CD and E* * - FM_NUM: 200,205,50 - * Here we have 3 flags: 200, 205 and 50 + * Here we have 3 flags: 200, 205 and 50 * * Conf: current dictionary. * sflagset: the set of affix flags. Returns a reference to the start of a next - * affix flag. + * affix flag. * sflag: returns an affix flag from sflagset. */ static void @@ -358,7 +358,7 @@ getNextFlagFromString(IspellDict *Conf, char **sflagset, char *sflag) maxstep = (Conf->flagMode == FM_LONG) ? 2 : 1; - while(**sflagset) + while (**sflagset) { switch (Conf->flagMode) { @@ -413,8 +413,8 @@ getNextFlagFromString(IspellDict *Conf, char **sflagset, char *sflag) { ereport(ERROR, (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("invalid character in affix flag \"%s\"", - *sflagset))); + errmsg("invalid character in affix flag \"%s\"", + *sflagset))); } *sflagset += pg_mblen(*sflagset); @@ -432,8 +432,8 @@ getNextFlagFromString(IspellDict *Conf, char **sflagset, char *sflag) if (Conf->flagMode == FM_LONG && maxstep > 0) ereport(ERROR, - (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("invalid affix flag \"%s\" with long flag value", sbuf))); + (errcode(ERRCODE_CONFIG_FILE_ERROR), + errmsg("invalid affix flag \"%s\" with long flag value", sbuf))); *sflag = '\0'; } @@ -527,6 +527,7 @@ NIImportDictionary(IspellDict *Conf, const char *filename) { char *s, *pstr; + /* Set of affix flags */ const char *flag; @@ -581,11 +582,11 @@ NIImportDictionary(IspellDict *Conf, const char *filename) * meter/GMD * * The affix rule with the flag S: - * SFX S y ies [^aeiou]y + * SFX S y ies [^aeiou]y * is not presented here. * * The affix rule with the flag M: - * SFX M 0 's . + * SFX M 0 's . * is presented here. * * Conf: current dictionary. @@ -620,9 +621,9 @@ FindWord(IspellDict *Conf, const char *word, char *affixflag, int flag) if (flag == 0) { /* - * The word can be formed only with another word. - * And in the flag parameter there is not a sign - * that we search compound words. + * The word can be formed only with another word. And + * in the flag parameter there is not a sign that we + * search compound words. */ if (StopMiddle->compoundflag & FF_COMPOUNDONLY) return 0; @@ -658,21 +659,21 @@ FindWord(IspellDict *Conf, const char *word, char *affixflag, int flag) * Conf: current dictionary. * flag: affix flag ('\' in the below example). * flagflags: set of flags from the flagval field for this affix rule. This set - * is listed after '/' character in the added string (repl). + * is listed after '/' character in the added string (repl). 
* - * For example L flag in the hunspell_sample.affix: - * SFX \ 0 Y/L [^Y] + * For example L flag in the hunspell_sample.affix: + * SFX \ 0 Y/L [^Y] * * mask: condition for search ('[^Y]' in the above example). * find: stripping characters from beginning (at prefix) or end (at suffix) - * of the word ('0' in the above example, 0 means that there is not - * stripping character). + * of the word ('0' in the above example, 0 means that there is not + * stripping character). * repl: adding string after stripping ('Y' in the above example). * type: FF_SUFFIX or FF_PREFIX. */ static void -NIAddAffix(IspellDict *Conf, const char* flag, char flagflags, const char *mask, - const char *find, const char *repl, int type) +NIAddAffix(IspellDict *Conf, const char *flag, char flagflags, const char *mask, + const char *find, const char *repl, int type) { AFFIX *Affix; @@ -1024,8 +1025,8 @@ setCompoundAffixFlagValue(IspellDict *Conf, CompoundAffixFlag *entry, { if (Conf->flagMode == FM_NUM) { - char *next; - int i; + char *next; + int i; i = strtol(s, &next, 10); if (s == next || errno == ERANGE) @@ -1056,10 +1057,10 @@ setCompoundAffixFlagValue(IspellDict *Conf, CompoundAffixFlag *entry, static void addCompoundAffixFlagValue(IspellDict *Conf, char *s, uint32 val) { - CompoundAffixFlag *newValue; - char sbuf[BUFSIZ]; - char *sflag; - int clen; + CompoundAffixFlag *newValue; + char sbuf[BUFSIZ]; + char *sflag; + int clen; while (*s && t_isspace(s)) s += pg_mblen(s); @@ -1088,7 +1089,7 @@ addCompoundAffixFlagValue(IspellDict *Conf, char *s, uint32 val) Conf->mCompoundAffixFlag *= 2; Conf->CompoundAffixFlags = (CompoundAffixFlag *) repalloc((void *) Conf->CompoundAffixFlags, - Conf->mCompoundAffixFlag * sizeof(CompoundAffixFlag)); + Conf->mCompoundAffixFlag * sizeof(CompoundAffixFlag)); } else { @@ -1113,11 +1114,11 @@ addCompoundAffixFlagValue(IspellDict *Conf, char *s, uint32 val) static int getCompoundAffixFlagValue(IspellDict *Conf, char *s) { - uint32 flag = 0; + uint32 flag = 0; CompoundAffixFlag *found, - key; - char sflag[BUFSIZ]; - char *flagcur; + key; + char sflag[BUFSIZ]; + char *flagcur; if (Conf->nCompoundAffixFlag == 0) return 0; @@ -1151,8 +1152,8 @@ getAffixFlagSet(IspellDict *Conf, char *s) { if (Conf->useFlagAliases && *s != '\0') { - int curaffix; - char *end; + int curaffix; + char *end; curaffix = strtol(s, &end, 10); if (s == end || errno == ERANGE) @@ -1161,9 +1162,10 @@ getAffixFlagSet(IspellDict *Conf, char *s) errmsg("invalid affix alias \"%s\"", s))); if (curaffix > 0 && curaffix <= Conf->nAffixData) + /* - * Do not subtract 1 from curaffix - * because empty string was added in NIImportOOAffixes + * Do not subtract 1 from curaffix because empty string was added + * in NIImportOOAffixes */ return Conf->AffixData[curaffix]; else @@ -1260,9 +1262,9 @@ NIImportOOAffixes(IspellDict *Conf, const char *filename) Conf->flagMode = FM_NUM; else if (STRNCMP(s, "default") != 0) ereport(ERROR, - (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("Ispell dictionary supports only default, " - "long and num flag value"))); + (errcode(ERRCODE_CONFIG_FILE_ERROR), + errmsg("Ispell dictionary supports only default, " + "long and num flag value"))); } } @@ -1303,8 +1305,8 @@ NIImportOOAffixes(IspellDict *Conf, const char *filename) naffix = atoi(sflag); if (naffix == 0) ereport(ERROR, - (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("invalid number of flag vector aliases"))); + (errcode(ERRCODE_CONFIG_FILE_ERROR), + errmsg("invalid number of flag vector aliases"))); /* Also reserve place for empty flag set 
*/ naffix++; @@ -1596,7 +1598,8 @@ MergeAffix(IspellDict *Conf, int a1, int a2) static uint32 makeCompoundFlags(IspellDict *Conf, int affix) { - char *str = Conf->AffixData[affix]; + char *str = Conf->AffixData[affix]; + return (getCompoundAffixFlagValue(Conf, str) & FF_COMPOUNDFLAGMASK); } @@ -1700,14 +1703,14 @@ NISortDictionary(IspellDict *Conf) /* compress affixes */ /* - * If we use flag aliases then we need to use Conf->AffixData filled - * in the NIImportOOAffixes(). + * If we use flag aliases then we need to use Conf->AffixData filled in + * the NIImportOOAffixes(). */ if (Conf->useFlagAliases) { for (i = 0; i < Conf->nspell; i++) { - char *end; + char *end; if (*Conf->Spell[i]->p.flag != '\0') { @@ -1762,7 +1765,7 @@ NISortDictionary(IspellDict *Conf) curaffix++; Assert(curaffix < naffix); Conf->AffixData[curaffix] = cpstrdup(Conf, - Conf->Spell[i]->p.flag); + Conf->Spell[i]->p.flag); } Conf->Spell[i]->p.d.affix = curaffix; @@ -2219,8 +2222,8 @@ NormalizeSubWord(IspellDict *Conf, char *word, int flag) if (CheckAffix(newword, swrdlen, prefix->aff[j], flag, pnewword, &baselen)) { /* prefix success */ - char *ff = (prefix->aff[j]->flagflags & suffix->aff[i]->flagflags & FF_CROSSPRODUCT) ? - VoidString : prefix->aff[j]->flag; + char *ff = (prefix->aff[j]->flagflags & suffix->aff[i]->flagflags & FF_CROSSPRODUCT) ? + VoidString : prefix->aff[j]->flag; if (FindWord(Conf, pnewword, ff, flag)) cur += addToResult(forms, cur, pnewword); diff --git a/src/backend/tsearch/to_tsany.c b/src/backend/tsearch/to_tsany.c index d41f82c479..80d80f2451 100644 --- a/src/backend/tsearch/to_tsany.c +++ b/src/backend/tsearch/to_tsany.c @@ -20,8 +20,8 @@ typedef struct MorphOpaque { - Oid cfg_id; - int qoperator; /* query operator */ + Oid cfg_id; + int qoperator; /* query operator */ } MorphOpaque; @@ -274,14 +274,14 @@ to_tsvector(PG_FUNCTION_ARGS) static void pushval_morph(Datum opaque, TSQueryParserState state, char *strval, int lenval, int16 weight, bool prefix) { - int32 count = 0; - ParsedText prs; - uint32 variant, - pos = 0, - cntvar = 0, - cntpos = 0, - cnt = 0; - MorphOpaque *data = (MorphOpaque *) DatumGetPointer(opaque); + int32 count = 0; + ParsedText prs; + uint32 variant, + pos = 0, + cntvar = 0, + cntpos = 0, + cnt = 0; + MorphOpaque *data = (MorphOpaque *) DatumGetPointer(opaque); prs.lenwords = 4; prs.curwords = 0; @@ -295,8 +295,8 @@ pushval_morph(Datum opaque, TSQueryParserState state, char *strval, int lenval, while (count < prs.curwords) { /* - * Were any stop words removed? If so, fill empty positions - * with placeholders linked by an appropriate operator. + * Were any stop words removed? If so, fill empty positions with + * placeholders linked by an appropriate operator. 
*/ if (pos > 0 && pos + 1 < prs.words[count].pos.pos) { @@ -330,7 +330,7 @@ pushval_morph(Datum opaque, TSQueryParserState state, char *strval, int lenval, prs.words[count].word, prs.words[count].len, weight, - ((prs.words[count].flags & TSL_PREFIX) || prefix)); + ((prs.words[count].flags & TSL_PREFIX) || prefix)); pfree(prs.words[count].word); if (cnt) pushOperator(state, OP_AND, 0); @@ -362,9 +362,9 @@ pushval_morph(Datum opaque, TSQueryParserState state, char *strval, int lenval, Datum to_tsquery_byid(PG_FUNCTION_ARGS) { - text *in = PG_GETARG_TEXT_P(1); - TSQuery query; - MorphOpaque data; + text *in = PG_GETARG_TEXT_P(1); + TSQuery query; + MorphOpaque data; data.cfg_id = PG_GETARG_OID(0); data.qoperator = OP_AND; @@ -392,9 +392,9 @@ to_tsquery(PG_FUNCTION_ARGS) Datum plainto_tsquery_byid(PG_FUNCTION_ARGS) { - text *in = PG_GETARG_TEXT_P(1); - TSQuery query; - MorphOpaque data; + text *in = PG_GETARG_TEXT_P(1); + TSQuery query; + MorphOpaque data; data.cfg_id = PG_GETARG_OID(0); data.qoperator = OP_AND; @@ -423,9 +423,9 @@ plainto_tsquery(PG_FUNCTION_ARGS) Datum phraseto_tsquery_byid(PG_FUNCTION_ARGS) { - text *in = PG_GETARG_TEXT_P(1); - TSQuery query; - MorphOpaque data; + text *in = PG_GETARG_TEXT_P(1); + TSQuery query; + MorphOpaque data; data.cfg_id = PG_GETARG_OID(0); data.qoperator = OP_PHRASE; diff --git a/src/backend/tsearch/wparser_def.c b/src/backend/tsearch/wparser_def.c index ca352af331..ac1c4d2316 100644 --- a/src/backend/tsearch/wparser_def.c +++ b/src/backend/tsearch/wparser_def.c @@ -2033,7 +2033,7 @@ static bool checkcondition_HL(void *opaque, QueryOperand *val, ExecPhraseData *data) { int i; - hlCheck *checkval = (hlCheck *) opaque; + hlCheck *checkval = (hlCheck *) opaque; for (i = 0; i < checkval->len; i++) { diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c index d2b23d05cb..fecf605541 100644 --- a/src/backend/utils/adt/acl.c +++ b/src/backend/utils/adt/acl.c @@ -5277,12 +5277,12 @@ check_rolespec_name(const Node *node, const char *detail_msg) ereport(ERROR, (errcode(ERRCODE_RESERVED_NAME), errmsg("role \"%s\" is reserved", - role->rolename), + role->rolename), errdetail("%s", detail_msg))); else ereport(ERROR, (errcode(ERRCODE_RESERVED_NAME), errmsg("role \"%s\" is reserved", - role->rolename))); + role->rolename))); } } diff --git a/src/backend/utils/adt/datum.c b/src/backend/utils/adt/datum.c index 803ba4752e..c6c296b960 100644 --- a/src/backend/utils/adt/datum.c +++ b/src/backend/utils/adt/datum.c @@ -257,7 +257,7 @@ datumIsEqual(Datum value1, Datum value2, bool typByVal, int typLen) Size datumEstimateSpace(Datum value, bool isnull, bool typByVal, int typLen) { - Size sz = sizeof(int); + Size sz = sizeof(int); if (!isnull) { @@ -267,6 +267,7 @@ datumEstimateSpace(Datum value, bool isnull, bool typByVal, int typLen) else if (VARATT_IS_EXTERNAL_EXPANDED(value)) { ExpandedObjectHeader *eoh = DatumGetEOHP(value); + sz += EOH_get_flat_size(eoh); } else @@ -298,7 +299,7 @@ datumSerialize(Datum value, bool isnull, bool typByVal, int typLen, char **start_address) { ExpandedObjectHeader *eoh = NULL; - int header; + int header; /* Write header word. */ if (isnull) @@ -346,8 +347,8 @@ datumSerialize(Datum value, bool isnull, bool typByVal, int typLen, Datum datumRestore(char **start_address, bool *isnull) { - int header; - void *d; + int header; + void *d; /* Read header word. 
*/ memcpy(&header, *start_address, sizeof(int)); diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c index d622e98f30..ae93fe0167 100644 --- a/src/backend/utils/adt/formatting.c +++ b/src/backend/utils/adt/formatting.c @@ -5074,9 +5074,9 @@ numeric_to_number(PG_FUNCTION_ARGS) { Numeric x; Numeric a = DatumGetNumeric(DirectFunctionCall1(int4_numeric, - Int32GetDatum(10))); + Int32GetDatum(10))); Numeric b = DatumGetNumeric(DirectFunctionCall1(int4_numeric, - Int32GetDatum(-Num.multi))); + Int32GetDatum(-Num.multi))); x = DatumGetNumeric(DirectFunctionCall2(numeric_power, NumericGetDatum(a), diff --git a/src/backend/utils/adt/geo_spgist.c b/src/backend/utils/adt/geo_spgist.c index e3945f2081..019015656f 100644 --- a/src/backend/utils/adt/geo_spgist.c +++ b/src/backend/utils/adt/geo_spgist.c @@ -101,19 +101,19 @@ typedef struct { double low; double high; -} Range; +} Range; typedef struct { Range left; Range right; -} RangeBox; +} RangeBox; typedef struct { RangeBox range_box_x; RangeBox range_box_y; -} RectBox; +} RectBox; /* * Calculate the quadrant @@ -173,7 +173,7 @@ getRangeBox(BOX *box) static RectBox * initRectBox(void) { - RectBox *rect_box = (RectBox *) palloc(sizeof(RectBox)); + RectBox *rect_box = (RectBox *) palloc(sizeof(RectBox)); double infinity = get_float8_infinity(); rect_box->range_box_x.left.low = -infinity; @@ -201,7 +201,7 @@ initRectBox(void) static RectBox * nextRectBox(RectBox *rect_box, RangeBox *centroid, uint8 quadrant) { - RectBox *next_rect_box = (RectBox *) palloc(sizeof(RectBox)); + RectBox *next_rect_box = (RectBox *) palloc(sizeof(RectBox)); memcpy(next_rect_box, rect_box, sizeof(RectBox)); @@ -233,7 +233,7 @@ static bool overlap2D(RangeBox *range_box, Range *query) { return FPge(range_box->right.high, query->low) && - FPle(range_box->left.low, query->high); + FPle(range_box->left.low, query->high); } /* Can any rectangle from rect_box overlap with this argument? */ @@ -241,7 +241,7 @@ static bool overlap4D(RectBox *rect_box, RangeBox *query) { return overlap2D(&rect_box->range_box_x, &query->left) && - overlap2D(&rect_box->range_box_y, &query->right); + overlap2D(&rect_box->range_box_y, &query->right); } /* Can any range from range_box contain this argument? */ @@ -249,15 +249,15 @@ static bool contain2D(RangeBox *range_box, Range *query) { return FPge(range_box->right.high, query->high) && - FPle(range_box->left.low, query->low); + FPle(range_box->left.low, query->low); } /* Can any rectangle from rect_box contain this argument? */ static bool -contain4D(RectBox *rect_box, RangeBox * query) +contain4D(RectBox *rect_box, RangeBox *query) { return contain2D(&rect_box->range_box_x, &query->left) && - contain2D(&rect_box->range_box_y, &query->right); + contain2D(&rect_box->range_box_y, &query->right); } /* Can any range from range_box be contained by this argument? */ @@ -265,9 +265,9 @@ static bool contained2D(RangeBox *range_box, Range *query) { return FPle(range_box->left.low, query->high) && - FPge(range_box->left.high, query->low) && - FPle(range_box->right.low, query->high) && - FPge(range_box->right.high, query->low); + FPge(range_box->left.high, query->low) && + FPle(range_box->right.low, query->high) && + FPge(range_box->right.high, query->low); } /* Can any rectangle from rect_box be contained by this argument? 
*/ @@ -275,7 +275,7 @@ static bool contained4D(RectBox *rect_box, RangeBox *query) { return contained2D(&rect_box->range_box_x, &query->left) && - contained2D(&rect_box->range_box_y, &query->right); + contained2D(&rect_box->range_box_y, &query->right); } /* Can any range from range_box to be lower than this argument? */ @@ -283,7 +283,7 @@ static bool lower2D(RangeBox *range_box, Range *query) { return FPlt(range_box->left.low, query->low) && - FPlt(range_box->right.low, query->low); + FPlt(range_box->right.low, query->low); } /* Can any range from range_box to be higher than this argument? */ @@ -291,7 +291,7 @@ static bool higher2D(RangeBox *range_box, Range *query) { return FPgt(range_box->left.high, query->high) && - FPgt(range_box->right.high, query->high); + FPgt(range_box->right.high, query->high); } /* Can any rectangle from rect_box be left of this argument? */ @@ -396,8 +396,8 @@ spg_box_quad_choose(PG_FUNCTION_ARGS) Datum spg_box_quad_picksplit(PG_FUNCTION_ARGS) { - spgPickSplitIn *in = (spgPickSplitIn *) PG_GETARG_POINTER(0); - spgPickSplitOut *out = (spgPickSplitOut *) PG_GETARG_POINTER(1); + spgPickSplitIn *in = (spgPickSplitIn *) PG_GETARG_POINTER(0); + spgPickSplitOut *out = (spgPickSplitOut *) PG_GETARG_POINTER(1); BOX *centroid; int median, i; @@ -409,7 +409,7 @@ spg_box_quad_picksplit(PG_FUNCTION_ARGS) /* Calculate median of all 4D coordinates */ for (i = 0; i < in->nTuples; i++) { - BOX *box = DatumGetBoxP(in->datums[i]); + BOX *box = DatumGetBoxP(in->datums[i]); lowXs[i] = box->low.x; highXs[i] = box->high.x; @@ -442,13 +442,13 @@ spg_box_quad_picksplit(PG_FUNCTION_ARGS) out->leafTupleDatums = palloc(sizeof(Datum) * in->nTuples); /* - * Assign ranges to corresponding nodes according to quadrants - * relative to the "centroid" range + * Assign ranges to corresponding nodes according to quadrants relative to + * the "centroid" range */ for (i = 0; i < in->nTuples; i++) { - BOX *box = DatumGetBoxP(in->datums[i]); - uint8 quadrant = getQuadrant(centroid, box); + BOX *box = DatumGetBoxP(in->datums[i]); + uint8 quadrant = getQuadrant(centroid, box); out->leafTupleDatums[i] = BoxPGetDatum(box); out->mapTuplesToNodes[i] = quadrant; @@ -465,12 +465,12 @@ spg_box_quad_inner_consistent(PG_FUNCTION_ARGS) { spgInnerConsistentIn *in = (spgInnerConsistentIn *) PG_GETARG_POINTER(0); spgInnerConsistentOut *out = (spgInnerConsistentOut *) PG_GETARG_POINTER(1); - int i; - MemoryContext old_ctx; - RectBox *rect_box; - uint8 quadrant; - RangeBox *centroid, - **queries; + int i; + MemoryContext old_ctx; + RectBox *rect_box; + uint8 quadrant; + RangeBox *centroid, + **queries; if (in->allTheSame) { @@ -484,8 +484,8 @@ spg_box_quad_inner_consistent(PG_FUNCTION_ARGS) } /* - * We are saving the traversal value or initialize it an unbounded - * one, if we have just begun to walk the tree. + * We are saving the traversal value or initialize it an unbounded one, if + * we have just begun to walk the tree. */ if (in->traversalValue) rect_box = in->traversalValue; @@ -493,8 +493,8 @@ spg_box_quad_inner_consistent(PG_FUNCTION_ARGS) rect_box = initRectBox(); /* - * We are casting the prefix and queries to RangeBoxes for ease of - * the following operations. + * We are casting the prefix and queries to RangeBoxes for ease of the + * following operations. 
*/ centroid = getRangeBox(DatumGetBoxP(in->prefixDatum)); queries = (RangeBox **) palloc(in->nkeys * sizeof(RangeBox *)); @@ -507,15 +507,15 @@ spg_box_quad_inner_consistent(PG_FUNCTION_ARGS) out->traversalValues = (void **) palloc(sizeof(void *) * in->nNodes); /* - * We switch memory context, because we want to allocate memory for - * new traversal values (next_rect_box) and pass these pieces of - * memory to further call of this function. + * We switch memory context, because we want to allocate memory for new + * traversal values (next_rect_box) and pass these pieces of memory to + * further call of this function. */ old_ctx = MemoryContextSwitchTo(in->traversalMemoryContext); for (quadrant = 0; quadrant < in->nNodes; quadrant++) { - RectBox *next_rect_box = nextRectBox(rect_box, centroid, quadrant); + RectBox *next_rect_box = nextRectBox(rect_box, centroid, quadrant); bool flag = true; for (i = 0; i < in->nkeys; i++) @@ -587,8 +587,8 @@ spg_box_quad_inner_consistent(PG_FUNCTION_ARGS) else { /* - * If this node is not selected, we don't need to keep - * the next traversal value in the memory context. + * If this node is not selected, we don't need to keep the next + * traversal value in the memory context. */ pfree(next_rect_box); } diff --git a/src/backend/utils/adt/jsonb_util.c b/src/backend/utils/adt/jsonb_util.c index e1ceea6da4..ddc34ceec7 100644 --- a/src/backend/utils/adt/jsonb_util.c +++ b/src/backend/utils/adt/jsonb_util.c @@ -1305,7 +1305,7 @@ compareJsonbScalarValue(JsonbValue *aScalar, JsonbValue *bScalar) case jbvBool: if (aScalar->val.boolean == bScalar->val.boolean) return 0; - else if (aScalar->val.boolean >bScalar->val.boolean) + else if (aScalar->val.boolean > bScalar->val.boolean) return 1; else return -1; diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c index fb149dcd47..cb14993542 100644 --- a/src/backend/utils/adt/jsonfuncs.c +++ b/src/backend/utils/adt/jsonfuncs.c @@ -609,7 +609,7 @@ jsonb_array_element(PG_FUNCTION_ARGS) /* Handle negative subscript */ if (element < 0) { - uint32 nelements = JB_ROOT_COUNT(jb); + uint32 nelements = JB_ROOT_COUNT(jb); if (-element > nelements) PG_RETURN_NULL(); @@ -652,7 +652,7 @@ jsonb_array_element_text(PG_FUNCTION_ARGS) /* Handle negative subscript */ if (element < 0) { - uint32 nelements = JB_ROOT_COUNT(jb); + uint32 nelements = JB_ROOT_COUNT(jb); if (-element > nelements) PG_RETURN_NULL(); @@ -992,7 +992,7 @@ get_array_start(void *state) _state->path_indexes[lex_level] != INT_MIN) { /* Negative subscript -- convert to positive-wise subscript */ - int nelements = json_count_array_elements(_state->lex); + int nelements = json_count_array_elements(_state->lex); if (-_state->path_indexes[lex_level] <= nelements) _state->path_indexes[lex_level] += nelements; @@ -1002,8 +1002,8 @@ get_array_start(void *state) { /* * Special case: we should match the entire array. We only need this - * at the outermost level because at nested levels the match will - * have been started by the outer field or array element callback. + * at the outermost level because at nested levels the match will have + * been started by the outer field or array element callback. */ _state->result_start = _state->lex->token_start; } @@ -3368,9 +3368,9 @@ jsonb_concat(PG_FUNCTION_ARGS) *it2; /* - * If one of the jsonb is empty, just return the other if it's not - * scalar and both are of the same kind. 
If it's a scalar or they are - * of different kinds we need to perform the concatenation even if one is + * If one of the jsonb is empty, just return the other if it's not scalar + * and both are of the same kind. If it's a scalar or they are of + * different kinds we need to perform the concatenation even if one is * empty. */ if (JB_ROOT_IS_OBJECT(jb1) == JB_ROOT_IS_OBJECT(jb2)) @@ -3481,7 +3481,7 @@ jsonb_delete_idx(PG_FUNCTION_ARGS) it = JsonbIteratorInit(&in->root); r = JsonbIteratorNext(&it, &v, false); - Assert (r == WJB_BEGIN_ARRAY); + Assert(r == WJB_BEGIN_ARRAY); n = v.val.array.nElems; if (idx < 0) @@ -3868,8 +3868,8 @@ setPathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls, if (level == path_len - 1) { /* - * called from jsonb_insert(), it forbids redefining - * an existsing value + * called from jsonb_insert(), it forbids redefining an + * existsing value */ if (op_type & (JB_PATH_INSERT_BEFORE | JB_PATH_INSERT_AFTER)) ereport(ERROR, @@ -3878,7 +3878,7 @@ setPathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls, errhint("Try using the function jsonb_set " "to replace key value."))); - r = JsonbIteratorNext(it, &v, true); /* skip value */ + r = JsonbIteratorNext(it, &v, true); /* skip value */ if (!(op_type & JB_PATH_DELETE)) { (void) pushJsonbValue(st, WJB_KEY, &k); @@ -4005,8 +4005,8 @@ setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls, /* * We should keep current value only in case of - * JB_PATH_INSERT_BEFORE or JB_PATH_INSERT_AFTER - * because otherwise it should be deleted or replaced + * JB_PATH_INSERT_BEFORE or JB_PATH_INSERT_AFTER because + * otherwise it should be deleted or replaced */ if (op_type & (JB_PATH_INSERT_AFTER | JB_PATH_INSERT_BEFORE)) (void) pushJsonbValue(st, r, &v); diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c index 39f43863d6..10133f21fa 100644 --- a/src/backend/utils/adt/misc.c +++ b/src/backend/utils/adt/misc.c @@ -849,13 +849,13 @@ parse_ident(PG_FUNCTION_ARGS) (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("string is not a valid identifier: \"%s\"", text_to_cstring(qualname)), - errdetail("No valid identifier before \".\"."))); + errdetail("No valid identifier before \".\"."))); else if (after_dot) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("string is not a valid identifier: \"%s\"", text_to_cstring(qualname)), - errdetail("No valid identifier after \".\"."))); + errdetail("No valid identifier after \".\"."))); else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c index 3d21e33a26..6592ef4d2d 100644 --- a/src/backend/utils/adt/numeric.c +++ b/src/backend/utils/adt/numeric.c @@ -3355,10 +3355,10 @@ numeric_accum(PG_FUNCTION_ARGS) Datum numeric_combine(PG_FUNCTION_ARGS) { - NumericAggState *state1; - NumericAggState *state2; - MemoryContext agg_context; - MemoryContext old_context; + NumericAggState *state1; + NumericAggState *state2; + MemoryContext agg_context; + MemoryContext old_context; if (!AggCheckCallContext(fcinfo, &agg_context)) elog(ERROR, "aggregate function called in non-aggregate context"); @@ -3397,8 +3397,8 @@ numeric_combine(PG_FUNCTION_ARGS) state1->NaNcount += state2->NaNcount; /* - * These are currently only needed for moving aggregates, but let's - * do the right thing anyway... + * These are currently only needed for moving aggregates, but let's do + * the right thing anyway... 
*/ if (state2->maxScale > state1->maxScale) { @@ -3446,10 +3446,10 @@ numeric_avg_accum(PG_FUNCTION_ARGS) Datum numeric_avg_combine(PG_FUNCTION_ARGS) { - NumericAggState *state1; - NumericAggState *state2; - MemoryContext agg_context; - MemoryContext old_context; + NumericAggState *state1; + NumericAggState *state2; + MemoryContext agg_context; + MemoryContext old_context; if (!AggCheckCallContext(fcinfo, &agg_context)) elog(ERROR, "aggregate function called in non-aggregate context"); @@ -3485,8 +3485,8 @@ numeric_avg_combine(PG_FUNCTION_ARGS) state1->NaNcount += state2->NaNcount; /* - * These are currently only needed for moving aggregates, but let's - * do the right thing anyway... + * These are currently only needed for moving aggregates, but let's do + * the right thing anyway... */ if (state2->maxScale > state1->maxScale) { @@ -3518,11 +3518,11 @@ numeric_avg_combine(PG_FUNCTION_ARGS) Datum numeric_avg_serialize(PG_FUNCTION_ARGS) { - NumericAggState *state; - StringInfoData buf; - Datum temp; - bytea *sumX; - bytea *result; + NumericAggState *state; + StringInfoData buf; + Datum temp; + bytea *sumX; + bytea *result; /* Ensure we disallow calling when not in aggregate context */ if (!AggCheckCallContext(fcinfo, NULL)) @@ -3549,7 +3549,7 @@ numeric_avg_serialize(PG_FUNCTION_ARGS) pq_sendbytes(&buf, VARDATA(sumX), VARSIZE(sumX) - VARHDRSZ); /* maxScale */ - pq_sendint(&buf, state->maxScale, 4); + pq_sendint(&buf, state->maxScale, 4); /* maxScaleCount */ pq_sendint64(&buf, state->maxScaleCount); @@ -3564,7 +3564,7 @@ numeric_avg_serialize(PG_FUNCTION_ARGS) /* * numeric_avg_deserialize - * Deserialize bytea into NumericAggState for numeric aggregates that + * Deserialize bytea into NumericAggState for numeric aggregates that * don't require sumX2. Deserializes bytea into NumericAggState using the * standard pq API. 
* @@ -3574,10 +3574,10 @@ numeric_avg_serialize(PG_FUNCTION_ARGS) Datum numeric_avg_deserialize(PG_FUNCTION_ARGS) { - bytea *sstate = PG_GETARG_BYTEA_P(0); - NumericAggState *result; - Datum temp; - StringInfoData buf; + bytea *sstate = PG_GETARG_BYTEA_P(0); + NumericAggState *result; + Datum temp; + StringInfoData buf; if (!AggCheckCallContext(fcinfo, NULL)) elog(ERROR, "aggregate function called in non-aggregate context"); @@ -3628,12 +3628,12 @@ numeric_avg_deserialize(PG_FUNCTION_ARGS) Datum numeric_serialize(PG_FUNCTION_ARGS) { - NumericAggState *state; - StringInfoData buf; - Datum temp; - bytea *sumX; - bytea *sumX2; - bytea *result; + NumericAggState *state; + StringInfoData buf; + Datum temp; + bytea *sumX; + bytea *sumX2; + bytea *result; /* Ensure we disallow calling when not in aggregate context */ if (!AggCheckCallContext(fcinfo, NULL)) @@ -3667,7 +3667,7 @@ numeric_serialize(PG_FUNCTION_ARGS) pq_sendbytes(&buf, VARDATA(sumX2), VARSIZE(sumX2) - VARHDRSZ); /* maxScale */ - pq_sendint(&buf, state->maxScale, 4); + pq_sendint(&buf, state->maxScale, 4); /* maxScaleCount */ pq_sendint64(&buf, state->maxScaleCount); @@ -3692,10 +3692,10 @@ numeric_serialize(PG_FUNCTION_ARGS) Datum numeric_deserialize(PG_FUNCTION_ARGS) { - bytea *sstate = PG_GETARG_BYTEA_P(0); - NumericAggState *result; - Datum temp; - StringInfoData buf; + bytea *sstate = PG_GETARG_BYTEA_P(0); + NumericAggState *result; + Datum temp; + StringInfoData buf; if (!AggCheckCallContext(fcinfo, NULL)) elog(ERROR, "aggregate function called in non-aggregate context"); @@ -3932,8 +3932,8 @@ numeric_poly_combine(PG_FUNCTION_ARGS) { PolyNumAggState *state1; PolyNumAggState *state2; - MemoryContext agg_context; - MemoryContext old_context; + MemoryContext agg_context; + MemoryContext old_context; if (!AggCheckCallContext(fcinfo, &agg_context)) elog(ERROR, "aggregate function called in non-aggregate context"); @@ -4001,11 +4001,11 @@ numeric_poly_combine(PG_FUNCTION_ARGS) Datum numeric_poly_serialize(PG_FUNCTION_ARGS) { - PolyNumAggState *state; - StringInfoData buf; - bytea *sumX; - bytea *sumX2; - bytea *result; + PolyNumAggState *state; + StringInfoData buf; + bytea *sumX; + bytea *sumX2; + bytea *result; /* Ensure we disallow calling when not in aggregate context */ if (!AggCheckCallContext(fcinfo, NULL)) @@ -4040,11 +4040,11 @@ numeric_poly_serialize(PG_FUNCTION_ARGS) free_var(&num); #else temp = DirectFunctionCall1(numeric_send, - NumericGetDatum(make_result(&state->sumX))); + NumericGetDatum(make_result(&state->sumX))); sumX = DatumGetByteaP(temp); temp = DirectFunctionCall1(numeric_send, - NumericGetDatum(make_result(&state->sumX2))); + NumericGetDatum(make_result(&state->sumX2))); sumX2 = DatumGetByteaP(temp); #endif } @@ -4076,11 +4076,11 @@ numeric_poly_serialize(PG_FUNCTION_ARGS) Datum numeric_poly_deserialize(PG_FUNCTION_ARGS) { - bytea *sstate = PG_GETARG_BYTEA_P(0); - PolyNumAggState *result; - Datum sumX; - Datum sumX2; - StringInfoData buf; + bytea *sstate = PG_GETARG_BYTEA_P(0); + PolyNumAggState *result; + Datum sumX; + Datum sumX2; + StringInfoData buf; if (!AggCheckCallContext(fcinfo, NULL)) elog(ERROR, "aggregate function called in non-aggregate context"); @@ -4105,13 +4105,13 @@ numeric_poly_deserialize(PG_FUNCTION_ARGS) /* sumX2 */ sumX2 = DirectFunctionCall3(numeric_recv, - PointerGetDatum(&buf), - InvalidOid, - -1); + PointerGetDatum(&buf), + InvalidOid, + -1); #ifdef HAVE_INT128 { - NumericVar num; + NumericVar num; init_var(&num); set_var_from_num(DatumGetNumeric(sumX), &num); @@ -4170,10 +4170,10 @@ 
int8_avg_accum(PG_FUNCTION_ARGS) Datum int8_avg_combine(PG_FUNCTION_ARGS) { - PolyNumAggState *state1; - PolyNumAggState *state2; - MemoryContext agg_context; - MemoryContext old_context; + PolyNumAggState *state1; + PolyNumAggState *state2; + MemoryContext agg_context; + MemoryContext old_context; if (!AggCheckCallContext(fcinfo, &agg_context)) elog(ERROR, "aggregate function called in non-aggregate context"); @@ -4233,10 +4233,10 @@ int8_avg_combine(PG_FUNCTION_ARGS) Datum int8_avg_serialize(PG_FUNCTION_ARGS) { - PolyNumAggState *state; - StringInfoData buf; - bytea *sumX; - bytea *result; + PolyNumAggState *state; + StringInfoData buf; + bytea *sumX; + bytea *result; /* Ensure we disallow calling when not in aggregate context */ if (!AggCheckCallContext(fcinfo, NULL)) @@ -4265,7 +4265,7 @@ int8_avg_serialize(PG_FUNCTION_ARGS) sumX = DatumGetByteaP(temp); #else temp = DirectFunctionCall1(numeric_send, - NumericGetDatum(make_result(&state->sumX))); + NumericGetDatum(make_result(&state->sumX))); sumX = DatumGetByteaP(temp); #endif } @@ -4293,10 +4293,10 @@ int8_avg_serialize(PG_FUNCTION_ARGS) Datum int8_avg_deserialize(PG_FUNCTION_ARGS) { - bytea *sstate = PG_GETARG_BYTEA_P(0); - PolyNumAggState *result; - StringInfoData buf; - Datum temp; + bytea *sstate = PG_GETARG_BYTEA_P(0); + PolyNumAggState *result; + StringInfoData buf; + Datum temp; if (!AggCheckCallContext(fcinfo, NULL)) elog(ERROR, "aggregate function called in non-aggregate context"); @@ -4321,7 +4321,7 @@ int8_avg_deserialize(PG_FUNCTION_ARGS) #ifdef HAVE_INT128 { - NumericVar num; + NumericVar num; init_var(&num); set_var_from_num(DatumGetNumeric(temp), &num); diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c index 17c5cb0fb6..1bba5fa8c8 100644 --- a/src/backend/utils/adt/pgstatfuncs.c +++ b/src/backend/utils/adt/pgstatfuncs.c @@ -27,7 +27,7 @@ #include "utils/inet.h" #include "utils/timestamp.h" -#define UINT32_ACCESS_ONCE(var) ((uint32)(*((volatile uint32 *)&(var)))) +#define UINT32_ACCESS_ONCE(var) ((uint32)(*((volatile uint32 *)&(var)))) /* bogus ... 
these externs should be in a header file */ extern Datum pg_stat_get_numscans(PG_FUNCTION_ARGS); @@ -540,7 +540,7 @@ pg_stat_get_progress_info(PG_FUNCTION_ARGS) int num_backends = pgstat_fetch_stat_numbackends(); int curr_backend; char *cmd = text_to_cstring(PG_GETARG_TEXT_PP(0)); - ProgressCommandType cmdtype; + ProgressCommandType cmdtype; TupleDesc tupdesc; Tuplestorestate *tupstore; ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; @@ -582,8 +582,8 @@ pg_stat_get_progress_info(PG_FUNCTION_ARGS) /* 1-based index */ for (curr_backend = 1; curr_backend <= num_backends; curr_backend++) { - LocalPgBackendStatus *local_beentry; - PgBackendStatus *beentry; + LocalPgBackendStatus *local_beentry; + PgBackendStatus *beentry; Datum values[PG_STAT_GET_PROGRESS_COLS]; bool nulls[PG_STAT_GET_PROGRESS_COLS]; int i; @@ -613,14 +613,14 @@ pg_stat_get_progress_info(PG_FUNCTION_ARGS) if (has_privs_of_role(GetUserId(), beentry->st_userid)) { values[2] = ObjectIdGetDatum(beentry->st_progress_command_target); - for(i = 0; i < PGSTAT_NUM_PROGRESS_PARAM; i++) - values[i+3] = Int64GetDatum(beentry->st_progress_param[i]); + for (i = 0; i < PGSTAT_NUM_PROGRESS_PARAM; i++) + values[i + 3] = Int64GetDatum(beentry->st_progress_param[i]); } else { nulls[2] = true; for (i = 0; i < PGSTAT_NUM_PROGRESS_PARAM; i++) - nulls[i+3] = true; + nulls[i + 3] = true; } tuplestore_putvalues(tupstore, tupdesc, values, nulls); @@ -787,7 +787,7 @@ pg_stat_get_activity(PG_FUNCTION_ARGS) proc = BackendPidGetProc(beentry->st_procpid); if (proc != NULL) { - uint32 raw_wait_event; + uint32 raw_wait_event; raw_wait_event = UINT32_ACCESS_ONCE(proc->wait_event_info); wait_event_type = pgstat_get_wait_event_type(raw_wait_event); diff --git a/src/backend/utils/adt/rangetypes_spgist.c b/src/backend/utils/adt/rangetypes_spgist.c index 7d35d58112..b89e90f908 100644 --- a/src/backend/utils/adt/rangetypes_spgist.c +++ b/src/backend/utils/adt/rangetypes_spgist.c @@ -760,13 +760,14 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS) /* Save previous prefix if needed */ if (needPrevious) { - Datum previousCentroid; + Datum previousCentroid; - /* We know, that in->prefixDatum in this place is varlena, + /* + * We know, that in->prefixDatum in this place is varlena, * because it's range */ previousCentroid = datumCopy(in->prefixDatum, false, -1); - out->traversalValues[out->nNodes] = (void *)previousCentroid; + out->traversalValues[out->nNodes] = (void *) previousCentroid; } out->nodeNumbers[out->nNodes] = i - 1; out->nNodes++; diff --git a/src/backend/utils/adt/tsginidx.c b/src/backend/utils/adt/tsginidx.c index ebc11c9e4e..b096329143 100644 --- a/src/backend/utils/adt/tsginidx.c +++ b/src/backend/utils/adt/tsginidx.c @@ -184,8 +184,8 @@ checkcondition_gin_internal(GinChkVal *gcv, QueryOperand *val, ExecPhraseData *d int j; /* - * if any val requiring a weight is used or caller - * needs position information then set recheck flag + * if any val requiring a weight is used or caller needs position + * information then set recheck flag */ if (val->weight != 0 || data != NULL) *gcv->need_recheck = true; @@ -236,9 +236,10 @@ TS_execute_ternary(GinChkVal *gcv, QueryItem *curitem) return !result; case OP_PHRASE: + /* - * GIN doesn't contain any information about positions, - * treat OP_PHRASE as OP_AND with recheck requirement + * GIN doesn't contain any information about positions, treat + * OP_PHRASE as OP_AND with recheck requirement */ *gcv->need_recheck = true; /* FALL THRU */ diff --git a/src/backend/utils/adt/tsquery.c 
b/src/backend/utils/adt/tsquery.c index eea6e0eae1..21a18bfbc4 100644 --- a/src/backend/utils/adt/tsquery.c +++ b/src/backend/utils/adt/tsquery.c @@ -24,12 +24,12 @@ #include "utils/pg_crc.h" /* FTS operator priorities, see ts_type.h */ -const int tsearch_op_priority[OP_COUNT] = +const int tsearch_op_priority[OP_COUNT] = { - 3, /* OP_NOT */ - 2, /* OP_AND */ - 1, /* OP_OR */ - 4 /* OP_PHRASE */ + 3, /* OP_NOT */ + 2, /* OP_AND */ + 1, /* OP_OR */ + 4 /* OP_PHRASE */ }; struct TSQueryParserStateData @@ -128,15 +128,15 @@ parse_phrase_operator(char *buf, int16 *distance) PHRASE_CLOSE, PHRASE_ERR, PHRASE_FINISH - } state = PHRASE_OPEN; + } state = PHRASE_OPEN; - char *ptr = buf; - char *endptr; - long l = 1; + char *ptr = buf; + char *endptr; + long l = 1; while (*ptr) { - switch(state) + switch (state) { case PHRASE_OPEN: Assert(t_iseq(ptr, '<')); @@ -192,7 +192,7 @@ parse_phrase_operator(char *buf, int16 *distance) } } - err: +err: *distance = -1; return buf; } @@ -440,18 +440,18 @@ makepol(TSQueryParserState state, PushFunction pushval, Datum opaque) { - int8 operator = 0; - ts_tokentype type; - int lenval = 0; - char *strval = NULL; + int8 operator = 0; + ts_tokentype type; + int lenval = 0; + char *strval = NULL; struct { - int8 op; - int16 distance; - } opstack[STACKDEPTH]; - int lenstack = 0; - int16 weight = 0; - bool prefix; + int8 op; + int16 distance; + } opstack[STACKDEPTH]; + int lenstack = 0; + int16 weight = 0; + bool prefix; /* since this function recurses, it could be driven to stack overflow */ check_stack_depth(); @@ -538,7 +538,7 @@ findoprnd_recurse(QueryItem *ptr, uint32 *pos, int nnodes, bool *needcleanup) } else if (ptr[*pos].type == QI_VALSTOP) { - *needcleanup = true; /* we'll have to remove stop words */ + *needcleanup = true; /* we'll have to remove stop words */ (*pos)++; } else @@ -547,7 +547,7 @@ findoprnd_recurse(QueryItem *ptr, uint32 *pos, int nnodes, bool *needcleanup) if (ptr[*pos].qoperator.oper == OP_NOT) { - ptr[*pos].qoperator.left = 1; /* fixed offset */ + ptr[*pos].qoperator.left = 1; /* fixed offset */ (*pos)++; /* process the only argument */ @@ -555,15 +555,15 @@ findoprnd_recurse(QueryItem *ptr, uint32 *pos, int nnodes, bool *needcleanup) } else { - QueryOperator *curitem = &ptr[*pos].qoperator; - int tmp = *pos; /* save current position */ + QueryOperator *curitem = &ptr[*pos].qoperator; + int tmp = *pos; /* save current position */ Assert(curitem->oper == OP_AND || curitem->oper == OP_OR || curitem->oper == OP_PHRASE); if (curitem->oper == OP_PHRASE) - *needcleanup = true; /* push OP_PHRASE down later */ + *needcleanup = true; /* push OP_PHRASE down later */ (*pos)++; @@ -669,7 +669,7 @@ parse_tsquery(char *buf, i = 0; foreach(cell, state.polstr) { - QueryItem *item = (QueryItem *) lfirst(cell); + QueryItem *item = (QueryItem *) lfirst(cell); switch (item->type) { @@ -696,8 +696,8 @@ parse_tsquery(char *buf, findoprnd(ptr, query->size, &needcleanup); /* - * QI_VALSTOP nodes should be cleaned and - * and OP_PHRASE should be pushed down + * QI_VALSTOP nodes should be cleaned and and OP_PHRASE should be pushed + * down */ if (needcleanup) return cleanup_fakeval_and_phrase(query); @@ -819,7 +819,7 @@ infix(INFIX *in, int parentPriority) } else if (in->curpol->qoperator.oper == OP_NOT) { - int priority = PRINT_PRIORITY(in->curpol); + int priority = PRINT_PRIORITY(in->curpol); if (priority < parentPriority) { @@ -852,8 +852,9 @@ infix(INFIX *in, int parentPriority) in->curpol++; if (priority < parentPriority || (op == OP_PHRASE && - (priority == 
parentPriority || /* phrases are not commutative! */ - parentPriority == OP_PRIORITY(OP_AND)))) + (priority == parentPriority || /* phrases are not + * commutative! */ + parentPriority == OP_PRIORITY(OP_AND)))) { needParenthesis = true; RESIZEBUF(in, 2); @@ -874,7 +875,7 @@ infix(INFIX *in, int parentPriority) infix(in, priority); /* print operator & right operand */ - RESIZEBUF(in, 3 + (2 + 10 /* distance */) + (nrm.cur - nrm.buf)); + RESIZEBUF(in, 3 + (2 + 10 /* distance */ ) + (nrm.cur - nrm.buf)); switch (op) { case OP_OR: @@ -923,7 +924,7 @@ tsqueryout(PG_FUNCTION_ARGS) nrm.cur = nrm.buf = (char *) palloc(sizeof(char) * nrm.buflen); *(nrm.cur) = '\0'; nrm.op = GETOPERAND(query); - infix(&nrm, -1 /* lowest priority */); + infix(&nrm, -1 /* lowest priority */ ); PG_FREE_IF_COPY(query, 0); PG_RETURN_CSTRING(nrm.buf); @@ -989,16 +990,16 @@ tsquerysend(PG_FUNCTION_ARGS) Datum tsqueryrecv(PG_FUNCTION_ARGS) { - StringInfo buf = (StringInfo) PG_GETARG_POINTER(0); - TSQuery query; - int i, - len; - QueryItem *item; - int datalen; - char *ptr; - uint32 size; - const char **operands; - bool needcleanup; + StringInfo buf = (StringInfo) PG_GETARG_POINTER(0); + TSQuery query; + int i, + len; + QueryItem *item; + int datalen; + char *ptr; + uint32 size; + const char **operands; + bool needcleanup; size = pq_getmsgint(buf, sizeof(uint32)); if (size > (MaxAllocSize / sizeof(QueryItem))) diff --git a/src/backend/utils/adt/tsquery_cleanup.c b/src/backend/utils/adt/tsquery_cleanup.c index 0f6ef6f541..6c74070e45 100644 --- a/src/backend/utils/adt/tsquery_cleanup.c +++ b/src/backend/utils/adt/tsquery_cleanup.c @@ -224,8 +224,8 @@ clean_NOT(QueryItem *ptr, int *len) static NODE * clean_fakeval_intree(NODE *node, char *result, int *adddistance) { - char lresult = V_UNKNOWN, - rresult = V_UNKNOWN; + char lresult = V_UNKNOWN, + rresult = V_UNKNOWN; /* since this function recurses, it could be driven to stack overflow. */ check_stack_depth(); @@ -256,24 +256,26 @@ clean_fakeval_intree(NODE *node, char *result, int *adddistance) } else { - NODE *res = node; - int ndistance, ldistance = 0, rdistance = 0; + NODE *res = node; + int ndistance, + ldistance = 0, + rdistance = 0; ndistance = (node->valnode->qoperator.oper == OP_PHRASE) ? - node->valnode->qoperator.distance : - 0; + node->valnode->qoperator.distance : + 0; - node->left = clean_fakeval_intree(node->left, - &lresult, - ndistance ? &ldistance : NULL); + node->left = clean_fakeval_intree(node->left, + &lresult, + ndistance ? &ldistance : NULL); node->right = clean_fakeval_intree(node->right, &rresult, ndistance ? &rdistance : NULL); /* - * ndistance, ldistance and rdistance are greater than zero - * if their corresponding nodes are OP_PHRASE + * ndistance, ldistance and rdistance are greater than zero if their + * corresponding nodes are OP_PHRASE */ if (lresult == V_STOP && rresult == V_STOP) @@ -287,9 +289,10 @@ clean_fakeval_intree(NODE *node, char *result, int *adddistance) else if (lresult == V_STOP) { res = node->right; + /* - * propagate distance from current node to the - * right upper subtree. + * propagate distance from current node to the right upper + * subtree. */ if (adddistance && ndistance) *adddistance = rdistance; @@ -298,6 +301,7 @@ clean_fakeval_intree(NODE *node, char *result, int *adddistance) else if (rresult == V_STOP) { res = node->left; + /* * propagate distance from current node to the upper tree. 
*/ @@ -324,7 +328,7 @@ clean_fakeval_intree(NODE *node, char *result, int *adddistance) static NODE * copyNODE(NODE *node) { - NODE *cnode = palloc(sizeof(NODE)); + NODE *cnode = palloc(sizeof(NODE)); /* since this function recurses, it could be driven to stack overflow. */ check_stack_depth(); @@ -345,7 +349,7 @@ copyNODE(NODE *node) static NODE * makeNODE(int8 op, NODE *left, NODE *right) { - NODE *node = palloc(sizeof(NODE)); + NODE *node = palloc(sizeof(NODE)); /* zeroing allocation to prevent difference in unused bytes */ node->valnode = palloc0(sizeof(QueryItem)); @@ -368,15 +372,15 @@ makeNODE(int8 op, NODE *left, NODE *right) * <-> () operation since it's needed solely for the phrase operator. * * Rules: - * a <-> (b | c) => (a <-> b) | (a <-> c) - * (a | b) <-> c => (a <-> c) | (b <-> c) - * a <-> !b => a & !(a <-> b) - * !a <-> b => b & !(a <-> b) + * a <-> (b | c) => (a <-> b) | (a <-> c) + * (a | b) <-> c => (a <-> c) | (b <-> c) + * a <-> !b => a & !(a <-> b) + * !a <-> b => b & !(a <-> b) * * Warnings for readers: * a <-> b != b <-> a * - * a (b c) != (a b) c since the phrase lengths are: + * a (b c) != (a b) c since the phrase lengths are: * n 2n-1 */ static NODE * @@ -397,7 +401,7 @@ normalize_phrase_tree(NODE *node) { /* eliminate NOT sequence */ while (node->valnode->type == QI_OPR && - node->valnode->qoperator.oper == node->right->valnode->qoperator.oper) + node->valnode->qoperator.oper == node->right->valnode->qoperator.oper) { node = node->right->right; } @@ -406,19 +410,19 @@ normalize_phrase_tree(NODE *node) } else if (node->valnode->qoperator.oper == OP_PHRASE) { - int16 distance; - NODE *X; + int16 distance; + NODE *X; node->left = normalize_phrase_tree(node->left); node->right = normalize_phrase_tree(node->right); if (NODE_PRIORITY(node) <= NODE_PRIORITY(node->right) && NODE_PRIORITY(node) <= NODE_PRIORITY(node->left)) - return node; + return node; /* - * We can't swap left-right and works only with left child - * because of a <-> b != b <-> a + * We can't swap left-right and works only with left child because of + * a <-> b != b <-> a */ distance = node->valnode->qoperator.distance; @@ -464,8 +468,8 @@ normalize_phrase_tree(NODE *node) /* no-op */ break; default: - elog(ERROR,"Wrong type of tsquery node: %d", - node->right->valnode->qoperator.oper); + elog(ERROR, "Wrong type of tsquery node: %d", + node->right->valnode->qoperator.oper); } } @@ -476,10 +480,10 @@ normalize_phrase_tree(NODE *node) * if the node is still OP_PHRASE, check the left subtree, * otherwise the whole node will be transformed later. 
*/ - switch(node->left->valnode->qoperator.oper) + switch (node->left->valnode->qoperator.oper) { case OP_AND: - /* (a & b) <-> c => (a <-> c) & (b <-> c) */ + /* (a & b) <-> c => (a <-> c) & (b <-> c) */ node = makeNODE(OP_AND, makeNODE(OP_PHRASE, node->left->left, @@ -515,15 +519,15 @@ normalize_phrase_tree(NODE *node) /* no-op */ break; default: - elog(ERROR,"Wrong type of tsquery node: %d", - node->left->valnode->qoperator.oper); + elog(ERROR, "Wrong type of tsquery node: %d", + node->left->valnode->qoperator.oper); } } /* continue transformation */ node = normalize_phrase_tree(node); } - else /* AND or OR */ + else /* AND or OR */ { node->left = normalize_phrase_tree(node->left); node->right = normalize_phrase_tree(node->right); @@ -538,7 +542,7 @@ normalize_phrase_tree(NODE *node) static int32 calcstrlen(NODE *node) { - int32 size = 0; + int32 size = 0; if (node->valnode->type == QI_VAL) { diff --git a/src/backend/utils/adt/tsquery_op.c b/src/backend/utils/adt/tsquery_op.c index 5ea02c41c7..a574b4b257 100644 --- a/src/backend/utils/adt/tsquery_op.c +++ b/src/backend/utils/adt/tsquery_op.c @@ -147,10 +147,10 @@ Datum tsquery_phrase(PG_FUNCTION_ARGS) { PG_RETURN_POINTER(DirectFunctionCall3( - tsquery_phrase_distance, - PG_GETARG_DATUM(0), - PG_GETARG_DATUM(1), - Int32GetDatum(1))); + tsquery_phrase_distance, + PG_GETARG_DATUM(0), + PG_GETARG_DATUM(1), + Int32GetDatum(1))); } Datum diff --git a/src/backend/utils/adt/tsrank.c b/src/backend/utils/adt/tsrank.c index ab47b763ee..32023821b3 100644 --- a/src/backend/utils/adt/tsrank.c +++ b/src/backend/utils/adt/tsrank.c @@ -366,8 +366,8 @@ calc_rank(const float *w, TSVector t, TSQuery q, int32 method) /* XXX: What about NOT? */ res = (item->type == QI_OPR && (item->qoperator.oper == OP_AND || item->qoperator.oper == OP_PHRASE)) ? 
- calc_rank_and(w, t, q) : - calc_rank_or(w, t, q); + calc_rank_and(w, t, q) : + calc_rank_or(w, t, q); if (res < 0) res = 1e-20f; @@ -498,17 +498,21 @@ ts_rank_tt(PG_FUNCTION_ARGS) typedef struct { - union { - struct { /* compiled doc representation */ + union + { + struct + { /* compiled doc representation */ QueryItem **items; int16 nitem; - } query; - struct { /* struct is used for preparing doc representation */ + } query; + struct + { /* struct is used for preparing doc + * representation */ QueryItem *item; WordEntry *entry; - } map; - } data; - WordEntryPos pos; + } map; + } data; + WordEntryPos pos; } DocRepresentation; static int @@ -536,16 +540,16 @@ compareDocR(const void *va, const void *vb) #define MAXQROPOS MAXENTRYPOS typedef struct { - bool operandexists; - bool reverseinsert; /* indicates insert order, - true means descending order */ - uint32 npos; - WordEntryPos pos[MAXQROPOS]; + bool operandexists; + bool reverseinsert; /* indicates insert order, true means + * descending order */ + uint32 npos; + WordEntryPos pos[MAXQROPOS]; } QueryRepresentationOperand; typedef struct { - TSQuery query; + TSQuery query; QueryRepresentationOperand *operandData; } QueryRepresentation; @@ -555,8 +559,8 @@ typedef struct static bool checkcondition_QueryOperand(void *checkval, QueryOperand *val, ExecPhraseData *data) { - QueryRepresentation *qr = (QueryRepresentation *) checkval; - QueryRepresentationOperand *opData = QR_GET_OPERAND_DATA(qr, val); + QueryRepresentation *qr = (QueryRepresentation *) checkval; + QueryRepresentationOperand *opData = QR_GET_OPERAND_DATA(qr, val); if (!opData->operandexists) return false; @@ -584,9 +588,9 @@ typedef struct static void resetQueryRepresentation(QueryRepresentation *qr, bool reverseinsert) { - int i; + int i; - for(i = 0; i < qr->query->size; i++) + for (i = 0; i < qr->query->size; i++) { qr->operandData[i].operandexists = false; qr->operandData[i].reverseinsert = reverseinsert; @@ -597,8 +601,8 @@ resetQueryRepresentation(QueryRepresentation *qr, bool reverseinsert) static void fillQueryRepresentationData(QueryRepresentation *qr, DocRepresentation *entry) { - int i; - int lastPos; + int i; + int lastPos; QueryRepresentationOperand *opData; for (i = 0; i < entry->data.query.nitem; i++) @@ -619,14 +623,14 @@ fillQueryRepresentationData(QueryRepresentation *qr, DocRepresentation *entry) } lastPos = opData->reverseinsert ? - (MAXQROPOS - opData->npos) : - (opData->npos - 1); + (MAXQROPOS - opData->npos) : + (opData->npos - 1); if (WEP_GETPOS(opData->pos[lastPos]) != WEP_GETPOS(entry->pos)) { lastPos = opData->reverseinsert ? - (MAXQROPOS - 1 - opData->npos) : - (opData->npos); + (MAXQROPOS - 1 - opData->npos) : + (opData->npos); opData->pos[lastPos] = entry->pos; opData->npos++; @@ -637,9 +641,9 @@ fillQueryRepresentationData(QueryRepresentation *qr, DocRepresentation *entry) static bool Cover(DocRepresentation *doc, int len, QueryRepresentation *qr, CoverExt *ext) { - DocRepresentation *ptr; - int lastpos = ext->pos; - bool found = false; + DocRepresentation *ptr; + int lastpos = ext->pos; + bool found = false; /* * since this function recurses, it could be driven to stack overflow. 
@@ -720,7 +724,7 @@ get_docrep(TSVector txt, QueryRepresentation *qr, int *doclen) WordEntry *entry, *firstentry; WordEntryPos *post; - int32 dimt, /* number of 'post' items */ + int32 dimt, /* number of 'post' items */ j, i, nitem; @@ -731,8 +735,8 @@ get_docrep(TSVector txt, QueryRepresentation *qr, int *doclen) doc = (DocRepresentation *) palloc(sizeof(DocRepresentation) * len); /* - * Iterate through query to make DocRepresentaion for words and it's entries - * satisfied by query + * Iterate through query to make DocRepresentaion for words and it's + * entries satisfied by query */ for (i = 0; i < qr->query->size; i++) { @@ -787,9 +791,9 @@ get_docrep(TSVector txt, QueryRepresentation *qr, int *doclen) if (cur > 0) { - DocRepresentation *rptr = doc + 1, - *wptr = doc, - storage; + DocRepresentation *rptr = doc + 1, + *wptr = doc, + storage; /* * Sort representation in ascending order by pos and entry @@ -806,8 +810,8 @@ get_docrep(TSVector txt, QueryRepresentation *qr, int *doclen) while (rptr - doc < cur) { - if (rptr->pos == (rptr-1)->pos && - rptr->data.map.entry == (rptr-1)->data.map.entry) + if (rptr->pos == (rptr - 1)->pos && + rptr->data.map.entry == (rptr - 1)->data.map.entry) { storage.data.query.items[storage.data.query.nitem] = rptr->data.map.item; storage.data.query.nitem++; @@ -865,7 +869,7 @@ calc_rank_cd(const float4 *arrdata, TSVector txt, TSQuery query, int method) qr.query = query; qr.operandData = (QueryRepresentationOperand *) - palloc0(sizeof(QueryRepresentationOperand) * query->size); + palloc0(sizeof(QueryRepresentationOperand) * query->size); doc = get_docrep(txt, &qr, &doclen); if (!doc) diff --git a/src/backend/utils/adt/tsvector_op.c b/src/backend/utils/adt/tsvector_op.c index 591e59cf99..6117ba9b3e 100644 --- a/src/backend/utils/adt/tsvector_op.c +++ b/src/backend/utils/adt/tsvector_op.c @@ -66,7 +66,7 @@ typedef struct #define STATHDRSIZE (offsetof(TSVectorStat, data)) static Datum tsvector_update_trigger(PG_FUNCTION_ARGS, bool config_column); -static int tsvector_bsearch(const TSVector tsv, char *lexeme, int lexeme_len); +static int tsvector_bsearch(const TSVector tsv, char *lexeme, int lexeme_len); /* * Order: haspos, len, word, for all positions (pos, weight) @@ -276,16 +276,20 @@ tsvector_setweight_by_filter(PG_FUNCTION_ARGS) switch (char_weight) { - case 'A': case 'a': + case 'A': + case 'a': weight = 3; break; - case 'B': case 'b': + case 'B': + case 'b': weight = 2; break; - case 'C': case 'c': + case 'C': + case 'c': weight = 1; break; - case 'D': case 'd': + case 'D': + case 'd': weight = 0; break; default: @@ -301,15 +305,15 @@ tsvector_setweight_by_filter(PG_FUNCTION_ARGS) &dlexemes, &nulls, &nlexemes); /* - * Assuming that lexemes array is significantly shorter than tsvector - * we can iterate through lexemes performing binary search - * of each lexeme from lexemes in tsvector. + * Assuming that lexemes array is significantly shorter than tsvector we + * can iterate through lexemes performing binary search of each lexeme + * from lexemes in tsvector. 
*/ for (i = 0; i < nlexemes; i++) { - char *lex; - int lex_len, - lex_pos; + char *lex; + int lex_len, + lex_pos; if (nulls[i]) ereport(ERROR, @@ -323,6 +327,7 @@ tsvector_setweight_by_filter(PG_FUNCTION_ARGS) if (lex_pos >= 0 && (j = POSDATALEN(tsout, entry + lex_pos)) != 0) { WordEntryPos *p = POSDATAPTR(tsout, entry + lex_pos); + while (j--) { WEP_SETWEIGHT(*p, weight); @@ -393,18 +398,18 @@ tsvector_bsearch(const TSVector tsv, char *lexeme, int lexeme_len) while (StopLow < StopHigh) { - StopMiddle = (StopLow + StopHigh)/2; + StopMiddle = (StopLow + StopHigh) / 2; cmp = tsCompareString(lexeme, lexeme_len, - STRPTR(tsv) + arrin[StopMiddle].pos, - arrin[StopMiddle].len, - false); + STRPTR(tsv) + arrin[StopMiddle].pos, + arrin[StopMiddle].len, + false); if (cmp < 0) StopHigh = StopMiddle; else if (cmp > 0) StopLow = StopMiddle + 1; - else /* found it */ + else /* found it */ return StopMiddle; } @@ -440,13 +445,15 @@ tsvector_delete_by_indices(TSVector tsv, int *indices_to_delete, *arrout; char *data = STRPTR(tsv), *dataout; - int i, j, k, + int i, + j, + k, curoff; /* * Here we overestimates tsout size, since we don't know exact size - * occupied by positions and weights. We will set exact size later - * after a pass through TSVector. + * occupied by positions and weights. We will set exact size later after a + * pass through TSVector. */ tsout = (TSVector) palloc0(VARSIZE(tsv)); arrout = ARRPTR(tsout); @@ -465,10 +472,11 @@ tsvector_delete_by_indices(TSVector tsv, int *indices_to_delete, { /* * Here we should check whether current i is present in - * indices_to_delete or not. Since indices_to_delete is already - * sorted we can advance it index only when we have match. + * indices_to_delete or not. Since indices_to_delete is already sorted + * we can advance it index only when we have match. */ - if (k < indices_count && i == indices_to_delete[k]){ + if (k < indices_count && i == indices_to_delete[k]) + { k++; continue; } @@ -481,8 +489,9 @@ tsvector_delete_by_indices(TSVector tsv, int *indices_to_delete, curoff += arrin[i].len; if (arrin[i].haspos) { - int len = POSDATALEN(tsv, arrin+i) * sizeof(WordEntryPos) + - sizeof(uint16); + int len = POSDATALEN(tsv, arrin + i) * sizeof(WordEntryPos) + + sizeof(uint16); + curoff = SHORTALIGN(curoff); memcpy(dataout + curoff, STRPTR(tsv) + SHORTALIGN(arrin[i].pos + arrin[i].len), @@ -494,9 +503,10 @@ tsvector_delete_by_indices(TSVector tsv, int *indices_to_delete, } /* - * After the pass through TSVector k should equals exactly to indices_count. - * If it isn't then the caller provided us with indices outside of - * [0, tsv->size) range and estimation of tsout's size is wrong. + * After the pass through TSVector k should equals exactly to + * indices_count. If it isn't then the caller provided us with indices + * outside of [0, tsv->size) range and estimation of tsout's size is + * wrong. */ Assert(k == indices_count); @@ -538,7 +548,8 @@ tsvector_delete_arr(PG_FUNCTION_ARGS) TSVector tsin = PG_GETARG_TSVECTOR(0), tsout; ArrayType *lexemes = PG_GETARG_ARRAYTYPE_P(1); - int i, nlex, + int i, + nlex, skip_count, *skip_indices; Datum *dlexemes; @@ -548,16 +559,16 @@ tsvector_delete_arr(PG_FUNCTION_ARGS) &dlexemes, &nulls, &nlex); /* - * In typical use case array of lexemes to delete is relatively small. - * So here we optimizing things for that scenario: iterate through lexarr + * In typical use case array of lexemes to delete is relatively small. 
So + * here we optimizing things for that scenario: iterate through lexarr * performing binary search of each lexeme from lexarr in tsvector. */ skip_indices = palloc0(nlex * sizeof(int)); for (i = skip_count = 0; i < nlex; i++) { - char *lex; - int lex_len, - lex_pos; + char *lex; + int lex_len, + lex_pos; if (nulls[i]) ereport(ERROR, @@ -583,15 +594,15 @@ tsvector_delete_arr(PG_FUNCTION_ARGS) /* * Expand tsvector as table with following columns: - * lexeme: lexeme text - * positions: integer array of lexeme positions - * weights: char array of weights corresponding to positions + * lexeme: lexeme text + * positions: integer array of lexeme positions + * weights: char array of weights corresponding to positions */ Datum tsvector_unnest(PG_FUNCTION_ARGS) { - FuncCallContext *funcctx; - TSVector tsin; + FuncCallContext *funcctx; + TSVector tsin; if (SRF_IS_FIRSTCALL()) { @@ -629,8 +640,8 @@ tsvector_unnest(PG_FUNCTION_ARGS) Datum values[3]; values[0] = PointerGetDatum( - cstring_to_text_with_len(data + arrin[i].pos, arrin[i].len) - ); + cstring_to_text_with_len(data + arrin[i].pos, arrin[i].len) + ); if (arrin[i].haspos) { @@ -641,25 +652,25 @@ tsvector_unnest(PG_FUNCTION_ARGS) /* * Internally tsvector stores position and weight in the same - * uint16 (2 bits for weight, 14 for position). Here we extract that - * in two separate arrays. + * uint16 (2 bits for weight, 14 for position). Here we extract + * that in two separate arrays. */ posv = _POSVECPTR(tsin, arrin + i); positions = palloc(posv->npos * sizeof(Datum)); - weights = palloc(posv->npos * sizeof(Datum)); + weights = palloc(posv->npos * sizeof(Datum)); for (j = 0; j < posv->npos; j++) { positions[j] = Int16GetDatum(WEP_GETPOS(posv->pos[j])); weight = 'D' - WEP_GETWEIGHT(posv->pos[j]); weights[j] = PointerGetDatum( - cstring_to_text_with_len(&weight, 1) - ); + cstring_to_text_with_len(&weight, 1) + ); } values[1] = PointerGetDatum( - construct_array(positions, posv->npos, INT2OID, 2, true, 's')); + construct_array(positions, posv->npos, INT2OID, 2, true, 's')); values[2] = PointerGetDatum( - construct_array(weights, posv->npos, TEXTOID, -1, false, 'i')); + construct_array(weights, posv->npos, TEXTOID, -1, false, 'i')); } else { @@ -682,19 +693,19 @@ tsvector_unnest(PG_FUNCTION_ARGS) Datum tsvector_to_array(PG_FUNCTION_ARGS) { - TSVector tsin = PG_GETARG_TSVECTOR(0); - WordEntry *arrin = ARRPTR(tsin); - Datum *elements; - int i; - ArrayType *array; + TSVector tsin = PG_GETARG_TSVECTOR(0); + WordEntry *arrin = ARRPTR(tsin); + Datum *elements; + int i; + ArrayType *array; elements = palloc(tsin->size * sizeof(Datum)); for (i = 0; i < tsin->size; i++) { elements[i] = PointerGetDatum( - cstring_to_text_with_len(STRPTR(tsin) + arrin[i].pos, arrin[i].len) - ); + cstring_to_text_with_len(STRPTR(tsin) + arrin[i].pos, arrin[i].len) + ); } array = construct_array(elements, tsin->size, TEXTOID, -1, false, 'i'); @@ -742,8 +753,8 @@ array_to_tsvector(PG_FUNCTION_ARGS) for (i = 0; i < nitems; i++) { - char *lex = VARDATA(dlexemes[i]); - int lex_len = VARSIZE_ANY_EXHDR(dlexemes[i]); + char *lex = VARDATA(dlexemes[i]); + int lex_len = VARSIZE_ANY_EXHDR(dlexemes[i]); memcpy(cur, lex, lex_len); arrout[i].haspos = 0; @@ -772,7 +783,8 @@ tsvector_filter(PG_FUNCTION_ARGS) Datum *dweights; bool *nulls; int nweights; - int i, j; + int i, + j; int cur_pos = 0; char mask = 0; @@ -781,7 +793,7 @@ tsvector_filter(PG_FUNCTION_ARGS) for (i = 0; i < nweights; i++) { - char char_weight; + char char_weight; if (nulls[i]) ereport(ERROR, @@ -791,22 +803,26 @@ 
tsvector_filter(PG_FUNCTION_ARGS) char_weight = DatumGetChar(dweights[i]); switch (char_weight) { - case 'A': case 'a': + case 'A': + case 'a': mask = mask | 8; break; - case 'B': case 'b': + case 'B': + case 'b': mask = mask | 4; break; - case 'C': case 'c': + case 'C': + case 'c': mask = mask | 2; break; - case 'D': case 'd': + case 'D': + case 'd': mask = mask | 1; break; default: - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("unrecognized weight: \"%c\"", char_weight))); + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("unrecognized weight: \"%c\"", char_weight))); } } @@ -818,16 +834,16 @@ tsvector_filter(PG_FUNCTION_ARGS) for (i = j = 0; i < tsin->size; i++) { WordEntryPosVector *posvin, - *posvout; - int npos = 0; - int k; + *posvout; + int npos = 0; + int k; if (!arrin[i].haspos) continue; - posvin = _POSVECPTR(tsin, arrin + i); + posvin = _POSVECPTR(tsin, arrin + i); posvout = (WordEntryPosVector *) - (dataout + SHORTALIGN(cur_pos + arrin[i].len)); + (dataout + SHORTALIGN(cur_pos + arrin[i].len)); for (k = 0; k < posvin->npos; k++) { @@ -846,8 +862,8 @@ tsvector_filter(PG_FUNCTION_ARGS) memcpy(dataout + cur_pos, datain + arrin[i].pos, arrin[i].len); posvout->npos = npos; cur_pos += SHORTALIGN(arrin[i].len); - cur_pos += POSDATALEN(tsout, arrout+j) * sizeof(WordEntryPos) + - sizeof(uint16); + cur_pos += POSDATALEN(tsout, arrout + j) * sizeof(WordEntryPos) + + sizeof(uint16); j++; } @@ -1129,11 +1145,11 @@ static bool checkclass_str(CHKVAL *chkval, WordEntry *entry, QueryOperand *val, ExecPhraseData *data) { - bool result = false; + bool result = false; if (entry->haspos && (val->weight || data)) { - WordEntryPosVector *posvec; + WordEntryPosVector *posvec; /* * We can't use the _POSVECPTR macro here because the pointer to the @@ -1144,8 +1160,8 @@ checkclass_str(CHKVAL *chkval, WordEntry *entry, QueryOperand *val, if (val->weight && data) { - WordEntryPos *posvec_iter = posvec->pos; - WordEntryPos *dptr; + WordEntryPos *posvec_iter = posvec->pos; + WordEntryPos *dptr; /* * Filter position information by weights @@ -1173,7 +1189,7 @@ checkclass_str(CHKVAL *chkval, WordEntry *entry, QueryOperand *val, } else if (val->weight) { - WordEntryPos *posvec_iter = posvec->pos; + WordEntryPos *posvec_iter = posvec->pos; /* Is there a position with a matching weight? */ while (posvec_iter < posvec->pos + posvec->npos) @@ -1181,16 +1197,16 @@ checkclass_str(CHKVAL *chkval, WordEntry *entry, QueryOperand *val, if (val->weight & (1 << WEP_GETWEIGHT(*posvec_iter))) { result = true; - break; /* no need to go further */ + break; /* no need to go further */ } posvec_iter++; } } - else /* data != NULL */ + else /* data != NULL */ { data->npos = posvec->npos; - data->pos = posvec->pos; + data->pos = posvec->pos; data->allocated = false; result = true; } @@ -1213,7 +1229,7 @@ static int uniqueLongPos(WordEntryPos *pos, int npos) { WordEntryPos *pos_iter, - *result; + *result; if (npos <= 1) return npos; @@ -1273,9 +1289,10 @@ checkcondition_str(void *checkval, QueryOperand *val, ExecPhraseData *data) if ((!res || data) && val->prefix) { - WordEntryPos *allpos = NULL; - int npos = 0, - totalpos = 0; + WordEntryPos *allpos = NULL; + int npos = 0, + totalpos = 0; + /* * there was a failed exact search, so we should scan further to find * a prefix match. 
We also need to do so if caller needs position info @@ -1355,11 +1372,11 @@ TS_phrase_execute(QueryItem *curitem, } else { - ExecPhraseData Ldata = {0, false, NULL}, - Rdata = {0, false, NULL}; - WordEntryPos *Lpos, - *Rpos, - *pos_iter = NULL; + ExecPhraseData Ldata = {0, false, NULL}, + Rdata = {0, false, NULL}; + WordEntryPos *Lpos, + *Rpos, + *pos_iter = NULL; Assert(curitem->qoperator.oper == OP_PHRASE); @@ -1371,22 +1388,24 @@ TS_phrase_execute(QueryItem *curitem, return false; /* - * if at least one of the operands has no position - * information, fallback to AND operation. + * if at least one of the operands has no position information, + * fallback to AND operation. */ if (Ldata.npos == 0 || Rdata.npos == 0) return true; /* - * Result of the operation is a list of the - * corresponding positions of RIGHT operand. + * Result of the operation is a list of the corresponding positions of + * RIGHT operand. */ if (data) { if (!Rdata.allocated) + /* - * OP_PHRASE is based on the OP_AND, so the number of resulting - * positions could not be greater than the total amount of operands. + * OP_PHRASE is based on the OP_AND, so the number of + * resulting positions could not be greater than the total + * amount of operands. */ data->pos = palloc(sizeof(WordEntryPos) * Min(Ldata.npos, Rdata.npos)); else @@ -1423,8 +1442,8 @@ TS_phrase_execute(QueryItem *curitem, *pos_iter = WEP_GETPOS(*Rpos); pos_iter++; - break; /* We need to build a unique result - * array, so go to the next Rpos */ + break; /* We need to build a unique result + * array, so go to the next Rpos */ } else { @@ -1439,8 +1458,8 @@ TS_phrase_execute(QueryItem *curitem, else { /* - * Go to the next Rpos, because Lpos - * is ahead of the current Rpos + * Go to the next Rpos, because Lpos is ahead of the + * current Rpos */ break; } @@ -1477,14 +1496,14 @@ TS_phrase_execute(QueryItem *curitem, */ bool TS_execute(QueryItem *curitem, void *checkval, bool calcnot, - bool (*chkcond) (void *checkval, QueryOperand *val, ExecPhraseData *data)) + bool (*chkcond) (void *checkval, QueryOperand *val, ExecPhraseData *data)) { /* since this function recurses, it could be driven to stack overflow */ check_stack_depth(); if (curitem->type == QI_VAL) return chkcond(checkval, (QueryOperand *) curitem, - NULL /* we don't need position info */); + NULL /* we don't need position info */ ); switch (curitem->qoperator.oper) { @@ -1546,6 +1565,7 @@ tsquery_requires_match(QueryItem *curitem) return false; case OP_PHRASE: + /* * Treat OP_PHRASE as OP_AND here */ @@ -1972,7 +1992,7 @@ ts_stat_sql(MemoryContext persistentContext, text *txt, text *ws) if (SPI_tuptable == NULL || SPI_tuptable->tupdesc->natts != 1 || !IsBinaryCoercible(SPI_gettypeid(SPI_tuptable->tupdesc, 1), - TSVECTOROID)) + TSVECTOROID)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("ts_stat query must return one tsvector column"))); @@ -2160,7 +2180,7 @@ tsvector_update_trigger(PG_FUNCTION_ARGS, bool config_column) errmsg("tsvector column \"%s\" does not exist", trigger->tgargs[0]))); if (!IsBinaryCoercible(SPI_gettypeid(rel->rd_att, tsvector_attr_num), - TSVECTOROID)) + TSVECTOROID)) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("column \"%s\" is not of tsvector type", @@ -2178,7 +2198,7 @@ tsvector_update_trigger(PG_FUNCTION_ARGS, bool config_column) errmsg("configuration column \"%s\" does not exist", trigger->tgargs[1]))); if (!IsBinaryCoercible(SPI_gettypeid(rel->rd_att, config_attr_num), - REGCONFIGOID)) + REGCONFIGOID)) ereport(ERROR, 
(errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("column \"%s\" is not of regconfig type", diff --git a/src/backend/utils/adt/uuid.c b/src/backend/utils/adt/uuid.c index a6d5c18312..c59e74da3d 100644 --- a/src/backend/utils/adt/uuid.c +++ b/src/backend/utils/adt/uuid.c @@ -44,8 +44,8 @@ static void string_to_uuid(const char *source, pg_uuid_t *uuid); static int uuid_internal_cmp(const pg_uuid_t *arg1, const pg_uuid_t *arg2); static int uuid_fast_cmp(Datum x, Datum y, SortSupport ssup); static int uuid_cmp_abbrev(Datum x, Datum y, SortSupport ssup); -static bool uuid_abbrev_abort(int memtupcount, SortSupport ssup); -static Datum uuid_abbrev_convert(Datum original, SortSupport ssup); +static bool uuid_abbrev_abort(int memtupcount, SortSupport ssup); +static Datum uuid_abbrev_convert(Datum original, SortSupport ssup); Datum uuid_in(PG_FUNCTION_ARGS) @@ -245,15 +245,15 @@ uuid_cmp(PG_FUNCTION_ARGS) Datum uuid_sortsupport(PG_FUNCTION_ARGS) { - SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0); + SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0); ssup->comparator = uuid_fast_cmp; ssup->ssup_extra = NULL; if (ssup->abbreviate) { - uuid_sortsupport_state *uss; - MemoryContext oldcontext; + uuid_sortsupport_state *uss; + MemoryContext oldcontext; oldcontext = MemoryContextSwitchTo(ssup->ssup_cxt); @@ -310,8 +310,8 @@ uuid_cmp_abbrev(Datum x, Datum y, SortSupport ssup) static bool uuid_abbrev_abort(int memtupcount, SortSupport ssup) { - uuid_sortsupport_state *uss = ssup->ssup_extra; - double abbr_card; + uuid_sortsupport_state *uss = ssup->ssup_extra; + double abbr_card; if (memtupcount < 10000 || uss->input_count < 10000 || !uss->estimating) return false; @@ -340,8 +340,8 @@ uuid_abbrev_abort(int memtupcount, SortSupport ssup) /* * Target minimum cardinality is 1 per ~2k of non-null inputs. 0.5 row * fudge factor allows us to abort earlier on genuinely pathological data - * where we've had exactly one abbreviated value in the first 2k (non-null) - * rows. + * where we've had exactly one abbreviated value in the first 2k + * (non-null) rows. */ if (abbr_card < uss->input_count / 2000.0 + 0.5) { @@ -349,7 +349,7 @@ uuid_abbrev_abort(int memtupcount, SortSupport ssup) if (trace_sort) elog(LOG, "uuid_abbrev: aborting abbreviation at cardinality %f" - " below threshold %f after " INT64_FORMAT " values (%d rows)", + " below threshold %f after " INT64_FORMAT " values (%d rows)", abbr_card, uss->input_count / 2000.0 + 0.5, uss->input_count, memtupcount); #endif @@ -376,9 +376,9 @@ uuid_abbrev_abort(int memtupcount, SortSupport ssup) static Datum uuid_abbrev_convert(Datum original, SortSupport ssup) { - uuid_sortsupport_state *uss = ssup->ssup_extra; - pg_uuid_t *authoritative = DatumGetUUIDP(original); - Datum res; + uuid_sortsupport_state *uss = ssup->ssup_extra; + pg_uuid_t *authoritative = DatumGetUUIDP(original); + Datum res; memcpy(&res, authoritative->data, sizeof(Datum)); uss->input_count += 1; @@ -400,9 +400,9 @@ uuid_abbrev_convert(Datum original, SortSupport ssup) * Byteswap on little-endian machines. * * This is needed so that uuid_cmp_abbrev() (an unsigned integer 3-way - * comparator) works correctly on all platforms. If we didn't do this, the - * comparator would have to call memcmp() with a pair of pointers to the - * first byte of each abbreviated key, which is slower. + * comparator) works correctly on all platforms. If we didn't do this, + * the comparator would have to call memcmp() with a pair of pointers to + * the first byte of each abbreviated key, which is slower. 
*/ res = DatumBigEndianToNative(res); diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c index b10027ff51..a869e858bc 100644 --- a/src/backend/utils/adt/varlena.c +++ b/src/backend/utils/adt/varlena.c @@ -5055,9 +5055,9 @@ text_format(PG_FUNCTION_ARGS) /* should not get here, because of previous check */ ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("unrecognized format() type specifier \"%c\"", - *cp), - errhint("For a single \"%%\" use \"%%%%\"."))); + errmsg("unrecognized format() type specifier \"%c\"", + *cp), + errhint("For a single \"%%\" use \"%%%%\"."))); break; } } diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c index cb26d79afb..13ae6add03 100644 --- a/src/backend/utils/cache/lsyscache.c +++ b/src/backend/utils/cache/lsyscache.c @@ -1819,9 +1819,9 @@ get_rel_tablespace(Oid relid) char get_rel_persistence(Oid relid) { - HeapTuple tp; - Form_pg_class reltup; - char result; + HeapTuple tp; + Form_pg_class reltup; + char result; tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); if (!HeapTupleIsValid(tp)) diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c index 8fd9f2b573..005e4b7f1c 100644 --- a/src/backend/utils/cache/plancache.c +++ b/src/backend/utils/cache/plancache.c @@ -106,7 +106,7 @@ static void PlanCacheRelCallback(Datum arg, Oid relid); static void PlanCacheFuncCallback(Datum arg, int cacheid, uint32 hashvalue); static void PlanCacheSysCallback(Datum arg, int cacheid, uint32 hashvalue); static void PlanCacheUserMappingCallback(Datum arg, int cacheid, - uint32 hashvalue); + uint32 hashvalue); /* @@ -610,10 +610,11 @@ RevalidateCachedQuery(CachedPlanSource *plansource) plansource->is_valid = false; /* - * If we have a join pushed down to the foreign server and the current user - * is different from the one for which the plan was created, invalidate the - * generic plan since user mapping for the new user might make the join - * unsafe to push down, or change which user mapping is used. + * If we have a join pushed down to the foreign server and the current + * user is different from the one for which the plan was created, + * invalidate the generic plan since user mapping for the new user might + * make the join unsafe to push down, or change which user mapping is + * used. */ if (plansource->is_valid && plansource->gplan && @@ -901,7 +902,7 @@ BuildCachedPlan(CachedPlanSource *plansource, List *qlist, bool spi_pushed; MemoryContext plan_context; MemoryContext oldcxt = CurrentMemoryContext; - ListCell *lc; + ListCell *lc; /* * Normally the querytree should be valid already, but if it's not, @@ -1016,7 +1017,7 @@ BuildCachedPlan(CachedPlanSource *plansource, List *qlist, plan->has_foreign_join = false; foreach(lc, plist) { - PlannedStmt *plan_stmt = (PlannedStmt *) lfirst(lc); + PlannedStmt *plan_stmt = (PlannedStmt *) lfirst(lc); if (IsA(plan_stmt, PlannedStmt)) plan->has_foreign_join = @@ -1888,9 +1889,9 @@ PlanCacheSysCallback(Datum arg, int cacheid, uint32 hashvalue) /* * PlanCacheUserMappingCallback - * Syscache inval callback function for user mapping cache invalidation. + * Syscache inval callback function for user mapping cache invalidation. * - * Invalidates plans which have pushed down foreign joins. + * Invalidates plans which have pushed down foreign joins. 
*/ static void PlanCacheUserMappingCallback(Datum arg, int cacheid, uint32 hashvalue) @@ -1911,9 +1912,9 @@ PlanCacheUserMappingCallback(Datum arg, int cacheid, uint32 hashvalue) /* * If the plan has pushed down foreign joins, those join may become - * unsafe to push down because of user mapping changes. Invalidate only - * the generic plan, since changes to user mapping do not invalidate the - * parse tree. + * unsafe to push down because of user mapping changes. Invalidate + * only the generic plan, since changes to user mapping do not + * invalidate the parse tree. */ if (plansource->gplan && plansource->gplan->has_foreign_join) plansource->gplan->is_valid = false; diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index 1b7b99548c..afb6c8772d 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -5251,9 +5251,9 @@ RelationIdIsInInitFile(Oid relationId) bool RelationHasUnloggedIndex(Relation rel) { - List *indexoidlist; - ListCell *indexoidscan; - bool result = false; + List *indexoidlist; + ListCell *indexoidscan; + bool result = false; indexoidlist = RelationGetIndexList(rel); diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c index 740f089ccd..78d441d198 100644 --- a/src/backend/utils/error/elog.c +++ b/src/backend/utils/error/elog.c @@ -150,8 +150,8 @@ static int recursion_depth = 0; /* to detect actual recursion */ * both log_line_prefix and csv logs. */ -static struct timeval saved_timeval; -static bool saved_timeval_set = false; +static struct timeval saved_timeval; +static bool saved_timeval_set = false; #define FORMATTED_TS_LEN 128 static char formatted_start_time[FORMATTED_TS_LEN]; @@ -1467,9 +1467,9 @@ EmitErrorReport(void) * mechanisms. * * The log hook has access to both the translated and original English - * error message text, which is passed through to allow it to be used - * as a message identifier. Note that the original text is not available - * for detail, detail_log, hint and context text elements. + * error message text, which is passed through to allow it to be used as a + * message identifier. Note that the original text is not available for + * detail, detail_log, hint and context text elements. 
*/ if (edata->output_to_server && emit_log_hook) (*emit_log_hook) (edata); @@ -2467,7 +2467,7 @@ log_line_prefix(StringInfo buf, ErrorData *edata) break; case 'n': { - char strfbuf[128]; + char strfbuf[128]; if (!saved_timeval_set) { @@ -2476,7 +2476,7 @@ log_line_prefix(StringInfo buf, ErrorData *edata) } sprintf(strfbuf, "%ld.%03d", saved_timeval.tv_sec, - (int)(saved_timeval.tv_usec / 1000)); + (int) (saved_timeval.tv_usec / 1000)); if (padding != 0) appendStringInfo(buf, "%*s", padding, strfbuf); diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c index 1a9f70c566..d35052aea6 100644 --- a/src/backend/utils/hash/dynahash.c +++ b/src/backend/utils/hash/dynahash.c @@ -130,7 +130,7 @@ typedef struct slock_t mutex; /* spinlock */ long nentries; /* number of entries */ HASHELEMENT *freeList; /* list of free elements */ -} FreeListData; +} FreeListData; /* * Header structure for a hash table --- contains all changeable info diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c index d13355bf66..d4625a6238 100644 --- a/src/backend/utils/init/miscinit.c +++ b/src/backend/utils/init/miscinit.c @@ -482,7 +482,7 @@ InitializeSessionUserId(const char *rolename, Oid roleid) { HeapTuple roleTup; Form_pg_authid rform; - char *rname; + char *rname; /* * Don't do scans if we're bootstrapping, none of the system catalogs diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c index b3f1bc46d9..d17197267e 100644 --- a/src/backend/utils/init/postinit.c +++ b/src/backend/utils/init/postinit.c @@ -192,6 +192,7 @@ PerformAuthentication(Port *port) * FIXME: [fork/exec] Ugh. Is there a way around this overhead? */ #ifdef EXEC_BACKEND + /* * load_hba() and load_ident() want to work within the PostmasterContext, * so create that if it doesn't exist (which it won't). We'll delete it @@ -870,9 +871,9 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username, { /* * If this is a background worker not bound to any particular - * database, we're done now. Everything that follows only makes - * sense if we are bound to a specific database. We do need to - * close the transaction we started before returning. + * database, we're done now. Everything that follows only makes sense + * if we are bound to a specific database. We do need to close the + * transaction we started before returning. */ if (!bootstrap) CommitTransactionCommand(); diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index 14d2042910..cf3eb1a4f1 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -2869,7 +2869,7 @@ static struct config_real ConfigureNamesReal[] = { {"parallel_setup_cost", PGC_USERSET, QUERY_TUNING_COST, gettext_noop("Sets the planner's estimate of the cost of " - "starting up worker processes for parallel query."), + "starting up worker processes for parallel query."), NULL }, &parallel_setup_cost, @@ -5926,13 +5926,14 @@ set_config_option(const char *name, const char *value, * don't re-read the config file during backend start. * * In EXEC_BACKEND builds, this works differently: we load all - * non-default settings from the CONFIG_EXEC_PARAMS file during - * backend start. In that case we must accept PGC_SIGHUP - * settings, so as to have the same value as if we'd forked - * from the postmaster. This can also happen when using - * RestoreGUCState() within a background worker that needs to - * have the same settings as the user backend that started it.
- * is_reload will be true when either situation applies. + * non-default settings from the CONFIG_EXEC_PARAMS file + * during backend start. In that case we must accept + * PGC_SIGHUP settings, so as to have the same value as if + * we'd forked from the postmaster. This can also happen when + * using RestoreGUCState() within a background worker that + * needs to have the same settings as the user backend that + * started it. is_reload will be true when either situation + * applies. */ if (IsUnderPostmaster && !is_reload) return -1; diff --git a/src/backend/utils/misc/pg_config.c b/src/backend/utils/misc/pg_config.c index 3d6b9f27c0..1fbdd1f4db 100644 --- a/src/backend/utils/misc/pg_config.c +++ b/src/backend/utils/misc/pg_config.c @@ -25,17 +25,17 @@ Datum pg_config(PG_FUNCTION_ARGS) { - ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - Tuplestorestate *tupstore; - HeapTuple tuple; - TupleDesc tupdesc; - AttInMetadata *attinmeta; - MemoryContext per_query_ctx; - MemoryContext oldcontext; - ConfigData *configdata; - size_t configdata_len; - char *values[2]; - int i = 0; + ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; + Tuplestorestate *tupstore; + HeapTuple tuple; + TupleDesc tupdesc; + AttInMetadata *attinmeta; + MemoryContext per_query_ctx; + MemoryContext oldcontext; + ConfigData *configdata; + size_t configdata_len; + char *values[2]; + int i = 0; /* check to see if caller supports us returning a tuplestore */ if (!rsinfo || !(rsinfo->allowedModes & SFRM_Materialize)) @@ -91,10 +91,10 @@ pg_config(PG_FUNCTION_ARGS) /* * SFRM_Materialize mode expects us to return a NULL Datum. The actual - * tuples are in our tuplestore and passed back through - * rsinfo->setResult. rsinfo->setDesc is set to the tuple description - * that we actually used to build our tuples with, so the caller can - * verify we did what it was expecting. + * tuples are in our tuplestore and passed back through rsinfo->setResult. + * rsinfo->setDesc is set to the tuple description that we actually used + * to build our tuples with, so the caller can verify we did what it was + * expecting. */ rsinfo->setDesc = tupdesc; MemoryContextSwitchTo(oldcontext); diff --git a/src/backend/utils/misc/pg_controldata.c b/src/backend/utils/misc/pg_controldata.c index 8552c0713b..34ee76a237 100644 --- a/src/backend/utils/misc/pg_controldata.c +++ b/src/backend/utils/misc/pg_controldata.c @@ -29,11 +29,11 @@ Datum pg_control_system(PG_FUNCTION_ARGS) { - Datum values[4]; - bool nulls[4]; - TupleDesc tupdesc; - HeapTuple htup; - ControlFileData *ControlFile; + Datum values[4]; + bool nulls[4]; + TupleDesc tupdesc; + HeapTuple htup; + ControlFileData *ControlFile; /* * Construct a tuple descriptor for the result row. This must match this @@ -73,13 +73,13 @@ pg_control_system(PG_FUNCTION_ARGS) Datum pg_control_checkpoint(PG_FUNCTION_ARGS) { - Datum values[19]; - bool nulls[19]; - TupleDesc tupdesc; - HeapTuple htup; - ControlFileData *ControlFile; - XLogSegNo segno; - char xlogfilename[MAXFNAMELEN]; + Datum values[19]; + bool nulls[19]; + TupleDesc tupdesc; + HeapTuple htup; + ControlFileData *ControlFile; + XLogSegNo segno; + char xlogfilename[MAXFNAMELEN]; /* * Construct a tuple descriptor for the result row. 
This must match this @@ -159,8 +159,8 @@ pg_control_checkpoint(PG_FUNCTION_ARGS) nulls[6] = false; values[7] = CStringGetTextDatum(psprintf("%u:%u", - ControlFile->checkPointCopy.nextXidEpoch, - ControlFile->checkPointCopy.nextXid)); + ControlFile->checkPointCopy.nextXidEpoch, + ControlFile->checkPointCopy.nextXid)); nulls[7] = false; values[8] = ObjectIdGetDatum(ControlFile->checkPointCopy.nextOid); @@ -205,11 +205,11 @@ pg_control_checkpoint(PG_FUNCTION_ARGS) Datum pg_control_recovery(PG_FUNCTION_ARGS) { - Datum values[5]; - bool nulls[5]; - TupleDesc tupdesc; - HeapTuple htup; - ControlFileData *ControlFile; + Datum values[5]; + bool nulls[5]; + TupleDesc tupdesc; + HeapTuple htup; + ControlFileData *ControlFile; /* * Construct a tuple descriptor for the result row. This must match this @@ -254,11 +254,11 @@ pg_control_recovery(PG_FUNCTION_ARGS) Datum pg_control_init(PG_FUNCTION_ARGS) { - Datum values[13]; - bool nulls[13]; - TupleDesc tupdesc; - HeapTuple htup; - ControlFileData *ControlFile; + Datum values[13]; + bool nulls[13]; + TupleDesc tupdesc; + HeapTuple htup; + ControlFileData *ControlFile; /* * Construct a tuple descriptor for the result row. This must match this diff --git a/src/backend/utils/resowner/resowner.c b/src/backend/utils/resowner/resowner.c index 6856ad0d19..07075ce06d 100644 --- a/src/backend/utils/resowner/resowner.c +++ b/src/backend/utils/resowner/resowner.c @@ -128,7 +128,7 @@ typedef struct ResourceOwnerData /* We can remember up to MAX_RESOWNER_LOCKS references to local locks. */ int nlocks; /* number of owned locks */ LOCALLOCK *locks[MAX_RESOWNER_LOCKS]; /* list of owned locks */ -} ResourceOwnerData; +} ResourceOwnerData; /***************************************************************************** diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c index 4cc5be92a2..7878660055 100644 --- a/src/backend/utils/sort/tuplesort.c +++ b/src/backend/utils/sort/tuplesort.c @@ -263,7 +263,7 @@ struct Tuplesortstate int maxTapes; /* number of tapes (Knuth's T) */ int tapeRange; /* maxTapes-1 (Knuth's P) */ MemoryContext sortcontext; /* memory context holding most sort data */ - MemoryContext tuplecontext; /* sub-context of sortcontext for tuple data */ + MemoryContext tuplecontext; /* sub-context of sortcontext for tuple data */ LogicalTapeSet *tapeset; /* logtape.c object for tapes in a temp file */ /* @@ -321,10 +321,10 @@ struct Tuplesortstate /* * Memory for tuples is sometimes allocated in batch, rather than - * incrementally. This implies that incremental memory accounting has been - * abandoned. Currently, this only happens for the final on-the-fly merge - * step. Large batch allocations can store tuples (e.g. IndexTuples) - * without palloc() fragmentation and other overhead. + * incrementally. This implies that incremental memory accounting has + * been abandoned. Currently, this only happens for the final on-the-fly + * merge step. Large batch allocations can store tuples (e.g. + * IndexTuples) without palloc() fragmentation and other overhead. */ bool batchUsed; @@ -337,8 +337,8 @@ struct Tuplesortstate /* * While building initial runs, this is the current output run number - * (starting at RUN_FIRST). Afterwards, it is the number of initial - * runs we made. + * (starting at RUN_FIRST). Afterwards, it is the number of initial runs + * we made. */ int currentRun; @@ -375,9 +375,9 @@ struct Tuplesortstate * just a few large allocations. 
* * Aside from the general benefits of performing fewer individual retail - * palloc() calls, this also helps make merging more cache efficient, since - * each tape's tuples must naturally be accessed sequentially (in sorted - * order). + * palloc() calls, this also helps make merging more cache efficient, + * since each tape's tuples must naturally be accessed sequentially (in + * sorted order). */ int64 spacePerTape; /* Space (memory) for tuples (not slots) */ char **mergetuples; /* Each tape's memory allocation */ @@ -546,9 +546,9 @@ static void beginmerge(Tuplesortstate *state, bool finalMerge); static void batchmemtuples(Tuplesortstate *state); static void mergebatch(Tuplesortstate *state, int64 spacePerTape); static void mergebatchone(Tuplesortstate *state, int srcTape, - SortTuple *stup, bool *should_free); + SortTuple *stup, bool *should_free); static void mergebatchfreetape(Tuplesortstate *state, int srcTape, - SortTuple *rtup, bool *should_free); + SortTuple *rtup, bool *should_free); static void *mergebatchalloc(Tuplesortstate *state, int tapenum, Size tuplen); static void mergepreread(Tuplesortstate *state); static void mergeprereadone(Tuplesortstate *state, int srcTape); @@ -647,10 +647,10 @@ tuplesort_begin_common(int workMem, bool randomAccess) * Caller tuple (e.g. IndexTuple) memory context. * * A dedicated child content used exclusively for caller passed tuples - * eases memory management. Resetting at key points reduces fragmentation. - * Note that the memtuples array of SortTuples is allocated in the parent - * context, not this context, because there is no need to free memtuples - * early. + * eases memory management. Resetting at key points reduces + * fragmentation. Note that the memtuples array of SortTuples is allocated + * in the parent context, not this context, because there is no need to + * free memtuples early. */ tuplecontext = AllocSetContextCreate(sortcontext, "Caller tuples", @@ -1042,8 +1042,8 @@ tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation, * a pass-by-value datatype could have an abbreviated form that is cheaper * to compare. In a tuple sort, we could support that, because we can * always extract the original datum from the tuple is needed. Here, we - * can't, because a datum sort only stores a single copy of the datum; - * the "tuple" field of each sortTuple is NULL. + * can't, because a datum sort only stores a single copy of the datum; the + * "tuple" field of each sortTuple is NULL. */ state->sortKeys->abbreviate = !typbyval; @@ -1413,8 +1413,7 @@ tuplesort_putindextuplevalues(Tuplesortstate *state, Relation rel, * ensure a consistent representation (current tuple was just * handled). It does not matter if some dumped tuples are already * sorted on tape, since serialized tuples lack abbreviated keys - * (TSS_BUILDRUNS state prevents control reaching here in any - * case). + * (TSS_BUILDRUNS state prevents control reaching here in any case). */ for (i = 0; i < state->memtupcount; i++) { @@ -1459,8 +1458,8 @@ tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull) if (isNull || !state->tuples) { /* - * Set datum1 to zeroed representation for NULLs (to be consistent, and - * to support cheap inequality tests for NULL abbreviated keys). + * Set datum1 to zeroed representation for NULLs (to be consistent, + * and to support cheap inequality tests for NULL abbreviated keys). */ stup.datum1 = !isNull ? 
val : (Datum) 0; stup.isnull1 = isNull; @@ -1498,10 +1497,10 @@ tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull) * * Alter datum1 representation in already-copied tuples, so as to * ensure a consistent representation (current tuple was just - * handled). It does not matter if some dumped tuples are - * already sorted on tape, since serialized tuples lack - * abbreviated keys (TSS_BUILDRUNS state prevents control - * reaching here in any case). + * handled). It does not matter if some dumped tuples are already + * sorted on tape, since serialized tuples lack abbreviated keys + * (TSS_BUILDRUNS state prevents control reaching here in any + * case). */ for (i = 0; i < state->memtupcount; i++) { @@ -1965,11 +1964,11 @@ tuplesort_gettuple_common(Tuplesortstate *state, bool forward, SortTuple *newtup; /* - * Returned tuple is still counted in our memory space most - * of the time. See mergebatchone() for discussion of why - * caller may occasionally be required to free returned - * tuple, and how preread memory is managed with regard to - * edge cases more generally. + * Returned tuple is still counted in our memory space most of + * the time. See mergebatchone() for discussion of why caller + * may occasionally be required to free returned tuple, and + * how preread memory is managed with regard to edge cases + * more generally. */ *stup = state->memtuples[0]; tuplesort_heap_siftup(state, false); @@ -2269,8 +2268,8 @@ useselection(Tuplesortstate *state) /* * memtupsize might be noticeably higher than memtupcount here in atypical * cases. It seems slightly preferable to not allow recent outliers to - * impact this determination. Note that caller's trace_sort output reports - * memtupcount instead. + * impact this determination. Note that caller's trace_sort output + * reports memtupcount instead. */ if (state->memtupsize <= replacement_sort_tuples) return true; @@ -2349,9 +2348,9 @@ inittapes(Tuplesortstate *state) state->tp_tapenum = (int *) palloc0(maxTapes * sizeof(int)); /* - * Give replacement selection a try based on user setting. There will - * be a switch to a simple hybrid sort-merge strategy after the first - * run (iff we could not output one long run). + * Give replacement selection a try based on user setting. There will be + * a switch to a simple hybrid sort-merge strategy after the first run + * (iff we could not output one long run). */ state->replaceActive = useselection(state); @@ -2372,7 +2371,7 @@ inittapes(Tuplesortstate *state) elog(LOG, "replacement selection will sort %d first run tuples", state->memtupcount); #endif - state->memtupcount = 0; /* make the heap empty */ + state->memtupcount = 0; /* make the heap empty */ for (j = 0; j < ntuples; j++) { @@ -2792,8 +2791,8 @@ beginmerge(Tuplesortstate *state, bool finalMergeBatch) int usedSlots; /* - * Report how effective batchmemtuples() was in balancing - * the number of slots against the need for memory for the + * Report how effective batchmemtuples() was in balancing the + * number of slots against the need for memory for the * underlying tuples (e.g. IndexTuples). 
The big preread of * all tapes when switching to FINALMERGE state should be * fairly representative of memory utilization during the @@ -2847,9 +2846,9 @@ beginmerge(Tuplesortstate *state, bool finalMergeBatch) static void batchmemtuples(Tuplesortstate *state) { - int64 refund; - int64 availMemLessRefund; - int memtupsize = state->memtupsize; + int64 refund; + int64 availMemLessRefund; + int memtupsize = state->memtupsize; /* For simplicity, assume no memtuples are actually currently counted */ Assert(state->memtupcount == 0); @@ -2867,8 +2866,8 @@ batchmemtuples(Tuplesortstate *state) /* * To establish balanced memory use after refunding palloc overhead, * temporarily have our accounting indicate that we've allocated all - * memory we're allowed to less that refund, and call grow_memtuples() - * to have it increase the number of slots. + * memory we're allowed to less that refund, and call grow_memtuples() to + * have it increase the number of slots. */ state->growmemtuples = true; USEMEM(state, availMemLessRefund); @@ -2880,8 +2879,8 @@ batchmemtuples(Tuplesortstate *state) #ifdef TRACE_SORT if (trace_sort) { - Size OldKb = (memtupsize * sizeof(SortTuple) + 1023) / 1024; - Size NewKb = (state->memtupsize * sizeof(SortTuple) + 1023) / 1024; + Size OldKb = (memtupsize * sizeof(SortTuple) + 1023) / 1024; + Size NewKb = (state->memtupsize * sizeof(SortTuple) + 1023) / 1024; elog(LOG, "grew memtuples %1.2fx from %d (%zu KB) to %d (%zu KB) for final merge", (double) NewKb / (double) OldKb, @@ -2910,7 +2909,7 @@ batchmemtuples(Tuplesortstate *state) static void mergebatch(Tuplesortstate *state, int64 spacePerTape) { - int srcTape; + int srcTape; Assert(state->activeTapes > 0); Assert(state->tuples); @@ -2966,13 +2965,13 @@ mergebatchone(Tuplesortstate *state, int srcTape, SortTuple *rtup, */ if (!state->mergeoverflow[srcTape]) { - Size tupLen; + Size tupLen; /* * Mark tuple buffer range for reuse, but be careful to move final, - * tail tuple to start of space for next run so that it's available - * to caller when stup is returned, and remains available at least - * until the next tuple is requested. + * tail tuple to start of space for next run so that it's available to + * caller when stup is returned, and remains available at least until + * the next tuple is requested. */ tupLen = state->mergecurrent[srcTape] - state->mergetail[srcTape]; state->mergecurrent[srcTape] = state->mergetuples[srcTape]; @@ -3081,9 +3080,9 @@ mergebatchalloc(Tuplesortstate *state, int tapenum, Size tuplen) state->mergetuples[tapenum] + state->spacePerTape) { /* - * Usual case -- caller is returned pointer into its tape's buffer, and - * an offset from that point is recorded as where tape has consumed up - * to for current round of preloading. + * Usual case -- caller is returned pointer into its tape's buffer, + * and an offset from that point is recorded as where tape has + * consumed up to for current round of preloading. */ ret = state->mergetail[tapenum] = state->mergecurrent[tapenum]; state->mergecurrent[tapenum] += reserve_tuplen; @@ -3238,8 +3237,8 @@ dumptuples(Tuplesortstate *state, bool alltuples) if (state->replaceActive) { /* - * Still holding out for a case favorable to replacement selection. - * Still incrementally spilling using heap. + * Still holding out for a case favorable to replacement + * selection. Still incrementally spilling using heap. * * Dump the heap's frontmost entry, and sift up to remove it from * the heap. 
@@ -3252,17 +3251,15 @@ dumptuples(Tuplesortstate *state, bool alltuples) else { /* - * Once committed to quicksorting runs, never incrementally - * spill + * Once committed to quicksorting runs, never incrementally spill */ dumpbatch(state, alltuples); break; } /* - * If top run number has changed, we've finished the current run - * (this can only be the first run), and will no longer spill - * incrementally. + * If top run number has changed, we've finished the current run (this + * can only be the first run), and will no longer spill incrementally. */ if (state->memtupcount == 0 || state->memtuples[0].tupindex == HEAP_RUN_NEXT) @@ -3280,6 +3277,7 @@ dumptuples(Tuplesortstate *state, bool alltuples) state->currentRun, state->destTape, pg_rusage_show(&state->ru_start)); #endif + /* * Done if heap is empty, which is possible when there is only one * long run. @@ -3334,19 +3332,19 @@ dumpbatch(Tuplesortstate *state, bool alltuples) * remaining tuples are loaded into memory, just before input was * exhausted. * - * In general, short final runs are quite possible. Rather than - * allowing a special case where there was a superfluous - * selectnewtape() call (i.e. a call with no subsequent run actually - * written to destTape), we prefer to write out a 0 tuple run. + * In general, short final runs are quite possible. Rather than allowing + * a special case where there was a superfluous selectnewtape() call (i.e. + * a call with no subsequent run actually written to destTape), we prefer + * to write out a 0 tuple run. * * mergepreread()/mergeprereadone() are prepared for 0 tuple runs, and * will reliably mark the tape inactive for the merge when called from * beginmerge(). This case is therefore similar to the case where * mergeonerun() finds a dummy run for the tape, and so doesn't need to - * merge a run from the tape (or conceptually "merges" the dummy run, - * if you prefer). According to Knuth, Algorithm D "isn't strictly - * optimal" in its method of distribution and dummy run assignment; - * this edge case seems very unlikely to make that appreciably worse. + * merge a run from the tape (or conceptually "merges" the dummy run, if + * you prefer). According to Knuth, Algorithm D "isn't strictly optimal" + * in its method of distribution and dummy run assignment; this edge case + * seems very unlikely to make that appreciably worse. */ Assert(state->status == TSS_BUILDRUNS); @@ -3369,8 +3367,8 @@ dumpbatch(Tuplesortstate *state, bool alltuples) #endif /* - * Sort all tuples accumulated within the allowed amount of memory for this - * run using quicksort + * Sort all tuples accumulated within the allowed amount of memory for + * this run using quicksort */ tuplesort_sort_memtuples(state); @@ -3848,11 +3846,11 @@ readtup_alloc(Tuplesortstate *state, int tapenum, Size tuplen) if (state->batchUsed) { /* - * No USEMEM() call, because during final on-the-fly merge - * accounting is based on tape-private state. ("Overflow" - * allocations are detected as an indication that a new round - * or preloading is required. Preloading marks existing - * contents of tape's batch buffer for reuse.) + * No USEMEM() call, because during final on-the-fly merge accounting + * is based on tape-private state. ("Overflow" allocations are + * detected as an indication that a new round or preloading is + * required. Preloading marks existing contents of tape's batch buffer + * for reuse.) 
*/ return mergebatchalloc(state, tapenum, tuplen); } @@ -3993,8 +3991,7 @@ copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup) * ensure a consistent representation (current tuple was just * handled). It does not matter if some dumped tuples are already * sorted on tape, since serialized tuples lack abbreviated keys - * (TSS_BUILDRUNS state prevents control reaching here in any - * case). + * (TSS_BUILDRUNS state prevents control reaching here in any case). */ for (i = 0; i < state->memtupcount; i++) { @@ -4238,8 +4235,7 @@ copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup) * ensure a consistent representation (current tuple was just * handled). It does not matter if some dumped tuples are already * sorted on tape, since serialized tuples lack abbreviated keys - * (TSS_BUILDRUNS state prevents control reaching here in any - * case). + * (TSS_BUILDRUNS state prevents control reaching here in any case). */ for (i = 0; i < state->memtupcount; i++) { @@ -4544,8 +4540,7 @@ copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup) * ensure a consistent representation (current tuple was just * handled). It does not matter if some dumped tuples are already * sorted on tape, since serialized tuples lack abbreviated keys - * (TSS_BUILDRUNS state prevents control reaching here in any - * case). + * (TSS_BUILDRUNS state prevents control reaching here in any case). */ for (i = 0; i < state->memtupcount; i++) { diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c index f8a2a83b88..8a1c9932e5 100644 --- a/src/backend/utils/time/snapmgr.c +++ b/src/backend/utils/time/snapmgr.c @@ -78,16 +78,15 @@ typedef struct OldSnapshotControlData * Variables for old snapshot handling are shared among processes and are * only allowed to move forward. */ - slock_t mutex_current; /* protect current_timestamp */ + slock_t mutex_current; /* protect current_timestamp */ int64 current_timestamp; /* latest snapshot timestamp */ - slock_t mutex_latest_xmin; /* protect latest_xmin - * and next_map_update - */ - TransactionId latest_xmin; /* latest snapshot xmin */ - int64 next_map_update; /* latest snapshot valid up to */ - slock_t mutex_threshold; /* protect threshold fields */ + slock_t mutex_latest_xmin; /* protect latest_xmin and + * next_map_update */ + TransactionId latest_xmin; /* latest snapshot xmin */ + int64 next_map_update; /* latest snapshot valid up to */ + slock_t mutex_threshold; /* protect threshold fields */ int64 threshold_timestamp; /* earlier snapshot is old */ - TransactionId threshold_xid; /* earlier xid may be gone */ + TransactionId threshold_xid; /* earlier xid may be gone */ /* * Keep one xid per minute for old snapshot error handling. @@ -117,11 +116,11 @@ typedef struct OldSnapshotControlData * * Persistence is not needed. */ - int head_offset; /* subscript of oldest tracked time */ - int64 head_timestamp; /* time corresponding to head xid */ - int count_used; /* how many slots are in use */ + int head_offset; /* subscript of oldest tracked time */ + int64 head_timestamp; /* time corresponding to head xid */ + int count_used; /* how many slots are in use */ TransactionId xid_by_minute[FLEXIBLE_ARRAY_MEMBER]; -} OldSnapshotControlData; +} OldSnapshotControlData; static volatile OldSnapshotControlData *oldSnapshotControl; @@ -709,8 +708,8 @@ UpdateActiveSnapshotCommandId(void) /* * Don't allow modification of the active snapshot during parallel - * operation. 
We share the snapshot to worker backends at the beginning of - * parallel operation, so any change to the snapshot can lead to + * operation. We share the snapshot to worker backends at the beginning + * of parallel operation, so any change to the snapshot can lead to * inconsistencies. We have other defenses against * CommandCounterIncrement, but there are a few places that call this * directly, so we put an additional guard here. @@ -1634,7 +1633,7 @@ TransactionIdLimitedForOldSnapshots(TransactionId recentXmin, } ts = AlignTimestampToMinuteBoundary(ts) - - (old_snapshot_threshold * USECS_PER_MINUTE); + - (old_snapshot_threshold * USECS_PER_MINUTE); /* Check for fast exit without LW locking. */ SpinLockAcquire(&oldSnapshotControl->mutex_threshold); @@ -1660,14 +1659,14 @@ TransactionIdLimitedForOldSnapshots(TransactionId recentXmin, if (oldSnapshotControl->count_used > 0 && ts >= oldSnapshotControl->head_timestamp) { - int offset; + int offset; offset = ((ts - oldSnapshotControl->head_timestamp) / USECS_PER_MINUTE); if (offset > oldSnapshotControl->count_used - 1) offset = oldSnapshotControl->count_used - 1; offset = (oldSnapshotControl->head_offset + offset) - % OLD_SNAPSHOT_TIME_MAP_ENTRIES; + % OLD_SNAPSHOT_TIME_MAP_ENTRIES; xlimit = oldSnapshotControl->xid_by_minute[offset]; if (NormalTransactionIdFollows(xlimit, recentXmin)) @@ -1715,8 +1714,8 @@ MaintainOldSnapshotTimeMapping(int64 whenTaken, TransactionId xmin) ts = AlignTimestampToMinuteBoundary(whenTaken); /* - * Keep track of the latest xmin seen by any process. Update mapping - * with a new value when we have crossed a bucket boundary. + * Keep track of the latest xmin seen by any process. Update mapping with + * a new value when we have crossed a bucket boundary. */ SpinLockAcquire(&oldSnapshotControl->mutex_latest_xmin); latest_xmin = oldSnapshotControl->latest_xmin; @@ -1747,7 +1746,7 @@ MaintainOldSnapshotTimeMapping(int64 whenTaken, TransactionId xmin) if (whenTaken < 0) { elog(DEBUG1, - "MaintainOldSnapshotTimeMapping called with negative whenTaken = %ld", + "MaintainOldSnapshotTimeMapping called with negative whenTaken = %ld", (long) whenTaken); return; } @@ -1789,10 +1788,10 @@ MaintainOldSnapshotTimeMapping(int64 whenTaken, TransactionId xmin) * USECS_PER_MINUTE))) { /* existing mapping; advance xid if possible */ - int bucket = (oldSnapshotControl->head_offset - + ((ts - oldSnapshotControl->head_timestamp) - / USECS_PER_MINUTE)) - % OLD_SNAPSHOT_TIME_MAP_ENTRIES; + int bucket = (oldSnapshotControl->head_offset + + ((ts - oldSnapshotControl->head_timestamp) + / USECS_PER_MINUTE)) + % OLD_SNAPSHOT_TIME_MAP_ENTRIES; if (TransactionIdPrecedes(oldSnapshotControl->xid_by_minute[bucket], xmin)) oldSnapshotControl->xid_by_minute[bucket] = xmin; @@ -1800,8 +1799,8 @@ MaintainOldSnapshotTimeMapping(int64 whenTaken, TransactionId xmin) else { /* We need a new bucket, but it might not be the very next one. */ - int advance = ((ts - oldSnapshotControl->head_timestamp) - / USECS_PER_MINUTE); + int advance = ((ts - oldSnapshotControl->head_timestamp) + / USECS_PER_MINUTE); oldSnapshotControl->head_timestamp = ts; @@ -1815,14 +1814,14 @@ MaintainOldSnapshotTimeMapping(int64 whenTaken, TransactionId xmin) else { /* Store the new value in one or more buckets. */ - int i; + int i; for (i = 0; i < advance; i++) { if (oldSnapshotControl->count_used == OLD_SNAPSHOT_TIME_MAP_ENTRIES) { /* Map full and new value replaces old head. 
*/ - int old_head = oldSnapshotControl->head_offset; + int old_head = oldSnapshotControl->head_offset; if (old_head == (OLD_SNAPSHOT_TIME_MAP_ENTRIES - 1)) oldSnapshotControl->head_offset = 0; @@ -1833,9 +1832,9 @@ MaintainOldSnapshotTimeMapping(int64 whenTaken, TransactionId xmin) else { /* Extend map to unused entry. */ - int new_tail = (oldSnapshotControl->head_offset - + oldSnapshotControl->count_used) - % OLD_SNAPSHOT_TIME_MAP_ENTRIES; + int new_tail = (oldSnapshotControl->head_offset + + oldSnapshotControl->count_used) + % OLD_SNAPSHOT_TIME_MAP_ENTRIES; oldSnapshotControl->count_used++; oldSnapshotControl->xid_by_minute[new_tail] = xmin; diff --git a/src/bin/pg_archivecleanup/pg_archivecleanup.c b/src/bin/pg_archivecleanup/pg_archivecleanup.c index c5569f32a3..2b3d15dd58 100644 --- a/src/bin/pg_archivecleanup/pg_archivecleanup.c +++ b/src/bin/pg_archivecleanup/pg_archivecleanup.c @@ -106,8 +106,8 @@ CleanupPriorWALFiles(void) { /* * Truncation is essentially harmless, because we skip names of - * length other than XLOG_FNAME_LEN. (In principle, one - * could use a 1000-character additional_ext and get trouble.) + * length other than XLOG_FNAME_LEN. (In principle, one could use + * a 1000-character additional_ext and get trouble.) */ strlcpy(walfile, xlde->d_name, MAXPGPATH); TrimExtension(walfile, additional_ext); diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c index 2927b60cc2..ec69682d20 100644 --- a/src/bin/pg_basebackup/pg_basebackup.c +++ b/src/bin/pg_basebackup/pg_basebackup.c @@ -1830,6 +1830,7 @@ BaseBackup(void) int r; #else DWORD status; + /* * get a pointer sized version of bgchild to avoid warnings about * casting to a different size on WIN64. diff --git a/src/bin/pg_basebackup/pg_recvlogical.c b/src/bin/pg_basebackup/pg_recvlogical.c index 6d1270508d..4c6cf7054e 100644 --- a/src/bin/pg_basebackup/pg_recvlogical.c +++ b/src/bin/pg_basebackup/pg_recvlogical.c @@ -861,8 +861,8 @@ main(int argc, char **argv) /* * Obtain a connection to server. This is not really necessary but it - * helps to get more precise error messages about authentication, - * required GUC parameters and such. + * helps to get more precise error messages about authentication, required + * GUC parameters and such. */ conn = GetConnection(); if (!conn) diff --git a/src/bin/pg_ctl/pg_ctl.c b/src/bin/pg_ctl/pg_ctl.c index acfeee8e91..efc07291ad 100644 --- a/src/bin/pg_ctl/pg_ctl.c +++ b/src/bin/pg_ctl/pg_ctl.c @@ -1747,8 +1747,8 @@ CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION *processInfo, bool as_ser if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ALL_ACCESS, &origToken)) { /* - * Most Windows targets make DWORD a 32-bit unsigned long, but - * in case it doesn't cast DWORD before printing. + * Most Windows targets make DWORD a 32-bit unsigned long, but in case + * it doesn't cast DWORD before printing. */ write_stderr(_("%s: could not open process token: error code %lu\n"), progname, (unsigned long) GetLastError()); diff --git a/src/bin/pg_dump/dumputils.c b/src/bin/pg_dump/dumputils.c index 98ffb099ff..cd1e8c4a68 100644 --- a/src/bin/pg_dump/dumputils.c +++ b/src/bin/pg_dump/dumputils.c @@ -712,13 +712,13 @@ buildACLQueries(PQExpBuffer acl_subquery, PQExpBuffer racl_subquery, * and is what the current privileges are). * * We always perform this delta on all ACLs and expect that by the time - * these are run the initial privileges will be in place, even in a - * binary upgrade situation (see below). 
+ * these are run the initial privileges will be in place, even in a binary + * upgrade situation (see below). */ printfPQExpBuffer(acl_subquery, "(SELECT pg_catalog.array_agg(acl) FROM " "(SELECT pg_catalog.unnest(coalesce(%s,pg_catalog.acldefault(%s,%s))) AS acl " "EXCEPT " - "SELECT pg_catalog.unnest(coalesce(pip.initprivs,pg_catalog.acldefault(%s,%s)))) as foo)", + "SELECT pg_catalog.unnest(coalesce(pip.initprivs,pg_catalog.acldefault(%s,%s)))) as foo)", acl_column, obj_kind, acl_owner, @@ -726,9 +726,9 @@ buildACLQueries(PQExpBuffer acl_subquery, PQExpBuffer racl_subquery, acl_owner); printfPQExpBuffer(racl_subquery, "(SELECT pg_catalog.array_agg(acl) FROM " - "(SELECT pg_catalog.unnest(coalesce(pip.initprivs,pg_catalog.acldefault(%s,%s))) AS acl " + "(SELECT pg_catalog.unnest(coalesce(pip.initprivs,pg_catalog.acldefault(%s,%s))) AS acl " "EXCEPT " - "SELECT pg_catalog.unnest(coalesce(%s,pg_catalog.acldefault(%s,%s)))) as foo)", + "SELECT pg_catalog.unnest(coalesce(%s,pg_catalog.acldefault(%s,%s)))) as foo)", obj_kind, acl_owner, acl_column, @@ -756,16 +756,16 @@ buildACLQueries(PQExpBuffer acl_subquery, PQExpBuffer racl_subquery, "(SELECT pg_catalog.array_agg(acl) FROM " "(SELECT pg_catalog.unnest(pip.initprivs) AS acl " "EXCEPT " - "SELECT pg_catalog.unnest(pg_catalog.acldefault(%s,%s))) as foo) END", + "SELECT pg_catalog.unnest(pg_catalog.acldefault(%s,%s))) as foo) END", obj_kind, acl_owner); printfPQExpBuffer(init_racl_subquery, "CASE WHEN privtype = 'e' THEN " "(SELECT pg_catalog.array_agg(acl) FROM " - "(SELECT pg_catalog.unnest(pg_catalog.acldefault(%s,%s)) AS acl " + "(SELECT pg_catalog.unnest(pg_catalog.acldefault(%s,%s)) AS acl " "EXCEPT " - "SELECT pg_catalog.unnest(pip.initprivs)) as foo) END", + "SELECT pg_catalog.unnest(pip.initprivs)) as foo) END", obj_kind, acl_owner); } diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index 89cf0274ed..0f1f6e9f93 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -1322,7 +1322,7 @@ checkExtensionMembership(DumpableObject *dobj, Archive *fout) dobj->dump = DUMP_COMPONENT_NONE; else dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL | - DUMP_COMPONENT_SECLABEL | DUMP_COMPONENT_POLICY); + DUMP_COMPONENT_SECLABEL | DUMP_COMPONENT_POLICY); } @@ -1576,16 +1576,16 @@ static void selectDumpableExtension(ExtensionInfo *extinfo, DumpOptions *dopt) { /* - * Use DUMP_COMPONENT_ACL for from-initdb extensions, to allow users - * to change permissions on those objects, if they wish to, and have - * those changes preserved. + * Use DUMP_COMPONENT_ACL for from-initdb extensions, to allow users to + * change permissions on those objects, if they wish to, and have those + * changes preserved. */ if (dopt->binary_upgrade && extinfo->dobj.catId.oid < (Oid) FirstNormalObjectId) extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL; else extinfo->dobj.dump = extinfo->dobj.dump_contains = - dopt->include_everything ? DUMP_COMPONENT_ALL : - DUMP_COMPONENT_NONE; + dopt->include_everything ? 
DUMP_COMPONENT_ALL : + DUMP_COMPONENT_NONE; } /* @@ -5243,7 +5243,7 @@ getTables(Archive *fout, int *numTables) dopt->binary_upgrade); buildACLQueries(attacl_subquery, attracl_subquery, attinitacl_subquery, - attinitracl_subquery, "at.attacl", "c.relowner", "'c'", + attinitracl_subquery, "at.attacl", "c.relowner", "'c'", dopt->binary_upgrade); appendPQExpBuffer(query, @@ -5949,8 +5949,8 @@ getTables(Archive *fout, int *numTables) /* * If the table-level and all column-level ACLs for this table are - * unchanged, then we don't need to worry about including the ACLs - * for this table. If any column-level ACLs have been changed, the + * unchanged, then we don't need to worry about including the ACLs for + * this table. If any column-level ACLs have been changed, the * 'changed_acl' column from the query will indicate that. * * This can result in a significant performance improvement in cases @@ -5977,10 +5977,11 @@ getTables(Archive *fout, int *numTables) * NOTE: it'd be kinda nice to lock other relations too, not only * plain tables, but the backend doesn't presently allow that. * - * We only need to lock the table for certain components; see pg_dump.h + * We only need to lock the table for certain components; see + * pg_dump.h */ if (tblinfo[i].dobj.dump && tblinfo[i].relkind == RELKIND_RELATION && - (tblinfo[i].dobj.dump & DUMP_COMPONENTS_REQUIRING_LOCK)) + (tblinfo[i].dobj.dump & DUMP_COMPONENTS_REQUIRING_LOCK)) { resetPQExpBuffer(query); appendPQExpBuffer(query, @@ -8608,7 +8609,7 @@ getForeignDataWrappers(Archive *fout, int *numForeignDataWrappers) "FROM pg_foreign_data_wrapper f " "LEFT JOIN pg_init_privs pip ON " "(f.oid = pip.objoid " - "AND pip.classoid = 'pg_foreign_data_wrapper'::regclass " + "AND pip.classoid = 'pg_foreign_data_wrapper'::regclass " "AND pip.objsubid = 0) ", username_subquery, acl_subquery->data, @@ -13457,30 +13458,30 @@ dumpAgg(Archive *fout, AggInfo *agginfo) if (fout->remoteVersion >= 90600) { appendPQExpBuffer(query, "SELECT aggtransfn, " - "aggfinalfn, aggtranstype::pg_catalog.regtype, " - "aggcombinefn, aggserialfn, aggdeserialfn, aggmtransfn, " - "aggminvtransfn, aggmfinalfn, aggmtranstype::pg_catalog.regtype, " - "aggfinalextra, aggmfinalextra, " - "aggsortop::pg_catalog.regoperator, " - "aggserialtype::pg_catalog.regtype, " - "(aggkind = 'h') AS hypothetical, " - "aggtransspace, agginitval, " - "aggmtransspace, aggminitval, " - "true AS convertok, " - "pg_catalog.pg_get_function_arguments(p.oid) AS funcargs, " - "pg_catalog.pg_get_function_identity_arguments(p.oid) AS funciargs, " - "p.proparallel " - "FROM pg_catalog.pg_aggregate a, pg_catalog.pg_proc p " - "WHERE a.aggfnoid = p.oid " - "AND p.oid = '%u'::pg_catalog.oid", - agginfo->aggfn.dobj.catId.oid); + "aggfinalfn, aggtranstype::pg_catalog.regtype, " + "aggcombinefn, aggserialfn, aggdeserialfn, aggmtransfn, " + "aggminvtransfn, aggmfinalfn, aggmtranstype::pg_catalog.regtype, " + "aggfinalextra, aggmfinalextra, " + "aggsortop::pg_catalog.regoperator, " + "aggserialtype::pg_catalog.regtype, " + "(aggkind = 'h') AS hypothetical, " + "aggtransspace, agginitval, " + "aggmtransspace, aggminitval, " + "true AS convertok, " + "pg_catalog.pg_get_function_arguments(p.oid) AS funcargs, " + "pg_catalog.pg_get_function_identity_arguments(p.oid) AS funciargs, " + "p.proparallel " + "FROM pg_catalog.pg_aggregate a, pg_catalog.pg_proc p " + "WHERE a.aggfnoid = p.oid " + "AND p.oid = '%u'::pg_catalog.oid", + agginfo->aggfn.dobj.catId.oid); } else if (fout->remoteVersion >= 90400) { appendPQExpBuffer(query, "SELECT 
aggtransfn, " "aggfinalfn, aggtranstype::pg_catalog.regtype, " "'-' AS aggcombinefn, '-' AS aggserialfn, " - "'-' AS aggdeserialfn, aggmtransfn, aggminvtransfn, " + "'-' AS aggdeserialfn, aggmtransfn, aggminvtransfn, " "aggmfinalfn, aggmtranstype::pg_catalog.regtype, " "aggfinalextra, aggmfinalextra, " "aggsortop::pg_catalog.regoperator, " @@ -13720,7 +13721,7 @@ dumpAgg(Archive *fout, AggInfo *agginfo) if (strcmp(aggcombinefn, "-") != 0) { - appendPQExpBuffer(details, ",\n COMBINEFUNC = %s", aggcombinefn); + appendPQExpBuffer(details, ",\n COMBINEFUNC = %s", aggcombinefn); } /* @@ -13729,9 +13730,9 @@ dumpAgg(Archive *fout, AggInfo *agginfo) */ if (strcmp(aggserialfn, "-") != 0) { - appendPQExpBuffer(details, ",\n SERIALFUNC = %s", aggserialfn); - appendPQExpBuffer(details, ",\n DESERIALFUNC = %s", aggdeserialfn); - appendPQExpBuffer(details, ",\n SERIALTYPE = %s", aggserialtype); + appendPQExpBuffer(details, ",\n SERIALFUNC = %s", aggserialfn); + appendPQExpBuffer(details, ",\n DESERIALFUNC = %s", aggdeserialfn); + appendPQExpBuffer(details, ",\n SERIALTYPE = %s", aggserialtype); } if (strcmp(aggmtransfn, "-") != 0) @@ -13822,7 +13823,7 @@ dumpAgg(Archive *fout, AggInfo *agginfo) dumpSecLabel(fout, labelq->data, agginfo->aggfn.dobj.namespace->dobj.name, agginfo->aggfn.rolname, - agginfo->aggfn.dobj.catId, 0, agginfo->aggfn.dobj.dumpId); + agginfo->aggfn.dobj.catId, 0, agginfo->aggfn.dobj.dumpId); /* * Since there is no GRANT ON AGGREGATE syntax, we have to make the ACL @@ -15019,7 +15020,7 @@ dumpTable(Archive *fout, TableInfo *tbinfo) "JOIN pg_catalog.pg_class c ON (at.attrelid = c.oid) " "LEFT JOIN pg_catalog.pg_init_privs pip ON " "(at.attrelid = pip.objoid " - "AND pip.classoid = 'pg_catalog.pg_class'::pg_catalog.regclass " + "AND pip.classoid = 'pg_catalog.pg_class'::pg_catalog.regclass " "AND at.attnum = pip.objsubid) " "WHERE at.attrelid = '%u'::pg_catalog.oid AND " "NOT at.attisdropped " @@ -15050,7 +15051,7 @@ dumpTable(Archive *fout, TableInfo *tbinfo) "SELECT attname, attacl, NULL as rattacl, " "NULL AS initattacl, NULL AS initrattacl " "FROM pg_catalog.pg_attribute " - "WHERE attrelid = '%u'::pg_catalog.oid AND NOT attisdropped " + "WHERE attrelid = '%u'::pg_catalog.oid AND NOT attisdropped " "AND attacl IS NOT NULL " "ORDER BY attnum", tbinfo->dobj.catId.oid); diff --git a/src/bin/pg_dump/pg_dump_sort.c b/src/bin/pg_dump/pg_dump_sort.c index dc35a93727..d87f08d356 100644 --- a/src/bin/pg_dump/pg_dump_sort.c +++ b/src/bin/pg_dump/pg_dump_sort.c @@ -848,14 +848,15 @@ repairTypeFuncLoop(DumpableObject *typeobj, DumpableObject *funcobj) if (typeInfo->shellType) { addObjectDependency(funcobj, typeInfo->shellType->dobj.dumpId); + /* - * Mark shell type (always including the definition, as we need - * the shell type defined to identify the function fully) as to be - * dumped if any such function is + * Mark shell type (always including the definition, as we need the + * shell type defined to identify the function fully) as to be dumped + * if any such function is */ if (funcobj->dump) typeInfo->shellType->dobj.dump = funcobj->dump | - DUMP_COMPONENT_DEFINITION; + DUMP_COMPONENT_DEFINITION; } } diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c index 861f5ee915..d4fb03e5d9 100644 --- a/src/bin/pg_dump/pg_dumpall.c +++ b/src/bin/pg_dump/pg_dumpall.c @@ -788,7 +788,7 @@ dumpRoles(PGconn *conn) auth_oid = atooid(PQgetvalue(res, i, i_oid)); rolename = PQgetvalue(res, i, i_rolname); - if (strncmp(rolename,"pg_",3) == 0) + if (strncmp(rolename, "pg_", 3) == 0) { 
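/*
 * Illustrative sketch, not part of the patch: the getTables() hunk above
 * decides whether a table must be locked by testing its per-object
 * dump-component bitmask, (dobj.dump & DUMP_COMPONENTS_REQUIRING_LOCK).
 * The flag values below are hypothetical stand-ins, not the definitions
 * from pg_dump.h; they only demonstrate the bitmask test.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int DumpComponents;	/* stand-in type */

#define DUMP_COMPONENT_NONE       0x00	/* hypothetical values */
#define DUMP_COMPONENT_DEFINITION 0x01
#define DUMP_COMPONENT_DATA       0x02
#define DUMP_COMPONENT_ACL        0x04
#define DUMP_COMPONENTS_REQUIRING_LOCK \
	(DUMP_COMPONENT_DEFINITION | DUMP_COMPONENT_DATA)

static bool
needs_table_lock(DumpComponents dump)
{
	/* in this sketch, only components reading structure or data need a lock */
	return (dump & DUMP_COMPONENTS_REQUIRING_LOCK) != 0;
}

int
main(void)
{
	printf("%d\n", needs_table_lock(DUMP_COMPONENT_ACL));			/* 0 */
	printf("%d\n", needs_table_lock(DUMP_COMPONENT_DEFINITION));	/* 1 */
	return 0;
}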
fprintf(stderr, _("%s: role name starting with \"pg_\" skipped (%s)\n"), progname, rolename); @@ -920,7 +920,7 @@ dumpRoleMembership(PGconn *conn) "LEFT JOIN pg_authid ur on ur.oid = a.roleid " "LEFT JOIN pg_authid um on um.oid = a.member " "LEFT JOIN pg_authid ug on ug.oid = a.grantor " - "WHERE NOT (ur.rolname ~ '^pg_' AND um.rolname ~ '^pg_')" + "WHERE NOT (ur.rolname ~ '^pg_' AND um.rolname ~ '^pg_')" "ORDER BY 1,2,3"); if (PQntuples(res) > 0) diff --git a/src/bin/pg_rewind/filemap.c b/src/bin/pg_rewind/filemap.c index a3e52f6f2a..9b00dc1cdc 100644 --- a/src/bin/pg_rewind/filemap.c +++ b/src/bin/pg_rewind/filemap.c @@ -79,8 +79,8 @@ process_source_file(const char *path, file_type_t type, size_t newsize, return; /* - * Pretend that pg_xlog is a directory, even if it's really a symlink. - * We don't want to mess with the symlink itself, nor complain if it's a + * Pretend that pg_xlog is a directory, even if it's really a symlink. We + * don't want to mess with the symlink itself, nor complain if it's a * symlink in source but not in target or vice versa. */ if (strcmp(path, "pg_xlog") == 0 && type == FILE_TYPE_SYMLINK) diff --git a/src/bin/pg_rewind/libpq_fetch.c b/src/bin/pg_rewind/libpq_fetch.c index 1169e70d2b..92390099eb 100644 --- a/src/bin/pg_rewind/libpq_fetch.c +++ b/src/bin/pg_rewind/libpq_fetch.c @@ -300,7 +300,7 @@ receiveFileChunks(const char *sql) if (PQgetisnull(res, 0, 2)) { pg_log(PG_DEBUG, - "received null value for chunk for file \"%s\", file has been deleted\n", + "received null value for chunk for file \"%s\", file has been deleted\n", filename); pg_free(filename); PQclear(res); diff --git a/src/bin/pg_rewind/parsexlog.c b/src/bin/pg_rewind/parsexlog.c index 9e0e4a6ce4..b53591d02a 100644 --- a/src/bin/pg_rewind/parsexlog.c +++ b/src/bin/pg_rewind/parsexlog.c @@ -261,15 +261,15 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, /* * Since incomplete segments are copied into next timelines, switch to - * the timeline holding the required segment. Assuming this scan can be - * done both forward and backward, consider also switching timeline + * the timeline holding the required segment. Assuming this scan can + * be done both forward and backward, consider also switching timeline * accordingly. */ while (private->tliIndex < targetNentries - 1 && - targetHistory[private->tliIndex].end < targetSegEnd) + targetHistory[private->tliIndex].end < targetSegEnd) private->tliIndex++; while (private->tliIndex > 0 && - targetHistory[private->tliIndex].begin >= targetSegEnd) + targetHistory[private->tliIndex].begin >= targetSegEnd) private->tliIndex--; XLogFileName(xlogfname, targetHistory[private->tliIndex].tli, xlogreadsegno); diff --git a/src/bin/pg_rewind/pg_rewind.c b/src/bin/pg_rewind/pg_rewind.c index f472813a64..5fdd4c5605 100644 --- a/src/bin/pg_rewind/pg_rewind.c +++ b/src/bin/pg_rewind/pg_rewind.c @@ -56,7 +56,7 @@ bool dry_run = false; /* Target history */ TimeLineHistoryEntry *targetHistory; -int targetNentries; +int targetNentries; static void usage(const char *progname) @@ -229,9 +229,9 @@ main(int argc, char **argv) targetHistory[lastcommontliIndex].tli); /* - * Check for the possibility that the target is in fact a direct ancestor - * of the source. In that case, there is no divergent history in the - * target that needs rewinding. + * Check for the possibility that the target is in fact a direct + * ancestor of the source. In that case, there is no divergent history + * in the target that needs rewinding. 
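/*
 * Illustrative sketch, not part of the patch: the SimpleXLogPageRead() hunk
 * above nudges private->tliIndex forward or backward until the entry in
 * targetHistory[] covers the WAL segment being read.  Below is a minimal
 * standalone version of that search over a made-up history array; a plain
 * 64-bit integer stands in for XLogRecPtr, and 0 marks an open-ended entry.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t RecPtr;			/* stand-in for XLogRecPtr */

typedef struct
{
	int			tli;				/* timeline id */
	RecPtr		begin;				/* inclusive */
	RecPtr		end;				/* exclusive; 0 means "no end" here */
} HistoryEntry;

/* Move idx so that history[idx] is the timeline holding 'target'. */
static int
adjust_tli_index(const HistoryEntry *history, int nentries, int idx, RecPtr target)
{
	while (idx < nentries - 1 &&
		   history[idx].end != 0 && history[idx].end < target)
		idx++;
	while (idx > 0 && history[idx].begin >= target)
		idx--;
	return idx;
}

int
main(void)
{
	HistoryEntry history[] = {
		{1, 0, 1000},
		{2, 1000, 5000},
		{3, 5000, 0},				/* current timeline, open-ended */
	};

	printf("%d\n", history[adjust_tli_index(history, 3, 0, 1200)].tli);	/* 2 */
	printf("%d\n", history[adjust_tli_index(history, 3, 2, 500)].tli);	/* 1 */
	return 0;
}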
*/ if (ControlFile_target.checkPoint >= divergerec) { @@ -248,9 +248,9 @@ main(int argc, char **argv) /* * If the histories diverged exactly at the end of the shutdown - * checkpoint record on the target, there are no WAL records in the - * target that don't belong in the source's history, and no rewind is - * needed. + * checkpoint record on the target, there are no WAL records in + * the target that don't belong in the source's history, and no + * rewind is needed. */ if (chkptendrec == divergerec) rewind_needed = false; @@ -430,14 +430,14 @@ MinXLogRecPtr(XLogRecPtr a, XLogRecPtr b) static TimeLineHistoryEntry * getTimelineHistory(ControlFileData *controlFile, int *nentries) { - TimeLineHistoryEntry *history; - TimeLineID tli; + TimeLineHistoryEntry *history; + TimeLineID tli; tli = controlFile->checkPointCopy.ThisTimeLineID; /* - * Timeline 1 does not have a history file, so there is no need to check and - * fake an entry with infinite start and end positions. + * Timeline 1 does not have a history file, so there is no need to check + * and fake an entry with infinite start and end positions. */ if (tli == 1) { @@ -467,7 +467,7 @@ getTimelineHistory(ControlFileData *controlFile, int *nentries) if (debug) { - int i; + int i; if (controlFile == &ControlFile_source) pg_log(PG_DEBUG, "Source timeline history:\n"); @@ -511,7 +511,8 @@ findCommonAncestorTimeline(XLogRecPtr *recptr, int *tliIndex) { TimeLineHistoryEntry *sourceHistory; int sourceNentries; - int i, n; + int i, + n; /* Retrieve timelines for both source and target */ sourceHistory = getTimelineHistory(&ControlFile_source, &sourceNentries); @@ -669,17 +670,17 @@ updateControlFile(ControlFileData *ControlFile) static void syncTargetDirectory(const char *argv0) { - int ret; + int ret; #define MAXCMDLEN (2 * MAXPGPATH) - char exec_path[MAXPGPATH]; - char cmd[MAXCMDLEN]; + char exec_path[MAXPGPATH]; + char cmd[MAXCMDLEN]; /* locate initdb binary */ if ((ret = find_other_exec(argv0, "initdb", "initdb (PostgreSQL) " PG_VERSION "\n", exec_path)) < 0) { - char full_path[MAXPGPATH]; + char full_path[MAXPGPATH]; if (find_my_exec(argv0, full_path) < 0) strlcpy(full_path, progname, sizeof(full_path)); diff --git a/src/bin/pg_rewind/pg_rewind.h b/src/bin/pg_rewind/pg_rewind.h index 0a3460fdd9..f5e02d7056 100644 --- a/src/bin/pg_rewind/pg_rewind.h +++ b/src/bin/pg_rewind/pg_rewind.h @@ -29,7 +29,7 @@ extern bool dry_run; /* Target history */ extern TimeLineHistoryEntry *targetHistory; -extern int targetNentries; +extern int targetNentries; /* in parsexlog.c */ extern void extractPageMap(const char *datadir, XLogRecPtr startpoint, diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c index 6b6f5ba7ea..324760b074 100644 --- a/src/bin/pg_upgrade/check.c +++ b/src/bin/pg_upgrade/check.c @@ -199,10 +199,10 @@ output_completion_banner(char *analyze_script_file_name, deletion_script_file_name); else pg_log(PG_REPORT, - "Could not create a script to delete the old cluster's data files\n" - "because user-defined tablespaces or the new cluster's data directory\n" - "exist in the old cluster directory. The old cluster's contents must\n" - "be deleted manually.\n"); + "Could not create a script to delete the old cluster's data files\n" + "because user-defined tablespaces or the new cluster's data directory\n" + "exist in the old cluster directory. 
The old cluster's contents must\n" + "be deleted manually.\n"); } @@ -501,7 +501,8 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name) { FILE *script = NULL; int tblnum; - char old_cluster_pgdata[MAXPGPATH], new_cluster_pgdata[MAXPGPATH]; + char old_cluster_pgdata[MAXPGPATH], + new_cluster_pgdata[MAXPGPATH]; *deletion_script_file_name = psprintf("%sdelete_old_cluster.%s", SCRIPT_PREFIX, SCRIPT_EXT); @@ -516,7 +517,7 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name) if (path_is_prefix_of_path(old_cluster_pgdata, new_cluster_pgdata)) { pg_log(PG_WARNING, - "\nWARNING: new data directory should not be inside the old data directory, e.g. %s\n", old_cluster_pgdata); + "\nWARNING: new data directory should not be inside the old data directory, e.g. %s\n", old_cluster_pgdata); /* Unlink file in case it is left over from a previous run. */ unlink(*deletion_script_file_name); diff --git a/src/bin/pg_upgrade/controldata.c b/src/bin/pg_upgrade/controldata.c index 2601827b92..d89cf196ab 100644 --- a/src/bin/pg_upgrade/controldata.c +++ b/src/bin/pg_upgrade/controldata.c @@ -200,8 +200,8 @@ get_control_data(ClusterInfo *cluster, bool live_check) /* * Delimiter changed from '/' to ':' in 9.6. We don't test for * the catalog version of the change because the catalog version - * is pulled from pg_controldata too, and it isn't worth adding - * an order dependency for this --- we just check the string. + * is pulled from pg_controldata too, and it isn't worth adding an + * order dependency for this --- we just check the string. */ if (strchr(p, '/') != NULL) p = strchr(p, '/'); diff --git a/src/bin/pg_upgrade/file.c b/src/bin/pg_upgrade/file.c index 4f27ce713f..b132e58195 100644 --- a/src/bin/pg_upgrade/file.c +++ b/src/bin/pg_upgrade/file.c @@ -37,13 +37,13 @@ const char * copyFile(const char *src, const char *dst) { #ifndef WIN32 - if (copy_file(src, dst) == -1) + if (copy_file(src, dst) == -1) #else - if (CopyFile(src, dst, true) == 0) + if (CopyFile(src, dst, true) == 0) #endif - return getErrorText(); - else - return NULL; + return getErrorText(); + else + return NULL; } diff --git a/src/bin/pg_upgrade/pg_upgrade.c b/src/bin/pg_upgrade/pg_upgrade.c index a09263625d..4d9f496ed3 100644 --- a/src/bin/pg_upgrade/pg_upgrade.c +++ b/src/bin/pg_upgrade/pg_upgrade.c @@ -535,9 +535,9 @@ set_frozenxids(bool minmxid_only) /* * We must update databases where datallowconn = false, e.g. * template0, because autovacuum increments their datfrozenxids, - * relfrozenxids, and relminmxid even if autovacuum is turned off, - * and even though all the data rows are already frozen. To enable - * this, we temporarily change datallowconn. + * relfrozenxids, and relminmxid even if autovacuum is turned off, and + * even though all the data rows are already frozen. To enable this, + * we temporarily change datallowconn. 
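/*
 * Illustrative sketch, not part of the patch: the get_control_data() hunk
 * above copes with the NextXID delimiter changing from '/' to ':' in 9.6 by
 * simply checking which character is present after the field label.  The
 * helper and sample lines below are stand-ins for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long
next_xid_from_line(const char *line)
{
	const char *p = strchr(line, ':');	/* skip the field label */

	if (p == NULL)
		return 0;
	p++;
	/* delimiter between epoch and xid: '/' before 9.6, ':' from 9.6 on */
	if (strchr(p, '/') != NULL)
		p = strchr(p, '/');
	else
		p = strchr(p, ':');
	if (p == NULL)
		return 0;					/* unexpected format */
	return strtoul(p + 1, NULL, 10);
}

int
main(void)
{
	printf("%lu\n", next_xid_from_line("Latest checkpoint's NextXID:  0/12345"));
	printf("%lu\n", next_xid_from_line("Latest checkpoint's NextXID:  0:67890"));
	return 0;
}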
*/ if (strcmp(datallowconn, "f") == 0) PQclear(executeQueryOrDie(conn_template1, diff --git a/src/bin/pg_upgrade/tablespace.c b/src/bin/pg_upgrade/tablespace.c index e72cbf98ca..dfbce59ca3 100644 --- a/src/bin/pg_upgrade/tablespace.c +++ b/src/bin/pg_upgrade/tablespace.c @@ -91,7 +91,7 @@ get_tablespace_paths(void) else report_status(PG_FATAL, "cannot stat() tablespace directory \"%s\": %s\n", - os_info.old_tablespaces[tblnum], getErrorText()); + os_info.old_tablespaces[tblnum], getErrorText()); } if (!S_ISDIR(statBuf.st_mode)) report_status(PG_FATAL, diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c index ecf359387d..87fb006d87 100644 --- a/src/bin/pgbench/pgbench.c +++ b/src/bin/pgbench/pgbench.c @@ -925,10 +925,10 @@ makeVariableNumeric(Variable *var) setIntValue(&var->num_value, strtoint64(var->value)); var->is_numeric = true; } - else /* type should be double */ + else /* type should be double */ { - double dv; - char xs; + double dv; + char xs; if (sscanf(var->value, "%lf%c", &dv, &xs) != 1) { @@ -1159,7 +1159,8 @@ coerceToInt(PgBenchValue *pval, int64 *ival) } else { - double dval = pval->u.dval; + double dval = pval->u.dval; + Assert(pval->type == PGBT_DOUBLE); if (dval < PG_INT64_MIN || PG_INT64_MAX < dval) { @@ -1215,8 +1216,8 @@ evalFunc(TState *thread, CState *st, PgBenchFunction func, PgBenchExprLink *args, PgBenchValue *retval) { /* evaluate all function arguments */ - int nargs = 0; - PgBenchValue vargs[MAX_FARGS]; + int nargs = 0; + PgBenchValue vargs[MAX_FARGS]; PgBenchExprLink *l = args; for (nargs = 0; nargs < MAX_FARGS && l != NULL; nargs++, l = l->next) @@ -1233,22 +1234,24 @@ evalFunc(TState *thread, CState *st, /* then evaluate function */ switch (func) { - /* overloaded operators */ + /* overloaded operators */ case PGBENCH_ADD: case PGBENCH_SUB: case PGBENCH_MUL: case PGBENCH_DIV: case PGBENCH_MOD: { - PgBenchValue *lval = &vargs[0], - *rval = &vargs[1]; + PgBenchValue *lval = &vargs[0], + *rval = &vargs[1]; + Assert(nargs == 2); /* overloaded type management, double if some double */ if ((lval->type == PGBT_DOUBLE || rval->type == PGBT_DOUBLE) && func != PGBENCH_MOD) { - double ld, rd; + double ld, + rd; if (!coerceToDouble(lval, &ld) || !coerceToDouble(rval, &rd)) @@ -1277,9 +1280,10 @@ evalFunc(TState *thread, CState *st, Assert(0); } } - else /* we have integer operands, or % */ + else /* we have integer operands, or % */ { - int64 li, ri; + int64 li, + ri; if (!coerceToInt(lval, &li) || !coerceToInt(rval, &ri)) @@ -1318,7 +1322,7 @@ evalFunc(TState *thread, CState *st, return false; } else - setIntValue(retval, - li); + setIntValue(retval, -li); } else setIntValue(retval, 0); @@ -1327,7 +1331,7 @@ evalFunc(TState *thread, CState *st, /* else divisor is not -1 */ if (func == PGBENCH_DIV) setIntValue(retval, li / ri); - else /* func == PGBENCH_MOD */ + else /* func == PGBENCH_MOD */ setIntValue(retval, li % ri); return true; @@ -1339,27 +1343,30 @@ evalFunc(TState *thread, CState *st, } } - /* no arguments */ + /* no arguments */ case PGBENCH_PI: setDoubleValue(retval, M_PI); return true; - /* 1 overloaded argument */ + /* 1 overloaded argument */ case PGBENCH_ABS: { PgBenchValue *varg = &vargs[0]; + Assert(nargs == 1); if (varg->type == PGBT_INT) { - int64 i = varg->u.ival; + int64 i = varg->u.ival; + setIntValue(retval, i < 0 ? -i : i); } else { - double d = varg->u.dval; + double d = varg->u.dval; + Assert(varg->type == PGBT_DOUBLE); - setDoubleValue(retval, d < 0.0 ? -d: d); + setDoubleValue(retval, d < 0.0 ? 
-d : d); } return true; @@ -1368,13 +1375,14 @@ evalFunc(TState *thread, CState *st, case PGBENCH_DEBUG: { PgBenchValue *varg = &vargs[0]; + Assert(nargs == 1); - fprintf(stderr, "debug(script=%d,command=%d): ", - st->use_file, st->state+1); + fprintf(stderr, "debug(script=%d,command=%d): ", + st->use_file, st->state + 1); if (varg->type == PGBT_INT) - fprintf(stderr, "int "INT64_FORMAT"\n", varg->u.ival); + fprintf(stderr, "int " INT64_FORMAT "\n", varg->u.ival); else { Assert(varg->type == PGBT_DOUBLE); @@ -1386,11 +1394,12 @@ evalFunc(TState *thread, CState *st, return true; } - /* 1 double argument */ + /* 1 double argument */ case PGBENCH_DOUBLE: case PGBENCH_SQRT: { - double dval; + double dval; + Assert(nargs == 1); if (!coerceToDouble(&vargs[0], &dval)) @@ -1403,10 +1412,11 @@ evalFunc(TState *thread, CState *st, return true; } - /* 1 int argument */ + /* 1 int argument */ case PGBENCH_INT: { - int64 ival; + int64 ival; + Assert(nargs == 1); if (!coerceToInt(&vargs[0], &ival)) @@ -1416,7 +1426,7 @@ evalFunc(TState *thread, CState *st, return true; } - /* variable number of arguments */ + /* variable number of arguments */ case PGBENCH_LEAST: case PGBENCH_GREATEST: { @@ -1476,75 +1486,78 @@ evalFunc(TState *thread, CState *st, return true; } - /* random functions */ + /* random functions */ case PGBENCH_RANDOM: case PGBENCH_RANDOM_EXPONENTIAL: case PGBENCH_RANDOM_GAUSSIAN: - { - int64 imin, imax; - Assert(nargs >= 2); - - if (!coerceToInt(&vargs[0], &imin) || - !coerceToInt(&vargs[1], &imax)) - return false; - - /* check random range */ - if (imin > imax) { - fprintf(stderr, "empty range given to random\n"); - return false; - } - else if (imax - imin < 0 || (imax - imin) + 1 < 0) - { - /* prevent int overflows in random functions */ - fprintf(stderr, "random range is too large\n"); - return false; - } + int64 imin, + imax; - if (func == PGBENCH_RANDOM) - { - Assert(nargs == 2); - setIntValue(retval, getrand(thread, imin, imax)); - } - else /* gaussian & exponential */ - { - double param; - Assert(nargs == 3); + Assert(nargs >= 2); - if (!coerceToDouble(&vargs[2], &param)) + if (!coerceToInt(&vargs[0], &imin) || + !coerceToInt(&vargs[1], &imax)) return false; - if (func == PGBENCH_RANDOM_GAUSSIAN) + /* check random range */ + if (imin > imax) { - if (param < MIN_GAUSSIAN_PARAM) - { - fprintf(stderr, - "gaussian parameter must be at least %f " - "(not %f)\n", MIN_GAUSSIAN_PARAM, param); - return false; - } - - setIntValue(retval, - getGaussianRand(thread, imin, imax, param)); + fprintf(stderr, "empty range given to random\n"); + return false; } - else /* exponential */ + else if (imax - imin < 0 || (imax - imin) + 1 < 0) { - if (param <= 0.0) - { - fprintf(stderr, - "exponential parameter must be greater than zero" - " (got %f)\n", param); - return false; - } - - setIntValue(retval, - getExponentialRand(thread, imin, imax, param)); + /* prevent int overflows in random functions */ + fprintf(stderr, "random range is too large\n"); + return false; } + + if (func == PGBENCH_RANDOM) + { + Assert(nargs == 2); + setIntValue(retval, getrand(thread, imin, imax)); + } + else /* gaussian & exponential */ + { + double param; + + Assert(nargs == 3); + + if (!coerceToDouble(&vargs[2], &param)) + return false; + + if (func == PGBENCH_RANDOM_GAUSSIAN) + { + if (param < MIN_GAUSSIAN_PARAM) + { + fprintf(stderr, + "gaussian parameter must be at least %f " + "(not %f)\n", MIN_GAUSSIAN_PARAM, param); + return false; + } + + setIntValue(retval, + getGaussianRand(thread, imin, imax, param)); + } + else /* 
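/*
 * Illustrative sketch, not part of the patch: the pgbench hunk above rejects
 * random(min, max) ranges that are empty or so wide that the width would not
 * fit in an int64.  The standalone helper below mirrors that guard; the
 * function name is made up, and, like the patch, the overflow test relies on
 * the difference wrapping negative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
check_random_range(int64_t imin, int64_t imax)
{
	if (imin > imax)
	{
		fprintf(stderr, "empty range given to random\n");
		return false;
	}
	/* width (imax - imin) or width + 1 wrapping negative means overflow */
	if (imax - imin < 0 || (imax - imin) + 1 < 0)
	{
		fprintf(stderr, "random range is too large\n");
		return false;
	}
	return true;
}

int
main(void)
{
	printf("%d\n", check_random_range(1, 10));			/* 1 */
	printf("%d\n", check_random_range(10, 1));			/* 0 */
	printf("%d\n", check_random_range(0, INT64_MAX));	/* 0 */
	return 0;
}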
exponential */ + { + if (param <= 0.0) + { + fprintf(stderr, + "exponential parameter must be greater than zero" + " (got %f)\n", param); + return false; + } + + setIntValue(retval, + getExponentialRand(thread, imin, imax, param)); + } + } + + return true; } - return true; - } - default: /* cannot get here */ Assert(0); @@ -2078,7 +2091,7 @@ top: if (pg_strcasecmp(argv[0], "set") == 0) { PgBenchExpr *expr = commands[st->state]->expr; - PgBenchValue result; + PgBenchValue result; if (!evaluateExpr(thread, st, expr, &result)) { @@ -2173,7 +2186,7 @@ static void doLog(TState *thread, CState *st, instr_time *now, StatsData *agg, bool skipped, double latency, double lag) { - FILE *logfile = thread->logfile; + FILE *logfile = thread->logfile; Assert(use_log); @@ -3865,7 +3878,7 @@ main(int argc, char **argv) if (var->is_numeric) { if (!putVariableNumber(&state[i], "startup", - var->name, &var->num_value)) + var->name, &var->num_value)) exit(1); } else @@ -3997,7 +4010,7 @@ main(int argc, char **argv) thread->random_state[0] = random(); thread->random_state[1] = random(); thread->random_state[2] = random(); - thread->logfile = NULL; /* filled in later */ + thread->logfile = NULL; /* filled in later */ thread->latency_late = 0; initStats(&thread->stats, 0.0); @@ -4025,7 +4038,7 @@ main(int argc, char **argv) /* compute when to stop */ if (duration > 0) end_time = INSTR_TIME_GET_MICROSEC(thread->start_time) + - (int64) 1000000 * duration; + (int64) 1000000 *duration; /* the first thread (i = 0) is executed by main thread */ if (i > 0) @@ -4048,7 +4061,7 @@ main(int argc, char **argv) /* compute when to stop */ if (duration > 0) end_time = INSTR_TIME_GET_MICROSEC(threads[0].start_time) + - (int64) 1000000 * duration; + (int64) 1000000 *duration; threads[0].thread = INVALID_THREAD; #endif /* ENABLE_THREAD_SAFETY */ diff --git a/src/bin/pgbench/pgbench.h b/src/bin/pgbench/pgbench.h index 58baad8ee6..ab0f822010 100644 --- a/src/bin/pgbench/pgbench.h +++ b/src/bin/pgbench/pgbench.h @@ -43,10 +43,10 @@ typedef struct PgBenchValueType type; union { - int64 ival; - double dval; + int64 ival; + double dval; /* add other types here */ - } u; + } u; } PgBenchValue; /* Types of expression nodes */ @@ -87,7 +87,7 @@ struct PgBenchExpr PgBenchExprType etype; union { - PgBenchValue constant; + PgBenchValue constant; struct { char *varname; diff --git a/src/bin/psql/crosstabview.c b/src/bin/psql/crosstabview.c index 1e838bb313..cbe8ddff55 100644 --- a/src/bin/psql/crosstabview.c +++ b/src/bin/psql/crosstabview.c @@ -80,7 +80,7 @@ typedef struct _avl_tree static bool printCrosstab(const PGresult *results, - int num_columns, pivot_field *piv_columns, int field_for_columns, + int num_columns, pivot_field *piv_columns, int field_for_columns, int num_rows, pivot_field *piv_rows, int field_for_rows, int field_for_data); static void avlInit(avl_tree *tree); @@ -285,7 +285,7 @@ error_return: */ static bool printCrosstab(const PGresult *results, - int num_columns, pivot_field *piv_columns, int field_for_columns, + int num_columns, pivot_field *piv_columns, int field_for_columns, int num_rows, pivot_field *piv_rows, int field_for_rows, int field_for_data) { @@ -396,10 +396,10 @@ printCrosstab(const PGresult *results, if (cont.cells[idx] != NULL) { psql_error("\\crosstabview: query result contains multiple data values for row \"%s\", column \"%s\"\n", - piv_rows[row_number].name ? piv_rows[row_number].name : - popt.nullPrint ? popt.nullPrint : "(null)", - piv_columns[col_number].name ? 
piv_columns[col_number].name : - popt.nullPrint ? popt.nullPrint : "(null)"); + piv_rows[row_number].name ? piv_rows[row_number].name : + popt.nullPrint ? popt.nullPrint : "(null)", + piv_columns[col_number].name ? piv_columns[col_number].name : + popt.nullPrint ? popt.nullPrint : "(null)"); goto error; } @@ -548,7 +548,7 @@ avlInsertNode(avl_tree *tree, avl_node **node, pivot_field field) if (cmp != 0) { avlInsertNode(tree, - cmp > 0 ? &current->children[1] : &current->children[0], + cmp > 0 ? &current->children[1] : &current->children[0], field); avlAdjustBalance(tree, node); } diff --git a/src/bin/psql/help.c b/src/bin/psql/help.c index 0d0461dc2a..9e6d67b0ea 100644 --- a/src/bin/psql/help.c +++ b/src/bin/psql/help.c @@ -85,8 +85,8 @@ usage(unsigned short int pager) fprintf(output, _(" -f, --file=FILENAME execute commands from file, then exit\n")); fprintf(output, _(" -l, --list list available databases, then exit\n")); fprintf(output, _(" -v, --set=, --variable=NAME=VALUE\n" - " set psql variable NAME to VALUE\n" - " (e.g., -v ON_ERROR_STOP=1)\n")); + " set psql variable NAME to VALUE\n" + " (e.g., -v ON_ERROR_STOP=1)\n")); fprintf(output, _(" -V, --version output version information, then exit\n")); fprintf(output, _(" -X, --no-psqlrc do not read startup file (~/.psqlrc)\n")); fprintf(output, _(" -1 (\"one\"), --single-transaction\n" diff --git a/src/bin/psql/prompt.c b/src/bin/psql/prompt.c index 647e8712d6..fb08d67390 100644 --- a/src/bin/psql/prompt.c +++ b/src/bin/psql/prompt.c @@ -166,7 +166,8 @@ get_prompt(promptStatus_t status) case 'p': if (pset.db) { - int pid = PQbackendPID(pset.db); + int pid = PQbackendPID(pset.db); + if (pid) snprintf(buf, sizeof(buf), "%d", pid); } diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c index cc10abc1c4..8de8a3d0f9 100644 --- a/src/bin/psql/tab-complete.c +++ b/src/bin/psql/tab-complete.c @@ -1548,7 +1548,7 @@ psql_completion(const char *text, int start, int end) /* ALTER SERVER */ else if (Matches3("ALTER", "SERVER", MatchAny)) COMPLETE_WITH_LIST4("VERSION", "OPTIONS", "OWNER TO", "RENAME TO"); - /* ALTER SERVER VERSION */ + /* ALTER SERVER VERSION */ else if (Matches5("ALTER", "SERVER", MatchAny, "VERSION", MatchAny)) COMPLETE_WITH_CONST("OPTIONS"); /* ALTER SYSTEM SET, RESET, RESET ALL */ @@ -2030,8 +2030,11 @@ psql_completion(const char *text, int start, int end) /* First off we complete CREATE UNIQUE with "INDEX" */ else if (TailMatches2("CREATE", "UNIQUE")) COMPLETE_WITH_CONST("INDEX"); - /* If we have CREATE|UNIQUE INDEX, then add "ON", "CONCURRENTLY", and existing indexes */ + + /* + * If we have CREATE|UNIQUE INDEX, then add "ON", "CONCURRENTLY", and + * existing indexes + */ else if (TailMatches2("CREATE|UNIQUE", "INDEX")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes, " UNION SELECT 'ON'" @@ -2040,7 +2043,11 @@ psql_completion(const char *text, int start, int end) else if (TailMatches3("INDEX|CONCURRENTLY", MatchAny, "ON") || TailMatches2("INDEX|CONCURRENTLY", "ON")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tm, NULL); - /* Complete CREATE|UNIQUE INDEX CONCURRENTLY with "ON" and existing indexes */ + + /* + * Complete CREATE|UNIQUE INDEX CONCURRENTLY with "ON" and existing + * indexes + */ else if (TailMatches3("CREATE|UNIQUE", "INDEX", "CONCURRENTLY")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes, " UNION SELECT 'ON'"); @@ -3226,6 +3233,7 @@ _complete_from_query(int is_schema_query, const char *text, int state) static int list_index, byte_length; static PGresult *result = NULL; + /* * If this is the first time for 
this completion, we fetch a list of our * "things" from the backend. @@ -3242,7 +3250,10 @@ _complete_from_query(int is_schema_query, const char *text, int state) list_index = 0; byte_length = strlen(text); - /* Count length as number of characters (not bytes), for passing to substring */ + /* + * Count length as number of characters (not bytes), for passing to + * substring + */ while (*pstr) { char_length++; diff --git a/src/common/config_info.c b/src/common/config_info.c index 8976b3d288..0fab3ab527 100644 --- a/src/common/config_info.c +++ b/src/common/config_info.c @@ -33,10 +33,10 @@ ConfigData * get_configdata(const char *my_exec_path, size_t *configdata_len) { - ConfigData *configdata; - char path[MAXPGPATH]; - char *lastsep; - int i = 0; + ConfigData *configdata; + char path[MAXPGPATH]; + char *lastsep; + int i = 0; /* Adjust this to match the number of items filled below */ *configdata_len = 23; diff --git a/src/common/controldata_utils.c b/src/common/controldata_utils.c index 6867c03bee..5592fe7039 100644 --- a/src/common/controldata_utils.c +++ b/src/common/controldata_utils.c @@ -37,10 +37,10 @@ ControlFileData * get_controlfile(char *DataDir, const char *progname) { - ControlFileData *ControlFile; - int fd; - char ControlFilePath[MAXPGPATH]; - pg_crc32c crc; + ControlFileData *ControlFile; + int fd; + char ControlFilePath[MAXPGPATH]; + pg_crc32c crc; ControlFile = palloc(sizeof(ControlFileData)); snprintf(ControlFilePath, MAXPGPATH, "%s/global/pg_control", DataDir); @@ -49,8 +49,8 @@ get_controlfile(char *DataDir, const char *progname) #ifndef FRONTEND ereport(ERROR, (errcode_for_file_access(), - errmsg("could not open file \"%s\" for reading: %m", - ControlFilePath))); + errmsg("could not open file \"%s\" for reading: %m", + ControlFilePath))); #else { fprintf(stderr, _("%s: could not open file \"%s\" for reading: %s\n"), @@ -63,7 +63,7 @@ get_controlfile(char *DataDir, const char *progname) #ifndef FRONTEND ereport(ERROR, (errcode_for_file_access(), - errmsg("could not read file \"%s\": %m", ControlFilePath))); + errmsg("could not read file \"%s\": %m", ControlFilePath))); #else { fprintf(stderr, _("%s: could not read file \"%s\": %s\n"), @@ -77,8 +77,8 @@ get_controlfile(char *DataDir, const char *progname) /* Check the CRC. 
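/*
 * Illustrative sketch, not part of the patch: get_controlfile() above
 * validates pg_control by checksumming everything up to the 'crc' member
 * (offsetof(ControlFileData, crc)) and comparing against the stored value.
 * The struct and the checksum below are simplified stand-ins, not the real
 * ControlFileData layout or the CRC-32C implementation.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct
{
	uint64_t	system_identifier;	/* stand-in payload fields */
	uint32_t	catalog_version_no;
	uint32_t	crc;				/* checksum of everything above */
} MiniControlFile;

/* trivial additive checksum, standing in for CRC-32C */
static uint32_t
toy_checksum(const void *data, size_t len)
{
	const unsigned char *p = data;
	uint32_t	sum = 0;

	while (len-- > 0)
		sum = sum * 31 + *p++;
	return sum;
}

static bool
control_file_valid(const MiniControlFile *cf)
{
	uint32_t	crc = toy_checksum(cf, offsetof(MiniControlFile, crc));

	return crc == cf->crc;
}

int
main(void)
{
	MiniControlFile cf;

	memset(&cf, 0, sizeof(cf));
	cf.system_identifier = 42;
	cf.catalog_version_no = 201608131;
	cf.crc = toy_checksum(&cf, offsetof(MiniControlFile, crc));

	printf("%d\n", control_file_valid(&cf));	/* 1 */
	cf.catalog_version_no++;					/* corrupt a field */
	printf("%d\n", control_file_valid(&cf));	/* 0 */
	return 0;
}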
*/ INIT_CRC32C(crc); COMP_CRC32C(crc, - (char *) ControlFile, - offsetof(ControlFileData, crc)); + (char *) ControlFile, + offsetof(ControlFileData, crc)); FIN_CRC32C(crc); if (!EQ_CRC32C(crc, ControlFile->crc)) diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h index 5c6a79ec08..19437d2863 100644 --- a/src/include/access/nbtree.h +++ b/src/include/access/nbtree.h @@ -722,7 +722,7 @@ extern int32 _bt_compare(Relation rel, int keysz, ScanKey scankey, extern bool _bt_first(IndexScanDesc scan, ScanDirection dir); extern bool _bt_next(IndexScanDesc scan, ScanDirection dir); extern Buffer _bt_get_endpoint(Relation rel, uint32 level, bool rightmost, - Snapshot snapshot); + Snapshot snapshot); /* * prototypes for functions in nbtutils.c diff --git a/src/include/access/timeline.h b/src/include/access/timeline.h index ff921158de..2b333bc10e 100644 --- a/src/include/access/timeline.h +++ b/src/include/access/timeline.h @@ -26,8 +26,7 @@ typedef struct { TimeLineID tli; XLogRecPtr begin; /* inclusive */ - XLogRecPtr end; /* exclusive, InvalidXLogRecPtr means - * infinity */ + XLogRecPtr end; /* exclusive, InvalidXLogRecPtr means infinity */ } TimeLineHistoryEntry; extern List *readTimeLineHistory(TimeLineID targetTLI); diff --git a/src/include/access/visibilitymap.h b/src/include/access/visibilitymap.h index 65e78eccb6..fca99ca318 100644 --- a/src/include/access/visibilitymap.h +++ b/src/include/access/visibilitymap.h @@ -25,7 +25,8 @@ /* Flags for bit map */ #define VISIBILITYMAP_ALL_VISIBLE 0x01 #define VISIBILITYMAP_ALL_FROZEN 0x02 -#define VISIBILITYMAP_VALID_BITS 0x03 /* OR of all valid visiblitymap flags bits */ +#define VISIBILITYMAP_VALID_BITS 0x03 /* OR of all valid + * visiblitymap flags bits */ /* Macros for visibilitymap test */ #define VM_ALL_VISIBLE(r, b, v) \ @@ -39,8 +40,8 @@ extern void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf); extern bool visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf); extern void visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, - XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, - uint8 flags); + XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, + uint8 flags); extern uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf); extern void visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen); extern void visibilitymap_truncate(Relation rel, BlockNumber nheapblocks); diff --git a/src/include/access/xlog.h b/src/include/access/xlog.h index e7e91fcb03..14b7f7f459 100644 --- a/src/include/access/xlog.h +++ b/src/include/access/xlog.h @@ -277,8 +277,8 @@ extern void assign_checkpoint_completion_target(double newval, void *extra); * Starting/stopping a base backup */ extern XLogRecPtr do_pg_start_backup(const char *backupidstr, bool fast, - TimeLineID *starttli_p, StringInfo labelfile, DIR *tblspcdir, - List **tablespaces, StringInfo tblspcmapfile, bool infotbssize, + TimeLineID *starttli_p, StringInfo labelfile, DIR *tblspcdir, + List **tablespaces, StringInfo tblspcmapfile, bool infotbssize, bool needtblspcmapfile); extern XLogRecPtr do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p); diff --git a/src/include/access/xlog_internal.h b/src/include/access/xlog_internal.h index 658257b3a1..af2944c1b3 100644 --- a/src/include/access/xlog_internal.h +++ b/src/include/access/xlog_internal.h @@ -138,7 +138,7 @@ typedef XLogLongPageHeaderData *XLogLongPageHeader; #define MAXFNAMELEN 64 /* Length of XLog 
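/*
 * Illustrative sketch, not part of the patch: the visibilitymap.h hunk above
 * defines two per-heap-page status bits; in PostgreSQL an all-frozen page is
 * expected to also be all-visible.  The values below mirror the defines in
 * the hunk, but the helper is just a stand-alone demonstration of how the
 * bits are tested.
 */
#include <stdint.h>
#include <stdio.h>

#define VM_ALL_VISIBLE_BIT 0x01		/* mirrors VISIBILITYMAP_ALL_VISIBLE */
#define VM_ALL_FROZEN_BIT  0x02		/* mirrors VISIBILITYMAP_ALL_FROZEN */
#define VM_VALID_BITS      0x03		/* OR of all valid visibility map bits */

static void
describe(uint8_t status)
{
	printf("all-visible=%d all-frozen=%d\n",
		   (status & VM_ALL_VISIBLE_BIT) != 0,
		   (status & VM_ALL_FROZEN_BIT) != 0);
}

int
main(void)
{
	describe(0);										/* neither */
	describe(VM_ALL_VISIBLE_BIT);						/* visible only */
	describe(VM_ALL_VISIBLE_BIT | VM_ALL_FROZEN_BIT);	/* both */
	return 0;
}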
file name */ -#define XLOG_FNAME_LEN 24 +#define XLOG_FNAME_LEN 24 #define XLogFileName(fname, tli, logSegNo) \ snprintf(fname, MAXFNAMELEN, "%08X%08X%08X", tli, \ diff --git a/src/include/catalog/objectaddress.h b/src/include/catalog/objectaddress.h index 87aa41497d..583a1206f3 100644 --- a/src/include/catalog/objectaddress.h +++ b/src/include/catalog/objectaddress.h @@ -45,8 +45,8 @@ extern ObjectAddress get_object_address(ObjectType objtype, List *objname, LOCKMODE lockmode, bool missing_ok); extern ObjectAddress get_object_address_rv(ObjectType objtype, RangeVar *rel, - List *objname, List *objargs, Relation *relp, - LOCKMODE lockmode, bool missing_ok); + List *objname, List *objargs, Relation *relp, + LOCKMODE lockmode, bool missing_ok); extern void check_object_ownership(Oid roleid, ObjectType objtype, ObjectAddress address, diff --git a/src/include/catalog/pg_aggregate.h b/src/include/catalog/pg_aggregate.h index e16aa48c75..057c88c077 100644 --- a/src/include/catalog/pg_aggregate.h +++ b/src/include/catalog/pg_aggregate.h @@ -141,7 +141,7 @@ typedef FormData_pg_aggregate *Form_pg_aggregate; DATA(insert ( 2100 n 0 int8_avg_accum numeric_poly_avg int8_avg_combine int8_avg_serialize int8_avg_deserialize int8_avg_accum int8_avg_accum_inv numeric_poly_avg f f 0 2281 17 48 2281 48 _null_ _null_ )); DATA(insert ( 2101 n 0 int4_avg_accum int8_avg int4_avg_combine - - int4_avg_accum int4_avg_accum_inv int8_avg f f 0 1016 0 0 1016 0 "{0,0}" "{0,0}" )); DATA(insert ( 2102 n 0 int2_avg_accum int8_avg int4_avg_combine - - int2_avg_accum int2_avg_accum_inv int8_avg f f 0 1016 0 0 1016 0 "{0,0}" "{0,0}" )); -DATA(insert ( 2103 n 0 numeric_avg_accum numeric_avg numeric_avg_combine numeric_avg_serialize numeric_avg_deserialize numeric_avg_accum numeric_accum_inv numeric_avg f f 0 2281 17 128 2281 128 _null_ _null_ )); +DATA(insert ( 2103 n 0 numeric_avg_accum numeric_avg numeric_avg_combine numeric_avg_serialize numeric_avg_deserialize numeric_avg_accum numeric_accum_inv numeric_avg f f 0 2281 17 128 2281 128 _null_ _null_ )); DATA(insert ( 2104 n 0 float4_accum float8_avg float8_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0}" _null_ )); DATA(insert ( 2105 n 0 float8_accum float8_avg float8_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0}" _null_ )); DATA(insert ( 2106 n 0 interval_accum interval_avg interval_combine - - interval_accum interval_accum_inv interval_avg f f 0 1187 0 0 1187 0 "{0 second,0 second}" "{0 second,0 second}" )); @@ -154,7 +154,7 @@ DATA(insert ( 2110 n 0 float4pl - float4pl - - - - - DATA(insert ( 2111 n 0 float8pl - float8pl - - - - - f f 0 701 0 0 0 0 _null_ _null_ )); DATA(insert ( 2112 n 0 cash_pl - cash_pl - - cash_pl cash_mi - f f 0 790 0 0 790 0 _null_ _null_ )); DATA(insert ( 2113 n 0 interval_pl - interval_pl - - interval_pl interval_mi - f f 0 1186 0 0 1186 0 _null_ _null_ )); -DATA(insert ( 2114 n 0 numeric_avg_accum numeric_sum numeric_avg_combine numeric_avg_serialize numeric_avg_deserialize numeric_avg_accum numeric_accum_inv numeric_sum f f 0 2281 17 128 2281 128 _null_ _null_ )); +DATA(insert ( 2114 n 0 numeric_avg_accum numeric_sum numeric_avg_combine numeric_avg_serialize numeric_avg_deserialize numeric_avg_accum numeric_accum_inv numeric_sum f f 0 2281 17 128 2281 128 _null_ _null_ )); /* max */ DATA(insert ( 2115 n 0 int8larger - int8larger - - - - - f f 413 20 0 0 0 0 _null_ _null_ )); @@ -192,7 +192,7 @@ DATA(insert ( 2139 n 0 time_smaller - time_smaller - - - - - f f 11 DATA(insert ( 2140 n 0 timetz_smaller - timetz_smaller - - - - - f f 1552 1266 0 0 0 0 
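/*
 * Illustrative sketch, not part of the patch: the XLogFileName() macro in
 * the xlog_internal.h hunk above builds a WAL segment file name from three
 * zero-padded hex fields: timeline, then the high and low halves of the
 * segment number.  The stand-alone stand-in below assumes 16MB segments
 * (256 segments per "xlogid"), which is the default but is a build-time
 * choice in this PostgreSQL version.
 */
#include <stdint.h>
#include <stdio.h>

#define SEGMENTS_PER_XLOGID 256		/* assumption: 0x100000000 / 16MB */

static void
wal_file_name(char *dst, size_t len, uint32_t tli, uint64_t segno)
{
	snprintf(dst, len, "%08X%08X%08X",
			 tli,
			 (uint32_t) (segno / SEGMENTS_PER_XLOGID),
			 (uint32_t) (segno % SEGMENTS_PER_XLOGID));
}

int
main(void)
{
	char		fname[64];

	wal_file_name(fname, sizeof(fname), 1, 0x12345);
	printf("%s\n", fname);			/* 000000010000012300000045 */
	return 0;
}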
_null_ _null_ )); DATA(insert ( 2141 n 0 cashsmaller - cashsmaller - - - - - f f 902 790 0 0 0 0 _null_ _null_ )); DATA(insert ( 2142 n 0 timestamp_smaller - timestamp_smaller - - - - - f f 2062 1114 0 0 0 0 _null_ _null_ )); -DATA(insert ( 2143 n 0 timestamptz_smaller - timestamptz_smaller - - - - - f f 1322 1184 0 0 0 0 _null_ _null_ )); +DATA(insert ( 2143 n 0 timestamptz_smaller - timestamptz_smaller - - - - - f f 1322 1184 0 0 0 0 _null_ _null_ )); DATA(insert ( 2144 n 0 interval_smaller - interval_smaller - - - - - f f 1332 1186 0 0 0 0 _null_ _null_ )); DATA(insert ( 2145 n 0 text_smaller - text_smaller - - - - - f f 664 25 0 0 0 0 _null_ _null_ )); DATA(insert ( 2146 n 0 numeric_smaller - numeric_smaller - - - - - f f 1754 1700 0 0 0 0 _null_ _null_ )); @@ -231,9 +231,9 @@ DATA(insert ( 2152 n 0 float8_accum float8_var_samp float8_combine - - DATA(insert ( 2153 n 0 numeric_accum numeric_var_samp numeric_combine numeric_serialize numeric_deserialize numeric_accum numeric_accum_inv numeric_var_samp f f 0 2281 17 128 2281 128 _null_ _null_ )); /* stddev_pop */ -DATA(insert ( 2724 n 0 int8_accum numeric_stddev_pop numeric_combine numeric_serialize numeric_deserialize int8_accum int8_accum_inv numeric_stddev_pop f f 0 2281 17 128 2281 128 _null_ _null_ )); -DATA(insert ( 2725 n 0 int4_accum numeric_poly_stddev_pop numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int4_accum int4_accum_inv numeric_poly_stddev_pop f f 0 2281 17 48 2281 48 _null_ _null_ )); -DATA(insert ( 2726 n 0 int2_accum numeric_poly_stddev_pop numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int2_accum int2_accum_inv numeric_poly_stddev_pop f f 0 2281 17 48 2281 48 _null_ _null_ )); +DATA(insert ( 2724 n 0 int8_accum numeric_stddev_pop numeric_combine numeric_serialize numeric_deserialize int8_accum int8_accum_inv numeric_stddev_pop f f 0 2281 17 128 2281 128 _null_ _null_ )); +DATA(insert ( 2725 n 0 int4_accum numeric_poly_stddev_pop numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int4_accum int4_accum_inv numeric_poly_stddev_pop f f 0 2281 17 48 2281 48 _null_ _null_ )); +DATA(insert ( 2726 n 0 int2_accum numeric_poly_stddev_pop numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int2_accum int2_accum_inv numeric_poly_stddev_pop f f 0 2281 17 48 2281 48 _null_ _null_ )); DATA(insert ( 2727 n 0 float4_accum float8_stddev_pop float8_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0}" _null_ )); DATA(insert ( 2728 n 0 float8_accum float8_stddev_pop float8_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0}" _null_ )); DATA(insert ( 2729 n 0 numeric_accum numeric_stddev_pop numeric_combine numeric_serialize numeric_deserialize numeric_accum numeric_accum_inv numeric_stddev_pop f f 0 2281 17 128 2281 128 _null_ _null_ )); @@ -256,17 +256,17 @@ DATA(insert ( 2159 n 0 numeric_accum numeric_stddev_samp numeric_combine num /* SQL2003 binary regression aggregates */ DATA(insert ( 2818 n 0 int8inc_float8_float8 - int8pl - - - - - f f 0 20 0 0 0 0 "0" _null_ )); -DATA(insert ( 2819 n 0 float8_regr_accum float8_regr_sxx float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ )); -DATA(insert ( 2820 n 0 float8_regr_accum float8_regr_syy float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ )); -DATA(insert ( 2821 n 0 float8_regr_accum float8_regr_sxy float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ )); -DATA(insert ( 2822 n 0 float8_regr_accum float8_regr_avgx float8_regr_combine - - - - - f f 0 1022 0 0 0 0 
"{0,0,0,0,0,0}" _null_ )); -DATA(insert ( 2823 n 0 float8_regr_accum float8_regr_avgy float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ )); -DATA(insert ( 2824 n 0 float8_regr_accum float8_regr_r2 float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ )); -DATA(insert ( 2825 n 0 float8_regr_accum float8_regr_slope float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ )); -DATA(insert ( 2826 n 0 float8_regr_accum float8_regr_intercept float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ )); -DATA(insert ( 2827 n 0 float8_regr_accum float8_covar_pop float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ )); -DATA(insert ( 2828 n 0 float8_regr_accum float8_covar_samp float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ )); -DATA(insert ( 2829 n 0 float8_regr_accum float8_corr float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ )); +DATA(insert ( 2819 n 0 float8_regr_accum float8_regr_sxx float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ )); +DATA(insert ( 2820 n 0 float8_regr_accum float8_regr_syy float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ )); +DATA(insert ( 2821 n 0 float8_regr_accum float8_regr_sxy float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ )); +DATA(insert ( 2822 n 0 float8_regr_accum float8_regr_avgx float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ )); +DATA(insert ( 2823 n 0 float8_regr_accum float8_regr_avgy float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ )); +DATA(insert ( 2824 n 0 float8_regr_accum float8_regr_r2 float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ )); +DATA(insert ( 2825 n 0 float8_regr_accum float8_regr_slope float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ )); +DATA(insert ( 2826 n 0 float8_regr_accum float8_regr_intercept float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ )); +DATA(insert ( 2827 n 0 float8_regr_accum float8_covar_pop float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ )); +DATA(insert ( 2828 n 0 float8_regr_accum float8_covar_samp float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ )); +DATA(insert ( 2829 n 0 float8_regr_accum float8_corr float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ )); /* boolean-and and boolean-or */ DATA(insert ( 2517 n 0 booland_statefunc - booland_statefunc - - bool_accum bool_accum_inv bool_alltrue f f 58 16 0 0 2281 16 _null_ _null_ )); @@ -274,11 +274,11 @@ DATA(insert ( 2518 n 0 boolor_statefunc - boolor_statefunc - - bool_accum bool_a DATA(insert ( 2519 n 0 booland_statefunc - booland_statefunc - - bool_accum bool_accum_inv bool_alltrue f f 58 16 0 0 2281 16 _null_ _null_ )); /* bitwise integer */ -DATA(insert ( 2236 n 0 int2and - int2and - - - - - f f 0 21 0 0 0 0 _null_ _null_ )); +DATA(insert ( 2236 n 0 int2and - int2and - - - - - f f 0 21 0 0 0 0 _null_ _null_ )); DATA(insert ( 2237 n 0 int2or - int2or - - - - - f f 0 21 0 0 0 0 _null_ _null_ )); -DATA(insert ( 2238 n 0 int4and - int4and - - - - - f f 0 23 0 0 0 0 _null_ _null_ )); +DATA(insert ( 2238 n 0 int4and - int4and - - - - - f f 0 23 0 0 0 0 _null_ _null_ )); DATA(insert ( 2239 n 0 int4or - int4or - - - - - f f 0 23 0 0 0 0 _null_ _null_ )); -DATA(insert ( 2240 n 0 int8and - int8and - - - - - f f 0 20 0 0 0 0 _null_ _null_ )); +DATA(insert ( 2240 n 0 int8and - int8and - - - - - 
f f 0 20 0 0 0 0 _null_ _null_ )); DATA(insert ( 2241 n 0 int8or - int8or - - - - - f f 0 20 0 0 0 0 _null_ _null_ )); DATA(insert ( 2242 n 0 bitand - bitand - - - - - f f 0 1560 0 0 0 0 _null_ _null_ )); DATA(insert ( 2243 n 0 bitor - bitor - - - - - f f 0 1560 0 0 0 0 _null_ _null_ )); @@ -288,7 +288,7 @@ DATA(insert ( 2901 n 0 xmlconcat2 - - - - - - - f f 0 142 0 0 0 0 /* array */ DATA(insert ( 2335 n 0 array_agg_transfn array_agg_finalfn - - - - - - t f 0 2281 0 0 0 0 _null_ _null_ )); -DATA(insert ( 4053 n 0 array_agg_array_transfn array_agg_array_finalfn - - - - - - t f 0 2281 0 0 0 0 _null_ _null_ )); +DATA(insert ( 4053 n 0 array_agg_array_transfn array_agg_array_finalfn - - - - - - t f 0 2281 0 0 0 0 _null_ _null_ )); /* text */ DATA(insert ( 3538 n 0 string_agg_transfn string_agg_finalfn - - - - - - f f 0 2281 0 0 0 0 _null_ _null_ )); diff --git a/src/include/catalog/pg_class.h b/src/include/catalog/pg_class.h index fdc67c2eb3..e57b81c417 100644 --- a/src/include/catalog/pg_class.h +++ b/src/include/catalog/pg_class.h @@ -66,7 +66,8 @@ CATALOG(pg_class,1259) BKI_BOOTSTRAP BKI_ROWTYPE_OID(83) BKI_SCHEMA_MACRO bool relhastriggers; /* has (or has had) any TRIGGERs */ bool relhassubclass; /* has (or has had) derived classes */ bool relrowsecurity; /* row security is enabled or not */ - bool relforcerowsecurity; /* row security forced for owners or not */ + bool relforcerowsecurity; /* row security forced for owners or + * not */ bool relispopulated; /* matview currently holds query results */ char relreplident; /* see REPLICA_IDENTITY_xxx constants */ TransactionId relfrozenxid; /* all Xids < this are frozen in this rel */ diff --git a/src/include/catalog/pg_control.h b/src/include/catalog/pg_control.h index 7ba396df51..fefcac8432 100644 --- a/src/include/catalog/pg_control.h +++ b/src/include/catalog/pg_control.h @@ -54,8 +54,8 @@ typedef struct CheckPoint /* * Oldest XID still running. This is only needed to initialize hot standby * mode from an online checkpoint, so we only bother calculating this for - * online checkpoints and only when wal_level is replica. Otherwise - * it's set to InvalidTransactionId. + * online checkpoints and only when wal_level is replica. Otherwise it's + * set to InvalidTransactionId. 
*/ TransactionId oldestActiveXid; } CheckPoint; diff --git a/src/include/catalog/pg_opclass.h b/src/include/catalog/pg_opclass.h index b564046dea..6c82d94600 100644 --- a/src/include/catalog/pg_opclass.h +++ b/src/include/catalog/pg_opclass.h @@ -228,7 +228,7 @@ DATA(insert ( 403 range_ops PGNSP PGUID 3901 3831 t 0 )); DATA(insert ( 405 range_ops PGNSP PGUID 3903 3831 t 0 )); DATA(insert ( 783 range_ops PGNSP PGUID 3919 3831 t 0 )); DATA(insert ( 4000 range_ops PGNSP PGUID 3474 3831 t 0 )); -DATA(insert ( 4000 box_ops PGNSP PGUID 5000 603 t 0 )); +DATA(insert ( 4000 box_ops PGNSP PGUID 5000 603 t 0 )); DATA(insert ( 4000 quad_point_ops PGNSP PGUID 4015 600 t 0 )); DATA(insert ( 4000 kd_point_ops PGNSP PGUID 4016 600 f 0 )); DATA(insert ( 4000 text_ops PGNSP PGUID 4017 25 t 0 )); diff --git a/src/include/catalog/pg_operator.h b/src/include/catalog/pg_operator.h index a5e4a02ebc..b8f06b3500 100644 --- a/src/include/catalog/pg_operator.h +++ b/src/include/catalog/pg_operator.h @@ -142,7 +142,7 @@ DESCR("prepend element onto front of array"); DATA(insert OID = 375 ( "||" PGNSP PGUID b f f 2277 2277 2277 0 0 array_cat - - )); DESCR("concatenate"); -DATA(insert OID = 352 ( "=" PGNSP PGUID b f t 28 28 16 352 3315 xideq eqsel eqjoinsel )); +DATA(insert OID = 352 ( "=" PGNSP PGUID b f t 28 28 16 352 3315 xideq eqsel eqjoinsel )); DESCR("equal"); DATA(insert OID = 353 ( "=" PGNSP PGUID b f f 28 23 16 0 3316 xideqint4 eqsel eqjoinsel )); DESCR("equal"); @@ -1676,7 +1676,7 @@ DESCR("AND-concatenate"); DATA(insert OID = 3681 ( "||" PGNSP PGUID b f f 3615 3615 3615 0 0 tsquery_or - - )); DESCR("OR-concatenate"); /* <-> operation calls tsquery_phrase, but function is polymorphic. So, point to OID of the tsquery_phrase */ -DATA(insert OID = 5005 ( "<->" PGNSP PGUID b f f 3615 3615 3615 0 0 5003 - - )); +DATA(insert OID = 5005 ( "<->" PGNSP PGUID b f f 3615 3615 3615 0 0 5003 - - )); DESCR("phrase-concatenate"); DATA(insert OID = 3682 ( "!!" 
PGNSP PGUID l f f 0 3615 3615 0 0 tsquery_not - - )); DESCR("NOT tsquery"); diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h index 2e9d7be3f7..f33c3ff032 100644 --- a/src/include/catalog/pg_proc.h +++ b/src/include/catalog/pg_proc.h @@ -255,39 +255,39 @@ DATA(insert OID = 103 ( scalarltsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 DESCR("restriction selectivity of < and related operators on scalar datatypes"); DATA(insert OID = 104 ( scalargtsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ scalargtsel _null_ _null_ _null_ )); DESCR("restriction selectivity of > and related operators on scalar datatypes"); -DATA(insert OID = 105 ( eqjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ eqjoinsel _null_ _null_ _null_ )); +DATA(insert OID = 105 ( eqjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ eqjoinsel _null_ _null_ _null_ )); DESCR("join selectivity of = and related operators"); -DATA(insert OID = 106 ( neqjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ neqjoinsel _null_ _null_ _null_ )); +DATA(insert OID = 106 ( neqjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ neqjoinsel _null_ _null_ _null_ )); DESCR("join selectivity of <> and related operators"); -DATA(insert OID = 107 ( scalarltjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ scalarltjoinsel _null_ _null_ _null_ )); +DATA(insert OID = 107 ( scalarltjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ scalarltjoinsel _null_ _null_ _null_ )); DESCR("join selectivity of < and related operators on scalar datatypes"); -DATA(insert OID = 108 ( scalargtjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ scalargtjoinsel _null_ _null_ _null_ )); +DATA(insert OID = 108 ( scalargtjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ scalargtjoinsel _null_ _null_ _null_ )); DESCR("join selectivity of > and related operators on scalar datatypes"); DATA(insert OID = 109 ( unknownin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 705 "2275" _null_ _null_ _null_ _null_ _null_ unknownin _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 110 ( unknownout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "705" _null_ _null_ _null_ _null_ _null_ unknownout _null_ _null_ _null_ )); +DATA(insert OID = 110 ( unknownout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "705" _null_ _null_ _null_ _null_ _null_ unknownout _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 111 ( numeric_fac PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ numeric_fac _null_ _null_ _null_ )); DATA(insert OID = 115 ( box_above_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_above_eq _null_ _null_ _null_ )); DATA(insert OID = 116 ( box_below_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_below_eq _null_ _null_ _null_ )); -DATA(insert OID = 117 ( point_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "2275" _null_ _null_ _null_ _null_ _null_ point_in _null_ _null_ 
_null_ )); +DATA(insert OID = 117 ( point_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "2275" _null_ _null_ _null_ _null_ _null_ point_in _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 118 ( point_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "600" _null_ _null_ _null_ _null_ _null_ point_out _null_ _null_ _null_ )); +DATA(insert OID = 118 ( point_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "600" _null_ _null_ _null_ _null_ _null_ point_out _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 119 ( lseg_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 601 "2275" _null_ _null_ _null_ _null_ _null_ lseg_in _null_ _null_ _null_ )); +DATA(insert OID = 119 ( lseg_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 601 "2275" _null_ _null_ _null_ _null_ _null_ lseg_in _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 120 ( lseg_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "601" _null_ _null_ _null_ _null_ _null_ lseg_out _null_ _null_ _null_ )); +DATA(insert OID = 120 ( lseg_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "601" _null_ _null_ _null_ _null_ _null_ lseg_out _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 121 ( path_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 602 "2275" _null_ _null_ _null_ _null_ _null_ path_in _null_ _null_ _null_ )); +DATA(insert OID = 121 ( path_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 602 "2275" _null_ _null_ _null_ _null_ _null_ path_in _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 122 ( path_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "602" _null_ _null_ _null_ _null_ _null_ path_out _null_ _null_ _null_ )); +DATA(insert OID = 122 ( path_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "602" _null_ _null_ _null_ _null_ _null_ path_out _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 123 ( box_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 603 "2275" _null_ _null_ _null_ _null_ _null_ box_in _null_ _null_ _null_ )); +DATA(insert OID = 123 ( box_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 603 "2275" _null_ _null_ _null_ _null_ _null_ box_in _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 124 ( box_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "603" _null_ _null_ _null_ _null_ _null_ box_out _null_ _null_ _null_ )); +DATA(insert OID = 124 ( box_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "603" _null_ _null_ _null_ _null_ _null_ box_out _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 125 ( box_overlap PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_overlap _null_ _null_ _null_ )); DATA(insert OID = 126 ( box_ge PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_ge _null_ _null_ _null_ )); @@ -305,7 +305,7 @@ DATA(insert OID = 137 ( on_ppath PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 DATA(insert OID = 138 ( box_center PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "603" _null_ _null_ _null_ _null_ _null_ box_center _null_ _null_ _null_ )); DATA(insert OID = 139 ( areasel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ areasel _null_ _null_ _null_ )); DESCR("restriction selectivity for area-comparison operators"); -DATA(insert OID = 140 ( areajoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ areajoinsel _null_ _null_ _null_ )); +DATA(insert OID = 140 ( areajoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" 
_null_ _null_ _null_ _null_ _null_ areajoinsel _null_ _null_ _null_ )); DESCR("join selectivity for area-comparison operators"); DATA(insert OID = 141 ( int4mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "23 23" _null_ _null_ _null_ _null_ _null_ int4mul _null_ _null_ _null_ )); DATA(insert OID = 144 ( int4ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "23 23" _null_ _null_ _null_ _null_ _null_ int4ne _null_ _null_ _null_ )); @@ -368,9 +368,9 @@ DESCR("I/O"); /* OIDS 200 - 299 */ -DATA(insert OID = 200 ( float4in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 700 "2275" _null_ _null_ _null_ _null_ _null_ float4in _null_ _null_ _null_ )); +DATA(insert OID = 200 ( float4in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 700 "2275" _null_ _null_ _null_ _null_ _null_ float4in _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 201 ( float4out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "700" _null_ _null_ _null_ _null_ _null_ float4out _null_ _null_ _null_ )); +DATA(insert OID = 201 ( float4out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "700" _null_ _null_ _null_ _null_ _null_ float4out _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 202 ( float4mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 700 "700 700" _null_ _null_ _null_ _null_ _null_ float4mul _null_ _null_ _null_ )); DATA(insert OID = 203 ( float4div PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 700 "700 700" _null_ _null_ _null_ _null_ _null_ float4div _null_ _null_ _null_ )); @@ -388,9 +388,9 @@ DESCR("smaller of two"); DATA(insert OID = 212 ( int4um PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "23" _null_ _null_ _null_ _null_ _null_ int4um _null_ _null_ _null_ )); DATA(insert OID = 213 ( int2um PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 21 "21" _null_ _null_ _null_ _null_ _null_ int2um _null_ _null_ _null_ )); -DATA(insert OID = 214 ( float8in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "2275" _null_ _null_ _null_ _null_ _null_ float8in _null_ _null_ _null_ )); +DATA(insert OID = 214 ( float8in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "2275" _null_ _null_ _null_ _null_ _null_ float8in _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 215 ( float8out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "701" _null_ _null_ _null_ _null_ _null_ float8out _null_ _null_ _null_ )); +DATA(insert OID = 215 ( float8out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "701" _null_ _null_ _null_ _null_ _null_ float8out _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 216 ( float8mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ float8mul _null_ _null_ _null_ )); DATA(insert OID = 217 ( float8div PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ float8div _null_ _null_ _null_ )); @@ -440,24 +440,24 @@ DATA(insert OID = 238 ( int2 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 21 DESCR("convert float4 to int2"); DATA(insert OID = 239 ( line_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "628 628" _null_ _null_ _null_ _null_ _null_ line_distance _null_ _null_ _null_ )); -DATA(insert OID = 240 ( abstimein PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 702 "2275" _null_ _null_ _null_ _null_ _null_ abstimein _null_ _null_ _null_ )); +DATA(insert OID = 240 ( abstimein PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 702 "2275" _null_ _null_ _null_ _null_ _null_ abstimein _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 241 ( abstimeout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "702" _null_ _null_ _null_ _null_ _null_ 
abstimeout _null_ _null_ _null_ )); +DATA(insert OID = 241 ( abstimeout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "702" _null_ _null_ _null_ _null_ _null_ abstimeout _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 242 ( reltimein PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 703 "2275" _null_ _null_ _null_ _null_ _null_ reltimein _null_ _null_ _null_ )); +DATA(insert OID = 242 ( reltimein PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 703 "2275" _null_ _null_ _null_ _null_ _null_ reltimein _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 243 ( reltimeout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "703" _null_ _null_ _null_ _null_ _null_ reltimeout _null_ _null_ _null_ )); +DATA(insert OID = 243 ( reltimeout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "703" _null_ _null_ _null_ _null_ _null_ reltimeout _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 244 ( timepl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 702 "702 703" _null_ _null_ _null_ _null_ _null_ timepl _null_ _null_ _null_ )); DATA(insert OID = 245 ( timemi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 702 "702 703" _null_ _null_ _null_ _null_ _null_ timemi _null_ _null_ _null_ )); -DATA(insert OID = 246 ( tintervalin PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 704 "2275" _null_ _null_ _null_ _null_ _null_ tintervalin _null_ _null_ _null_ )); +DATA(insert OID = 246 ( tintervalin PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 704 "2275" _null_ _null_ _null_ _null_ _null_ tintervalin _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 247 ( tintervalout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "704" _null_ _null_ _null_ _null_ _null_ tintervalout _null_ _null_ _null_ )); +DATA(insert OID = 247 ( tintervalout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "704" _null_ _null_ _null_ _null_ _null_ tintervalout _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 248 ( intinterval PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "702 704" _null_ _null_ _null_ _null_ _null_ intinterval _null_ _null_ _null_ )); DATA(insert OID = 249 ( tintervalrel PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 703 "704" _null_ _null_ _null_ _null_ _null_ tintervalrel _null_ _null_ _null_ )); DESCR("tinterval to reltime"); -DATA(insert OID = 250 ( timenow PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 702 "" _null_ _null_ _null_ _null_ _null_ timenow _null_ _null_ _null_ )); +DATA(insert OID = 250 ( timenow PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 702 "" _null_ _null_ _null_ _null_ _null_ timenow _null_ _null_ _null_ )); DESCR("current date and time (abstime)"); DATA(insert OID = 251 ( abstimeeq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "702 702" _null_ _null_ _null_ _null_ _null_ abstimeeq _null_ _null_ _null_ )); DATA(insert OID = 252 ( abstimene PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "702 702" _null_ _null_ _null_ _null_ _null_ abstimene _null_ _null_ _null_ )); @@ -577,9 +577,9 @@ DATA(insert OID = 343 ( poly_overright PGNSP PGUID 12 1 0 0 0 f f f f t f i DATA(insert OID = 344 ( poly_right PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "604 604" _null_ _null_ _null_ _null_ _null_ poly_right _null_ _null_ _null_ )); DATA(insert OID = 345 ( poly_contained PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "604 604" _null_ _null_ _null_ _null_ _null_ poly_contained _null_ _null_ _null_ )); DATA(insert OID = 346 ( poly_overlap PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "604 604" _null_ _null_ _null_ _null_ _null_ poly_overlap _null_ _null_ _null_ )); -DATA(insert OID = 347 ( poly_in PGNSP PGUID 12 1 0 0 0 f f f f t 
f i s 1 0 604 "2275" _null_ _null_ _null_ _null_ _null_ poly_in _null_ _null_ _null_ )); +DATA(insert OID = 347 ( poly_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 604 "2275" _null_ _null_ _null_ _null_ _null_ poly_in _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 348 ( poly_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "604" _null_ _null_ _null_ _null_ _null_ poly_out _null_ _null_ _null_ )); +DATA(insert OID = 348 ( poly_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "604" _null_ _null_ _null_ _null_ _null_ poly_out _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 350 ( btint2cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "21 21" _null_ _null_ _null_ _null_ _null_ btint2cmp _null_ _null_ _null_ )); @@ -890,9 +890,9 @@ DESCR("restriction selectivity for array-containment operators"); DATA(insert OID = 3818 ( arraycontjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ arraycontjoinsel _null_ _null_ _null_ )); DESCR("join selectivity for array-containment operators"); -DATA(insert OID = 760 ( smgrin PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 210 "2275" _null_ _null_ _null_ _null_ _null_ smgrin _null_ _null_ _null_ )); +DATA(insert OID = 760 ( smgrin PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 210 "2275" _null_ _null_ _null_ _null_ _null_ smgrin _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 761 ( smgrout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "210" _null_ _null_ _null_ _null_ _null_ smgrout _null_ _null_ _null_ )); +DATA(insert OID = 761 ( smgrout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "210" _null_ _null_ _null_ _null_ _null_ smgrout _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 762 ( smgreq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "210 210" _null_ _null_ _null_ _null_ _null_ smgreq _null_ _null_ _null_ )); DESCR("storage manager"); @@ -901,7 +901,7 @@ DESCR("storage manager"); DATA(insert OID = 764 ( lo_import PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 26 "25" _null_ _null_ _null_ _null_ _null_ lo_import _null_ _null_ _null_ )); DESCR("large object import"); -DATA(insert OID = 767 ( lo_import PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 26 "25 26" _null_ _null_ _null_ _null_ _null_ lo_import_with_oid _null_ _null_ _null_ )); +DATA(insert OID = 767 ( lo_import PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 26 "25 26" _null_ _null_ _null_ _null_ _null_ lo_import_with_oid _null_ _null_ _null_ )); DESCR("large object import"); DATA(insert OID = 765 ( lo_export PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 23 "26 25" _null_ _null_ _null_ _null_ _null_ lo_export _null_ _null_ _null_ )); DESCR("large object export"); @@ -950,7 +950,7 @@ DESCR("convert char to char(n)"); DATA(insert OID = 861 ( current_database PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 19 "" _null_ _null_ _null_ _null_ _null_ current_database _null_ _null_ _null_ )); DESCR("name of the current database"); -DATA(insert OID = 817 ( current_query PGNSP PGUID 12 1 0 0 0 f f f f f f v r 0 0 25 "" _null_ _null_ _null_ _null_ _null_ current_query _null_ _null_ _null_ )); +DATA(insert OID = 817 ( current_query PGNSP PGUID 12 1 0 0 0 f f f f f f v r 0 0 25 "" _null_ _null_ _null_ _null_ _null_ current_query _null_ _null_ _null_ )); DESCR("get the currently executing query"); DATA(insert OID = 862 ( int4_mul_cash PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "23 790" _null_ _null_ _null_ _null_ _null_ int4_mul_cash _null_ _null_ _null_ )); @@ -960,9 +960,9 @@ DATA(insert OID = 865 ( cash_div_int4 PGNSP PGUID 
12 1 0 0 0 f f f f t f i DATA(insert OID = 866 ( cash_mul_int2 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "790 21" _null_ _null_ _null_ _null_ _null_ cash_mul_int2 _null_ _null_ _null_ )); DATA(insert OID = 867 ( cash_div_int2 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "790 21" _null_ _null_ _null_ _null_ _null_ cash_div_int2 _null_ _null_ _null_ )); -DATA(insert OID = 886 ( cash_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 790 "2275" _null_ _null_ _null_ _null_ _null_ cash_in _null_ _null_ _null_ )); +DATA(insert OID = 886 ( cash_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 790 "2275" _null_ _null_ _null_ _null_ _null_ cash_in _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 887 ( cash_out PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "790" _null_ _null_ _null_ _null_ _null_ cash_out _null_ _null_ _null_ )); +DATA(insert OID = 887 ( cash_out PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "790" _null_ _null_ _null_ _null_ _null_ cash_out _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 888 ( cash_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_eq _null_ _null_ _null_ )); DATA(insert OID = 889 ( cash_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_ne _null_ _null_ _null_ )); @@ -982,9 +982,9 @@ DATA(insert OID = 919 ( flt8_mul_cash PGNSP PGUID 12 1 0 0 0 f f f f t f i DATA(insert OID = 935 ( cash_words PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "790" _null_ _null_ _null_ _null_ _null_ cash_words _null_ _null_ _null_ )); DESCR("output money amount as words"); DATA(insert OID = 3822 ( cash_div_cash PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "790 790" _null_ _null_ _null_ _null_ _null_ cash_div_cash _null_ _null_ _null_ )); -DATA(insert OID = 3823 ( numeric PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1700 "790" _null_ _null_ _null_ _null_ _null_ cash_numeric _null_ _null_ _null_ )); +DATA(insert OID = 3823 ( numeric PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1700 "790" _null_ _null_ _null_ _null_ _null_ cash_numeric _null_ _null_ _null_ )); DESCR("convert money to numeric"); -DATA(insert OID = 3824 ( money PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 790 "1700" _null_ _null_ _null_ _null_ _null_ numeric_cash _null_ _null_ _null_ )); +DATA(insert OID = 3824 ( money PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 790 "1700" _null_ _null_ _null_ _null_ _null_ numeric_cash _null_ _null_ _null_ )); DESCR("convert numeric to money"); DATA(insert OID = 3811 ( money PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 790 "23" _null_ _null_ _null_ _null_ _null_ int4_cash _null_ _null_ _null_ )); DESCR("convert int4 to money"); @@ -1179,9 +1179,9 @@ DATA(insert OID = 1143 ( time_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 DESCR("I/O"); DATA(insert OID = 1144 ( time_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "1083" _null_ _null_ _null_ _null_ _null_ time_out _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2909 ( timetypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ timetypmodin _null_ _null_ _null_ )); +DATA(insert OID = 2909 ( timetypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ timetypmodin _null_ _null_ _null_ )); DESCR("I/O typmod"); -DATA(insert OID = 2910 ( timetypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ timetypmodout _null_ _null_ _null_ )); +DATA(insert OID = 2910 ( timetypmodout PGNSP PGUID 12 1 0 0 0 f f f f 
t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ timetypmodout _null_ _null_ _null_ )); DESCR("I/O typmod"); DATA(insert OID = 1145 ( time_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1083 1083" _null_ _null_ _null_ _null_ _null_ time_eq _null_ _null_ _null_ )); @@ -1194,9 +1194,9 @@ DATA(insert OID = 1150 ( timestamptz_in PGNSP PGUID 12 1 0 0 0 f f f f t f s DESCR("I/O"); DATA(insert OID = 1151 ( timestamptz_out PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "1184" _null_ _null_ _null_ _null_ _null_ timestamptz_out _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2907 ( timestamptztypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ timestamptztypmodin _null_ _null_ _null_ )); +DATA(insert OID = 2907 ( timestamptztypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ timestamptztypmodin _null_ _null_ _null_ )); DESCR("I/O typmod"); -DATA(insert OID = 2908 ( timestamptztypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ timestamptztypmodout _null_ _null_ _null_ )); +DATA(insert OID = 2908 ( timestamptztypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ timestamptztypmodout _null_ _null_ _null_ )); DESCR("I/O typmod"); DATA(insert OID = 1152 ( timestamptz_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1184 1184" _null_ _null_ _null_ _null_ _null_ timestamp_eq _null_ _null_ _null_ )); DATA(insert OID = 1153 ( timestamptz_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1184 1184" _null_ _null_ _null_ _null_ _null_ timestamp_ne _null_ _null_ _null_ )); @@ -1215,9 +1215,9 @@ DATA(insert OID = 1160 ( interval_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s DESCR("I/O"); DATA(insert OID = 1161 ( interval_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "1186" _null_ _null_ _null_ _null_ _null_ interval_out _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2903 ( intervaltypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ intervaltypmodin _null_ _null_ _null_ )); +DATA(insert OID = 2903 ( intervaltypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ intervaltypmodin _null_ _null_ _null_ )); DESCR("I/O typmod"); -DATA(insert OID = 2904 ( intervaltypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ intervaltypmodout _null_ _null_ _null_ )); +DATA(insert OID = 2904 ( intervaltypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ intervaltypmodout _null_ _null_ _null_ )); DESCR("I/O typmod"); DATA(insert OID = 1162 ( interval_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1186 1186" _null_ _null_ _null_ _null_ _null_ interval_eq _null_ _null_ _null_ )); DATA(insert OID = 1163 ( interval_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1186 1186" _null_ _null_ _null_ _null_ _null_ interval_ne _null_ _null_ _null_ )); @@ -1250,7 +1250,7 @@ DATA(insert OID = 1178 ( date PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1 DESCR("convert timestamp with time zone to date"); DATA(insert OID = 1179 ( date PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1082 "702" _null_ _null_ _null_ _null_ _null_ abstime_date _null_ _null_ _null_ )); DESCR("convert abstime to date"); -DATA(insert OID = 1180 ( abstime PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 702 "1184" _null_ _null_ _null_ _null_ _null_ timestamptz_abstime _null_ _null_ _null_ )); 
+DATA(insert OID = 1180 ( abstime PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 702 "1184" _null_ _null_ _null_ _null_ _null_ timestamptz_abstime _null_ _null_ _null_ )); DESCR("convert timestamp with time zone to abstime"); DATA(insert OID = 1181 ( age PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 23 "28" _null_ _null_ _null_ _null_ _null_ xid_age _null_ _null_ _null_ )); DESCR("age of a transaction ID, in transactions before current transaction"); @@ -1379,11 +1379,11 @@ DESCR("current clock time"); DATA(insert OID = 1300 ( positionsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ positionsel _null_ _null_ _null_ )); DESCR("restriction selectivity for position-comparison operators"); -DATA(insert OID = 1301 ( positionjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ positionjoinsel _null_ _null_ _null_ )); +DATA(insert OID = 1301 ( positionjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ positionjoinsel _null_ _null_ _null_ )); DESCR("join selectivity for position-comparison operators"); DATA(insert OID = 1302 ( contsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ contsel _null_ _null_ _null_ )); DESCR("restriction selectivity for containment comparison operators"); -DATA(insert OID = 1303 ( contjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ contjoinsel _null_ _null_ _null_ )); +DATA(insert OID = 1303 ( contjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ contjoinsel _null_ _null_ _null_ )); DESCR("join selectivity for containment comparison operators"); DATA(insert OID = 1304 ( overlaps PGNSP PGUID 12 1 0 0 0 f f f f f f i s 4 0 16 "1184 1184 1184 1184" _null_ _null_ _null_ _null_ _null_ overlaps_timestamp _null_ _null_ _null_ )); @@ -1408,9 +1408,9 @@ DATA(insert OID = 1312 ( timestamp_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s DESCR("I/O"); DATA(insert OID = 1313 ( timestamp_out PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "1114" _null_ _null_ _null_ _null_ _null_ timestamp_out _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2905 ( timestamptypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ timestamptypmodin _null_ _null_ _null_ )); +DATA(insert OID = 2905 ( timestamptypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ timestamptypmodin _null_ _null_ _null_ )); DESCR("I/O typmod"); -DATA(insert OID = 2906 ( timestamptypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ timestamptypmodout _null_ _null_ _null_ )); +DATA(insert OID = 2906 ( timestamptypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ timestamptypmodout _null_ _null_ _null_ )); DESCR("I/O typmod"); DATA(insert OID = 1314 ( timestamptz_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "1184 1184" _null_ _null_ _null_ _null_ _null_ timestamp_cmp _null_ _null_ _null_ )); DESCR("less-equal-greater"); @@ -1464,9 +1464,9 @@ DATA(insert OID = 1350 ( timetz_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 DESCR("I/O"); DATA(insert OID = 1351 ( timetz_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "1266" _null_ _null_ _null_ _null_ _null_ timetz_out _null_ _null_ _null_ )); DESCR("I/O"); 
-DATA(insert OID = 2911 ( timetztypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ timetztypmodin _null_ _null_ _null_ )); +DATA(insert OID = 2911 ( timetztypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ timetztypmodin _null_ _null_ _null_ )); DESCR("I/O typmod"); -DATA(insert OID = 2912 ( timetztypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ timetztypmodout _null_ _null_ _null_ )); +DATA(insert OID = 2912 ( timetztypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ timetztypmodout _null_ _null_ _null_ )); DESCR("I/O typmod"); DATA(insert OID = 1352 ( timetz_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1266 1266" _null_ _null_ _null_ _null_ _null_ timetz_eq _null_ _null_ _null_ )); DATA(insert OID = 1353 ( timetz_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1266 1266" _null_ _null_ _null_ _null_ _null_ timetz_ne _null_ _null_ _null_ )); @@ -1482,7 +1482,7 @@ DESCR("convert date and time with time zone to timestamp with time zone"); DATA(insert OID = 1364 ( time PGNSP PGUID 14 1 0 0 0 f f f f t f s s 1 0 1083 "702" _null_ _null_ _null_ _null_ _null_ "select cast(cast($1 as timestamp without time zone) as pg_catalog.time)" _null_ _null_ _null_ )); DESCR("convert abstime to time"); -DATA(insert OID = 1367 ( character_length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1042" _null_ _null_ _null_ _null_ _null_ bpcharlen _null_ _null_ _null_ )); +DATA(insert OID = 1367 ( character_length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1042" _null_ _null_ _null_ _null_ _null_ bpcharlen _null_ _null_ _null_ )); DESCR("character length"); DATA(insert OID = 1369 ( character_length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "25" _null_ _null_ _null_ _null_ _null_ textlen _null_ _null_ _null_ )); DESCR("character length"); @@ -1552,12 +1552,12 @@ DESCR("convert name to varchar"); DATA(insert OID = 1402 ( current_schema PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 19 "" _null_ _null_ _null_ _null_ _null_ current_schema _null_ _null_ _null_ )); DESCR("current schema name"); -DATA(insert OID = 1403 ( current_schemas PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1003 "16" _null_ _null_ _null_ _null_ _null_ current_schemas _null_ _null_ _null_ )); +DATA(insert OID = 1403 ( current_schemas PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1003 "16" _null_ _null_ _null_ _null_ _null_ current_schemas _null_ _null_ _null_ )); DESCR("current schema search list"); DATA(insert OID = 1404 ( overlay PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 25 "25 25 23 23" _null_ _null_ _null_ _null_ _null_ textoverlay _null_ _null_ _null_ )); DESCR("substitute portion of string"); -DATA(insert OID = 1405 ( overlay PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 25 "25 25 23" _null_ _null_ _null_ _null_ _null_ textoverlay_no_len _null_ _null_ _null_ )); +DATA(insert OID = 1405 ( overlay PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 25 "25 25 23" _null_ _null_ _null_ _null_ _null_ textoverlay_no_len _null_ _null_ _null_ )); DESCR("substitute portion of string"); DATA(insert OID = 1406 ( isvertical PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "600 600" _null_ _null_ _null_ _null_ _null_ point_vert _null_ _null_ _null_ )); @@ -1735,9 +1735,9 @@ DATA(insert OID = 1564 ( bit_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 15 DESCR("I/O"); DATA(insert OID = 1565 ( bit_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "1560" _null_ _null_ _null_ _null_ 
_null_ bit_out _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2919 ( bittypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ bittypmodin _null_ _null_ _null_ )); +DATA(insert OID = 2919 ( bittypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ bittypmodin _null_ _null_ _null_ )); DESCR("I/O typmod"); -DATA(insert OID = 2920 ( bittypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ bittypmodout _null_ _null_ _null_ )); +DATA(insert OID = 2920 ( bittypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ bittypmodout _null_ _null_ _null_ )); DESCR("I/O typmod"); DATA(insert OID = 1569 ( like PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ textlike _null_ _null_ _null_ )); @@ -1751,9 +1751,9 @@ DESCR("does not match LIKE expression"); /* SEQUENCE functions */ -DATA(insert OID = 1574 ( nextval PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 20 "2205" _null_ _null_ _null_ _null_ _null_ nextval_oid _null_ _null_ _null_ )); +DATA(insert OID = 1574 ( nextval PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 20 "2205" _null_ _null_ _null_ _null_ _null_ nextval_oid _null_ _null_ _null_ )); DESCR("sequence next value"); -DATA(insert OID = 1575 ( currval PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 20 "2205" _null_ _null_ _null_ _null_ _null_ currval_oid _null_ _null_ _null_ )); +DATA(insert OID = 1575 ( currval PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 20 "2205" _null_ _null_ _null_ _null_ _null_ currval_oid _null_ _null_ _null_ )); DESCR("sequence current value"); DATA(insert OID = 1576 ( setval PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 20 "2205 20" _null_ _null_ _null_ _null_ _null_ setval_oid _null_ _null_ _null_ )); DESCR("set sequence value"); @@ -1766,9 +1766,9 @@ DATA(insert OID = 1579 ( varbit_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 DESCR("I/O"); DATA(insert OID = 1580 ( varbit_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "1562" _null_ _null_ _null_ _null_ _null_ varbit_out _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2902 ( varbittypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ varbittypmodin _null_ _null_ _null_ )); +DATA(insert OID = 2902 ( varbittypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ varbittypmodin _null_ _null_ _null_ )); DESCR("I/O typmod"); -DATA(insert OID = 2921 ( varbittypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ varbittypmodout _null_ _null_ _null_ )); +DATA(insert OID = 2921 ( varbittypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ varbittypmodout _null_ _null_ _null_ )); DESCR("I/O typmod"); DATA(insert OID = 1581 ( biteq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1560 1560" _null_ _null_ _null_ _null_ _null_ biteq _null_ _null_ _null_ )); @@ -1911,7 +1911,7 @@ DATA(insert OID = 2764 ( regexp_matches PGNSP PGUID 12 1 10 0 0 f f f f t t i DESCR("find all match groups for regexp"); DATA(insert OID = 2088 ( split_part PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 25 "25 25 23" _null_ _null_ _null_ _null_ _null_ split_text _null_ _null_ _null_ )); DESCR("split string by field_sep and return field_num"); -DATA(insert OID = 2765 ( regexp_split_to_table PGNSP PGUID 12 1 1000 0 0 f f f f t t i s 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ 
regexp_split_to_table_no_flags _null_ _null_ _null_ )); +DATA(insert OID = 2765 ( regexp_split_to_table PGNSP PGUID 12 1 1000 0 0 f f f f t t i s 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ regexp_split_to_table_no_flags _null_ _null_ _null_ )); DESCR("split string by pattern"); DATA(insert OID = 2766 ( regexp_split_to_table PGNSP PGUID 12 1 1000 0 0 f f f f t t i s 3 0 25 "25 25 25" _null_ _null_ _null_ _null_ _null_ regexp_split_to_table _null_ _null_ _null_ )); DESCR("split string by pattern"); @@ -1994,9 +1994,9 @@ DESCR("list of SQL keywords"); DATA(insert OID = 2289 ( pg_options_to_table PGNSP PGUID 12 1 3 0 0 f f f f t t s s 1 0 2249 "1009" "{1009,25,25}" "{i,o,o}" "{options_array,option_name,option_value}" _null_ _null_ pg_options_to_table _null_ _null_ _null_ )); DESCR("convert generic options array to name/value table"); -DATA(insert OID = 1619 ( pg_typeof PGNSP PGUID 12 1 0 0 0 f f f f f f s s 1 0 2206 "2276" _null_ _null_ _null_ _null_ _null_ pg_typeof _null_ _null_ _null_ )); +DATA(insert OID = 1619 ( pg_typeof PGNSP PGUID 12 1 0 0 0 f f f f f f s s 1 0 2206 "2276" _null_ _null_ _null_ _null_ _null_ pg_typeof _null_ _null_ _null_ )); DESCR("type of the argument"); -DATA(insert OID = 3162 ( pg_collation_for PGNSP PGUID 12 1 0 0 0 f f f f f f s s 1 0 25 "2276" _null_ _null_ _null_ _null_ _null_ pg_collation_for _null_ _null_ _null_ )); +DATA(insert OID = 3162 ( pg_collation_for PGNSP PGUID 12 1 0 0 0 f f f f f f s s 1 0 25 "2276" _null_ _null_ _null_ _null_ _null_ pg_collation_for _null_ _null_ _null_ )); DESCR("collation of the argument; implementation of the COLLATION FOR expression"); DATA(insert OID = 3842 ( pg_relation_is_updatable PGNSP PGUID 12 10 0 0 0 f f f f t f s s 2 0 23 "2205 16" _null_ _null_ _null_ _null_ _null_ pg_relation_is_updatable _null_ _null_ _null_ )); @@ -2073,7 +2073,7 @@ DESCR("position of sub-bitstring"); DATA(insert OID = 1699 ( substring PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1560 "1560 23" _null_ _null_ _null_ _null_ _null_ bitsubstr_no_len _null_ _null_ _null_ )); DESCR("extract portion of bitstring"); -DATA(insert OID = 3030 ( overlay PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 1560 "1560 1560 23 23" _null_ _null_ _null_ _null_ _null_ bitoverlay _null_ _null_ _null_ )); +DATA(insert OID = 3030 ( overlay PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 1560 "1560 1560 23 23" _null_ _null_ _null_ _null_ _null_ bitoverlay _null_ _null_ _null_ )); DESCR("substitute portion of bitstring"); DATA(insert OID = 3031 ( overlay PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1560 "1560 1560 23" _null_ _null_ _null_ _null_ _null_ bitoverlay_no_len _null_ _null_ _null_ )); DESCR("substitute portion of bitstring"); @@ -2099,9 +2099,9 @@ DATA(insert OID = 834 ( macaddr_ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 DATA(insert OID = 835 ( macaddr_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "829 829" _null_ _null_ _null_ _null_ _null_ macaddr_ne _null_ _null_ _null_ )); DATA(insert OID = 836 ( macaddr_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "829 829" _null_ _null_ _null_ _null_ _null_ macaddr_cmp _null_ _null_ _null_ )); DESCR("less-equal-greater"); -DATA(insert OID = 3144 ( macaddr_not PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 829 "829" _null_ _null_ _null_ _null_ _null_ macaddr_not _null_ _null_ _null_ )); -DATA(insert OID = 3145 ( macaddr_and PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 829 "829 829" _null_ _null_ _null_ _null_ _null_ macaddr_and _null_ _null_ _null_ )); -DATA(insert OID = 3146 ( macaddr_or PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 
0 829 "829 829" _null_ _null_ _null_ _null_ _null_ macaddr_or _null_ _null_ _null_ )); +DATA(insert OID = 3144 ( macaddr_not PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 829 "829" _null_ _null_ _null_ _null_ _null_ macaddr_not _null_ _null_ _null_ )); +DATA(insert OID = 3145 ( macaddr_and PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 829 "829 829" _null_ _null_ _null_ _null_ _null_ macaddr_and _null_ _null_ _null_ )); +DATA(insert OID = 3146 ( macaddr_or PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 829 "829 829" _null_ _null_ _null_ _null_ _null_ macaddr_or _null_ _null_ _null_ )); /* for inet type support */ DATA(insert OID = 910 ( inet_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 869 "2275" _null_ _null_ _null_ _null_ _null_ inet_in _null_ _null_ _null_ )); @@ -2122,9 +2122,9 @@ DATA(insert OID = 922 ( network_le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 DATA(insert OID = 923 ( network_gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "869 869" _null_ _null_ _null_ _null_ _null_ network_gt _null_ _null_ _null_ )); DATA(insert OID = 924 ( network_ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "869 869" _null_ _null_ _null_ _null_ _null_ network_ge _null_ _null_ _null_ )); DATA(insert OID = 925 ( network_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "869 869" _null_ _null_ _null_ _null_ _null_ network_ne _null_ _null_ _null_ )); -DATA(insert OID = 3562 ( network_larger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 869" _null_ _null_ _null_ _null_ _null_ network_larger _null_ _null_ _null_ )); +DATA(insert OID = 3562 ( network_larger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 869" _null_ _null_ _null_ _null_ _null_ network_larger _null_ _null_ _null_ )); DESCR("larger of two"); -DATA(insert OID = 3563 ( network_smaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 869" _null_ _null_ _null_ _null_ _null_ network_smaller _null_ _null_ _null_ )); +DATA(insert OID = 3563 ( network_smaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 869" _null_ _null_ _null_ _null_ _null_ network_smaller _null_ _null_ _null_ )); DESCR("smaller of two"); DATA(insert OID = 926 ( network_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "869 869" _null_ _null_ _null_ _null_ _null_ network_cmp _null_ _null_ _null_ )); DESCR("less-equal-greater"); @@ -2159,21 +2159,21 @@ DATA(insert OID = 730 ( text PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 " DESCR("show all parts of inet/cidr value"); DATA(insert OID = 1362 ( hostmask PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 869 "869" _null_ _null_ _null_ _null_ _null_ network_hostmask _null_ _null_ _null_ )); DESCR("hostmask of address"); -DATA(insert OID = 1715 ( cidr PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 650 "869" _null_ _null_ _null_ _null_ _null_ inet_to_cidr _null_ _null_ _null_ )); +DATA(insert OID = 1715 ( cidr PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 650 "869" _null_ _null_ _null_ _null_ _null_ inet_to_cidr _null_ _null_ _null_ )); DESCR("convert inet to cidr"); DATA(insert OID = 2196 ( inet_client_addr PGNSP PGUID 12 1 0 0 0 f f f f f f s r 0 0 869 "" _null_ _null_ _null_ _null_ _null_ inet_client_addr _null_ _null_ _null_ )); DESCR("inet address of the client"); -DATA(insert OID = 2197 ( inet_client_port PGNSP PGUID 12 1 0 0 0 f f f f f f s r 0 0 23 "" _null_ _null_ _null_ _null_ _null_ inet_client_port _null_ _null_ _null_ )); +DATA(insert OID = 2197 ( inet_client_port PGNSP PGUID 12 1 0 0 0 f f f f f f s r 0 0 23 "" _null_ _null_ _null_ _null_ _null_ inet_client_port _null_ _null_ _null_ )); DESCR("client's port number for this 
connection"); DATA(insert OID = 2198 ( inet_server_addr PGNSP PGUID 12 1 0 0 0 f f f f f f s s 0 0 869 "" _null_ _null_ _null_ _null_ _null_ inet_server_addr _null_ _null_ _null_ )); DESCR("inet address of the server"); -DATA(insert OID = 2199 ( inet_server_port PGNSP PGUID 12 1 0 0 0 f f f f f f s s 0 0 23 "" _null_ _null_ _null_ _null_ _null_ inet_server_port _null_ _null_ _null_ )); +DATA(insert OID = 2199 ( inet_server_port PGNSP PGUID 12 1 0 0 0 f f f f f f s s 0 0 23 "" _null_ _null_ _null_ _null_ _null_ inet_server_port _null_ _null_ _null_ )); DESCR("server's port number for this connection"); -DATA(insert OID = 2627 ( inetnot PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 869 "869" _null_ _null_ _null_ _null_ _null_ inetnot _null_ _null_ _null_ )); -DATA(insert OID = 2628 ( inetand PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 869" _null_ _null_ _null_ _null_ _null_ inetand _null_ _null_ _null_ )); -DATA(insert OID = 2629 ( inetor PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 869" _null_ _null_ _null_ _null_ _null_ inetor _null_ _null_ _null_ )); +DATA(insert OID = 2627 ( inetnot PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 869 "869" _null_ _null_ _null_ _null_ _null_ inetnot _null_ _null_ _null_ )); +DATA(insert OID = 2628 ( inetand PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 869" _null_ _null_ _null_ _null_ _null_ inetand _null_ _null_ _null_ )); +DATA(insert OID = 2629 ( inetor PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 869" _null_ _null_ _null_ _null_ _null_ inetor _null_ _null_ _null_ )); DATA(insert OID = 2630 ( inetpl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 20" _null_ _null_ _null_ _null_ _null_ inetpl _null_ _null_ _null_ )); DATA(insert OID = 2631 ( int8pl_inet PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 869 "20 869" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ )); DATA(insert OID = 2632 ( inetmi_int8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 20" _null_ _null_ _null_ _null_ _null_ inetmi_int8 _null_ _null_ _null_ )); @@ -2204,7 +2204,7 @@ DESCR("GiST support"); /* Selectivity estimation for inet and cidr */ DATA(insert OID = 3560 ( networksel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ networksel _null_ _null_ _null_ )); DESCR("restriction selectivity for network operators"); -DATA(insert OID = 3561 ( networkjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ networkjoinsel _null_ _null_ _null_ )); +DATA(insert OID = 3561 ( networkjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ networkjoinsel _null_ _null_ _null_ )); DESCR("join selectivity for network operators"); DATA(insert OID = 1690 ( time_mi_time PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1186 "1083 1083" _null_ _null_ _null_ _null_ _null_ time_mi_time _null_ _null_ _null_ )); @@ -2223,13 +2223,13 @@ DESCR("hash"); /* OID's 1700 - 1799 NUMERIC data type */ -DATA(insert OID = 1701 ( numeric_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1700 "2275 26 23" _null_ _null_ _null_ _null_ _null_ numeric_in _null_ _null_ _null_ )); +DATA(insert OID = 1701 ( numeric_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1700 "2275 26 23" _null_ _null_ _null_ _null_ _null_ numeric_in _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 1702 ( numeric_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "1700" _null_ _null_ _null_ _null_ _null_ numeric_out _null_ _null_ _null_ )); 
DESCR("I/O"); -DATA(insert OID = 2917 ( numerictypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ numerictypmodin _null_ _null_ _null_ )); +DATA(insert OID = 2917 ( numerictypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ numerictypmodin _null_ _null_ _null_ )); DESCR("I/O typmod"); -DATA(insert OID = 2918 ( numerictypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ numerictypmodout _null_ _null_ _null_ )); +DATA(insert OID = 2918 ( numerictypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ numerictypmodout _null_ _null_ _null_ )); DESCR("I/O typmod"); DATA(insert OID = 3157 ( numeric_transform PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ numeric_transform _null_ _null_ _null_ )); DESCR("transform a numeric length coercion"); @@ -2374,19 +2374,19 @@ DESCR("I/O"); DATA(insert OID = 1799 ( oidout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "26" _null_ _null_ _null_ _null_ _null_ oidout _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 3058 ( concat PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 1 0 25 "2276" "{2276}" "{v}" _null_ _null_ _null_ text_concat _null_ _null_ _null_ )); +DATA(insert OID = 3058 ( concat PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 1 0 25 "2276" "{2276}" "{v}" _null_ _null_ _null_ text_concat _null_ _null_ _null_ )); DESCR("concatenate values"); -DATA(insert OID = 3059 ( concat_ws PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 2 0 25 "25 2276" "{25,2276}" "{i,v}" _null_ _null_ _null_ text_concat_ws _null_ _null_ _null_ )); +DATA(insert OID = 3059 ( concat_ws PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 2 0 25 "25 2276" "{25,2276}" "{i,v}" _null_ _null_ _null_ text_concat_ws _null_ _null_ _null_ )); DESCR("concatenate values with separators"); DATA(insert OID = 3060 ( left PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "25 23" _null_ _null_ _null_ _null_ _null_ text_left _null_ _null_ _null_ )); DESCR("extract the first n characters"); DATA(insert OID = 3061 ( right PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "25 23" _null_ _null_ _null_ _null_ _null_ text_right _null_ _null_ _null_ )); DESCR("extract the last n characters"); -DATA(insert OID = 3062 ( reverse PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ text_reverse _null_ _null_ _null_ )); +DATA(insert OID = 3062 ( reverse PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ text_reverse _null_ _null_ _null_ )); DESCR("reverse text"); -DATA(insert OID = 3539 ( format PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 2 0 25 "25 2276" "{25,2276}" "{i,v}" _null_ _null_ _null_ text_format _null_ _null_ _null_ )); +DATA(insert OID = 3539 ( format PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 2 0 25 "25 2276" "{25,2276}" "{i,v}" _null_ _null_ _null_ text_format _null_ _null_ _null_ )); DESCR("format text message"); -DATA(insert OID = 3540 ( format PGNSP PGUID 12 1 0 0 0 f f f f f f s s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ text_format_nv _null_ _null_ _null_ )); +DATA(insert OID = 3540 ( format PGNSP PGUID 12 1 0 0 0 f f f f f f s s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ text_format_nv _null_ _null_ _null_ )); DESCR("format text message"); DATA(insert OID = 1810 ( bit_length PGNSP PGUID 14 1 0 0 0 f f f f t f i s 1 0 23 "17" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.octet_length($1) * 8" _null_ _null_ _null_ 
)); @@ -2397,25 +2397,25 @@ DATA(insert OID = 1812 ( bit_length PGNSP PGUID 14 1 0 0 0 f f f f t f i s 1 DESCR("length in bits"); /* Selectivity estimators for LIKE and related operators */ -DATA(insert OID = 1814 ( iclikesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ iclikesel _null_ _null_ _null_ )); +DATA(insert OID = 1814 ( iclikesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ iclikesel _null_ _null_ _null_ )); DESCR("restriction selectivity of ILIKE"); -DATA(insert OID = 1815 ( icnlikesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ icnlikesel _null_ _null_ _null_ )); +DATA(insert OID = 1815 ( icnlikesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ icnlikesel _null_ _null_ _null_ )); DESCR("restriction selectivity of NOT ILIKE"); DATA(insert OID = 1816 ( iclikejoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ iclikejoinsel _null_ _null_ _null_ )); DESCR("join selectivity of ILIKE"); DATA(insert OID = 1817 ( icnlikejoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ icnlikejoinsel _null_ _null_ _null_ )); DESCR("join selectivity of NOT ILIKE"); -DATA(insert OID = 1818 ( regexeqsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ regexeqsel _null_ _null_ _null_ )); +DATA(insert OID = 1818 ( regexeqsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ regexeqsel _null_ _null_ _null_ )); DESCR("restriction selectivity of regex match"); -DATA(insert OID = 1819 ( likesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ likesel _null_ _null_ _null_ )); +DATA(insert OID = 1819 ( likesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ likesel _null_ _null_ _null_ )); DESCR("restriction selectivity of LIKE"); -DATA(insert OID = 1820 ( icregexeqsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ icregexeqsel _null_ _null_ _null_ )); +DATA(insert OID = 1820 ( icregexeqsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ icregexeqsel _null_ _null_ _null_ )); DESCR("restriction selectivity of case-insensitive regex match"); -DATA(insert OID = 1821 ( regexnesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ regexnesel _null_ _null_ _null_ )); +DATA(insert OID = 1821 ( regexnesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ regexnesel _null_ _null_ _null_ )); DESCR("restriction selectivity of regex non-match"); -DATA(insert OID = 1822 ( nlikesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ nlikesel _null_ _null_ _null_ )); +DATA(insert OID = 1822 ( nlikesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ nlikesel _null_ _null_ _null_ )); DESCR("restriction selectivity of NOT LIKE"); -DATA(insert OID = 1823 ( icregexnesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ icregexnesel _null_ _null_ 
_null_ )); +DATA(insert OID = 1823 ( icregexnesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ icregexnesel _null_ _null_ _null_ )); DESCR("restriction selectivity of case-insensitive regex non-match"); DATA(insert OID = 1824 ( regexeqjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ regexeqjoinsel _null_ _null_ _null_ )); DESCR("join selectivity of regex match"); @@ -2443,19 +2443,19 @@ DATA(insert OID = 1832 ( float8_stddev_samp PGNSP PGUID 12 1 0 0 0 f f f f t f DESCR("aggregate final function"); DATA(insert OID = 1833 ( numeric_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 1700" _null_ _null_ _null_ _null_ _null_ numeric_accum _null_ _null_ _null_ )); DESCR("aggregate transition function"); -DATA(insert OID = 3341 ( numeric_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ numeric_combine _null_ _null_ _null_ )); +DATA(insert OID = 3341 ( numeric_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ numeric_combine _null_ _null_ _null_ )); DESCR("aggregate combine function"); DATA(insert OID = 2858 ( numeric_avg_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 1700" _null_ _null_ _null_ _null_ _null_ numeric_avg_accum _null_ _null_ _null_ )); DESCR("aggregate transition function"); -DATA(insert OID = 3337 ( numeric_avg_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ numeric_avg_combine _null_ _null_ _null_ )); +DATA(insert OID = 3337 ( numeric_avg_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ numeric_avg_combine _null_ _null_ _null_ )); DESCR("aggregate combine function"); DATA(insert OID = 2740 ( numeric_avg_serialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ numeric_avg_serialize _null_ _null_ _null_ )); DESCR("aggregate serial function"); -DATA(insert OID = 2741 ( numeric_avg_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "17" _null_ _null_ _null_ _null_ _null_ numeric_avg_deserialize _null_ _null_ _null_ )); +DATA(insert OID = 2741 ( numeric_avg_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "17" _null_ _null_ _null_ _null_ _null_ numeric_avg_deserialize _null_ _null_ _null_ )); DESCR("aggregate deserial function"); DATA(insert OID = 3335 ( numeric_serialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ numeric_serialize _null_ _null_ _null_ )); DESCR("aggregate serial function"); -DATA(insert OID = 3336 ( numeric_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "17" _null_ _null_ _null_ _null_ _null_ numeric_deserialize _null_ _null_ _null_ )); +DATA(insert OID = 3336 ( numeric_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "17" _null_ _null_ _null_ _null_ _null_ numeric_deserialize _null_ _null_ _null_ )); DESCR("aggregate deserial function"); DATA(insert OID = 3548 ( numeric_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 1700" _null_ _null_ _null_ _null_ _null_ numeric_accum_inv _null_ _null_ _null_ )); DESCR("aggregate transition function"); @@ -2465,11 +2465,11 @@ DATA(insert OID = 1835 ( int4_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 DESCR("aggregate transition function"); DATA(insert OID = 1836 ( int8_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 20" 
_null_ _null_ _null_ _null_ _null_ int8_accum _null_ _null_ _null_ )); DESCR("aggregate transition function"); -DATA(insert OID = 3338 ( numeric_poly_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_combine _null_ _null_ _null_ )); +DATA(insert OID = 3338 ( numeric_poly_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_combine _null_ _null_ _null_ )); DESCR("aggregate combine function"); -DATA(insert OID = 3339 ( numeric_poly_serialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_serialize _null_ _null_ _null_ )); +DATA(insert OID = 3339 ( numeric_poly_serialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_serialize _null_ _null_ _null_ )); DESCR("aggregate serial function"); -DATA(insert OID = 3340 ( numeric_poly_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "17" _null_ _null_ _null_ _null_ _null_ numeric_poly_deserialize _null_ _null_ _null_ )); +DATA(insert OID = 3340 ( numeric_poly_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "17" _null_ _null_ _null_ _null_ _null_ numeric_poly_deserialize _null_ _null_ _null_ )); DESCR("aggregate deserial function"); DATA(insert OID = 2746 ( int8_avg_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 20" _null_ _null_ _null_ _null_ _null_ int8_avg_accum _null_ _null_ _null_ )); DESCR("aggregate transition function"); @@ -2481,13 +2481,13 @@ DATA(insert OID = 3569 ( int8_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i DESCR("aggregate transition function"); DATA(insert OID = 3387 ( int8_avg_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 20" _null_ _null_ _null_ _null_ _null_ int8_avg_accum_inv _null_ _null_ _null_ )); DESCR("aggregate transition function"); -DATA(insert OID = 2785 ( int8_avg_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ int8_avg_combine _null_ _null_ _null_ )); +DATA(insert OID = 2785 ( int8_avg_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ int8_avg_combine _null_ _null_ _null_ )); DESCR("aggregate combine function"); -DATA(insert OID = 2786 ( int8_avg_serialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ int8_avg_serialize _null_ _null_ _null_ )); +DATA(insert OID = 2786 ( int8_avg_serialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ int8_avg_serialize _null_ _null_ _null_ )); DESCR("aggregate serial function"); -DATA(insert OID = 2787 ( int8_avg_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "17" _null_ _null_ _null_ _null_ _null_ int8_avg_deserialize _null_ _null_ _null_ )); +DATA(insert OID = 2787 ( int8_avg_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "17" _null_ _null_ _null_ _null_ _null_ int8_avg_deserialize _null_ _null_ _null_ )); DESCR("aggregate deserial function"); -DATA(insert OID = 3324 ( int4_avg_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ int4_avg_combine _null_ _null_ _null_ )); +DATA(insert OID = 3324 ( int4_avg_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ int4_avg_combine _null_ _null_ _null_ )); DESCR("aggregate combine function"); DATA(insert OID = 3178 ( numeric_sum PGNSP PGUID 
12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_sum _null_ _null_ _null_ )); DESCR("aggregate final function"); @@ -2522,7 +2522,7 @@ DESCR("aggregate final function"); DATA(insert OID = 1843 ( interval_accum PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1187 "1187 1186" _null_ _null_ _null_ _null_ _null_ interval_accum _null_ _null_ _null_ )); DESCR("aggregate transition function"); -DATA(insert OID = 3325 ( interval_combine PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1187 "1187 1187" _null_ _null_ _null_ _null_ _null_ interval_combine _null_ _null_ _null_ )); +DATA(insert OID = 3325 ( interval_combine PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1187 "1187 1187" _null_ _null_ _null_ _null_ _null_ interval_combine _null_ _null_ _null_ )); DESCR("aggregate combine function"); DATA(insert OID = 3549 ( interval_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1187 "1187 1186" _null_ _null_ _null_ _null_ _null_ interval_accum_inv _null_ _null_ _null_ )); DESCR("aggregate transition function"); @@ -2685,13 +2685,13 @@ DESCR("current user privilege on column by rel oid, col name"); DATA(insert OID = 3023 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 21 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_id_attnum _null_ _null_ _null_ )); DESCR("current user privilege on column by rel oid, col attnum"); -DATA(insert OID = 3024 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 3 0 16 "19 25 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_name_name _null_ _null_ _null_ )); +DATA(insert OID = 3024 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 3 0 16 "19 25 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_name_name _null_ _null_ _null_ )); DESCR("user privilege on any column by username, rel name"); -DATA(insert OID = 3025 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 3 0 16 "19 26 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_name_id _null_ _null_ _null_ )); +DATA(insert OID = 3025 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 3 0 16 "19 26 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_name_id _null_ _null_ _null_ )); DESCR("user privilege on any column by username, rel oid"); -DATA(insert OID = 3026 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 3 0 16 "26 25 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_id_name _null_ _null_ _null_ )); +DATA(insert OID = 3026 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 3 0 16 "26 25 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_id_name _null_ _null_ _null_ )); DESCR("user privilege on any column by user oid, rel name"); -DATA(insert OID = 3027 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 3 0 16 "26 26 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_id_id _null_ _null_ _null_ )); +DATA(insert OID = 3027 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 3 0 16 "26 26 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_id_id _null_ _null_ _null_ )); DESCR("user privilege on any column by user oid, rel oid"); DATA(insert OID = 3028 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_name _null_ _null_ _null_ )); DESCR("current user privilege on any column by rel name"); @@ -2726,9 +2726,9 @@ DATA(insert OID = 2781 ( 
pg_stat_get_last_vacuum_time PGNSP PGUID 12 1 0 0 0 f DESCR("statistics: last manual vacuum time for a table"); DATA(insert OID = 2782 ( pg_stat_get_last_autovacuum_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 1184 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_last_autovacuum_time _null_ _null_ _null_ )); DESCR("statistics: last auto vacuum time for a table"); -DATA(insert OID = 2783 ( pg_stat_get_last_analyze_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 1184 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_last_analyze_time _null_ _null_ _null_ )); +DATA(insert OID = 2783 ( pg_stat_get_last_analyze_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 1184 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_last_analyze_time _null_ _null_ _null_ )); DESCR("statistics: last manual analyze time for a table"); -DATA(insert OID = 2784 ( pg_stat_get_last_autoanalyze_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 1184 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_last_autoanalyze_time _null_ _null_ _null_ )); +DATA(insert OID = 2784 ( pg_stat_get_last_autoanalyze_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 1184 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_last_autoanalyze_time _null_ _null_ _null_ )); DESCR("statistics: last auto analyze time for a table"); DATA(insert OID = 3054 ( pg_stat_get_vacuum_count PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_vacuum_count _null_ _null_ _null_ )); DESCR("statistics: number of manual vacuums for a table"); @@ -2742,7 +2742,7 @@ DATA(insert OID = 1936 ( pg_stat_get_backend_idset PGNSP PGUID 12 1 100 0 0 f DESCR("statistics: currently active backend IDs"); DATA(insert OID = 2022 ( pg_stat_get_activity PGNSP PGUID 12 1 100 0 0 f f f f f t s r 1 0 2249 "23" "{23,26,23,26,25,25,25,25,25,1184,1184,1184,1184,869,25,23,28,28,16,25,25,23,16,25}" "{i,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}" "{pid,datid,pid,usesysid,application_name,state,query,wait_event_type,wait_event,xact_start,query_start,backend_start,state_change,client_addr,client_hostname,client_port,backend_xid,backend_xmin,ssl,sslversion,sslcipher,sslbits,sslcompression,sslclientdn}" _null_ _null_ pg_stat_get_activity _null_ _null_ _null_ )); DESCR("statistics: information about currently active backends"); -DATA(insert OID = 3318 ( pg_stat_get_progress_info PGNSP PGUID 12 1 100 0 0 f f f f t t s r 1 0 2249 "25" "{25,23,26,26,20,20,20,20,20,20,20,20,20,20}" "{i,o,o,o,o,o,o,o,o,o,o,o,o,o}" "{cmdtype,pid,datid,relid,param1,param2,param3,param4,param5,param6,param7,param8,param9,param10}" _null_ _null_ pg_stat_get_progress_info _null_ _null_ _null_ )); +DATA(insert OID = 3318 ( pg_stat_get_progress_info PGNSP PGUID 12 1 100 0 0 f f f f t t s r 1 0 2249 "25" "{25,23,26,26,20,20,20,20,20,20,20,20,20,20}" "{i,o,o,o,o,o,o,o,o,o,o,o,o,o}" "{cmdtype,pid,datid,relid,param1,param2,param3,param4,param5,param6,param7,param8,param9,param10}" _null_ _null_ pg_stat_get_progress_info _null_ _null_ _null_ )); DESCR("statistics: information about progress of backends running maintenance command"); DATA(insert OID = 3099 ( pg_stat_get_wal_senders PGNSP PGUID 12 1 10 0 0 f f f f f t s r 0 0 2249 "" "{23,25,3220,3220,3220,3220,23,25}" "{o,o,o,o,o,o,o,o}" "{pid,state,sent_location,write_location,flush_location,replay_location,sync_priority,sync_state}" _null_ _null_ pg_stat_get_wal_senders _null_ _null_ _null_ )); DESCR("statistics: information about currently active replication"); @@ -2828,7 +2828,7 @@ DATA(insert OID = 2772 ( 
pg_stat_get_bgwriter_buf_written_clean PGNSP PGUID 12 1 DESCR("statistics: number of buffers written by the bgwriter for cleaning dirty buffers"); DATA(insert OID = 2773 ( pg_stat_get_bgwriter_maxwritten_clean PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 20 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_bgwriter_maxwritten_clean _null_ _null_ _null_ )); DESCR("statistics: number of times the bgwriter stopped processing when it had written too many buffers while cleaning"); -DATA(insert OID = 3075 ( pg_stat_get_bgwriter_stat_reset_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 1184 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_bgwriter_stat_reset_time _null_ _null_ _null_ )); +DATA(insert OID = 3075 ( pg_stat_get_bgwriter_stat_reset_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 1184 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_bgwriter_stat_reset_time _null_ _null_ _null_ )); DESCR("statistics: last reset for the bgwriter"); DATA(insert OID = 3160 ( pg_stat_get_checkpoint_write_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 701 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_checkpoint_write_time _null_ _null_ _null_ )); DESCR("statistics: checkpoint time spent writing buffers to disk, in msec"); @@ -2879,11 +2879,11 @@ DATA(insert OID = 2230 ( pg_stat_clear_snapshot PGNSP PGUID 12 1 0 0 0 f f f f DESCR("statistics: discard current transaction's statistics snapshot"); DATA(insert OID = 2274 ( pg_stat_reset PGNSP PGUID 12 1 0 0 0 f f f f f f v s 0 0 2278 "" _null_ _null_ _null_ _null_ _null_ pg_stat_reset _null_ _null_ _null_ )); DESCR("statistics: reset collected statistics for current database"); -DATA(insert OID = 3775 ( pg_stat_reset_shared PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 2278 "25" _null_ _null_ _null_ _null_ _null_ pg_stat_reset_shared _null_ _null_ _null_ )); +DATA(insert OID = 3775 ( pg_stat_reset_shared PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 2278 "25" _null_ _null_ _null_ _null_ _null_ pg_stat_reset_shared _null_ _null_ _null_ )); DESCR("statistics: reset collected statistics shared across the cluster"); -DATA(insert OID = 3776 ( pg_stat_reset_single_table_counters PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_reset_single_table_counters _null_ _null_ _null_ )); +DATA(insert OID = 3776 ( pg_stat_reset_single_table_counters PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_reset_single_table_counters _null_ _null_ _null_ )); DESCR("statistics: reset collected statistics for a single table or index in the current database"); -DATA(insert OID = 3777 ( pg_stat_reset_single_function_counters PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_reset_single_function_counters _null_ _null_ _null_ )); +DATA(insert OID = 3777 ( pg_stat_reset_single_function_counters PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_reset_single_function_counters _null_ _null_ _null_ )); DESCR("statistics: reset collected statistics for a single function in the current database"); DATA(insert OID = 3163 ( pg_trigger_depth PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 23 "" _null_ _null_ _null_ _null_ _null_ pg_trigger_depth _null_ _null_ _null_ )); @@ -2959,7 +2959,7 @@ DATA(insert OID = 2019 ( time PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 108 DESCR("convert timestamp with time zone to time"); DATA(insert OID = 2020 ( date_trunc PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1114 "25 1114" _null_ _null_ 
_null_ _null_ _null_ timestamp_trunc _null_ _null_ _null_ )); DESCR("truncate timestamp to specified units"); -DATA(insert OID = 2021 ( date_part PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "25 1114" _null_ _null_ _null_ _null_ _null_ timestamp_part _null_ _null_ _null_ )); +DATA(insert OID = 2021 ( date_part PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "25 1114" _null_ _null_ _null_ _null_ _null_ timestamp_part _null_ _null_ _null_ )); DESCR("extract field from timestamp"); DATA(insert OID = 2023 ( timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1114 "702" _null_ _null_ _null_ _null_ _null_ abstime_timestamp _null_ _null_ _null_ )); DESCR("convert abstime to timestamp"); @@ -2990,11 +2990,11 @@ DATA(insert OID = 2039 ( timestamp_hash PGNSP PGUID 12 1 0 0 0 f f f f t f i s DESCR("hash"); DATA(insert OID = 2041 ( overlaps PGNSP PGUID 12 1 0 0 0 f f f f f f i s 4 0 16 "1114 1114 1114 1114" _null_ _null_ _null_ _null_ _null_ overlaps_timestamp _null_ _null_ _null_ )); DESCR("intervals overlap?"); -DATA(insert OID = 2042 ( overlaps PGNSP PGUID 14 1 0 0 0 f f f f f f i s 4 0 16 "1114 1186 1114 1186" _null_ _null_ _null_ _null_ _null_ "select ($1, ($1 + $2)) overlaps ($3, ($3 + $4))" _null_ _null_ _null_ )); +DATA(insert OID = 2042 ( overlaps PGNSP PGUID 14 1 0 0 0 f f f f f f i s 4 0 16 "1114 1186 1114 1186" _null_ _null_ _null_ _null_ _null_ "select ($1, ($1 + $2)) overlaps ($3, ($3 + $4))" _null_ _null_ _null_ )); DESCR("intervals overlap?"); -DATA(insert OID = 2043 ( overlaps PGNSP PGUID 14 1 0 0 0 f f f f f f i s 4 0 16 "1114 1114 1114 1186" _null_ _null_ _null_ _null_ _null_ "select ($1, $2) overlaps ($3, ($3 + $4))" _null_ _null_ _null_ )); +DATA(insert OID = 2043 ( overlaps PGNSP PGUID 14 1 0 0 0 f f f f f f i s 4 0 16 "1114 1114 1114 1186" _null_ _null_ _null_ _null_ _null_ "select ($1, $2) overlaps ($3, ($3 + $4))" _null_ _null_ _null_ )); DESCR("intervals overlap?"); -DATA(insert OID = 2044 ( overlaps PGNSP PGUID 14 1 0 0 0 f f f f f f i s 4 0 16 "1114 1186 1114 1114" _null_ _null_ _null_ _null_ _null_ "select ($1, ($1 + $2)) overlaps ($3, $4)" _null_ _null_ _null_ )); +DATA(insert OID = 2044 ( overlaps PGNSP PGUID 14 1 0 0 0 f f f f f f i s 4 0 16 "1114 1186 1114 1114" _null_ _null_ _null_ _null_ _null_ "select ($1, ($1 + $2)) overlaps ($3, $4)" _null_ _null_ _null_ )); DESCR("intervals overlap?"); DATA(insert OID = 2045 ( timestamp_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "1114 1114" _null_ _null_ _null_ _null_ _null_ timestamp_cmp _null_ _null_ _null_ )); DESCR("less-equal-greater"); @@ -3125,7 +3125,7 @@ DATA(insert OID = 2849 ( pg_current_xlog_location PGNSP PGUID 12 1 0 0 0 f f f f DESCR("current xlog write location"); DATA(insert OID = 2852 ( pg_current_xlog_insert_location PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 3220 "" _null_ _null_ _null_ _null_ _null_ pg_current_xlog_insert_location _null_ _null_ _null_ )); DESCR("current xlog insert location"); -DATA(insert OID = 3330 ( pg_current_xlog_flush_location PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 3220 "" _null_ _null_ _null_ _null_ _null_ pg_current_xlog_flush_location _null_ _null_ _null_ )); +DATA(insert OID = 3330 ( pg_current_xlog_flush_location PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 3220 "" _null_ _null_ _null_ _null_ _null_ pg_current_xlog_flush_location _null_ _null_ _null_ )); DESCR("current xlog flush location"); DATA(insert OID = 2850 ( pg_xlogfile_name_offset PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2249 "3220" "{3220,25,23}" "{i,o,o}" "{wal_location,file_name,file_offset}" _null_ _null_ 
pg_xlogfile_name_offset _null_ _null_ _null_ )); DESCR("xlog filename and byte offset, given an xlog location"); @@ -3192,32 +3192,32 @@ DESCR("convert boolean to text"); /* Aggregates (moved here from pg_aggregate for 7.3) */ -DATA(insert OID = 2100 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2100 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("the average (arithmetic mean) as numeric of all bigint values"); -DATA(insert OID = 2101 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2101 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("the average (arithmetic mean) as numeric of all integer values"); -DATA(insert OID = 2102 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2102 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("the average (arithmetic mean) as numeric of all smallint values"); DATA(insert OID = 2103 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("the average (arithmetic mean) as numeric of all numeric values"); -DATA(insert OID = 2104 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2104 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("the average (arithmetic mean) as float8 of all float4 values"); -DATA(insert OID = 2105 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2105 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("the average (arithmetic mean) as float8 of all float8 values"); DATA(insert OID = 2106 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1186 "1186" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("the average (arithmetic mean) as interval of all interval values"); -DATA(insert OID = 2107 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2107 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sum as numeric across all bigint input values"); DATA(insert OID = 2108 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 20 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sum as bigint across all integer input values"); DATA(insert OID = 2109 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 20 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sum as bigint across all smallint input values"); -DATA(insert OID = 2110 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 700 "700" _null_ _null_ _null_ 
_null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2110 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 700 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sum as float4 across all float4 input values"); -DATA(insert OID = 2111 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2111 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sum as float8 across all float8 input values"); -DATA(insert OID = 2112 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 790 "790" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2112 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 790 "790" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sum as money across all money input values"); DATA(insert OID = 2113 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1186 "1186" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sum as interval across all interval input values"); @@ -3232,11 +3232,11 @@ DATA(insert OID = 2117 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 21 " DESCR("maximum value of all smallint input values"); DATA(insert OID = 2118 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 26 "26" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("maximum value of all oid input values"); -DATA(insert OID = 2119 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 700 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2119 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 700 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("maximum value of all float4 input values"); -DATA(insert OID = 2120 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2120 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("maximum value of all float8 input values"); -DATA(insert OID = 2121 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 702 "702" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2121 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 702 "702" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("maximum value of all abstime input values"); DATA(insert OID = 2122 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1082 "1082" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("maximum value of all date input values"); @@ -3244,7 +3244,7 @@ DATA(insert OID = 2123 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1083 DESCR("maximum value of all time input values"); DATA(insert OID = 2124 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1266 "1266" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("maximum value of all time with time zone input values"); -DATA(insert OID = 2125 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 790 "790" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2125 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 790 "790" _null_ _null_ _null_ _null_ 
_null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("maximum value of all money input values"); DATA(insert OID = 2126 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1114 "1114" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("maximum value of all timestamp input values"); @@ -3273,11 +3273,11 @@ DATA(insert OID = 2133 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 21 " DESCR("minimum value of all smallint input values"); DATA(insert OID = 2134 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 26 "26" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("minimum value of all oid input values"); -DATA(insert OID = 2135 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 700 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2135 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 700 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("minimum value of all float4 input values"); -DATA(insert OID = 2136 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2136 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("minimum value of all float8 input values"); -DATA(insert OID = 2137 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 702 "702" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2137 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 702 "702" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("minimum value of all abstime input values"); DATA(insert OID = 2138 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1082 "1082" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("minimum value of all date input values"); @@ -3285,7 +3285,7 @@ DATA(insert OID = 2139 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1083 DESCR("minimum value of all time input values"); DATA(insert OID = 2140 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1266 "1266" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("minimum value of all time with time zone input values"); -DATA(insert OID = 2141 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 790 "790" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2141 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 790 "790" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("minimum value of all money input values"); DATA(insert OID = 2142 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1114 "1114" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("minimum value of all timestamp input values"); @@ -3307,114 +3307,114 @@ DATA(insert OID = 3565 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 869 DESCR("minimum value of all inet input values"); /* count has two forms: count(any) and count(*) */ -DATA(insert OID = 2147 ( count PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 20 "2276" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2147 ( count PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 20 "2276" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("number of input rows for which the input expression is not null"); -DATA(insert 
OID = 2803 ( count PGNSP PGUID 12 1 0 0 0 t f f f f f i s 0 0 20 "" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2803 ( count PGNSP PGUID 12 1 0 0 0 t f f f f f i s 0 0 20 "" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("number of input rows"); -DATA(insert OID = 2718 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2718 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("population variance of bigint input values (square of the population standard deviation)"); -DATA(insert OID = 2719 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2719 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("population variance of integer input values (square of the population standard deviation)"); -DATA(insert OID = 2720 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2720 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("population variance of smallint input values (square of the population standard deviation)"); -DATA(insert OID = 2721 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2721 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("population variance of float4 input values (square of the population standard deviation)"); -DATA(insert OID = 2722 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2722 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("population variance of float8 input values (square of the population standard deviation)"); DATA(insert OID = 2723 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("population variance of numeric input values (square of the population standard deviation)"); -DATA(insert OID = 2641 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2641 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sample variance of bigint input values (square of the sample standard deviation)"); -DATA(insert OID = 2642 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2642 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sample variance of integer input values (square of the sample standard 
deviation)"); -DATA(insert OID = 2643 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2643 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sample variance of smallint input values (square of the sample standard deviation)"); -DATA(insert OID = 2644 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2644 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sample variance of float4 input values (square of the sample standard deviation)"); -DATA(insert OID = 2645 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2645 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sample variance of float8 input values (square of the sample standard deviation)"); DATA(insert OID = 2646 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sample variance of numeric input values (square of the sample standard deviation)"); -DATA(insert OID = 2148 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2148 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("historical alias for var_samp"); -DATA(insert OID = 2149 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2149 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("historical alias for var_samp"); -DATA(insert OID = 2150 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2150 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("historical alias for var_samp"); -DATA(insert OID = 2151 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2151 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("historical alias for var_samp"); -DATA(insert OID = 2152 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2152 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("historical alias for var_samp"); DATA(insert OID = 2153 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("historical 
alias for var_samp"); -DATA(insert OID = 2724 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2724 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("population standard deviation of bigint input values"); -DATA(insert OID = 2725 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2725 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("population standard deviation of integer input values"); -DATA(insert OID = 2726 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2726 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("population standard deviation of smallint input values"); -DATA(insert OID = 2727 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2727 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("population standard deviation of float4 input values"); -DATA(insert OID = 2728 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2728 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("population standard deviation of float8 input values"); DATA(insert OID = 2729 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("population standard deviation of numeric input values"); -DATA(insert OID = 2712 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2712 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sample standard deviation of bigint input values"); -DATA(insert OID = 2713 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2713 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sample standard deviation of integer input values"); -DATA(insert OID = 2714 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2714 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sample standard deviation of smallint input values"); -DATA(insert OID = 2715 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy 
_null_ _null_ _null_ )); +DATA(insert OID = 2715 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sample standard deviation of float4 input values"); -DATA(insert OID = 2716 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2716 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sample standard deviation of float8 input values"); DATA(insert OID = 2717 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sample standard deviation of numeric input values"); -DATA(insert OID = 2154 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2154 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("historical alias for stddev_samp"); -DATA(insert OID = 2155 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2155 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("historical alias for stddev_samp"); -DATA(insert OID = 2156 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2156 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("historical alias for stddev_samp"); -DATA(insert OID = 2157 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2157 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("historical alias for stddev_samp"); -DATA(insert OID = 2158 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2158 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("historical alias for stddev_samp"); DATA(insert OID = 2159 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("historical alias for stddev_samp"); DATA(insert OID = 2818 ( regr_count PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 20 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("number of input rows in which both expressions are not null"); -DATA(insert OID = 2819 ( regr_sxx PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2819 ( regr_sxx PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sum of squares of the independent variable (sum(X^2) - 
sum(X)^2/N)"); -DATA(insert OID = 2820 ( regr_syy PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2820 ( regr_syy PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sum of squares of the dependent variable (sum(Y^2) - sum(Y)^2/N)"); -DATA(insert OID = 2821 ( regr_sxy PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2821 ( regr_sxy PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sum of products of independent times dependent variable (sum(X*Y) - sum(X) * sum(Y)/N)"); -DATA(insert OID = 2822 ( regr_avgx PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2822 ( regr_avgx PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("average of the independent variable (sum(X)/N)"); -DATA(insert OID = 2823 ( regr_avgy PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2823 ( regr_avgy PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("average of the dependent variable (sum(Y)/N)"); -DATA(insert OID = 2824 ( regr_r2 PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2824 ( regr_r2 PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("square of the correlation coefficient"); -DATA(insert OID = 2825 ( regr_slope PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2825 ( regr_slope PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("slope of the least-squares-fit linear equation determined by the (X, Y) pairs"); -DATA(insert OID = 2826 ( regr_intercept PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2826 ( regr_intercept PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs"); -DATA(insert OID = 2827 ( covar_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2827 ( covar_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("population covariance"); -DATA(insert OID = 2828 ( covar_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2828 ( covar_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ 
_null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("sample covariance"); -DATA(insert OID = 2829 ( corr PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 2829 ( corr PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("correlation coefficient"); DATA(insert OID = 2160 ( text_pattern_lt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ text_pattern_lt _null_ _null_ _null_ )); @@ -3480,7 +3480,7 @@ DATA(insert OID = 2221 ( regtypeout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 DESCR("I/O"); DATA(insert OID = 3493 ( to_regtype PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2206 "25" _null_ _null_ _null_ _null_ _null_ to_regtype _null_ _null_ _null_ )); DESCR("convert type name to regtype"); -DATA(insert OID = 1079 ( regclass PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2205 "25" _null_ _null_ _null_ _null_ _null_ text_regclass _null_ _null_ _null_ )); +DATA(insert OID = 1079 ( regclass PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2205 "25" _null_ _null_ _null_ _null_ _null_ text_regclass _null_ _null_ _null_ )); DESCR("convert text to regclass"); DATA(insert OID = 4098 ( regrolein PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 4096 "2275" _null_ _null_ _null_ _null_ _null_ regrolein _null_ _null_ _null_ )); @@ -3611,20 +3611,20 @@ DESCR("current user privilege on type by type name"); DATA(insert OID = 3143 ( has_type_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "26 25" _null_ _null_ _null_ _null_ _null_ has_type_privilege_id _null_ _null_ _null_ )); DESCR("current user privilege on type by type oid"); -DATA(insert OID = 2705 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 19 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_name_name _null_ _null_ _null_ )); +DATA(insert OID = 2705 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 19 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_name_name _null_ _null_ _null_ )); DESCR("user privilege on role by username, role name"); -DATA(insert OID = 2706 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 26 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_name_id _null_ _null_ _null_ )); +DATA(insert OID = 2706 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 26 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_name_id _null_ _null_ _null_ )); DESCR("user privilege on role by username, role oid"); -DATA(insert OID = 2707 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 19 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_id_name _null_ _null_ _null_ )); +DATA(insert OID = 2707 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 19 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_id_name _null_ _null_ _null_ )); DESCR("user privilege on role by user oid, role name"); -DATA(insert OID = 2708 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 26 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_id_id _null_ _null_ _null_ )); +DATA(insert OID = 2708 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 26 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_id_id _null_ _null_ _null_ )); DESCR("user privilege on role by user oid, role oid"); DATA(insert OID = 2709 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "19 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_name _null_ _null_ _null_ )); 
DESCR("current user privilege on role by role name"); DATA(insert OID = 2710 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "26 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_id _null_ _null_ _null_ )); DESCR("current user privilege on role by role oid"); -DATA(insert OID = 1269 ( pg_column_size PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 23 "2276" _null_ _null_ _null_ _null_ _null_ pg_column_size _null_ _null_ _null_ )); +DATA(insert OID = 1269 ( pg_column_size PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 23 "2276" _null_ _null_ _null_ _null_ _null_ pg_column_size _null_ _null_ _null_ )); DESCR("bytes required to store the value, perhaps with compression"); DATA(insert OID = 2322 ( pg_tablespace_size PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_tablespace_size_oid _null_ _null_ _null_ )); DESCR("total disk space usage for the specified tablespace"); @@ -3660,7 +3660,7 @@ DESCR("file path of relation"); DATA(insert OID = 2316 ( postgresql_fdw_validator PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1009 26" _null_ _null_ _null_ _null_ _null_ postgresql_fdw_validator _null_ _null_ _null_)); DESCR("(internal)"); -DATA(insert OID = 2290 ( record_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 2249 "2275 26 23" _null_ _null_ _null_ _null_ _null_ record_in _null_ _null_ _null_ )); +DATA(insert OID = 2290 ( record_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 2249 "2275 26 23" _null_ _null_ _null_ _null_ _null_ record_in _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 2291 ( record_out PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "2249" _null_ _null_ _null_ _null_ _null_ record_out _null_ _null_ _null_ )); DESCR("I/O"); @@ -3831,7 +3831,7 @@ DATA(insert OID = 2414 ( textrecv PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 DESCR("I/O"); DATA(insert OID = 2415 ( textsend PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 17 "25" _null_ _null_ _null_ _null_ _null_ textsend _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2416 ( unknownrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 705 "2281" _null_ _null_ _null_ _null_ _null_ unknownrecv _null_ _null_ _null_ )); +DATA(insert OID = 2416 ( unknownrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 705 "2281" _null_ _null_ _null_ _null_ _null_ unknownrecv _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 2417 ( unknownsend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "705" _null_ _null_ _null_ _null_ _null_ unknownsend _null_ _null_ _null_ )); DESCR("I/O"); @@ -3847,15 +3847,15 @@ DATA(insert OID = 2422 ( namerecv PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 DESCR("I/O"); DATA(insert OID = 2423 ( namesend PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 17 "19" _null_ _null_ _null_ _null_ _null_ namesend _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2424 ( float4recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 700 "2281" _null_ _null_ _null_ _null_ _null_ float4recv _null_ _null_ _null_ )); +DATA(insert OID = 2424 ( float4recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 700 "2281" _null_ _null_ _null_ _null_ _null_ float4recv _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 2425 ( float4send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "700" _null_ _null_ _null_ _null_ _null_ float4send _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2426 ( float8recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "2281" _null_ _null_ _null_ _null_ _null_ float8recv _null_ _null_ _null_ )); +DATA(insert OID = 2426 ( float8recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "2281" 
_null_ _null_ _null_ _null_ _null_ float8recv _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 2427 ( float8send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "701" _null_ _null_ _null_ _null_ _null_ float8send _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2428 ( point_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "2281" _null_ _null_ _null_ _null_ _null_ point_recv _null_ _null_ _null_ )); +DATA(insert OID = 2428 ( point_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "2281" _null_ _null_ _null_ _null_ _null_ point_recv _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 2429 ( point_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "600" _null_ _null_ _null_ _null_ _null_ point_send _null_ _null_ _null_ )); DESCR("I/O"); @@ -3932,15 +3932,15 @@ DATA(insert OID = 2460 ( numeric_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i DESCR("I/O"); DATA(insert OID = 2461 ( numeric_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "1700" _null_ _null_ _null_ _null_ _null_ numeric_send _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2462 ( abstimerecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 702 "2281" _null_ _null_ _null_ _null_ _null_ abstimerecv _null_ _null_ _null_ )); +DATA(insert OID = 2462 ( abstimerecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 702 "2281" _null_ _null_ _null_ _null_ _null_ abstimerecv _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 2463 ( abstimesend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "702" _null_ _null_ _null_ _null_ _null_ abstimesend _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2464 ( reltimerecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 703 "2281" _null_ _null_ _null_ _null_ _null_ reltimerecv _null_ _null_ _null_ )); +DATA(insert OID = 2464 ( reltimerecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 703 "2281" _null_ _null_ _null_ _null_ _null_ reltimerecv _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 2465 ( reltimesend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "703" _null_ _null_ _null_ _null_ _null_ reltimesend _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2466 ( tintervalrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 704 "2281" _null_ _null_ _null_ _null_ _null_ tintervalrecv _null_ _null_ _null_ )); +DATA(insert OID = 2466 ( tintervalrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 704 "2281" _null_ _null_ _null_ _null_ _null_ tintervalrecv _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 2467 ( tintervalsend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "704" _null_ _null_ _null_ _null_ _null_ tintervalsend _null_ _null_ _null_ )); DESCR("I/O"); @@ -3968,43 +3968,43 @@ DATA(insert OID = 2478 ( interval_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i DESCR("I/O"); DATA(insert OID = 2479 ( interval_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "1186" _null_ _null_ _null_ _null_ _null_ interval_send _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2480 ( lseg_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 601 "2281" _null_ _null_ _null_ _null_ _null_ lseg_recv _null_ _null_ _null_ )); +DATA(insert OID = 2480 ( lseg_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 601 "2281" _null_ _null_ _null_ _null_ _null_ lseg_recv _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 2481 ( lseg_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "601" _null_ _null_ _null_ _null_ _null_ lseg_send _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2482 ( path_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 602 "2281" _null_ _null_ _null_ _null_ _null_ 
path_recv _null_ _null_ _null_ )); +DATA(insert OID = 2482 ( path_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 602 "2281" _null_ _null_ _null_ _null_ _null_ path_recv _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 2483 ( path_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "602" _null_ _null_ _null_ _null_ _null_ path_send _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2484 ( box_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 603 "2281" _null_ _null_ _null_ _null_ _null_ box_recv _null_ _null_ _null_ )); +DATA(insert OID = 2484 ( box_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 603 "2281" _null_ _null_ _null_ _null_ _null_ box_recv _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 2485 ( box_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "603" _null_ _null_ _null_ _null_ _null_ box_send _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2486 ( poly_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 604 "2281" _null_ _null_ _null_ _null_ _null_ poly_recv _null_ _null_ _null_ )); +DATA(insert OID = 2486 ( poly_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 604 "2281" _null_ _null_ _null_ _null_ _null_ poly_recv _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 2487 ( poly_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "604" _null_ _null_ _null_ _null_ _null_ poly_send _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2488 ( line_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 628 "2281" _null_ _null_ _null_ _null_ _null_ line_recv _null_ _null_ _null_ )); +DATA(insert OID = 2488 ( line_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 628 "2281" _null_ _null_ _null_ _null_ _null_ line_recv _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 2489 ( line_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "628" _null_ _null_ _null_ _null_ _null_ line_send _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2490 ( circle_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 718 "2281" _null_ _null_ _null_ _null_ _null_ circle_recv _null_ _null_ _null_ )); +DATA(insert OID = 2490 ( circle_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 718 "2281" _null_ _null_ _null_ _null_ _null_ circle_recv _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 2491 ( circle_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "718" _null_ _null_ _null_ _null_ _null_ circle_send _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2492 ( cash_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 790 "2281" _null_ _null_ _null_ _null_ _null_ cash_recv _null_ _null_ _null_ )); +DATA(insert OID = 2492 ( cash_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 790 "2281" _null_ _null_ _null_ _null_ _null_ cash_recv _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 2493 ( cash_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "790" _null_ _null_ _null_ _null_ _null_ cash_send _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2494 ( macaddr_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 829 "2281" _null_ _null_ _null_ _null_ _null_ macaddr_recv _null_ _null_ _null_ )); +DATA(insert OID = 2494 ( macaddr_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 829 "2281" _null_ _null_ _null_ _null_ _null_ macaddr_recv _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 2495 ( macaddr_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "829" _null_ _null_ _null_ _null_ _null_ macaddr_send _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2496 ( inet_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 869 "2281" _null_ _null_ _null_ _null_ 
_null_ inet_recv _null_ _null_ _null_ )); +DATA(insert OID = 2496 ( inet_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 869 "2281" _null_ _null_ _null_ _null_ _null_ inet_recv _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 2497 ( inet_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "869" _null_ _null_ _null_ _null_ _null_ inet_send _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 2498 ( cidr_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 650 "2281" _null_ _null_ _null_ _null_ _null_ cidr_recv _null_ _null_ _null_ )); +DATA(insert OID = 2498 ( cidr_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 650 "2281" _null_ _null_ _null_ _null_ _null_ cidr_recv _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 2499 ( cidr_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "650" _null_ _null_ _null_ _null_ _null_ cidr_send _null_ _null_ _null_ )); DESCR("I/O"); @@ -4022,17 +4022,17 @@ DATA(insert OID = 3121 ( void_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s DESCR("I/O"); /* System-view support functions with pretty-print option */ -DATA(insert OID = 2504 ( pg_get_ruledef PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "26 16" _null_ _null_ _null_ _null_ _null_ pg_get_ruledef_ext _null_ _null_ _null_ )); +DATA(insert OID = 2504 ( pg_get_ruledef PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "26 16" _null_ _null_ _null_ _null_ _null_ pg_get_ruledef_ext _null_ _null_ _null_ )); DESCR("source text of a rule with pretty-print option"); -DATA(insert OID = 2505 ( pg_get_viewdef PGNSP PGUID 12 1 0 0 0 f f f f t f s r 2 0 25 "25 16" _null_ _null_ _null_ _null_ _null_ pg_get_viewdef_name_ext _null_ _null_ _null_ )); +DATA(insert OID = 2505 ( pg_get_viewdef PGNSP PGUID 12 1 0 0 0 f f f f t f s r 2 0 25 "25 16" _null_ _null_ _null_ _null_ _null_ pg_get_viewdef_name_ext _null_ _null_ _null_ )); DESCR("select statement of a view with pretty-print option"); -DATA(insert OID = 2506 ( pg_get_viewdef PGNSP PGUID 12 1 0 0 0 f f f f t f s r 2 0 25 "26 16" _null_ _null_ _null_ _null_ _null_ pg_get_viewdef_ext _null_ _null_ _null_ )); +DATA(insert OID = 2506 ( pg_get_viewdef PGNSP PGUID 12 1 0 0 0 f f f f t f s r 2 0 25 "26 16" _null_ _null_ _null_ _null_ _null_ pg_get_viewdef_ext _null_ _null_ _null_ )); DESCR("select statement of a view with pretty-print option"); -DATA(insert OID = 3159 ( pg_get_viewdef PGNSP PGUID 12 1 0 0 0 f f f f t f s r 2 0 25 "26 23" _null_ _null_ _null_ _null_ _null_ pg_get_viewdef_wrap _null_ _null_ _null_ )); +DATA(insert OID = 3159 ( pg_get_viewdef PGNSP PGUID 12 1 0 0 0 f f f f t f s r 2 0 25 "26 23" _null_ _null_ _null_ _null_ _null_ pg_get_viewdef_wrap _null_ _null_ _null_ )); DESCR("select statement of a view with pretty-printing and specified line wrapping"); DATA(insert OID = 2507 ( pg_get_indexdef PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 25 "26 23 16" _null_ _null_ _null_ _null_ _null_ pg_get_indexdef_ext _null_ _null_ _null_ )); DESCR("index description (full create statement or single expression) with pretty-print option"); -DATA(insert OID = 2508 ( pg_get_constraintdef PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "26 16" _null_ _null_ _null_ _null_ _null_ pg_get_constraintdef_ext _null_ _null_ _null_ )); +DATA(insert OID = 2508 ( pg_get_constraintdef PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "26 16" _null_ _null_ _null_ _null_ _null_ pg_get_constraintdef_ext _null_ _null_ _null_ )); DESCR("constraint description with pretty-print option"); DATA(insert OID = 2509 ( pg_get_expr PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 25 "194 26 16" _null_ _null_ 
_null_ _null_ _null_ pg_get_expr_ext _null_ _null_ _null_ )); DESCR("deparse an encoded expression with pretty-print option"); @@ -4113,10 +4113,10 @@ DATA(insert OID = 2243 ( bit_or PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 DESCR("bitwise-or bit aggregate"); /* formerly-missing interval + datetime operators */ -DATA(insert OID = 2546 ( interval_pl_date PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 1114 "1186 1082" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ )); -DATA(insert OID = 2547 ( interval_pl_timetz PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 1266 "1186 1266" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ )); -DATA(insert OID = 2548 ( interval_pl_timestamp PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 1114 "1186 1114" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ )); -DATA(insert OID = 2549 ( interval_pl_timestamptz PGNSP PGUID 14 1 0 0 0 f f f f t f s s 2 0 1184 "1186 1184" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ )); +DATA(insert OID = 2546 ( interval_pl_date PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 1114 "1186 1082" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ )); +DATA(insert OID = 2547 ( interval_pl_timetz PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 1266 "1186 1266" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ )); +DATA(insert OID = 2548 ( interval_pl_timestamp PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 1114 "1186 1114" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ )); +DATA(insert OID = 2549 ( interval_pl_timestamptz PGNSP PGUID 14 1 0 0 0 f f f f t f s s 2 0 1184 "1186 1184" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ )); DATA(insert OID = 2550 ( integer_pl_date PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 1082 "23 1082" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ )); DATA(insert OID = 2556 ( pg_tablespace_databases PGNSP PGUID 12 1 1000 0 0 f f f f t t s s 1 0 26 "26" _null_ _null_ _null_ _null_ _null_ pg_tablespace_databases _null_ _null_ _null_ )); @@ -4157,7 +4157,7 @@ DATA(insert OID = 2580 ( gist_box_decompress PGNSP PGUID 12 1 0 0 0 f f f f t f DESCR("GiST support"); DATA(insert OID = 3281 ( gist_box_fetch PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ gist_box_fetch _null_ _null_ _null_ )); DESCR("GiST support"); -DATA(insert OID = 2581 ( gist_box_penalty PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gist_box_penalty _null_ _null_ _null_ )); +DATA(insert OID = 2581 ( gist_box_penalty PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gist_box_penalty _null_ _null_ _null_ )); DESCR("GiST support"); DATA(insert OID = 2582 ( gist_box_picksplit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ gist_box_picksplit _null_ _null_ _null_ )); DESCR("GiST support"); @@ -4169,7 +4169,7 @@ DATA(insert OID = 2585 ( gist_poly_consistent PGNSP PGUID 12 1 0 0 0 f f f f t DESCR("GiST support"); DATA(insert OID = 2586 ( gist_poly_compress PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ gist_poly_compress _null_ _null_ _null_ )); DESCR("GiST support"); -DATA(insert OID = 2591 ( gist_circle_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 16 "2281 718 21 26 2281" _null_ _null_ _null_ _null_ _null_ 
gist_circle_consistent _null_ _null_ _null_ )); +DATA(insert OID = 2591 ( gist_circle_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 16 "2281 718 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_circle_consistent _null_ _null_ _null_ )); DESCR("GiST support"); DATA(insert OID = 2592 ( gist_circle_compress PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ gist_circle_compress _null_ _null_ _null_ )); DESCR("GiST support"); @@ -4179,11 +4179,11 @@ DATA(insert OID = 3282 ( gist_point_fetch PGNSP PGUID 12 1 0 0 0 f f f f t f i DESCR("GiST support"); DATA(insert OID = 2179 ( gist_point_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 16 "2281 600 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_point_consistent _null_ _null_ _null_ )); DESCR("GiST support"); -DATA(insert OID = 3064 ( gist_point_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 701 "2281 600 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_point_distance _null_ _null_ _null_ )); +DATA(insert OID = 3064 ( gist_point_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 701 "2281 600 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_point_distance _null_ _null_ _null_ )); DESCR("GiST support"); -DATA(insert OID = 3280 ( gist_circle_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 701 "2281 718 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_circle_distance _null_ _null_ _null_ )); +DATA(insert OID = 3280 ( gist_circle_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 701 "2281 718 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_circle_distance _null_ _null_ _null_ )); DESCR("GiST support"); -DATA(insert OID = 3288 ( gist_poly_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 701 "2281 604 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_poly_distance _null_ _null_ _null_ )); +DATA(insert OID = 3288 ( gist_poly_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 701 "2281 604 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_poly_distance _null_ _null_ _null_ )); DESCR("GiST support"); /* GIN array support */ @@ -4195,7 +4195,7 @@ DATA(insert OID = 2744 ( ginarrayconsistent PGNSP PGUID 12 1 0 0 0 f f f f t f DESCR("GIN array support"); DATA(insert OID = 3920 ( ginarraytriconsistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 7 0 18 "2281 21 2277 23 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ ginarraytriconsistent _null_ _null_ _null_ )); DESCR("GIN array support"); -DATA(insert OID = 3076 ( ginarrayextract PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "2277 2281" _null_ _null_ _null_ _null_ _null_ ginarrayextract_2args _null_ _null_ _null_ )); +DATA(insert OID = 3076 ( ginarrayextract PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "2277 2281" _null_ _null_ _null_ _null_ _null_ ginarrayextract_2args _null_ _null_ _null_ )); DESCR("GIN array support (obsolete)"); /* overlap/contains/contained */ @@ -4278,7 +4278,7 @@ DATA(insert OID = 2896 ( xml PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 14 DESCR("perform a non-validating parse of a character string to produce an XML value"); DATA(insert OID = 2897 ( xmlvalidate PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "142 25" _null_ _null_ _null_ _null_ _null_ xmlvalidate _null_ _null_ _null_ )); DESCR("validate an XML value"); -DATA(insert OID = 2898 ( xml_recv PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 142 "2281" _null_ _null_ _null_ _null_ _null_ xml_recv _null_ _null_ _null_ )); +DATA(insert OID = 2898 ( xml_recv PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 142 "2281" _null_ _null_ _null_ _null_ _null_ 
xml_recv _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 2899 ( xml_send PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 17 "142" _null_ _null_ _null_ _null_ _null_ xml_send _null_ _null_ _null_ )); DESCR("I/O"); @@ -4344,7 +4344,7 @@ DATA(insert OID = 321 ( json_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 DESCR("I/O"); DATA(insert OID = 322 ( json_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "114" _null_ _null_ _null_ _null_ _null_ json_out _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 323 ( json_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 114 "2281" _null_ _null_ _null_ _null_ _null_ json_recv _null_ _null_ _null_ )); +DATA(insert OID = 323 ( json_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 114 "2281" _null_ _null_ _null_ _null_ _null_ json_recv _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 324 ( json_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "114" _null_ _null_ _null_ _null_ _null_ json_send _null_ _null_ _null_ )); DESCR("I/O"); @@ -4506,7 +4506,7 @@ DATA(insert OID = 3639 ( tsvectorrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s DESCR("I/O"); DATA(insert OID = 3611 ( tsvectorout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "3614" _null_ _null_ _null_ _null_ _null_ tsvectorout _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 3638 ( tsvectorsend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "3614" _null_ _null_ _null_ _null_ _null_ tsvectorsend _null_ _null_ _null_ )); +DATA(insert OID = 3638 ( tsvectorsend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "3614" _null_ _null_ _null_ _null_ _null_ tsvectorsend _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 3612 ( tsqueryin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3615 "2275" _null_ _null_ _null_ _null_ _null_ tsqueryin _null_ _null_ _null_ )); DESCR("I/O"); @@ -4514,7 +4514,7 @@ DATA(insert OID = 3641 ( tsqueryrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s DESCR("I/O"); DATA(insert OID = 3613 ( tsqueryout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "3615" _null_ _null_ _null_ _null_ _null_ tsqueryout _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 3640 ( tsquerysend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "3615" _null_ _null_ _null_ _null_ _null_ tsquerysend _null_ _null_ _null_ )); +DATA(insert OID = 3640 ( tsquerysend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "3615" _null_ _null_ _null_ _null_ _null_ tsquerysend _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 3646 ( gtsvectorin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3642 "2275" _null_ _null_ _null_ _null_ _null_ gtsvectorin _null_ _null_ _null_ )); DESCR("I/O"); @@ -4536,20 +4536,20 @@ DATA(insert OID = 3623 ( strip PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3 DESCR("strip position information"); DATA(insert OID = 3624 ( setweight PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 18" _null_ _null_ _null_ _null_ _null_ tsvector_setweight _null_ _null_ _null_ )); DESCR("set given weight for whole tsvector"); -DATA(insert OID = 3320 ( setweight PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 3614 "3614 18 1009" _null_ _null_ _null_ _null_ _null_ tsvector_setweight_by_filter _null_ _null_ _null_ )); +DATA(insert OID = 3320 ( setweight PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 3614 "3614 18 1009" _null_ _null_ _null_ _null_ _null_ tsvector_setweight_by_filter _null_ _null_ _null_ )); DESCR("set given weight for given lexemes"); -DATA(insert OID = 3625 ( tsvector_concat PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 3614" _null_ _null_ _null_ _null_ _null_ tsvector_concat _null_ 
_null_ _null_ )); -DATA(insert OID = 3321 ( ts_delete PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 25" _null_ _null_ _null_ _null_ _null_ tsvector_delete_str _null_ _null_ _null_ )); +DATA(insert OID = 3625 ( tsvector_concat PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 3614" _null_ _null_ _null_ _null_ _null_ tsvector_concat _null_ _null_ _null_ )); +DATA(insert OID = 3321 ( ts_delete PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 25" _null_ _null_ _null_ _null_ _null_ tsvector_delete_str _null_ _null_ _null_ )); DESCR("delete lexeme"); -DATA(insert OID = 3323 ( ts_delete PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 1009" _null_ _null_ _null_ _null_ _null_ tsvector_delete_arr _null_ _null_ _null_ )); +DATA(insert OID = 3323 ( ts_delete PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 1009" _null_ _null_ _null_ _null_ _null_ tsvector_delete_arr _null_ _null_ _null_ )); DESCR("delete given lexemes"); DATA(insert OID = 3322 ( unnest PGNSP PGUID 12 1 10 0 0 f f f f t t i s 1 0 2249 "3614" "{3614,25,1005,1009}" "{i,o,o,o}" "{tsvector,lexeme,positions,weights}" _null_ _null_ tsvector_unnest _null_ _null_ _null_ )); DESCR("expand tsvector to set of rows"); -DATA(insert OID = 3326 ( tsvector_to_array PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1009 "3614" _null_ _null_ _null_ _null_ _null_ tsvector_to_array _null_ _null_ _null_ )); +DATA(insert OID = 3326 ( tsvector_to_array PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1009 "3614" _null_ _null_ _null_ _null_ _null_ tsvector_to_array _null_ _null_ _null_ )); DESCR("convert tsvector to array of lexemes"); -DATA(insert OID = 3327 ( array_to_tsvector PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3614 "1009" _null_ _null_ _null_ _null_ _null_ array_to_tsvector _null_ _null_ _null_ )); +DATA(insert OID = 3327 ( array_to_tsvector PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3614 "1009" _null_ _null_ _null_ _null_ _null_ array_to_tsvector _null_ _null_ _null_ )); DESCR("build tsvector from array of lexemes"); -DATA(insert OID = 3319 ( ts_filter PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 1002" _null_ _null_ _null_ _null_ _null_ tsvector_filter _null_ _null_ _null_ )); +DATA(insert OID = 3319 ( ts_filter PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 1002" _null_ _null_ _null_ _null_ _null_ tsvector_filter _null_ _null_ _null_ )); DESCR("delete lexemes that do not have one of the given weights"); DATA(insert OID = 3634 ( ts_match_vq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3614 3615" _null_ _null_ _null_ _null_ _null_ ts_match_vq _null_ _null_ _null_ )); @@ -4574,11 +4574,11 @@ DESCR("GiST tsvector support"); DATA(insert OID = 3790 ( gtsvector_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 16 "2281 3642 23 26 2281" _null_ _null_ _null_ _null_ _null_ gtsvector_consistent_oldsig _null_ _null_ _null_ )); DESCR("GiST tsvector support (obsolete)"); -DATA(insert OID = 3656 ( gin_extract_tsvector PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "3614 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_extract_tsvector _null_ _null_ _null_ )); +DATA(insert OID = 3656 ( gin_extract_tsvector PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "3614 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_extract_tsvector _null_ _null_ _null_ )); DESCR("GIN tsvector support"); DATA(insert OID = 3657 ( gin_extract_tsquery PGNSP PGUID 12 1 0 0 0 f f f f t f i s 7 0 2281 "3614 2281 21 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_extract_tsquery _null_ _null_ _null_ )); DESCR("GIN tsvector support"); 
-DATA(insert OID = 3658 ( gin_tsquery_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 8 0 16 "2281 21 3614 23 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_tsquery_consistent _null_ _null_ _null_ )); +DATA(insert OID = 3658 ( gin_tsquery_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 8 0 16 "2281 21 3614 23 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_tsquery_consistent _null_ _null_ _null_ )); DESCR("GIN tsvector support"); DATA(insert OID = 3921 ( gin_tsquery_triconsistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 7 0 18 "2281 21 3614 23 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_tsquery_triconsistent _null_ _null_ _null_ )); DESCR("GIN tsvector support"); @@ -4594,7 +4594,7 @@ DATA(insert OID = 3088 ( gin_tsquery_consistent PGNSP PGUID 12 1 0 0 0 f f f f DESCR("GIN tsvector support (obsolete)"); DATA(insert OID = 3791 ( gin_extract_tsquery PGNSP PGUID 12 1 0 0 0 f f f f t f i s 7 0 2281 "3615 2281 21 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_extract_tsquery_oldsig _null_ _null_ _null_ )); DESCR("GIN tsvector support (obsolete)"); -DATA(insert OID = 3792 ( gin_tsquery_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 8 0 16 "2281 21 3615 23 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_tsquery_consistent_oldsig _null_ _null_ _null_ )); +DATA(insert OID = 3792 ( gin_tsquery_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 8 0 16 "2281 21 3615 23 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_tsquery_consistent_oldsig _null_ _null_ _null_ )); DESCR("GIN tsvector support (obsolete)"); DATA(insert OID = 3789 ( gin_clean_pending_list PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 20 "2205" _null_ _null_ _null_ _null_ _null_ gin_clean_pending_list _null_ _null_ _null_ )); @@ -4777,7 +4777,7 @@ DATA(insert OID = 3805 ( jsonb_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 DESCR("I/O"); DATA(insert OID = 3804 ( jsonb_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_out _null_ _null_ _null_ )); DESCR("I/O"); -DATA(insert OID = 3803 ( jsonb_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_send _null_ _null_ _null_ )); +DATA(insert OID = 3803 ( jsonb_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_send _null_ _null_ _null_ )); DESCR("I/O"); DATA(insert OID = 3263 ( jsonb_object PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3802 "1009" _null_ _null_ _null_ _null_ _null_ jsonb_object _null_ _null_ _null_ )); @@ -4800,7 +4800,7 @@ DATA(insert OID = 3270 ( jsonb_object_agg PGNSP PGUID 12 1 0 0 0 t f f f f DESCR("aggregate inputs into jsonb object"); DATA(insert OID = 3271 ( jsonb_build_array PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 1 0 3802 "2276" "{2276}" "{v}" _null_ _null_ _null_ jsonb_build_array _null_ _null_ _null_ )); DESCR("build a jsonb array from any inputs"); -DATA(insert OID = 3272 ( jsonb_build_array PGNSP PGUID 12 1 0 0 0 f f f f f f s s 0 0 3802 "" _null_ _null_ _null_ _null_ _null_ jsonb_build_array_noargs _null_ _null_ _null_ )); +DATA(insert OID = 3272 ( jsonb_build_array PGNSP PGUID 12 1 0 0 0 f f f f f f s s 0 0 3802 "" _null_ _null_ _null_ _null_ _null_ jsonb_build_array_noargs _null_ _null_ _null_ )); DESCR("build an empty jsonb array"); DATA(insert OID = 3273 ( jsonb_build_object PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 1 0 3802 "2276" "{2276}" "{v}" _null_ _null_ _null_ jsonb_build_object _null_ _null_ _null_ )); DESCR("build a jsonb object from 
pairwise key/value inputs"); @@ -4880,7 +4880,7 @@ DATA(insert OID = 3305 ( jsonb_set PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 DESCR("Set part of a jsonb"); DATA(insert OID = 3306 ( jsonb_pretty PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_pretty _null_ _null_ _null_ )); DESCR("Indented text from jsonb"); -DATA(insert OID = 3579 ( jsonb_insert PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 3802 "3802 1009 3802 16" _null_ _null_ _null_ _null_ _null_ jsonb_insert _null_ _null_ _null_ )); +DATA(insert OID = 3579 ( jsonb_insert PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 3802 "3802 1009 3802 16" _null_ _null_ _null_ _null_ _null_ jsonb_insert _null_ _null_ _null_ )); DESCR("Insert value into a jsonb"); /* txid */ DATA(insert OID = 2939 ( txid_snapshot_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2970 "2275" _null_ _null_ _null_ _null_ _null_ txid_snapshot_in _null_ _null_ _null_ )); @@ -5146,7 +5146,7 @@ DATA(insert OID = 5013 ( spg_box_quad_choose PGNSP PGUID 12 1 0 0 0 f f f f t f DESCR("SP-GiST support for quad tree over box"); DATA(insert OID = 5014 ( spg_box_quad_picksplit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_box_quad_picksplit _null_ _null_ _null_ )); DESCR("SP-GiST support for quad tree over box"); -DATA(insert OID = 5015 ( spg_box_quad_inner_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_box_quad_inner_consistent _null_ _null_ _null_ )); +DATA(insert OID = 5015 ( spg_box_quad_inner_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_box_quad_inner_consistent _null_ _null_ _null_ )); DESCR("SP-GiST support for quad tree over box"); DATA(insert OID = 5016 ( spg_box_quad_leaf_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_box_quad_leaf_consistent _null_ _null_ _null_ )); DESCR("SP-GiST support for quad tree over box"); @@ -5220,9 +5220,9 @@ DATA(insert OID = 3985 ( mode_final PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 DESCR("aggregate final function"); /* hypothetical-set aggregates (and their support functions) */ -DATA(insert OID = 3986 ( rank PGNSP PGUID 12 1 0 2276 0 t f f f f f i s 1 0 20 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 3986 ( rank PGNSP PGUID 12 1 0 2276 0 t f f f f f i s 1 0 20 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("rank of hypothetical row"); -DATA(insert OID = 3987 ( rank_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 2 0 20 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_rank_final _null_ _null_ _null_ )); +DATA(insert OID = 3987 ( rank_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 2 0 20 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_rank_final _null_ _null_ _null_ )); DESCR("aggregate final function"); DATA(insert OID = 3988 ( percent_rank PGNSP PGUID 12 1 0 2276 0 t f f f f f i s 1 0 701 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("fractional rank of hypothetical row"); @@ -5232,9 +5232,9 @@ DATA(insert OID = 3990 ( cume_dist PGNSP PGUID 12 1 0 2276 0 t f f f f f i s 1 DESCR("cumulative distribution of hypothetical row"); DATA(insert OID = 3991 ( cume_dist_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 2 0 701 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ 
hypothetical_cume_dist_final _null_ _null_ _null_ )); DESCR("aggregate final function"); -DATA(insert OID = 3992 ( dense_rank PGNSP PGUID 12 1 0 2276 0 t f f f f f i s 1 0 20 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); +DATA(insert OID = 3992 ( dense_rank PGNSP PGUID 12 1 0 2276 0 t f f f f f i s 1 0 20 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); DESCR("rank of hypothetical row without gaps"); -DATA(insert OID = 3993 ( dense_rank_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 2 0 20 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_dense_rank_final _null_ _null_ _null_ )); +DATA(insert OID = 3993 ( dense_rank_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 2 0 20 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_dense_rank_final _null_ _null_ _null_ )); DESCR("aggregate final function"); /* pg_upgrade support */ @@ -5338,7 +5338,7 @@ DESCR("pg_controldata init state information as a function"); */ #define PROPARALLEL_SAFE 's' /* can run in worker or master */ #define PROPARALLEL_RESTRICTED 'r' /* can run in parallel master only */ -#define PROPARALLEL_UNSAFE 'u' /* banned while in parallel mode */ +#define PROPARALLEL_UNSAFE 'u' /* banned while in parallel mode */ /* * Symbolic values for proargmodes column. Note that these must agree with diff --git a/src/include/catalog/pg_type.h b/src/include/catalog/pg_type.h index 2c90b76fe1..162239c7ae 100644 --- a/src/include/catalog/pg_type.h +++ b/src/include/catalog/pg_type.h @@ -695,7 +695,7 @@ DATA(insert OID = 3500 ( anyenum PGNSP PGUID 4 t p P f t \054 0 0 0 anyenum_in DATA(insert OID = 3115 ( fdw_handler PGNSP PGUID 4 t p P f t \054 0 0 0 fdw_handler_in fdw_handler_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); #define FDW_HANDLEROID 3115 DATA(insert OID = 325 ( index_am_handler PGNSP PGUID 4 t p P f t \054 0 0 0 index_am_handler_in index_am_handler_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define INDEX_AM_HANDLEROID 325 +#define INDEX_AM_HANDLEROID 325 DATA(insert OID = 3310 ( tsm_handler PGNSP PGUID 4 t p P f t \054 0 0 0 tsm_handler_in tsm_handler_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); #define TSM_HANDLEROID 3310 DATA(insert OID = 3831 ( anyrange PGNSP PGUID -1 f p P f t \054 0 0 0 anyrange_in anyrange_out - - - - - d x f 0 -1 0 0 _null_ _null_ _null_ )); diff --git a/src/include/common/config_info.h b/src/include/common/config_info.h index c9e6e1cc8f..a695a42c0a 100644 --- a/src/include/common/config_info.h +++ b/src/include/common/config_info.h @@ -16,6 +16,6 @@ typedef struct ConfigData } ConfigData; extern ConfigData *get_configdata(const char *my_exec_path, - size_t *configdata_len); + size_t *configdata_len); #endif /* COMMON_CONFIG_INFO_H */ diff --git a/src/include/executor/execParallel.h b/src/include/executor/execParallel.h index 882dc80bcd..f4c6d37a11 100644 --- a/src/include/executor/execParallel.h +++ b/src/include/executor/execParallel.h @@ -22,13 +22,13 @@ typedef struct SharedExecutorInstrumentation SharedExecutorInstrumentation; typedef struct ParallelExecutorInfo { - PlanState *planstate; + PlanState *planstate; ParallelContext *pcxt; BufferUsage *buffer_usage; SharedExecutorInstrumentation *instrumentation; shm_mq_handle **tqueue; - bool finished; -} ParallelExecutorInfo; + bool finished; +} ParallelExecutorInfo; extern ParallelExecutorInfo *ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers); diff --git a/src/include/executor/nodeCustom.h 
b/src/include/executor/nodeCustom.h index 9d0b393528..7d16c2b1fa 100644 --- a/src/include/executor/nodeCustom.h +++ b/src/include/executor/nodeCustom.h @@ -32,10 +32,10 @@ extern void ExecCustomRestrPos(CustomScanState *node); * Parallel execution support */ extern void ExecCustomScanEstimate(CustomScanState *node, - ParallelContext *pcxt); + ParallelContext *pcxt); extern void ExecCustomScanInitializeDSM(CustomScanState *node, - ParallelContext *pcxt); + ParallelContext *pcxt); extern void ExecCustomScanInitializeWorker(CustomScanState *node, - shm_toc *toc); + shm_toc *toc); #endif /* NODECUSTOM_H */ diff --git a/src/include/executor/nodeForeignscan.h b/src/include/executor/nodeForeignscan.h index c2553295fa..0cdec4e843 100644 --- a/src/include/executor/nodeForeignscan.h +++ b/src/include/executor/nodeForeignscan.h @@ -23,10 +23,10 @@ extern void ExecEndForeignScan(ForeignScanState *node); extern void ExecReScanForeignScan(ForeignScanState *node); extern void ExecForeignScanEstimate(ForeignScanState *node, - ParallelContext *pcxt); + ParallelContext *pcxt); extern void ExecForeignScanInitializeDSM(ForeignScanState *node, - ParallelContext *pcxt); + ParallelContext *pcxt); extern void ExecForeignScanInitializeWorker(ForeignScanState *node, - shm_toc *toc); + shm_toc *toc); #endif /* NODEFOREIGNSCAN_H */ diff --git a/src/include/foreign/fdwapi.h b/src/include/foreign/fdwapi.h index 096a9c4a96..d5c1df2698 100644 --- a/src/include/foreign/fdwapi.h +++ b/src/include/foreign/fdwapi.h @@ -60,7 +60,7 @@ typedef void (*GetForeignJoinPaths_function) (PlannerInfo *root, JoinPathExtraData *extra); typedef void (*GetForeignUpperPaths_function) (PlannerInfo *root, - RelOptInfo *scan_join_rel); + RelOptInfo *scan_join_rel); typedef void (*AddForeignUpdateTargets_function) (Query *parsetree, RangeTblEntry *target_rte, @@ -98,12 +98,12 @@ typedef void (*EndForeignModify_function) (EState *estate, typedef int (*IsForeignRelUpdatable_function) (Relation rel); typedef bool (*PlanDirectModify_function) (PlannerInfo *root, - ModifyTable *plan, - Index resultRelation, - int subplan_index); + ModifyTable *plan, + Index resultRelation, + int subplan_index); typedef void (*BeginDirectModify_function) (ForeignScanState *node, - int eflags); + int eflags); typedef TupleTableSlot *(*IterateDirectModify_function) (ForeignScanState *node); @@ -142,13 +142,13 @@ typedef List *(*ImportForeignSchema_function) (ImportForeignSchemaStmt *stmt, Oid serverOid); typedef Size (*EstimateDSMForeignScan_function) (ForeignScanState *node, - ParallelContext *pcxt); + ParallelContext *pcxt); typedef void (*InitializeDSMForeignScan_function) (ForeignScanState *node, - ParallelContext *pcxt, - void *coordinate); + ParallelContext *pcxt, + void *coordinate); typedef void (*InitializeWorkerForeignScan_function) (ForeignScanState *node, - shm_toc *toc, - void *coordinate); + shm_toc *toc, + void *coordinate); typedef bool (*IsForeignScanParallelSafe_function) (PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte); diff --git a/src/include/foreign/foreign.h b/src/include/foreign/foreign.h index fb945e9ffd..f45873f4ee 100644 --- a/src/include/foreign/foreign.h +++ b/src/include/foreign/foreign.h @@ -72,7 +72,7 @@ typedef struct ForeignTable extern ForeignServer *GetForeignServer(Oid serverid); extern ForeignServer *GetForeignServerByName(const char *name, bool missing_ok); extern UserMapping *GetUserMapping(Oid userid, Oid serverid); -extern Oid GetUserMappingId(Oid userid, Oid serverid, bool missing_ok); +extern Oid GetUserMappingId(Oid 
userid, Oid serverid, bool missing_ok); extern UserMapping *GetUserMappingById(Oid umid); extern ForeignDataWrapper *GetForeignDataWrapper(Oid fdwid); extern ForeignDataWrapper *GetForeignDataWrapperByName(const char *name, diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index ee4e189689..1ddf14a86a 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -1031,7 +1031,7 @@ typedef struct PlanState * top-level plan */ Instrumentation *instrument; /* Optional runtime stats for this node */ - WorkerInstrumentation *worker_instrument; /* per-worker instrumentation */ + WorkerInstrumentation *worker_instrument; /* per-worker instrumentation */ /* * Common structural data for all Plan types. These links to subsidiary @@ -1586,7 +1586,7 @@ typedef struct WorkTableScanState typedef struct ForeignScanState { ScanState ss; /* its first field is NodeTag */ - List *fdw_recheck_quals; /* original quals not in ss.ps.qual */ + List *fdw_recheck_quals; /* original quals not in ss.ps.qual */ Size pscan_len; /* size of parallel coordination information */ /* use struct pointer to avoid including fdwapi.h here */ struct FdwRoutine *fdwroutine; @@ -1831,7 +1831,7 @@ typedef struct AggState AggStatePerTrans pertrans; /* per-Trans state information */ ExprContext **aggcontexts; /* econtexts for long-lived data (per GS) */ ExprContext *tmpcontext; /* econtext for input expressions */ - AggStatePerTrans curpertrans; /* currently active trans state */ + AggStatePerTrans curpertrans; /* currently active trans state */ bool input_done; /* indicates end of input */ bool agg_done; /* indicates completion of Agg scan */ bool combineStates; /* input tuples contain transition states */ diff --git a/src/include/nodes/extensible.h b/src/include/nodes/extensible.h index 9df1377a8e..17afe5897c 100644 --- a/src/include/nodes/extensible.h +++ b/src/include/nodes/extensible.h @@ -1,7 +1,7 @@ /*------------------------------------------------------------------------- * * extensible.h - * Definitions for extensible nodes and custom scans + * Definitions for extensible nodes and custom scans * * * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group @@ -61,13 +61,13 @@ typedef struct ExtensibleNodeMethods { const char *extnodename; Size node_size; - void (*nodeCopy)(struct ExtensibleNode *newnode, - const struct ExtensibleNode *oldnode); - bool (*nodeEqual)(const struct ExtensibleNode *a, - const struct ExtensibleNode *b); - void (*nodeOut)(struct StringInfoData *str, - const struct ExtensibleNode *node); - void (*nodeRead)(struct ExtensibleNode *node); + void (*nodeCopy) (struct ExtensibleNode *newnode, + const struct ExtensibleNode *oldnode); + bool (*nodeEqual) (const struct ExtensibleNode *a, + const struct ExtensibleNode *b); + void (*nodeOut) (struct StringInfoData *str, + const struct ExtensibleNode *node); + void (*nodeRead) (struct ExtensibleNode *node); } ExtensibleNodeMethods; extern void RegisterExtensibleNodeMethods(const ExtensibleNodeMethods *method); @@ -91,12 +91,12 @@ typedef struct CustomPathMethods /* Convert Path to a Plan */ struct Plan *(*PlanCustomPath) (PlannerInfo *root, - RelOptInfo *rel, - struct CustomPath *best_path, - List *tlist, - List *clauses, - List *custom_plans); -} CustomPathMethods; + RelOptInfo *rel, + struct CustomPath *best_path, + List *tlist, + List *clauses, + List *custom_plans); +} CustomPathMethods; /* * Custom scan. 
Here again, there's not much to do: we need to be able to @@ -120,8 +120,8 @@ typedef struct CustomExecMethods /* Required executor methods */ void (*BeginCustomScan) (CustomScanState *node, - EState *estate, - int eflags); + EState *estate, + int eflags); TupleTableSlot *(*ExecCustomScan) (CustomScanState *node); void (*EndCustomScan) (CustomScanState *node); void (*ReScanCustomScan) (CustomScanState *node); @@ -132,22 +132,22 @@ typedef struct CustomExecMethods /* Optional methods: needed if parallel execution is supported */ Size (*EstimateDSMCustomScan) (CustomScanState *node, - ParallelContext *pcxt); + ParallelContext *pcxt); void (*InitializeDSMCustomScan) (CustomScanState *node, - ParallelContext *pcxt, - void *coordinate); + ParallelContext *pcxt, + void *coordinate); void (*InitializeWorkerCustomScan) (CustomScanState *node, - shm_toc *toc, - void *coordinate); + shm_toc *toc, + void *coordinate); /* Optional: print additional information in EXPLAIN */ void (*ExplainCustomScan) (CustomScanState *node, - List *ancestors, - ExplainState *es); + List *ancestors, + ExplainState *es); } CustomExecMethods; extern void RegisterCustomScanMethods(const CustomScanMethods *methods); extern const CustomScanMethods *GetCustomScanMethods(const char *CustomName, - bool missing_ok); + bool missing_ok); -#endif /* EXTENSIBLE_H */ +#endif /* EXTENSIBLE_H */ diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h index 5953db45a8..c4b9c14ad9 100644 --- a/src/include/nodes/nodes.h +++ b/src/include/nodes/nodes.h @@ -557,7 +557,7 @@ extern void outToken(struct StringInfoData *str, const char *s); extern void outBitmapset(struct StringInfoData *str, const struct Bitmapset *bms); extern void outDatum(struct StringInfoData *str, uintptr_t value, - int typlen, bool typbyval); + int typlen, bool typbyval); /* * nodes/{readfuncs.c,read.c} diff --git a/src/include/nodes/params.h b/src/include/nodes/params.h index 298fec725c..a8aa530b74 100644 --- a/src/include/nodes/params.h +++ b/src/include/nodes/params.h @@ -72,7 +72,7 @@ typedef struct ParamListInfoData ParserSetupHook parserSetup; /* parser setup hook */ void *parserSetupArg; int numParams; /* number of ParamExternDatas following */ - struct Bitmapset *paramMask; /* if non-NULL, can ignore omitted params */ + struct Bitmapset *paramMask; /* if non-NULL, can ignore omitted params */ ParamExternData params[FLEXIBLE_ARRAY_MEMBER]; } ParamListInfoData; diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h index ea8554f275..72f53fd034 100644 --- a/src/include/nodes/plannodes.h +++ b/src/include/nodes/plannodes.h @@ -72,8 +72,8 @@ typedef struct PlannedStmt bool hasRowSecurity; /* row security applied? */ - bool parallelModeNeeded; /* parallel mode required to execute? */ - bool hasForeignJoin; /* Plan has a pushed down foreign join */ + bool parallelModeNeeded; /* parallel mode required to execute? 
*/ + bool hasForeignJoin; /* Plan has a pushed down foreign join */ } PlannedStmt; /* macro for fetching the Plan associated with a SubPlan node */ @@ -537,7 +537,8 @@ typedef struct ForeignScan List *fdw_exprs; /* expressions that FDW may evaluate */ List *fdw_private; /* private data for FDW */ List *fdw_scan_tlist; /* optional tlist describing scan tuple */ - List *fdw_recheck_quals; /* original quals not in scan.plan.qual */ + List *fdw_recheck_quals; /* original quals not in + * scan.plan.qual */ Bitmapset *fs_relids; /* RTIs generated by this scan */ bool fsSystemCol; /* true if any "system column" is needed */ } ForeignScan; diff --git a/src/include/nodes/relation.h b/src/include/nodes/relation.h index b2b204693f..7ca1e253b9 100644 --- a/src/include/nodes/relation.h +++ b/src/include/nodes/relation.h @@ -490,7 +490,7 @@ typedef struct RelOptInfo /* default result targetlist for Paths scanning this relation */ struct PathTarget *reltarget; /* list of Vars/Exprs, cost, width */ - bool reltarget_has_non_vars; /* true if any expression in + bool reltarget_has_non_vars; /* true if any expression in * PathTarget is a non-Var */ /* materialization information */ @@ -852,7 +852,8 @@ typedef struct Path bool parallel_aware; /* engage parallel-aware logic? */ bool parallel_safe; /* OK to use as part of parallel plan? */ - int parallel_workers; /* desired # of workers; 0 = not parallel */ + int parallel_workers; /* desired # of workers; 0 = not + * parallel */ /* estimated size/costs for path (see costsize.c for more info) */ double rows; /* estimated number of result tuples */ diff --git a/src/include/optimizer/clauses.h b/src/include/optimizer/clauses.h index 1eb1eb4a54..53cf726c0b 100644 --- a/src/include/optimizer/clauses.h +++ b/src/include/optimizer/clauses.h @@ -33,16 +33,17 @@ typedef struct * which context it is allowed in. We require three states here as there are * two different contexts in which partial aggregation is safe. For aggregates * which have an 'stype' of INTERNAL, it is okay to pass a pointer to the - * aggregate state within a single process, since the datum is just a + * aggregate state within a single process, since the datum is just a * pointer. In cases where the aggregate state must be passed between - * different processes, for example during parallel aggregation, passing - * pointers directly is not going to work. + * different processes, for example during parallel aggregation, passing + * pointers directly is not going to work. */ typedef enum { - PAT_ANY = 0, /* Any type of partial aggregation is okay. */ - PAT_INTERNAL_ONLY, /* Some aggregates support only internal mode. */ - PAT_DISABLED /* Some aggregates don't support partial mode at all */ + PAT_ANY = 0, /* Any type of partial aggregation is okay. */ + PAT_INTERNAL_ONLY, /* Some aggregates support only internal mode. 
*/ + PAT_DISABLED /* Some aggregates don't support partial mode + * at all */ } PartialAggType; extern Expr *make_opclause(Oid opno, Oid opresulttype, bool opretset, diff --git a/src/include/parser/parse_agg.h b/src/include/parser/parse_agg.h index 23ce8d6ce3..1408d4e9fd 100644 --- a/src/include/parser/parse_agg.h +++ b/src/include/parser/parse_agg.h @@ -36,33 +36,33 @@ extern Oid resolve_aggregate_transtype(Oid aggfuncid, int numArguments); extern void build_aggregate_transfn_expr(Oid *agg_input_types, - int agg_num_inputs, - int agg_num_direct_inputs, - bool agg_variadic, - Oid agg_state_type, - Oid agg_input_collation, - Oid transfn_oid, - Oid invtransfn_oid, - Expr **transfnexpr, - Expr **invtransfnexpr); + int agg_num_inputs, + int agg_num_direct_inputs, + bool agg_variadic, + Oid agg_state_type, + Oid agg_input_collation, + Oid transfn_oid, + Oid invtransfn_oid, + Expr **transfnexpr, + Expr **invtransfnexpr); extern void build_aggregate_combinefn_expr(Oid agg_state_type, - Oid agg_input_collation, - Oid combinefn_oid, - Expr **combinefnexpr); + Oid agg_input_collation, + Oid combinefn_oid, + Expr **combinefnexpr); extern void build_aggregate_serialfn_expr(Oid agg_state_type, - Oid agg_serial_type, - Oid agg_input_collation, - Oid serialfn_oid, - Expr **serialfnexpr); + Oid agg_serial_type, + Oid agg_input_collation, + Oid serialfn_oid, + Expr **serialfnexpr); extern void build_aggregate_finalfn_expr(Oid *agg_input_types, - int num_finalfn_inputs, - Oid agg_state_type, - Oid agg_result_type, - Oid agg_input_collation, - Oid finalfn_oid, - Expr **finalfnexpr); + int num_finalfn_inputs, + Oid agg_state_type, + Oid agg_result_type, + Oid agg_input_collation, + Oid finalfn_oid, + Expr **finalfnexpr); #endif /* PARSE_AGG_H */ diff --git a/src/include/parser/scansup.h b/src/include/parser/scansup.h index 4f95c81f7c..7f482b6067 100644 --- a/src/include/parser/scansup.h +++ b/src/include/parser/scansup.h @@ -21,7 +21,7 @@ extern char *downcase_truncate_identifier(const char *ident, int len, bool warn); extern char *downcase_identifier(const char *ident, int len, - bool warn, bool truncate); + bool warn, bool truncate); extern void truncate_identifier(char *ident, int len, bool warn); diff --git a/src/include/pgstat.h b/src/include/pgstat.h index 19c838771f..dc3320d091 100644 --- a/src/include/pgstat.h +++ b/src/include/pgstat.h @@ -733,7 +733,7 @@ typedef enum ProgressCommandType { PROGRESS_COMMAND_INVALID, PROGRESS_COMMAND_VACUUM -} ProgressCommandType; +} ProgressCommandType; #define PGSTAT_NUM_PROGRESS_PARAM 10 diff --git a/src/include/replication/message.h b/src/include/replication/message.h index 23b9cdb268..9aff9273bf 100644 --- a/src/include/replication/message.h +++ b/src/include/replication/message.h @@ -19,11 +19,11 @@ */ typedef struct xl_logical_message { - Oid dbId; /* database Oid emitted from */ - bool transactional; /* is message transactional? */ - Size prefix_size; /* length of prefix */ - Size message_size; /* size of the message */ - char message[FLEXIBLE_ARRAY_MEMBER]; /* message including the null + Oid dbId; /* database Oid emitted from */ + bool transactional; /* is message transactional? 
*/ + Size prefix_size; /* length of prefix */ + Size message_size; /* size of the message */ + char message[FLEXIBLE_ARRAY_MEMBER]; /* message including the null * terminated prefix of length * prefix_size */ } xl_logical_message; @@ -31,7 +31,7 @@ typedef struct xl_logical_message #define SizeOfLogicalMessage (offsetof(xl_logical_message, message)) extern XLogRecPtr LogLogicalMessage(const char *prefix, const char *message, - size_t size, bool transactional); + size_t size, bool transactional); /* RMGR API*/ #define XLOG_LOGICAL_MESSAGE 0x00 diff --git a/src/include/replication/output_plugin.h b/src/include/replication/output_plugin.h index 3a2ca985fb..7911cc0a29 100644 --- a/src/include/replication/output_plugin.h +++ b/src/include/replication/output_plugin.h @@ -78,12 +78,12 @@ typedef void (*LogicalDecodeCommitCB) ( */ typedef void (*LogicalDecodeMessageCB) ( struct LogicalDecodingContext *, - ReorderBufferTXN *txn, - XLogRecPtr message_lsn, - bool transactional, - const char *prefix, - Size message_size, - const char *message); + ReorderBufferTXN *txn, + XLogRecPtr message_lsn, + bool transactional, + const char *prefix, + Size message_size, + const char *message); /* * Filter changes by origin. diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h index e0708940a0..9e209aef4f 100644 --- a/src/include/replication/reorderbuffer.h +++ b/src/include/replication/reorderbuffer.h @@ -27,7 +27,7 @@ typedef struct ReorderBufferTupleBuf HeapTupleData tuple; /* pre-allocated size of tuple buffer, different from tuple size */ - Size alloc_tuple_size; + Size alloc_tuple_size; /* actual tuple data follows */ } ReorderBufferTupleBuf; @@ -102,10 +102,10 @@ typedef struct ReorderBufferChange /* Message with arbitrary data. 
*/ struct { - char *prefix; - Size message_size; - char *message; - } msg; + char *prefix; + Size message_size; + char *message; + } msg; /* New snapshot, set when action == *_INTERNAL_SNAPSHOT */ Snapshot snapshot; @@ -285,12 +285,12 @@ typedef void (*ReorderBufferCommitCB) ( /* message callback signature */ typedef void (*ReorderBufferMessageCB) ( - ReorderBuffer *rb, - ReorderBufferTXN *txn, - XLogRecPtr message_lsn, - bool transactional, - const char *prefix, Size sz, - const char *message); + ReorderBuffer *rb, + ReorderBufferTXN *txn, + XLogRecPtr message_lsn, + bool transactional, + const char *prefix, Size sz, + const char *message); struct ReorderBuffer { @@ -369,9 +369,9 @@ ReorderBufferChange *ReorderBufferGetChange(ReorderBuffer *); void ReorderBufferReturnChange(ReorderBuffer *, ReorderBufferChange *); void ReorderBufferQueueChange(ReorderBuffer *, TransactionId, XLogRecPtr lsn, ReorderBufferChange *); -void ReorderBufferQueueMessage(ReorderBuffer *, TransactionId, Snapshot snapshot, XLogRecPtr lsn, - bool transactional, const char *prefix, - Size message_size, const char *message); +void ReorderBufferQueueMessage(ReorderBuffer *, TransactionId, Snapshot snapshot, XLogRecPtr lsn, + bool transactional, const char *prefix, + Size message_size, const char *message); void ReorderBufferCommit(ReorderBuffer *, TransactionId, XLogRecPtr commit_lsn, XLogRecPtr end_lsn, TimestampTz commit_time, RepOriginId origin_id, XLogRecPtr origin_lsn); @@ -392,7 +392,7 @@ void ReorderBufferAddNewTupleCids(ReorderBuffer *, TransactionId, XLogRecPtr lsn void ReorderBufferAddInvalidations(ReorderBuffer *, TransactionId, XLogRecPtr lsn, Size nmsgs, SharedInvalidationMessage *msgs); void ReorderBufferImmediateInvalidation(ReorderBuffer *, uint32 ninvalidations, - SharedInvalidationMessage *invalidations); + SharedInvalidationMessage *invalidations); void ReorderBufferProcessXid(ReorderBuffer *, TransactionId xid, XLogRecPtr lsn); void ReorderBufferXidSetCatalogChanges(ReorderBuffer *, TransactionId xid, XLogRecPtr lsn); bool ReorderBufferXidHasCatalogChanges(ReorderBuffer *, TransactionId xid); diff --git a/src/include/replication/snapbuild.h b/src/include/replication/snapbuild.h index c4127a1cf7..df229a895c 100644 --- a/src/include/replication/snapbuild.h +++ b/src/include/replication/snapbuild.h @@ -64,7 +64,7 @@ extern void SnapBuildClearExportedSnapshot(void); extern SnapBuildState SnapBuildCurrentState(SnapBuild *snapstate); extern Snapshot SnapBuildGetOrBuildSnapshot(SnapBuild *builder, - TransactionId xid); + TransactionId xid); extern bool SnapBuildXactNeedsSkip(SnapBuild *snapstate, XLogRecPtr ptr); diff --git a/src/include/storage/fd.h b/src/include/storage/fd.h index fc84fe8b1d..cbc2224685 100644 --- a/src/include/storage/fd.h +++ b/src/include/storage/fd.h @@ -77,7 +77,7 @@ extern int FileTruncate(File file, off_t offset); extern void FileWriteback(File file, off_t offset, off_t nbytes); extern char *FilePathName(File file); extern int FileGetRawDesc(File file); -extern int FileGetRawFlags(File file); +extern int FileGetRawFlags(File file); extern int FileGetRawMode(File file); /* Operations that allow use of regular stdio --- USE WITH CAUTION */ diff --git a/src/include/storage/freespace.h b/src/include/storage/freespace.h index ce95ef3064..77b3bc3dc7 100644 --- a/src/include/storage/freespace.h +++ b/src/include/storage/freespace.h @@ -33,8 +33,8 @@ extern void XLogRecordPageWithFreeSpace(RelFileNode rnode, BlockNumber heapBlk, extern void FreeSpaceMapTruncateRel(Relation rel, BlockNumber 
nblocks); extern void FreeSpaceMapVacuum(Relation rel); extern void UpdateFreeSpaceMap(Relation rel, - BlockNumber startBlkNum, - BlockNumber endBlkNum, - Size freespace); + BlockNumber startBlkNum, + BlockNumber endBlkNum, + Size freespace); #endif /* FREESPACE_H_ */ diff --git a/src/include/storage/lockdefs.h b/src/include/storage/lockdefs.h index 434a20e865..dd7cb164c8 100644 --- a/src/include/storage/lockdefs.h +++ b/src/include/storage/lockdefs.h @@ -53,4 +53,4 @@ typedef struct xl_standby_lock Oid relOid; } xl_standby_lock; -#endif /* LOCKDEF_H_ */ +#endif /* LOCKDEF_H_ */ diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h index c3b462c949..775c66a197 100644 --- a/src/include/storage/proc.h +++ b/src/include/storage/proc.h @@ -84,7 +84,7 @@ struct PGPROC { /* proc->links MUST BE FIRST IN STRUCT (see ProcSleep,ProcWakeup,etc) */ SHM_QUEUE links; /* list link if process is in a list */ - PGPROC **procgloballist; /* procglobal list that owns this PGPROC */ + PGPROC **procgloballist; /* procglobal list that owns this PGPROC */ PGSemaphoreData sem; /* ONE semaphore to sleep on */ int waitStatus; /* STATUS_WAITING, STATUS_OK or STATUS_ERROR */ @@ -143,16 +143,17 @@ struct PGPROC /* Support for group XID clearing. */ /* true, if member of ProcArray group waiting for XID clear */ - bool procArrayGroupMember; + bool procArrayGroupMember; /* next ProcArray group member waiting for XID clear */ - pg_atomic_uint32 procArrayGroupNext; + pg_atomic_uint32 procArrayGroupNext; + /* * latest transaction id among the transaction's main XID and * subtransactions */ - TransactionId procArrayGroupMemberXid; + TransactionId procArrayGroupMemberXid; - uint32 wait_event_info; /* proc's wait information */ + uint32 wait_event_info; /* proc's wait information */ /* Per-backend LWLock. Protects fields below (but not group fields). */ LWLock backendLock; @@ -169,8 +170,8 @@ struct PGPROC * leader to get the LWLock protecting these fields. */ PGPROC *lockGroupLeader; /* lock group leader, if I'm a member */ - dlist_head lockGroupMembers; /* list of members, if I'm a leader */ - dlist_node lockGroupLink; /* my member link, if I'm a member */ + dlist_head lockGroupMembers; /* list of members, if I'm a leader */ + dlist_node lockGroupLink; /* my member link, if I'm a member */ }; /* NOTE: "typedef struct PGPROC PGPROC" appears in storage/lock.h. 
*/ diff --git a/src/include/storage/standby.h b/src/include/storage/standby.h index 52058840a5..dcebf72f85 100644 --- a/src/include/storage/standby.h +++ b/src/include/storage/standby.h @@ -86,6 +86,6 @@ extern void LogAccessExclusiveLockPrepare(void); extern XLogRecPtr LogStandbySnapshot(void); extern void LogStandbyInvalidations(int nmsgs, SharedInvalidationMessage *msgs, - bool relcacheInitFileInval); + bool relcacheInitFileInval); #endif /* STANDBY_H */ diff --git a/src/include/storage/standbydefs.h b/src/include/storage/standbydefs.h index bd3c97fe43..ea22d77e07 100644 --- a/src/include/storage/standbydefs.h +++ b/src/include/storage/standbydefs.h @@ -24,9 +24,9 @@ extern void standby_redo(XLogReaderState *record); extern void standby_desc(StringInfo buf, XLogReaderState *record); extern const char *standby_identify(uint8 info); extern void standby_desc_invalidations(StringInfo buf, - int nmsgs, SharedInvalidationMessage *msgs, - Oid dbId, Oid tsId, - bool relcacheInitFileInval); + int nmsgs, SharedInvalidationMessage *msgs, + Oid dbId, Oid tsId, + bool relcacheInitFileInval); /* * XLOG message types diff --git a/src/include/tsearch/dicts/spell.h b/src/include/tsearch/dicts/spell.h index cbd5174209..d1df1f9f4b 100644 --- a/src/include/tsearch/dicts/spell.h +++ b/src/include/tsearch/dicts/spell.h @@ -28,9 +28,9 @@ typedef struct { uint32 val:8, isword:1, - /* Stores compound flags listed below */ + /* Stores compound flags listed below */ compoundflag:4, - /* Reference to an entry of the AffixData field */ + /* Reference to an entry of the AffixData field */ affix:19; struct SPNode *node; } SPNodeData; @@ -63,8 +63,8 @@ typedef struct spell_struct union { /* - * flag is filled in by NIImportDictionary(). After NISortDictionary(), - * d is used instead of flag. + * flag is filled in by NIImportDictionary(). After + * NISortDictionary(), d is used instead of flag. 
*/ char *flag; /* d is used in mkSPNode() */ @@ -87,7 +87,7 @@ typedef struct spell_struct typedef struct aff_struct { char *flag; - /* FF_SUFFIX or FF_PREFIX */ + /* FF_SUFFIX or FF_PREFIX */ uint32 type:1, flagflags:7, issimple:1, @@ -151,9 +151,9 @@ typedef struct */ typedef enum { - FM_CHAR, /* one character (like ispell) */ - FM_LONG, /* two characters */ - FM_NUM /* number, >= 0 and < 65536 */ + FM_CHAR, /* one character (like ispell) */ + FM_LONG, /* two characters */ + FM_NUM /* number, >= 0 and < 65536 */ } FlagMode; /* @@ -202,11 +202,11 @@ typedef struct */ /* Array of Hunspell options in affix file */ - CompoundAffixFlag *CompoundAffixFlags; + CompoundAffixFlag *CompoundAffixFlags; /* number of entries in CompoundAffixFlags array */ - int nCompoundAffixFlag; + int nCompoundAffixFlag; /* allocated length of CompoundAffixFlags array */ - int mCompoundAffixFlag; + int mCompoundAffixFlag; /* * Remaining fields are only used during dictionary construction; they are diff --git a/src/include/tsearch/ts_public.h b/src/include/tsearch/ts_public.h index 9364eee438..b75a078ec5 100644 --- a/src/include/tsearch/ts_public.h +++ b/src/include/tsearch/ts_public.h @@ -34,17 +34,17 @@ typedef struct */ typedef struct { - uint32 selected: 1, - in: 1, - replace: 1, - repeated: 1, - skip: 1, - unused: 3, - type: 8, - len: 16; - WordEntryPos pos; - char *word; - QueryOperand *item; + uint32 selected:1, + in:1, + replace:1, + repeated:1, + skip:1, + unused:3, + type:8, + len:16; + WordEntryPos pos; + char *word; + QueryOperand *item; } HeadlineWordEntry; typedef struct @@ -52,7 +52,7 @@ typedef struct HeadlineWordEntry *words; int32 lenwords; int32 curwords; - int32 vectorpos; /* positions a-la tsvector */ + int32 vectorpos; /* positions a-la tsvector */ char *startsel; char *stopsel; char *fragdelim; diff --git a/src/include/tsearch/ts_type.h b/src/include/tsearch/ts_type.h index 8d24b32fac..80eb75c14e 100644 --- a/src/include/tsearch/ts_type.h +++ b/src/include/tsearch/ts_type.h @@ -49,7 +49,7 @@ typedef struct #define MAXSTRLEN ( (1<<11) - 1) #define MAXSTRPOS ( (1<<20) - 1) -extern int compareWordEntryPos(const void *a, const void *b); +extern int compareWordEntryPos(const void *a, const void *b); /* * Equivalent to @@ -230,16 +230,15 @@ typedef struct extern const int tsearch_op_priority[OP_COUNT]; -#define NOT_PHRASE_P 5 /* - * OP_PHRASE negation operations must have greater - * priority in order to force infix() to surround - * the whole OP_PHRASE expression with parentheses. - */ +#define NOT_PHRASE_P 5 /* OP_PHRASE negation operations must have + * greater priority in order to force infix() + * to surround the whole OP_PHRASE expression + * with parentheses. 
*/ -#define TOP_PRIORITY 6 /* highest priority for val nodes */ +#define TOP_PRIORITY 6 /* highest priority for val nodes */ /* get operation priority by its code*/ -#define OP_PRIORITY(x) ( tsearch_op_priority[(x) - 1] ) +#define OP_PRIORITY(x) ( tsearch_op_priority[(x) - 1] ) /* get QueryOperator priority */ #define QO_PRIORITY(x) OP_PRIORITY(((QueryOperator *) (x))->oper) /* special case: get QueryOperator priority for correct printing !(a <-> b>) */ diff --git a/src/include/tsearch/ts_utils.h b/src/include/tsearch/ts_utils.h index 855bbfecd6..e16ddaf72f 100644 --- a/src/include/tsearch/ts_utils.h +++ b/src/include/tsearch/ts_utils.h @@ -106,9 +106,9 @@ extern text *generateHeadline(HeadlineParsedText *prs); */ typedef struct ExecPhraseData { - int npos; - bool allocated; - WordEntryPos *pos; + int npos; + bool allocated; + WordEntryPos *pos; } ExecPhraseData; extern bool TS_execute(QueryItem *curitem, void *checkval, bool calcnot, diff --git a/src/include/utils/acl.h b/src/include/utils/acl.h index d91437b234..4cc49f0c0c 100644 --- a/src/include/utils/acl.h +++ b/src/include/utils/acl.h @@ -231,7 +231,7 @@ extern void check_is_member_of_role(Oid member, Oid role); extern Oid get_role_oid(const char *rolename, bool missing_ok); extern Oid get_role_oid_or_public(const char *rolename); extern Oid get_rolespec_oid(const Node *node, bool missing_ok); -extern void check_rolespec_name(const Node *node, const char *detail_msg); +extern void check_rolespec_name(const Node *node, const char *detail_msg); extern HeapTuple get_rolespec_tuple(const Node *node); extern char *get_rolespec_name(const Node *node); diff --git a/src/include/utils/elog.h b/src/include/utils/elog.h index c43e5b8943..f4ff03ec8a 100644 --- a/src/include/utils/elog.h +++ b/src/include/utils/elog.h @@ -256,7 +256,7 @@ extern PGDLLIMPORT ErrorContextCallback *error_context_stack; * PG_END_TRY(); * * (The braces are not actually necessary, but are recommended so that - * pgindent will indent the construct nicely.) The error recovery code + * pgindent will indent the construct nicely.) The error recovery code * can optionally do PG_RE_THROW() to propagate the same error outwards. * * Note: while the system will correctly propagate any new ereport(ERROR) diff --git a/src/include/utils/geo_decls.h b/src/include/utils/geo_decls.h index acf320207c..fe9bc60782 100644 --- a/src/include/utils/geo_decls.h +++ b/src/include/utils/geo_decls.h @@ -427,11 +427,11 @@ extern Datum gist_point_distance(PG_FUNCTION_ARGS); extern Datum gist_point_fetch(PG_FUNCTION_ARGS); /* utils/adt/geo_spgist.c */ -Datum spg_box_quad_config(PG_FUNCTION_ARGS); -Datum spg_box_quad_choose(PG_FUNCTION_ARGS); -Datum spg_box_quad_picksplit(PG_FUNCTION_ARGS); -Datum spg_box_quad_inner_consistent(PG_FUNCTION_ARGS); -Datum spg_box_quad_leaf_consistent(PG_FUNCTION_ARGS); +Datum spg_box_quad_config(PG_FUNCTION_ARGS); +Datum spg_box_quad_choose(PG_FUNCTION_ARGS); +Datum spg_box_quad_picksplit(PG_FUNCTION_ARGS); +Datum spg_box_quad_inner_consistent(PG_FUNCTION_ARGS); +Datum spg_box_quad_leaf_consistent(PG_FUNCTION_ARGS); /* geo_selfuncs.c */ extern Datum areasel(PG_FUNCTION_ARGS); diff --git a/src/include/utils/jsonapi.h b/src/include/utils/jsonapi.h index 46ab8dd11d..b041fa4ed3 100644 --- a/src/include/utils/jsonapi.h +++ b/src/include/utils/jsonapi.h @@ -108,7 +108,7 @@ extern void pg_parse_json(JsonLexContext *lex, JsonSemAction *sem); * number of elements in passed array lex context. It should be called from an * array_start action. 
*/ -extern int json_count_array_elements(JsonLexContext *lex); +extern int json_count_array_elements(JsonLexContext *lex); /* * constructors for JsonLexContext, with or without strval element. diff --git a/src/include/utils/jsonb.h b/src/include/utils/jsonb.h index 5d8e4a9f88..fa52afcb5c 100644 --- a/src/include/utils/jsonb.h +++ b/src/include/utils/jsonb.h @@ -244,7 +244,7 @@ struct JsonbValue union { Numeric numeric; - bool boolean; + bool boolean; struct { int len; diff --git a/src/include/utils/plancache.h b/src/include/utils/plancache.h index 7a98c5fa97..251f2d7186 100644 --- a/src/include/utils/plancache.h +++ b/src/include/utils/plancache.h @@ -135,7 +135,7 @@ typedef struct CachedPlan * changes from this value */ int generation; /* parent's generation number for this plan */ int refcount; /* count of live references to this struct */ - bool has_foreign_join; /* plan has pushed down a foreign join */ + bool has_foreign_join; /* plan has pushed down a foreign join */ MemoryContext context; /* context containing this CachedPlan */ } CachedPlan; diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h index 3b270b0fb6..fd858fd8c7 100644 --- a/src/include/utils/rel.h +++ b/src/include/utils/rel.h @@ -204,7 +204,7 @@ typedef struct StdRdOptions AutoVacOpts autovacuum; /* autovacuum-related options */ bool user_catalog_table; /* use as an additional catalog * relation */ - int parallel_workers; /* max number of parallel workers */ + int parallel_workers; /* max number of parallel workers */ } StdRdOptions; #define HEAP_MIN_FILLFACTOR 10 diff --git a/src/include/utils/snapmgr.h b/src/include/utils/snapmgr.h index 42706966f1..7a587fa18e 100644 --- a/src/include/utils/snapmgr.h +++ b/src/include/utils/snapmgr.h @@ -78,7 +78,7 @@ extern bool XactHasExportedSnapshots(void); extern void DeleteAllExportedSnapshotFiles(void); extern bool ThereAreNoPriorRegisteredSnapshots(void); extern TransactionId TransactionIdLimitedForOldSnapshots(TransactionId recentXmin, - Relation relation); + Relation relation); extern void MaintainOldSnapshotTimeMapping(int64 whenTaken, TransactionId xmin); extern char *ExportSnapshot(Snapshot snapshot); diff --git a/src/interfaces/ecpg/compatlib/informix.c b/src/interfaces/ecpg/compatlib/informix.c index 9f7776ee91..e50aa5ec65 100644 --- a/src/interfaces/ecpg/compatlib/informix.c +++ b/src/interfaces/ecpg/compatlib/informix.c @@ -666,7 +666,7 @@ dttofmtasc(timestamp * ts, char *output, int str_len, char *fmtstr) int intoasc(interval * i, char *str) { - char *tmp; + char *tmp; errno = 0; tmp = PGTYPESinterval_to_asc(i); @@ -1032,6 +1032,7 @@ void ECPG_informix_reset_sqlca(void) { struct sqlca_t *sqlca = ECPGget_sqlca(); + if (sqlca == NULL) return; diff --git a/src/interfaces/ecpg/ecpglib/extern.h b/src/interfaces/ecpg/ecpglib/extern.h index 263e0014fd..c3082beb4a 100644 --- a/src/interfaces/ecpg/ecpglib/extern.h +++ b/src/interfaces/ecpg/ecpglib/extern.h @@ -44,7 +44,7 @@ struct ECPGtype_information_cache { struct ECPGtype_information_cache *next; int oid; - enum ARRAY_TYPE isarray; + enum ARRAY_TYPE isarray; }; /* structure to store one statement */ diff --git a/src/interfaces/ecpg/preproc/descriptor.c b/src/interfaces/ecpg/preproc/descriptor.c index ebd95d3c4b..9b4eb630a9 100644 --- a/src/interfaces/ecpg/preproc/descriptor.c +++ b/src/interfaces/ecpg/preproc/descriptor.c @@ -175,7 +175,7 @@ output_get_descr(char *desc_name, char *index) for (results = assignments; results != NULL; results = results->next) { const struct variable *v = 
find_variable(results->variable); - char *str_zero = mm_strdup("0"); + char *str_zero = mm_strdup("0"); switch (results->value) { @@ -295,7 +295,8 @@ output_set_descr(char *desc_name, char *index) case ECPGd_length: case ECPGd_type: { - char *str_zero = mm_strdup("0"); + char *str_zero = mm_strdup("0"); + fprintf(yyout, "%s,", get_dtype(results->value)); ECPGdump_a_type(yyout, v->name, v->type, v->brace_level, NULL, NULL, -1, NULL, NULL, str_zero, NULL, NULL); free(str_zero); diff --git a/src/interfaces/ecpg/preproc/output.c b/src/interfaces/ecpg/preproc/output.c index cf2ed58d0a..c1ba55d517 100644 --- a/src/interfaces/ecpg/preproc/output.c +++ b/src/interfaces/ecpg/preproc/output.c @@ -197,9 +197,9 @@ output_escaped_str(char *str, bool quoted) int i = 0; int len = strlen(str); - if (quoted && str[0] == '"' && str[len - 1] == '"') /* do not escape quotes - * at beginning and end - * if quoted string */ + if (quoted && str[0] == '"' && str[len - 1] == '"') /* do not escape quotes + * at beginning and end + * if quoted string */ { i = 1; len--; diff --git a/src/interfaces/libpq/fe-secure-openssl.c b/src/interfaces/libpq/fe-secure-openssl.c index 9392506dbd..f6ce1c7a13 100644 --- a/src/interfaces/libpq/fe-secure-openssl.c +++ b/src/interfaces/libpq/fe-secure-openssl.c @@ -212,14 +212,15 @@ pgtls_read(PGconn *conn, void *ptr, size_t len) unsigned long ecode; rloop: + /* * Prepare to call SSL_get_error() by clearing thread's OpenSSL error * queue. In general, the current thread's error queue must be empty - * before the TLS/SSL I/O operation is attempted, or SSL_get_error() - * will not work reliably. Since the possibility exists that other - * OpenSSL clients running in the same thread but not under our control - * will fail to call ERR_get_error() themselves (after their own I/O - * operations), pro-actively clear the per-thread error queue now. + * before the TLS/SSL I/O operation is attempted, or SSL_get_error() will + * not work reliably. Since the possibility exists that other OpenSSL + * clients running in the same thread but not under our control will fail + * to call ERR_get_error() themselves (after their own I/O operations), + * pro-actively clear the per-thread error queue now. */ SOCK_ERRNO_SET(0); ERR_clear_error(); @@ -228,11 +229,11 @@ rloop: /* * Other clients of OpenSSL may fail to call ERR_get_error(), but we - * always do, so as to not cause problems for OpenSSL clients that - * don't call ERR_clear_error() defensively. Be sure that this - * happens by calling now. SSL_get_error() relies on the OpenSSL - * per-thread error queue being intact, so this is the earliest - * possible point ERR_get_error() may be called. + * always do, so as to not cause problems for OpenSSL clients that don't + * call ERR_clear_error() defensively. Be sure that this happens by + * calling now. SSL_get_error() relies on the OpenSSL per-thread error + * queue being intact, so this is the earliest possible point + * ERR_get_error() may be called. */ ecode = (err != SSL_ERROR_NONE || n < 0) ? 
ERR_get_error() : 0; switch (err) diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c index 2aeab96f33..b628c2811b 100644 --- a/src/pl/plpgsql/src/pl_comp.c +++ b/src/pl/plpgsql/src/pl_comp.c @@ -1326,10 +1326,10 @@ make_datum_param(PLpgSQL_expr *expr, int dno, int location) param->paramkind = PARAM_EXTERN; param->paramid = dno + 1; plpgsql_exec_get_datum_type_info(estate, - datum, - &param->paramtype, - &param->paramtypmod, - &param->paramcollid); + datum, + &param->paramtype, + &param->paramtypmod, + &param->paramcollid); param->location = location; return (Node *) param; diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c index b63ecacdec..586ff1f329 100644 --- a/src/pl/plpgsql/src/pl_exec.c +++ b/src/pl/plpgsql/src/pl_exec.c @@ -2887,7 +2887,7 @@ exec_stmt_return_query(PLpgSQL_execstate *estate, while (true) { - uint64 i; + uint64 i; SPI_cursor_fetch(portal, true, 50); if (SPI_processed == 0) @@ -4767,7 +4767,7 @@ exec_eval_datum(PLpgSQL_execstate *estate, */ Oid plpgsql_exec_get_datum_type(PLpgSQL_execstate *estate, - PLpgSQL_datum *datum) + PLpgSQL_datum *datum) { Oid typeid; @@ -4849,8 +4849,8 @@ plpgsql_exec_get_datum_type(PLpgSQL_execstate *estate, */ void plpgsql_exec_get_datum_type_info(PLpgSQL_execstate *estate, - PLpgSQL_datum *datum, - Oid *typeid, int32 *typmod, Oid *collation) + PLpgSQL_datum *datum, + Oid *typeid, int32 *typmod, Oid *collation) { switch (datum->dtype) { @@ -5089,7 +5089,7 @@ exec_run_select(PLpgSQL_execstate *estate, */ if (expr->plan == NULL) exec_prepare_plan(estate, expr, parallelOK ? - CURSOR_OPT_PARALLEL_OK : 0); + CURSOR_OPT_PARALLEL_OK : 0); /* * If a portal was requested, put the query into the portal diff --git a/src/pl/plpgsql/src/plpgsql.h b/src/pl/plpgsql/src/plpgsql.h index 2deece43eb..140bf4badd 100644 --- a/src/pl/plpgsql/src/plpgsql.h +++ b/src/pl/plpgsql/src/plpgsql.h @@ -1000,10 +1000,10 @@ extern void plpgsql_xact_cb(XactEvent event, void *arg); extern void plpgsql_subxact_cb(SubXactEvent event, SubTransactionId mySubid, SubTransactionId parentSubid, void *arg); extern Oid plpgsql_exec_get_datum_type(PLpgSQL_execstate *estate, - PLpgSQL_datum *datum); + PLpgSQL_datum *datum); extern void plpgsql_exec_get_datum_type_info(PLpgSQL_execstate *estate, - PLpgSQL_datum *datum, - Oid *typeid, int32 *typmod, Oid *collation); + PLpgSQL_datum *datum, + Oid *typeid, int32 *typmod, Oid *collation); /* ---------- * Functions for namespace handling in pl_funcs.c diff --git a/src/pl/plpython/plpy_elog.c b/src/pl/plpython/plpy_elog.c index 7495e7d51d..d61493fac8 100644 --- a/src/pl/plpython/plpy_elog.c +++ b/src/pl/plpython/plpy_elog.c @@ -24,12 +24,12 @@ PyObject *PLy_exc_spi_error = NULL; static void PLy_traceback(PyObject *e, PyObject *v, PyObject *tb, char **xmsg, char **tbmsg, int *tb_depth); static void PLy_get_spi_error_data(PyObject *exc, int *sqlerrcode, char **detail, - char **hint, char **query, int *position, - char **schema_name, char **table_name, char **column_name, - char **datatype_name, char **constraint_name); + char **hint, char **query, int *position, + char **schema_name, char **table_name, char **column_name, + char **datatype_name, char **constraint_name); static void PLy_get_error_data(PyObject *exc, int *sqlerrcode, char **detail, - char **hint, char **schema_name, char **table_name, char **column_name, - char **datatype_name, char **constraint_name); + char **hint, char **schema_name, char **table_name, char **column_name, + char **datatype_name, char **constraint_name); static char *get_source_line(const char
*src, int lineno); static void get_string_attr(PyObject *obj, char *attrname, char **str); @@ -136,8 +136,8 @@ PLy_elog(int elevel, const char *fmt,...) column_name) : 0, (datatype_name) ? err_generic_string(PG_DIAG_DATATYPE_NAME, datatype_name) : 0, - (constraint_name) ? err_generic_string(PG_DIAG_CONSTRAINT_NAME, - constraint_name) : 0)); + (constraint_name) ? err_generic_string(PG_DIAG_CONSTRAINT_NAME, + constraint_name) : 0)); } PG_CATCH(); { @@ -544,7 +544,7 @@ PLy_exception_set_with_details(PyObject *excclass, ErrorData *edata) goto failure; if (!set_string_attr(error, "sqlstate", - unpack_sql_state(edata->sqlerrcode))) + unpack_sql_state(edata->sqlerrcode))) goto failure; if (!set_string_attr(error, "detail", edata->detail)) @@ -589,7 +589,7 @@ failure: static void get_string_attr(PyObject *obj, char *attrname, char **str) { - PyObject *val; + PyObject *val; val = PyObject_GetAttrString(obj, attrname); if (val != NULL && val != Py_None) @@ -605,8 +605,8 @@ get_string_attr(PyObject *obj, char *attrname, char **str) static bool set_string_attr(PyObject *obj, char *attrname, char *str) { - int result; - PyObject *val; + int result; + PyObject *val; if (str != NULL) { diff --git a/src/pl/plpython/plpy_plpymodule.c b/src/pl/plpython/plpy_plpymodule.c index e3322d12ba..9725fce932 100644 --- a/src/pl/plpython/plpy_plpymodule.c +++ b/src/pl/plpython/plpy_plpymodule.c @@ -57,13 +57,13 @@ static PyMethodDef PLy_methods[] = { /* * logging methods */ - {"debug", (PyCFunction) PLy_debug, METH_VARARGS|METH_KEYWORDS, NULL}, - {"log", (PyCFunction) PLy_log, METH_VARARGS|METH_KEYWORDS, NULL}, - {"info", (PyCFunction) PLy_info, METH_VARARGS|METH_KEYWORDS, NULL}, - {"notice", (PyCFunction) PLy_notice, METH_VARARGS|METH_KEYWORDS, NULL}, - {"warning", (PyCFunction) PLy_warning, METH_VARARGS|METH_KEYWORDS, NULL}, - {"error", (PyCFunction) PLy_error, METH_VARARGS|METH_KEYWORDS, NULL}, - {"fatal", (PyCFunction) PLy_fatal, METH_VARARGS|METH_KEYWORDS, NULL}, + {"debug", (PyCFunction) PLy_debug, METH_VARARGS | METH_KEYWORDS, NULL}, + {"log", (PyCFunction) PLy_log, METH_VARARGS | METH_KEYWORDS, NULL}, + {"info", (PyCFunction) PLy_info, METH_VARARGS | METH_KEYWORDS, NULL}, + {"notice", (PyCFunction) PLy_notice, METH_VARARGS | METH_KEYWORDS, NULL}, + {"warning", (PyCFunction) PLy_warning, METH_VARARGS | METH_KEYWORDS, NULL}, + {"error", (PyCFunction) PLy_error, METH_VARARGS | METH_KEYWORDS, NULL}, + {"fatal", (PyCFunction) PLy_fatal, METH_VARARGS | METH_KEYWORDS, NULL}, /* * create a stored plan @@ -272,7 +272,7 @@ PLy_generate_spi_exceptions(PyObject *mod, PyObject *base) * don't confuse these with PLy_elog */ static PyObject *PLy_output(volatile int level, PyObject *self, - PyObject *args, PyObject *kw); + PyObject *args, PyObject *kw); static PyObject * PLy_debug(PyObject *self, PyObject *args, PyObject *kw) @@ -375,11 +375,11 @@ object_to_string(PyObject *obj) { if (obj) { - PyObject *so = PyObject_Str(obj); + PyObject *so = PyObject_Str(obj); if (so != NULL) { - char *str; + char *str; str = pstrdup(PyString_AsString(so)); Py_DECREF(so); @@ -394,20 +394,21 @@ object_to_string(PyObject *obj) static PyObject * PLy_output(volatile int level, PyObject *self, PyObject *args, PyObject *kw) { - int sqlstate = 0; - char *volatile sqlstatestr = NULL; - char *volatile message = NULL; - char *volatile detail = NULL; - char *volatile hint = NULL; - char *volatile column = NULL; - char *volatile constraint = NULL; - char *volatile datatype = NULL; - char *volatile table = NULL; - char *volatile schema = NULL; + int 
sqlstate = 0; + char *volatile sqlstatestr = NULL; + char *volatile message = NULL; + char *volatile detail = NULL; + char *volatile hint = NULL; + char *volatile column = NULL; + char *volatile constraint = NULL; + char *volatile datatype = NULL; + char *volatile table = NULL; + char *volatile schema = NULL; volatile MemoryContext oldcontext; - PyObject *key, *value; - PyObject *volatile so; - Py_ssize_t pos = 0; + PyObject *key, + *value; + PyObject *volatile so; + Py_ssize_t pos = 0; if (PyTuple_Size(args) == 1) { @@ -437,7 +438,7 @@ PLy_output(volatile int level, PyObject *self, PyObject *args, PyObject *kw) { while (PyDict_Next(kw, &pos, &key, &value)) { - char *keyword = PyString_AsString(key); + char *keyword = PyString_AsString(key); if (strcmp(keyword, "message") == 0) { @@ -465,9 +466,9 @@ PLy_output(volatile int level, PyObject *self, PyObject *args, PyObject *kw) datatype = object_to_string(value); else if (strcmp(keyword, "constraint") == 0) constraint = object_to_string(value); - else - PLy_elog(ERROR, "'%s' is an invalid keyword argument for this function", - keyword); + else + PLy_elog(ERROR, "'%s' is an invalid keyword argument for this function", + keyword); } } @@ -480,10 +481,10 @@ PLy_output(volatile int level, PyObject *self, PyObject *args, PyObject *kw) PLy_elog(ERROR, "invalid SQLSTATE code"); sqlstate = MAKE_SQLSTATE(sqlstatestr[0], - sqlstatestr[1], - sqlstatestr[2], - sqlstatestr[3], - sqlstatestr[4]); + sqlstatestr[1], + sqlstatestr[2], + sqlstatestr[3], + sqlstatestr[4]); } oldcontext = CurrentMemoryContext; @@ -524,7 +525,7 @@ PLy_output(volatile int level, PyObject *self, PyObject *args, PyObject *kw) } PG_CATCH(); { - ErrorData *edata; + ErrorData *edata; MemoryContextSwitchTo(oldcontext); edata = CopyErrorData(); diff --git a/src/pl/plpython/plpy_spi.c b/src/pl/plpython/plpy_spi.c index f50d6545e0..1e965cf85f 100644 --- a/src/pl/plpython/plpy_spi.c +++ b/src/pl/plpython/plpy_spi.c @@ -554,7 +554,9 @@ PLy_spi_subtransaction_abort(MemoryContext oldcontext, ResourceOwner oldowner) /* Look up the correct exception */ entry = hash_search(PLy_spi_exceptions, &(edata->sqlerrcode), HASH_FIND, NULL); - /* This could be a custom error code, if that's the case fallback to + + /* + * This could be a custom error code, if that's the case fallback to * SPIError */ exc = entry ? 
entry->exc : PLy_exc_spi_error; @@ -583,9 +585,9 @@ PLy_spi_exception_set(PyObject *excclass, ErrorData *edata) if (!spierror) goto failure; - spidata= Py_BuildValue("(izzzizzzzz)", edata->sqlerrcode, edata->detail, edata->hint, + spidata = Py_BuildValue("(izzzizzzzz)", edata->sqlerrcode, edata->detail, edata->hint, edata->internalquery, edata->internalpos, - edata->schema_name, edata->table_name, edata->column_name, + edata->schema_name, edata->table_name, edata->column_name, edata->datatype_name, edata->constraint_name); if (!spidata) goto failure; diff --git a/src/port/chklocale.c b/src/port/chklocale.c index 7a7d88a6cf..3c0ef6a253 100644 --- a/src/port/chklocale.c +++ b/src/port/chklocale.c @@ -239,8 +239,8 @@ win32_langinfo(const char *ctype) MultiByteToWideChar(CP_ACP, 0, ctype, -1, wctype, LOCALE_NAME_MAX_LENGTH); if (GetLocaleInfoEx(wctype, - LOCALE_IDEFAULTANSICODEPAGE | LOCALE_RETURN_NUMBER, - (LPWSTR) &cp, sizeof(cp) / sizeof(WCHAR)) > 0) + LOCALE_IDEFAULTANSICODEPAGE | LOCALE_RETURN_NUMBER, + (LPWSTR) &cp, sizeof(cp) / sizeof(WCHAR)) > 0) { r = malloc(16); /* excess */ if (r != NULL) @@ -249,7 +249,6 @@ win32_langinfo(const char *ctype) else #endif { - /* * Locale format on Win32 is <Language>_<Country>.<CodePage> . For * example, English_United States.1252. diff --git a/src/test/modules/test_ddl_deparse/test_ddl_deparse.c b/src/test/modules/test_ddl_deparse/test_ddl_deparse.c index afbb7e2217..739daa6b90 100644 --- a/src/test/modules/test_ddl_deparse/test_ddl_deparse.c +++ b/src/test/modules/test_ddl_deparse/test_ddl_deparse.c @@ -1,6 +1,6 @@ /*---------------------------------------------------------------------- * test_ddl_deparse.c - * Support functions for the test_ddl_deparse module + * Support functions for the test_ddl_deparse module * * Copyright (c) 2014-2016, PostgreSQL Global Development Group *