mirror of https://github.com/postgres/postgres

Fix typos in comments.

Backpatch to all supported versions, where applicable, to make backpatching of future fixes go more smoothly.

Josh Soref

Discussion: https://www.postgresql.org/message-id/CACZqfqCf+5qRztLPgmmosr-B0Ye4srWzzw_mo4c_8_B_mtjmJQ@mail.gmail.com

This commit is contained in:
parent 9863017b87
commit 181bdb90ba
@@ -7088,7 +7088,7 @@ test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
 test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
 
 # When Autoconf chooses install-sh as install program it tries to generate
-# a relative path to it in each makefile where it subsitutes it. This clashes
+# a relative path to it in each makefile where it substitutes it. This clashes
 # with our Makefile.global concept. This workaround helps.
 case $INSTALL in
 *install-sh*) install_bin='';;

@@ -7232,7 +7232,7 @@ fi
 $as_echo "$MKDIR_P" >&6; }
 
 # When Autoconf chooses install-sh as mkdir -p program it tries to generate
-# a relative path to it in each makefile where it subsitutes it. This clashes
+# a relative path to it in each makefile where it substitutes it. This clashes
 # with our Makefile.global concept. This workaround helps.
 case $MKDIR_P in
 *install-sh*) MKDIR_P='\${SHELL} \${top_srcdir}/config/install-sh -c -d';;

@@ -887,7 +887,7 @@ fi
 
 AC_PROG_INSTALL
 # When Autoconf chooses install-sh as install program it tries to generate
-# a relative path to it in each makefile where it subsitutes it. This clashes
+# a relative path to it in each makefile where it substitutes it. This clashes
 # with our Makefile.global concept. This workaround helps.
 case $INSTALL in
 *install-sh*) install_bin='';;

@@ -900,7 +900,7 @@ AC_PROG_LN_S
 AC_PROG_AWK
 AC_PROG_MKDIR_P
 # When Autoconf chooses install-sh as mkdir -p program it tries to generate
-# a relative path to it in each makefile where it subsitutes it. This clashes
+# a relative path to it in each makefile where it substitutes it. This clashes
 # with our Makefile.global concept. This workaround helps.
 case $MKDIR_P in
 *install-sh*) MKDIR_P='\${SHELL} \${top_srcdir}/config/install-sh -c -d';;
@@ -51,7 +51,7 @@ blbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 initBloomState(&state, index);
 
 /*
- * Interate over the pages. We don't care about concurrently added pages,
+ * Iterate over the pages. We don't care about concurrently added pages,
  * they can't contain tuples to delete.
  */
 npages = RelationGetNumberOfBlocks(index);

@@ -1056,7 +1056,7 @@ SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
 5
 (1 row)
 
--- Test of cube_ll_coord function (retrieves LL coodinate values)
+-- Test of cube_ll_coord function (retrieves LL coordinate values)
 --
 SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
 cube_ll_coord

@@ -1112,7 +1112,7 @@ SELECT cube_ll_coord('(42,137)'::cube, 3);
 0
 (1 row)
 
--- Test of cube_ur_coord function (retrieves UR coodinate values)
+-- Test of cube_ur_coord function (retrieves UR coordinate values)
 --
 SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
 cube_ur_coord

@@ -1056,7 +1056,7 @@ SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
 5
 (1 row)
 
--- Test of cube_ll_coord function (retrieves LL coodinate values)
+-- Test of cube_ll_coord function (retrieves LL coordinate values)
 --
 SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
 cube_ll_coord

@@ -1112,7 +1112,7 @@ SELECT cube_ll_coord('(42,137)'::cube, 3);
 0
 (1 row)
 
--- Test of cube_ur_coord function (retrieves UR coodinate values)
+-- Test of cube_ur_coord function (retrieves UR coordinate values)
 --
 SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
 cube_ur_coord

@@ -256,7 +256,7 @@ SELECT cube_dim('(0,0,0)'::cube);
 SELECT cube_dim('(42,42,42),(42,42,42)'::cube);
 SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
 
--- Test of cube_ll_coord function (retrieves LL coodinate values)
+-- Test of cube_ll_coord function (retrieves LL coordinate values)
 --
 SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
 SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 2);

@@ -268,7 +268,7 @@ SELECT cube_ll_coord('(42,137)'::cube, 1);
 SELECT cube_ll_coord('(42,137)'::cube, 2);
 SELECT cube_ll_coord('(42,137)'::cube, 3);
 
--- Test of cube_ur_coord function (retrieves UR coodinate values)
+-- Test of cube_ur_coord function (retrieves UR coordinate values)
 --
 SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
 SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 2);
@@ -11,7 +11,7 @@ CREATE FUNCTION earth() RETURNS float8
 LANGUAGE SQL IMMUTABLE PARALLEL SAFE
 AS 'SELECT ''6378168''::float8';
 
--- Astromers may want to change the earth function so that distances will be
+-- Astronomers may want to change the earth function so that distances will be
 -- returned in degrees. To do this comment out the above definition and
 -- uncomment the one below. Note that doing this will break the regression
 -- tests.

@@ -23,7 +23,7 @@
  * Product 9 + 21 + 7 + 3 + 1 + 12 + 4 + 24 + 7 + 15 + 0 + 0 = 103
  * 103 / 10 = 10 remainder 3
  * Check digit 10 - 3 = 7
- * => 977-1144875-00-7 ?? <- suplemental number (number of the week, month, etc.)
+ * => 977-1144875-00-7 ?? <- supplemental number (number of the week, month, etc.)
  * ^^ 00 for non-daily publications (01=Monday, 02=Tuesday, ...)
  *
  * The hyphenation is always in after the four digits of the ISSN code.

@@ -160,7 +160,7 @@ dehyphenate(char *bufO, char *bufI)
  * into bufO using the given hyphenation range TABLE.
  * Assumes the input string to be used is of only digits.
  *
- * Returns the number of characters acctually hyphenated.
+ * Returns the number of characters actually hyphenated.
  */
 static unsigned
 hyphenate(char *bufO, char *bufI, const char *(*TABLE)[2], const unsigned TABLE_index[10][2])

@@ -748,7 +748,7 @@ string2ean(const char *str, bool errorOK, ean13 *result,
 }
 else if (*aux2 == '!' && *(aux2 + 1) == '\0')
 {
-/* the invalid check digit sufix was found, set it */
+/* the invalid check digit suffix was found, set it */
 if (!magic)
 valid = false;
 magic = true;
@@ -1113,7 +1113,7 @@ SELECT '{a.b.c.d.e,B.df}'::ltree[] ? '{A.b.c.d.e,*.df}';
 t
 (1 row)
 
---exractors
+--extractors
 SELECT ('{3456,1.2.3.34}'::ltree[] ?@> '1.2.3.4') is null;
 ?column?
 ----------

@@ -197,7 +197,7 @@ pushval_asis(QPRS_STATE *state, int type, char *strval, int lenval, uint16 flag)
 
 #define STACKDEPTH 32
 /*
- * make polish notaion of query
+ * make polish notation of query
  */
 static int32
 makepol(QPRS_STATE *state)

@@ -209,7 +209,7 @@ SELECT 'a.b.c.d.e'::ltree ? '{A.b.c.d.e, a.*}';
 SELECT '{a.b.c.d.e,B.df}'::ltree[] ? '{A.b.c.d.e}';
 SELECT '{a.b.c.d.e,B.df}'::ltree[] ? '{A.b.c.d.e,*.df}';
 
---exractors
+--extractors
 SELECT ('{3456,1.2.3.34}'::ltree[] ?@> '1.2.3.4') is null;
 SELECT '{3456,1.2.3}'::ltree[] ?@> '1.2.3.4';
 SELECT '{3456,1.2.3.4}'::ltree[] ?<@ '1.2.3';
@@ -779,7 +779,7 @@ main(int argc, char **argv)
 {
 /*
  * Once we have restored this file successfully we can remove some
- * prior WAL files. If this restore fails we musn't remove any
+ * prior WAL files. If this restore fails we mustn't remove any
  * file because some of them will be requested again immediately
  * after the failed restore, or when we restart recovery.
  */

@@ -139,7 +139,7 @@ typedef struct Counters
 {
 int64 calls; /* # of times executed */
 double total_time; /* total execution time, in msec */
-double min_time; /* minimim execution time in msec */
+double min_time; /* minimum execution time in msec */
 double max_time; /* maximum execution time in msec */
 double mean_time; /* mean execution time in msec */
 double sum_var_time; /* sum of variances in execution time in msec */

@@ -413,7 +413,7 @@ comp_ptrgm(const void *v1, const void *v2)
  * ulen1: count of unique trigrams of array "trg1".
  * len2: length of array "trg2" and array "trg2indexes".
  * len: length of the array "found".
- * check_only: if true then only check existaince of similar search pattern in
+ * check_only: if true then only check existence of similar search pattern in
  * text.
  *
  * Returns word similarity.

@@ -456,7 +456,7 @@ iterate_word_similarity(int *trg2indexes,
 lastpos[trgindex] = i;
 }
 
-/* Adjust lower bound if this trigram is present in required substing */
+/* Adjust lower bound if this trigram is present in required substring */
 if (found[trgindex])
 {
 int prev_lower,

@@ -547,7 +547,7 @@ iterate_word_similarity(int *trg2indexes,
  *
  * str1: search pattern string, of length slen1 bytes.
  * str2: text in which we are looking for a word, of length slen2 bytes.
- * check_only: if true then only check existaince of similar search pattern in
+ * check_only: if true then only check existence of similar search pattern in
  * text.
  *
  * Returns word similarity.
@@ -311,7 +311,7 @@ pullf_read_max(PullFilter *pf, int len, uint8 **data_p, uint8 *tmpbuf)
 }
 
 /*
- * caller wants exatly len bytes and dont bother with references
+ * caller wants exactly len bytes and don't bother with references
  */
 int
 pullf_read_fixed(PullFilter *src, int len, uint8 *dst)

@@ -141,7 +141,7 @@ bn_to_mpi(mpz_t *bn)
 }
 
 /*
- * Decide the number of bits in the random componont k
+ * Decide the number of bits in the random component k
  *
  * It should be in the same range as p for signing (which
  * is deprecated), but can be much smaller for encrypting.

@@ -149,8 +149,8 @@ bn_to_mpi(mpz_t *bn)
  * Until I research it further, I just mimic gpg behaviour.
  * It has a special mapping table, for values <= 5120,
  * above that it uses 'arbitrary high number'. Following
- * algorihm hovers 10-70 bits above gpg values. And for
- * larger p, it uses gpg's algorihm.
+ * algorithm hovers 10-70 bits above gpg values. And for
+ * larger p, it uses gpg's algorithm.
  *
  * The point is - if k gets large, encryption will be
  * really slow. It does not matter for decryption.

@@ -74,7 +74,7 @@ bn_to_mpi(BIGNUM *bn)
 }
 
 /*
- * Decide the number of bits in the random componont k
+ * Decide the number of bits in the random component k
  *
  * It should be in the same range as p for signing (which
  * is deprecated), but can be much smaller for encrypting.

@@ -82,8 +82,8 @@ bn_to_mpi(BIGNUM *bn)
  * Until I research it further, I just mimic gpg behaviour.
  * It has a special mapping table, for values <= 5120,
  * above that it uses 'arbitrary high number'. Following
- * algorihm hovers 10-70 bits above gpg values. And for
- * larger p, it uses gpg's algorihm.
+ * algorithm hovers 10-70 bits above gpg values. And for
+ * larger p, it uses gpg's algorithm.
  *
  * The point is - if k gets large, encryption will be
  * really slow. It does not matter for decryption.
@@ -2057,7 +2057,7 @@ SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM
 1
 (10 rows)
 
--- non-Var items in targelist of the nullable rel of a join preventing
+-- non-Var items in targetlist of the nullable rel of a join preventing
 -- push-down in some cases
 -- unable to push {ft1, ft2}
 EXPLAIN (VERBOSE, COSTS OFF)

@@ -493,7 +493,7 @@ EXPLAIN (VERBOSE, COSTS OFF)
 SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10;
 SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10;
 
--- non-Var items in targelist of the nullable rel of a join preventing
+-- non-Var items in targetlist of the nullable rel of a join preventing
 -- push-down in some cases
 -- unable to push {ft1, ft2}
 EXPLAIN (VERBOSE, COSTS OFF)

@@ -888,7 +888,7 @@ restore(char *result, float val, int n)
 if (Abs(exp) <= 4)
 {
 /*
- * remove the decimal point from the mantyssa and write the digits
+ * remove the decimal point from the mantissa and write the digits
  * to the buf array
  */
 for (p = result + sign, i = 10, dp = 0; *p != 'e'; p++, i++)
@@ -23,7 +23,7 @@
  * When we ask SELinux whether the required privileges are allowed or not,
  * we use security_compute_av(3). It needs us to represent object classes
  * and access vectors using 'external' codes defined in the security policy.
- * It is determinded in the runtime, not build time. So, it needs an internal
+ * It is determined in the runtime, not build time. So, it needs an internal
  * service to translate object class/access vectors which we want to check
  * into the code which kernel want to be given.
  */

@@ -206,7 +206,7 @@ SELECT * FROM auth_tbl; -- failed
 SELECT sepgsql_setcon(NULL); -- end of session
 SELECT sepgsql_getcon();
 
--- the pooler cannot touch these tables directry
+-- the pooler cannot touch these tables directly
 SELECT * FROM foo_tbl; -- failed
 
 SELECT * FROM var_tbl; -- failed

@@ -89,7 +89,7 @@ check_primary_key(PG_FUNCTION_ARGS)
 /* internal error */
 elog(ERROR, "check_primary_key: cannot process DELETE events");
 
-/* If UPDATion the must check new Tuple, not old one */
+/* If UPDATE, then must check new Tuple, not old one */
 else
 tuple = trigdata->tg_newtuple;
 

@@ -29,7 +29,7 @@
 # modified by Ray Aspeitia 12-03-2003 :
 # added log rotation script to db startup
 # modified StartupParameters.plist "Provides" parameter to make it easier to
-# start and stop with the SystemStarter utitlity
+# start and stop with the SystemStarter utility
 
 # use the below command in order to correctly start/stop/restart PG with log rotation script:
 # SystemStarter [start|stop|restart] PostgreSQL
@@ -414,7 +414,7 @@ CREATE FUNCTION stat(text,text)
 LANGUAGE INTERNAL
 RETURNS NULL ON NULL INPUT;
 
---reset - just for debuging
+--reset - just for debugging
 CREATE FUNCTION reset_tsearch()
 RETURNS void
 as 'MODULE_PATHNAME', 'tsa_reset_tsearch'

@@ -610,7 +610,7 @@ xpath_table(PG_FUNCTION_ARGS)
 
 /*
  * At the moment we assume that the returned attributes make sense for the
- * XPath specififed (i.e. we trust the caller). It's not fatal if they get
+ * XPath specified (i.e. we trust the caller). It's not fatal if they get
  * it wrong - the input function for the column type will raise an error
  * if the path result can't be converted into the correct binary
  * representation.

@@ -377,7 +377,7 @@ $(shlib): $(OBJS) $(DLL_DEFFILE) | $(SHLIB_PREREQS)
 $(CC) $(CFLAGS) -shared -static-libgcc -o $@ $(OBJS) $(DLL_DEFFILE) $(LDFLAGS) $(LDFLAGS_SL) $(SHLIB_LINK) $(LIBS) -Wl,--out-implib=$(stlib)
 endif
 
-endif # PORTNAME == cgywin
+endif # PORTNAME == cygwin
 endif # PORTNAME == cygwin || PORTNAME == win32
 
 
@@ -28,7 +28,7 @@ The current implementation of GiST supports:
 
 The support for concurrency implemented in PostgreSQL was developed based on
 the paper "Access Methods for Next-Generation Database Systems" by
-Marcel Kornaker:
+Marcel Kornacker:
 
 http://www.sai.msu.su/~megera/postgres/gist/papers/concurrency/access-methods-for-next-generation.pdf.gz
 

@@ -1077,7 +1077,7 @@ _hash_splitbucket_guts(Relation rel,
  * already moved before the split operation was previously interrupted.
  *
  * The caller must hold a pin, but no lock, on the metapage and old bucket's
- * primay page buffer. The buffers are returned in the same state. (The
+ * primary page buffer. The buffers are returned in the same state. (The
  * metapage is only touched if it becomes necessary to add or remove overflow
  * pages.)
  */

@@ -209,7 +209,7 @@ typedef struct RewriteMappingFile
 } RewriteMappingFile;
 
 /*
- * A single In-Memeory logical rewrite mapping, hanging of
+ * A single In-Memory logical rewrite mapping, hanging off
  * RewriteMappingFile->mappings.
  */
 typedef struct RewriteMappingDataEntry
@@ -615,7 +615,7 @@ CommitTsParameterChange(bool newvalue, bool oldvalue)
 
 /*
  * Activate this module whenever necessary.
- * This must happen during postmaster or standalong-backend startup,
+ * This must happen during postmaster or standalone-backend startup,
  * or during WAL replay anytime the track_commit_timestamp setting is
  * changed in the master.
  *

@@ -2752,7 +2752,7 @@ CommitTransactionCommand(void)
  * These shouldn't happen. TBLOCK_DEFAULT means the previous
  * StartTransactionCommand didn't set the STARTED state
  * appropriately, while TBLOCK_PARALLEL_INPROGRESS should be ended
- * by EndParallelWorkerTranaction(), not this function.
+ * by EndParallelWorkerTransaction(), not this function.
  */
 case TBLOCK_DEFAULT:
 case TBLOCK_PARALLEL_INPROGRESS:

@@ -770,7 +770,7 @@ static void getRelationIdentity(StringInfo buffer, Oid relid, List **objname);
  *
  * Note: If the object is not found, we don't give any indication of the
  * reason. (It might have been a missing schema if the name was qualified, or
- * an inexistant type name in case of a cast, function or operator; etc).
+ * a nonexistent type name in case of a cast, function or operator; etc).
  * Currently there is only one caller that might be interested in such info, so
  * we don't spend much effort here. If more callers start to care, it might be
  * better to add some support for that in this function.
@@ -34,7 +34,7 @@ static const char *get_am_type_string(char amtype);
 
 
 /*
- * CreateAcessMethod
+ * CreateAccessMethod
  * Registers a new access method.
  */
 ObjectAddress

@@ -685,7 +685,7 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
 
 /*
  * Force synchronous commit, thus minimizing the window between
- * creation of the database files and commital of the transaction. If
+ * creation of the database files and committal of the transaction. If
  * we crash before committing, we'll have a DB that's taking up disk
  * space but is not in pg_database, which is not good.
  */

@@ -955,7 +955,7 @@ dropdb(const char *dbname, bool missing_ok)
 
 /*
  * Force synchronous commit, thus minimizing the window between removal of
- * the database files and commital of the transaction. If we crash before
+ * the database files and committal of the transaction. If we crash before
  * committing, we'll have a DB that's gone on disk but still there
  * according to pg_database, which is not good.
  */

@@ -1309,7 +1309,7 @@ movedb(const char *dbname, const char *tblspcname)
 
 /*
  * Force synchronous commit, thus minimizing the window between
- * copying the database files and commital of the transaction. If we
+ * copying the database files and committal of the transaction. If we
  * crash before committing, we'll leave an orphaned set of files on
  * disk, which is not fatal but not good either.
  */
@@ -3401,7 +3401,7 @@ ExplainYAMLLineStarting(ExplainState *es)
 }
 
 /*
- * YAML is a superset of JSON; unfortuantely, the YAML quoting rules are
+ * YAML is a superset of JSON; unfortunately, the YAML quoting rules are
  * ridiculously complicated -- as documented in sections 5.3 and 7.3.3 of
  * http://yaml.org/spec/1.2/spec.html -- so we chose to just quote everything.
  * Empty strings, strings with leading or trailing whitespace, and strings

@@ -1040,7 +1040,7 @@ CreateFunction(ParseState *pstate, CreateFunctionStmt *stmt)
 }
 else
 {
-/* store SQL NULL instead of emtpy array */
+/* store SQL NULL instead of empty array */
 trftypes = NULL;
 }
 

@@ -1441,7 +1441,7 @@ CreateCast(CreateCastStmt *stmt)
 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
 errmsg("cast will be ignored because the target data type is a domain")));
 
-/* Detemine the cast method */
+/* Determine the cast method */
 if (stmt->func != NULL)
 castmethod = COERCION_METHOD_FUNCTION;
 else if (stmt->inout)

@@ -99,7 +99,7 @@ static void RangeVarCallbackForReindexIndex(const RangeVar *relation,
  * Errors arising from the attribute list still apply.
  *
  * Most column type changes that can skip a table rewrite do not invalidate
- * indexes. We ackowledge this when all operator classes, collations and
+ * indexes. We acknowledge this when all operator classes, collations and
  * exclusion operators match. Though we could further permit intra-opfamily
  * changes for btree and hash indexes, that adds subtle complexity with no
  * concrete benefit for core types.
@@ -965,7 +965,7 @@ CheckMutability(Expr *expr)
  * indxpath.c could do something with. However, that seems overly
  * restrictive. One useful application of partial indexes is to apply
  * a UNIQUE constraint across a subset of a table, and in that scenario
- * any evaluatable predicate will work. So accept any predicate here
+ * any evaluable predicate will work. So accept any predicate here
  * (except ones requiring a plan), and let indxpath.c fend for itself.
  */
 static void

@@ -525,7 +525,7 @@ OpenTableList(List *tables)
 myrelid = RelationGetRelid(rel);
 /*
  * filter out duplicates when user specifies "foo, foo"
- * Note that this algrithm is know to not be very effective (O(N^2))
+ * Note that this algorithm is know to not be very effective (O(N^2))
  * but given that it only works on list of tables given to us by user
  * it's deemed acceptable.
  */

@@ -474,7 +474,7 @@ DropSubscription(DropSubscriptionStmt *stmt)
 InvokeObjectDropHook(SubscriptionRelationId, subid, 0);
 
 /*
- * Lock the subscription so noboby else can do anything with it
+ * Lock the subscription so nobody else can do anything with it
  * (including the replication workers).
  */
 LockSharedObject(SubscriptionRelationId, subid, 0, AccessExclusiveLock);
@@ -6630,7 +6630,7 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
 
 /*
  * Check if ONLY was specified with ALTER TABLE. If so, allow the
- * contraint creation only if there are no children currently. Error out
+ * constraint creation only if there are no children currently. Error out
  * otherwise.
  */
 if (!recurse && children != NIL)

@@ -1261,7 +1261,7 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo,
 resultRelInfo->ri_projectReturning = NULL;
 
 /*
- * If partition_root has been specified, that means we are builiding the
+ * If partition_root has been specified, that means we are building the
  * ResultRelationInfo for one of its leaf partitions. In that case, we
  * need *not* initialize the leaf partition's constraint, but rather the
  * the partition_root's (if any). We must do that explicitly like this,

@@ -533,7 +533,7 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate,
 int plan_node_id = planstate->plan->plan_node_id;
 MemoryContext oldcontext;
 
-/* Find the instumentation for this node. */
+/* Find the instrumentation for this node. */
 for (i = 0; i < instrumentation->num_plan_nodes; ++i)
 if (instrumentation->plan_node_id[i] == plan_node_id)
 break;

@@ -391,7 +391,7 @@ ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot)
 if (rel->rd_att->constr)
 ExecConstraints(resultRelInfo, slot, slot, estate);
 
-/* Store the slot into tuple that we can insett. */
+/* Store the slot into tuple that we can inspect. */
 tuple = ExecMaterializeSlot(slot);
 
 /* OK, store the tuple and create index entries for it */
@@ -304,7 +304,7 @@ typedef struct AggStatePerTransData
 /*
  * Slots for holding the evaluated input arguments. These are set up
  * during ExecInitAgg() and then used for each input row requiring
- * procesessing besides what's done in AggState->evalproj.
+ * processing besides what's done in AggState->evalproj.
  */
 TupleTableSlot *sortslot; /* current input tuple */
 TupleTableSlot *uniqslot; /* used for multi-column DISTINCT */

@@ -354,7 +354,7 @@ advance_windowaggregate(WindowAggState *winstate,
 
 /*
  * We must track the number of rows included in transValue, since to
- * remove the last input, advance_windowaggregate_base() musn't call the
+ * remove the last input, advance_windowaggregate_base() mustn't call the
  * inverse transition function, but simply reset transValue back to its
  * initial value.
  */

@@ -109,7 +109,7 @@ static MemoryContext parsed_hba_context = NULL;
  *
  * NOTE: the IdentLine structs can contain pre-compiled regular expressions
  * that live outside the memory context. Before destroying or resetting the
- * memory context, they need to be expliticly free'd.
+ * memory context, they need to be explicitly free'd.
  */
 static List *parsed_ident_lines = NIL;
 static MemoryContext parsed_ident_context = NULL;
@@ -111,7 +111,7 @@ gimme_edge_table(PlannerInfo *root, Gene *tour1, Gene *tour2,
 for (index1 = 0; index1 < num_gene; index1++)
 {
 /*
- * presume the tour is circular, i.e. 1->2, 2->3, 3->1 this operaton
+ * presume the tour is circular, i.e. 1->2, 2->3, 3->1 this operation
  * maps n back to 1
  */
 

@@ -314,7 +314,7 @@ gimme_gene(PlannerInfo *root, Edge edge, Edge *edge_table)
 /*
  * give priority to candidates with fewest remaining unused edges;
  * find out what the minimum number of unused edges is
- * (minimum_edges); if there is more than one cadidate with the
+ * (minimum_edges); if there is more than one candidate with the
  * minimum number of unused edges keep count of this number
  * (minimum_count);
  */

@@ -1618,7 +1618,7 @@ select_mergejoin_clauses(PlannerInfo *root,
 /*
  * Insist that each side have a non-redundant eclass. This
  * restriction is needed because various bits of the planner expect
- * that each clause in a merge be associatable with some pathkey in a
+ * that each clause in a merge be associable with some pathkey in a
  * canonical pathkey list, but redundant eclasses can't appear in
  * canonical sort orderings. (XXX it might be worth relaxing this,
  * but not enough time to address it for 8.3.)

@@ -195,7 +195,7 @@ query_planner(PlannerInfo *root, List *tlist,
 /*
  * Now distribute "placeholders" to base rels as needed. This has to be
  * done after join removal because removal could change whether a
- * placeholder is evaluatable at a base rel.
+ * placeholder is evaluable at a base rel.
  */
 add_placeholders_to_base_rels(root);
 
@@ -24,7 +24,7 @@
  * Detect whether there is a joinclause that involves
  * the two given relations.
  *
- * Note: the joinclause does not have to be evaluatable with only these two
+ * Note: the joinclause does not have to be evaluable with only these two
  * relations. This is intentional. For example consider
  * SELECT * FROM a, b, c WHERE a.x = (b.y + c.z)
  * If a is much larger than the other tables, it may be worthwhile to

@@ -515,7 +515,7 @@ join_clause_is_movable_into(RestrictInfo *rinfo,
 Relids currentrelids,
 Relids current_and_outer)
 {
-/* Clause must be evaluatable given available context */
+/* Clause must be evaluable given available context */
 if (!bms_is_subset(rinfo->clause_relids, current_and_outer))
 return false;
 

@@ -11312,7 +11312,7 @@ table_ref: relation_expr opt_alias_clause
 n->lateral = true;
 n->subquery = $2;
 n->alias = $3;
-/* same coment as above */
+/* same comment as above */
 if ($3 == NULL)
 {
 if (IsA($2, SelectStmt) &&

@@ -3050,7 +3050,7 @@ transformAttachPartition(CreateStmtContext *cxt, PartitionCmd *cmd)
 errmsg("\"%s\" is not partitioned",
 RelationGetRelationName(parentRel))));
 
-/* tranform the values */
+/* transform the values */
 Assert(RelationGetPartitionKey(parentRel) != NULL);
 cxt->partbound = transformPartitionBound(cxt->pstate, parentRel,
 cmd->bound);
@@ -211,7 +211,7 @@ BackgroundWriterMain(void)
 /* Flush any leaked data in the top-level context */
 MemoryContextResetAndDeleteChildren(bgwriter_context);
 
-/* re-initilialize to avoid repeated errors causing problems */
+/* re-initialize to avoid repeated errors causing problems */
 WritebackContextInit(&wb_context, &bgwriter_flush_after);
 
 /* Now we can allow interrupts again */

@@ -5156,7 +5156,7 @@ RandomCancelKey(int32 *cancel_key)
 }
 
 /*
- * Count up number of child processes of specified types (dead_end chidren
+ * Count up number of child processes of specified types (dead_end children
  * are always excluded).
  */
 static int

@@ -170,7 +170,7 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker,
 
 /*
  * Worker started and attached to our shmem. This check is safe
- * because only laucher ever starts the workers, so nobody can steal
+ * because only launcher ever starts the workers, so nobody can steal
  * the worker slot.
  */
 if (status == BGWH_STARTED && worker->proc)

@@ -180,7 +180,7 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker,
 return false;
 
 /*
- * We need timeout because we generaly don't get notified via latch
+ * We need timeout because we generally don't get notified via latch
  * about the worker attach.
  */
 rc = WaitLatch(MyLatch,
@@ -533,7 +533,7 @@ AtCommit_ApplyLauncher(void)
 /*
  * Request wakeup of the launcher on commit of the transaction.
  *
- * This is used to send launcher signal to stop sleeping and proccess the
+ * This is used to send launcher signal to stop sleeping and process the
  * subscriptions when current transaction commits. Should be used when new
  * tuple was added to the pg_subscription catalog.
  */

@@ -638,7 +638,7 @@ ApplyLauncherMain(Datum main_arg)
 else
 {
 /*
- * The wait in previous cycle was interruped in less than
+ * The wait in previous cycle was interrupted in less than
  * wal_retrieve_retry_interval since last worker was started,
  * this usually means crash of the worker, so we should retry
  * in wal_retrieve_retry_interval again.

@@ -1250,7 +1250,7 @@ pg_replication_origin_session_is_setup(PG_FUNCTION_ARGS)
  * Return the replication progress for origin setup in the current session.
  *
  * If 'flush' is set to true it is ensured that the returned value corresponds
- * to a local transaction that has been flushed. this is useful if asychronous
+ * to a local transaction that has been flushed. this is useful if asynchronous
  * commits are used when replaying replicated transactions.
  */
 Datum

@@ -1336,7 +1336,7 @@ pg_replication_origin_advance(PG_FUNCTION_ARGS)
  * Return the replication progress for an individual replication origin.
  *
  * If 'flush' is set to true it is ensured that the returned value corresponds
- * to a local transaction that has been flushed. this is useful if asychronous
+ * to a local transaction that has been flushed. this is useful if asynchronous
  * commits are used when replaying replicated transactions.
  */
 Datum
@@ -539,7 +539,7 @@ logicalrep_write_attrs(StringInfo out, Relation rel)
 if (att->attisdropped)
 continue;
 
-/* REPLICA IDENTITY FULL means all colums are sent as part of key. */
+/* REPLICA IDENTITY FULL means all columns are sent as part of key. */
 if (replidentfull ||
 bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,
 idattrs))

@@ -1714,7 +1714,7 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
  *
  * NB: Transactions handled here have to have actively aborted (i.e. have
  * produced an abort record). Implicitly aborted transactions are handled via
- * ReorderBufferAbortOld(); transactions we're just not interesteded in, but
+ * ReorderBufferAbortOld(); transactions we're just not interested in, but
  * which have committed are handled in ReorderBufferForget().
  *
  * This function purges this transaction and its contents from memory and

@@ -1782,7 +1782,7 @@ ReorderBufferAbortOld(ReorderBuffer *rb, TransactionId oldestRunningXid)
  * toplevel xid.
  *
  * This is significantly different to ReorderBufferAbort() because
- * transactions that have committed need to be treated differenly from aborted
+ * transactions that have committed need to be treated differently from aborted
  * ones since they may have modified the catalog.
  *
  * Note that this is only allowed to be called in the moment a transaction

@@ -2660,7 +2660,7 @@ StartupReorderBuffer(void)
 
 /*
  * ok, has to be a surviving logical slot, iterate and delete
- * everythign starting with xid-*
+ * everything starting with xid-*
  */
 sprintf(path, "pg_replslot/%s", logical_de->d_name);
 
@@ -614,7 +614,7 @@ SnapBuildGetOrBuildSnapshot(SnapBuild *builder, TransactionId xid)
 if (builder->snapshot == NULL)
 {
 builder->snapshot = SnapBuildBuildSnapshot(builder, xid);
-/* inrease refcount for the snapshot builder */
+/* increase refcount for the snapshot builder */
 SnapBuildSnapIncRefcount(builder->snapshot);
 }
 

@@ -678,7 +678,7 @@ SnapBuildProcessChange(SnapBuild *builder, TransactionId xid, XLogRecPtr lsn)
 if (builder->snapshot == NULL)
 {
 builder->snapshot = SnapBuildBuildSnapshot(builder, xid);
-/* inrease refcount for the snapshot builder */
+/* increase refcount for the snapshot builder */
 SnapBuildSnapIncRefcount(builder->snapshot);
 }
 

@@ -911,7 +911,7 @@ SnapBuildEndTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid)
 {
 /*
  * None of the originally running transaction is running anymore,
- * so our incrementaly built snapshot now is consistent.
+ * so our incrementally built snapshot now is consistent.
  */
 ereport(LOG,
 (errmsg("logical decoding found consistent point at %X/%X",

@@ -327,7 +327,7 @@ slot_store_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel,
 /*
  * Modify slot with user data provided as C strigs.
  * This is somewhat similar to heap_modify_tuple but also calls the type
- * input fuction on the user data as the input is the text representation
+ * input function on the user data as the input is the text representation
  * of the types.
  */
 static void
@@ -172,7 +172,7 @@ pgoutput_startup(LogicalDecodingContext * ctx, OutputPluginOptions *opt,
 &data->protocol_version,
 &data->publication_names);
 
-/* Check if we support requested protol */
+/* Check if we support requested protocol */
 if (data->protocol_version != LOGICALREP_PROTO_VERSION_NUM)
 ereport(ERROR,
 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),

@@ -424,7 +424,7 @@ publication_invalidation_cb(Datum arg, int cacheid, uint32 hashvalue)
 /*
  * Initialize the relation schema sync cache for a decoding session.
  *
- * The hash table is destoyed at the end of a decoding session. While
+ * The hash table is destroyed at the end of a decoding session. While
  * relcache invalidations still exist and will still be invoked, they
  * will just see the null hash table global and take no action.
  */

@@ -540,7 +540,7 @@ rel_sync_cache_relation_cb(Datum arg, Oid relid)
 
 /*
  * We can get here if the plugin was used in SQL interface as the
- * RelSchemaSyncCache is detroyed when the decoding finishes, but there
+ * RelSchemaSyncCache is destroyed when the decoding finishes, but there
  * is no way to unregister the relcache invalidation callback.
  */
 if (RelationSyncCache == NULL)

@@ -580,7 +580,7 @@ rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue)
 
 /*
  * We can get here if the plugin was used in SQL interface as the
- * RelSchemaSyncCache is detroyed when the decoding finishes, but there
+ * RelSchemaSyncCache is destroyed when the decoding finishes, but there
  * is no way to unregister the relcache invalidation callback.
  */
 if (RelationSyncCache == NULL)
@@ -860,7 +860,7 @@ WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event)
  * reached. At most nevents occurred events are returned.
  *
  * If timeout = -1, block until an event occurs; if 0, check sockets for
- * readiness, but don't block; if > 0, block for at most timeout miliseconds.
+ * readiness, but don't block; if > 0, block for at most timeout milliseconds.
  *
  * Returns the number of events occurred, or 0 if the timeout was reached.
  *

@@ -501,7 +501,7 @@ shm_mq_sendv(shm_mq_handle *mqh, shm_mq_iovec *iov, int iovcnt, bool nowait)
  * it will point to a temporary buffer. This mostly avoids data copying in
  * the hoped-for case where messages are short compared to the buffer size,
  * while still allowing longer messages. In either case, the return value
- * remains valid until the next receive operation is perfomed on the queue.
+ * remains valid until the next receive operation is performed on the queue.
  *
  * When nowait = false, we'll wait on our process latch when the ring buffer
  * is empty and we have not yet received a full message. The sender will

@@ -967,7 +967,7 @@ LogStandbySnapshot(void)
  * similar. We keep them separate because xl_xact_running_xacts is a
  * contiguous chunk of memory and never exists fully until it is assembled in
  * WAL. The inserted records are marked as not being important for durability,
- * to avoid triggering superflous checkpoint / archiving activity.
+ * to avoid triggering superfluous checkpoint / archiving activity.
  */
 static XLogRecPtr
 LogCurrentRunningXacts(RunningTransactions CurrRunningXacts)

@@ -2778,7 +2778,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
 vxids = (VirtualTransactionId *)
 palloc0(sizeof(VirtualTransactionId) * (MaxBackends + 1));
 
-/* Compute hash code and partiton lock, and look up conflicting modes. */
+/* Compute hash code and partition lock, and look up conflicting modes. */
 hashcode = LockTagHashCode(locktag);
 partitionLock = LockHashPartitionLock(hashcode);
 conflictMask = lockMethodTable->conflictTab[lockmode];
@@ -781,7 +781,7 @@ LWLockAttemptLock(LWLock *lock, LWLockMode mode)
 return false;
 }
 else
-return true; /* someobdy else has the lock */
+return true; /* somebody else has the lock */
 }
 }
 pg_unreachable();

@@ -953,7 +953,7 @@ LWLockWakeup(LWLock *lock)
  * that happens before the list unlink happens, the list would end up
  * being corrupted.
  *
- * The barrier pairs with the LWLockWaitListLock() when enqueueing for
+ * The barrier pairs with the LWLockWaitListLock() when enqueuing for
  * another lock.
  */
 pg_write_barrier();

@@ -1029,7 +1029,7 @@ LWLockDequeueSelf(LWLock *lock)
 
 /*
  * Can't just remove ourselves from the list, but we need to iterate over
- * all entries as somebody else could have unqueued us.
+ * all entries as somebody else could have dequeued us.
  */
 proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
 {

@@ -3193,7 +3193,7 @@ ReleasePredicateLocks(bool isCommit)
 /*
  * We can't trust XactReadOnly here, because a transaction which started
  * as READ WRITE can show as READ ONLY later, e.g., within
- * substransactions. We want to flag a transaction as READ ONLY if it
+ * subtransactions. We want to flag a transaction as READ ONLY if it
  * commits without writing so that de facto READ ONLY transactions get the
  * benefit of some RO optimizations, so we will use this local variable to
  * get some cleanup logic right which is based on whether the transaction
@@ -1728,7 +1728,7 @@ _fdvec_resize(SMgrRelation reln,
 else
 {
 /*
- * It doesn't seem worthwile complicating the code by having a more
+ * It doesn't seem worthwhile complicating the code by having a more
  * aggressive growth strategy here; the number of segments doesn't
  * grow that fast, and the memory context internally will sometimes
  * avoid doing an actual reallocation.

@@ -37,7 +37,7 @@
  * Spell field. The AffixData field is initialized if AF parameter is not
  * defined.
  * - NISortAffixes():
- * - builds a list of compond affixes from the affix list and stores it
+ * - builds a list of compound affixes from the affix list and stores it
  * in the CompoundAffix.
  * - builds prefix trees (Trie) from the affix list for prefixes and suffixes
  * and stores them in Suffix and Prefix fields.

@@ -179,7 +179,7 @@ LexizeExec(LexizeData *ld, ParsedLex **correspondLexem)
 if (ld->curDictId == InvalidOid)
 {
 /*
- * usial mode: dictionary wants only one word, but we should keep in
+ * usual mode: dictionary wants only one word, but we should keep in
  * mind that we should go through all stack
  */
 

@@ -272,7 +272,7 @@ LexizeExec(LexizeData *ld, ParsedLex **correspondLexem)
 
 /*
  * We should be sure that current type of lexeme is recognized
- * by our dictinonary: we just check is it exist in list of
+ * by our dictionary: we just check is it exist in list of
  * dictionaries ?
  */
 for (i = 0; i < map->len && !dictExists; i++)
@@ -627,7 +627,7 @@ generateHeadline(HeadlineParsedText *prs)
 /* start of a new fragment */
 infrag = 1;
 numfragments++;
-/* add a fragment delimitor if this is after the first one */
+/* add a fragment delimiter if this is after the first one */
 if (numfragments > 1)
 {
 memcpy(ptr, prs->fragdelim, prs->fragdelimlen);

@@ -2445,7 +2445,7 @@ mark_hl_words(HeadlineParsedText *prs, TSQuery query, int highlight,
 break;
 }
 if (curlen < min_words && i >= prs->curwords)
-{ /* got end of text and our cover is shoter
+{ /* got end of text and our cover is shorter
  * than min_words */
 for (i = p - 1; i >= 0; i--)
 {

@@ -2265,7 +2265,7 @@ seq_search(char *name, const char *const * array, int type, int max, int *len)
 
 for (last = 0, a = array; *a != NULL; a++)
 {
-/* comperate first chars */
+/* compare first chars */
 if (*name != **a)
 continue;
 

@@ -533,7 +533,7 @@ calc_hist_selectivity(TypeCacheEntry *typcache, VariableStatData *vardata,
 {
 /*
  * Lower bound no longer matters. Just estimate the fraction
- * with an upper bound <= const uppert bound
+ * with an upper bound <= const upper bound
  */
 hist_selec =
 calc_hist_selectivity_scalar(typcache, &const_upper,
@@ -2687,7 +2687,7 @@ is_input_argument(int nth, const char *argmodes)
 }
 
 /*
- * Append used transformated types to specified buffer
+ * Append used transformed types to specified buffer
  */
 static void
 print_function_trftypes(StringInfo buf, HeapTuple proctup)

@@ -899,7 +899,7 @@ calc_rank_cd(const float4 *arrdata, TSVector txt, TSQuery query, int method)
 
 /*
  * if doc are big enough then ext.q may be equal to ext.p due to limit
- * of posional information. In this case we approximate number of
+ * of positional information. In this case we approximate number of
  * noise word as half cover's length
  */
 nNoise = (ext.q - ext.p) - (ext.end - ext.begin);

@@ -908,7 +908,7 @@ calc_rank_cd(const float4 *arrdata, TSVector txt, TSQuery query, int method)
 Wdoc += Cpos / ((double) (1 + nNoise));
 
 CurExtPos = ((double) (ext.q + ext.p)) / 2.0;
-if (NExtent > 0 && CurExtPos > PrevExtPos /* prevent devision by
+if (NExtent > 0 && CurExtPos > PrevExtPos /* prevent division by
  * zero in a case of
 multiple lexize */ )
 SumDist += 1.0 / (CurExtPos - PrevExtPos);

@@ -342,7 +342,7 @@ window_lag(PG_FUNCTION_ARGS)
 
 /*
  * lag_with_offset
- * returns the value of VE evelulated on a row that is OFFSET
+ * returns the value of VE evaluated on a row that is OFFSET
  * rows before the current row within a partition,
  * per spec.
 */
@@ -1433,7 +1433,7 @@ RelationInitPhysicalAddr(Relation relation)
  * points to the current file since the older file will be gone (or
  * truncated). The new file will still contain older rows so lookups
  * in them will work correctly. This wouldn't work correctly if
- * rewrites were allowed to change the schema in a noncompatible way,
+ * rewrites were allowed to change the schema in an incompatible way,
  * but those are prevented both on catalog tables and on user tables
  * declared as additional catalog tables.
  */

@@ -879,7 +879,7 @@ get_func_arg_info(HeapTuple procTup,
 /*
  * get_func_trftypes
  *
- * Returns a number of transformated types used by function.
+ * Returns the number of transformed types used by function.
  */
 int
 get_func_trftypes(HeapTuple procTup,

@@ -1108,7 +1108,7 @@ process_settings(Oid databaseid, Oid roleid)
 
 relsetting = heap_open(DbRoleSettingRelationId, AccessShareLock);
 
-/* read all the settings under the same snapsot for efficiency */
+/* read all the settings under the same snapshot for efficiency */
 snapshot = RegisterSnapshot(GetCatalogSnapshot(DbRoleSettingRelationId));
 
 /* Later settings are ignored if set earlier. */

@@ -19,7 +19,7 @@ OBJS = backend_random.o guc.o help_config.o pg_config.o pg_controldata.o \
 tzparser.o
 
 # This location might depend on the installation directories. Therefore
-# we can't subsitute it into pg_config.h.
+# we can't substitute it into pg_config.h.
 ifdef krb_srvtab
 override CPPFLAGS += -DPG_KRB_SRVTAB='"$(krb_srvtab)"'
 endif
@@ -318,7 +318,7 @@ sum_free_pages(FreePageManager *fpm)
 
 /*
  * Compute the size of the largest run of pages that the user could
- * succesfully get.
+ * successfully get.
  */
 static Size
 FreePageManagerLargestContiguous(FreePageManager *fpm)

@@ -360,7 +360,7 @@ FreePageManagerLargestContiguous(FreePageManager *fpm)
 
 /*
  * Recompute the size of the largest run of pages that the user could
- * succesfully get, if it has been marked dirty.
+ * successfully get, if it has been marked dirty.
 */
 static void
 FreePageManagerUpdateLargest(FreePageManager *fpm)

@@ -1704,7 +1704,7 @@ FreePageManagerPutInternal(FreePageManager *fpm, Size first_page, Size npages,
  * The act of allocating pages for use in constructing our btree
  * should never cause any page to become more full, so the new
  * split depth should be no greater than the old one, and perhaps
- * less if we fortutiously allocated a chunk that freed up a slot
+ * less if we fortuitously allocated a chunk that freed up a slot
  * on the page we need to update.
  */
 Assert(result.split_pages <= fpm->btree_recycle_count);

@@ -1625,7 +1625,7 @@ HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
 }
 
 /*
- * check whether the transaciont id 'xid' is in the pre-sorted array 'xip'.
+ * check whether the transaction id 'xid' is in the pre-sorted array 'xip'.
 */
 static bool
 TransactionIdInArray(TransactionId xid, TransactionId *xip, Size num)
@@ -198,7 +198,7 @@ InitArchiveFmt_Custom(ArchiveHandle *AH)
  *
  * Optional.
  *
- * Set up extrac format-related TOC data.
+ * Set up extract format-related TOC data.
 */
 static void
 _ArchiveEntry(ArchiveHandle *AH, TocEntry *te)

@@ -3500,7 +3500,7 @@ getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables)
 
 resetPQExpBuffer(query);
 
-/* Get the publication memebership for the table. */
+/* Get the publication membership for the table. */
 appendPQExpBuffer(query,
 "SELECT pr.tableoid, pr.oid, p.pubname "
 "FROM pg_catalog.pg_publication_rel pr,"

@@ -828,7 +828,7 @@ StoreQueryTuple(const PGresult *result)
 char *varname;
 char *value;
 
-/* concate prefix and column name */
+/* concatenate prefix and column name */
 varname = psprintf("%s%s", pset.gset_prefix, colname);
 
 if (!PQgetisnull(result, 0, i))

@@ -2127,7 +2127,7 @@ describeOneTableDetails(const char *schemaname,
 printTableAddFooter(&cont, _("Check constraints:"));
 for (i = 0; i < tuples; i++)
 {
-/* untranslated contraint name and def */
+/* untranslated constraint name and def */
 printfPQExpBuffer(&buf, " \"%s\" %s",
 PQgetvalue(result, i, 0),
 PQgetvalue(result, i, 1));
@@ -3197,7 +3197,7 @@ listTables(const char *tabtypes, const char *pattern, bool verbose, bool showSys
 if (verbose)
 {
 /*
- * As of PostgreSQL 9.0, use pg_table_size() to show a more acurate
+ * As of PostgreSQL 9.0, use pg_table_size() to show a more accurate
  * size of a table, including FSM, VM and TOAST tables.
  */
 if (pset.sversion >= 90000)

@@ -5108,7 +5108,7 @@ describeSubscriptions(const char *pattern, bool verbose)
 gettext_noop("Conninfo"));
 }
 
-/* Only display subscritpions in current database. */
+/* Only display subscriptions in current database. */
 appendPQExpBufferStr(&buf,
 "FROM pg_catalog.pg_subscription\n"
 "WHERE subdbid = (SELECT oid\n"

@@ -26,7 +26,7 @@
 #define VISIBILITYMAP_ALL_VISIBLE 0x01
 #define VISIBILITYMAP_ALL_FROZEN 0x02
 #define VISIBILITYMAP_VALID_BITS 0x03 /* OR of all valid
- * visiblitymap flags bits */
+ * visibilitymap flags bits */
 
 /* Macros for visibilitymap test */
 #define VM_ALL_VISIBLE(r, b, v) \

@@ -65,7 +65,7 @@ typedef enum
  * apply */
 } SyncCommitLevel;
 
-/* Define the default setting for synchonous_commit */
+/* Define the default setting for synchronous_commit */
 #define SYNCHRONOUS_COMMIT_ON SYNCHRONOUS_COMMIT_REMOTE_FLUSH
 
 /* Synchronous commit level */
@@ -989,7 +989,7 @@ typedef NameData *Name;
 /* gettext domain name mangling */
 
 /*
- * To better support parallel installations of major PostgeSQL
+ * To better support parallel installations of major PostgreSQL
  * versions as well as parallel installations of major library soname
  * versions, we mangle the gettext domain name by appending those
  * version numbers. The coding rule ought to be that wherever the

@@ -41,7 +41,7 @@ typedef struct PartitionDescData *PartitionDesc;
 
 /*-----------------------
  * PartitionDispatch - information about one partitioned table in a partition
- * hiearchy required to route a tuple to one of its partitions
+ * hierarchy required to route a tuple to one of its partitions
  *
  * reldesc Relation descriptor of the table
  * key Partition key information of the table

@@ -23,7 +23,7 @@
 #define SubscriptionRelation_Rowtype_Id 6101
 
 /*
- * Technicaly, the subscriptions live inside the database, so a shared catalog
+ * Technically, the subscriptions live inside the database, so a shared catalog
  * seems weird, but the replication launcher process needs to access all of
  * them to be able to start the workers, so we have to put them in a shared,
  * nailed catalog.

@@ -35,7 +35,7 @@ CATALOG(pg_subscription,6100) BKI_SHARED_RELATION BKI_ROWTYPE_OID(6101) BKI_SCHE
 
 Oid subowner; /* Owner of the subscription */
 
-bool subenabled; /* True if the subsription is enabled
+bool subenabled; /* True if the subscription is enabled
  * (the worker should be running) */
 
 #ifdef CATALOG_VARLEN /* variable-length fields start here */
@@ -65,7 +65,7 @@ typedef FormData_pg_subscription *Form_pg_subscription;
 typedef struct Subscription
 {
 Oid oid; /* Oid of the subscription */
-Oid dbid; /* Oid of the database which dubscription is in */
+Oid dbid; /* Oid of the database which subscription is in */
 char *name; /* Name of the subscription */
 Oid owner; /* Oid of the subscription owner */
 bool enabled; /* Indicates if the subscription is enabled */

@@ -345,7 +345,7 @@ SH_GROW(SH_TYPE *tb, uint32 newsize)
  * we need. We neither want tb->members increased, nor do we need to do
  * deal with deleted elements, nor do we need to compare keys. So a
  * special-cased implementation is lot faster. As resizing can be time
- * consuming and frequent, that's worthwile to optimize.
+ * consuming and frequent, that's worthwhile to optimize.
  *
  * To be able to simply move entries over, we have to start not at the
  * first bucket (i.e olddata[0]), but find the first bucket that's either

@@ -620,7 +620,7 @@ SH_DELETE(SH_TYPE *tb, SH_KEY_TYPE key)
 
 /*
  * Backward shift following elements till either an empty element
- * or an element at its optimal position is encounterered.
+ * or an element at its optimal position is encountered.
  *
  * While that sounds expensive, the average chain length is short,
 * and deletions would otherwise require toombstones.

@@ -842,7 +842,7 @@ typedef LONG slock_t;
 #define SPIN_DELAY() spin_delay()
 
 /* If using Visual C++ on Win64, inline assembly is unavailable.
- * Use a _mm_pause instrinsic instead of rep nop.
+ * Use a _mm_pause intrinsic instead of rep nop.
 */
 #if defined(_WIN64)
 static __forceinline void
@@ -147,7 +147,7 @@ typedef struct
 } CMPDAffix;
 
 /*
- * Type of encoding affix flags in Hunspel dictionaries
+ * Type of encoding affix flags in Hunspell dictionaries
 */
 typedef enum
 {

@@ -2,7 +2,7 @@
 
 /*
  * The aim is to get a simpler interface to the database routines.
- * All the tidieous messing around with tuples is supposed to be hidden
+ * All the tedious messing around with tuples is supposed to be hidden
  * by this function.
 */
 /* Author: Linus Tolke

@@ -324,7 +324,7 @@ PGTYPESdate_fmt_asc(date dDate, const char *fmtstring, char *outbuf)
  *
  * function works as follows:
  * - first we analyze the parameters
- * - if this is a special case with no delimiters, add delimters
+ * - if this is a special case with no delimiters, add delimiters
  * - find the tokens. First we look for numerical values. If we have found
  * less than 3 tokens, we check for the months' names and thereafter for
  * the abbreviations of the months' names.

@@ -1368,11 +1368,11 @@ PGTYPESnumeric_cmp(numeric *var1, numeric *var2)
 {
 /* use cmp_abs function to calculate the result */
 
-/* both are positive: normal comparation with cmp_abs */
+/* both are positive: normal comparison with cmp_abs */
 if (var1->sign == NUMERIC_POS && var2->sign == NUMERIC_POS)
 return cmp_abs(var1, var2);
 
-/* both are negative: return the inverse of the normal comparation */
+/* both are negative: return the inverse of the normal comparison */
 if (var1->sign == NUMERIC_NEG && var2->sign == NUMERIC_NEG)
 {
 /*
Some files were not shown because too many files have changed in this diff.