Fix typos and duplicate words

This fixes various typos, duplicated words, and tiny bits of whitespace,
mainly in code comments but also in docs.

Author: Daniel Gustafsson <daniel@yesql.se>
Author: Heikki Linnakangas <hlinnaka@iki.fi>
Author: Alexander Lakhin <exclusion@gmail.com>
Author: David Rowley <dgrowleyml@gmail.com>
Author: Nazir Bilal Yavuz <byavuz81@gmail.com>
Discussion: https://postgr.es/m/3F577953-A29E-4722-98AD-2DA9EFF2CBB8@yesql.se
Committed by Daniel Gustafsson on 2024-04-18 21:28:07 +02:00
parent fbed6ebe41
commit 950d4a2cb1
67 changed files with 93 additions and 92 deletions

@@ -2,7 +2,7 @@ CREATE TABLE bttest_a(id int8);
 CREATE TABLE bttest_b(id int8);
 CREATE TABLE bttest_multi(id int8, data int8);
 CREATE TABLE delete_test_table (a bigint, b bigint, c bigint, d bigint);
--- Stabalize tests
+-- Stabilize tests
 ALTER TABLE bttest_a SET (autovacuum_enabled = false);
 ALTER TABLE bttest_b SET (autovacuum_enabled = false);
 ALTER TABLE bttest_multi SET (autovacuum_enabled = false);

@@ -3,7 +3,7 @@ CREATE TABLE bttest_b(id int8);
 CREATE TABLE bttest_multi(id int8, data int8);
 CREATE TABLE delete_test_table (a bigint, b bigint, c bigint, d bigint);
--- Stabalize tests
+-- Stabilize tests
 ALTER TABLE bttest_a SET (autovacuum_enabled = false);
 ALTER TABLE bttest_b SET (autovacuum_enabled = false);
 ALTER TABLE bttest_multi SET (autovacuum_enabled = false);

@@ -3036,7 +3036,7 @@ bt_normalize_tuple(BtreeCheckState *state, IndexTuple itup)
 * In the heap, tuples may contain short varlena datums with both 1B
 * header and 4B headers. But the corresponding index tuple should always
 * have such varlena's with 1B headers. So, if there is a short varlena
-* with 4B header, we need to convert it for for fingerprinting.
+* with 4B header, we need to convert it for fingerprinting.
 *
 * Note that we rely on deterministic index_form_tuple() TOAST compression
 * of normalized input.

@@ -802,7 +802,7 @@ HINT: Execute a database-wide VACUUM in that database.
 <para>
 Similar to the XID case, if autovacuum fails to clear old MXIDs from a table, the
 system will begin to emit warning messages when the database's oldest MXIDs reach forty
-million transactions from the wraparound point. And, just as an the XID case, if these
+million transactions from the wraparound point. And, just as in the XID case, if these
 warnings are ignored, the system will refuse to generate new MXIDs once there are fewer
 than three million left until wraparound.
 </para>

@@ -207,7 +207,7 @@ if docs_dep.found()
 alias_target('man', man)
 alias_target('install-man', install_doc_man)
-# built and installed as part of the the docs target
+# built and installed as part of the docs target
 installdocs += install_doc_man
 docs += man
 endif

@@ -766,7 +766,7 @@ ginFinishSplit(GinBtree btree, GinBtreeStack *stack, bool freestack,
 /*
 * An entry point to ginFinishSplit() that is used when we stumble upon an
 * existing incompletely split page in the tree, as opposed to completing a
-* split that we just made outselves. The difference is that stack->buffer may
+* split that we just made ourselves. The difference is that stack->buffer may
 * be merely share-locked on entry, and will be upgraded to exclusive mode.
 *
 * Note: Upgrading the lock momentarily releases it. Doing that in a scan

@@ -33,7 +33,7 @@
 typedef struct
 {
 /*-------------------------------------------------------
-* Arguments passed to heap_page_and_freeze()
+* Arguments passed to heap_page_prune_and_freeze()
 *-------------------------------------------------------
 */
@@ -306,7 +306,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
 * If the HEAP_PRUNE_FREEZE option is set, we will also freeze tuples if it's
 * required in order to advance relfrozenxid / relminmxid, or if it's
 * considered advantageous for overall system performance to do so now. The
-* 'cutoffs', 'presult', 'new_refrozen_xid' and 'new_relmin_mxid' arguments
+* 'cutoffs', 'presult', 'new_relfrozen_xid' and 'new_relmin_mxid' arguments
 * are required when freezing. When HEAP_PRUNE_FREEZE option is set, we also
 * set presult->all_visible and presult->all_frozen on exit, to indicate if
 * the VM bits can be set. They are always set to false when the
@@ -337,7 +337,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
 * off_loc is the offset location required by the caller to use in error
 * callback.
 *
-* new_relfrozen_xid and new_relmin_xid must provided by the caller if the
+* new_relfrozen_xid and new_relmin_mxid must provided by the caller if the
 * HEAP_PRUNE_FREEZE option is set. On entry, they contain the oldest XID and
 * multi-XID seen on the relation so far. They will be updated with oldest
 * values present on the page after pruning. After processing the whole

@@ -1756,7 +1756,7 @@ _bt_start_prim_scan(IndexScanDesc scan, ScanDirection dir)
 *
 * (The rules are the same for backwards scans, except that the operators are
 * flipped: just replace the precondition's >= operator with a <=, and the
-* postcondition's <= operator with with a >=. In other words, just swap the
+* postcondition's <= operator with a >=. In other words, just swap the
 * precondition with the postcondition.)
 *
 * We also deal with "advancing" non-required arrays here. Callers whose
@@ -4133,7 +4133,7 @@ _bt_checkkeys_look_ahead(IndexScanDesc scan, BTReadPageState *pstate,
 else
 {
 /*
-* Failure -- "ahead" tuple is too far ahead (we were too aggresive).
+* Failure -- "ahead" tuple is too far ahead (we were too aggressive).
 *
 * Reset the number of rechecks, and aggressively reduce the target
 * distance (we're much more aggressive here than we were when the

@@ -25,7 +25,7 @@
 * Parse the WAL format of an xact commit and abort records into an easier to
 * understand format.
 *
-* This routines are in xactdesc.c because they're accessed in backend (when
+* These routines are in xactdesc.c because they're accessed in backend (when
 * replaying WAL) and frontend (pg_waldump) code. This file is the only xact
 * specific one shared between both. They're complicated enough that
 * duplication would be bothersome.

@@ -1668,7 +1668,7 @@ DeconstructFkConstraintRow(HeapTuple tuple, int *numfks,
 }
 /*
-* FindFkPeriodOpers -
+* FindFKPeriodOpers -
 *
 * Looks up the operator oids used for the PERIOD part of a temporal foreign key.
 * The opclass should be the opclass of that PERIOD element.

@@ -5,7 +5,7 @@
 *
 * src/backend/catalog/system_functions.sql
 *
-* This file redefines certain built-in functions that it's impractical
+* This file redefines certain built-in functions that are impractical
 * to fully define in pg_proc.dat. In most cases that's because they use
 * SQL-standard function bodies and/or default expressions. The node
 * tree representations of those are too unreadable, platform-dependent,

@@ -167,7 +167,7 @@ get_index_am_oid(const char *amname, bool missing_ok)
 /*
 * get_table_am_oid - given an access method name, look up its OID
-* and verify it corresponds to an table AM.
+* and verify it corresponds to a table AM.
 */
 Oid
 get_table_am_oid(const char *amname, bool missing_ok)

@@ -996,7 +996,7 @@ CopyFrom(CopyFromState cstate)
 cstate->escontext->error_occurred)
 {
 /*
-* Soft error occured, skip this tuple and deal with error
+* Soft error occurred, skip this tuple and deal with error
 * information according to ON_ERROR.
 */
 if (cstate->opts.on_error == COPY_ON_ERROR_IGNORE)

@@ -3312,7 +3312,7 @@ dbase_redo(XLogReaderState *record)
 */
 FlushDatabaseBuffers(xlrec->src_db_id);
-/* Close all sgmr fds in all backends. */
+/* Close all smgr fds in all backends. */
 WaitForProcSignalBarrier(EmitProcSignalBarrier(PROCSIGNAL_BARRIER_SMGRRELEASE));
 /*
@@ -3378,7 +3378,7 @@ dbase_redo(XLogReaderState *record)
 /* Clean out the xlog relcache too */
 XLogDropDatabase(xlrec->db_id);
-/* Close all sgmr fds in all backends. */
+/* Close all smgr fds in all backends. */
 WaitForProcSignalBarrier(EmitProcSignalBarrier(PROCSIGNAL_BARRIER_SMGRRELEASE));
 for (i = 0; i < xlrec->ntablespaces; i++)

@@ -5687,7 +5687,7 @@ ATParseTransformCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
 case AT_AddIndex:
 /*
-* A primary key on a inheritance parent needs supporting NOT
+* A primary key on an inheritance parent needs supporting NOT
 * NULL constraint on its children; enqueue commands to create
 * those or mark them inherited if they already exist.
 */

@@ -8,7 +8,7 @@
 *
 * In a parallel vacuum, we perform both index bulk deletion and index cleanup
 * with parallel worker processes. Individual indexes are processed by one
-* vacuum process. ParalleVacuumState contains shared information as well as
+* vacuum process. ParallelVacuumState contains shared information as well as
 * the memory space for storing dead items allocated in the DSA area. We
 * launch parallel worker processes at the start of parallel index
 * bulk-deletion and index cleanup and once all indexes are processed, the

@@ -4400,7 +4400,7 @@ ExecInitJsonExpr(JsonExpr *jsexpr, ExprState *state,
 /*
 * Add a special step, if needed, to check if the coercion evaluation ran
 * into an error but was not thrown because the ON ERROR behavior is not
-* ERROR. It will set jsesestate->error if an error did occur.
+* ERROR. It will set jsestate->error if an error did occur.
 */
 if (jsestate->jump_eval_coercion >= 0 && escontext != NULL)
 {

@@ -2885,7 +2885,7 @@ add_child_join_rel_equivalences(PlannerInfo *root,
 /*
 * add_setop_child_rel_equivalences
 * Add equivalence members for each non-resjunk target in 'child_tlist'
-* to the EquivalenceClass in the corresponding setop_pathkey's pk_class.
+* to the EquivalenceClass in the corresponding setop_pathkey's pk_eclass.
 *
 * 'root' is the PlannerInfo belonging to the top-level set operation.
 * 'child_rel' is the RelOptInfo of the child relation we're adding

@@ -384,7 +384,7 @@ group_keys_reorder_by_pathkeys(List *pathkeys, List **group_pathkeys,
 * *group_pathkeys containing grouping pathkeys altogether with aggregate
 * pathkeys. If we process aggregate pathkeys we could get an invalid
 * result of get_sortgroupref_clause_noerr(), because their
-* pathkey->pk_eclass->ec_sortref doesn't referece query targetlist. So,
+* pathkey->pk_eclass->ec_sortref doesn't reference query targetlist. So,
 * we allocate a separate list of pathkeys for lookups.
 */
 grouping_pathkeys = list_copy_head(*group_pathkeys, num_groupby_pathkeys);

@@ -214,7 +214,8 @@ set_operation_ordered_results_useful(SetOperationStmt *setop)
 *
 * Returns a RelOptInfo for the subtree, as well as these output parameters:
 * *pTargetList: receives the fully-fledged tlist for the subtree's top plan
-* *istrivial_tlist: true iif datatypes between parent and child match.
+* *istrivial_tlist: true if, and only if, datatypes between parent and child
+* match.
 *
 * The pTargetList output parameter is mostly redundant with the pathtarget
 * of the returned RelOptInfo, but for the moment we need it because much of

@@ -70,7 +70,7 @@ static JsonTablePlan *makeJsonTableSiblingJoin(JsonTablePlan *lplan,
 * (jt->context_item) and the column-generating expressions (jt->columns) to
 * populate TableFunc.docexpr and TableFunc.colvalexprs, respectively. Also,
 * the PASSING values (jt->passing) are transformed and added into
-* TableFunc.passvalexprs.
+* TableFunc.passingvalexprs.
 */
 ParseNamespaceItem *
 transformJsonTable(ParseState *pstate, JsonTable *jt)

@@ -3451,7 +3451,7 @@ checkPartition(Relation rel, Oid partRelOid)
 /*
 * transformPartitionCmdForSplit
-* Analyze the ALTER TABLLE ... SPLIT PARTITION command
+* Analyze the ALTER TABLE ... SPLIT PARTITION command
 *
 * For each new partition sps->bound is set to the transformed value of bound.
 * Does checks for bounds of new partitions.
@@ -3490,7 +3490,7 @@ transformPartitionCmdForSplit(CreateStmtContext *cxt, PartitionCmd *partcmd)
 /*
 * transformPartitionCmdForMerge
-* Analyze the ALTER TABLLE ... MERGE PARTITIONS command
+* Analyze the ALTER TABLE ... MERGE PARTITIONS command
 *
 * Does simple checks for merged partitions. Calculates bound of resulting
 * partition.

@@ -5146,7 +5146,7 @@ get_partition_bound_spec(Oid partOid, RangeVar *name)
 * the first of new partitions) then lower bound of "spec" should be equal (or
 * greater than or equal in case defaultPart=true) to lower bound of split
 * partition. If last=true (this means that "spec" is the last of new
-* partitions) then upper bound of of "spec" should be equal (or less than or
+* partitions) then upper bound of "spec" should be equal (or less than or
 * equal in case defaultPart=true) to upper bound of split partition.
 *
 * parent: partitioned table
@@ -5245,8 +5245,8 @@ check_partition_bounds_for_split_range(Relation parent,
 false, split_upper);
 /*
-* Upper bound of of "spec" should be equal (or less than or equal
-* in case defaultPart=true) to upper bound of split partition.
+* Upper bound of "spec" should be equal (or less than or equal in
+* case defaultPart=true) to upper bound of split partition.
 */
 if ((!defaultPart && cmpval) || (defaultPart && cmpval > 0))
 overlap = true;

@@ -1825,7 +1825,7 @@ match_clause_to_partition_key(GeneratePruningStepsContext *context,
 BooleanTest *new_booltest = (BooleanTest *) copyObject(clause);
 NullTest *nulltest;
-/* We expect 'noteq' to only be set to true for BooleanTests */
+/* We expect 'notclause' to only be set to true for BooleanTests */
 Assert(IsA(clause, BooleanTest));
 /* reverse the bool test */

@@ -108,7 +108,7 @@ static WalSummarizerData *WalSummarizerCtl;
 /*
 * When we reach end of WAL and need to read more, we sleep for a number of
-* milliseconds that is a integer multiple of MS_PER_SLEEP_QUANTUM. This is
+* milliseconds that is an integer multiple of MS_PER_SLEEP_QUANTUM. This is
 * the multiplier. It should vary between 1 and MAX_SLEEP_QUANTA, depending
 * on system activity. See summarizer_wait_for_wal() for how we adjust this.
 */

@@ -88,7 +88,7 @@
 * overwrites.
 *
 * The 'last_start_time' is needed by postmaster to start the slot sync worker
-* once per SLOTSYNC_RESTART_INTERVAL_SEC. In cases where a immediate restart
+* once per SLOTSYNC_RESTART_INTERVAL_SEC. In cases where an immediate restart
 * is expected (e.g., slot sync GUCs change), slot sync worker will reset
 * last_start_time before exiting, so that postmaster can start the worker
 * without waiting for SLOTSYNC_RESTART_INTERVAL_SEC.

@@ -3493,7 +3493,7 @@ WalSndDone(WalSndSendDataCallback send_data)
 * Returns the latest point in WAL that has been safely flushed to disk.
 * This should only be called when in recovery.
 *
-* This is called either by cascading walsender to find WAL postion to be sent
+* This is called either by cascading walsender to find WAL position to be sent
 * to a cascaded standby or by slot synchronization operation to validate remote
 * slot's lsn before syncing it locally.
 *

@@ -794,7 +794,7 @@ dependency_is_compatible_clause(Node *clause, Index relid, AttrNumber *attnum)
 }
 else if (IsA(clause, ScalarArrayOpExpr))
 {
-/* If it's an scalar array operator, check for Var IN Const. */
+/* If it's a scalar array operator, check for Var IN Const. */
 ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
 /*
@@ -1222,7 +1222,7 @@ dependency_is_compatible_expression(Node *clause, Index relid, List *statlist, N
 }
 else if (IsA(clause, ScalarArrayOpExpr))
 {
-/* If it's an scalar array operator, check for Var IN Const. */
+/* If it's a scalar array operator, check for Var IN Const. */
 ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
 /*

@@ -541,9 +541,9 @@ read_stream_begin_relation(int flags,
 stream->distance = 1;
 /*
-* Since we always always access the same relation, we can initialize
-* parts of the ReadBuffersOperation objects and leave them that way, to
-* avoid wasting CPU cycles writing to them for each read.
+* Since we always access the same relation, we can initialize parts of
+* the ReadBuffersOperation objects and leave them that way, to avoid
+* wasting CPU cycles writing to them for each read.
 */
 for (int i = 0; i < max_ios; ++i)
 {

@@ -1073,7 +1073,7 @@ PinBufferForBlock(Relation rel,
 /*
 * If there is no Relation it usually implies recovery and thus permanent,
-* but we take an argmument because CreateAndCopyRelationData can reach us
+* but we take an argument because CreateAndCopyRelationData can reach us
 * with only an SMgrRelation for an unlogged relation that we don't want
 * to flag with BM_PERMANENT.
 */

@@ -1032,7 +1032,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
 /*
 * Sleep till someone wakes me up. We do this even in the dontWait
-* case, beause while trying to go to sleep, we may discover that we
+* case, because while trying to go to sleep, we may discover that we
 * can acquire the lock immediately after all.
 */

@@ -1047,7 +1047,7 @@ AuxiliaryPidGetProc(int pid)
 * called, because it could be that when we try to find a position at which
 * to insert ourself into the wait queue, we discover that we must be inserted
 * ahead of everyone who wants a lock that conflict with ours. In that case,
-* we get the lock immediately. Beause of this, it's sensible for this function
+* we get the lock immediately. Because of this, it's sensible for this function
 * to have a dontWait argument, despite the name.
 *
 * The lock table's partition lock must be held at entry, and will be held

@@ -4221,7 +4221,7 @@ JsonTableSetDocument(TableFuncScanState *state, Datum value)
 }
 /*
-* Evaluate a JsonTablePlan's jsonpath to get a new row pattren from
+* Evaluate a JsonTablePlan's jsonpath to get a new row pattern from
 * the given context item
 */
 static void
@@ -4339,7 +4339,7 @@ JsonTablePlanScanNextRow(JsonTablePlanState *planstate)
 /*
 * Now fetch the nested plan's current row to be joined against the
 * parent row. Any further nested plans' paths will be re-evaluated
-* reursively, level at a time, after setting each nested plan's
+* recursively, level at a time, after setting each nested plan's
 * current row.
 */
 (void) JsonTablePlanNextRow(planstate->nested);

@@ -330,7 +330,7 @@ multirange_out(PG_FUNCTION_ARGS)
 }
 /*
-* Binary representation: First a int32-sized count of ranges, followed by
+* Binary representation: First an int32-sized count of ranges, followed by
 * ranges in their native binary representation.
 */
 Datum

@@ -6968,7 +6968,7 @@ btcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
 * btree scans, making the top-level scan look like a continuous scan
 * (as opposed to num_sa_scans-many primitive index scans). After
 * all, btree scans mostly work like that at runtime. However, such a
-* scheme would badly bias genericcostestimate's simplistic appraoch
+* scheme would badly bias genericcostestimate's simplistic approach
 * to calculating numIndexPages through prorating.
 *
 * Stick with the approach taken by non-native SAOP scans for now.

@@ -979,8 +979,8 @@ AllocSetAlloc(MemoryContext context, Size size, int flags)
 Assert(set->blocks != NULL);
 /*
-* If requested size exceeds maximum for chunks we hand the the request
-* off to AllocSetAllocLarge().
+* If requested size exceeds maximum for chunks we hand the request off to
+* AllocSetAllocLarge().
 */
 if (size > set->allocChunkLimit)
 return AllocSetAllocLarge(context, size, flags);

@@ -505,8 +505,8 @@ BumpAlloc(MemoryContext context, Size size, int flags)
 #endif
 /*
-* If requested size exceeds maximum for chunks we hand the the request
-* off to BumpAllocLarge().
+* If requested size exceeds maximum for chunks we hand the request off to
+* BumpAllocLarge().
 */
 if (chunk_size > set->allocChunkLimit)
 return BumpAllocLarge(context, size, flags);

@@ -541,8 +541,8 @@ GenerationAlloc(MemoryContext context, Size size, int flags)
 #endif
 /*
-* If requested size exceeds maximum for chunks we hand the the request
-* off to GenerationAllocLarge().
+* If requested size exceeds maximum for chunks we hand the request off to
+* GenerationAllocLarge().
 */
 if (chunk_size > set->allocChunkLimit)
 return GenerationAllocLarge(context, size, flags);

@@ -345,7 +345,7 @@ bbstreamer_tar_parser_free(bbstreamer *streamer)
 }
 /*
-* Create an bbstreamer that can generate a tar archive.
+* Create a bbstreamer that can generate a tar archive.
 *
 * This is intended to be usable either for generating a brand-new tar archive
 * or for modifying one on the fly. The input should be a series of typed

@@ -1177,7 +1177,7 @@ remember_to_cleanup_directory(char *target_path, bool rmtopdir)
 }
 /*
-* Empty out the list of directories scheduled for cleanup a exit.
+* Empty out the list of directories scheduled for cleanup at exit.
 *
 * We want to remove the output directories only on a failure, so call this
 * function when we know that the operation has succeeded.

@@ -756,7 +756,7 @@ write_block(int fd, char *output_filename,
 }
 /*
-* Read a block of data (BLCKSZ bytes) into the the buffer.
+* Read a block of data (BLCKSZ bytes) into the buffer.
 */
 static void
 read_block(rfile *s, off_t off, uint8 *buffer)

@@ -241,7 +241,7 @@ my $tab_upgraded2_oid = $old_sub->safe_psql('postgres',
 $old_sub->stop;
-# Change configuration so that initial table sync sync does not get started
+# Change configuration so that initial table sync does not get started
 # automatically
 $new_sub->append_conf('postgresql.conf',
 "max_logical_replication_workers = 0");

@@ -23,7 +23,7 @@
 /*
 * Create bitmasks from pg_unicode_category values for efficient comparison of
 * multiple categories. For instance, PG_U_MN_MASK is a bitmask representing
-* the general cateogry Mn; and PG_U_M_MASK represents general categories Mn,
+* the general category Mn; and PG_U_M_MASK represents general categories Mn,
 * Me, and Mc.
 *
 * The number of Unicode General Categories should never grow, so a 32-bit

@@ -287,7 +287,7 @@ typedef struct xl_heap_prune
 uint8 flags;
 /*
-* If XLHP_HAS_CONFLICT_HORIZON is set, the conflict horzion XID follows,
+* If XLHP_HAS_CONFLICT_HORIZON is set, the conflict horizon XID follows,
 * unaligned
 */
 } xl_heap_prune;
@@ -322,7 +322,7 @@ typedef struct xl_heap_prune
 #define XLHP_HAS_FREEZE_PLANS (1 << 4)
 /*
-* XLHP_HAS_REDIRECTIONS, XLHP_HAS_DEAD_ITEMS, and XLHP_HAS_NOW_UNUSED
+* XLHP_HAS_REDIRECTIONS, XLHP_HAS_DEAD_ITEMS, and XLHP_HAS_NOW_UNUSED_ITEMS
 * indicate that xlhp_prune_items sub-records with redirected, dead, and
 * unused item offsets are present.
 */
@@ -354,9 +354,9 @@ typedef struct xlhp_freeze_plan
 *
 * The backup block's data contains an array of xlhp_freeze_plan structs (with
 * nplans elements). The individual item offsets are located in an array at
-* the end of the entire record with with nplans * (each plan's ntuples)
-* members. Those offsets are in the same order as the plans. The REDO
-* routine uses the offsets to freeze the corresponding heap tuples.
+* the end of the entire record with nplans * (each plan's ntuples) members.
+* Those offsets are in the same order as the plans. The REDO routine uses
+* the offsets to freeze the corresponding heap tuples.
 *
 * (As of PostgreSQL 17, XLOG_HEAP2_PRUNE_VACUUM_SCAN records replace the
 * separate XLOG_HEAP2_FREEZE_PAGE records.)

@@ -73,7 +73,7 @@
 *
 * For longer or variable-length input, fasthash_accum() is a more
 * flexible, but more verbose method. The standalone functions use this
-* internally, so see fasthash64() for an an example of this.
+* internally, so see fasthash64() for an example of this.
 *
 * After all inputs have been mixed in, finalize the hash:
 *

@@ -64,7 +64,7 @@
 * small enough.
 *
 * There are two other techniques described in the paper that are not
-* impemented here:
+* implemented here:
 * - path compression "...removes all inner nodes that have only a single child."
 * - lazy path expansion "...inner nodes are only created if they are required
 * to distinguish at least two leaf nodes."
@@ -385,7 +385,7 @@ typedef struct RT_NODE
 /*
 * Number of children. uint8 is sufficient for all node kinds, because
-* nodes shrink when this number gets lower than some thresold. Since
+* nodes shrink when this number gets lower than some threshold. Since
 * node256 cannot possibly have zero children, we let the counter overflow
 * and we interpret zero as "256" for this node kind.
 */
@@ -1581,7 +1581,7 @@ RT_EXTEND_UP(RT_RADIX_TREE * tree, uint64 key)
 Assert(shift < target_shift);
-/* Grow tree upwards until start shift can accomodate the key */
+/* Grow tree upwards until start shift can accommodate the key */
 while (shift < target_shift)
 {
 RT_CHILD_PTR node;

@@ -2867,7 +2867,7 @@ typedef struct PlaceHolderVar
 * are not set meaningfully within such structs.
 *
 * We also create transient SpecialJoinInfos for child joins during
-* partiotionwise join planning, which are also not present in join_info_list.
+* partitionwise join planning, which are also not present in join_info_list.
 */
 #ifndef HAVE_SPECIALJOININFO_TYPEDEF
 typedef struct SpecialJoinInfo SpecialJoinInfo;

@@ -1791,7 +1791,7 @@ typedef struct JsonExpr
 /* Format of the above expression needed by ruleutils.c */
 JsonFormat *format;
-/* jsopath-valued expression containing the query pattern */
+/* jsonpath-valued expression containing the query pattern */
 Node *path_spec;
 /* Expected type/format of the output. */

@@ -202,7 +202,7 @@ struct PGPROC
 LocalTransactionId lxid; /* local id of top-level transaction
 * currently * being executed by this
 * proc, if running; else
-* InvalidLocaltransactionId */
+* InvalidLocalTransactionId */
 } vxid;
 /* These fields are zero while a backend is still starting up: */

@@ -210,7 +210,7 @@ PQcancelPoll(PGcancelConn *cancelConn)
 int n;
 /*
-* We leave most of the connection establishement to PQconnectPoll, since
+* We leave most of the connection establishment to PQconnectPoll, since
 * it's very similar to normal connection establishment. But once we get
 * to the CONNECTION_AWAITING_RESPONSE we need to start doing our own
 * thing.

@@ -1655,7 +1655,7 @@ pgtls_close(PGconn *conn)
 {
 /*
 * In the non-SSL case, just remove the crypto callbacks if the
-* connection has then loaded. This code path has no dependency on
+* connection has them loaded. This code path has no dependency on
 * any pending SSL calls.
 */
 if (conn->crypto_loaded)

@@ -9,7 +9,7 @@ step s1_create_temp_objects:
 CREATE OR REPLACE FUNCTION pg_temp.long() RETURNS text LANGUAGE sql AS $body$ SELECT %L; $body$$outer$,
 (SELECT string_agg(g.i::text||':'||random()::text, '|') FROM generate_series(1, 100) g(i))));
--- The above bug requirs function removal to happen after a catalog
+-- The above bug requires function removal to happen after a catalog
 -- invalidation. dependency.c sorts objects in descending oid order so
 -- that newer objects are deleted before older objects, so create a
 -- table after.
@@ -66,7 +66,7 @@ step s1_create_temp_objects:
 CREATE OR REPLACE FUNCTION pg_temp.long() RETURNS text LANGUAGE sql AS $body$ SELECT %L; $body$$outer$,
 (SELECT string_agg(g.i::text||':'||random()::text, '|') FROM generate_series(1, 100) g(i))));
--- The above bug requirs function removal to happen after a catalog
+-- The above bug requires function removal to happen after a catalog
 -- invalidation. dependency.c sorts objects in descending oid order so
 -- that newer objects are deleted before older objects, so create a
 -- table after.

@@ -30,7 +30,7 @@ step s1_create_temp_objects {
 CREATE OR REPLACE FUNCTION pg_temp.long() RETURNS text LANGUAGE sql AS $body$ SELECT %L; $body$$outer$,
 (SELECT string_agg(g.i::text||':'||random()::text, '|') FROM generate_series(1, 100) g(i))));
--- The above bug requirs function removal to happen after a catalog
+-- The above bug requires function removal to happen after a catalog
 -- invalidation. dependency.c sorts objects in descending oid order so
 -- that newer objects are deleted before older objects, so create a
 -- table after.

@@ -196,7 +196,7 @@ GetTotalResourceCount(ManyTestResourceKind *kinds, int nkinds)
 * Remember lots of resources, belonging to 'nkinds' different resource types
 * with different priorities. Then forget some of them, and finally, release
 * the resource owner. We use a custom resource type that performs various
-* sanity checks to verify that all the the resources are released, and in the
+* sanity checks to verify that all the resources are released, and in the
 * correct order.
 */
 PG_FUNCTION_INFO_V1(test_resowner_many);

@@ -2889,7 +2889,7 @@ GROUP BY c1.w, c1.z;
 RESET enable_nestloop;
 RESET enable_hashjoin;
 DROP TABLE group_agg_pk;
--- Test the case where the the ordering of scan matches the ordering within the
+-- Test the case where the ordering of the scan matches the ordering within the
 -- aggregate but cannot be found in the group-by list
 CREATE TABLE agg_sort_order (c1 int PRIMARY KEY, c2 int);
 CREATE UNIQUE INDEX agg_sort_order_c2_idx ON agg_sort_order(c2);

@@ -276,8 +276,8 @@ CREATE TABLE parted_si_p_even PARTITION OF parted_si FOR VALUES IN (0);
 CREATE TABLE parted_si_p_odd PARTITION OF parted_si FOR VALUES IN (1);
 -- Test that bulk relation extension handles reusing a single BulkInsertState
 -- across partitions. Without the fix applied, this reliably reproduces
--- #18130 unless shared_buffers is extremely small (preventing any use use of
--- bulk relation extension). See
+-- #18130 unless shared_buffers is extremely small (preventing any use of bulk
+-- relation extension). See
 -- https://postgr.es/m/18130-7a86a7356a75209d%40postgresql.org
 -- https://postgr.es/m/257696.1695670946%40sss.pgh.pa.us
 \set filename :abs_srcdir '/data/desc.data'

@@ -2734,7 +2734,7 @@ UPDATE fkpart10.tbl1 SET f1 = 2 WHERE f1 = 1;
 INSERT INTO fkpart10.tbl1 VALUES (0), (1);
 COMMIT;
 -- test that cross-partition updates correctly enforces the foreign key
--- restriction (specifically testing INITIAILLY DEFERRED)
+-- restriction (specifically testing INITIALLY DEFERRED)
 BEGIN;
 UPDATE fkpart10.tbl1 SET f1 = 3 WHERE f1 = 0;
 UPDATE fkpart10.tbl3 SET f1 = f1 * -1;

@@ -945,10 +945,10 @@ ALTER TABLE rf_tbl_abcd_part_pk ATTACH PARTITION rf_tbl_abcd_part_pk_1 FOR VALUE
 SET client_min_messages = 'ERROR';
 CREATE PUBLICATION testpub6 FOR TABLE rf_tbl_abcd_pk (a, b);
 RESET client_min_messages;
--- ok - (a,b) coverts all PK cols
+-- ok - (a,b) covers all PK cols
 UPDATE rf_tbl_abcd_pk SET a = 1;
 ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a, b, c);
--- ok - (a,b,c) coverts all PK cols
+-- ok - (a,b,c) covers all PK cols
 UPDATE rf_tbl_abcd_pk SET a = 1;
 ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a);
 -- fail - "b" is missing from the column list

@@ -689,7 +689,7 @@ CREATE TEXT SEARCH DICTIONARY tsdict_case
 ERROR: unrecognized Ispell parameter: "DictFile"
 -- Test grammar for configurations
 CREATE TEXT SEARCH CONFIGURATION dummy_tst (COPY=english);
--- Overriden mapping change with duplicated tokens.
+-- Overridden mapping change with duplicated tokens.
 ALTER TEXT SEARCH CONFIGURATION dummy_tst
 ALTER MAPPING FOR word, word WITH ispell;
 -- Not a token supported by the configuration's parser, fails.

@@ -1257,7 +1257,7 @@ RESET enable_nestloop;
 RESET enable_hashjoin;
 DROP TABLE group_agg_pk;
--- Test the case where the the ordering of scan matches the ordering within the
+-- Test the case where the ordering of the scan matches the ordering within the
 -- aggregate but cannot be found in the group-by list
 CREATE TABLE agg_sort_order (c1 int PRIMARY KEY, c2 int);
 CREATE UNIQUE INDEX agg_sort_order_c2_idx ON agg_sort_order(c2);

@@ -306,8 +306,8 @@ CREATE TABLE parted_si_p_odd PARTITION OF parted_si FOR VALUES IN (1);
 -- Test that bulk relation extension handles reusing a single BulkInsertState
 -- across partitions. Without the fix applied, this reliably reproduces
--- #18130 unless shared_buffers is extremely small (preventing any use use of
--- bulk relation extension). See
+-- #18130 unless shared_buffers is extremely small (preventing any use of bulk
+-- relation extension). See
 -- https://postgr.es/m/18130-7a86a7356a75209d%40postgresql.org
 -- https://postgr.es/m/257696.1695670946%40sss.pgh.pa.us
 \set filename :abs_srcdir '/data/desc.data'

@@ -1943,7 +1943,7 @@ INSERT INTO fkpart10.tbl1 VALUES (0), (1);
 COMMIT;
 -- test that cross-partition updates correctly enforces the foreign key
--- restriction (specifically testing INITIAILLY DEFERRED)
+-- restriction (specifically testing INITIALLY DEFERRED)
 BEGIN;
 UPDATE fkpart10.tbl1 SET f1 = 3 WHERE f1 = 0;
 UPDATE fkpart10.tbl3 SET f1 = f1 * -1;

@@ -603,10 +603,10 @@ ALTER TABLE rf_tbl_abcd_part_pk ATTACH PARTITION rf_tbl_abcd_part_pk_1 FOR VALUE
 SET client_min_messages = 'ERROR';
 CREATE PUBLICATION testpub6 FOR TABLE rf_tbl_abcd_pk (a, b);
 RESET client_min_messages;
--- ok - (a,b) coverts all PK cols
+-- ok - (a,b) covers all PK cols
 UPDATE rf_tbl_abcd_pk SET a = 1;
 ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a, b, c);
--- ok - (a,b,c) coverts all PK cols
+-- ok - (a,b,c) covers all PK cols
 UPDATE rf_tbl_abcd_pk SET a = 1;
 ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a);
 -- fail - "b" is missing from the column list

@@ -254,7 +254,7 @@ CREATE TEXT SEARCH DICTIONARY tsdict_case
 -- Test grammar for configurations
 CREATE TEXT SEARCH CONFIGURATION dummy_tst (COPY=english);
--- Overriden mapping change with duplicated tokens.
+-- Overridden mapping change with duplicated tokens.
 ALTER TEXT SEARCH CONFIGURATION dummy_tst
 ALTER MAPPING FOR word, word WITH ispell;
 -- Not a token supported by the configuration's parser, fails.

@@ -165,7 +165,7 @@ $node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
 # When DROP SUBSCRIPTION tries to drop the tablesync slot, the slot may not
 # have been created, which causes the slot to be created after the DROP
-# SUSCRIPTION finishes. Such slots eventually get dropped at walsender exit
+# SUBSCRIPTION finishes. Such slots eventually get dropped at walsender exit
 # time. So, to prevent being affected by such ephemeral tablesync slots, we
 # wait until all the slots have been cleaned.
 ok( $node_publisher->poll_query_until(

@@ -271,7 +271,7 @@ is( $node_subscriber->safe_psql(
 my $sub2_oid = $node_subscriber->safe_psql($db,
 qq(SELECT oid FROM pg_subscription WHERE subname = '$sub2_name'));
-# Diassociate the subscription 2 from its replication slot and drop it
+# Disassociate the subscription 2 from its replication slot and drop it
 $node_subscriber->safe_psql(
 $db,
 qq(