Fix typos and grammar in code comments and docs

Author: Alexander Lakhin
Discussion: https://postgr.es/m/f7e514cf-2446-21f1-a5d2-8c089a6e2168@gmail.com
Committed by: Michael Paquier, 2024-09-03 14:49:04 +09:00
parent c7cd2d6ed0
commit 4236825197
19 changed files with 24 additions and 24 deletions

@@ -39,7 +39,7 @@ step "s2_get_changes_slot0" { SELECT data FROM pg_logical_slot_get_changes('slot
 # serializes consistent snapshots to the disk at LSNs where are before
 # s0-transaction's commit. After s0-transaction commits, "s1_init" resumes but
 # must not restore any serialized snapshots and will reach the consistent state
-# when decoding a RUNNING_XACT record generated after s0-transaction's commit.
+# when decoding a RUNNING_XACTS record generated after s0-transaction's commit.
 # We check if the get_changes on 'slot1' will not return any s0-transaction's
 # changes as its confirmed_flush_lsn will be after the s0-transaction's commit
 # record.

@@ -3891,7 +3891,7 @@ static const PgStat_KindInfo custom_stats = {
 it with <literal>pgstat_register_kind</literal> and a unique ID used to
 store the entries related to this type of statistics:
 <programlisting>
-extern PgStat_Kind pgstat_add_kind(PgStat_Kind kind,
+extern PgStat_Kind pgstat_register_kind(PgStat_Kind kind,
                                    const PgStat_KindInfo *kind_info);
 </programlisting>
 While developing a new extension, use

@@ -2017,7 +2017,7 @@ check_multixact_offset_buffers(int *newval, void **extra, GucSource source)
 }
 /*
- * GUC check_hook for multixact_member_buffer
+ * GUC check_hook for multixact_member_buffers
  */
 bool
 check_multixact_member_buffers(int *newval, void **extra, GucSource source)

@@ -382,7 +382,7 @@ RefreshMatViewByOid(Oid matviewOid, bool is_create, bool skipData,
  * command tag is left false in cmdtaglist.h. Otherwise, the change of
  * completion tag output might break applications using it.
  *
- * When called from CREATE MATERIALIZED VIEW comand, the rowcount is
+ * When called from CREATE MATERIALIZED VIEW command, the rowcount is
  * displayed with the command tag CMDTAG_SELECT.
  */
 if (qc)

@@ -369,7 +369,7 @@ pg_wal_replay_wait(PG_FUNCTION_ARGS)
  */
 InvalidateCatalogSnapshot();
-/* Give up if there is still an active or registered sanpshot. */
+/* Give up if there is still an active or registered snapshot. */
 if (GetOldestSnapshot())
 	ereport(ERROR,
 			(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),

@@ -1101,7 +1101,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
 EEO_CASE(EEOP_PARAM_SET)
 {
-	/* out of line, unlikely to matter performancewise */
+	/* out of line, unlikely to matter performance-wise */
 	ExecEvalParamSet(state, op, econtext);
 	EEO_NEXT();
 }
@@ -4762,7 +4762,7 @@ ExecEvalJsonCoercionFinish(ExprState *state, ExprEvalStep *op)
 if (SOFT_ERROR_OCCURRED(&jsestate->escontext))
 {
 	/*
-	 * jsestate->error or jsetate->empty being set means that the error
+	 * jsestate->error or jsestate->empty being set means that the error
	 * occurred when coercing the JsonBehavior value. Throw the error in
	 * that case with the actual coercion error message shown in the
	 * DETAIL part.

@@ -2628,7 +2628,8 @@ CleanupBackend(Backend *bp,
 BackgroundWorkerStopNotifications(bp->pid);
 /*
- * If it was a background worker, also update its RegisteredWorker entry.
+ * If it was a background worker, also update its RegisteredBgWorker
+ * entry.
  */
 if (bp->bkend_type == BACKEND_TYPE_BGWORKER)
 {

@@ -83,7 +83,7 @@
  * this flag is set. Note that we don't need to reset this variable as after
  * promotion the slot sync worker won't be restarted because the pmState
  * changes to PM_RUN from PM_HOT_STANDBY and we don't support demoting
- * primary without restarting the server. See MaybeStartSlotSyncWorker.
+ * primary without restarting the server. See LaunchMissingBackgroundProcesses.
  *
  * The 'syncing' flag is needed to prevent concurrent slot syncs to avoid slot
  * overwrites.

@@ -449,7 +449,7 @@ read_stream_begin_impl(int flags,
 queue_size = max_pinned_buffers + 1;
 /*
- * Allocate the object, the buffers, the ios and per_data_data space in
+ * Allocate the object, the buffers, the ios and per_buffer_data space in
  * one big chunk. Though we have queue_size buffers, we want to be able
  * to assume that all the buffers for a single read are contiguous (i.e.
  * don't wrap around halfway through), so we allow temporary overflows of

@@ -396,7 +396,7 @@ LockManagerShmemInit(void)
 /*
  * Compute init/max size to request for lock hashtables. Note these
- * calculations must agree with LockShmemSize!
+ * calculations must agree with LockManagerShmemSize!
  */
 max_table_size = NLOCKENTS();
 init_table_size = max_table_size / 2;

@@ -367,9 +367,9 @@ lookup_type_cache(Oid type_id, int flags)
 ctl.entrysize = sizeof(TypeCacheEntry);
 /*
- * TypeEntry takes hash value from the system cache. For TypeCacheHash
- * we use the same hash in order to speedup search by hash value. This
- * is used by hash_seq_init_with_hash_value().
+ * TypeCacheEntry takes hash value from the system cache. For
+ * TypeCacheHash we use the same hash in order to speedup search by
+ * hash value. This is used by hash_seq_init_with_hash_value().
  */
 ctl.hash = type_cache_syshash;

@@ -54,7 +54,7 @@ recovery_target_action = 'pause'
 EOM
 $node2->start();
-# Wait until recoveery pauses, then promote.
+# Wait until recovery pauses, then promote.
 $node2->poll_query_until('postgres', "SELECT pg_get_wal_replay_pause_state() = 'paused';");
 $node2->safe_psql('postgres', "SELECT pg_promote()");
@@ -65,7 +65,7 @@ INSERT INTO mytable VALUES (2, 'blackberry');
 EOM
 # Now take an incremental backup. If WAL summarization didn't follow the
-# timeline cange correctly, something should break at this point.
+# timeline change correctly, something should break at this point.
 my $backup2path = $node1->backup_dir . '/backup2';
 $node2->command_ok(
 	[ 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',

@@ -1715,7 +1715,7 @@ ExecQueryAndProcessResults(const char *query,
 {
 	/*
 	 * Display the current chunk of results, unless the output
-	 * stream stopped working or we got cancelled. We skip use of
+	 * stream stopped working or we got canceled. We skip use of
	 * PrintQueryResult and go directly to printQuery, so that we
	 * can pass the correct is_pager value and because we don't
	 * want PrintQueryStatus to happen yet. Above, we rejected

@@ -13,7 +13,7 @@
  * taken here is less flexible, because a writer can only write to a file,
  * while a compressor can write to a subsequent astreamer which is free
  * to do whatever it likes. The reason it's like this is because this
- * code was adapated from old, less-modular pg_basebackup code that used
+ * code was adapted from old, less-modular pg_basebackup code that used
  * the same APIs that astreamer_gzip_writer now uses, and it didn't seem
  * necessary to change anything at the time.
  *

@@ -66,7 +66,6 @@ extern ReadStream *read_stream_begin_smgr_relation(int flags,
                                                   ReadStreamBlockNumberCB callback,
                                                   void *callback_private_data,
                                                   size_t per_buffer_data_size);
-extern Buffer read_stream_next_buffer(ReadStream *stream, void **per_buffer_private);
 extern void read_stream_reset(ReadStream *stream);
 extern void read_stream_end(ReadStream *stream);

@@ -12,7 +12,7 @@
 #define INJECTION_POINT_H
 /*
- * Injections points require --enable-injection-points.
+ * Injection points require --enable-injection-points.
  */
 #ifdef USE_INJECTION_POINTS
 #define INJECTION_POINT_LOAD(name) InjectionPointLoad(name)

@@ -948,7 +948,7 @@ fillPGconn(PGconn *conn, PQconninfoOption *connOptions)
  * Copy over option values from srcConn to dstConn
  *
  * Don't put anything cute here --- intelligence should be in
- * connectOptions2 ...
+ * pqConnectOptions2 ...
  *
  * Returns true on success. On failure, returns false and sets error message of
  * dstConn.

@@ -67,7 +67,7 @@ like(
 my $offset = -s $node->logfile;
-# Role with pg_signal_autovacuum can terminate autovacuum worker.
+# Role with pg_signal_autovacuum_worker can terminate autovacuum worker.
 my $terminate_with_pg_signal_av = $node->psql(
 	'postgres', qq(
 	SET ROLE regress_worker_role;

@@ -76,7 +76,7 @@ $node_publisher->safe_psql(
 INSERT INTO tab_full VALUES (11);
 PREPARE TRANSACTION 'test_prepared_tab_full';");
-# Confirm the ERROR is reported becasue max_prepared_transactions is zero
+# Confirm the ERROR is reported because max_prepared_transactions is zero
 $node_subscriber->wait_for_log(
 	qr/ERROR: ( [A-Z0-9]+:)? prepared transactions are disabled/);