Fix inconsistencies and typos in the tree
This is numbered take 7, and addresses a set of issues around:
- Fixes for typos and incorrect reference names.
- Removal of unneeded comments.
- Removal of unreferenced functions and structures.
- Fixes regarding variable name consistency.

Author: Alexander Lakhin
Discussion: https://postgr.es/m/10bfd4ac-3e7c-40ab-2b2e-355ed15495e8@gmail.com
parent 4c3d05d875
commit 0896ae561b
@@ -100,7 +100,7 @@ bool g_cube_leaf_consistent(NDBOX *key, NDBOX *query, StrategyNumber strategy);
 bool g_cube_internal_consistent(NDBOX *key, NDBOX *query, StrategyNumber strategy);

 /*
-** Auxiliary funxtions
+** Auxiliary functions
 */
 static double distance_1D(double a1, double a2, double b1, double b2);
 static bool cube_is_point_internal(NDBOX *cube);
@@ -85,12 +85,6 @@ typedef struct

 #define GETSIGN(x) ( (BITVECP)( (char*)x+GTHDRSIZE ) )

-/*
- * types for functions
- */
-typedef ArrayType *(*formarray) (ArrayType *, ArrayType *);
-typedef void (*formfloat) (ArrayType *, float *);
-
 /*
  * useful functions
  */
@@ -368,7 +368,7 @@ queryin(char *buf)
 state.str = tmp;
 }

-/* set user friendly-operand view */
+/* set user-friendly operand view */
 memcpy((void *) GETOPERAND(query), (void *) state.op, state.sumlen);
 pfree(state.op);

@@ -727,7 +727,7 @@ pgstathashindex(PG_FUNCTION_ARGS)
 }

 /* -------------------------------------------------
- * GetHashPageStatis()
+ * GetHashPageStats()
  *
  * Collect statistics of single hash page
  * -------------------------------------------------
@@ -922,7 +922,7 @@ my_fetch(PG_FUNCTION_ARGS)
  * Convert 'fetched_data' into the a Datum of the original datatype.
  */

-/* fill *retval from fetch_data. */
+/* fill *retval from fetched_data. */
 gistentryinit(*retval, PointerGetDatum(converted_datum),
 entry->rel, entry->page, entry->offset, FALSE);

@@ -163,7 +163,7 @@ algorithms.

 * The posting list can be accessed with GinGetPosting(itup)

-* If GinITupIsCompressed(itup), the posting list is stored in compressed
+* If GinItupIsCompressed(itup), the posting list is stored in compressed
 format. Otherwise it is just an array of ItemPointers. New tuples are always
 stored in compressed format, uncompressed items can be present if the
 database was migrated from 9.3 or earlier version.
@@ -1013,7 +1013,7 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,

 /*
  * As pending list pages can have a high churn rate, it is desirable to
- * recycle them immediately to the FreeSpace Map when ordinary backends
+ * recycle them immediately to the FreeSpaceMap when ordinary backends
  * clean the list.
  */
 if (fsm_vac && fill_fsm)
@@ -170,7 +170,7 @@ it splits the page, and constructs the new downlink tuples for the split
 pages. The caller must then call gistplacetopage() on the parent page to
 insert the downlink tuples. The parent page that holds the downlink to
 the child might have migrated as a result of concurrent splits of the
-parent, gistfindCorrectParent() is used to find the parent page.
+parent, gistFindCorrectParent() is used to find the parent page.

 Splitting the root page works slightly differently. At root split,
 gistplacetopage() allocates the new child pages and replaces the old root
@@ -821,7 +821,7 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace,
 /*
  * Leaf page. Insert the new key. We've already updated all the
  * parents on the way down, but we might have to split the page if
- * it doesn't fit. gistinserthere() will take care of that.
+ * it doesn't fit. gistinserttuple() will take care of that.
  */

 /*
@@ -457,7 +457,7 @@ right sibling's left-link --- followed by a second WAL entry for the
 insertion on the parent level (which might itself be a page split, requiring
 an additional insertion above that, etc).

-For a root split, the followon WAL entry is a "new root" entry rather than
+For a root split, the follow-on WAL entry is a "new root" entry rather than
 an "insertion" entry, but details are otherwise much the same.

 Because splitting involves multiple atomic actions, it's possible that the
@@ -146,11 +146,6 @@ resetSpGistScanOpaque(SpGistScanOpaque so)
 {
 MemoryContext oldCtx;

-/*
- * clear traversal context before proceeding to the next scan; this must
- * not happen before the freeScanStack above, else we get double-free
- * crashes.
- */
 MemoryContextReset(so->traversalCxt);

 oldCtx = MemoryContextSwitchTo(so->traversalCxt);
@@ -155,7 +155,7 @@ static void TransactionIdSetPageStatusInternal(TransactionId xid, int nsubxids,
  * NB: this is a low-level routine and is NOT the preferred entry point
  * for most uses; functions in transam.c are the intended callers.
  *
- * XXX Think about issuing FADVISE_WILLNEED on pages that we will need,
+ * XXX Think about issuing POSIX_FADV_WILLNEED on pages that we will need,
  * but aren't yet in cache, as well as hinting pages not to fall out of
  * cache yet.
  */
@@ -716,7 +716,7 @@ DefineAttr(char *name, char *type, int attnum, int nullness)

 namestrcpy(&attrtypes[attnum]->attname, name);
 elog(DEBUG4, "column %s %s", NameStr(attrtypes[attnum]->attname), type);
-attrtypes[attnum]->attnum = attnum + 1; /* fillatt */
+attrtypes[attnum]->attnum = attnum + 1;

 typeoid = gettype(type);

@@ -2399,7 +2399,7 @@ TSParserIsVisible(Oid prsId)
 /*
  * get_ts_dict_oid - find a TS dictionary by possibly qualified name
  *
- * If not found, returns InvalidOid if failOK, else throws error
+ * If not found, returns InvalidOid if missing_ok, else throws error
  */
 Oid
 get_ts_dict_oid(List *names, bool missing_ok)
@@ -882,7 +882,7 @@ process_ordered_aggregate_multi(AggState *aggstate,
  * This function handles only one grouping set (already set in
  * aggstate->current_set).
  *
- * The finalfunction will be run, and the result delivered, in the
+ * The finalfn will be run, and the result delivered, in the
  * output-tuple context; caller's CurrentMemoryContext does not matter.
  *
  * The finalfn uses the state as set in the transno. This also might be
@@ -362,8 +362,8 @@ recurse_set_operations(Node *setOp, PlannerInfo *root,
  * fix_upper_expr() to the Result node's tlist. This would fail if the
  * Vars generated by generate_setop_tlist() were not exactly equal()
  * to the corresponding tlist entries of the subplan. However, since
- * the subplan was generated by generate_union_plan() or
- * generate_nonunion_plan(), and hence its tlist was generated by
+ * the subplan was generated by generate_union_paths() or
+ * generate_nonunion_paths(), and hence its tlist was generated by
  * generate_append_tlist(), this will work. We just tell
  * generate_setop_tlist() to use varno 0.
  */
@@ -58,7 +58,7 @@ static int check_agg_arguments(ParseState *pstate,
 static bool check_agg_arguments_walker(Node *node,
 check_agg_arguments_context *context);
 static void check_ungrouped_columns(Node *node, ParseState *pstate, Query *qry,
-List *groupClauses, List *groupClauseVars,
+List *groupClauses, List *groupClauseCommonVars,
 bool have_non_var_grouping,
 List **func_grouped_rels);
 static bool check_ungrouped_columns_walker(Node *node,
@@ -1208,7 +1208,7 @@ replace_rte_variables_mutator(Node *node,
  * a ConvertRowtypeExpr to map back to the rowtype expected by the expression.
  * (Therefore, to_rowtype had better be a child rowtype of the rowtype of the
  * RTE we're changing references to.) Callers that don't provide to_rowtype
- * should report an error if *found_row_type is true; we don't do that here
+ * should report an error if *found_whole_row is true; we don't do that here
  * because we don't know exactly what wording for the error message would
  * be most appropriate. The caller will be aware of the context.
  *
@@ -438,7 +438,8 @@ static void PinBuffer_Locked(BufferDesc *buf);
 static void UnpinBuffer(BufferDesc *buf, bool fixOwner);
 static void BufferSync(int flags);
 static uint32 WaitBufHdrUnlocked(BufferDesc *buf);
-static int SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *flush_context);
+static int SyncOneBuffer(int buf_id, bool skip_recently_used,
+WritebackContext *wb_context);
 static void WaitIO(BufferDesc *buf);
 static bool StartBufferIO(BufferDesc *buf, bool forInput);
 static void TerminateBufferIO(BufferDesc *buf, bool clear_dirty,
@@ -2346,7 +2347,7 @@ BgBufferSync(WritebackContext *wb_context)
  * BUF_REUSABLE: buffer is available for replacement, ie, it has
  * pin count 0 and usage count 0.
  *
- * (BUF_WRITTEN could be set in error if FlushBuffers finds the buffer clean
+ * (BUF_WRITTEN could be set in error if FlushBuffer finds the buffer clean
  * after locking it, but we don't care all that much.)
  *
  * Note: caller must have done ResourceOwnerEnlargeBuffers.
@@ -664,7 +664,7 @@ BufFileSeek(BufFile *file, int fileno, off_t offset, int whence)

 /*
  * Relative seek considers only the signed offset, ignoring
- * fileno. Note that large offsets (> 1 gig) risk overflow in this
+ * fileno. Note that large offsets (> 1 GB) risk overflow in this
  * add, unless we have 64-bit off_t.
  */
 newFile = file->curFile;
@@ -897,7 +897,7 @@ count_usable_fds(int max_to_probe, int *usable_fds, int *already_open)

 /*
  * set_max_safe_fds
- * Determine number of filedescriptors that fd.c is allowed to use
+ * Determine number of file descriptors that fd.c is allowed to use
  */
 void
 set_max_safe_fds(void)
@@ -223,7 +223,7 @@ XLogRecordPageWithFreeSpace(RelFileNode rnode, BlockNumber heapBlk,
 }

 /*
- * GetRecordedFreePage - return the amount of free space on a particular page,
+ * GetRecordedFreeSpace - return the amount of free space on a particular page,
  * according to the FSM.
  */
 Size
@@ -417,7 +417,7 @@ fsm_space_cat_to_avail(uint8 cat)

 /*
  * Which category does a page need to have, to accommodate x bytes of data?
- * While fsm_size_to_avail_cat() rounds down, this needs to round up.
+ * While fsm_space_avail_to_cat() rounds down, this needs to round up.
  */
 static uint8
 fsm_space_needed_to_cat(Size needed)
@@ -216,9 +216,9 @@ static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);

 /*
  * To make the fast-path lock mechanism work, we must have some way of
- * preventing the use of the fast-path when a conflicting lock might be
- * present. We partition* the locktag space into FAST_PATH_HASH_BUCKETS
- * partitions, and maintain an integer count of the number of "strong" lockers
+ * preventing the use of the fast-path when a conflicting lock might be present.
+ * We partition* the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
+ * and maintain an integer count of the number of "strong" lockers
  * in each partition. When any "strong" lockers are present (which is
  * hopefully not very often), the fast-path mechanism can't be used, and we
  * must fall back to the slower method of pushing matching locks directly
@@ -2709,7 +2709,7 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
 }

 /*
- * FastPathGetLockEntry
+ * FastPathGetRelationLockEntry
  * Return the PROCLOCK for a lock originally taken via the fast-path,
  * transferring it to the primary lock table if necessary.
  *
@@ -2896,8 +2896,8 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
  * the lock, then we needn't examine the individual relation IDs
  * at all; none of them can be relevant.
  *
- * See FastPathTransferLocks() for discussion of why we do this
- * test after acquiring the lock.
+ * See FastPathTransferRelationLocks() for discussion of why we do
+ * this test after acquiring the lock.
  */
 if (proc->databaseId != locktag->locktag_field1)
 {
@@ -3405,8 +3405,8 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
  *
  * If this value is changing, we don't care that much whether we get the
  * old or new value -- it is just used to determine how far
- * GlobalSerializableXmin must advance before this transaction can be
- * fully cleaned up. The worst that could happen is we wait for one more
+ * SxactGlobalXmin must advance before this transaction can be fully
+ * cleaned up. The worst that could happen is we wait for one more
  * transaction to complete before freeing some RAM; correctness of visible
  * behavior is not affected.
  */
@@ -4820,7 +4820,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
  *
  * If a dangerous structure is found, the pivot (the near conflict) is
  * marked for death, because rolling back another transaction might mean
- * that we flail without ever making progress. This transaction is
+ * that we fail without ever making progress. This transaction is
  * committing writes, so letting it commit ensures progress. If we
  * canceled the far conflict, it might immediately fail again on retry.
  */
@@ -282,8 +282,6 @@ static const char *const numth[] = {"st", "nd", "rd", "th", NULL};
 #define ALL_UPPER 2 /* NAME */
 #define ALL_LOWER 3 /* name */

-#define FULL_SIZ 0
-
 #define MAX_MONTH_LEN 9
 #define MAX_MON_LEN 3
 #define MAX_DAY_LEN 9
@@ -146,7 +146,7 @@ emsgsize:

 /*
  * static char *
- * inet_cidr_ntop_ipv6(src, bits, fakebits, dst, size)
+ * inet_cidr_ntop_ipv6(src, bits, dst, size)
  * convert IPv6 network number from network to presentation format.
  * generates CIDR style result always. Picks the shortest representation
  * unless the IP is really IPv4.
@@ -474,7 +474,7 @@ static char *flatten_reloptions(Oid relid);


 /* ----------
- * get_ruledef - Do it all and return a text
+ * pg_get_ruledef - Do it all and return a text
  * that could be used as a statement
  * to recreate the rule
  * ----------
@@ -594,7 +594,7 @@ pg_get_ruledef_worker(Oid ruleoid, int prettyFlags)


 /* ----------
- * get_viewdef - Mainly the same thing, but we
+ * pg_get_viewdef - Mainly the same thing, but we
  * only return the SELECT part of a view
  * ----------
  */
@@ -789,7 +789,7 @@ pg_get_viewdef_worker(Oid viewoid, int prettyFlags, int wrapColumn)
 }

 /* ----------
- * get_triggerdef - Get the definition of a trigger
+ * pg_get_triggerdef - Get the definition of a trigger
  * ----------
  */
 Datum
@@ -1083,7 +1083,7 @@ pg_get_triggerdef_worker(Oid trigid, bool pretty)
 }

 /* ----------
- * get_indexdef - Get the definition of an index
+ * pg_get_indexdef - Get the definition of an index
  *
  * In the extended version, there is a colno argument as well as pretty bool.
  * if colno == 0, we want a complete index definition.
@@ -2342,7 +2342,7 @@ decompile_column_index_array(Datum column_index_array, Oid relId,


 /* ----------
- * get_expr - Decompile an expression tree
+ * pg_get_expr - Decompile an expression tree
  *
  * Input: an expression tree in nodeToString form, and a relation OID
  *
@@ -2440,7 +2440,7 @@ pg_get_expr_worker(text *expr, Oid relid, const char *relname, int prettyFlags)


 /* ----------
- * get_userbyid - Get a user name by roleid and
+ * pg_get_userbyid - Get a user name by roleid and
  * fallback to 'unknown (OID=n)'
  * ----------
  */
@@ -6811,8 +6811,8 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)

 /*
  * Deparse a Var which references OUTER_VAR, INNER_VAR, or INDEX_VAR. This
- * routine is actually a callback for get_special_varno, which handles finding
- * the correct TargetEntry. We get the expression contained in that
+ * routine is actually a callback for resolve_special_varno, which handles
+ * finding the correct TargetEntry. We get the expression contained in that
  * TargetEntry and just need to deparse it, a job we can throw back on
  * get_rule_expr.
  */
@@ -11254,7 +11254,7 @@ flatten_reloptions(Oid relid)
 }

 /*
- * get_one_range_partition_bound_string
+ * get_range_partbound_string
  * A C string representation of one range partition bound
  */
 char *
@@ -2235,8 +2235,8 @@ check_for_freed_segments(dsa_area *area)

 /*
  * Any other process that has freed a segment has incremented
- * free_segment_counter while holding an LWLock, and that must precede any
- * backend creating a new segment in the same slot while holding an
+ * freed_segment_counter while holding an LWLock, and that must precede
+ * any backend creating a new segment in the same slot while holding an
  * LWLock, and that must precede the creation of any dsa_pointer pointing
  * into the new segment which might reach us here, and the caller must
  * have sent the dsa_pointer to this process using appropriate memory
@@ -6772,9 +6772,7 @@ getInherits(Archive *fout, int *numInherits)

 /*
  * Find all the inheritance information, excluding implicit inheritance
- * via partitioning. We handle that case using getPartitions(), because
- * we want more information about partitions than just the parent-child
- * relationship.
+ * via partitioning.
  */
 appendPQExpBufferStr(query, "SELECT inhrelid, inhparent FROM pg_inherits");

@@ -637,9 +637,6 @@ typedef struct _extensionMemberId
 ExtensionInfo *ext; /* owning extension */
 } ExtensionMemberId;

-/* global decls */
-extern bool force_quotes; /* double-quotes for identifiers flag */
-
 /* placeholders for comment starting and ending delimiters */
 extern char g_comment_start[10];
 extern char g_comment_end[10];
@@ -264,7 +264,7 @@ prepare_new_cluster(void)
 {
 /*
  * It would make more sense to freeze after loading the schema, but that
- * would cause us to lose the frozenids restored by the load. We use
+ * would cause us to lose the frozenxids restored by the load. We use
  * --analyze so autovacuum doesn't update statistics later
  */
 prep_status("Analyzing all rows in the new cluster");
@@ -400,7 +400,7 @@ extern void freeGISTstate(GISTSTATE *giststate);
 extern void gistdoinsert(Relation r,
 IndexTuple itup,
 Size freespace,
-GISTSTATE *GISTstate,
+GISTSTATE *giststate,
 Relation heapRel,
 bool is_build);

@@ -323,7 +323,7 @@ extern bool RestoreArchivedFile(char *path, const char *xlogfname,
 const char *recovername, off_t expectedSize,
 bool cleanupEnabled);
 extern void ExecuteRecoveryCommand(const char *command, const char *commandName,
-bool failOnerror);
+bool failOnSignal);
 extern void KeepFileRestoredFromArchive(const char *path, const char *xlogfname);
 extern void XLogArchiveNotify(const char *xlog);
 extern void XLogArchiveNotifySeg(XLogSegNo segno);
@@ -21,17 +21,6 @@
 (OidIsValid(userid) ? GetUserNameFromId(userid, false) : "public")


-/*
- * Generic option types for validation.
- * NB! These are treated as flags, so use only powers of two here.
- */
-typedef enum
-{
-ServerOpt = 1, /* options applicable to SERVER */
-UserMappingOpt = 2, /* options for USER MAPPING */
-FdwOpt = 4 /* options for FOREIGN DATA WRAPPER */
-} GenericOptionFlags;
-
 typedef struct ForeignDataWrapper
 {
 Oid fdwid; /* FDW Oid */
@@ -52,7 +52,6 @@ extern void InitProcessGlobals(void);

 extern int MaxLivePostmasterChildren(void);

-extern int GetNumShmemAttachedBgworkers(void);
 extern bool PostmasterMarkPIDForWorkerNotify(int);

 #ifdef EXEC_BACKEND
@@ -1,6 +1,6 @@
 /*-------------------------------------------------------------------------
  *
- * fsm_internal.h
+ * fsm_internals.h
  * internal functions for free space map
  *
  *
@@ -52,7 +52,7 @@ typedef uint64 SerCommitSeqNo;
  *
  * Eligibility for cleanup of committed transactions is generally determined
  * by comparing the transaction's finishedBefore field to
- * SerializableGlobalXmin.
+ * SxactGlobalXmin.
  */
 typedef struct SERIALIZABLEXACT
 {
@@ -811,7 +811,7 @@ rfmtlong(long lng_val, const char *fmt, char *outbuf)
 /* and fill the temp-string wit '0's up to there. */
 dotpos = getRightMostDot(fmt);

-/* start to parse the formatstring */
+/* start to parse the format-string */
 temp[0] = '\0';
 k = value.digits - 1; /* position in the value_string */
 for (i = fmt_len - 1, j = 0; i >= 0; i--, j++)
@@ -172,7 +172,7 @@ static const datetkn datetktbl[] = {
 ghst
 #endif
 {"gilt", TZ, 43200}, /* Gilbert Islands Time */
-{"gmt", TZ, 0}, /* Greenwish Mean Time */
+{"gmt", TZ, 0}, /* Greenwich Mean Time */
 {"gst", TZ, 36000}, /* Guam Std Time, USSR Zone 9 */
 {"gyt", TZ, -14400}, /* Guyana Time */
 {"h", UNITS, DTK_HOUR}, /* "hour" */
@@ -83,7 +83,7 @@ pqGetpwuid(uid_t uid, struct passwd *resultbuf, char *buffer,
 /*
  * Wrapper around gethostbyname() or gethostbyname_r() to mimic
  * POSIX gethostbyname_r() behaviour, if it is not available or required.
- * This function is called _only_ by our getaddinfo() portability function.
+ * This function is called _only_ by our getaddrinfo() portability function.
  */
 #ifndef HAVE_GETADDRINFO
 int
@@ -430,7 +430,7 @@ sub command_exit_is
 # header file). IPC::Run's result function always returns exit code >> 8,
 # assuming the Unix convention, which will always return 0 on Windows as
 # long as the process was not terminated by an exception. To work around
-# that, use $h->full_result on Windows instead.
+# that, use $h->full_results on Windows instead.
 my $result =
 ($Config{osname} eq "MSWin32")
 ? ($h->full_results)[0]