diff --git a/contrib/tablefunc/tablefunc.c b/contrib/tablefunc/tablefunc.c index 779bd4415e6..52b272f2989 100644 --- a/contrib/tablefunc/tablefunc.c +++ b/contrib/tablefunc/tablefunc.c @@ -1480,7 +1480,7 @@ validateConnectbyTupleDesc(TupleDesc td, bool show_branch, bool show_serial) "fifth column must be type %s", format_type_be(INT4OID)))); - /* check that the type of the fifth column is INT4 */ + /* check that the type of the fourth column is INT4 */ if (!show_branch && show_serial && TupleDescAttr(td, 3)->atttypid != INT4OID) ereport(ERROR, diff --git a/src/backend/commands/statscmds.c b/src/backend/commands/statscmds.c index afe6744e237..9938b65083f 100644 --- a/src/backend/commands/statscmds.c +++ b/src/backend/commands/statscmds.c @@ -213,14 +213,14 @@ CreateStatistics(CreateStatsStmt *stmt) * Convert the expression list to a simple array of attnums, but also keep * a list of more complex expressions. While at it, enforce some * constraints - we don't allow extended statistics on system attributes, - * and we require the data type to have less-than operator. + * and we require the data type to have a less-than operator. * - * There are many ways how to "mask" a simple attribute refenrece as an + * There are many ways to "mask" a simple attribute reference as an * expression, for example "(a+0)" etc. We can't possibly detect all of - * them, but we handle at least the simple case with attribute in parens. - * There'll always be a way around this, if the user is determined (like - * the "(a+0)" example), but this makes it somewhat consistent with how - * indexes treat attributes/expressions. + * them, but we handle at least the simple case with the attribute in + * parens. There'll always be a way around this, if the user is determined + * (like the "(a+0)" example), but this makes it somewhat consistent with + * how indexes treat attributes/expressions. 
*/ foreach(cell, stmt->exprs) { diff --git a/src/backend/executor/nodeTableFuncscan.c b/src/backend/executor/nodeTableFuncscan.c index 27dfa1b9564..f9fafa9e5ba 100644 --- a/src/backend/executor/nodeTableFuncscan.c +++ b/src/backend/executor/nodeTableFuncscan.c @@ -14,11 +14,11 @@ */ /* * INTERFACE ROUTINES - * ExecTableFuncscan scans a function. + * ExecTableFuncScan scans a function. * ExecFunctionNext retrieve next tuple in sequential order. - * ExecInitTableFuncscan creates and initializes a TableFuncscan node. - * ExecEndTableFuncscan releases any storage allocated. - * ExecReScanTableFuncscan rescans the function + * ExecInitTableFuncScan creates and initializes a TableFuncscan node. + * ExecEndTableFuncScan releases any storage allocated. + * ExecReScanTableFuncScan rescans the function */ #include "postgres.h" @@ -46,7 +46,7 @@ static void tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext); /* ---------------------------------------------------------------- * TableFuncNext * - * This is a workhorse for ExecTableFuncscan + * This is a workhorse for ExecTableFuncScan * ---------------------------------------------------------------- */ static TupleTableSlot * @@ -84,7 +84,7 @@ TableFuncRecheck(TableFuncScanState *node, TupleTableSlot *slot) } /* ---------------------------------------------------------------- - * ExecTableFuncscan(node) + * ExecTableFuncScan(node) * * Scans the function sequentially and returns the next qualifying * tuple. 
@@ -103,7 +103,7 @@ ExecTableFuncScan(PlanState *pstate) } /* ---------------------------------------------------------------- - * ExecInitTableFuncscan + * ExecInitTableFuncScan * ---------------------------------------------------------------- */ TableFuncScanState * @@ -205,7 +205,7 @@ ExecInitTableFuncScan(TableFuncScan *node, EState *estate, int eflags) } /* ---------------------------------------------------------------- - * ExecEndTableFuncscan + * ExecEndTableFuncScan * * frees any storage allocated through C routines. * ---------------------------------------------------------------- @@ -234,7 +234,7 @@ ExecEndTableFuncScan(TableFuncScanState *node) } /* ---------------------------------------------------------------- - * ExecReScanTableFuncscan + * ExecReScanTableFuncScan * * Rescans the relation. * ---------------------------------------------------------------- diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c index cedb3848dde..e53d381e199 100644 --- a/src/backend/optimizer/util/pathnode.c +++ b/src/backend/optimizer/util/pathnode.c @@ -105,7 +105,7 @@ compare_path_costs(Path *path1, Path *path2, CostSelector criterion) } /* - * compare_path_fractional_costs + * compare_fractional_path_costs * Return -1, 0, or +1 according as path1 is cheaper, the same cost, * or more expensive than path2 for fetching the specified fraction * of the total tuples. diff --git a/src/backend/statistics/README b/src/backend/statistics/README index 7fda13e75be..13a97a35662 100644 --- a/src/backend/statistics/README +++ b/src/backend/statistics/README @@ -12,7 +12,7 @@ hopefully improving the estimates and producing better plans. 
Types of statistics ------------------- -There are currently two kinds of extended statistics: +There are currently several kinds of extended statistics: (a) ndistinct coefficients @@ -73,8 +73,8 @@ it will do if: When the above conditions are met, clauselist_selectivity() first attempts to pass the clause list off to the extended statistics selectivity estimation -function. This functions may not find any clauses which is can perform any -estimations on. In such cases these clauses are simply ignored. When actual +function. This function may not find any clauses on which it can perform +estimations. In such cases, these clauses are simply ignored. When actual estimation work is performed in these functions they're expected to mark which clauses they've performed estimations for so that any other function performing estimations knows which clauses are to be skipped. diff --git a/src/backend/statistics/README.mcv b/src/backend/statistics/README.mcv index 8455b0d13f6..a918fb5634f 100644 --- a/src/backend/statistics/README.mcv +++ b/src/backend/statistics/README.mcv @@ -2,7 +2,7 @@ MCV lists ========= Multivariate MCV (most-common values) lists are a straightforward extension of -regular MCV list, tracking most frequent combinations of values for a group of +regular MCV lists, tracking most frequent combinations of values for a group of attributes. This works particularly well for columns with a small number of distinct values, @@ -18,7 +18,7 @@ Estimates of some clauses (e.g. equality) based on MCV lists are more accurate than when using histograms. Also, MCV lists don't necessarily require sorting of the values (the fact that -we use sorting when building them is implementation detail), but even more +we use sorting when building them is an implementation detail), but even more importantly the ordering is not built into the approximation (while histograms are built on ordering). 
So MCV lists work well even for attributes where the ordering of the data type is disconnected from the meaning of the data. For @@ -53,7 +53,7 @@ Hashed MCV (not yet implemented) Regular MCV lists have to include actual values for each item, so if those items are large the list may be quite large. This is especially true for multivariate MCV lists, although the current implementation partially mitigates this by -performing de-duplicating the values before storing them on disk. +de-duplicating the values before storing them on disk. It's possible to only store hashes (32-bit values) instead of the actual values, significantly reducing the space requirements. Obviously, this would only make @@ -77,7 +77,7 @@ to select the columns from pg_stats. The data is encoded as anyarrays, and all the items have the same data type, so anyarray provides a simple way to get a text representation. -With multivariate MCV lists the columns may use different data types, making +With multivariate MCV lists, the columns may use different data types, making it impossible to use anyarrays. It might be possible to produce a similar array-like representation, but that would complicate further processing and analysis of the MCV list. diff --git a/src/backend/statistics/extended_stats.c b/src/backend/statistics/extended_stats.c index 5fa36e0036e..4c352234577 100644 --- a/src/backend/statistics/extended_stats.c +++ b/src/backend/statistics/extended_stats.c @@ -699,11 +699,11 @@ examine_expression(Node *expr, int stattarget) } /* - * Using 'vacatts' of size 'nvacatts' as input data, return a newly built + * Using 'vacatts' of size 'nvacatts' as input data, return a newly-built * VacAttrStats array which includes only the items corresponding to - * attributes indicated by 'stxkeys'. If we don't have all of the per column - * stats available to compute the extended stats, then we return NULL to indicate - * to the caller that the stats should not be built. + * attributes indicated by 'attrs'. 
If we don't have all of the per-column + * stats available to compute the extended stats, then we return NULL to + * indicate to the caller that the stats should not be built. */ static VacAttrStats ** lookup_var_attr_stats(Relation rel, Bitmapset *attrs, List *exprs, diff --git a/src/common/pg_lzcompress.c b/src/common/pg_lzcompress.c index a30a2c2eb83..72e6a7ea61c 100644 --- a/src/common/pg_lzcompress.c +++ b/src/common/pg_lzcompress.c @@ -825,7 +825,7 @@ pglz_decompress(const char *source, int32 slen, char *dest, /* ---------- - * pglz_max_compressed_size - + * pglz_maximum_compressed_size - * * Calculate the maximum compressed size for a given amount of raw data. * Return the maximum size, or total compressed size if maximum size is