Run pgindent on 9.2 source tree in preparation for first 9.3
commit-fest.
@@ -513,7 +513,7 @@ cclass(struct vars * v,    /* context */
 {
     size_t      len;
     struct cvec *cv = NULL;
-    const char * const *namePtr;
+    const char *const * namePtr;
     int         i,
                 index;
 
@@ -521,7 +521,7 @@ cclass(struct vars * v,    /* context */
      * The following arrays define the valid character class names.
      */
 
-    static const char * const classNames[] = {
+    static const char *const classNames[] = {
         "alnum", "alpha", "ascii", "blank", "cntrl", "digit", "graph",
         "lower", "print", "punct", "space", "upper", "xdigit", NULL
     };
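The hunk above only moves a space, but the type it touches is easy to misread: const char *const * is a non-const pointer to a const pointer to const char. A small standalone illustration (not part of the patch) of what that permits:

    static const char *const classNames[] = {"alnum", "alpha", NULL};

    static int
    count_names(void)
    {
        const char *const *namePtr = classNames;    /* pointer itself is mutable */
        int         n = 0;

        while (*namePtr++ != NULL)
            n++;                    /* advancing namePtr is fine ... */
        /* *namePtr = "x";          -- error: the array slots are const */
        /* classNames[0][0] = 'X';  -- error: the chars are const */
        return n;
    }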
@@ -562,8 +562,8 @@ cclass(struct vars * v,    /* context */
         index = (int) CC_ALPHA;
 
     /*
-     * Now compute the character class contents. For classes that are
-     * based on the behavior of a <wctype.h> or <ctype.h> function, we use
+     * Now compute the character class contents. For classes that are based
+     * on the behavior of a <wctype.h> or <ctype.h> function, we use
      * pg_ctype_get_cache so that we can cache the results. Other classes
      * have definitions that are hard-wired here, and for those we just
      * construct a transient cvec on the fly.
@@ -605,10 +605,11 @@ cclass(struct vars * v,    /* context */
             cv = pg_ctype_get_cache(pg_wc_ispunct);
             break;
         case CC_XDIGIT:
+
             /*
              * It's not clear how to define this in non-western locales, and
-             * even less clear that there's any particular use in trying.
-             * So just hard-wire the meaning.
+             * even less clear that there's any particular use in trying. So
+             * just hard-wire the meaning.
              */
             cv = getcvec(v, 0, 3);
             if (cv)
@@ -680,9 +680,9 @@ typedef int (*pg_wc_probefunc) (pg_wchar c);
 
 typedef struct pg_ctype_cache
 {
-    pg_wc_probefunc probefunc;      /* pg_wc_isalpha or a sibling */
-    Oid         collation;          /* collation this entry is for */
-    struct cvec cv;                 /* cache entry contents */
+    pg_wc_probefunc probefunc;  /* pg_wc_isalpha or a sibling */
+    Oid         collation;      /* collation this entry is for */
+    struct cvec cv;             /* cache entry contents */
     struct pg_ctype_cache *next;    /* chain link */
 } pg_ctype_cache;
 
@@ -730,7 +730,7 @@ store_match(pg_ctype_cache *pcc, pg_wchar chr1, int nchrs)
 
 /*
  * Given a probe function (e.g., pg_wc_isalpha) get a struct cvec for all
- * chrs satisfying the probe function. The active collation is the one
+ * chrs satisfying the probe function.  The active collation is the one
  * previously set by pg_set_regex_collation. Return NULL if out of memory.
  *
  * Note that the result must not be freed or modified by caller.
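For readers new to this file: the pg_ctype_cache struct in the earlier hunk backs a small cache chain keyed on (probe function, collation), and the comment above documents its lookup routine. The following is a hedged, self-contained sketch of that lookup pattern; the types are simplified stand-ins and lookup_or_make_cache is a hypothetical name, not PostgreSQL's actual pg_ctype_get_cache:

    #include <stdlib.h>

    typedef unsigned int pg_wchar;      /* stand-ins for the real typedefs */
    typedef unsigned int Oid;
    typedef int (*pg_wc_probefunc) (pg_wchar c);

    struct cvec
    {
        int         nchrs;              /* grossly simplified contents */
    };

    typedef struct pg_ctype_cache
    {
        pg_wc_probefunc probefunc;      /* pg_wc_isalpha or a sibling */
        Oid         collation;          /* collation this entry is for */
        struct cvec cv;                 /* cache entry contents */
        struct pg_ctype_cache *next;    /* chain link */
    } pg_ctype_cache;

    static pg_ctype_cache *pg_ctype_cache_list = NULL;
    static Oid  pg_regex_collation = 0; /* set by pg_set_regex_collation */

    static struct cvec *
    lookup_or_make_cache(pg_wc_probefunc probefunc)
    {
        pg_ctype_cache *pcc;

        /* Reuse an entry built for this probe function and collation. */
        for (pcc = pg_ctype_cache_list; pcc != NULL; pcc = pcc->next)
        {
            if (pcc->probefunc == probefunc &&
                pcc->collation == pg_regex_collation)
                return &pcc->cv;
        }

        /* Cache miss: build a new entry and push it onto the chain. */
        pcc = malloc(sizeof(pg_ctype_cache));
        if (pcc == NULL)
            return NULL;                /* out of memory, per the contract */
        pcc->probefunc = probefunc;
        pcc->collation = pg_regex_collation;
        pcc->cv.nchrs = 0;              /* the real code probes chrs here */
        pcc->next = pg_ctype_cache_list;
        pg_ctype_cache_list = pcc;
        return &pcc->cv;                /* caller must not free or modify */
    }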
@@ -777,7 +777,7 @@ pg_ctype_get_cache(pg_wc_probefunc probefunc)
  * UTF8 go up to 0x7FF, which is a pretty arbitrary cutoff but we cannot
  * extend it as far as we'd like (say, 0xFFFF, the end of the Basic
  * Multilingual Plane) without creating significant performance issues due
- * to too many characters being fed through the colormap code. This will
+ * to too many characters being fed through the colormap code.  This will
  * need redesign to fix reasonably, but at least for the moment we have
  * all common European languages covered. Otherwise (not C, not UTF8) go
  * up to 255. These limits are interrelated with restrictions discussed
@@ -1119,11 +1119,11 @@ parseqatom(struct vars * v,
     {
         /*
          * If there's no backrefs involved, we can turn x{m,n} into
-         * x{m-1,n-1}x, with capturing parens in only the second x. This
-         * is valid because we only care about capturing matches from the
-         * final iteration of the quantifier. It's a win because we can
-         * implement the backref-free left side as a plain DFA node, since
-         * we don't really care where its submatches are.
+         * x{m-1,n-1}x, with capturing parens in only the second x. This is
+         * valid because we only care about capturing matches from the final
+         * iteration of the quantifier. It's a win because we can implement
+         * the backref-free left side as a plain DFA node, since we don't
+         * really care where its submatches are.
          */
         dupnfa(v->nfa, atom->begin, atom->end, s, atom->begin);
         assert(m >= 1 && m != INFINITY && n >= 1);
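The rewrite this comment describes relies on the capture rule that a group iterated by a quantifier reports only its final iteration. This standalone program, using the platform's POSIX <regex.h> (not PostgreSQL's engine, which has the same last-iteration semantics), demonstrates why x{m,n} can become x{m-1,n-1}x with parens only on the trailing x:

    #include <regex.h>
    #include <stdio.h>

    int
    main(void)
    {
        regex_t     re;
        regmatch_t  m[2];

        /* Group 1 iterates three times over "aba"; only the last
         * iteration determines what it reports. */
        if (regcomp(&re, "^(a|b){2,3}c$", REG_EXTENDED) != 0)
            return 1;
        if (regexec(&re, "abac", 2, m, 0) == 0)
            printf("group 1 = [%d,%d)\n", (int) m[1].rm_so, (int) m[1].rm_eo);
        /* prints "group 1 = [2,3)": the final "a", not the first */
        regfree(&re);
        return 0;
    }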
@@ -272,7 +272,7 @@ static struct dfa *
 newdfa(struct vars * v,
        struct cnfa * cnfa,
        struct colormap * cm,
-       struct smalldfa * sml)       /* preallocated space, may be NULL */
+       struct smalldfa * sml)   /* preallocated space, may be NULL */
 {
     struct dfa *d;
     size_t      nss = cnfa->nstates * 2;
@@ -46,7 +46,7 @@ static struct rerr
 
 {
     /* the actual table is built from regex.h */
-#include "regex/regerrs.h"      /* pgrminclude ignore */
+#include "regex/regerrs.h"  /* pgrminclude ignore */
     {
         -1, "", "oops"
     },                          /* explanation special-cased in code */
@@ -531,7 +531,7 @@ zaptreesubs(struct vars * v,
 {
     if (t->op == '(')
     {
-        int     n = t->subno;
+        int         n = t->subno;
 
         assert(n > 0);
         if ((size_t) n < v->nmatch)
@@ -948,7 +948,7 @@ citerdissect(struct vars * v,
     }
 
     /*
-     * We need workspace to track the endpoints of each sub-match. Normally
+     * We need workspace to track the endpoints of each sub-match.  Normally
      * we consider only nonzero-length sub-matches, so there can be at most
      * end-begin of them. However, if min is larger than that, we will also
      * consider zero-length sub-matches in order to find enough matches.
@@ -977,8 +977,8 @@ citerdissect(struct vars * v,
     /*
      * Our strategy is to first find a set of sub-match endpoints that are
      * valid according to the child node's DFA, and then recursively dissect
-     * each sub-match to confirm validity. If any validity check fails,
-     * backtrack the last sub-match and try again. And, when we next try for
+     * each sub-match to confirm validity.  If any validity check fails,
+     * backtrack the last sub-match and try again.  And, when we next try for
      * a validity check, we need not recheck any successfully verified
      * sub-matches that we didn't move the endpoints of. nverified remembers
      * how many sub-matches are currently known okay.
@@ -1028,10 +1028,10 @@ citerdissect(struct vars * v,
     }
 
     /*
-     * We've identified a way to divide the string into k sub-matches
-     * that works so far as the child DFA can tell. If k is an allowed
-     * number of matches, start the slow part: recurse to verify each
-     * sub-match. We always have k <= max_matches, needn't check that.
+     * We've identified a way to divide the string into k sub-matches that
+     * works so far as the child DFA can tell. If k is an allowed number
+     * of matches, start the slow part: recurse to verify each sub-match.
+     * We always have k <= max_matches, needn't check that.
      */
     if (k < min_matches)
         goto backtrack;
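The comments in these citerdissect hunks outline a divide-and-verify strategy: propose a DFA-valid split into k sub-matches, recursively verify each piece, and backtrack the last piece on failure. As a rough, hedged sketch of that shape only (recursive here, where the real code is iterative with an explicit endpoint array and the nverified re-check optimization; the even-length verify() is a toy stand-in for the recursive dissect):

    #include <stdio.h>

    #define MAXPIECES 8

    static int  endpts[MAXPIECES + 1];  /* endpts[i] = end of i'th piece */

    /* Toy stand-in for verifying one sub-match: accept nonempty,
     * even-length pieces only. */
    static int
    verify(const char *s, int lo, int hi)
    {
        (void) s;
        return hi > lo && (hi - lo) % 2 == 0;
    }

    /* Split s[endpts[k]..len) into pieces so the total count lands in
     * [min_m, max_m], longest candidate first, backtracking on failure. */
    static int
    dissect(const char *s, int k, int len, int min_m, int max_m)
    {
        int         lo = endpts[k];

        if (lo == len)
            return k >= min_m;          /* consumed all: enough pieces? */
        if (k == max_m)
            return 0;                   /* out of pieces */
        for (int hi = len; hi > lo; hi--)
        {
            if (!verify(s, lo, hi))
                continue;
            endpts[k + 1] = hi;
            if (dissect(s, k + 1, len, min_m, max_m))
                return 1;
            /* else backtrack: shorten this piece and retry */
        }
        return 0;
    }

    int
    main(void)
    {
        const char *s = "abcdef";

        endpts[0] = 0;
        if (dissect(s, 0, 6, 2, 3))     /* finds "abcd" + "ef" */
            for (int i = 1; endpts[i - 1] < 6; i++)
                printf("piece %d: %.*s\n", i,
                       endpts[i] - endpts[i - 1], s + endpts[i - 1]);
        return 0;
    }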
@@ -1065,13 +1065,14 @@ citerdissect(struct vars * v,
     /* match failed to verify, so backtrack */
 
 backtrack:
+
     /*
      * Must consider shorter versions of the current sub-match. However,
      * we'll only ask for a zero-length match if necessary.
      */
     while (k > 0)
     {
-        chr    *prev_end = endpts[k - 1];
+        chr        *prev_end = endpts[k - 1];
 
         if (endpts[k] > prev_end)
         {
@@ -1132,7 +1133,7 @@ creviterdissect(struct vars * v,
     }
 
     /*
-     * We need workspace to track the endpoints of each sub-match. Normally
+     * We need workspace to track the endpoints of each sub-match.  Normally
      * we consider only nonzero-length sub-matches, so there can be at most
      * end-begin of them. However, if min is larger than that, we will also
      * consider zero-length sub-matches in order to find enough matches.
@@ -1161,8 +1162,8 @@ creviterdissect(struct vars * v,
     /*
      * Our strategy is to first find a set of sub-match endpoints that are
      * valid according to the child node's DFA, and then recursively dissect
-     * each sub-match to confirm validity. If any validity check fails,
-     * backtrack the last sub-match and try again. And, when we next try for
+     * each sub-match to confirm validity.  If any validity check fails,
+     * backtrack the last sub-match and try again.  And, when we next try for
      * a validity check, we need not recheck any successfully verified
      * sub-matches that we didn't move the endpoints of. nverified remembers
      * how many sub-matches are currently known okay.
@@ -1214,10 +1215,10 @@ creviterdissect(struct vars * v,
     }
 
     /*
-     * We've identified a way to divide the string into k sub-matches
-     * that works so far as the child DFA can tell. If k is an allowed
-     * number of matches, start the slow part: recurse to verify each
-     * sub-match. We always have k <= max_matches, needn't check that.
+     * We've identified a way to divide the string into k sub-matches that
+     * works so far as the child DFA can tell. If k is an allowed number
+     * of matches, start the slow part: recurse to verify each sub-match.
+     * We always have k <= max_matches, needn't check that.
      */
     if (k < min_matches)
         goto backtrack;
@@ -1251,6 +1252,7 @@ creviterdissect(struct vars * v,
     /* match failed to verify, so backtrack */
 
 backtrack:
+
     /*
      * Must consider longer versions of the current sub-match.
      */