Mirror of https://github.com/postgres/postgres.git

Remove useless whitespace at end of lines

Peter Eisentraut
2010-11-23 22:27:50 +02:00
parent 44475e782f
commit fc946c39ae
517 changed files with 3463 additions and 3508 deletions


@ -258,7 +258,7 @@ RANLIB = @RANLIB@
WINDRES = @WINDRES@
X = @EXEEXT@
# Perl
ifneq (@PERL@,)
# quoted to protect pathname with spaces
@ -391,7 +391,7 @@ endif
# This macro is for use by libraries linking to libpq. (Because libpgport
# isn't created with the same link flags as libpq, it can't be used.)
libpq = -L$(libpq_builddir) -lpq
# If doing static linking, shared library dependency info isn't available,
# so add in the libraries that libpq depends on.
ifeq ($(enable_shared), no)
@ -400,9 +400,9 @@ libpq += $(filter -lintl -lssl -lcrypto -lkrb5 -lcrypt, $(LIBS)) \
endif
# This macro is for use by client executables (not libraries) that use libpq.
# We force clients to pull symbols from the non-shared library libpgport
# rather than pulling some libpgport symbols from libpq just because
# libpq uses those functions too. This makes applications less
# dependent on changes in libpq's usage of pgport. To do this we link to
# pgport before libpq. This does cause duplicate -lpgport's to appear
# on client link lines.
@ -517,7 +517,7 @@ $(top_builddir)/src/include/pg_config.h: $(top_builddir)/src/include/stamp-h
$(top_builddir)/src/include/stamp-h: $(top_srcdir)/src/include/pg_config.h.in $(top_builddir)/config.status
cd $(top_builddir) && ./config.status src/include/pg_config.h
# Also remake ecpg_config.h from ecpg_config.h.in if the latter changed, same
# logic as above.
$(top_builddir)/src/interfaces/ecpg/include/ecpg_config.h: $(top_builddir)/src/interfaces/ecpg/include/stamp-h


@ -271,7 +271,7 @@ endif
ifeq ($(PORTNAME), sunos4)
LINK.shared = $(LD) -assert pure-text -Bdynamic
endif
ifeq ($(PORTNAME), osf)
LINK.shared = $(LD) -shared -expect_unresolved '*'
endif


@ -187,7 +187,7 @@ distprep:
$(MAKE) -C parser gram.c gram.h scan.c
$(MAKE) -C bootstrap bootparse.c bootscanner.c
$(MAKE) -C catalog schemapg.h postgres.bki postgres.description postgres.shdescription
$(MAKE) -C utils fmgrtab.c fmgroids.h
$(MAKE) -C utils/misc guc-file.c
@ -305,7 +305,7 @@ maintainer-clean: distclean
#
# Support for code development.
#
# Use target "quick" to build "postgres" when you know all the subsystems
# are up to date. It saves the time of doing all the submakes.
.PHONY: quick
quick: $(OBJS)


@ -9,27 +9,27 @@ Gin stands for Generalized Inverted Index and should be considered as a genie,
not a drink.
Generalized means that the index does not know which operation it accelerates.
It instead works with custom strategies, defined for specific data types (read
"Index Method Strategies" in the PostgreSQL documentation). In that sense, Gin
is similar to GiST and differs from btree indices, which have predefined,
comparison-based operations.
An inverted index is an index structure storing a set of (key, posting list)
pairs, where 'posting list' is a set of documents in which the key occurs.
(A text document would usually contain many keys.) The primary goal of
Gin indices is support for highly scalable, full-text search in PostgreSQL.
Gin consists of a B-tree index constructed over entries (ET, entries tree),
where each entry is an element of the indexed value (element of array, lexeme
for tsvector) and where each tuple in a leaf page is either a pointer to a
B-tree over item pointers (PT, posting tree), or a list of item pointers
for tsvector) and where each tuple in a leaf page is either a pointer to a
B-tree over item pointers (PT, posting tree), or a list of item pointers
(PL, posting list) if the tuple is small enough.
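A minimal usage sketch of this structure (table and column names here are
hypothetical, not from the source):

    CREATE TABLE docs (id int, tags integer[]);
    -- each array element becomes an ET entry; matching heap pointers are
    -- kept in a posting list (PL) or posting tree (PT)
    CREATE INDEX docs_tags_gin ON docs USING gin (tags);
    SELECT id FROM docs WHERE tags @> ARRAY[42];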
Note: There is no delete operation for ET. The reason for this is that in
our experience, the set of distinct words in a large corpus changes very
rarely. This greatly simplifies the code and concurrency algorithms.
Gin comes with built-in support for one-dimensional arrays (eg. integer[],
text[]), but no support for NULL elements. The following operations are
available:
@ -59,25 +59,25 @@ Gin Fuzzy Limit
There are often situations when a full-text search returns a very large set of
results. Since reading tuples from the disk and sorting them could take a
lot of time, this is unacceptable for production. (Note that the search
itself is very fast.)
Such queries usually contain very frequent lexemes, so the results are not
very helpful. To facilitate execution of such queries Gin has a configurable
soft upper limit on the size of the returned set, determined by the
'gin_fuzzy_search_limit' GUC variable. This is set to 0 by default (no
limit).
If a non-zero search limit is set, then the returned set is a subset of the
whole result set, chosen at random.
"Soft" means that the actual number of returned results could slightly differ
from the specified limit, depending on the query and the quality of the
system's random number generator.
From experience, a value of 'gin_fuzzy_search_limit' in the thousands
(eg. 5000-20000) works well. This means that 'gin_fuzzy_search_limit' will
have no effect for queries returning a result set with fewer tuples than this
number.
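For illustration, assuming a table with a tsvector column (names hypothetical):

    SET gin_fuzzy_search_limit = 10000;  -- soft cap; 0 (the default) means no limit
    SELECT id FROM docs WHERE body_tsv @@ to_tsquery('common & lexeme');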
Limitations
@ -115,5 +115,5 @@ Distant future:
Authors
-------
All work was done by Teodor Sigaev (teodor@sigaev.ru) and Oleg Bartunov
(oleg@sai.msu.su).


@ -24,21 +24,21 @@ The current implementation of GiST supports:
* Concurrency
* Recovery support via WAL logging
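A usage sketch (table and column names hypothetical; box columns use a
built-in GiST operator class):

    CREATE TABLE regions (id int, area box);
    CREATE INDEX regions_area_gist ON regions USING gist (area);
    SELECT id FROM regions WHERE area && box '((0,0),(10,10))';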
The support for concurrency implemented in PostgreSQL was developed based on
the paper "Access Methods for Next-Generation Database Systems" by
Marcel Kornaker:
http://www.sai.msu.su/~megera/postgres/gist/papers/concurrency/access-methods-for-next-generation.pdf.gz
The original algorithms were modified in several ways:
* They should be adapted to PostgreSQL conventions. For example, the SEARCH
algorithm was considerably changed, because in PostgreSQL function search
should return one tuple (next), not all tuples at once. Also, it should
release page locks between calls.
* Since we added support for variable length keys, it's not possible to
guarantee enough free space for all keys on pages after splitting. User
defined function picksplit doesn't have information about size of tuples
(each tuple may contain several keys as in multicolumn index while picksplit
could work with only one key) and pages.
* We modified original INSERT algorithm for performance reason. In particular,
@ -67,7 +67,7 @@ gettuple(search-pred)
ptr = top of stack
while(true)
latch( ptr->page, S-mode )
if ( ptr->page->lsn != ptr->lsn )
ptr->lsn = ptr->page->lsn
currentposition=0
if ( ptr->parentlsn < ptr->page->nsn )
@ -88,7 +88,7 @@ gettuple(search-pred)
else if ( ptr->page is leaf )
unlatch( ptr->page )
return tuple
else
add to stack child page
end
currentposition++
@ -99,20 +99,20 @@ gettuple(search-pred)
Insert Algorithm
----------------
INSERT guarantees that the GiST tree remains balanced. User defined key method
Penalty is used for choosing a subtree to insert; method PickSplit is used for
the node splitting algorithm; method Union is used for propagating changes
upward to maintain the tree properties.
NOTICE: We modified the original INSERT algorithm for performance reasons. In
particular, it is now a single-pass algorithm.
Function findLeaf is used to identify the subtree for insertion. The page in
which insertion proceeds is locked, as well as its parent page. Functions
findParent and findPath are used to find parent pages, which could be changed
because of concurrent access. Function pageSplit is recursive and could split a
page into more than 2 pages, which could be necessary if keys have different
lengths or more than one key is inserted (in such a situation, user defined
function pickSplit cannot guarantee free space on page).
findLeaf(new-key)
@ -143,7 +143,7 @@ findLeaf(new-key)
end
findPath( stack item )
push stack, [root, 0, 0] // page, LSN, parent
while( stack )
ptr = top of stack
latch( ptr->page, S-mode )
@ -152,7 +152,7 @@ findPath( stack item )
end
for( each tuple on page )
if ( tuple->pagepointer == item->page )
return stack
else
add to stack at the end [tuple->pagepointer,0, ptr]
end
@ -160,12 +160,12 @@ findPath( stack item )
unlatch( ptr->page )
pop stack
end
findParent( stack item )
parent = item->parent
latch( parent->page, X-mode )
if ( parent->page->lsn != parent->lsn )
while(true)
search parent tuple on parent->page, if found the return
rightlink = parent->page->rightlink
unlatch( parent->page )
@ -214,7 +214,7 @@ placetopage(page, keysarray)
keysarray = [ union(keysarray) ]
end
end
insert(new-key)
stack = findLeaf(new-key)
keysarray = [new-key]
@ -236,4 +236,4 @@ insert(new-key)
Authors:
Teodor Sigaev <teodor@sigaev.ru>
Oleg Bartunov <oleg@sai.msu.su>


@ -154,7 +154,7 @@ even pages that don't contain any deletable tuples. This guarantees that
the btbulkdelete call cannot return while any indexscan is still holding
a copy of a deleted index tuple. Note that this requirement does not say
that btbulkdelete must visit the pages in any particular order. (See also
on-the-fly deletion, below.)
There is no such interlocking for deletion of items in internal pages,
since backends keep no lock nor pin on a page they have descended past.


@ -5608,7 +5608,7 @@ GetLatestXTime(void)
* Returns timestamp of latest processed commit/abort record.
*
* When the server has been started normally without recovery the function
* returns NULL.
*/
Datum
pg_last_xact_replay_timestamp(PG_FUNCTION_ARGS)
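A usage sketch of the function documented above:

    -- on a standby: timestamp of the latest replayed commit/abort record;
    -- NULL when the server was started normally without recovery
    SELECT pg_last_xact_replay_timestamp();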


@ -12,7 +12,7 @@ include $(top_builddir)/src/Makefile.global
override CPPFLAGS := -I. -I$(srcdir) $(CPPFLAGS)
OBJS= bootparse.o bootstrap.o
include $(top_srcdir)/src/backend/common.mk


@ -1269,7 +1269,7 @@ GRANT SELECT ON role_routine_grants TO PUBLIC;
-- not tracked by PostgreSQL
/*
* 5.47
* ROUTINE_SEQUENCE_USAGE view
*/
@ -1385,7 +1385,7 @@ CREATE VIEW routines AS
CAST(null AS sql_identifier) AS result_cast_scope_schema,
CAST(null AS sql_identifier) AS result_cast_scope_name,
CAST(null AS cardinal_number) AS result_cast_maximum_cardinality,
CAST(null AS sql_identifier) AS result_cast_dtd_identifier
FROM pg_namespace n, pg_proc p, pg_language l,
pg_type t, pg_namespace nt
@ -2323,7 +2323,7 @@ CREATE VIEW element_types AS
CAST(null AS cardinal_number) AS datetime_precision,
CAST(null AS character_data) AS interval_type,
CAST(null AS character_data) AS interval_precision,
CAST(null AS character_data) AS domain_default, -- XXX maybe a bug in the standard
CAST(current_database() AS sql_identifier) AS udt_catalog,


@ -552,7 +552,7 @@ object_exists(ObjectAddress address)
else
{
found = ((Form_pg_attribute) GETSTRUCT(atttup))->attisdropped;
ReleaseSysCache(atttup);
}
return found;
}
@ -654,5 +654,5 @@ object_exists(ObjectAddress address)
found = HeapTupleIsValid(systable_getnext(sd));
systable_endscan(sd);
heap_close(rel, AccessShareLock);
return found;
}

View File

@ -6,8 +6,8 @@
* src/backend/catalog/system_views.sql
*/
CREATE VIEW pg_roles AS
SELECT
rolname,
rolsuper,
rolinherit,
@ -47,72 +47,72 @@ CREATE VIEW pg_group AS
FROM pg_authid
WHERE NOT rolcanlogin;
CREATE VIEW pg_user AS
SELECT
usename,
usesysid,
usecreatedb,
usesuper,
usecatupd,
'********'::text as passwd,
valuntil,
useconfig
FROM pg_shadow;
CREATE VIEW pg_rules AS
SELECT
N.nspname AS schemaname,
C.relname AS tablename,
R.rulename AS rulename,
pg_get_ruledef(R.oid) AS definition
FROM (pg_rewrite R JOIN pg_class C ON (C.oid = R.ev_class))
LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE R.rulename != '_RETURN';
CREATE VIEW pg_views AS
SELECT
N.nspname AS schemaname,
C.relname AS viewname,
pg_get_userbyid(C.relowner) AS viewowner,
pg_get_viewdef(C.oid) AS definition
FROM pg_class C LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE C.relkind = 'v';
CREATE VIEW pg_tables AS
SELECT
N.nspname AS schemaname,
C.relname AS tablename,
pg_get_userbyid(C.relowner) AS tableowner,
T.spcname AS tablespace,
C.relhasindex AS hasindexes,
C.relhasrules AS hasrules,
C.relhastriggers AS hastriggers
FROM pg_class C LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
LEFT JOIN pg_tablespace T ON (T.oid = C.reltablespace)
WHERE C.relkind = 'r';
CREATE VIEW pg_indexes AS
SELECT
N.nspname AS schemaname,
C.relname AS tablename,
I.relname AS indexname,
T.spcname AS tablespace,
pg_get_indexdef(I.oid) AS indexdef
FROM pg_index X JOIN pg_class C ON (C.oid = X.indrelid)
JOIN pg_class I ON (I.oid = X.indexrelid)
LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
LEFT JOIN pg_tablespace T ON (T.oid = I.reltablespace)
WHERE C.relkind = 'r' AND I.relkind = 'i';
CREATE VIEW pg_stats AS
SELECT
nspname AS schemaname,
relname AS tablename,
attname AS attname,
stainherit AS inherited,
stanullfrac AS null_frac,
stawidth AS avg_width,
stadistinct AS n_distinct,
CASE
WHEN stakind1 IN (1, 4) THEN stavalues1
WHEN stakind2 IN (1, 4) THEN stavalues2
@ -137,14 +137,14 @@ CREATE VIEW pg_stats AS
WHEN stakind3 = 3 THEN stanumbers3[1]
WHEN stakind4 = 3 THEN stanumbers4[1]
END AS correlation
FROM pg_statistic s JOIN pg_class c ON (c.oid = s.starelid)
JOIN pg_attribute a ON (c.oid = attrelid AND attnum = s.staattnum)
LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace)
WHERE NOT attisdropped AND has_column_privilege(c.oid, a.attnum, 'select');
REVOKE ALL on pg_statistic FROM public;
CREATE VIEW pg_locks AS
SELECT * FROM pg_lock_status() AS L;
CREATE VIEW pg_cursors AS
@ -268,16 +268,16 @@ FROM
WHERE
l.objsubid = 0;
CREATE VIEW pg_settings AS
SELECT * FROM pg_show_all_settings() AS A;
CREATE RULE pg_settings_u AS
ON UPDATE TO pg_settings
WHERE new.name = old.name DO
SELECT set_config(old.name, new.setting, 'f');
CREATE RULE pg_settings_n AS
ON UPDATE TO pg_settings
DO INSTEAD NOTHING;
GRANT SELECT, UPDATE ON pg_settings TO PUBLIC;
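Given these rules, an UPDATE on pg_settings is rewritten into a set_config()
call; for example:

    -- rewritten by pg_settings_u into SELECT set_config('enable_seqscan', 'off', 'f')
    UPDATE pg_settings SET setting = 'off' WHERE name = 'enable_seqscan';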
@ -290,21 +290,21 @@ CREATE VIEW pg_timezone_names AS
-- Statistics views
CREATE VIEW pg_stat_all_tables AS
SELECT
C.oid AS relid,
N.nspname AS schemaname,
C.relname AS relname,
pg_stat_get_numscans(C.oid) AS seq_scan,
pg_stat_get_tuples_returned(C.oid) AS seq_tup_read,
sum(pg_stat_get_numscans(I.indexrelid))::bigint AS idx_scan,
sum(pg_stat_get_tuples_fetched(I.indexrelid))::bigint +
pg_stat_get_tuples_fetched(C.oid) AS idx_tup_fetch,
pg_stat_get_tuples_inserted(C.oid) AS n_tup_ins,
pg_stat_get_tuples_updated(C.oid) AS n_tup_upd,
pg_stat_get_tuples_deleted(C.oid) AS n_tup_del,
pg_stat_get_tuples_hot_updated(C.oid) AS n_tup_hot_upd,
pg_stat_get_live_tuples(C.oid) AS n_live_tup,
pg_stat_get_dead_tuples(C.oid) AS n_dead_tup,
pg_stat_get_last_vacuum_time(C.oid) as last_vacuum,
pg_stat_get_last_autovacuum_time(C.oid) as last_autovacuum,
@ -314,9 +314,9 @@ CREATE VIEW pg_stat_all_tables AS
pg_stat_get_autovacuum_count(C.oid) AS autovacuum_count,
pg_stat_get_analyze_count(C.oid) AS analyze_count,
pg_stat_get_autoanalyze_count(C.oid) AS autoanalyze_count
FROM pg_class C LEFT JOIN
pg_index I ON C.oid = I.indrelid
LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE C.relkind IN ('r', 't')
GROUP BY C.oid, N.nspname, C.relname;
@ -340,8 +340,8 @@ CREATE VIEW pg_stat_xact_all_tables AS
WHERE C.relkind IN ('r', 't')
GROUP BY C.oid, N.nspname, C.relname;
CREATE VIEW pg_stat_sys_tables AS
SELECT * FROM pg_stat_all_tables
WHERE schemaname IN ('pg_catalog', 'information_schema') OR
schemaname ~ '^pg_toast';
@ -350,8 +350,8 @@ CREATE VIEW pg_stat_xact_sys_tables AS
WHERE schemaname IN ('pg_catalog', 'information_schema') OR
schemaname ~ '^pg_toast';
CREATE VIEW pg_stat_user_tables AS
SELECT * FROM pg_stat_all_tables
WHERE schemaname NOT IN ('pg_catalog', 'information_schema') AND
schemaname !~ '^pg_toast';
@ -360,117 +360,117 @@ CREATE VIEW pg_stat_xact_user_tables AS
WHERE schemaname NOT IN ('pg_catalog', 'information_schema') AND
schemaname !~ '^pg_toast';
CREATE VIEW pg_statio_all_tables AS
SELECT
C.oid AS relid,
N.nspname AS schemaname,
C.relname AS relname,
pg_stat_get_blocks_fetched(C.oid) -
pg_stat_get_blocks_hit(C.oid) AS heap_blks_read,
pg_stat_get_blocks_hit(C.oid) AS heap_blks_hit,
sum(pg_stat_get_blocks_fetched(I.indexrelid) -
pg_stat_get_blocks_hit(I.indexrelid))::bigint AS idx_blks_read,
sum(pg_stat_get_blocks_hit(I.indexrelid))::bigint AS idx_blks_hit,
pg_stat_get_blocks_fetched(T.oid) -
pg_stat_get_blocks_hit(T.oid) AS toast_blks_read,
pg_stat_get_blocks_hit(T.oid) AS toast_blks_hit,
pg_stat_get_blocks_fetched(X.oid) -
pg_stat_get_blocks_hit(X.oid) AS tidx_blks_read,
pg_stat_get_blocks_hit(X.oid) AS tidx_blks_hit
FROM pg_class C LEFT JOIN
pg_index I ON C.oid = I.indrelid LEFT JOIN
pg_class T ON C.reltoastrelid = T.oid LEFT JOIN
pg_class X ON T.reltoastidxid = X.oid
LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE C.relkind IN ('r', 't')
GROUP BY C.oid, N.nspname, C.relname, T.oid, X.oid;
CREATE VIEW pg_statio_sys_tables AS
SELECT * FROM pg_statio_all_tables
WHERE schemaname IN ('pg_catalog', 'information_schema') OR
schemaname ~ '^pg_toast';
CREATE VIEW pg_statio_user_tables AS
SELECT * FROM pg_statio_all_tables
WHERE schemaname NOT IN ('pg_catalog', 'information_schema') AND
schemaname !~ '^pg_toast';
CREATE VIEW pg_stat_all_indexes AS
SELECT
C.oid AS relid,
I.oid AS indexrelid,
N.nspname AS schemaname,
C.relname AS relname,
I.relname AS indexrelname,
pg_stat_get_numscans(I.oid) AS idx_scan,
pg_stat_get_tuples_returned(I.oid) AS idx_tup_read,
pg_stat_get_tuples_fetched(I.oid) AS idx_tup_fetch
FROM pg_class C JOIN
pg_index X ON C.oid = X.indrelid JOIN
pg_class I ON I.oid = X.indexrelid
LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE C.relkind IN ('r', 't');
CREATE VIEW pg_stat_sys_indexes AS
SELECT * FROM pg_stat_all_indexes
WHERE schemaname IN ('pg_catalog', 'information_schema') OR
schemaname ~ '^pg_toast';
CREATE VIEW pg_stat_user_indexes AS
SELECT * FROM pg_stat_all_indexes
WHERE schemaname NOT IN ('pg_catalog', 'information_schema') AND
schemaname !~ '^pg_toast';
CREATE VIEW pg_statio_all_indexes AS
SELECT
C.oid AS relid,
I.oid AS indexrelid,
N.nspname AS schemaname,
C.relname AS relname,
I.relname AS indexrelname,
pg_stat_get_blocks_fetched(I.oid) -
pg_stat_get_blocks_hit(I.oid) AS idx_blks_read,
pg_stat_get_blocks_hit(I.oid) AS idx_blks_hit
FROM pg_class C JOIN
pg_index X ON C.oid = X.indrelid JOIN
pg_class I ON I.oid = X.indexrelid
LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE C.relkind IN ('r', 't');
CREATE VIEW pg_statio_sys_indexes AS
SELECT * FROM pg_statio_all_indexes
WHERE schemaname IN ('pg_catalog', 'information_schema') OR
schemaname ~ '^pg_toast';
CREATE VIEW pg_statio_user_indexes AS
SELECT * FROM pg_statio_all_indexes
WHERE schemaname NOT IN ('pg_catalog', 'information_schema') AND
schemaname !~ '^pg_toast';
CREATE VIEW pg_statio_all_sequences AS
SELECT
C.oid AS relid,
N.nspname AS schemaname,
C.relname AS relname,
pg_stat_get_blocks_fetched(C.oid) -
pg_stat_get_blocks_hit(C.oid) AS blks_read,
pg_stat_get_blocks_hit(C.oid) AS blks_hit
FROM pg_class C
LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE C.relkind = 'S';
CREATE VIEW pg_statio_sys_sequences AS
SELECT * FROM pg_statio_all_sequences
WHERE schemaname IN ('pg_catalog', 'information_schema') OR
schemaname ~ '^pg_toast';
CREATE VIEW pg_statio_user_sequences AS
SELECT * FROM pg_statio_all_sequences
WHERE schemaname NOT IN ('pg_catalog', 'information_schema') AND
schemaname !~ '^pg_toast';
CREATE VIEW pg_stat_activity AS
SELECT
S.datid AS datid,
D.datname AS datname,
S.procpid,
@ -485,18 +485,18 @@ CREATE VIEW pg_stat_activity AS
S.waiting,
S.current_query
FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_authid U
WHERE S.datid = D.oid AND
S.usesysid = U.oid;
CREATE VIEW pg_stat_database AS
SELECT
D.oid AS datid,
D.datname AS datname,
pg_stat_get_db_numbackends(D.oid) AS numbackends,
pg_stat_get_db_xact_commit(D.oid) AS xact_commit,
pg_stat_get_db_xact_rollback(D.oid) AS xact_rollback,
pg_stat_get_db_blocks_fetched(D.oid) -
pg_stat_get_db_blocks_hit(D.oid) AS blks_read,
pg_stat_get_db_blocks_hit(D.oid) AS blks_hit,
pg_stat_get_db_tuples_returned(D.oid) AS tup_returned,
pg_stat_get_db_tuples_fetched(D.oid) AS tup_fetched,
@ -505,16 +505,16 @@ CREATE VIEW pg_stat_database AS
pg_stat_get_db_tuples_deleted(D.oid) AS tup_deleted
FROM pg_database D;
CREATE VIEW pg_stat_user_functions AS
SELECT
P.oid AS funcid,
N.nspname AS schemaname,
P.proname AS funcname,
pg_stat_get_function_calls(P.oid) AS calls,
pg_stat_get_function_time(P.oid) / 1000 AS total_time,
pg_stat_get_function_self_time(P.oid) / 1000 AS self_time
FROM pg_proc P LEFT JOIN pg_namespace N ON (N.oid = P.pronamespace)
WHERE P.prolang != 12 -- fast check to eliminate built-in functions
AND pg_stat_get_function_calls(P.oid) IS NOT NULL;
CREATE VIEW pg_stat_xact_user_functions AS
@ -580,7 +580,7 @@ CREATE FUNCTION ts_debug(IN config regconfig, IN document text,
OUT lexemes text[])
RETURNS SETOF record AS
$$
SELECT
tt.alias AS alias,
tt.description AS description,
parse.token AS token,
@ -602,7 +602,7 @@ SELECT
LIMIT 1
) AS lexemes
FROM pg_catalog.ts_parse(
(SELECT cfgparser FROM pg_catalog.pg_ts_config WHERE oid = $1 ), $2
) AS parse,
pg_catalog.ts_token_type(
(SELECT cfgparser FROM pg_catalog.pg_ts_config WHERE oid = $1 )


@ -208,7 +208,7 @@ CommentObject(CommentStmt *stmt)
* catalog. Comments on all other objects are recorded in pg_description.
*/
if (stmt->objtype == OBJECT_DATABASE || stmt->objtype == OBJECT_TABLESPACE
|| stmt->objtype == OBJECT_ROLE)
CreateSharedComments(address.objectId, address.classId, stmt->comment);
else
CreateComments(address.objectId, address.classId, address.objectSubId,


@ -2064,7 +2064,7 @@ CopyFrom(CopyState cstate)
done = true;
break;
}
if (fld_count == -1)
{
/*


@ -1191,7 +1191,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
{
ExplainOpenGroup("Plans", "Plans", false, es);
/* Pass current PlanState as head of ancestors list for children */
ancestors = lcons(planstate, ancestors);
}
/* initPlan-s */
@ -1251,7 +1251,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
/* end of child plans */
if (haschildren)
{
ancestors = list_delete_first(ancestors);
ExplainCloseGroup("Plans", "Plans", false, es);
}


@ -608,7 +608,7 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid)
errmsg("could not remove symbolic link \"%s\": %m",
linkloc)));
}
/*
* Create the symlink under PGDATA
*/


@ -28,7 +28,7 @@ SSL
|
|
Normal startup


@ -40,7 +40,7 @@ FILES IN src/include/nodes/
relation.h - planner internal nodes
execnodes.h - executor nodes
memnodes.h - memory nodes
pg_list.h - generic list
Steps to Add a Node
@ -69,7 +69,7 @@ Suppose you wanna define a node Foo:
Historical Note
---------------
Prior to the current simple C structure definitions, the Node structures
used a pseudo-inheritance system which automatically generated creator and
accessor functions. Since every node inherited from LispValue, the whole thing
was a mess. Here's a little anecdote:


@ -37,19 +37,19 @@ This is some implementation notes and opened issues...
First, implementation uses new type of parameters - PARAM_EXEC - to deal
with correlation Vars. When query_planner() is called, it first tries to
replace all upper-query Vars referenced in the current query with Params of
this type. Some global variables are used to keep the mapping of Vars to
Params and of Params to Vars.
After this, all of the current query's SubLinks are processed: for each SubLink
found in the query's qual, union_planner() (the old planner() function) is
called to plan the corresponding subselect (union_planner() calls
query_planner() for "simple" queries and supports UNIONs). After the subselects
are planned, the optimizer knows whether each query is correlated, un-correlated,
or _undirect_ correlated (references some grand-parent Vars but no parent
ones: uncorrelated from the parent's point of view).
For uncorrelated and undirect correlated subqueries of EXPRession or
EXISTS type SubLinks will be replaced with "normal" clauses from
SubLink->Oper list (I changed this list to be list of EXPR nodes,
not just Oper ones). Right sides of these nodes are replaced with
@ -81,7 +81,7 @@ plan->qual) - to initialize them and let them know about changed
Params (from the list of their "interests").
After all SubLinks are processed, query_planner() calls the qual
canonicalizer and does "normal" work. By using Params, the optimizer
is mostly unchanged.
Well, Executor. To get subplans re-evaluated without ExecutorStart()
@ -91,7 +91,7 @@ on each call) ExecReScan() now supports most of Plan types...
Explanation of EXPLAIN.
vac=> explain select * from tmp where x >= (select max(x2) from test2
where y2 = y and exists (select * from tempx where tx = x));
NOTICE: QUERY PLAN:
@ -128,17 +128,17 @@ Opened issues.
for each parent tuple - very slow...
Results of some test. TMP is table with x,y (int4-s), x in 0-9,
y = 100 - x, 1000 tuples (10 duplicates of each tuple). TEST2 is table
with x2, y2 (int4-s), x2 in 1-99, y2 = 100 -x2, 10000 tuples (100 dups).
Trying
select * from tmp where x >= (select max(x2) from test2 where y2 = y);
and
begin;
select y as ty, max(x2) as mx into table tsub from test2, tmp
where y2 = y group by ty;
vacuum tsub;
select x, y from tmp, tsub where x >= mx and y = ty;


@ -247,8 +247,8 @@ xqinside [^']+
/* $foo$ style quotes ("dollar quoting")
* The quoted string starts with $foo$ where "foo" is an optional string
* in the form of an identifier, except that it may not contain "$",
* and extends to the first occurrence of an identical string.
* There is *no* processing of the quoted text.
*
* {dolqfailed} is an error rule to avoid scanner backup when {dolqdelim}
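At the SQL level, the dollar quoting described in this comment looks like
(illustrative):

    SELECT $$no 'escape' processing happens in here$$;
    SELECT $tag$an inner $$ does not terminate the string$tag$;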
@ -334,7 +334,7 @@ self [,()\[\].;\:\+\-\*\/\%\^\<\>\=]
op_chars [\~\!\@\#\^\&\|\`\?\+\-\*\/\%\<\>\=]
operator {op_chars}+
/* we no longer allow unary minus in numbers.
* instead we pass it separately to parser. there it gets
* coerced via doNegate() -- Leon aug 20 1999
*


@ -4,13 +4,13 @@
# Makefile for the port-specific subsystem of the backend
#
# We have two different modes of operation: 1) put stuff specific to Port X
# in subdirectory X and have that subdirectory's make file make it all, and
# 2) use conditional statements in the present make file to include what's
# necessary for a specific port in our own output. (1) came first, but (2)
# is superior for many things, like when the same thing needs to be done for
# multiple ports and you don't want to duplicate files in multiple
# subdirectories. Much of the stuff done via Method 1 today should probably
# be converted to Method 2.
#
# IDENTIFICATION
# src/backend/port/Makefile


@ -9,13 +9,13 @@
# mkldexport objectfile [location]
# where
# objectfile is the current location of the object file.
# location is the eventual (installed) location of the
# object file (if different from the current
# working directory).
#
# [This file comes from the Postgres 4.2 distribution. - ay 7/95]
#
# Header: /usr/local/devel/postgres/src/tools/mkldexport/RCS/mkldexport.sh,v 1.2 1994/03/13 04:59:12 aoki Exp
#
# setting this to nm -B might be better


@ -16,7 +16,7 @@ that a backend attempting to execute CREATE DATABASE core-dumps.) I would
love to know why there is a discrepancy between the published source and
the actual behavior --- tgl 7-Nov-2001.
Appropriate bug reports have been filed with Apple --- see
Radar Bug#s 2767956, 2683531, 2805147. One hopes we can retire this
kluge in the not too distant future.


@ -24,14 +24,14 @@
.global pg_atomic_cas
pg_atomic_cas:
! "cas" only works on sparcv9 and sparcv8plus chips, and
! requires a compiler targeting these CPUs. It will fail
! on a compiler targeting sparcv8, and of course will not
! be understood by a sparcv8 CPU. gcc continues to use
! "ldstub" because it targets sparcv7.
!
! There is actually a trick for embedding "cas" in a
! sparcv8-targeted compiler, but it can only be run
! on a sparcv8plus/v9 cpus:
!


@ -83,7 +83,7 @@ include $(top_srcdir)/src/Makefile.shlib
$(SQLSCRIPT): Makefile snowball_func.sql.in snowball.sql.in
ifeq ($(enable_shared), yes)
echo '-- Language-specific snowball dictionaries' > $@
cat $(srcdir)/snowball_func.sql.in >> $@
@set -e; \
set $(LANGUAGES) ; \
while [ "$$#" -gt 0 ] ; \


@ -264,7 +264,7 @@ while scanning the buffers. (This is a very substantial improvement in
the contention cost of the writer compared to PG 8.0.)
During a checkpoint, the writer's strategy must be to write every dirty
buffer (pinned or not!). We may as well make it start this scan from
NextVictimBuffer, however, so that the first-to-be-written pages are the
ones that backends might otherwise have to write for themselves soon.


@ -84,7 +84,7 @@ backends are concurrently inserting into a relation, contention can be avoided
by having them insert into different pages. But it is also desirable to fill
up pages in sequential order, to get the benefit of OS prefetching and batched
writes. The FSM is responsible for making that happen, and the next slot
pointer helps provide the desired behavior.
Higher-level structure
----------------------


@ -7,7 +7,7 @@ Mon Jul 18 11:09:22 PDT 1988 W.KLAS
The cache synchronization is done using a message queue. Every
backend can register a message which then has to be read by
all backends. A message read by all backends is removed from the
queue automatically. If a message has been lost because the buffer
was full, all backends that haven't read this message will be
told that they have to reset their cache state. This is done


@ -27,5 +27,5 @@ s_lock_test: s_lock.c $(top_builddir)/src/port/libpgport.a
check: s_lock_test
./s_lock_test
clean distclean maintainer-clean:
rm -f s_lock_test


@ -31,7 +31,7 @@ arrival order. There is no timeout.
* Regular locks (a/k/a heavyweight locks). The regular lock manager
supports a variety of lock modes with table-driven semantics, and it has
full deadlock detection and automatic release at transaction end.
Regular locks should be used for all user-driven lock requests.
Acquisition of either a spinlock or a lightweight lock causes query
@ -260,7 +260,7 @@ A key design consideration is that we want to make routine operations
(lock grant and release) run quickly when there is no deadlock, and
avoid the overhead of deadlock handling as much as possible. We do this
using an "optimistic waiting" approach: if a process cannot acquire the
lock it wants immediately, it goes to sleep without any deadlock check.
But it also sets a delay timer, with a delay of DeadlockTimeout
milliseconds (typically set to one second). If the delay expires before
the process is granted the lock it wants, it runs the deadlock
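The delay described here is exposed as the deadlock_timeout GUC; a usage
sketch:

    SET deadlock_timeout = '2s';  -- sleep this long before checking for deadlock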


@ -423,8 +423,8 @@ TParserCopyClose(TParser *prs)
* Character-type support functions, equivalent to is* macros, but
* working with any possible encodings and locales. Notes:
* - with multibyte encoding and C-locale isw* function may fail
* or give wrong result.
* - multibyte encoding and C-locale often are used for
* Asian languages.
* - if locale is C then we use pgwstr instead of wstr
*/
@ -761,8 +761,8 @@ p_isURLPath(TParser *prs)
/*
* returns true if current character has zero display length or
* it's a special sign in several languages. Such characters
* aren't a word-breaker although they aren't an isalpha.
* In beginning of word they aren't a part of it.
*/
static int
p_isspecial(TParser *prs)
@ -2099,7 +2099,7 @@ hlCover(HeadlineParsedText *prs, TSQuery query, int *p, int *q)
return false;
}
static void
mark_fragment(HeadlineParsedText *prs, int highlight, int startpos, int endpos)
{
int i;
@ -2125,7 +2125,7 @@ mark_fragment(HeadlineParsedText *prs, int highlight, int startpos, int endpos)
}
}
typedef struct
{
int4 startpos;
int4 endpos;
@ -2135,16 +2135,16 @@ typedef struct
int2 excluded;
} CoverPos;
static void
get_next_fragment(HeadlineParsedText *prs, int *startpos, int *endpos,
int *curlen, int *poslen, int max_words)
{
int i;
/* Objective: Generate a fragment of words between startpos and endpos
* such that it has at most max_words and both ends has query words.
* If the startpos and endpos are the endpoints of the cover and the
* cover has fewer words than max_words, then this function should
* just return the cover
*/
/* first move startpos to an item */
for(i = *startpos; i <= *endpos; i++)
@ -2156,14 +2156,14 @@ get_next_fragment(HeadlineParsedText *prs, int *startpos, int *endpos,
/* cut endpos to have only max_words */
*curlen = 0;
*poslen = 0;
for(i = *startpos; i <= *endpos && *curlen < max_words; i++)
{
if (!NONWORDTOKEN(prs->words[i].type))
*curlen += 1;
if (prs->words[i].item && !prs->words[i].repeated)
*poslen += 1;
}
/* if the cover was cut then move back endpos to a query item */
if (*endpos > i)
{
*endpos = i;
@ -2174,31 +2174,31 @@ get_next_fragment(HeadlineParsedText *prs, int *startpos, int *endpos,
break;
if (!NONWORDTOKEN(prs->words[i].type))
*curlen -= 1;
}
}
}
static void
mark_hl_fragments(HeadlineParsedText *prs, TSQuery query, int highlight,
int shortword, int min_words,
int max_words, int max_fragments)
{
int4 poslen, curlen, i, f, num_f = 0;
int4 stretch, maxstretch, posmarker;
int4 startpos = 0,
endpos = 0,
p = 0,
q = 0;
int4 numcovers = 0,
maxcovers = 32;
int4 minI, minwords, maxitems;
CoverPos *covers;
covers = palloc(maxcovers * sizeof(CoverPos));
/* get all covers */
while (hlCover(prs, query, &p, &q))
{
@ -2207,7 +2207,7 @@ mark_hl_fragments(HeadlineParsedText *prs, TSQuery query, int highlight,
/* Break the cover into smaller fragments such that each fragment
* has at most max_words. Also ensure that each end of the fragment
* is a query word. This will allow us to stretch the fragment in
* either direction
*/
@ -2228,9 +2228,9 @@ mark_hl_fragments(HeadlineParsedText *prs, TSQuery query, int highlight,
numcovers ++;
startpos = endpos + 1;
endpos = q;
}
/* move p to generate the next cover */
p++;
}
/* choose best covers */
@ -2240,13 +2240,13 @@ mark_hl_fragments(HeadlineParsedText *prs, TSQuery query, int highlight,
minwords = 0x7fffffff;
minI = -1;
/* Choose the cover that contains max items.
* In case of tie choose the one with smaller
* number of words.
*/
for (i = 0; i < numcovers; i ++)
{
if (!covers[i].in && !covers[i].excluded &&
(maxitems < covers[i].poslen || (maxitems == covers[i].poslen
&& minwords > covers[i].curlen)))
{
maxitems = covers[i].poslen;
@ -2263,15 +2263,15 @@ mark_hl_fragments(HeadlineParsedText *prs, TSQuery query, int highlight,
endpos = covers[minI].endpos;
curlen = covers[minI].curlen;
/* stretch the cover if cover size is lower than max_words */
if (curlen < max_words)
{
/* divide the stretch on both sides of cover */
maxstretch = (max_words - curlen)/2;
/* first stretch the startpos
* stop stretching if
* 1. we hit the beginning of document
* 2. exceed maxstretch
* 3. we hit an already marked fragment
*/
stretch = 0;
posmarker = startpos;
@ -2297,7 +2297,7 @@ mark_hl_fragments(HeadlineParsedText *prs, TSQuery query, int highlight,
{
if (!NONWORDTOKEN(prs->words[i].type))
curlen ++;
posmarker = i;
}
/* cut back endpos till we find a non-short token */
for ( i = posmarker; i > endpos && (NOENDTOKEN(prs->words[i].type) || prs->words[i].len <= shortword); i--)
@ -2316,7 +2316,7 @@ mark_hl_fragments(HeadlineParsedText *prs, TSQuery query, int highlight,
/* exclude overlapping covers */
for (i = 0; i < numcovers; i ++)
{
if (i != minI && ( (covers[i].startpos >= covers[minI].startpos && covers[i].startpos <= covers[minI].endpos) || (covers[i].endpos >= covers[minI].startpos && covers[i].endpos <= covers[minI].endpos)))
covers[i].excluded = 1;
}
}
@ -2340,7 +2340,7 @@ mark_hl_fragments(HeadlineParsedText *prs, TSQuery query, int highlight,
}
static void
mark_hl_words(HeadlineParsedText *prs, TSQuery query, int highlight,
int shortword, int min_words, int max_words)
{
int p = 0,
@ -2552,7 +2552,7 @@ prsd_headline(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("MaxFragments should be >= 0")));
}
if (max_fragments == 0)
/* call the default headline generator */
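This fragment-marking code is what ts_headline() exercises when MaxFragments
is greater than zero; a usage sketch:

    SELECT ts_headline('english',
        'the quick brown fox jumps over the lazy dog',
        to_tsquery('english', 'fox & dog'),
        'MaxFragments=2, MaxWords=7, MinWords=3');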


@ -93,7 +93,7 @@ my $tabfile = $output_path . 'fmgrtab.c';
open H, '>', $oidsfile . $tmpext or die "Could not open $oidsfile$tmpext: $!";
open T, '>', $tabfile . $tmpext or die "Could not open $tabfile$tmpext: $!";
print H
qq|/*-------------------------------------------------------------------------
*
* fmgroids.h


@ -102,7 +102,7 @@ typedef int16 NumericDigit;
* remaining bits are never examined. Currently, we always initialize these
* to zero, but it might be possible to use them for some other purpose in
* the future.
*
* In the NumericShort format, the remaining 14 bits of the header word
* (n_short.n_header) are allocated as follows: 1 for sign (positive or
* negative), 6 for dynamic scale, and 7 for weight. In practice, most
@ -3725,7 +3725,7 @@ make_result(NumericVar *var)
len = NUMERIC_HDRSZ_SHORT + n * sizeof(NumericDigit);
result = (Numeric) palloc(len);
SET_VARSIZE(result, len);
result->choice.n_short.n_header =
(sign == NUMERIC_NEG ? (NUMERIC_SHORT | NUMERIC_SHORT_SIGN_MASK)
: NUMERIC_SHORT)
| (var->dscale << NUMERIC_SHORT_DSCALE_SHIFT)


@ -3054,7 +3054,7 @@ text_to_array_internal(PG_FUNCTION_ARGS)
int start_posn;
int end_posn;
int chunk_len;
text_position_setup(inputstring, fldsep, &state);
/*
@ -3085,7 +3085,7 @@ text_to_array_internal(PG_FUNCTION_ARGS)
PointerGetDatum(inputstring),
is_null, 1));
}
start_posn = 1;
/* start_ptr points to the start_posn'th character of inputstring */
start_ptr = VARDATA_ANY(inputstring);
@ -3110,7 +3110,7 @@ text_to_array_internal(PG_FUNCTION_ARGS)
/* must build a temp text datum to pass to accumArrayResult */
result_text = cstring_to_text_with_len(start_ptr, chunk_len);
is_null = null_string ? text_isequal(result_text, null_string) : false;
/* stash away this field */
astate = accumArrayResult(astate,
PointerGetDatum(result_text),
@ -3133,19 +3133,19 @@ text_to_array_internal(PG_FUNCTION_ARGS)
}
else
{
/*
* When fldsep is NULL, each character in the inputstring becomes an
* element in the result array. The separator is effectively the space
* between characters.
*/
inputstring_len = VARSIZE_ANY_EXHDR(inputstring);
/* return empty array for empty input string */
if (inputstring_len < 1)
PG_RETURN_ARRAYTYPE_P(construct_empty_array(TEXTOID));
start_ptr = VARDATA_ANY(inputstring);
while (inputstring_len > 0)
{
int chunk_len = pg_mblen(start_ptr);
@ -3155,7 +3155,7 @@ text_to_array_internal(PG_FUNCTION_ARGS)
/* must build a temp text datum to pass to accumArrayResult */
result_text = cstring_to_text_with_len(start_ptr, chunk_len);
is_null = null_string ? text_isequal(result_text, null_string) : false;
/* stash away this field */
astate = accumArrayResult(astate,
PointerGetDatum(result_text),
@ -3205,7 +3205,7 @@ array_to_text_null(PG_FUNCTION_ARGS)
/* returns NULL when first or second parameter is NULL */
if (PG_ARGISNULL(0) || PG_ARGISNULL(1))
PG_RETURN_NULL();
v = PG_GETARG_ARRAYTYPE_P(0);
fldsep = text_to_cstring(PG_GETARG_TEXT_PP(1));
@ -3332,7 +3332,7 @@ array_to_text_internal(FunctionCallInfo fcinfo, ArrayType *v,
}
}
}
result = cstring_to_text_with_len(buf.data, buf.len);
pfree(buf.data);
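These internal routines back string_to_array()/array_to_string(); sketches of
the NULL handling discussed above (the three-argument forms are assumed
present in this tree):

    SELECT string_to_array('abc', NULL);                    -- {a,b,c}: one element per character
    SELECT string_to_array('a,*,c', ',', '*');              -- {a,NULL,c}: '*' maps to NULL
    SELECT array_to_string(ARRAY['a',NULL,'c'], ',', '?');  -- a,?,c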


@ -3601,7 +3601,7 @@ xml_is_well_formed(PG_FUNCTION_ARGS)
{
#ifdef USE_LIBXML
text *data = PG_GETARG_TEXT_P(0);
PG_RETURN_BOOL(wellformed_xml(data, xmloption));
#else
NO_XML_SUPPORT();
@ -3614,7 +3614,7 @@ xml_is_well_formed_document(PG_FUNCTION_ARGS)
{
#ifdef USE_LIBXML
text *data = PG_GETARG_TEXT_P(0);
PG_RETURN_BOOL(wellformed_xml(data, XMLOPTION_DOCUMENT));
#else
NO_XML_SUPPORT();
@ -3627,7 +3627,7 @@ xml_is_well_formed_content(PG_FUNCTION_ARGS)
{
#ifdef USE_LIBXML
text *data = PG_GETARG_TEXT_P(0);
PG_RETURN_BOOL(wellformed_xml(data, XMLOPTION_CONTENT));
#else
NO_XML_SUPPORT();
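Usage sketches for the three variants (the xmloption setting governs only the
first):

    SET xmloption = content;
    SELECT xml_is_well_formed('<x>ok</x>');        -- true
    SELECT xml_is_well_formed_document('chunk');   -- false: not a single document
    SELECT xml_is_well_formed_content('chunk');    -- true: bare text is valid content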


@ -8,7 +8,7 @@
# map files provided by Unicode organization.
# Unfortunately it is prohibited by the organization
# to distribute the map files. So if you try to use this script,
# you have to obtain GB2312.TXT from
# the organization's ftp site.
#
# GB2312.TXT format:


@ -45,7 +45,7 @@ while($line = <FILE> ){
} else {
next;
}
$ucs = hex($u);
$code = hex($c);
$utf = &ucs2utf($ucs);
@ -73,7 +73,7 @@ for $index ( sort {$a <=> $b} keys( %array ) ){
if( $count == 0 ){
printf FILE " {0x%08x, 0x%06x} /* %s */\n", $index, $code, $comment{ $code };
} else {
printf FILE " {0x%08x, 0x%06x}, /* %s */\n", $index, $code, $comment{ $code };
}
}
@ -135,7 +135,7 @@ if ($TEST == 1) {
($code >= 0x8ea1 && $code <= 0x8efe) ||
($code >= 0x8fa1a1 && $code <= 0x8ffefe) ||
($code >= 0xa1a1 && $code <= 0x8fefe))) {
$v1 = hex(substr($index, 0, 8));
$v2 = hex(substr($index, 8, 8));
@ -192,7 +192,7 @@ while($line = <FILE> ){
} else {
next;
}
$ucs = hex($u);
$code = hex($c);
$utf = &ucs2utf($ucs);
@ -220,7 +220,7 @@ for $index ( sort {$a <=> $b} keys( %array ) ){
if( $count == 0 ){
printf FILE " {0x%06x, 0x%08x} /* %s */\n", $index, $code, $comment{ $code };
} else {
printf FILE " {0x%06x, 0x%08x}, /* %s */\n", $index, $code, $comment{ $code };
}
}


@ -8,7 +8,7 @@
# map files provided by Unicode organization.
# Unfortunately it is prohibited by the organization
# to distribute the map files. So if you try to use this script,
# you have to obtain JIS0201.TXT, JIS0208.TXT, JIS0212.TXT from
# the organization's ftp site.
#
# JIS0201.TXT format:


@ -8,7 +8,7 @@
# map files provided by Unicode organization.
# Unfortunately it is prohibited by the organization
# to distribute the map files. So if you try to use this script,
# you have to obtain OLD5601.TXT from
# the organization's ftp site.
#
# OLD5601.TXT format:


@ -8,7 +8,7 @@
# map files provided by Unicode organization.
# Unfortunately it is prohibited by the organization
# to distribute the map files. So if you try to use this script,
# you have to obtain CNS11643.TXT from
# the organization's ftp site.
#
# CNS11643.TXT format:


@ -43,7 +43,7 @@ while($line = <FILE> ){
} else {
next;
}
$ucs = hex($u);
$code = hex($c);
$utf = &ucs2utf($ucs);
@ -71,7 +71,7 @@ for $index ( sort {$a <=> $b} keys( %array ) ){
if( $count == 0 ){
printf FILE " {0x%08x, 0x%06x} /* %s */\n", $index, $code, $comment{ $code };
} else {
printf FILE " {0x%08x, 0x%06x}, /* %s */\n", $index, $code, $comment{ $code };
}
}
@ -132,7 +132,7 @@ while($line = <FILE> ){
} else {
next;
}
$ucs = hex($u);
$code = hex($c);
$utf = &ucs2utf($ucs);
@ -161,7 +161,7 @@ for $index ( sort {$a <=> $b} keys( %array ) ){
if( $count == 0 ){
printf FILE " {0x%04x, 0x%08x} /* %s */\n", $index, $code, $comment{ $code };
} else {
printf FILE " {0x%04x, 0x%08x}, /* %s */\n", $index, $code, $comment{ $code };
}
}


@ -8,7 +8,7 @@
# map files provided by Unicode organization.
# Unfortunately it is prohibited by the organization
# to distribute the map files. So if you try to use this script,
# you have to obtain SHIFTJIS.TXT from
# the organization's ftp site.
#
# SHIFTJIS.TXT format:


@ -13,12 +13,12 @@ sub ucs2utf {
} elsif ($ucs > 0x007f && $ucs <= 0x07ff) {
$utf = (($ucs & 0x003f) | 0x80) | ((($ucs >> 6) | 0xc0) << 8);
} elsif ($ucs > 0x07ff && $ucs <= 0xffff) {
$utf = ((($ucs >> 12) | 0xe0) << 16) |
(((($ucs & 0x0fc0) >> 6) | 0x80) << 8) |
(($ucs & 0x003f) | 0x80);
} else {
$utf = ((($ucs >> 18) | 0xf0) << 24) |
(((($ucs & 0x3ffff) >> 12) | 0x80) << 16) |
(((($ucs & 0x0fc0) >> 6) | 0x80) << 8) |
(($ucs & 0x003f) | 0x80);
}


@ -37,5 +37,5 @@ endif
# Note: guc-file.c is not deleted by 'make clean',
# since we want to ship it in distribution tarballs.
clean:
@rm -f lex.yy.c


@ -4,7 +4,7 @@
## in postgresql.conf.sample:
## 1) the valid config settings may be preceded by a '#', but NOT '# '
## (we use this to skip comments)
## 2) the valid config settings will be followed immediately by ' ='
## 2) the valid config settings will be followed immediately by ' ='
## (at least one space preceding the '=')
## in guc.c:
## 3) the options have PGC_ on the same line as the option
@ -14,7 +14,7 @@
## 1) Don't know what to do with TRANSACTION ISOLATION LEVEL
## if an option is valid but shows up in only one file (guc.c but not
## postgresql.conf.sample), it should be listed here so that it
## postgresql.conf.sample), it should be listed here so that it
## can be ignored
INTENTIONALLY_NOT_INCLUDED="autocommit debug_deadlocks \
is_superuser lc_collate lc_ctype lc_messages lc_monetary lc_numeric lc_time \
@ -23,35 +23,35 @@ session_authorization trace_lock_oidmin trace_lock_table trace_locks trace_lwloc
trace_notify trace_userlocks transaction_isolation transaction_read_only \
zero_damaged_pages"
### What options are listed in postgresql.conf.sample, but don't appear
### What options are listed in postgresql.conf.sample, but don't appear
### in guc.c?
# grab everything that looks like a setting and convert it to lower case
SETTINGS=`grep ' =' postgresql.conf.sample |
SETTINGS=`grep ' =' postgresql.conf.sample |
grep -v '^# ' | # strip comments
sed -e 's/^#//' |
sed -e 's/^#//' |
awk '{print $1}'`
SETTINGS=`echo "$SETTINGS" | tr 'A-Z' 'a-z'`
for i in $SETTINGS ; do
for i in $SETTINGS ; do
hidden=0
## it sure would be nice to replace this with an sql "not in" statement
## it doesn't seem to make sense to have things in .sample and not in guc.c
# for hidethis in $INTENTIONALLY_NOT_INCLUDED ; do
# if [ "$hidethis" = "$i" ] ; then
# if [ "$hidethis" = "$i" ] ; then
# hidden=1
# fi
# done
if [ "$hidden" -eq 0 ] ; then
grep -i '"'$i'"' guc.c > /dev/null
if [ $? -ne 0 ] ; then
echo "$i seems to be missing from guc.c";
fi;
if [ $? -ne 0 ] ; then
echo "$i seems to be missing from guc.c";
fi;
fi
done
### What options are listed in guc.c, but don't appear
### What options are listed in guc.c, but don't appear
### in postgresql.conf.sample?
# grab everything that looks like a setting and convert it to lower case

View File

@ -463,9 +463,9 @@ ParseConfigFile(const char *config_file, const char *calling_file,
/* now we must have the option value */
if (token != GUC_ID &&
token != GUC_STRING &&
token != GUC_INTEGER &&
token != GUC_REAL &&
token != GUC_STRING &&
token != GUC_INTEGER &&
token != GUC_REAL &&
token != GUC_UNQUOTED_STRING)
goto parse_error;
if (token == GUC_STRING) /* strip quotes and escapes */
@ -573,7 +573,7 @@ ParseConfigFile(const char *config_file, const char *calling_file,
else
ereport(elevel,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("syntax error in file \"%s\" line %u, near token \"%s\"",
errmsg("syntax error in file \"%s\" line %u, near token \"%s\"",
config_file, ConfigFileLineno, yytext)));
OK = false;

View File

@ -62,7 +62,7 @@
# (change requires restart)
#port = 5432 # (change requires restart)
#max_connections = 100 # (change requires restart)
# Note: Increasing max_connections costs ~400 bytes of shared memory per
# Note: Increasing max_connections costs ~400 bytes of shared memory per
# connection slot, plus lock space (see max_locks_per_transaction).
#superuser_reserved_connections = 3 # (change requires restart)
#unix_socket_directory = '' # (change requires restart)
@ -154,7 +154,7 @@
# (change requires restart)
#fsync = on # turns forced synchronization on or off
#synchronous_commit = on # immediate fsync at commit
#wal_sync_method = fsync # the default is the first option
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync
@ -246,7 +246,7 @@
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#join_collapse_limit = 8 # 1 disables collapsing of explicit
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
@ -284,7 +284,7 @@
# in all cases.
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
@ -412,7 +412,7 @@
# AUTOVACUUM PARAMETERS
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and
# their durations, > 0 logs only
@ -423,7 +423,7 @@
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_analyze_threshold = 50 # min number of row updates before
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze

View File

@ -377,7 +377,7 @@ constraining context-type designers very much.)
Given this, the pfree routine will look something like
StandardChunkHeader * header =
StandardChunkHeader * header =
(StandardChunkHeader *) ((char *) p - sizeof(StandardChunkHeader));
(*header->mycontext->methods->free_p) (p);
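
The point of the standard chunk header is that pfree can recover the owning context from the pointer alone, by stepping back over the header. A toy C model of that back-pointer trick (hypothetical names, not the real StandardChunkHeader declaration):

#include <stdio.h>
#include <stdlib.h>

typedef struct ToyContext ToyContext;

typedef struct
{
    ToyContext *mycontext;      /* which context owns this chunk */
} ToyChunkHeader;

struct ToyContext
{
    const char *name;
};

static void *
toy_alloc(ToyContext *cxt, size_t size)
{
    /* allocate header + payload; hand back a pointer past the header */
    ToyChunkHeader *header = malloc(sizeof(ToyChunkHeader) + size);

    header->mycontext = cxt;
    return (char *) header + sizeof(ToyChunkHeader);
}

static void
toy_free(void *p)
{
    /* step back over the header to rediscover the owning context */
    ToyChunkHeader *header =
        (ToyChunkHeader *) ((char *) p - sizeof(ToyChunkHeader));

    printf("freeing chunk owned by context \"%s\"\n",
           header->mycontext->name);
    free(header);
}

int
main(void)
{
    ToyContext  cxt = {"demo"};
    void       *p = toy_alloc(&cxt, 64);

    toy_free(p);
    return 0;
}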

View File

@ -19,17 +19,17 @@
!IF "$(OS)" == "Windows_NT"
NULL=
!ELSE
!ELSE
NULL=nul
!ENDIF
!ENDIF
ALL:
ALL:
cd include
if not exist pg_config.h copy pg_config.h.win32 pg_config.h
if not exist pg_config_os.h copy port\win32.h pg_config_os.h
cd ..
cd interfaces\libpq
make -N -DCFG=$(CFG) /f bcc32.mak
make -N -DCFG=$(CFG) /f bcc32.mak
cd ..\..
echo All Win32 parts have been built!

View File

@ -19,7 +19,7 @@ or, to dump in TAR format
pg_dump <db-name> -Ft > <backup-file>
To restore, try
To list contents:
pg_restore -l <backup-file> | less
@ -62,12 +62,12 @@ or, simply:
TAR
===
The TAR archive that pg_dump creates currently has a blank username & group for the files,
The TAR archive that pg_dump creates currently has a blank username & group for the files,
but should be otherwise valid. It also includes a 'restore.sql' script which is there for
the benefit of humans. The script is never used by pg_restore.
Note: the TAR format archive can only be used as input into pg_restore if it is in TAR form.
(ie. you should not extract the files then expect pg_restore to work).
(ie. you should not extract the files then expect pg_restore to work).
You can extract, edit, and tar the files again, and it should work, but the 'toc'
file should go at the start, the data files be in the order they are used, and

View File

@ -10498,7 +10498,7 @@ dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
}
/*
* dumpSecLabel
* dumpSecLabel
*
* This routine is used to dump any security labels associated with the
* object handed to this routine. The routine takes a constant character

View File

@ -277,8 +277,8 @@ xqinside [^']+
/* $foo$ style quotes ("dollar quoting")
* The quoted string starts with $foo$ where "foo" is an optional string
* in the form of an identifier, except that it may not contain "$",
* and extends to the first occurrence of an identical string.
* in the form of an identifier, except that it may not contain "$",
* and extends to the first occurrence of an identical string.
* There is *no* processing of the quoted text.
*
* {dolqfailed} is an error rule to avoid scanner backup when {dolqdelim}
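
The delimiter rule described above — $foo$ where "foo" is an optional identifier-like tag that may not contain "$" — can be sketched outside the lexer as a small C helper (illustrative only; the authoritative rule is the flex pattern {dolqdelim}):

#include <ctype.h>

/* Return the length of a leading $foo$ dollar-quote delimiter in buf,
 * or 0 if buf does not start with one.  Sketch of the rule stated in
 * the comment above, not the scanner's actual code. */
static int
dollar_quote_delim_len(const char *buf)
{
    int     i;

    if (buf[0] != '$')
        return 0;
    for (i = 1; buf[i] != '\0' && buf[i] != '$'; i++)
    {
        /* the tag must look like an identifier: letters, digits, '_' */
        if (!isalnum((unsigned char) buf[i]) && buf[i] != '_')
            return 0;
        /* and, like an identifier, it cannot start with a digit */
        if (i == 1 && isdigit((unsigned char) buf[i]))
            return 0;
    }
    /* "$$" (empty tag) is valid; an unterminated "$foo" is not */
    return (buf[i] == '$') ? i + 1 : 0;
}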
@ -364,7 +364,7 @@ self [,()\[\].;\:\+\-\*\/\%\^\<\>\=]
op_chars [\~\!\@\#\^\&\|\`\?\+\-\*\/\%\<\>\=]
operator {op_chars}+
/* we no longer allow unary minus in numbers.
/* we no longer allow unary minus in numbers.
* instead we pass it separately to parser. there it gets
* coerced via doNegate() -- Leon aug 20 1999
*

View File

@ -10,7 +10,7 @@
*
*-------------------------------------------------------------------------
*/
#ifndef OBJECTADDRESS_H
#ifndef OBJECTADDRESS_H
#define OBJECTADDRESS_H
#include "nodes/parsenodes.h"

View File

@ -179,7 +179,7 @@
#define HAVE_INTTYPES_H 1
/* Define to 1 if you have the global variable 'int timezone'. */
#define HAVE_INT_TIMEZONE
#define HAVE_INT_TIMEZONE
/* Define to 1 if you have support for IPv6. */
#define HAVE_IPV6 1
@ -249,7 +249,7 @@
/* Define to 1 if `long long int' works and is 64 bits. */
#if (_MSC_VER > 1200)
#define HAVE_LONG_LONG_INT_64
#define HAVE_LONG_LONG_INT_64
#endif
/* Define to 1 if you have the `memmove' function. */

View File

@ -856,7 +856,7 @@ spin_delay(void)
#endif
#endif /* !defined(HAS_TEST_AND_SET) */

View File

@ -3,7 +3,7 @@ src/interfaces/ecpg/README.dynSQL
descriptor statements have the following shortcomings
- input descriptors (USING DESCRIPTOR <name>) are not supported
Reason: to fully support dynamic SQL the frontend/backend communication
should change to recognize input parameters.
Since this is not likely to happen in the near future and you

View File

@ -164,7 +164,7 @@ ECPGprepare(int lineno, const char *connection_name, const bool questionmarks, c
struct prepared_statement *this,
*prev;
(void) questionmarks; /* quiet the compiler */
(void) questionmarks; /* quiet the compiler */
con = ecpg_get_connection(connection_name);
if (!ecpg_init(con, connection_name, lineno))

View File

@ -58,7 +58,7 @@ else
endif
preproc.y: ../../../backend/parser/gram.y parse.pl ecpg.addons ecpg.header ecpg.tokens ecpg.trailer ecpg.type
$(PERL) $(srcdir)/parse.pl $(srcdir) < $< > $@
$(PERL) $(srcdir)/parse.pl $(srcdir) < $< > $@
$(PERL) $(srcdir)/check_rules.pl $(srcdir) $<
ecpg_keywords.o c_keywords.o keywords.o preproc.o parser.o: preproc.h

View File

@ -102,7 +102,7 @@ while (<GRAM>) {
$block = $block . $arr[$fieldIndexer];
}
}
}
}
close GRAM;
@ -113,7 +113,7 @@ line: while (<ECPG>) {
@Fld = split(' ', $_, -1);
if (!/^ECPG:/) {
next line;
next line;
}
if ($found{$Fld[2]} ne 'found') {

View File

@ -40,7 +40,7 @@ ECPG: stmtPrepareStmt block
{
if ($1.type == NULL || strlen($1.type) == 0)
output_prepare_statement($1.name, $1.stmt);
else
else
output_statement(cat_str(5, make_str("prepare"), $1.name, $1.type, make_str("as"), $1.stmt), 0, ECPGst_normal);
}
ECPG: stmtTransactionStmt block
@ -109,7 +109,7 @@ ECPG: stmtViewStmt rule
if (!strcmp($1, "all"))
fprintf(yyout, "{ ECPGdeallocate_all(__LINE__, %d, %s);", compat, con);
else if ($1[0] == ':')
else if ($1[0] == ':')
fprintf(yyout, "{ ECPGdeallocate(__LINE__, %d, %s, %s);", compat, con, $1+1);
else
fprintf(yyout, "{ ECPGdeallocate(__LINE__, %d, %s, \"%s\");", compat, con, $1);

View File

@ -103,7 +103,7 @@ mmerror(int error_code, enum errortype type, const char *error, ...)
fclose(yyin);
if (yyout)
fclose(yyout);
if (strcmp(output_filename, "-") != 0 && unlink(output_filename) != 0)
fprintf(stderr, _("could not remove output file \"%s\"\n"), output_filename);
exit(error_code);

View File

@ -3,7 +3,7 @@
/* special embedded SQL tokens */
%token SQL_ALLOCATE SQL_AUTOCOMMIT SQL_BOOL SQL_BREAK
SQL_CALL SQL_CARDINALITY SQL_CONNECT
SQL_COUNT
SQL_COUNT
SQL_DATETIME_INTERVAL_CODE
SQL_DATETIME_INTERVAL_PRECISION SQL_DESCRIBE
SQL_DESCRIPTOR SQL_DISCONNECT SQL_FOUND
@ -23,5 +23,5 @@
S_STATIC S_SUB S_VOLATILE
S_TYPEDEF
%token CSTRING CVARIABLE CPP_LINE IP
%token CSTRING CVARIABLE CPP_LINE IP
%token DOLCONST ECONST NCONST UCONST UIDENT

View File

@ -70,7 +70,7 @@ connection_target: opt_database_name opt_server opt_port
/* old style: dbname[@server][:port] */
if (strlen($2) > 0 && *($2) != '@')
mmerror(PARSE_ERROR, ET_ERROR, "expected \"@\", found \"%s\"", $2);
/* C strings need to be handled differently */
if ($1[0] == '\"')
$$ = $1;
@ -241,7 +241,7 @@ opt_options: Op connect_options
| /*EMPTY*/ { $$ = EMPTY; }
;
connect_options: ColId opt_opt_value
connect_options: ColId opt_opt_value
{ $$ = make2_str($1, $2); }
| ColId opt_opt_value Op connect_options
{
@ -347,7 +347,7 @@ ECPGCursorStmt: DECLARE cursor_name cursor_options CURSOR opt_hold FOR prepared
;
ECPGExecuteImmediateStmt: EXECUTE IMMEDIATE execstring
{
{
/* execute immediate means prepare the statement and
* immediately execute it */
$$ = $3;
@ -631,7 +631,7 @@ var_type: simple_type
$$.type_index = this->type->type_index;
if (this->type->type_sizeof && strlen(this->type->type_sizeof) != 0)
$$.type_sizeof = this->type->type_sizeof;
else
else
$$.type_sizeof = cat_str(3, make_str("sizeof("), mm_strdup(this->name), make_str(")"));
struct_member_list[struct_level] = ECPGstruct_member_dup(this->struct_member_list);
@ -862,7 +862,7 @@ variable: opt_pointer ECPGColLabel opt_array_bounds opt_bit_field opt_initialize
type = ECPGmake_simple_type(actual_type[struct_level].type_enum, length, varchar_counter);
else
type = ECPGmake_array_type(ECPGmake_simple_type(actual_type[struct_level].type_enum, length, varchar_counter), dimension);
if (strcmp(dimension, "0") == 0 || abs(atoi(dimension)) == 1)
*dim = '\0';
else
@ -1037,7 +1037,7 @@ UsingValue: UsingConst
}
| civar { $$ = EMPTY; }
| civarind { $$ = EMPTY; }
;
;
UsingConst: Iconst { $$ = $1; }
| '+' Iconst { $$ = cat_str(2, make_str("+"), $2); }
@ -1857,7 +1857,7 @@ execute_rest: /* EMPTY */ { $$ = EMPTY; }
| ecpg_into ecpg_using { $$ = EMPTY; }
| ecpg_using { $$ = EMPTY; }
| ecpg_into { $$ = EMPTY; }
;
;
ecpg_into: INTO into_list { $$ = EMPTY; }
| into_descriptor { $$ = $1; }

View File

@ -113,7 +113,7 @@
%type <str> variable
%type <str> variable_declarations
%type <str> variable_list
%type <str> vt_declarations
%type <str> vt_declarations
%type <str> Op
%type <str> IntConstVar

View File

@ -93,7 +93,7 @@ line: while (<>) {
chomp; # strip record separator
@Fld = split(' ', $_, -1);
# Dump the action for a rule -
# Dump the action for a rule -
# mode indicates if we are processing the 'stmt:' rule (mode==0 means normal, mode==1 means stmt:)
# flds are the fields to use. These may start with a '$' - in which case they are the result of a previous non-terminal
# if they dont start with a '$' then they are token name
@ -235,8 +235,8 @@ line: while (<>) {
if ($replace_token{$arr[$fieldIndexer]}) {
$arr[$fieldIndexer] = $replace_token{$arr[$fieldIndexer]};
}
# Are we looking at a declaration of a non-terminal ?
# Are we looking at a declaration of a non-terminal ?
if (($arr[$fieldIndexer] =~ '[A-Za-z0-9]+:') || $arr[$fieldIndexer + 1] eq ':') {
$non_term_id = $arr[$fieldIndexer];
$s = ':', $non_term_id =~ s/$s//g;
@ -253,7 +253,7 @@ line: while (<>) {
$copymode = 'on';
}
$line = $line . ' ' . $arr[$fieldIndexer];
# Do we have the : attached already ?
# Do we have the : attached already ?
# If yes, we'll have already printed the ':'
if (!($arr[$fieldIndexer] =~ '[A-Za-z0-9]+:')) {
# Consume the ':' which is next...
@ -261,7 +261,7 @@ line: while (<>) {
$fieldIndexer++;
}
# Special mode?
# Special mode?
if ($non_term_id eq 'stmt') {
$stmt_mode = 1;
}
@ -380,7 +380,7 @@ sub dump {
sub dump_fields {
local($mode, *flds, $len, $ln) = @_;
if ($mode == 0) {
#Normal
#Normal
&add_to_buffer('rules', $ln);
if ($feature_not_supported == 1) {
# we found an unsupported feature, but we have to
@ -393,7 +393,7 @@ sub dump_fields {
}
if ($len == 0) {
# We have no fields ?
# We have no fields ?
&add_to_buffer('rules', " \$\$=EMPTY; }");
}
else {
@ -418,7 +418,7 @@ sub dump_fields {
}
}
# So - how many fields did we end up with ?
# So - how many fields did we end up with ?
if ($cnt == 1) {
# Straight assignement
$str = " \$\$ = " . $flds_new{0} . ';';

View File

@ -58,8 +58,8 @@ static bool isinformixdefine(void);
char *token_start;
int state_before;
struct _yy_buffer
{
struct _yy_buffer
{
YY_BUFFER_STATE buffer;
long lineno;
char *filename;
@ -71,7 +71,7 @@ static char *old;
#define MAX_NESTED_IF 128
static short preproc_tos;
static short ifcond;
static struct _if_value
static struct _if_value
{
short condition;
short else_branch;
@ -87,7 +87,7 @@ static struct _if_value
%option yylineno
%x C SQL incl def def_ident undef
%x C SQL incl def def_ident undef
/*
* OK, here is a short description of lex/flex rules behavior.
@ -518,7 +518,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*.
/* throw back all but the initial "$" */
yyless(1);
/* and treat it as {other} */
return yytext[0];
return yytext[0];
}
<SQL>{dolqdelim} {
token_start = yytext;
@ -737,7 +737,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*.
}
<SQL>{identifier} {
const ScanKeyword *keyword;
if (!isdefine())
{
/* Is it an SQL/ECPG keyword? */
@ -764,7 +764,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*.
}
<SQL>{other} { return yytext[0]; }
<C>{exec_sql} { BEGIN(SQL); return SQL_START; }
<C>{informix_special} {
<C>{informix_special} {
/* are we simulating Informix? */
if (INFORMIX_MODE)
{
@ -939,7 +939,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*.
yyterminate();
}
<C>{exec_sql}{include}{space}* { BEGIN(incl); }
<C>{informix_special}{include}{space}* {
<C>{informix_special}{include}{space}* {
/* are we simulating Informix? */
if (INFORMIX_MODE)
{
@ -952,7 +952,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*.
}
}
<C,xskip>{exec_sql}{ifdef}{space}* { ifcond = TRUE; BEGIN(xcond); }
<C,xskip>{informix_special}{ifdef}{space}* {
<C,xskip>{informix_special}{ifdef}{space}* {
/* are we simulating Informix? */
if (INFORMIX_MODE)
{
@ -966,7 +966,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*.
}
}
<C,xskip>{exec_sql}{ifndef}{space}* { ifcond = FALSE; BEGIN(xcond); }
<C,xskip>{informix_special}{ifndef}{space}* {
<C,xskip>{informix_special}{ifndef}{space}* {
/* are we simulating Informix? */
if (INFORMIX_MODE)
{
@ -990,7 +990,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*.
ifcond = TRUE; BEGIN(xcond);
}
<C,xskip>{informix_special}{elif}{space}* {
<C,xskip>{informix_special}{elif}{space}* {
/* are we simulating Informix? */
if (INFORMIX_MODE)
{
@ -1089,7 +1089,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*.
<xcond>{identifier}{space}*";" {
if (preproc_tos >= MAX_NESTED_IF-1)
mmerror(PARSE_ERROR, ET_FATAL, "too many nested EXEC SQL IFDEF conditions");
else
else
{
struct _defines *defptr;
unsigned int i;
@ -1132,7 +1132,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*.
<def_ident>{other}|\n {
mmerror(PARSE_ERROR, ET_FATAL, "missing identifier in EXEC SQL DEFINE command");
yyterminate();
}
}
<def>{space}*";" {
struct _defines *ptr, *this;
@ -1170,7 +1170,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*.
<<EOF>> {
if (yy_buffer == NULL)
{
if ( preproc_tos > 0 )
if ( preproc_tos > 0 )
{
preproc_tos = 0;
mmerror(PARSE_ERROR, ET_FATAL, "missing \"EXEC SQL ENDIF;\"");
@ -1189,7 +1189,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*.
ptr->used = NULL;
break;
}
if (yyin != NULL)
fclose(yyin);
@ -1209,7 +1209,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*.
if (i != 0)
output_line_number();
}
}
<INITIAL>{other}|\n { mmerror(PARSE_ERROR, ET_FATAL, "internal error: unreachable state; please report this to <pgsql-bugs@postgresql.org>"); }
@ -1244,7 +1244,7 @@ addlit(char *ytext, int yleng)
/* enlarge buffer if needed */
if ((literallen+yleng) >= literalalloc)
{
do
do
literalalloc *= 2;
while ((literallen+yleng) >= literalalloc);
literalbuf = (char *) realloc(literalbuf, literalalloc);
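
addlit grows its literal buffer geometrically: keep doubling the allocation until the pending text fits, then realloc once. The same idiom in self-contained form (buf/buflen/bufalloc are stand-ins for the scanner's globals):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *buf;
static int  buflen;
static int  bufalloc;

static void
append_text(const char *text, int tlen)
{
    if (buf == NULL)
    {
        bufalloc = 8;
        buf = malloc(bufalloc);
    }
    /* enlarge buffer if needed; doubling keeps the amortized cost O(1) */
    if (buflen + tlen >= bufalloc)
    {
        do
            bufalloc *= 2;
        while (buflen + tlen >= bufalloc);
        buf = realloc(buf, bufalloc);
    }
    memcpy(buf + buflen, text, tlen);
    buflen += tlen;
    buf[buflen] = '\0';         /* keep it NUL-terminated, as addlit does */
}

int
main(void)
{
    append_text("hello, ", 7);
    append_text("world", 5);
    printf("%s (%d bytes in a %d-byte buffer)\n", buf, buflen, bufalloc);
    return 0;
}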
@ -1290,7 +1290,7 @@ parse_include(void)
/*
* skip the ";" if there is one and trailing whitespace. Note that
* yytext contains at least one non-space character plus the ";"
* yytext contains at least one non-space character plus the ";"
*/
for (i = strlen(yytext)-2;
i > 0 && ecpg_isspace(yytext[i]);
@ -1301,7 +1301,7 @@ parse_include(void)
i--;
yytext[i+1] = '\0';
yyin = NULL;
/* If file name is enclosed in '"' remove these and look only in '.' */
@ -1311,7 +1311,7 @@ parse_include(void)
{
yytext[i] = '\0';
memmove(yytext, yytext+1, strlen(yytext));
strncpy(inc_file, yytext, sizeof(inc_file));
yyin = fopen(inc_file, "r");
if (!yyin)
@ -1322,7 +1322,7 @@ parse_include(void)
yyin = fopen(inc_file, "r");
}
}
}
else
{
@ -1331,7 +1331,7 @@ parse_include(void)
yytext[i] = '\0';
memmove(yytext, yytext+1, strlen(yytext));
}
for (ip = include_paths; yyin == NULL && ip != NULL; ip = ip->next)
{
if (strlen(ip->path) + strlen(yytext) + 3 > MAXPGPATH)

View File

@ -1,6 +1,6 @@
override CPPFLAGS := -I../../include -I$(top_srcdir)/src/interfaces/ecpg/include \
-I$(libpq_srcdir) $(CPPFLAGS)
override CFLAGS += $(PTHREAD_CFLAGS)
-I$(libpq_srcdir) $(CPPFLAGS)
override CFLAGS += $(PTHREAD_CFLAGS)
override LDFLAGS := -L../../ecpglib -L../../pgtypeslib $(filter-out -l%, $(libpq)) $(LDFLAGS)
override LIBS := -lecpg -lpgtypes $(filter -l%, $(libpq)) $(LIBS) $(PTHREAD_LIBS)

View File

@ -192,7 +192,7 @@ exec sql end declare section;
strcpy(msg, "commit");
exec sql commit;
strcpy(msg, "disconnect");
strcpy(msg, "disconnect");
exec sql disconnect;
return (0);

View File

@ -106,7 +106,7 @@ exec sql end declare section;
while (1)
{
strcpy(msg, "fetch");
exec sql fetch 1 from mycur1 into descriptor outp_sqlda;
exec sql fetch 1 from mycur1 into descriptor outp_sqlda;
printf("FETCH RECORD %d\n", ++rec);
dump_sqlda(outp_sqlda);

View File

@ -11,7 +11,7 @@ static void dosqlprint(void) {
int main(void)
{
$int i = 14;
$int i = 14;
$decimal j, m, n;
$string c[10];

View File

@ -67,8 +67,8 @@ int main(void)
EXEC SQL create table history (customerid integer, timestamp timestamp without time zone, action_taken char(5), narrative varchar(100));
sql_check("main", "create", 0);
EXEC SQL insert into history
EXEC SQL insert into history
(customerid, timestamp, action_taken, narrative)
values(1, '2003-05-07 13:28:34 CEST', 'test', 'test');
sql_check("main", "insert", 0);
@ -96,7 +96,7 @@ int main(void)
(customerid, timestamp, action_taken, narrative)
values(:c, :e, 'test', 'test');
sql_check("main", "update", 0);
EXEC SQL commit;
EXEC SQL drop table history;

View File

@ -455,7 +455,7 @@ if (sqlca.sqlcode < 0) exit (1);}
#line 193 "describe.pgc"
strcpy(msg, "disconnect");
strcpy(msg, "disconnect");
{ ECPGdisconnect(__LINE__, "CURRENT");
#line 196 "describe.pgc"

View File

@ -268,7 +268,7 @@ if (sqlca.sqlcode == ECPG_NOT_FOUND) break;
if (sqlca.sqlcode < 0) exit (1);}
#line 109 "sqlda.pgc"
printf("FETCH RECORD %d\n", ++rec);
dump_sqlda(outp_sqlda);

View File

@ -36,7 +36,7 @@ int main(void)
int i = 14 ;
#line 14 "test_informix.pgc"
#line 15 "test_informix.pgc"
decimal j , m , n ;

View File

@ -193,7 +193,7 @@ if (sqlca.sqlcode < 0) sqlprint();}
#line 68 "test_informix2.pgc"
sql_check("main", "create", 0);
{ ECPGdo(__LINE__, 1, 1, NULL, 0, ECPGst_normal, "insert into history ( customerid , timestamp , action_taken , narrative ) values ( 1 , '2003-05-07 13:28:34 CEST' , 'test' , 'test' )", ECPGt_EOIT, ECPGt_EORT);
#line 73 "test_informix2.pgc"
@ -244,7 +244,7 @@ if (sqlca.sqlcode < 0) sqlprint();}
#line 97 "test_informix2.pgc"
sql_check("main", "update", 0);
{ ECPGtrans(__LINE__, NULL, "commit");
#line 100 "test_informix2.pgc"

View File

@ -55,17 +55,17 @@ main(void)
char *t1 = "2000-7-12 17:34:29";
int i;
ECPGdebug(1, stderr);
/* exec sql whenever sqlerror do sqlprint ( ) ; */
ECPGdebug(1, stderr);
/* exec sql whenever sqlerror do sqlprint ( ) ; */
#line 27 "dt_test.pgc"
{ ECPGconnect(__LINE__, 0, "regress1" , NULL, NULL , NULL, 0);
{ ECPGconnect(__LINE__, 0, "regress1" , NULL, NULL , NULL, 0);
#line 28 "dt_test.pgc"
if (sqlca.sqlcode < 0) sqlprint ( );}
#line 28 "dt_test.pgc"
{ ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "create table date_test ( d date , ts timestamp )", ECPGt_EOIT, ECPGt_EORT);
{ ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "create table date_test ( d date , ts timestamp )", ECPGt_EOIT, ECPGt_EORT);
#line 29 "dt_test.pgc"
if (sqlca.sqlcode < 0) sqlprint ( );}
@ -84,8 +84,8 @@ if (sqlca.sqlcode < 0) sqlprint ( );}
#line 31 "dt_test.pgc"
date1 = PGTYPESdate_from_asc(d1, NULL);
ts1 = PGTYPEStimestamp_from_asc(t1, NULL);
date1 = PGTYPESdate_from_asc(d1, NULL);
ts1 = PGTYPEStimestamp_from_asc(t1, NULL);
{ ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "insert into date_test ( d , ts ) values ( $1 , $2 )",
ECPGt_date,&(date1),(long)1,(long)1,sizeof(date),

View File

@ -120,7 +120,7 @@ int main()
ECPGdebug(1, stderr);
{ ECPGconnect(__LINE__, 0, "regress1" , NULL, NULL , NULL, 0);
#line 50 "array_of_struct.pgc"

View File

@ -754,7 +754,7 @@ if (sqlca.sqlcode < 0) exit (1);}
#line 239 "cursor.pgc"
strcpy(msg, "disconnect");
strcpy(msg, "disconnect");
{ ECPGdisconnect(__LINE__, "CURRENT");
#line 242 "cursor.pgc"

View File

@ -146,7 +146,7 @@ int main(void)
/* = 1L */
#line 60 "init.pgc"
@ -250,7 +250,7 @@ if (sqlca.sqlcode < 0) fe ( ENUM0 );}
/* exec sql whenever sqlerror do sqlnotice ( NULL , 0 ) ; */
#line 97 "init.pgc"
{ ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "select now ( )", ECPGt_EOIT, ECPGt_EORT);
#line 98 "init.pgc"

View File

@ -363,7 +363,7 @@ if (sqlca.sqlcode < 0) exit (1);}
#line 118 "outofscope.pgc"
strcpy(msg, "disconnect");
strcpy(msg, "disconnect");
{ ECPGdisconnect(__LINE__, "CURRENT");
#line 121 "outofscope.pgc"

View File

@ -264,7 +264,7 @@ if (sqlca.sqlcode < 0) exit (1);}
#line 95 "variable.pgc"
strcpy(msg, "disconnect");
strcpy(msg, "disconnect");
{ ECPGdisconnect(__LINE__, "CURRENT");
#line 98 "variable.pgc"

View File

@ -243,4 +243,4 @@ if (sqlca.sqlcode < 0) exit (1);}
#line 65 "whenever.pgc"
exit (0);
}
}

View File

@ -148,7 +148,7 @@ if (sqlca.sqlcode < 0) sqlprint();}
#line 29 "array.pgc"
{ ECPGtrans(__LINE__, NULL, "begin work");
{ ECPGtrans(__LINE__, NULL, "begin work");
#line 31 "array.pgc"
if (sqlca.sqlcode < 0) sqlprint();}
@ -205,7 +205,7 @@ if (sqlca.sqlcode < 0) sqlprint();}
if (sqlca.sqlcode < 0) sqlprint();}
#line 43 "array.pgc"
{ ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "select f , text from test where i = 1", ECPGt_EOIT,
ECPGt_double,&(f),(long)1,(long)1,sizeof(double),

View File

@ -104,7 +104,7 @@ int main()
ECPGdebug(1,stderr);
{ ECPGconnect(__LINE__, 0, "regress1" , NULL, NULL , NULL, 0); }
#line 15 "code100.pgc"
@ -118,7 +118,7 @@ int main()
#line 22 "code100.pgc"
if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
for (index=0;index<10;++index)
{ { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "insert into test ( payload , index ) values ( 0 , $1 )",
ECPGt_int,&(index),(long)1,(long)1,sizeof(int),
@ -131,12 +131,12 @@ int main()
#line 31 "code100.pgc"
if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
{ ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "update test set payload = payload + 1 where index = - 1", ECPGt_EOIT, ECPGt_EORT);}
#line 35 "code100.pgc"
if (sqlca.sqlcode!=100) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
{ ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "delete from test where index = - 1", ECPGt_EOIT, ECPGt_EORT);}
#line 38 "code100.pgc"
@ -155,7 +155,7 @@ int main()
#line 46 "code100.pgc"
if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
{ ECPGdisconnect(__LINE__, "CURRENT");}
#line 49 "code100.pgc"

View File

@ -453,7 +453,7 @@ if (sqlca.sqlcode < 0) exit (1);}
#line 193 "describe.pgc"
strcpy(msg, "disconnect");
strcpy(msg, "disconnect");
{ ECPGdisconnect(__LINE__, "CURRENT");
#line 196 "describe.pgc"

View File

@ -296,28 +296,28 @@ if (sqlca.sqlcode < 0) sqlprint ( );}
for (i=0;i<sqlca.sqlerrd[2];++i)
{
if (i1[i]) printf("NULL, ");
else printf("%d, ",d1[i]);
else printf("%d, ",d1[i]);
if (i2[i]) printf("NULL, ");
else printf("%f, ",d2[i]);
else printf("%f, ",d2[i]);
if (i3[i]) printf("NULL, ");
else printf("'%s', ",d3[i]);
else printf("'%s', ",d3[i]);
if (i4[i]) printf("NULL, ");
else printf("'%s', ",d4[i]);
else printf("'%s', ",d4[i]);
if (i5[i]) printf("NULL, ");
else printf("'%s', ",d5[i]);
else printf("'%s', ",d5[i]);
if (i6[i]) printf("NULL, ");
else printf("'%s', ",d6[i]);
else printf("'%s', ",d6[i]);
if (i7[i]) printf("NULL, ");
else printf("'%s', ",d7[i]);
else printf("'%s', ",d7[i]);
if (i9[i]) printf("NULL, ");
else printf("'%s', ",d9[i]);
else printf("'%s', ",d9[i]);
printf("\n");
}

View File

@ -225,10 +225,10 @@ if (sqlca.sqlcode < 0) sqlprint ( );}
for (i=0;i < sqlca.sqlerrd[2];++i)
{
if (ipointer1[i]) printf("NULL, ");
else printf("%d, ",ip1[i]);
else printf("%d, ",ip1[i]);
if (ipointer2[i]) printf("NULL, ");
else printf("'%s', ",cp2[i]);
else printf("'%s', ",cp2[i]);
printf("\n");
}
ECPGfree_auto_mem();

View File

@ -25,7 +25,7 @@
int main() {
/* exec sql begin declare section */
#line 9 "fetch.pgc"
char str [ 25 ] ;

View File

@ -278,7 +278,7 @@ if (sqlca.sqlcode == ECPG_NOT_FOUND) break;
if (sqlca.sqlcode < 0) exit (1);}
#line 111 "sqlda.pgc"
printf("FETCH RECORD %d\n", ++rec);
dump_sqlda(outp_sqlda);

View File

@ -23,15 +23,15 @@ main(void)
char *t1 = "2000-7-12 17:34:29";
int i;
ECPGdebug(1, stderr);
exec sql whenever sqlerror do sqlprint();
exec sql connect to REGRESSDB1;
exec sql create table date_test (d date, ts timestamp);
ECPGdebug(1, stderr);
exec sql whenever sqlerror do sqlprint();
exec sql connect to REGRESSDB1;
exec sql create table date_test (d date, ts timestamp);
exec sql set datestyle to iso;
exec sql set intervalstyle to postgres_verbose;
date1 = PGTYPESdate_from_asc(d1, NULL);
ts1 = PGTYPEStimestamp_from_asc(t1, NULL);
date1 = PGTYPESdate_from_asc(d1, NULL);
ts1 = PGTYPEStimestamp_from_asc(t1, NULL);
exec sql insert into date_test(d, ts) values (:date1, :ts1);
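
dt_test drives the pgtypes date/timestamp parsers through ecpg. The same library calls can also be used directly from plain C; a minimal sketch, assuming a standard ecpg installation (link with -lpgtypes; header paths vary, and the result strings are assumed here to be malloc'd):

#include <stdio.h>
#include <stdlib.h>
#include <pgtypes_date.h>
#include <pgtypes_timestamp.h>

int
main(void)
{
    date        d = PGTYPESdate_from_asc("2000-7-12", NULL);
    timestamp   ts = PGTYPEStimestamp_from_asc("2000-7-12 17:34:29", NULL);
    char       *dstr = PGTYPESdate_to_asc(d);
    char       *tstr = PGTYPEStimestamp_to_asc(ts);

    printf("date: %s, timestamp: %s\n", dstr, tstr);
    free(dstr);                 /* assumed malloc'd by pgtypes */
    free(tstr);
    return 0;
}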

View File

@ -46,7 +46,7 @@ int main()
EXEC SQL end declare section;
ECPGdebug(1, stderr);
EXEC SQL connect to REGRESSDB1;
EXEC SQL create table customers (c varchar(50), p int);

View File

@ -238,7 +238,7 @@ exec sql end declare section;
strcpy(msg, "commit");
exec sql commit;
strcpy(msg, "disconnect");
strcpy(msg, "disconnect");
exec sql disconnect;
return (0);

View File

@ -67,7 +67,7 @@ int main(void)
int e=y->member;
int c=10>>2;
bool h=2||1;
bool h=2||1;
long iay /* = 1L */ ;
exec sql end declare section;
@ -94,7 +94,7 @@ int main(void)
exec sql select now();
exec sql whenever sqlerror do fe(ENUM0);
exec sql select now();
exec sql whenever sqlerror do sqlnotice(NULL, NONO);
exec sql whenever sqlerror do sqlnotice(NULL, NONO);
exec sql select now();
return 0;
}

View File

@ -117,7 +117,7 @@ main (void)
strcpy(msg, "commit");
exec sql commit;
strcpy(msg, "disconnect");
strcpy(msg, "disconnect");
exec sql disconnect;
return (0);

View File

@ -94,7 +94,7 @@ exec sql end declare section;
strcpy(msg, "commit");
exec sql commit;
strcpy(msg, "disconnect");
strcpy(msg, "disconnect");
exec sql disconnect;
return (0);

View File

@ -64,4 +64,4 @@ int main(void)
exec sql select 1 into :i;
exec sql rollback;
exit (0);
}
}

View File

@ -22,7 +22,7 @@ TESTS = array array.c \
parser parser.c \
quote quote.c \
show show.c \
insupd insupd.c
insupd insupd.c
all: $(TESTS)

View File

@ -28,7 +28,7 @@ EXEC SQL END DECLARE SECTION;
EXEC SQL SET AUTOCOMMIT = ON;
EXEC SQL BEGIN WORK;
EXEC SQL BEGIN WORK;
EXEC SQL CREATE TABLE test (f float, i int, a int[10], text char(10));
@ -40,7 +40,7 @@ EXEC SQL END DECLARE SECTION;
EXEC SQL COMMIT;
EXEC SQL BEGIN WORK;
EXEC SQL BEGIN WORK;
EXEC SQL SELECT f,text
INTO :f,:text

View File

@ -11,7 +11,7 @@ int main()
ECPGdebug(1,stderr);
exec sql connect to REGRESSDB1;
if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
@ -21,7 +21,7 @@ int main()
if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
exec sql commit work;
if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
for (index=0;index<10;++index)
{ exec sql insert into test
(payload, index)
@ -30,11 +30,11 @@ int main()
}
exec sql commit work;
if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
exec sql update test
set payload=payload+1 where index=-1;
set payload=payload+1 where index=-1;
if (sqlca.sqlcode!=100) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
exec sql delete from test where index=-1;
if (sqlca.sqlcode!=100) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
@ -45,7 +45,7 @@ int main()
if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
exec sql commit work;
if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
exec sql disconnect;
if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
return 0;

View File

@ -192,7 +192,7 @@ exec sql end declare section;
strcpy(msg, "commit");
exec sql commit;
strcpy(msg, "disconnect");
strcpy(msg, "disconnect");
exec sql disconnect;
return (0);

View File

@ -55,28 +55,28 @@ int main(void)
for (i=0;i<sqlca.sqlerrd[2];++i)
{
if (i1[i]) printf("NULL, ");
else printf("%d, ",d1[i]);
else printf("%d, ",d1[i]);
if (i2[i]) printf("NULL, ");
else printf("%f, ",d2[i]);
else printf("%f, ",d2[i]);
if (i3[i]) printf("NULL, ");
else printf("'%s', ",d3[i]);
else printf("'%s', ",d3[i]);
if (i4[i]) printf("NULL, ");
else printf("'%s', ",d4[i]);
else printf("'%s', ",d4[i]);
if (i5[i]) printf("NULL, ");
else printf("'%s', ",d5[i]);
else printf("'%s', ",d5[i]);
if (i6[i]) printf("NULL, ");
else printf("'%s', ",d6[i]);
else printf("'%s', ",d6[i]);
if (i7[i]) printf("NULL, ");
else printf("'%s', ",d7[i]);
else printf("'%s', ",d7[i]);
if (i9[i]) printf("NULL, ");
else printf("'%s', ",d9[i]);
else printf("'%s', ",d9[i]);
printf("\n");
}

Some files were not shown because too many files have changed in this diff