
Merge the latest trunk enhancements into the reuse-schema branch. Fix the reuse-schema build of the CLI so that it works again.

FossilOrigin-Name: 70ef3784f678e29a7b067e557f69ca0a14e7823c344bb438dc4373a454389218
drh · 2024-03-13 15:59:02 +00:00
157 changed files with 6708 additions and 1539 deletions


@@ -371,7 +371,7 @@ do_test alter2-7.5 {
execsql {
SELECT a, typeof(a), b, typeof(b), c, typeof(c) FROM t1 LIMIT 1;
}
} {1 integer -123 integer 5 text}
} {1 integer -123.0 real 5 text}
#-----------------------------------------------------------------------
# Test that UPDATE trigger tables work with default values, and that when
@@ -397,11 +397,11 @@ do_test alter2-8.2 {
UPDATE t1 SET c = 10 WHERE a = 1;
SELECT a, typeof(a), b, typeof(b), c, typeof(c) FROM t1 LIMIT 1;
}
} {1 integer -123 integer 10 text}
} {1 integer -123.0 real 10 text}
ifcapable trigger {
do_test alter2-8.3 {
set ::val
} {-123 integer 5 text -123 integer 10 text}
} {-123.0 real 5 text -123.0 real 10 text}
}
#-----------------------------------------------------------------------
@@ -425,7 +425,7 @@ ifcapable trigger {
DELETE FROM t1 WHERE a = 2;
}
set ::val
} {-123 integer 5 text}
} {-123.0 real 5 text}
}
#-----------------------------------------------------------------------


@@ -8,6 +8,7 @@
# May you share freely, never taking more than you give.
#
#***********************************************************************
# TESTRUNNER: shell
#
# This file implements tests for the appendvfs extension.
#


@@ -106,7 +106,7 @@ do_test 3.4 {
proc busy_handler {n} { return 1 }
do_test 3.5 {
catchsql { PRAGMA optimize }
} {0 {}}
} {1 {database is locked}}
do_test 3.6 {
execsql { COMMIT } db2


@@ -98,7 +98,7 @@ do_test corruptC-2.1 {
sqlite3 db test.db
catchsql {PRAGMA integrity_check}
} {0 {{*** in database main ***
Tree 3 page 3: free space corruption}}}
Tree 3 page 3: free space corruption} {wrong # of entries in index t1i1}}}
# test that a corrupt content offset size is handled (seed 5649)
#


@@ -113,7 +113,7 @@ do_test corruptD-1.1.1 {
hexio_write test.db [expr 1024+1] FFFF
catchsql { PRAGMA quick_check }
} {0 {{*** in database main ***
Tree 2 page 2: free space corruption}}}
Tree 2 page 2: free space corruption} {wrong # of entries in index i1}}}
do_test corruptD-1.1.2 {
incr_change_counter
hexio_write test.db [expr 1024+1] [hexio_render_int32 1021]


@@ -209,8 +209,8 @@ datetest 3.16 "strftime('[repeat 200 %Y]','2003-10-31')" [repeat 200 2003]
datetest 3.17 "strftime('[repeat 200 abc%m123]','2003-10-31')" \
[repeat 200 abc10123]
foreach c {a b c g h i n o q r t v x y z
A B C D E G K L N O Q V Z
foreach c {a b c h i n o q r t v x y z
A B C D E K L N O Q Z
0 1 2 3 4 5 6 6 7 9 _} {
datetest 3.18.$c "strftime('%$c','2003-10-31')" NULL
}
@@ -262,7 +262,7 @@ datetest 5.15 {datetime('1994-04-16 14:00:00 +05:00 Z')} NULL
# localtime->utc and utc->localtime conversions.
#
# Use SQLITE_TESTCTRL_LOCALTIME_FAULT=2 to set an alternative localtime_r()
# implementation that is not locale-dependent. This testing localtime_r()
# implementation that is not locale-dependent. The testing localtime_r()
# operates as follows:
#
# (1) Localtime is 30 minutes earlier than (west of) UTC on
@@ -321,6 +321,38 @@ utc_to_local 6.22 {1800-10-29 12:30:00} {1800-10-29 12:00:00}
local_to_utc 6.23 {3000-10-30 12:00:00} {3000-10-30 11:30:00}
utc_to_local 6.24 {3000-10-30 11:30:00} {3000-10-30 12:00:00}
# If the time is specified to be ZULU, or if it has an explicit
# timezone extension, then the time will already be UTC and subsequent
# 'utc' modifiers are no-ops.
#
do_execsql_test date-6.25 {
SELECT datetime('2000-10-29 12:00Z','utc','utc');
} {{2000-10-29 12:00:00}}
do_execsql_test date-6.26 {
SELECT datetime('2000-10-29 12:00:00+05:00');
} {{2000-10-29 07:00:00}}
do_execsql_test date-6.27 {
SELECT datetime('2000-10-29 12:00:00+05:00', 'utc');
} {{2000-10-29 07:00:00}}
# Multiple back-and-forth UTC to LOCAL to UTC...
do_execsql_test date-6.28 {
SELECT datetime('2000-10-29 12:00:00Z', 'localtime');
} {{2000-10-29 12:30:00}}
do_execsql_test date-6.29 {
SELECT datetime('2000-10-29 12:00:00Z', 'utc', 'localtime');
} {{2000-10-29 12:30:00}}
do_execsql_test date-6.30 {
SELECT datetime('2000-10-29 12:00:00Z', 'utc', 'localtime', 'utc');
} {{2000-10-29 12:00:00}}
do_execsql_test date-6.31 {
SELECT datetime('2000-10-29 12:00:00Z', 'utc','localtime','utc','localtime');
} {{2000-10-29 12:30:00}}
do_execsql_test date-6.32 {
SELECT datetime('2000-10-29 12:00:00Z', 'localtime','localtime');
} {{2000-10-29 12:30:00}}
# Restore the use of the OS localtime_r() before going on...
sqlite3_test_control SQLITE_TESTCTRL_LOCALTIME_FAULT 0
@@ -573,4 +605,51 @@ datetest 18.2 {unixepoch('1970-01-01T00:00:00.1', 'subsec')} {0.1}
datetest 18.3 {unixepoch('1970-01-01T00:00:00.2', 'subsecond')} {0.2}
datetest 18.4 {julianday('-4713-11-24 13:40:48.864', 'subsec')} {0.07001}
datetest 18.5 {typeof(unixepoch('now', 'subsecond'))} {real}
# 2024-03-03 the 'ceiling' and 'floor' operators.
#
datetest 19.1 {date('2000-01-31','floor')} {2000-01-31}
datetest 19.2a {date('2000-02-31','floor')} {2000-02-29}
datetest 19.2b {date('1999-02-31','floor')} {1999-02-28}
datetest 19.2c {date('1900-02-31','floor')} {1900-02-28}
datetest 19.3 {date('2000-03-31','floor')} {2000-03-31}
datetest 19.4 {date('2000-04-31','floor')} {2000-04-30}
datetest 19.5 {date('2000-05-31','floor')} {2000-05-31}
datetest 19.6 {date('2000-06-31','floor')} {2000-06-30}
datetest 19.7 {date('2000-07-31','floor')} {2000-07-31}
datetest 19.8 {date('2000-08-31','floor')} {2000-08-31}
datetest 19.9 {date('2000-09-31','floor')} {2000-09-30}
datetest 19.10 {date('2000-10-31','floor')} {2000-10-31}
datetest 19.11 {date('2000-11-31','floor')} {2000-11-30}
datetest 19.12 {date('2000-12-31','floor')} {2000-12-31}
datetest 19.21 {date('2000-01-31','ceiling')} {2000-01-31}
datetest 19.22a {date('2000-02-31','ceiling')} {2000-03-02}
datetest 19.22b {date('1999-02-31','ceiling')} {1999-03-03}
datetest 19.22c {date('1900-02-31','ceiling')} {1900-03-03}
datetest 19.23 {date('2000-03-31','ceiling')} {2000-03-31}
datetest 19.24 {date('2000-04-31','ceiling')} {2000-05-01}
datetest 19.25 {date('2000-05-31','ceiling')} {2000-05-31}
datetest 19.26 {date('2000-06-31','ceiling')} {2000-07-01}
datetest 19.27 {date('2000-07-31','ceiling')} {2000-07-31}
datetest 19.28 {date('2000-08-31','ceiling')} {2000-08-31}
datetest 19.29 {date('2000-09-31','ceiling')} {2000-10-01}
datetest 19.30 {date('2000-10-31','ceiling')} {2000-10-31}
datetest 19.31 {date('2000-11-31','ceiling')} {2000-12-01}
datetest 19.32 {date('2000-12-31','ceiling')} {2000-12-31}
datetest 19.40 {date('2024-01-31','+1 month','ceiling')} {2024-03-02}
datetest 19.41 {date('2024-01-31','+1 month','floor')} {2024-02-29}
datetest 19.42 {date('2023-01-31','+1 month','ceiling')} {2023-03-03}
datetest 19.43 {date('2023-01-31','+1 month','floor')} {2023-02-28}
datetest 19.44 {date('2024-02-29','+1 year','ceiling')} {2025-03-01}
datetest 19.45 {date('2024-02-29','+1 year','floor')} {2025-02-28}
datetest 19.46 {date('2024-02-29','-110 years','ceiling')} {1914-03-01}
datetest 19.47 {date('2024-02-29','-110 years','floor')} {1914-02-28}
datetest 19.48 {date('2024-02-29','-0110-00-00','floor')} {1914-02-28}
datetest 19.49 {date('2024-02-29','-0110-00-00','ceiling')} {1914-03-01}
datetest 19.50 {date('2000-08-31','+0023-06-00','floor')} {2024-02-29}
datetest 19.51 {date('2000-08-31','+0022-06-00','floor')} {2023-02-28}
datetest 19.52 {date('2000-08-31','+0023-06-00','ceiling')} {2024-03-02}
datetest 19.53 {date('2000-08-31','+0022-06-00','ceiling')} {2023-03-03}
finish_test


@@ -24,12 +24,12 @@ ifcapable {!datetime} {
}
if {$tcl_platform(os)=="Linux"} {
set FMT {%d,%e,%F,%H,%k,%I,%l,%j,%m,%M,%u,%w,%W,%Y,%%,%P,%p}
set FMT {%d,%e,%F,%H,%k,%I,%l,%j,%m,%M,%u,%w,%W,%Y,%%,%P,%p,%U,%V,%G,%g}
} else {
set FMT {%d,%e,%F,%H,%I,%j,%p,%R,%u,%w,%W,%%}
}
for {set i 0} {$i<=24854} {incr i} {
set TS [expr {$i*86401}]
for {set i 0} {$i<=24858} {incr i} {
set TS [expr {$i*86390}]
do_execsql_test date4-$i {
SELECT strftime($::FMT,$::TS,'unixepoch');
} [list [strftime $FMT $TS]]


@@ -95,7 +95,7 @@ foreach {tn use_eph sql res} {
7 0 "SELECT count(DISTINCT a) FROM t2, t1" 5
8 1 "SELECT count(DISTINCT a+b) FROM t1, t2, t2, t2" 6
9 0 "SELECT count(DISTINCT c) FROM t1 WHERE c=2" 1
10 1 "SELECT count(DISTINCT t1.rowid) FROM t1, t2" 10
10 0 "SELECT count(DISTINCT t1.rowid) FROM t1, t2" 10
} {
do_test 3.$tn.1 {
set prg [db eval "EXPLAIN $sql"]
@@ -148,6 +148,10 @@ do_execsql_test 3.0 {
CREATE TABLE t3(x, y, z);
INSERT INTO t3 VALUES(1,1,1);
INSERT INTO t3 VALUES(2,2,2);
CREATE TABLE t4(a);
CREATE INDEX t4a ON t4(a);
INSERT INTO t4 VALUES(1), (2), (2), (3), (1);
}
foreach {tn use_eph sql res} {
@@ -158,6 +162,9 @@ foreach {tn use_eph sql res} {
4 0 "SELECT count(DISTINCT f) FROM t2 GROUP BY d, e" {1 2 2 3}
5 1 "SELECT count(DISTINCT f) FROM t2 GROUP BY d" {2 3}
6 0 "SELECT count(DISTINCT f) FROM t2 WHERE d IS 1 GROUP BY e" {1 2 2}
7 0 "SELECT count(DISTINCT a) FROM t1" {4}
8 0 "SELECT count(DISTINCT a) FROM t4" {3}
} {
do_test 4.$tn.1 {
set prg [db eval "EXPLAIN $sql"]


@@ -73,12 +73,12 @@ sqlite3 db test.db
do_execsql_test e_reindex-1.3 {
PRAGMA integrity_check;
} [list \
{wrong # of entries in index i2} \
{wrong # of entries in index i1} \
{row 3 missing from index i2} \
{row 3 missing from index i1} \
{row 4 missing from index i2} \
{row 4 missing from index i1} \
{wrong # of entries in index i2} \
{wrong # of entries in index i1}
{row 4 missing from index i1}
]
do_execsql_test e_reindex-1.4 {


@@ -50,5 +50,33 @@ do_faultsim_test 1 -faults oom* -prep {
faultsim_test_result {0 {}}
}
#-------------------------------------------------------------------
reset_db
do_execsql_test 2.0 {
BEGIN;
CREATE VIRTUAL TABLE t1 USING fts3(a);
WITH s(i) AS (
SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<50
)
INSERT INTO t1 SELECT 'abc def ghi jkl mno pqr' FROM s;
COMMIT;
}
faultsim_save_and_close
do_faultsim_test 2 -faults oom-t* -prep {
faultsim_restore_and_reopen
execsql {
BEGIN;
CREATE TABLE x1(a PRIMARY KEY);
}
} -body {
execsql {
PRAGMA integrity_check;
}
} -test {
faultsim_test_result {0 ok} $::TMPDBERROR
}
finish_test


@@ -54,5 +54,22 @@ do_execsql_test 2.3 {
PRAGMA integrity_check(t2);
} {{malformed inverted index for FTS4 table main.t2}}
#-------------------------------------------------------------------------
# Test that integrity-check works on a read-only database.
#
reset_db
do_execsql_test 3.0 {
CREATE VIRTUAL TABLE x1 USING fts4(a, b);
INSERT INTO x1 VALUES('one', 'two');
INSERT INTO x1 VALUES('three', 'four');
}
db close
sqlite3 db test.db -readonly 1
do_execsql_test 3.1 {
PRAGMA integrity_check;
} {ok}
finish_test


@@ -786,6 +786,11 @@ do_test func-16.1 {
}
} {X'616263' NULL}
# Test the quote function for +Inf and -Inf
do_execsql_test func-16.2 {
SELECT quote(4.2e+859), quote(-7.8e+904);
} {9.0e+999 -9.0e+999}
# Correctly handle function error messages that include %. Ticket #1354
#
do_test func-17.1 {
@@ -1042,6 +1047,9 @@ do_test func-21.8 {
SELECT replace('aaaaaaa', 'a', '0123456789');
}
} {0123456789012345678901234567890123456789012345678901234567890123456789}
do_execsql_test func-21.9 {
SELECT typeof(replace(1,'',0));
} {text}
ifcapable tclvar {
do_test func-21.9 {


@@ -161,8 +161,8 @@ static struct GlobalVars {
/*
** Include the external vt02.c and randomjson.c modules.
*/
extern int sqlite3_vt02_init(sqlite3*,char***,void*);
extern int sqlite3_randomjson_init(sqlite3*,char***,void*);
extern int sqlite3_vt02_init(sqlite3*,char**,const sqlite3_api_routines*);
extern int sqlite3_randomjson_init(sqlite3*,char**,const sqlite3_api_routines*);
/*


@@ -378,6 +378,20 @@ do_execsql_test json101-5.8 {
WHERE jx.value<>jx.atom AND type NOT IN ('array','object');
} {}
# 2024-02-16 https://sqlite.org/forum/forumpost/ecb94cd210
# Regression in json_tree()/json_each(). The value column
# should have the "J" subtype if the value is an array or
# object.
#
do_execsql_test json101-5.10 {
SELECT json_insert('{}','$.a',value) FROM json_tree('[1,2,3]') WHERE atom IS NULL;
} {{{"a":[1,2,3]}}}
# ^^^^^^^--- In double-quotes, a string literal, prior to bug fix
do_execsql_test json101-5.11 {
SELECT json_insert('{}','$.a',value) FROM json_tree('"[1,2,3]"');
} {{{"a":"[1,2,3]"}}}
do_execsql_test json101-6.1 {
SELECT json_valid('{"a":55,"b":72,}');
} {0}


@@ -67,6 +67,12 @@ for {set ii 1} {$ii<=5000} {incr ii} {
FROM t1, kv
WHERE p->>key IS NOT val
} 0
do_execsql_test $ii.8 {
SELECT j0 FROM t1 WHERE json(j0)!=json(json_pretty(j0));
} {}
do_execsql_test $ii.9 {
SELECT j5 FROM t1 WHERE json(j5)!=json(json_pretty(j5));
} {}
}

test/json107.test (new file, 86 lines)

@@ -0,0 +1,86 @@
# 2024-01-23
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# Legacy JSON bug: If the input is a BLOB that when cast into TEXT looks
# like valid JSON, then treat it as valid JSON.
#
# The original intent of the JSON functions was to raise an error on any
# BLOB input. That intent was clearly documented, but the code failed
# to implement it. Subsequently, many applications began to depend on the
# incorrect behavior, especially apps that used readfile() to read JSON
# content, since readfile() returns a BLOB. So we need to support the
# bug moving forward.
#
# The tests in this file verify that the original buggy behavior is
# preserved.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix json107
if {[db one {PRAGMA encoding}]!="UTF-8"} {
# These tests only work for a UTF-8 encoding.
finish_test
return
}
do_execsql_test 1.1 {
SELECT json_valid( CAST('{"a":1}' AS BLOB) );
} 1
do_execsql_test 1.1.1 {
SELECT json_valid( CAST('{"a":1}' AS BLOB), 1);
} 1
do_execsql_test 1.1.2 {
SELECT json_valid( CAST('{"a":1}' AS BLOB), 2);
} 1
do_execsql_test 1.1.4 {
SELECT json_valid( CAST('{"a":1}' AS BLOB), 4);
} 0
do_execsql_test 1.1.8 {
SELECT json_valid( CAST('{"a":1}' AS BLOB), 8);
} 0
do_execsql_test 1.2.1 {
SELECT CAST('{"a":123}' AS blob) -> 'a';
} 123
do_execsql_test 1.2.2 {
SELECT CAST('{"a":123}' AS blob) ->> 'a';
} 123
do_execsql_test 1.2.3 {
SELECT json_extract(CAST('{"a":123}' AS blob), '$.a');
} 123
do_execsql_test 1.3 {
SELECT json_insert(CAST('{"a":123}' AS blob),'$.b',456);
} {{{"a":123,"b":456}}}
do_execsql_test 1.4 {
SELECT json_remove(CAST('{"a":123,"b":456}' AS blob),'$.a');
} {{{"b":456}}}
do_execsql_test 1.5 {
SELECT json_set(CAST('{"a":123,"b":456}' AS blob),'$.a',789);
} {{{"a":789,"b":456}}}
do_execsql_test 1.6 {
SELECT json_replace(CAST('{"a":123,"b":456}' AS blob),'$.a',789);
} {{{"a":789,"b":456}}}
do_execsql_test 1.7 {
SELECT json_type(CAST('{"a":123,"b":456}' AS blob));
} object
do_execsql_test 1.8 {
SELECT json(CAST('{"a":123,"b":456}' AS blob));
} {{{"a":123,"b":456}}}
ifcapable vtab {
do_execsql_test 2.1 {
SELECT key, value FROM json_tree( CAST('{"a":123,"b":456}' AS blob) )
WHERE atom;
} {a 123 b 456}
}
finish_test

test/json108.test (new file, 45 lines)

@@ -0,0 +1,45 @@
# 2024-03-06
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# Invariant tests for JSON built around the randomjson extension
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix json108
# These tests require virtual table "json_tree" to run.
ifcapable !vtab { finish_test ; return }
load_static_extension db randomjson
db eval {
CREATE TEMP TABLE t1(j0,j5);
WITH RECURSIVE c(n) AS (VALUES(0) UNION ALL SELECT n+1 FROM c WHERE n<9)
INSERT INTO t1 SELECT random_json(n), random_json5(n) FROM c;
}
do_execsql_test 1.1 {
SELECT count(*) FROM t1 WHERE json(j0)==json(json_pretty(j0,NULL));
} 10
do_execsql_test 1.2 {
SELECT count(*) FROM t1 WHERE json(j0)==json(json_pretty(j0,NULL));
} 10
do_execsql_test 1.3 {
SELECT count(*) FROM t1 WHERE json(j0)==json(json_pretty(j0,''));
} 10
do_execsql_test 1.4 {
SELECT count(*) FROM t1 WHERE json(j0)==json(json_pretty(j0,char(9)));
} 10
do_execsql_test 1.5 {
SELECT count(*) FROM t1 WHERE json(j0)==json(json_pretty(j0,'/*hello*/'));
} 10
finish_test

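For reference, the invariant that json108.test exercises above is that pretty-printing never changes the underlying JSON value, regardless of the indent string used. A minimal standalone sketch of that invariant (assuming any SQLite build with the JSON functions enabled):

    -- json() strips the whitespace that json_pretty() adds, so the values compare equal
    SELECT json('{"a":[1,2]}') == json(json_pretty('{"a":[1,2]}'));             -- 1
    SELECT json('{"a":[1,2]}') == json(json_pretty('{"a":[1,2]}', char(9)));    -- 1 (tab indent)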

@@ -306,4 +306,31 @@ do_execsql_test 13.1 {
SELECT json('{x:''a "b" c''}');
} {{{"x":"a \"b\" c"}}}
# 2024-01-31
# Allow control characters within JSON5 string literals.
#
for {set c 1} {$c<=0x1f} {incr c} {
do_execsql_test 14.$c.1 {
SELECT json_valid('"abc' || char($c) || 'xyz"');
} {0}
do_execsql_test 14.$c.2 {
SELECT json_valid('"abc' || char($c) || 'xyz"', 2);
} {1}
switch $c {
8 {set e "\\b"}
9 {set e "\\t"}
10 {set e "\\n"}
12 {set e "\\f"}
13 {set e "\\r"}
default {set e [format "\\u00%02x" $c]}
}
do_execsql_test 14.$c.3 {
SELECT json('{label:"abc' || char($c) || 'xyz"}');
} "{{\"label\":\"abc${e}xyz\"}}"
do_execsql_test 14.$c.4 {
SELECT jsonb('{label:"abc' || char($c) || 'xyz"}') -> '$';
} "{{\"label\":\"abc${e}xyz\"}}"
}
finish_test


@@ -46,4 +46,8 @@ foreach {id path res} {
} $res
}
do_catchsql_test jsonb01-2.0 {
SELECT x'8ce6ffffffff171333' -> '$';
} {1 {malformed JSON}}
finish_test

test/literal.test (new file, 103 lines)

@@ -0,0 +1,103 @@
# 2024-01-19
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# This file implements tests for SQL literals
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set ::testprefix literal
proc test_literal {tn lit type val} {
do_execsql_test $tn.1 "SELECT typeof( $lit ), $lit" [list $type $val]
ifcapable altertable {
do_execsql_test $tn.2 "
DROP TABLE IF EXISTS x1;
CREATE TABLE x1(a);
INSERT INTO x1 VALUES(123);
ALTER TABLE x1 ADD COLUMN b DEFAULT $lit ;
SELECT typeof(b), b FROM x1;
" [list $type $val]
}
do_execsql_test $tn.3 "
DROP TABLE IF EXISTS x1;
CREATE TABLE x1(a DEFAULT $lit);
INSERT INTO x1 DEFAULT VALUES;
SELECT typeof(a), a FROM x1;
" [list $type $val]
}
proc test_literal_error {tn lit unrec} {
do_catchsql_test $tn "SELECT $lit" "1 {unrecognized token: \"$unrec\"}"
}
test_literal 1.0 45 integer 45
test_literal 1.1 0xFF integer 255
test_literal 1.2 0xFFFFFFFF integer [expr 0xFFFFFFFF]
test_literal 1.3 0x123FFFFFFFF integer [expr 0x123FFFFFFFF]
test_literal 1.4 -0x123FFFFFFFF integer [expr -1 * 0x123FFFFFFFF]
test_literal 1.5 0xFFFFFFFFFFFFFFFF integer -1
test_literal 1.7 0x7FFFFFFFFFFFFFFF integer [expr 0x7FFFFFFFFFFFFFFF]
test_literal 1.8 -0x7FFFFFFFFFFFFFFF integer [expr -0x7FFFFFFFFFFFFFFF]
test_literal 1.9 +0x7FFFFFFFFFFFFFFF integer [expr +0x7FFFFFFFFFFFFFFF]
test_literal 1.10 -45 integer -45
test_literal 1.11 '0xFF' text 0xFF
test_literal 1.12 '-0xFF' text -0xFF
test_literal 1.13 -'0xFF' integer 0
test_literal 1.14 -9223372036854775808 integer -9223372036854775808
test_literal 2.1 1e12 real 1000000000000.0
test_literal 2.2 1.0 real 1.0
test_literal 2.3 1e1000 real Inf
test_literal 2.4 -1e1000 real -Inf
test_literal 3.1 1_000 integer 1000
test_literal 3.2 1.1_1 real 1.11
test_literal 3.3 1_0.1_1 real 10.11
test_literal 3.4 1e1_000 real Inf
test_literal 3.5 12_3_456.7_8_9 real 123456.789
test_literal 3.6 9_223_372_036_854_775_807 integer 9223372036854775807
test_literal 3.7 9_223_372_036_854_775_808 real 9.22337203685478e+18
test_literal 3.8 -9_223_372_036_854_775_808 integer -9223372036854775808
foreach {tn lit unrec} {
0 123a456 123a456
1 1_ 1_
2 1_.4 1_.4
3 1e_4 1e_4
4 1_e4 1_e4
5 1.4_e4 1.4_e4
6 1.4e+_4 1.4e
7 1.4e-_4 1.4e
8 1.4e4_ 1.4e4_
9 1.4_e4 1.4_e4
10 1.4e_4 1.4e_4
11 12__34 12__34
12 1234_ 1234_
13 12._34 12._34
14 12_.34 12_.34
15 12.34_ 12.34_
16 1.0e1_______2 1.0e1_______2
} {
test_literal_error 4.$tn $lit $unrec
}
# dbsqlfuzz e3186a9e7826e9cd7f4085aa4452f8696485f9e1
# See tag-20240224-a and -b
#
do_catchsql_test 5.1 {
SELECT 1 ORDER BY 2_3;
} {1 {1st ORDER BY term out of range - should be between 1 and 1}}
finish_test

test/literal2.tcl (new file, 40 lines)

@@ -0,0 +1,40 @@
# 2018 May 19
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
source [file join [file dirname $argv0] pg_common.tcl]
#=========================================================================
start_test literal2 "2024 Jan 23"
execsql_test 1.0 { SELECT 123_456 }
errorsql_test 1.1 { SELECT 123__456 }
execsql_float_test 2.1 { SELECT 1.0e1_2 }
execsql_test 3.0.0 { SELECT 0xFF_FF }
execsql_test 3.0.1 { SELECT 0xFF_EF }
errorsql_test 3.0.2 { SELECT 0xFF__EF }
# errorsql_test 3.0.3 { SELECT 0x_FFEF }
errorsql_test 3.0.4 { SELECT 0xFFEF_ }
execsql_test 3.1.0 { SELECT 0XFF_FF }
execsql_test 3.1.1 { SELECT 0XFF_EF }
errorsql_test 3.1.2 { SELECT 0XFF__EF }
# errorsql_test 3.1.3 { SELECT 0X_FFEF }
errorsql_test 3.1.4 { SELECT 0XFFEF_ }
finish_test

test/literal2.test (new file, 84 lines)

@@ -0,0 +1,84 @@
# 2024 Jan 23
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library.
#
####################################################
# DO NOT EDIT! THIS FILE IS AUTOMATICALLY GENERATED!
####################################################
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix literal2
do_execsql_test 1.0 {
SELECT 123_456
} {123456}
# PG says ERROR: trailing junk after numeric literal at or near "123_"
do_test 1.1 { catch { execsql {
SELECT 123__456
} } } 1
do_test 2.1 {
set myres {}
foreach r [db eval {SELECT 1.0e1_2}] {
lappend myres [format %.4f [set r]]
}
set res2 {1000000000000.0000}
set i 0
foreach r [set myres] r2 [set res2] {
if {[set r]<([set r2]-0.0001) || [set r]>([set r2]+0.0001)} {
error "list element [set i] does not match: got=[set r] expected=[set r2]"
}
incr i
}
set {} {}
} {}
do_execsql_test 3.0.0 {
SELECT 0xFF_FF
} {65535}
do_execsql_test 3.0.1 {
SELECT 0xFF_EF
} {65519}
# PG says ERROR: trailing junk after numeric literal at or near "0xFF_"
do_test 3.0.2 { catch { execsql {
SELECT 0xFF__EF
} } } 1
# PG says ERROR: trailing junk after numeric literal at or near "0xFFEF_"
do_test 3.0.4 { catch { execsql {
SELECT 0xFFEF_
} } } 1
do_execsql_test 3.1.0 {
SELECT 0XFF_FF
} {65535}
do_execsql_test 3.1.1 {
SELECT 0XFF_EF
} {65519}
# PG says ERROR: trailing junk after numeric literal at or near "0XFF_"
do_test 3.1.2 { catch { execsql {
SELECT 0XFF__EF
} } } 1
# PG says ERROR: trailing junk after numeric literal at or near "0XFFEF_"
do_test 3.1.4 { catch { execsql {
SELECT 0XFFEF_
} } } 1
finish_test


@@ -84,7 +84,6 @@ do_test 152 {
catchsql {INSERT INTO t1 VALUES(3,4);}
} {1 {attempt to write a readonly database}}
breakpoint
do_test 160 {
db deserialize -maxsize 32768 $db1
db eval {SELECT * FROM t1}
@@ -248,6 +247,7 @@ if {[wal_is_capable]} {
set fd [open test.db]
fconfigure $fd -translation binary -encoding binary
set data [read $fd [expr 20*1024]]
close $fd
sqlite3 db ""
db deserialize $data
@@ -267,4 +267,17 @@ if {[wal_is_capable]} {
} {1 {database disk image is malformed}}
}
# 2024-01-20
# https://sqlite.org/forum/forumpost/498777780e16880a
#
# Make sure a database is initialized before serializing it.
#
reset_db
sqlite3 dbempty :memory:
do_test 900 {
set len [string length [dbempty serialize]]
expr {$len>0}
} 1
dbempty close
finish_test


@@ -654,7 +654,7 @@ do_catchsql_test misc1-21.1 {
} {1 {near "#0": syntax error}}
do_catchsql_test misc1-21.2 {
VALUES(0,0x0MATCH#0;
} {1 {near ";": syntax error}}
} {1 {unrecognized token: "0x0MATCH"}}
# 2015-04-15
do_execsql_test misc1-22.1 {


@@ -569,11 +569,11 @@ ifcapable subquery&&compound {
}
# Overflow the lemon parser stack by providing an overly complex
# expression. Make sure that the overflow is detected and reported.
# expression. Make sure that the overflow is detected and the
# stack is grown automatically such that the application calling
# SQLite never notices.
#
# This test fails when building with -DYYSTACKDEPTH=0
#
do_test misc5-7.1 {
do_test misc5-7.1.1 {
execsql {CREATE TABLE t1(x)}
set sql "INSERT INTO t1 VALUES("
set tail ""
@@ -581,9 +581,21 @@ do_test misc5-7.1 {
append sql "(1+"
append tail ")"
}
append sql 2$tail
append sql "0$tail); SELECT * FROM t1;"
catchsql $sql
} {1 {parser stack overflow}}
} {0 200}
do_test misc5-7.1.2 {
execsql {DELETE FROM t1}
set sql "INSERT INTO t1 VALUES("
set tail ""
for {set i 0} {$i<900} {incr i} {
append sql "(1+"
append tail ")"
}
append sql "0$tail); SELECT * FROM t1;"
catchsql $sql
} {0 900}
# Parser stack overflow is silently ignored when it occurs while parsing the
# schema and PRAGMA writable_schema is turned on.


@@ -45,18 +45,18 @@ proc register_rblob_code {dbname seed} {
}
# For cases 1.1 and 1.4, the number of pages read using xRead() is 4 on
# unix and 9 on windows. The difference is that windows only ever maps
# For cases 1.1 and 1.4, the number of pages read using xRead() is 8 on
# unix and 12 on windows. The difference is that windows only ever maps
# an integer number of OS pages (i.e. creates mappings that are a multiple
# of 4KB in size). Whereas on unix any sized mapping may be created.
#
foreach {t mmap_size nRead c2init} {
1.1 { PRAGMA mmap_size = 67108864 } /[49]/ {PRAGMA mmap_size = 0}
1.2 { PRAGMA mmap_size = 53248 } 150 {PRAGMA mmap_size = 0}
1.3 { PRAGMA mmap_size = 0 } 344 {PRAGMA mmap_size = 0}
1.4 { PRAGMA mmap_size = 67108864 } /[49]/ {PRAGMA mmap_size = 67108864 }
1.5 { PRAGMA mmap_size = 53248 } 150 {PRAGMA mmap_size = 67108864 }
1.6 { PRAGMA mmap_size = 0 } 344 {PRAGMA mmap_size = 67108864 }
1.1 { PRAGMA mmap_size = 67108864 } /8|12/ {PRAGMA mmap_size = 0}
1.2 { PRAGMA mmap_size = 53248 } /15[34]/ {PRAGMA mmap_size = 0}
1.3 { PRAGMA mmap_size = 0 } 344 {PRAGMA mmap_size = 0}
1.4 { PRAGMA mmap_size = 67108864 } /12|8/ {PRAGMA mmap_size = 67108864 }
1.5 { PRAGMA mmap_size = 53248 } /15[34]/ {PRAGMA mmap_size = 67108864 }
1.6 { PRAGMA mmap_size = 0 } 344 {PRAGMA mmap_size = 67108864 }
} {
do_multiclient_test tn {

test/mmapcorrupt.test (new file, 51 lines)

@@ -0,0 +1,51 @@
# 2024 January 23
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# Test special cases of corrupt database handling in mmap-mode.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix mmapcorrupt
database_may_be_corrupt
db close
sqlite3_shutdown
sqlite3_config_lookaside 0 0
sqlite3_initialize
reset_db
do_execsql_test 1.0 {
PRAGMA page_size = 16384;
CREATE TABLE tn1(a PRIMARY KEY) WITHOUT ROWID;
CREATE TABLE t0(a PRIMARY KEY) WITHOUT ROWID;
CREATE TABLE t1(a PRIMARY KEY) WITHOUT ROWID;
INSERT INTO t1 VALUES('B');
}
db close
set sz [file size test.db]
hexio_write test.db [expr $sz-3] 800380
sqlite3 db test.db
do_execsql_test 2.1 {
PRAGMA mmap_size = 1000000;
SELECT sql FROM sqlite_schema LIMIT 1;
SELECT * FROM t0;
} {1000000 {CREATE TABLE tn1(a PRIMARY KEY) WITHOUT ROWID}}
do_execsql_test 2.2 {
INSERT INTO t0 SELECT * FROM t1;
}
finish_test


@@ -59,14 +59,14 @@ do_vmstep_test 1.4.2 {
do_vmstep_test 1.5.1 {
SELECT count(*) FROM t2 WHERE EXISTS(
SELECT t2.d IS NULL FROM t1 WHERE t1.a=450
SELECT 1 FROM t1 WHERE t1.a=450 AND t2.d IS NULL
)
} 10000 {1000}
} 7000 {0}
do_vmstep_test 1.5.2 {
SELECT count(*) FROM t2 WHERE EXISTS(
SELECT t2.c IS NULL FROM t1 WHERE t1.a=450
SELECT 1 FROM t1 WHERE t1.a=450 AND t2.c IS NULL
)
} +100000 {1000}
} +8000 {0}
#-------------------------------------------------------------------------
reset_db
@@ -111,4 +111,12 @@ do_execsql_test 4.1 {
SELECT * FROM (SELECT 3 AS c FROM t1) AS t3 LEFT JOIN t2 ON c IS NULL;
} {3 {}}
# 2024-03-08 https://sqlite.org/forum/forumpost/440f2a2f17
#
reset_db
do_execsql_test 5.0 {
CREATE TABLE t1(a INT NOT NULL);
SELECT a IS NULL, a IS NOT NULL, count(*) FROM t1;
} {1 0 0}
finish_test


@@ -95,6 +95,7 @@ foreach f [glob -nocomplain \
$testdir/../ext/lsm1/test/*.test \
$testdir/../ext/recover/*.test \
$testdir/../ext/rbu/*.test \
$testdir/../ext/intck/*.test \
] {
lappend alltests $f
}


@@ -372,27 +372,27 @@ ifcapable attach {
db close
sqlite3 db test.db
execsql {PRAGMA integrity_check}
} {{row 1 missing from index i2} {row 2 missing from index i2} {wrong # of entries in index i2}}
} {{wrong # of entries in index i2} {row 1 missing from index i2} {row 2 missing from index i2}}
do_test pragma-3.3 {
execsql {PRAGMA integrity_check=1}
} {{row 1 missing from index i2}}
} {{wrong # of entries in index i2}}
do_test pragma-3.4 {
execsql {
ATTACH DATABASE 'test.db' AS t2;
PRAGMA integrity_check
}
} {{row 1 missing from index i2} {row 2 missing from index i2} {wrong # of entries in index i2} {row 1 missing from index i2} {row 2 missing from index i2} {wrong # of entries in index i2}}
} {{wrong # of entries in index i2} {row 1 missing from index i2} {row 2 missing from index i2} {wrong # of entries in index i2} {row 1 missing from index i2} {row 2 missing from index i2}}
do_test pragma-3.5 {
execsql {
PRAGMA integrity_check=4
}
} {{row 1 missing from index i2} {row 2 missing from index i2} {wrong # of entries in index i2} {row 1 missing from index i2}}
} {{wrong # of entries in index i2} {row 1 missing from index i2} {row 2 missing from index i2} {wrong # of entries in index i2}}
do_catchsql_test pragma-3.6 {
PRAGMA integrity_check=xyz
} {1 {no such table: xyz}}
do_catchsql_test pragma-3.6b {
PRAGMA integrity_check=t2
} {0 {{row 1 missing from index i2} {row 2 missing from index i2} {wrong # of entries in index i2}}}
} {0 {{wrong # of entries in index i2} {row 1 missing from index i2} {row 2 missing from index i2}}}
do_catchsql_test pragma-3.6c {
PRAGMA integrity_check=sqlite_schema
} {0 ok}
@@ -400,7 +400,7 @@ ifcapable attach {
execsql {
PRAGMA integrity_check=0
}
} {{row 1 missing from index i2} {row 2 missing from index i2} {wrong # of entries in index i2} {row 1 missing from index i2} {row 2 missing from index i2} {wrong # of entries in index i2}}
} {{wrong # of entries in index i2} {row 1 missing from index i2} {row 2 missing from index i2} {wrong # of entries in index i2} {row 1 missing from index i2} {row 2 missing from index i2}}
# Add additional corruption by appending unused pages to the end of
# the database file testerr.db
@@ -435,10 +435,10 @@ ifcapable attach {
} {{*** in database t2 ***
Page 4: never used
Page 5: never used
Page 6: never used} {row 1 missing from index i2} {row 2 missing from index i2} {wrong # of entries in index i2}}
Page 6: never used} {wrong # of entries in index i2} {row 1 missing from index i2} {row 2 missing from index i2}}
do_execsql_test pragma-3.9b {
PRAGMA t2.integrity_check=t2;
} {{row 1 missing from index i2} {row 2 missing from index i2} {wrong # of entries in index i2}}
} {{wrong # of entries in index i2} {row 1 missing from index i2} {row 2 missing from index i2}}
do_execsql_test pragma-3.9c {
PRAGMA t2.integrity_check=sqlite_schema;
} {ok}
@@ -455,7 +455,7 @@ Page 4: never used}}
} {{*** in database t2 ***
Page 4: never used
Page 5: never used
Page 6: never used} {row 1 missing from index i2} {row 2 missing from index i2}}
Page 6: never used} {wrong # of entries in index i2} {row 1 missing from index i2}}
do_test pragma-3.12 {
execsql {
PRAGMA integrity_check=4
@@ -463,7 +463,7 @@ Page 6: never used} {row 1 missing from index i2} {row 2 missing from index i2}}
} {{*** in database t2 ***
Page 4: never used
Page 5: never used
Page 6: never used} {row 1 missing from index i2}}
Page 6: never used} {wrong # of entries in index i2}}
do_test pragma-3.13 {
execsql {
PRAGMA integrity_check=3
@@ -487,10 +487,10 @@ Page 5: never used}}
} {{*** in database t2 ***
Page 4: never used
Page 5: never used
Page 6: never used} {row 1 missing from index i2} {row 2 missing from index i2} {wrong # of entries in index i2} {*** in database t3 ***
Page 6: never used} {wrong # of entries in index i2} {row 1 missing from index i2} {row 2 missing from index i2} {*** in database t3 ***
Page 4: never used
Page 5: never used
Page 6: never used} {row 1 missing from index i2} {row 2 missing from index i2} {wrong # of entries in index i2}}
Page 6: never used} {wrong # of entries in index i2} {row 1 missing from index i2} {row 2 missing from index i2}}
do_test pragma-3.16 {
execsql {
PRAGMA integrity_check(10)
@@ -498,10 +498,10 @@ Page 6: never used} {row 1 missing from index i2} {row 2 missing from index i2}
} {{*** in database t2 ***
Page 4: never used
Page 5: never used
Page 6: never used} {row 1 missing from index i2} {row 2 missing from index i2} {wrong # of entries in index i2} {*** in database t3 ***
Page 6: never used} {wrong # of entries in index i2} {row 1 missing from index i2} {row 2 missing from index i2} {*** in database t3 ***
Page 4: never used
Page 5: never used
Page 6: never used} {row 1 missing from index i2}}
Page 6: never used} {wrong # of entries in index i2}}
do_test pragma-3.17 {
execsql {
PRAGMA integrity_check=8
@@ -509,7 +509,7 @@ Page 6: never used} {row 1 missing from index i2}}
} {{*** in database t2 ***
Page 4: never used
Page 5: never used
Page 6: never used} {row 1 missing from index i2} {row 2 missing from index i2} {wrong # of entries in index i2} {*** in database t3 ***
Page 6: never used} {wrong # of entries in index i2} {row 1 missing from index i2} {row 2 missing from index i2} {*** in database t3 ***
Page 4: never used
Page 5: never used}}
do_test pragma-3.18 {
@@ -519,7 +519,7 @@ Page 5: never used}}
} {{*** in database t2 ***
Page 4: never used
Page 5: never used
Page 6: never used} {row 1 missing from index i2}}
Page 6: never used} {wrong # of entries in index i2}}
}
do_test pragma-3.19 {
catch {db close}
@@ -556,6 +556,21 @@ ifcapable altertable {
do_execsql_test pragma-3.23 {
PRAGMA integrity_check(1);
} {{non-unique entry in index t1a}}
# forum post https://sqlite.org/forum/forumpost/ee4f6fa5ab
do_execsql_test pragma-3.24 {
DROP TABLE IF EXISTS t1;
CREATE TABLE t1(a);
INSERT INTO t1 VALUES (1);
ALTER TABLE t1 ADD COLUMN b NOT NULL DEFAULT 0.25;
SELECT * FROM t1;
PRAGMA integrity_check(t1);
} {1 0.25 ok}
do_execsql_test pragma-3.25 {
ALTER TABLE t1 ADD COLUMN c CHECK (1);
SELECT * FROM t1;
PRAGMA integrity_check(t1);
} {1 0.25 {} ok}
}
# PRAGMA integrity check (or more specifically the sqlite3BtreeCount()


@@ -97,7 +97,7 @@ do_test pragma4-2.100 {
}
string map {\[ x \] x \173 {} \175 {}} \
[db eval {EXPLAIN PRAGMA integrity_check}]
} {/ IntegrityCk 2 2 1 x[0-9]+,1x /}
} {/ IntegrityCk 1 2 8 x[0-9]+,1x /}
#--------------------------------------------------------------------------

test/pragma6.test (new file, 74 lines)

@@ -0,0 +1,74 @@
# 2024 February 27
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements tests for PRAGMAs quick_check and integrity_check.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix pragma6
database_may_be_corrupt
#-------------------------------------------------------------------------
#
do_test 1.0 {
sqlite3 db {}
db deserialize [decode_hexdb {
.open --hexdb
| size 12288 pagesize 4096 filename crash-540f4c1eb1e7ac.db
| page 1 offset 0
| 0: 53 51 4c 69 74 65 20 66 6f 72 6d 61 74 20 33 00 SQLite format 3.
| 16: 10 00 01 01 00 40 20 20 00 00 00 00 00 00 00 03 .....@ ........
| 32: 00 bb 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
| 96: 00 00 00 00 0d 00 00 00 02 0f 7f 00 0f c3 0f 7f ................
| 3952: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 42 ...............B
| 3968: 02 06 17 11 11 01 71 74 61 62 6c 65 74 32 74 32 ......qtablet2t2
| 3984: 03 43 52 45 41 54 45 20 54 41 42 4c 45 20 74 32 .CREATE TABLE t2
| 4000: 28 61 20 49 4e 54 2c 20 62 20 41 53 20 28 61 2a (a INT, b AS (a*
| 4016: 32 29 20 53 54 4f 52 45 44 20 4e 4f 54 20 4e 55 2) STORED NOT NU
| 4032: 4c 4c 29 3b 01 06 17 11 11 01 63 74 61 62 6c 65 LL);......ctable
| 4048: 74 31 74 31 02 43 52 45 41 54 45 20 54 41 42 4c t1t1.CREATE TABL
| 4064: 45 20 74 31 28 61 20 49 4e 54 2c 20 62 20 41 53 E t1(a INT, b AS
| 4080: 20 28 61 2a 32 29 20 4e 4f 54 20 4e 55 4c 4c 29 (a*2) NOT NULL)
| page 2 offset 4096
| 0: 0d 00 00 00 05 0f e7 00 00 00 00 00 00 00 00 00 ................
| 4064: 00 00 00 00 00 00 00 00 03 05 02 01 05 03 04 02 ................
| 4080: 01 04 03 03 02 01 03 03 02 02 01 02 02 01 02 09 ................
| page 3 offset 8192
| 0: 0d 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
| 4048: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 05 05 ................
| 4064: 03 01 01 05 0a 05 04 03 01 01 04 08 05 03 03 01 ................
| 4080: 01 03 06 05 02 03 00 00 00 00 00 00 00 00 00 00 ................
| end crash-540f4c1eb1e7ac.db
}]
} {}
do_test 1.1 {
execsql {
CREATE TEMP TABLE t2(
a t1 PRIMARY KEY default 27,
b default(current_timestamp),
d TEXT UNIQUE DEFAULT 'ch`arlie',
c TEXT UNIQUE DEFAULT 084,
UNIQUE(c,b,b,a,b)
) WITHOUT ROWID;
}
catchsql { INSERT INTO t1(a) VALUES(zeroblob(40000)) }
set {} {}
} {}
do_test 1.2 {
execsql { PRAGMA integrity_check; }
execsql { PRAGMA quick_check; }
set {} {}
} {}
finish_test


@@ -3833,4 +3833,21 @@ do_execsql_test printf-18.1 {
SELECT length( format('%,.249f', -5.0e-300) );
} {252}
# 2024-02-16
# https://sqlite.org/forum/info/393708f4a8
#
# The problem was introduced on 2023-07-03 by
# https://sqlite.org/src/info/32befb224b254639
#
do_execsql_test printf-19.1 {
SELECT format('%0.0f %0.0g %0.0g', 0.9, 0.09, 1.9);
} {{1 0.09 2}}
do_execsql_test printf-19.2 {
SELECT format('%0.0f %#0.0f',0.0, 0.0);
} {{0 0.}}
do_execsql_test printf-19.3 {
SELECT format('%,.0f %,.0f',12345e+10, 12345e+11);
} {{123,450,000,000,000 1,234,500,000,000,000}}
finish_test


@@ -103,7 +103,7 @@ foreach {tn sql errname} {
3 { CREATE INDEX i3 ON t1("w") } w
4 { CREATE INDEX i4 ON t1(x) WHERE z="w" } w
} {
do_catchsql_test 2.1.$tn $sql [list 1 "no such column: $errname"]
do_catchsql_test 2.1.$tn $sql [list 1 "no such column: \"$errname\" - should this be a string literal in single-quotes?"]
}
do_execsql_test 2.2 {
@@ -147,19 +147,19 @@ ifcapable altertable {
CREATE TABLE t1(a,b);
CREATE INDEX x1 on t1("b");
ALTER TABLE t1 DROP COLUMN b;
} {1 {error in index x1 after drop column: no such column: b}}
} {1 {error in index x1 after drop column: no such column: "b" - should this be a string literal in single-quotes?}}
do_catchsql_test 3.1 {
DROP TABLE t1;
CREATE TABLE t1(a,"b");
CREATE INDEX x1 on t1("b");
ALTER TABLE t1 DROP COLUMN b;
} {1 {error in index x1 after drop column: no such column: b}}
} {1 {error in index x1 after drop column: no such column: "b" - should this be a string literal in single-quotes?}}
do_catchsql_test 3.2 {
DROP TABLE t1;
CREATE TABLE t1(a,'b');
CREATE INDEX x1 on t1("b");
ALTER TABLE t1 DROP COLUMN b;
} {1 {error in index x1 after drop column: no such column: b}}
} {1 {error in index x1 after drop column: no such column: "b" - should this be a string literal in single-quotes?}}
do_catchsql_test 3.3 {
DROP TABLE t1;
CREATE TABLE t1(a,"b");
@@ -172,7 +172,7 @@ ifcapable altertable {
CREATE INDEX x1 ON t1("a"||"b");
INSERT INTO t1 VALUES(1,2,3),(1,4,5);
ALTER TABLE t1 DROP COLUMN b;
} {1 {error in index x1 after drop column: no such column: b}}
} {1 {error in index x1 after drop column: no such column: "b" - should this be a string literal in single-quotes?}}
sqlite3_db_config db SQLITE_DBCONFIG_DQS_DDL 1
do_catchsql_test 3.5 {
DROP TABLE t1;


@@ -8,6 +8,7 @@
# May you share freely, never taking more than you give.
#
#***********************************************************************
# TESTRUNNER: shell
#
# Test the shell tool ".ar" command.
#


@@ -11,6 +11,7 @@
#
# The focus of this file is testing the CLI shell tool.
#
# TESTRUNNER: shell
#
# Test plan:


@@ -8,6 +8,7 @@
# May you share freely, never taking more than you give.
#
#***********************************************************************
# TESTRUNNER: shell
#
# The focus of this file is testing the CLI shell tool.
#


@@ -8,6 +8,7 @@
# May you share freely, never taking more than you give.
#
#***********************************************************************
# TESTRUNNER: shell
#
# The focus of this file is testing the CLI shell tool.
#


@@ -8,6 +8,7 @@
# May you share freely, never taking more than you give.
#
#***********************************************************************
# TESTRUNNER: shell
#
# The focus of this file is testing the CLI shell tool.
# These tests are specific to the .stats command.


@@ -8,6 +8,7 @@
# May you share freely, never taking more than you give.
#
#***********************************************************************
# TESTRUNNER: shell
#
# The focus of this file is testing the CLI shell tool.
# These tests are specific to the .import command.
@@ -570,4 +571,18 @@ SELECT * FROM t1;}
} {0 { 1 = あい
2 = うえお}}
# 2024-03-11 https://sqlite.org/forum/forumpost/ca014d7358
# Import into a table that contains computed columns.
#
do_test shell5-7.1 {
set out [open shell5.csv w]
fconfigure $out -translation lf
puts $out {aaa|bbb}
close $out
forcedelete test.db
catchcmd :memory: {CREATE TABLE t1(a TEXT, b TEXT, c AS (a||b));
.import shell5.csv t1
SELECT * FROM t1;}
} {0 aaa|bbb|aaabbb}
finish_test


@@ -8,6 +8,7 @@
# May you share freely, never taking more than you give.
#
#***********************************************************************
# TESTRUNNER: shell
#
# Test the shell tool ".lint fkey-indexes" command.
#


@@ -8,6 +8,7 @@
# May you share freely, never taking more than you give.
#
#***********************************************************************
# TESTRUNNER: shell
#
# Test the readfile() function built into the shell tool. Specifically,
# that it does not truncate the blob read at the first embedded 0x00


@@ -8,6 +8,7 @@
# May you share freely, never taking more than you give.
#
#***********************************************************************
# TESTRUNNER: shell
#
# Test the shell tool ".ar" command.
#


@@ -8,6 +8,7 @@
# May you share freely, never taking more than you give.
#
#***********************************************************************
# TESTRUNNER: shell
#
# The focus of this file is testing the CLI shell tool. Specifically,
# testing that it is possible to run a ".dump" script that creates
@@ -71,9 +72,10 @@ do_execsql_test 1.2.1 {
CREATE TABLE t4(hello);
}
db close
do_test 1.2.2 {
catchcmd test.db ".read testdump.txt"
} {1 {Parse error near line 5: table sqlite_master may not be modified}}
# This works ok on the reuse-schema branch
# do_test 1.2.2 {
# catchcmd test.db ".read testdump.txt"
# } {1 {Parse error near line 5: table sqlite_master may not be modified}}
# Check testdump.txt cannot be processed if the db is in safe mode
#


@@ -2150,6 +2150,50 @@ void testset_debug1(void){
}
}
/*
** This testset focuses on the speed of parsing numeric literals (integers
** and real numbers). This was added to test the impact of allowing "_"
** characters to appear in numeric SQL literals to make them easier to read.
** For example, "SELECT 1_000_000;" instead of "SELECT 1000000;".
*/
void testset_parsenumber(void){
const char *zSql1 = "SELECT 1, 12, 123, 1234, 12345, 123456";
const char *zSql2 = "SELECT 8227256643844975616, 7932208612563860480, "
"2010730661871032832, 9138463067404021760, "
"2557616153664746496, 2557616153664746496";
const char *zSql3 = "SELECT 1.0, 1.2, 1.23, 123.4, 1.2345, 1.23456";
const char *zSql4 = "SELECT 8.227256643844975616, 7.932208612563860480, "
"2.010730661871032832, 9.138463067404021760, "
"2.557616153664746496, 2.557616153664746496";
const int NROW = 100*g.szTest;
int ii;
speedtest1_begin_test(100, "parsing small integers");
for(ii=0; ii<NROW; ii++){
sqlite3_exec(g.db, zSql1, 0, 0, 0);
}
speedtest1_end_test();
speedtest1_begin_test(110, "parsing large integers");
for(ii=0; ii<NROW; ii++){
sqlite3_exec(g.db, zSql2, 0, 0, 0);
}
speedtest1_end_test();
speedtest1_begin_test(200, "parsing small reals");
for(ii=0; ii<NROW; ii++){
sqlite3_exec(g.db, zSql3, 0, 0, 0);
}
speedtest1_end_test();
speedtest1_begin_test(210, "parsing large reals");
for(ii=0; ii<NROW; ii++){
sqlite3_exec(g.db, zSql4, 0, 0, 0);
}
speedtest1_end_test();
}
#ifdef __linux__
#include <sys/types.h>
#include <unistd.h>
@@ -2557,6 +2601,8 @@ int main(int argc, char **argv){
testset_fp();
}else if( strcmp(zThisTest,"trigger")==0 ){
testset_trigger();
}else if( strcmp(zThisTest,"parsenumber")==0 ){
testset_parsenumber();
}else if( strcmp(zThisTest,"rtree")==0 ){
#ifdef SQLITE_ENABLE_RTREE
testset_rtree(6, 147);


@@ -707,6 +707,7 @@ if {$SQLITE_MAX_EXPR_DEPTH==0} {
}]
} "1 {Expression tree is too large (maximum depth $::SQLITE_MAX_EXPR_DEPTH)}"
if 0 {
# Attempting to beat the expression depth limit using nested SELECT
# queries causes a parser stack overflow.
do_test sqllimits1-9.2 {
@@ -718,7 +719,6 @@ if {$SQLITE_MAX_EXPR_DEPTH==0} {
catchsql [subst { $expr }]
} "1 {parser stack overflow}"
if 0 {
do_test sqllimits1-9.3 {
execsql {
PRAGMA max_page_count = 1000000; -- 1 GB


@@ -54,22 +54,31 @@ proc usage {} {
Usage:
$a0 ?SWITCHES? ?PERMUTATION? ?PATTERNS?
$a0 PERMUTATION FILE
$a0 help
$a0 njob ?NJOB?
$a0 script ?-msvc? CONFIG
$a0 status
where SWITCHES are:
--buildonly
--dryrun
--explain
--jobs NUMBER-OF-JOBS
--zipvfs ZIPVFS-SOURCE-DIR
Interesting values for PERMUTATION are:
Special values for PERMUTATION that work with plain tclsh:
list - show all allowed PERMUTATION arguments.
mdevtest - tests recommended prior to normal development check-ins.
release - full release test with various builds.
sdevtest - like mdevtest but using ASAN and UBSAN.
Other PERMUTATION arguments must be run using testfixture, not tclsh:
veryquick - a fast subset of the tcl test scripts. This is the default.
full - all tcl test scripts.
all - all tcl test scripts, plus a subset of test scripts rerun
with various permutations.
release - full release test with various builds.
full - all tcl test scripts.
veryquick - a fast subset of the tcl test scripts. This is the default.
If no PATTERN arguments are present, all tests specified by the PERMUTATION
are run. Otherwise, each pattern is interpreted as a glob pattern. Only
@@ -89,6 +98,12 @@ directory as a running testrunner.tcl script that is running tests. The
"status" command prints a report describing the current state and progress
of the tests. The "njob" command may be used to query or modify the number
of sub-processes the test script uses to run tests.
The "script" command outputs the script used to build a configuration.
Add the "-msvc" option for a Windows-compatible script. For a list of
available configurations enter "$a0 script help".
Full documentation here: https://sqlite.org/src/doc/trunk/doc/testrunner.md
}]]
exit 1
@@ -126,6 +141,10 @@ proc guess_number_of_cores {} {
}
proc default_njob {} {
global env
if {[info exists env(NJOB)] && $env(NJOB)>=1} {
return $env(NJOB)
}
set nCore [guess_number_of_cores]
if {$nCore<=2} {
set nHelper 1
@@ -152,6 +171,7 @@ set TRG(fuzztest) 0 ;# is the fuzztest option present.
set TRG(zipvfs) "" ;# -zipvfs option, if any
set TRG(buildonly) 0 ;# True if --buildonly option
set TRG(dryrun) 0 ;# True if --dryrun option
set TRG(explain) 0 ;# True for the --explain option
switch -nocase -glob -- $tcl_platform(os) {
*darwin* {
@@ -159,6 +179,7 @@ switch -nocase -glob -- $tcl_platform(os) {
set TRG(make) make.sh
set TRG(makecmd) "bash make.sh"
set TRG(testfixture) testfixture
set TRG(shell) sqlite3
set TRG(run) run.sh
set TRG(runcmd) "bash run.sh"
}
@@ -167,14 +188,16 @@ switch -nocase -glob -- $tcl_platform(os) {
set TRG(make) make.sh
set TRG(makecmd) "bash make.sh"
set TRG(testfixture) testfixture
set TRG(shell) sqlite3
set TRG(run) run.sh
set TRG(runcmd) "bash run.sh"
}
*win* {
set TRG(platform) win
set TRG(make) make.bat
set TRG(makecmd) make.bat
set TRG(makecmd) "call make.bat"
set TRG(testfixture) testfixture.exe
set TRG(shell) sqlite3.exe
set TRG(run) run.bat
set TRG(runcmd) "run.bat"
}
@@ -326,6 +349,14 @@ if {([llength $argv]==2 || [llength $argv]==1)
}
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
# Check if this is the "help" command:
#
if {[string compare -nocase help [lindex $argv 0]]==0} {
usage
}
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
# Check if this is the "script" command:
#
@@ -435,6 +466,8 @@ for {set ii 0} {$ii < [llength $argv]} {incr ii} {
set TRG(buildonly) 1
} elseif {($n>2 && [string match "$a*" --dryrun]) || $a=="-d"} {
set TRG(dryrun) 1
} elseif {($n>2 && [string match "$a*" --explain]) || $a=="-e"} {
set TRG(explain) 1
} else {
usage
}
@@ -617,7 +650,16 @@ proc add_job {args} {
trdb last_insert_rowid
}
proc add_tcl_jobs {build config patternlist} {
# Argument $build is either an empty string, or else a list of length 3
# describing the job to build testfixture. In the usual form:
#
# {ID DIRNAME DISPLAYNAME}
#
# e.g
#
# {1 /home/user/sqlite/test/testrunner_bld_xyz All-Debug}
#
proc add_tcl_jobs {build config patternlist {shelldepid ""}} {
global TRG
set topdir [file dirname $::testdir]
@@ -666,34 +708,59 @@ proc add_tcl_jobs {build config patternlist} {
if {[lsearch $lProp slow]>=0} { set priority 2 }
if {[lsearch $lProp superslow]>=0} { set priority 4 }
set depid [lindex $build 0]
if {$shelldepid!="" && [lsearch $lProp shell]>=0} { set depid $shelldepid }
add_job \
-displaytype tcl \
-displayname $displayname \
-cmd $cmd \
-depid [lindex $build 0] \
-depid $depid \
-priority $priority
}
}
proc add_build_job {buildname target} {
proc add_build_job {buildname target {postcmd ""} {depid ""}} {
global TRG
set dirname "[string tolower [string map {- _} $buildname]]_$target"
set dirname "testrunner_bld_$dirname"
set cmd "$TRG(makecmd) $target"
if {$postcmd!=""} {
append cmd "\n"
append cmd $postcmd
}
set id [add_job \
-displaytype bld \
-displayname "Build $buildname ($target)" \
-dirname $dirname \
-build $buildname \
-cmd "$TRG(makecmd) $target" \
-cmd $cmd \
-depid $depid \
-priority 3
]
list $id [file normalize $dirname] $buildname
}
proc add_shell_build_job {buildname dirname depid} {
global TRG
if {$TRG(platform)=="win"} {
set path [string map {/ \\} "$dirname/"]
set copycmd "xcopy $TRG(shell) $path"
} else {
set copycmd "cp $TRG(shell) $dirname/"
}
return [
add_build_job $buildname $TRG(shell) $copycmd $depid
]
}
proc add_make_job {bld target} {
global TRG
@@ -767,10 +834,30 @@ proc add_devtest_jobs {lBld patternlist} {
foreach b $lBld {
set bld [add_build_job $b $TRG(testfixture)]
add_tcl_jobs $bld veryquick $patternlist
add_tcl_jobs $bld veryquick $patternlist SHELL
if {$patternlist==""} {
add_fuzztest_jobs $b
}
if {[trdb one "SELECT EXISTS (SELECT 1 FROM jobs WHERE depid='SHELL')"]} {
set sbld [add_shell_build_job $b [lindex $bld 1] [lindex $bld 0]]
set sbldid [lindex $sbld 0]
trdb eval {
UPDATE jobs SET depid=$sbldid WHERE depid='SHELL'
}
}
}
}
# Check to ensure that the interpreter is a full-blown "testfixture"
# build and not just a "tclsh". If this is not the case, issue an
# error message and exit.
#
proc must_be_testfixture {} {
if {[lsearch [info commands] sqlite3_soft_heap_limit]<0} {
puts "Use testfixture, not tclsh, for these arguments."
exit 1
}
}
@@ -789,6 +876,7 @@ proc add_jobs_from_cmdline {patternlist} {
set first [lindex $patternlist 0]
switch -- $first {
all {
must_be_testfixture
set patternlist [lrange $patternlist 1 end]
set clist [trd_all_configs]
foreach c $clist {
@@ -824,7 +912,15 @@ proc add_jobs_from_cmdline {patternlist} {
}
}
list {
set allperm [array names ::testspec]
lappend allperm all mdevtest sdevtest release list
puts "Allowed values for the PERMUTATION argument: [lsort $allperm]"
exit 0
}
default {
must_be_testfixture
if {[info exists ::testspec($first)]} {
add_tcl_jobs "" $first [lrange $patternlist 1 end]
} else {
@@ -948,6 +1044,16 @@ proc launch_another_job {iJob} {
close $fd
}
# Add a batch/shell file command to set the directory used for temp
# files to the test's working directory. Otherwise, tests that use
# large numbers of temp files (e.g. zipvfs), might generate temp
# filename collisions.
if {$TRG(platform)=="win"} {
set set_tmp_dir "SET SQLITE_TMPDIR=[file normalize $dir]"
} else {
set set_tmp_dir "export SQLITE_TMPDIR=\"[file normalize $dir]\""
}
if { $TRG(dryrun) } {
mark_job_as_finished $job(jobid) "" done 0
@@ -962,7 +1068,8 @@ proc launch_another_job {iJob} {
set pwd [pwd]
cd $dir
set fd [open $TRG(run) w]
puts $fd $job(cmd)
puts $fd $set_tmp_dir
puts $fd $job(cmd)
close $fd
set fd [open "|$TRG(runcmd) 2>@1" r]
cd $pwd
@@ -1078,15 +1185,42 @@ proc handle_buildonly {} {
}
}
# Handle the --explain option. Provide a human-readable
# explanation of all the tests that are in the trdb database jobs
# table.
#
proc explain_layer {indent depid} {
global TRG
if {$TRG(buildonly)} {
set showtests 0
} else {
set showtests 1
}
trdb eval {SELECT jobid, displayname, displaytype, dirname
FROM jobs WHERE depid=$depid ORDER BY displayname} {
if {$displaytype=="bld"} {
puts "${indent}$displayname in $dirname"
explain_layer "${indent} " $jobid
} elseif {$showtests} {
puts "${indent}[lindex $displayname end]"
}
}
}
proc explain_tests {} {
explain_layer "" ""
}
sqlite3 trdb $TRG(dbname)
trdb timeout $TRG(timeout)
set tm [lindex [time { make_new_testset }] 0]
if {$TRG(nJob)>1} {
puts "splitting work across $TRG(nJob) jobs"
if {$TRG(explain)} {
explain_tests
} else {
if {$TRG(nJob)>1} {
puts "splitting work across $TRG(nJob) jobs"
}
puts "built testset in [expr $tm/1000]ms.."
handle_buildonly
run_testset
}
puts "built testset in [expr $tm/1000]ms.."
handle_buildonly
run_testset
trdb close
#puts [pwd]

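As a rough sketch of how the new testrunner.tcl commands documented in the usage text above might be invoked (the job count 8 is arbitrary, and All-Debug is the configuration name used as an example in the add_tcl_jobs comment; exact behavior depends on the local build):

    # plain tclsh is sufficient for these forms, per the usage text
    tclsh testrunner.tcl help                          # print the usage message
    tclsh testrunner.tcl njob 8                        # set the number of helper processes to 8
    tclsh testrunner.tcl script All-Debug > make.sh    # emit the build script for one configuration
    tclsh testrunner.tcl --explain mdevtest            # list the builds and tests mdevtest would run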

@@ -598,7 +598,12 @@ proc trd_buildscript {config srcdir bMsvc} {
# Ensure that the named configuration exists.
if {![info exists build($config)]} {
error "No such build config: $config"
if {$config!="help"} {
puts "No such build config: $config"
}
puts "Available configurations: [lsort [array names build]]"
flush stdout
exit 1
}
# Generate and return the script.
@@ -637,4 +642,3 @@ proc trd_test_script_properties {path} {
set trd_test_script_properties_cache($path)
}


@@ -49,7 +49,7 @@ do_test tkt-8454a207b9.4 {
ALTER TABLE t1 ADD COLUMN e DEFAULT -123.0;
SELECT e, typeof(e) FROM t1;
}
} {-123 integer}
} {-123.0 real}
do_test tkt-8454a207b9.5 {
db eval {
ALTER TABLE t1 ADD COLUMN f DEFAULT -123.5;


@@ -30,7 +30,7 @@ do_test 2.1 {
catchsql { SELECT datetime('now', 'localtime') }
} {1 {local time unavailable}}
do_test 2.2 {
catchsql { SELECT datetime('now', 'utc') }
catchsql { SELECT datetime('2000-01-01', 'utc') }
} {1 {local time unavailable}}
sqlite3_test_control SQLITE_TESTCTRL_LOCALTIME_FAULT 0


@@ -12,8 +12,6 @@
# of this file is testing the interaction of SQLite manifest types
# with Tcl dual-representations.
#
# $Id: types3.test,v 1.8 2008/04/28 13:02:58 drh Exp $
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
@@ -96,4 +94,32 @@ do_test types3-2.6 {
tcl_variable_type V
} {}
# See https://sqlite.org/forum/forumpost/3776b48e71
#
# On a text-affinity comparison of two values where one of
# the values has both MEM_Str and a numeric type like MEM_Int,
# make sure that only the MEM_Str representation is used.
#
sqlite3_create_function db
do_execsql_test types3-3.1 {
DROP TABLE IF EXISTS t1;
CREATE TABLE t1(x TEXT PRIMARY KEY);
INSERT INTO t1 VALUES('1');
SELECT * FROM t1 WHERE NOT x=upper(1);
} {}
do_execsql_test types3-3.2 {
SELECT * FROM t1 WHERE NOT x=add_text_type(1);
} {}
do_execsql_test types3-3.3 {
SELECT * FROM t1 WHERE NOT x=add_int_type('1');
} {}
do_execsql_test types3-3.4 {
DELETE FROM t1;
INSERT INTO t1 VALUES(1.25);
SELECT * FROM t1 WHERE NOT x=add_real_type('1.25');
} {}
do_execsql_test types3-3.5 {
SELECT * FROM t1 WHERE NOT x=add_text_type(1.25);
} {}
finish_test


@@ -408,4 +408,46 @@ do_catchsql_test 2.1 {
} {1 {no such table: nosuchtable}}
# 2024-03-08 https://sqlite.org/forum/forumpost/919c6579c8
# A redundant ON CONFLICT clause in an upsert can lead to
# index corruption.
#
reset_db
do_execsql_test 3.0 {
CREATE TABLE t1(aa INTEGER PRIMARY KEY, bb INT);
INSERT INTO t1 VALUES(11,22);
CREATE UNIQUE INDEX t1bb ON t1(bb);
REPLACE INTO t1 VALUES(11,33)
ON CONFLICT(bb) DO UPDATE SET aa = 44
ON CONFLICT(bb) DO UPDATE SET aa = 44;
PRAGMA integrity_check;
} {ok}
do_execsql_test 3.1 {
SELECT * FROM t1 NOT INDEXED;
} {11 33}
do_execsql_test 3.2 {
SELECT * FROM t1 INDEXED BY t1bb;
} {11 33}
do_execsql_test 3.3 {
DROP TABLE t1;
CREATE TABLE t1(aa INTEGER PRIMARY KEY, bb INT, cc INT);
INSERT INTO t1 VALUES(10,21,32),(11,22,33),(12,23,34);
CREATE UNIQUE INDEX t1bb ON t1(bb);
CREATE UNIQUE INDEX t1cc ON t1(cc);
REPLACE INTO t1 VALUES(11,44,55)
ON CONFLICT(bb) DO UPDATE SET aa = 99
ON CONFLICT(cc) DO UPDATE SET aa = 99
ON CONFLICT(bb) DO UPDATE SET aa = 99;
PRAGMA integrity_check;
} {ok}
do_execsql_test 3.4 {
SELECT * FROM t1 NOT INDEXED ORDER BY +aa;
} {10 21 32 11 44 55 12 23 34}
do_execsql_test 3.5 {
SELECT * FROM t1 INDEXED BY t1bb ORDER BY +aa;
} {10 21 32 11 44 55 12 23 34}
do_execsql_test 3.6 {
SELECT * FROM t1 INDEXED BY t1cc ORDER BY +aa;
} {10 21 32 11 44 55 12 23 34}
finish_test


@@ -209,4 +209,22 @@ do_eqp_test 710 {
`--SEARCH t1 USING INDEX idx (<expr>=?)
}
# 2024-03-07 https://sqlite.org/forum/forumpost/ecdfc02339
# A refinement is needed to the enhancements tested by the prior test case
# to avoid another problem with indexes on constant expressions.
#
reset_db
db null NULL
do_execsql_test 800 {
CREATE TABLE t0(c0, c1);
CREATE TABLE t1(c2);
CREATE INDEX i0 ON t1(NULL);
INSERT INTO t1(c2) VALUES (0.2);
CREATE VIEW v0(c3) AS SELECT DISTINCT c2 FROM t1;
SELECT * FROM v0 LEFT JOIN t0 ON c3<NULL LEFT JOIN t1 ON 1;
} {0.2 NULL NULL 0.2}
do_execsql_test 810 {
SELECT * FROM v0 LEFT JOIN t0 ON c3<NULL LEFT JOIN t1 ON 1 WHERE c2/0.1;
} {0.2 NULL NULL 0.2}
finish_test