
Merge the latest trunk changes into the reuse-schema branch.

FossilOrigin-Name: 858163f93893b0f481b27e39f4f7b3f51290606ad96e5f38cea2933c92398ff2
drh
2025-02-03 15:17:31 +00:00
99 changed files with 4198 additions and 2607 deletions

View File

@@ -563,4 +563,32 @@ do_execsql_test autoindex-1120 {
SELECT * FROM t1 LEFT JOIN t2 ON (t2.c=+t1.a) LEFT JOIN t3 ON (t2.d IS NULL);
} {1 1 1 2 {} {}}
# 2025-01-18
# Added support for automatic indexes on WITHOUT ROWID tables.
#
reset_db
do_execsql_test autoindex-1200 {
CREATE TABLE t1(a INT, b INT, x INT, PRIMARY KEY(a,b)) WITHOUT ROWID;
INSERT INTO t1 VALUES(1,2,90),(1,3,91),(1,4,92);
CREATE TABLE t2a(c INTEGER PRIMARY KEY, i1 INT);
CREATE TABLE t2b(i1 INTEGER PRIMARY KEY, d INT);
CREATE VIEW t2(c,d) AS SELECT c, d FROM t2a NATURAL JOIN t2b;
INSERT INTO t2a VALUES(3,93),(4,94),(5,95),(6,96),(7,97);
INSERT INTO t2b VALUES(91,11),(92,22),(93,33),(94,44),(95,55);
CREATE TABLE dual(dummy TEXT);
INSERT INTO dual(dummy) VALUES('x');
}
db null NULL
do_execsql_test autoindex-1210 {
SELECT t1.*, t2.* FROM t2 LEFT OUTER JOIN t1 ON b=c ORDER BY +b;
} {
NULL NULL NULL 5 55
1 3 91 3 33
1 4 92 4 44
}
do_execsql_test autoindex-1211 {
EXPLAIN QUERY PLAN
SELECT t1.*, t2.* FROM t2 LEFT OUTER JOIN t1 ON b=c ORDER BY +b;
} {/SEARCH t1 USING AUTOMATIC COVERING INDEX/}
finish_test

View File

@@ -689,7 +689,9 @@ do_test capi3-6.3 {
sqlite3_finalize $STMT
} {SQLITE_OK}
-if {[clang_sanitize_address]==0} {
+if {0 && [clang_sanitize_address]==0} {
+# This use-after-free occasionally causes segfaults during ordinary
+# builds. Let's just disable it completely.
do_test capi3-6.4-misuse {
db cache flush
sqlite3_close $DB

View File

@@ -651,5 +651,13 @@ datetest 19.51 {date('2000-08-31','+0022-06-00','floor')} {2023-02-28}
datetest 19.52 {date('2000-08-31','+0023-06-00','ceiling')} {2024-03-02}
datetest 19.53 {date('2000-08-31','+0022-06-00','ceiling')} {2023-03-03}
# 2025-01-21
# https://sqlite.org/forum/forumpost/766a2c9231
#
# Excess digits in fractional seconds should be discarded (truncated,
# not rounded up), so that, for example, 23:59:59.9995 cannot carry
# over into the invalid time 23:59:60.
#
datetest 20.1 {datetime('2024-12-31 23:59:59.9990')} {2024-12-31 23:59:59}
datetest 20.2 {datetime('2024-12-31 23:59:59.9999999999999')} \
{2024-12-31 23:59:59}
datetest 20.3 {datetime('2024-12-31 23:59:59.9995')} {2024-12-31 23:59:59}
datetest 20.4 {datetime('2024-12-31 23:59:58.9995')} {2024-12-31 23:59:58}
finish_test

View File

@@ -250,7 +250,22 @@ do_catchsql_test 4.2 {
COMMIT;
} {1 {FOREIGN KEY constraint failed}}
#-------------------------------------------------------------------------
# Check that an outstanding deferred FOREIGN KEY violation can still be
# resolved, and the transaction committed, after "PRAGMA
# defer_foreign_keys = 1" is set mid-transaction.
#
reset_db
do_execsql_test 5.0 {
PRAGMA foreign_keys = 1;
CREATE TABLE p1(a INTEGER PRIMARY KEY, b);
CREATE TABLE c1(x REFERENCES p1 DEFERRABLE INITIALLY DEFERRED);
}
do_execsql_test 5.1 {
BEGIN;
INSERT INTO c1 VALUES(123);
PRAGMA defer_foreign_keys = 1;
INSERT INTO p1 VALUES(123, 'one two three');
COMMIT;
}
finish_test

View File

@@ -219,4 +219,33 @@ do_execsql_test 3.8 {
SELECT t1.a, t2.b FROM t1, t2 WHERE (t1.a, t2.b) IN ((1, 2));
} {1 2}
# 2025-01-30 Infinite loop in byte-code discovered by dbsqlfuzz
# having to do with SubrtnSig logic. The code was using a Subroutine
# from within itself, resulting in infinite recursion.
#
# This test will spin forever if the bug has not been fixed, or if
# it reappears.
#
reset_db
do_execsql_test 4.0 {
CREATE TABLE t1(a INTEGER PRIMARY KEY, b);
INSERT INTO t1 VALUES(1,x'1111');
CREATE TABLE t2(c);
CREATE TABLE t3(d);
CREATE TRIGGER t1tr UPDATE ON t1 BEGIN
UPDATE t1 SET b=x'2222' FROM t2;
UPDATE t1
SET b = (SELECT a IN (SELECT a
FROM t1
WHERE (b,a) IN (SELECT rowid, d
FROM t3
)
)
FROM t1 NATURAL RIGHT JOIN t1
);
END;
UPDATE t1 SET b=x'3333';
SELECT quote(b) FROM t1;
} {X'3333'}
finish_test

View File

@@ -275,4 +275,84 @@ do_eqp_test like3-6.240 {
}
}
#-------------------------------------------------------------------------
ifcapable utf16 {
reset_db
do_execsql_test like3-7.0 {
PRAGMA encoding = 'UTF-16be';
CREATE TABLE Example(word TEXT NOT NULL);
CREATE INDEX Example_word on Example(word);
INSERT INTO Example VALUES(char(0x307F));
}
do_execsql_test like3-7.1 {
SELECT char(0x307F)=='み';
} {1}
do_execsql_test like3-7.2 {
SELECT * FROM Example WHERE word GLOB 'み*'
} {み}
do_execsql_test like3-7.3 {
SELECT * FROM Example WHERE word >= char(0x307F) AND word < char(0x3080);
} {み}
}
#-------------------------------------------------------------------------
reset_db
foreach enc {
UTF-8
UTF-16le
UTF-16be
} {
foreach {tn expr} {
1 "CAST (X'FF' AS TEXT)"
2 "CAST (X'FFBF' AS TEXT)"
3 "CAST (X'FFBFBF' AS TEXT)"
4 "CAST (X'FFBFBFBF' AS TEXT)"
5 "'abc' || CAST (X'FF' AS TEXT)"
6 "'def' || CAST (X'FFBF' AS TEXT)"
7 "'ghi' || CAST (X'FFBFBF' AS TEXT)"
8 "'jkl' || CAST (X'FFBFBFBF' AS TEXT)"
} {
reset_db
execsql "PRAGMA encoding = '$enc'"
set tn utf[string range $enc 4 end].$tn
do_execsql_test like3-8.$tn.1 {
CREATE TABLE t1(x);
}
do_execsql_test like3-8.$tn.2 {
PRAGMA encoding
} $enc
do_execsql_test like3-8.$tn.3 "
INSERT INTO t1 VALUES( $expr )
"
do_execsql_test like3-8.$tn.4 {
SELECT typeof(x) FROM t1
} {text}
set x [db one {SELECT x || '%' FROM t1}]
do_execsql_test like3-8.$tn.5 {
SELECT rowid FROM t1 WHERE x LIKE $x
} 1
do_execsql_test like3-8.$tn.6 {
CREATE INDEX i1 ON t1(x);
}
do_execsql_test like3-8.$tn.7 {
SELECT rowid FROM t1 WHERE x LIKE $x
} 1
}
}
finish_test

test/speedtest.md Normal file
View File

@@ -0,0 +1,53 @@
# Performance And Size Measurements
This document shows a procedure for making performance and size
comparisons between two versions of the SQLite Amalgamation "sqlite3.c".
You will need:
* fossil
* valgrind
* tclsh
* A script or program named "open" that brings up *.txt files in an
editor for viewing. (Macs provide this by default. You'll need to
come up with your own on Linux and Windows; a minimal stand-in is
sketched just after this list.)
* An SQLite source tree
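A minimal Linux stand-in for the "open" helper, assuming tclsh and the
freedesktop "xdg-open" utility are available (a sketch, not part of this
commit):

    #!/usr/bin/env tclsh
    # open -- hand each file named on the command line to the desktop's
    # default handler so that it comes up in a viewer/editor.
    # Install somewhere on $PATH and mark it executable (chmod +x).
    foreach f $argv {
        exec xdg-open $f &
    }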
The procedure described in this document is not the only way to make
performance and size measurements. Use this as a guide and make
adjustments as needed.
## Establish the baseline measurement
* Begin at the root of the SQLite source tree
* <b>mkdir -p ../speed</b> <br>
&uarr; Speed measurement output files will go into this directory.
You can actually put those files wherever you want. This is just a
suggestion. It might be good to keep these files outside of the
source tree so that "fossil clean" does not delete them.
* Obtain the baseline SQLite amalgamation. For the purpose of this
technical note, assume the baseline SQLite sources are in files
"../baseline/sqlite3.c" and "../baseline/sqlite3.h".
* <b>test/speedtest.tcl ../baseline/sqlite3.c ../speed/baseline.txt</b> <br>
&uarr; The performance measurements will be written into ../speed/baseline.txt
and that file will be brought up in an editor for easy viewing. <br>
&uarr; The "sqlite3.h" file will be taken from the directory that contains
the "sqlite3.c" amalgamation file.
## Comparing the current checkout against the baseline
* <b>make sqlite3.c</b>
* <b>test/speedtest.tcl sqlite3.c ../speed/test.txt ../speed/baseline.txt</b> <br>
&uarr; Test results written into ../speed/test.txt and then
"fossil xdiff" is run to compare ../speed/baseline.txt against
the new test results.
## When to do this
Performance and size checks should be done prior to trunk check-ins.
Sometimes a seemingly innocuous change can have large performance
impacts. A large impact does not mean that the change cannot continue,
but it is important to be aware of it.
## Additional hints
Use the --help option to test/speedtest.tcl to see other available options.
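For example, from the top of the source tree:

    tclsh test/speedtest.tcl --help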

test/speedtest.tcl Executable file
View File

@@ -0,0 +1,290 @@
#!/bin/sh
# the next line restarts using tclsh \
exec tclsh "$0" ${1+"$@"}
#
# This program runs performance testing on sqlite3.c. Usage:
set usage {USAGE:
speedtest.tcl sqlite3.c x1.txt trunk.txt -Os -DSQLITE_ENABLE_STAT4
                  |       |       |     `-----------------------'
File to test ----'        |       |                 |
                          |       |                 `- options
Output filename ---------'        |
                                  `--- optional prior output to diff
Do a cachegrind performance analysis of the named sqlite3.c file and
write the results into the output file. A ".txt" suffix is appended to the
output-file (and diff-file) name if it is not already present. If the
diff-file is specified then show a diff from the diff-file to the new
output.
Other options include:
CC=... Specify an alternative C compiler. Default is "gcc".
-D... -D and -O options are passed through to the C compiler.
--dryrun Show what would happen but don't do anything.
--help Show this help screen.
--lean "Lean" mode.
--lookaside N SZ Lookaside uses N slots of SZ bytes each.
--pagesize N Use N as the page size.
--quiet | -q "Quiet". Put results in the file but don't pop up an editor.
--testset TEST Specify the specific testset to use. The default
is "mix1". Other options include: "main", "json",
"cte", "orm", "fp", "rtree".
}
set srcfile {}
set outfile {}
set difffile {}
set cflags {-DSQLITE_OMIT_LOAD_EXTENSION -DSQLITE_THREADSAFE=0}
set cc gcc
set testset mix1
set dryrun 0
set quiet 0
set speedtestflags {--shrink-memory --reprepare --stats --heap 40000000 64}
lappend speedtestflags --journal wal --size 5
for {set i 0} {$i<[llength $argv]} {incr i} {
set arg [lindex $argv $i]
if {[string index $arg 0]=="-"} {
switch -- $arg {
-pagesize -
--pagesize {
lappend speedtestflags --pagesize
incr i
lappend speedtestflags [lindex $argv $i]
}
-lookaside -
--lookaside {
lappend speedtestflags --lookaside
incr i
lappend speedtestflags [lindex $argv $i]
incr i
lappend speedtestflags [lindex $argv $i]
}
-lean -
--lean {
lappend cflags \
-DSQLITE_DEFAULT_MEMSTATUS=0 \
-DSQLITE_DEFAULT_WAL_SYNCHRONOUS=1 \
-DSQLITE_LIKE_DOESNT_MATCH_BLOBS=1 \
-DSQLITE_MAX_EXPR_DEPTH=1 \
-DSQLITE_OMIT_DECLTYPE \
-DSQLITE_OMIT_DEPRECATED \
-DSQLITE_OMIT_PROGRESS_CALLBACK \
-DSQLITE_OMIT_SHARED_CACHE \
-DSQLITE_USE_ALLOCA
}
-testset -
--testset {
incr i
set testset [lindex $argv $i]
}
-n -
-dryrun -
--dryrun {
set dryrun 1
}
-? -
-help -
--help {
puts $usage
exit 0
}
-q -
-quiet -
--quiet {
set quiet 1
}
default {
lappend cflags $arg
}
}
continue
}
if {[string match CC=* $arg]} {
set cc [string range $arg 3 end]
continue
}
if {[string match *.c $arg]} {
if {$srcfile!=""} {
puts stderr "multiple source files: $srcfile $arg"
exit 1
}
set srcfile $arg
continue
}
if {[lsearch {main cte rtree orm fp json parsenumber mix1} $arg]>=0} {
set testset $arg
continue
}
if {$outfile==""} {
set outfile $arg
continue
}
if {$difffile==""} {
set difffile $arg
continue
}
puts stderr "unknown option: \"$arg\". Use --help for more info."
exit 1
}
if {[lsearch -glob $cflags -O*]<0} {
lappend cflags -Os
}
if {[lsearch -glob $cflags -DSQLITE_ENABLE_MEMSYS*]<0} {
lappend cflags -DSQLITE_ENABLE_MEMSYS5
}
if {[lsearch -glob $cflags -DSQLITE_ENABLE_RTREE*]<0} {
lappend cflags -DSQLITE_ENABLE_RTREE
}
if {$srcfile==""} {
puts stderr "no sqlite3.c source file specified"
exit 1
}
if {![file readable $srcfile]} {
puts stderr "source file \"$srcfile\" does not exist"
exit 1
}
if {$outfile==""} {
puts stderr "no output file specified"
exit 1
}
if {![string match *.* [file tail $outfile]]} {
append outfile .txt
}
if {$difffile!=""} {
if {![file exists $difffile]} {
if {[file exists $difffile.txt]} {
append difffile .txt
} else {
puts stderr "No such file: \"$difffile\""
exit 1
}
}
}
set cccmd [list $cc -g]
lappend cccmd -I[file dir $srcfile]
lappend cccmd {*}[lsort $cflags]
lappend cccmd [file dir $argv0]/speedtest1.c
lappend cccmd $srcfile
lappend cccmd -o speedtest1
puts $cccmd
if {!$dryrun} {
exec {*}$cccmd
}
lappend speedtestflags --testset $testset
set stcmd [list valgrind --tool=cachegrind ./speedtest1 {*}$speedtestflags]
lappend stcmd >valgrind-out.txt 2>valgrind-err.txt
puts $stcmd
if {!$dryrun} {
exec {*}$stcmd
}
set maxmtime 0
set cgfile {}
foreach cgout [glob -nocomplain cachegrind.out.*] {
if {[file mtime $cgout]>$maxmtime} {
set cgfile $cgout
set maxmtime [file mtime $cgfile]
}
}
if {$cgfile==""} {
puts "no cachegrind output"
exit 1
}
############# Process the cachegrind.out.# file ##########################
set fd [open $outfile wb]
set in [open "|cg_annotate --show=Ir --auto=yes --context=40 $cgfile" r]
set dest !
set out(!) {}
set linenum 0
set cntlines 0 ;# true to remember cycle counts on each line
set seenSqlite3 0 ;# true if we have seen the sqlite3.c file
while {![eof $in]} {
set line [string map {\t { }} [gets $in]]
if {[regexp {^-- Auto-annotated source: (.*)} $line all name]} {
set dest $name
if {[string match */sqlite3.c $dest]} {
set cntlines 1
set seenSqlite3 1
} else {
set cntlines 0
}
} elseif {[regexp {^-- line (\d+) ------} $line all ln]} {
set line [lreplace $line 2 2 {#}]
set linenum [expr {$ln-1}]
} elseif {[regexp {^The following files chosen for } $line]} {
set dest !
}
append out($dest) $line\n
if {$cntlines} {
incr linenum
if {[regexp {^ *([0-9,]+) } $line all x]} {
set x [string map {, {}} $x]
set cycles($linenum) $x
}
}
}
foreach x [lsort [array names out]] {
puts $fd $out($x)
}
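# For reference, the three cg_annotate line shapes that the input loop
# above keys on look roughly like this (illustrative only; the exact
# formatting varies between valgrind versions):
#
#   -- Auto-annotated source: /path/to/sqlite3.c
#   -- line 2345 ------------------------------------------
#        1,234,567  if( rc!=SQLITE_OK ) goto abort_due_to_error;
#
# The first form switches the output destination (and enables per-line
# counting when the file is sqlite3.c), the second resynchronizes the
# line counter, and leading comma-grouped counts are recorded in cycles().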
# If the sqlite3.c file has been seen, then output a summary of the
# cycle counts for each file that went into making up sqlite3.c
#
if {$seenSqlite3} {
close $in
set in [open sqlite3.c]
set linenum 0
set fn sqlite3.c
set pattern1 {^/\*+ Begin file ([^ ]+) \*}
set pattern2 {^/\*+ Continuing where we left off in ([^ ]+) \*}
while {![eof $in]} {
set line [gets $in]
incr linenum
if {[regexp $pattern1 $line all newfn]} {
set fn $newfn
} elseif {[regexp $pattern2 $line all newfn]} {
set fn $newfn
} elseif {[info exists cycles($linenum)]} {
incr fcycles($fn) $cycles($linenum)
}
}
close $in
puts $fd \
{**********************************************************************}
set lx {}
set sum 0
foreach {fn cnt} [array get fcycles] {
lappend lx [list $cnt $fn]
incr sum $cnt
}
puts $fd [format {%20s %14d %8.3f%%} TOTAL $sum 100]
foreach entry [lsort -index 0 -integer -decreasing $lx] {
foreach {cnt fn} $entry break
puts $fd [format {%20s %14d %8.3f%%} $fn $cnt [expr {$cnt*100.0/$sum}]]
}
}
puts $fd "Executable size:"
close $fd
exec size speedtest1 >>$outfile
#
# Processed cachegrind output should now be in the $outfile
#############################################################################
if {$quiet} {
# Skip this last part of popping up a GUI viewer
} elseif {$difffile!=""} {
set fossilcmd {fossil xdiff --tk -c 20}
lappend fossilcmd $difffile
lappend fossilcmd $outfile
lappend fossilcmd &
puts $fossilcmd
if {!$dryrun} {
exec {*}$fossilcmd
}
} else {
if {!$dryrun} {
exec open $outfile
}
}
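# To sanity-check the $cccmd and $stcmd command lines that this script
# assembles, without compiling anything or running valgrind, use a dry
# run (hypothetical paths shown):
#
#   tclsh test/speedtest.tcl --dryrun ../baseline/sqlite3.c ../speed/baseline.txt
#
# With --dryrun the script prints both command lines but skips the execs.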

View File

@@ -1,6 +1,28 @@
/*
** A program for performance testing.
**
** To build this program against an historical version of SQLite for comparison
** testing:
**
** Unix:
**
** ./configure --all
** make clean speedtest1
** mv speedtest1 speedtest1-current
** cp $HISTORICAL_SQLITE3_C_H .
** touch sqlite3.c sqlite3.h .target_source
** make speedtest1
** mv speedtest1 speedtest1-baseline
**
** Windows:
**
** nmake /f Makefile.msc clean speedtest1.exe
** mv speedtest1.exe speedtest1-current.exe
** cp $HISTORICAL_SQLITE3_C_H .
** touch sqlite3.c sqlite3.h .target_source
** nmake /f Makefile.msc speedtest1.exe
** mv speedtest1.exe speedtest1-baseline.exe
**
** The available command-line options are described below:
*/
static const char zHelp[] =
@@ -42,7 +64,10 @@ static const char zHelp[] =
" --stats Show statistics at the end\n"
" --stmtscanstatus Activate SQLITE_DBCONFIG_STMT_SCANSTATUS\n"
" --temp N N from 0 to 9. 0: no temp table. 9: all temp tables\n"
" --testset T Run test-set T (main, cte, rtree, orm, fp, debug)\n"
" --testset T Run test-set T (main, cte, rtree, orm, fp, json,"
" debug)\n"
" Can be a comma-separated list of values, with /SCALE\n"
" suffixes or macro \"mix1\"\n"
" --trace Turn on SQL tracing\n"
" --threads N Use up to N threads for sorting\n"
" --utf16be Set text encoding to UTF-16BE\n"
@@ -99,6 +124,7 @@ static struct Global {
int bMemShrink; /* Call sqlite3_db_release_memory() often */
int eTemp; /* 0: no TEMP. 9: always TEMP. */
int szTest; /* Scale factor for test iterations */
int szBase; /* Base size prior to testset scaling */
int nRepeat; /* Repeat selects this many times */
int doCheckpoint; /* Run PRAGMA wal_checkpoint after each trans */
int nReserve; /* Reserve bytes */
@@ -2149,6 +2175,120 @@ void testset_debug1(void){
}
}
/*
** Performance tests for JSON.
*/
void testset_json(void){
unsigned int r = 0x12345678;
sqlite3_test_control(SQLITE_TESTCTRL_PRNG_SEED, r, g.db);
speedtest1_begin_test(100, "table J1 is %d rows of JSONB",
g.szTest*5);
speedtest1_exec(
"CREATE TABLE j1(x JSONB);\n"
"WITH RECURSIVE\n"
" jval(n,j) AS (\n"
" VALUES(0,'{}'),(1,'[]'),(2,'true'),(3,'false'),(4,'null'),\n"
" (5,'{x:1,y:2}'),(6,'0.0'),(7,'3.14159'),(8,'-99.9'),\n"
" (9,'[1,2,\"\\n\\u2192\\\"\\u2190\",4]')\n"
" ),\n"
" c(x) AS (VALUES(1) UNION ALL SELECT x+1 FROM c WHERE x<26*26-1),\n"
" array1(y) AS MATERIALIZED (\n"
" SELECT jsonb_group_array(\n"
" jsonb_object('x',x,\n"
" 'y',jsonb(coalesce(j,random()%%10000)),\n"
" 'z',hex(randomblob(50)))\n"
" )\n"
" FROM c LEFT JOIN jval ON (x%%20)=n\n"
" ),\n"
" object1(z) AS MATERIALIZED (\n"
" SELECT jsonb_group_object(char(0x61+x%%26,0x61+(x/26)%%26),\n"
" jsonb( coalesce(j,random()%%10000)))\n"
" FROM c LEFT JOIN jval ON (x%%20)=n\n"
" ),\n"
" c2(n) AS (VALUES(1) UNION ALL SELECT n+1 FROM c2 WHERE n<%d)\n"
"INSERT INTO j1(x)\n"
" SELECT jsonb_object('a',n,'b',n+10000,'c',jsonb(y),'d',jsonb(z),\n"
" 'e',n+20000,'f',n+30000)\n"
" FROM array1, object1, c2;",
g.szTest*5
);
speedtest1_end_test();
speedtest1_begin_test(110, "table J2 is %d rows from J1 converted to text", g.szTest);
speedtest1_exec(
"CREATE TABLE j2(x JSON TEXT);\n"
"INSERT INTO j2(x) SELECT json(x) FROM j1 LIMIT %d", g.szTest
);
speedtest1_end_test();
speedtest1_begin_test(120, "create indexes on JSON expressions on J1");
speedtest1_exec(
"BEGIN;\n"
"CREATE INDEX j1x1 ON j1(x->>'a');\n"
"CREATE INDEX j1x2 ON j1(x->>'b');\n"
"CREATE INDEX j1x3 ON j1(x->>'f');\n"
"COMMIT;\n"
);
speedtest1_end_test();
speedtest1_begin_test(130, "create indexes on JSON expressions on J2");
speedtest1_exec(
"BEGIN;\n"
"CREATE INDEX j2x1 ON j2(x->>'a');\n"
"CREATE INDEX j2x2 ON j2(x->>'b');\n"
"CREATE INDEX j2x3 ON j2(x->>'f');\n"
"COMMIT;\n"
);
speedtest1_end_test();
speedtest1_begin_test(140, "queries against J1");
speedtest1_exec(
"WITH c(n) AS (VALUES(0) UNION ALL SELECT n+1 FROM c WHERE n<7)\n"
" SELECT sum(x->>format('$.c[%%d].x',n)) FROM c, j1;\n"
"WITH c(n) AS (VALUES(1) UNION ALL SELECT n+1 FROM c WHERE n<5)\n"
" SELECT sum(x->>format('$.\"c\"[#-%%d].y',n)) FROM c, j1;\n"
"SELECT sum(x->>'$.d.ez' + x->>'$.d.\"xz\"' + x->>'a' + x->>'$.c[10].y') FROM j1;\n"
"SELECT x->>'$.d.tz[2]', x->'$.d.tz' FROM j1;\n"
);
speedtest1_end_test();
speedtest1_begin_test(141, "queries involving json_type()");
speedtest1_exec(
"WITH c(n) AS (VALUES(1) UNION ALL SELECT n+1 FROM c WHERE n<20)\n"
" SELECT json_type(x,format('$.c[#-%%d].y',n)), count(*)\n"
" FROM c, j1\n"
" WHERE j1.rowid=1\n"
" GROUP BY 1 ORDER BY 2;"
);
speedtest1_end_test();
speedtest1_begin_test(150, "json_insert()/set()/remove() on every row of J1");
speedtest1_exec(
"BEGIN;\n"
"UPDATE j1 SET x=jsonb_insert(x,'$.g',(x->>'f')+1,'$.h',3.14159,'$.i','hello',\n"
" '$.j',json('{x:99}'),'$.k','{y:98}');\n"
"UPDATE j1 SET x=jsonb_set(x,'$.e',(x->>'f')-1);\n"
"UPDATE j1 SET x=jsonb_remove(x,'$.d');\n"
"COMMIT;\n"
);
speedtest1_end_test();
speedtest1_begin_test(160, "json_insert()/set()/remove() on every row of J2");
speedtest1_exec(
"BEGIN;\n"
"UPDATE j2 SET x=json_insert(x,'$.g',(x->>'f')+1);\n"
"UPDATE j2 SET x=json_set(x,'$.e',(x->>'f')-1);\n"
"UPDATE j2 SET x=json_remove(x,'$.d');\n"
"COMMIT;\n"
);
speedtest1_end_test();
}
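/*
** To exercise just the testset above, an invocation along these lines
** should work once the "json" keyword is dispatched in main() below
** (a sketch; adjust the scale to taste):
**
**    ./speedtest1 --testset json --size 50 --stats
*/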
/*
** This testset focuses on the speed of parsing numeric literals (integers
** and real numbers). This was added to test the impact of allowing "_"
@@ -2168,25 +2308,25 @@ void testset_parsenumber(void){
const int NROW = 100*g.szTest;
int ii;
-speedtest1_begin_test(100, "parsing small integers");
+speedtest1_begin_test(100, "parsing %d small integers", NROW);
for(ii=0; ii<NROW; ii++){
sqlite3_exec(g.db, zSql1, 0, 0, 0);
}
speedtest1_end_test();
-speedtest1_begin_test(110, "parsing large integers");
+speedtest1_begin_test(110, "parsing %d large integers", NROW);
for(ii=0; ii<NROW; ii++){
sqlite3_exec(g.db, zSql2, 0, 0, 0);
}
speedtest1_end_test();
-speedtest1_begin_test(200, "parsing small reals");
+speedtest1_begin_test(200, "parsing %d small reals", NROW);
for(ii=0; ii<NROW; ii++){
sqlite3_exec(g.db, zSql3, 0, 0, 0);
}
speedtest1_end_test();
-speedtest1_begin_test(210, "parsing large reals");
+speedtest1_begin_test(210, "parsing %d large reals", NROW);
for(ii=0; ii<NROW; ii++){
sqlite3_exec(g.db, zSql4, 0, 0, 0);
}
@@ -2299,6 +2439,7 @@ int main(int argc, char **argv){
g.zNN = "";
g.zPK = "UNIQUE";
g.szTest = 100;
g.szBase = 100;
g.nRepeat = 1;
for(i=1; i<argc; i++){
const char *z = argv[i];
@@ -2408,7 +2549,7 @@ int main(int argc, char **argv){
g.bMemShrink = 1;
}else if( strcmp(z,"size")==0 ){
ARGC_VALUE_CHECK(1);
-g.szTest = integerValue(argv[++i]);
+g.szTest = g.szBase = integerValue(argv[++i]);
}else if( strcmp(z,"stats")==0 ){
showStats = 1;
}else if( strcmp(z,"temp")==0 ){
@@ -2419,8 +2560,10 @@ int main(int argc, char **argv){
}
g.eTemp = argv[i][0] - '0';
}else if( strcmp(z,"testset")==0 ){
static char zMix1Tests[] = "main,orm/25,cte/20,json,fp/3,parsenumber/25,rtree/10";
ARGC_VALUE_CHECK(1);
zTSet = argv[++i];
if( strcmp(zTSet,"mix1")==0 ) zTSet = zMix1Tests;
}else if( strcmp(z,"trace")==0 ){
doTrace = 1;
}else if( strcmp(z,"threads")==0 ){
@@ -2574,6 +2717,7 @@ int main(int argc, char **argv){
if( g.bExplain ) printf(".explain\n.echo on\n");
do{
char *zThisTest = zTSet;
char *zSep;
char *zComma = strchr(zThisTest,',');
if( zComma ){
*zComma = 0;
@@ -2581,7 +2725,20 @@ int main(int argc, char **argv){
}else{
zTSet = "";
}
-if( g.iTotal>0 || zComma!=0 ){
+zSep = strchr(zThisTest, '/');
+if( zSep ){
+int kk;
+for(kk=1; zSep[kk] && ISDIGIT(zSep[kk]); kk++){}
+if( kk==1 || zSep[kk]!=0 ){
+fatal_error("bad modifier on testset name: \"%s\"", zThisTest);
+}
+g.szTest = g.szBase*integerValue(zSep+1)/100;
+if( g.szTest<=0 ) g.szTest = 1;
+zSep[0] = 0;
+}else{
+g.szTest = g.szBase;
+}
+if( g.iTotal>0 || zComma==0 ){
printf(" Begin testset \"%s\"\n", zThisTest);
}
if( strcmp(zThisTest,"main")==0 ){
@@ -2594,6 +2751,8 @@ int main(int argc, char **argv){
testset_cte();
}else if( strcmp(zThisTest,"fp")==0 ){
testset_fp();
}else if( strcmp(zThisTest,"json")==0 ){
testset_json();
}else if( strcmp(zThisTest,"trigger")==0 ){
testset_trigger();
}else if( strcmp(zThisTest,"parsenumber")==0 ){

View File

@@ -10,7 +10,7 @@
#***********************************************************************
#
# Test cases for the ability of the query planner to cope with
-# star-schema queries on databases with goofy indexes.
+# star-schema queries.
#
set testdir [file dirname $argv0]

View File

@@ -97,6 +97,7 @@ namespace eval trd {
set build(All-Debug) {
--with-debug --enable-all
-DSQLITE_ENABLE_ORDERED_SET_AGGREGATES
-DSQLITE_ENABLE_NORMALIZE
}
set build(All-O0) {
-O0 --enable-all
@@ -122,6 +123,7 @@ namespace eval trd {
CC=clang -fsanitize=address,undefined -fno-sanitize-recover=undefined
-DSQLITE_ENABLE_STAT4
-DSQLITE_OMIT_LOOKASIDE=1
-DSQLITE_ENABLE_NORMALIZE
-DCONFIG_SLOWDOWN_FACTOR=5.0
-DSQLITE_ENABLE_RBU
--with-debug
@@ -180,6 +182,7 @@ namespace eval trd {
-DSQLITE_SOUNDEX=1
-DSQLITE_ENABLE_ATOMIC_WRITE=1
-DSQLITE_ENABLE_MEMORY_MANAGEMENT=1
-DSQLITE_ENABLE_NORMALIZE
-DSQLITE_ENABLE_OVERSIZE_CELL_CHECK=1
-DSQLITE_ENABLE_STAT4
-DSQLITE_ENABLE_STMT_SCANSTATUS
@@ -195,6 +198,7 @@ namespace eval trd {
-DSQLITE_ENABLE_FTS3=1
-DSQLITE_ENABLE_RTREE=1
-DSQLITE_ENABLE_MEMSYS5=1
-DSQLITE_ENABLE_NORMALIZE
-DSQLITE_ENABLE_COLUMN_METADATA=1
-DSQLITE_ENABLE_STAT4
-DSQLITE_ENABLE_HIDDEN_COLUMNS
@@ -311,6 +315,7 @@ namespace eval trd {
-DSQLITE_ENABLE_FTS3=1
-DSQLITE_ENABLE_FTS3_PARENTHESIS=1
-DSQLITE_ENABLE_FTS3_TOKENIZER=1
-DSQLITE_ENABLE_NORMALIZE=1
-DSQLITE_ENABLE_PERSIST_WAL=1
-DSQLITE_ENABLE_PURGEABLE_PCACHE=1
-DSQLITE_ENABLE_RTREE=1

View File

@@ -342,5 +342,21 @@ do_test 12.1.2 {
sqlite3_finalize $STMT
} {SQLITE_OK}
#-------------------------------------------------------------------------
reset_db
do_execsql_test 13.0 {
CREATE TABLE T1(a, b);
INSERT INTO t1 VALUES(1, 2), (3, 4);
}
proc trace_callback {args} {}
db trace_v2 trace_callback profile
do_test 13.1 {
db eval { SELECT * FROM t1 } {
db trace_v2 "" ""
}
set {} {}
} {}
finish_test

View File

@@ -325,6 +325,12 @@ do_eqp_test 331 {
# marked with M10d_Yes and hence prohibited from participating in the
# query flattening optimization.
#
# Updated 2025-01-02.
# https://sqlite.org/forum/forumpost/8f38fc9878a92aa9
#
# The same optimization that made Grunthos's query fast made
# Jean-Noël Mayor's query slow. Bummer.
#
reset_db
db eval {
CREATE TABLE raw(country,date,total,delta, UNIQUE(country,date));